zaqar-20.1.0.dev29/.coveragerc
==============================

[run]
branch = True
omit = zaqar/tests/*

[report]
exclude_lines =
    if _ZAQAR_SETUP__:
    raise NotImplementedError

zaqar-20.1.0.dev29/.stestr.conf
===============================

[DEFAULT]
test_path=${OS_TEST_PATH:-./zaqar/tests/unit}
top_dir=${OS_TOP_LEVEL:-./}
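
The two config files above drive the unit test and coverage tooling. As a
rough sketch of how they are typically exercised together (the exact
commands are an assumption based on common OpenStack setups, not something
this archive documents; stestr expands the ``${OS_TEST_PATH:-...}`` default
from ``.stestr.conf``, and coverage.py reads the ``[run]``/``[report]``
sections from ``.coveragerc``)::

    # Run the default unit test path from .stestr.conf
    stestr run

    # Narrow the run to one subtree via the OS_TEST_PATH override
    OS_TEST_PATH=./zaqar/tests/unit/storage stestr run

    # Branch coverage: stestr's default test command honors $PYTHON, and
    # .coveragerc omits zaqar/tests/* from the report
    PYTHON="coverage run --parallel-mode" stestr run
    coverage combine
    coverage report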
zaqar-20.1.0.dev29/.zuul.yaml
=============================

- job:
    name: zaqar-grenade-base
    abstract: true
    parent: grenade
    description: |
      Abstract base grenade job for zaqar tempest plugin
    required-projects:
      - opendev.org/openstack/python-zaqarclient
      - opendev.org/openstack/zaqar
      - opendev.org/openstack/zaqar-tempest-plugin
    vars:
      devstack_plugins:
        zaqar: https://opendev.org/openstack/zaqar
      devstack_services:
        # probably other services can be disabled;
        # the old legacy job only uses mysql, keystone, zaqar and tempest
        s-account: false
        s-container: false
        s-object: false
        s-proxy: false
        c-bak: false
      tempest_plugins:
        - zaqar-tempest-plugin
      tempest_test_regex: zaqar_tempest_plugin.tests
      tox_envlist: all
    irrelevant-files:
      - ^(test-|)requirements.txt$
      - ^setup.cfg$
      - ^doc/.*$
      - ^zaqar/hacking/.*$
      - ^zaqar/tests/.*$
      - ^releasenotes/.*$
      - ^tox.ini$

- job:
    name: zaqar-grenade-mongodb
    parent: zaqar-grenade-base
    voting: false
    vars:
      grenade_devstack_localrc:
        shared:
          ZAQAR_BACKEND: mongodb

- job:
    name: zaqar-grenade-redis
    parent: zaqar-grenade-base
    voting: false
    vars:
      grenade_devstack_localrc:
        shared:
          ZAQAR_BACKEND: redis

- job:
    name: zaqar-grenade-swift
    parent: zaqar-grenade-base
    voting: false
    vars:
      devstack_services:
        s-account: true
        s-container: true
        s-object: true
        s-proxy: true
      grenade_devstack_localrc:
        shared:
          ZAQAR_BACKEND: swift

- job:
    name: zaqar-tox-integration
    parent: openstack-tox
    description: |
      Run Zaqar integration tests using tox with environment ``integration``.
    irrelevant-files:
      - ^\.gitreview$
      - ^.*\.rst$
      - ^api-ref/.*$
      - ^doc/.*$
      - ^zaqar/hacking/.*$
      - ^zaqar/tests/unit/.*$
      - ^releasenotes/.*$
    vars:
      tox_envlist: integration

- project:
    queue: zaqar
    templates:
      - openstack-python3-jobs
      - openstack-python3-jobs-arm64
      - publish-openstack-docs-pti
      - periodic-stable-jobs
      - check-requirements
      - release-notes-jobs-python3
    check:
      jobs:
        - zaqar-tox-integration
        - zaqar-tempest-plugin-mongodb
        - zaqar-tempest-plugin-redis
        - zaqar-tempest-plugin-swift
        - zaqar-tempest-plugin-swift-ipv6
        - zaqar-grenade-mongodb
        - zaqar-grenade-redis
        - zaqar-grenade-swift
    gate:
      jobs:
        - zaqar-tox-integration
        - zaqar-tempest-plugin-mongodb
        - zaqar-tempest-plugin-redis
        - zaqar-tempest-plugin-swift
        - zaqar-tempest-plugin-swift-ipv6
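
The ``zaqar-tox-integration`` job above is a thin wrapper around a tox
environment, so its check can be reproduced locally; the three grenade jobs
differ only in which backend the shared devstack variable selects. A
minimal sketch (the local-devstack usage is an assumption drawn from the
``grenade_devstack_localrc`` block above)::

    # Local equivalent of zaqar-tox-integration (tox_envlist: integration)
    tox -e integration

    # The grenade jobs vary only the backend; in a devstack localrc this
    # corresponds to one of:
    #   ZAQAR_BACKEND=mongodb
    #   ZAQAR_BACKEND=redis
    #   ZAQAR_BACKEND=swift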
zaqar-20.1.0.dev29/AUTHORS
==========================

98k <18552437190@163.com> Abhishek Chanda Akanksha Akanksha08 Alejandro
Cabrera Alex Gaynor Andreas Jaeger Andreas Jaeger Angus Salkeld Anusree
AvnishPal Balaji Iyer Bartosz Fic Bertrand Lallau Bhagyashri Shewale Boris
Pavlovic Boris Pavlovic Bryan Davidson Béla Vancsics Cao Xuan Hoang Chad
Lung Chandan Kumar Chandan Kumar Chang Bo Guo ChangBo Guo(gcb) ChenZheng
Chenchong Qin Christian Schwede Cindy Pallares Clark Boylan Corey Bryant
Damien Ciabrini David Vossel Derek Higgins Dirk Mueller Doug Hellmann
Emilien Macchi Erik Olof Gunnar Andersson Ethan Lynn Eva Balycheva Fei Long
Wang Fei Long Wang Feilong Wang Fengqian Gao Fernando Ribeiro Flaper Fesp
Flavio Percoco Flavio Percoco Flavio Percoco Francesco Vollero Frank
Kloeker Ghanshyam Mann Graham Hayes Guoqiang Ding Hangdong Zhang He Yongli
Hervé Beraud Ian Wienand Ihar Hrachyshka James E. Blair Jeffrey Zhang
Jeremy Stanley Joe Gordon Jon Bernard Jonathan Herlin Joshua Harlow Juan
Antonio Osorio Robles Jude Cross Junyuan Leng Kai Zhang Ken'ichi Ohmichi
Kevin_Zheng Kristi Nikolla Kui Shi Kurt Griffiths Lance Bragstad Lei Zhang
LiuNanke Louis Taylor Louis Taylor Luigi Toscano Luong Anh Tuan MD NADEEM
Malini Kamalambal Malini Kamalambal Mark McLoughlin Martin André Martin
Kletzander Masaki Matsushita Matthew Treinish Md Nadeem Michael Still Mike
Bayer Mike Metral Mike Panetta Monty Taylor Nam Nguyen Hoai Nataliia
Uvarova Nate Johnston Neerja Ngo Quoc Cuong Nguyen Hung Phuong Nguyen
Phuong An Nguyen Van Trung Noorul Islam K M Obulapathi N Challa Ondřej Nový
OpenStack Release Bot Oz Akan Ozgur Akan Perry Myers Prashanth Raghu Rabi
Mishra Rafael Folco Rafael Rivero Ricardo Ferreira Ronald Bradford Rosario
Di Somma Rose Ames Rui Yuan Dou Ryan S. Brown Sascha Peilicke Sascha
Peilicke Sean McGinnis Seif Lotfy Serge Kovaleff Sergey Lukjanov Sergey
Vilgelm Shaifali Agrawal Shane Wang ShangXiao Shu Yingya Shuangtai Tian
Shuquan Huang Slawek Kaplonski Sphoorti Joglekar Sriram Madapusi Vasudevan
Stephen Finucane Steve Linabery Swapnil Kulkarni (coolsvap) Tahio Avila
Takashi Kajinami Takashi Kajinami Takashi NATSUME Takashi Natsume Tamer Tas
TheSriram Thierry Carrez Thomas Herve Thomas Herve Tobias Urdin Valeriy
Ponomaryov Van Hung Pham Victor Sergeyev Victoria Martinez de la Cruz
Victoria Martinez de la Cruz Victoria Martínez de la Cruz Vieri
<15050873171@163.com> Vu Cong Tuan Xing Zhang Xingjian Zhang Yang Shengming
YangShengMing Yatin Kumbhare Yeela Kaplan ZhaoBo Zhi Yan Liu ZhiQiang Fan
ZhiQiang Fan Zhihao Yuan Zhihao Yuan ZhijunWei ZhongShengping Zhongyue Luo
ZijianGuo abettadapur akanksha anilkumarthovi caihui changyufei
chengebj5238 chioleong cpallares daohanli dynarro e earnThis gaofei gecong
gecong1973 gengchc2 ghanshyam ghanshyam hwang inspurericzhang jolie
kavithahr kgriffs liu-sheng liusheng liushuobj liuyamin ljhuang luke.li
luqitao maaoyu melissaml miaohb mohit.mohit2atcognizant.com niuke nizam
ossanna16 pawnesh kumar pawnesh.kumar pengfei wang qings zhao rabi rahulram
rajat29 ricolin ritesh.arya root sanoojm shangxiaobj sharat.sharma tanlin
tengqm ting.wang venkatamahesh wander.way wanghao wanghao wanghui
wangxiyuan wangzihao whoami-rajat wu.shiming xywang <233652566@qq.com>
yangyapeng yangzhenyu yanyanhu yatin yushangbin zengjianfang zhang.lei
zhangboye zhangdebo

zaqar-20.1.0.dev29/AUTHORS.rst
==============================

Maintainer
----------

OpenStack Foundation
IRC: #openstack on OFTC

Original Authors
----------------

Bryan Davidson (bryan.davidson@rackspace.com)
Kurt Griffiths (mail@kgriffs.com)
Jamie Painter (jamie.painter@rackspace.com)
Flavio Premoli (flaper87@flaper87.org)
Zhihao Yuan (lichray@gmail.com)

See also AUTHORS for a complete list of contributors.

zaqar-20.1.0.dev29/CONTRIBUTING.rst
===================================

The source repository for this project can be found at:

   https://opendev.org/openstack/zaqar

Pull requests submitted through GitHub are not monitored.
To start contributing to OpenStack, follow the steps in the contribution
guide to set up and use Gerrit:

   https://docs.openstack.org/contributors/code-and-documentation/quick-start.html

Bugs should be filed on Launchpad:

   https://bugs.launchpad.net/zaqar

For more specific information about contributing to this repository, see
the zaqar contributor guide:

   https://docs.openstack.org/zaqar/latest/contributor/contributing.html
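
Since GitHub pull requests are not monitored, changes go through Gerrit via
the git-review tool described in the contribution guide linked above. A
minimal sketch of that workflow (the topic branch name is a placeholder)::

    git clone https://opendev.org/openstack/zaqar
    cd zaqar
    git review -s                 # one-time setup: configure the Gerrit
                                  # remote and install the Change-Id hook
    git checkout -b my-fix        # hypothetical topic branch
    # ...edit, commit (the hook adds a Change-Id to the message)...
    git review                    # push the change to Gerrit for review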
zaqar-20.1.0.dev29/ChangeLog
============================

CHANGES
=======

* Drop unused os-client-config
* Replace deprecated assertItemsEqual
* Fix a typo in release note
* Remove Python 3.9 support
* Remove v1 api test base
* Avoid mkdir error caused by the existing directory
* api: Resolve deprecation warnings
* tests: Set host for unit tests
* docs: Increase ulimit for Docker container
* add pyproject.toml to support pip 23.1
* Drop redundant injection of VIRTUAL\_ENV
* Remove dox.yaml
* tox: Bump minimum to v4
* doc: Update contributor guide for running tests
* devstack: Be tolerant of existing /etc/apt/sources.list.d dir
* Skip integration tests for irrelevant files
* Remove job with Ubuntu Jammy
* Update master for stable/2025.1

20.0.0
------

* Remove MongoDB from bindep
* Remove unused iso8601
* Install mongodb 7.0 for Focal
* doc: Fix OSprofiler docs
* Remove unused Babel
* Replace pytz
* Remove api vi support 2
* Remove Api V1 Support-3
* [signed\_url] secret\_key should be secret
* Skip grenade job for doc/test update
* doc: Use dnf instead of yum
* Remove remaining reference to ZeroMQ
* Use module path to load wsgi application
* devstack: Fix missing cleanup support
* Use common helper to generate uwsgi setting
* Drop rally job
* grenade: Fix missing zaqar-sql-db-manage
* Update gate jobs as per the 2025.1 cycle testing runtime
* Fix TypeError found in Python 3.12
* Replace deprecated configure\_auth\_token\_middleware
* reno: Update master for unmaintained/2023.1
* Remove default override for config options policy\_file
* Imported Translations from Zanata
* Drop unicode prefix
* Fix compatibility with falcon >= 4.0
* Drop unnecessary version check of falcon library
* Fix redundant format of enable\_deprecated\_api\_versions
* Add note about requirements lower bounds
* Use escape\_ipv6 from oslo.utils
* Remove Python 3.8 support
* Imported Translations from Zanata
* Use oslo.utils implementation to parse server format
* Replace deprecated datetime.utcfromtimestamp()
* Get rid of distutils
* Fix mongodb installation in Ubuntu 24.04
* Update master for stable/2024.2

19.0.0
------

* Imported Translations from Zanata
* Remove API V1 Support Part-1
* do not use str(url) to stringify a URL for subsequent use
* Remove dependency fallback for Python 2
* Update master for stable/2023.1
* reno: Update master for unmaintained/zed
* Remove nosetestes options
* Support authentication with Redis Sentinel
* redis: Add username
* Stop overriding install\_command
* Fix invalid parsing of IPv6 address in sentinel server
* Explicitly manage extra dependencies
* Deploy mongodb only when needed
* Do not enable bionic-security repository
* Update master for stable/2024.1
* devstack: Fix version check
* reno: Update master for unmaintained/xena
* reno: Update master for unmaintained/wallaby
* reno: Update master for unmaintained/victoria

18.0.0
------

* Replace CRLF by LF
* reno: Update master for unmaintained/yoga
* Bump hacking
* Remove integrated-gate-storage
* Update python classifier in setup.cfg
* Support asymmetric encryption RSA
* swift: Fix ignored \*\_domain\_name options
* Imported Translations from Zanata
* Remove indirect dependency (python-memcached)
* Storage URIs should be secret
* Switch to MySQLOpportunisticFixture from test\_fixtures
* Fix ci broken for Debian 12
* Update master for stable/2023.2

17.0.0
------

* Fix devstack plugin to support centos/rhel 9
* Use processutils to fix TypeError in subprocess
* fix the gateway CI error
* Add missing key for bionic-security repository
* Revert "Refactor Zaqar to satisfy openstack uwsgi goal"
* Use bionic package of libssl1.1
* Fix install of mongodb on Ubuntu 22.04
* Change StrictRedis usage to Redis
* Refactor Zaqar to satisfy openstack uwsgi goal
* Unblock CI by fixing libssl url

16.0.0
------

* Optimize devstack plugin
* Update deprecated zuul syntax

16.0.0.0b1
----------

* Switch to 2023.1 Python3 unit tests and generic template name
* Update master for stable/zed
* Cleanup py27 support
* remove unicode prefix from code
* Replace abc.abstractproperty with property and abc.abstractmethod

15.0.0
------

* Fix API DOC
* Add python3.8 requirement
* Add Python 3 only classifier
* Remove lower-constraints job/tripleo job
* Update json module to jsonutils
* Update master for stable/yoga

14.0.0
------

* [Part 10] Remove all usage of six library
* Fix tempest error
* Add release notes for xena
* Run TripleO jobs on CentOS8 instead of CentOS7
* Update master for stable/xena

13.0.0
------

* Replace deprecated arguments of RequestContext
* Replace deprecated import of ABCs from collections
* Changed minversion in tox to 3.18.0
* Fix confirmation api doc missing
* [community goal] Update contributor documentation
* [doc]Update IRC Info
* Decode bytes to strings
* Update swift client in Zaqar
* setup.cfg: Replace dashes with underscores
* Add release notes foldler for wallaby
* Update master for stable/wallaby
* Update the requirement of oslo db
* Support extra specs to subscription confirming

12.0.0
------

* [goal] Deprecate the JSON formatted policy file
* Imported Translations from Zanata
* requirements: Drop os-testr
* Use unittest.mock instead of third party mock
* Fix hacking min version to 3.0.1
* update lower constraints
* Use TOX\_CONSTRAINTS\_FILE
* [Part12] Remove six
* [Part7] Remove six
* [Part1] Remove six
* [Part4] Remove six
* [Part6] Remove six
* [Part5] Remove six
* [Part2] Remove six
* [Part3] Remove six
* [Part9] Remove six
* [Part8] Remove six
* [Part11] Remove six
* requirements: Drop os-testr
* Fix the issue that the subscription api always returns unconfirmed
* Fix the messages get bug
* Add releasenotes for victoria
* Update master for stable/victoria

11.0.0
------

* [goal] Migrate testing to ubuntu focal
* Cleanup the gate scripts after the Zuul v3 migration
* Native Zuul v3 version of the grenade jobs
* Native Zuul v3 version of tempest and rally jobs
* Fix SSLError caused by not passing the cafile
* Fix the issue that the function unpackb has no encoding option
* Encrypted Messages in Queue
* Remove neutron-fwaas from the jobs' required project
* Stop to use the \_\_future\_\_ module
* Cap jsonschema 3.2.0 as the minimal version
* Switch to newer openstackdocstheme and reno versions
* Remove translation sections from setup.cfg
* Add realeasenotes index for ussuri cycle
* Doc cleanups
* Update hacking for Python3

10.0.0
------

* Support query queues with count
* Fix the incompatibility for python3
* [ussuri][goal] Drop python 2.7 support and testing
* Imported Translations from Zanata
* Update master for stable/train
* [train][goal] Define new 'zaqar-tempest-swift-ipv6' job

9.0.0
-----

* Add zaqar-specs link to readme.rst
* Quick fix for v1.1 api schema
* Modify api schema for remove pool group
* Fix api-ref link
* Fit the StopIteration for py37
* Update api-ref location
* Imported Translations from Zanata
* Update master for stable/stein
* Fix exception mishandling
* Switch to using stestr directly
* Unblock gate failures
* Retire neutron-lbaas
* Rename review.openstack.org to review.opendev.org
* Imported Translations from Zanata
* Dropping the py35 testing
* Fix Sphinx formating errors
* OpenDev Migration Patch
* Replace openstack.org git:// URLs with https://

8.0.0
-----

* Introduce the Topic resource into Zaqar-1
* Add releasenotes for bp remove-pool-group-totally
* Python 3: Fix parsing of received notification
* The instructions on README.rst to create a sample queue was outdated
* Imported Translations from Zanata
* Remove the pool group totally
* Replace tripleo-scenario002-multinode with scenario002-standalone
* Add .stestr.conf configuration
* Miss name filter in querying queues
* Add zaqar-status upgrade check command framework
* Support delete messages with claim\_ids
* Update mailinglist from dev to discuss
* Fix redis CI job
* Fix python3 Compatibility for hashlib md5 update
* Missing response parameters in API DOC
* Fix python3 Compatibility for urllib
* Increase sleep in test\_delay
* Update min tox version to 2.0
* Imported Translations from Zanata
* Imported Translations from Zanata
* Imported Translations from Zanata
* Increment versioning with pbr instruction
* Update link for blueprints of zaqar-ui
* Cleanup .zuul.yaml
* Imported Translations from Zanata
* Update the client version in samples
* Fix lower constraints job
* add python 3.6 unit test job
* switch documentation job to new PTI
* import zuul job settings from project-config
* Using smtplib for Zaqar mail delivery
* [trivial] fix some typo
* Update reno for stable/rocky

7.0.0
-----

* Update os\_client\_config to openstack.config
* fix tox python3 overrides
* Fix wsgiref py3 error
* Handle bytes and str in py3

7.0.0.0b3
---------

* Remove setting of DEVSTACK\_GATE\_EXERCISES
* Remove pool group from zaqar for api-ref
* Fix assert error during queue creating in ut
* Remove some unused lib
* Update the Bugs link for triage
* Fix syntax errors
* Remove format constraint of client id
* Update the method's parameters
* ignore linter error for 'long' type
* replace windows line endings with unix format
* CONF file structure refactor
* Remove ignore D000 in validation code
* Fix format
* Make doc more clearness
* Update pypi url to new url
* Add release notes link to README
* Follow the new PTI for document build
* Adding Swift backend Region support
* Update the lower-constrainsts
* Update auth\_url in install docs
* Update docs to properly describe delete with pop

7.0.0.0b1
---------

* Move openstackdocstheme to extensions in api-ref
* Update auth\_uri option to www\_authenticate\_uri
* Support query filter in queue
* Fix one issue in UT
* Updated from global requirements
* Use rest\_status\_code for api-ref response codes
* add lower-constraints job
* Updated from global requirements
* Pypy is not checked at gate
* Updated from global requirements
* Add reserved metadatas for dead letter queue
* Updated from global requirements
* Fix claims on non-existing queue on swift
* Update links in README
* Wrap subscriber IP in square brackets when IPv6
* Fix assert error during queue creating in ut
* Imported Translations from Zanata
* Imported Translations from Zanata
* Nit update in jenkins.rst document
* Update reno for stable/queens
* Modify grammatical errors

6.0.0
-----

* The doc of bp support-md5-of-body
* Support md5 of message body
* Add heat job to zaqar check pipeline
* Add some missing releasenotes for Queens
* Revert "Support md5 of message body"
* Support md5 of message body

6.0.0.0b3
---------

* Support redis as mgmt storage backend
* Replace curly quotes with straight quotes
* Updated from global requirements
* Remove use of unsupported TEMPEST\_SERVICES variable
* Remove pool group from zaqar
* Update .zuul.yaml
* Imported Translations from Zanata
* Updated from global requirements
* msgpack-python has been renamed to msgpack
* Update the install guide about policy json file
* Remove the deprecated "giturl" option
* Modify delay queue api-doc description
* Redis connection support password configure in zaqar
* Updated from global requirements
* Test delay queues
* Convert zaqar-tox-integration to native Zuul v3
* Support delayed queues for mongo
* Replace pymongo deprecated api
* Skip period test for message
* Doc of delayed queues

6.0.0.0b2
---------

* Updated from global requirements
* Support delayed queues for swift
* Support delayed queues for redis
* update devstack README.rst with more info
* tox -e docs: Line too long
* Update the documentation link
* Zuul: add file extension to playbook path
* Updated from global requirements
* Missing claim\_count in mongodb MessageController \_basic\_message
* Remove setting of version/release from releasenotes
* Fix gate job failure
* zuul: run TripleO jobs with new zuulv3 layout
* Updated from global requirements
* Policy in code: Update the related doc
* Updated from global requirements
* Add input type check
* Clean up useless code
* Updated from global requirements
* Do not use “-y” for package install
* Use generic user for both zuul v2 and v3
* Remove the remaining flavor tuple in test\_put\_auto\_get\_capabilities test
* Register default subscription policies in code
* Reduce swiftclient logging
* Use dict.keys() for key iteratation
* Register default pools and health policies in code
* Create subscription return error id
* Cleanup test-requirements
* Metadata zero value will skip valid check
* Migrate to Zuul v3
* Add dbid parameter to pools's uri based on redis
* Remove bundled intree tempest plugin from Zaqar
* Register default claim and flavors policies in code
* Register default message policies in code
* Add more backoff functions
* Imported Translations from Zanata
* Move to Zuulv3 link to check status

6.0.0.0b1
---------

* Register default queue policies in code
* flavor.update's parameter capabilities error in flavor.create
* Correct descriptions of install doc
* delete some no use code
* Update message api-ref
* Escape IPv6 address in square brackets
* Updated from global requirements
* fix a typo for retry policy
* Implement basic policy module in code
* Updated from global requirements
* Correcting typo on doc
* Adding folder path
* Delete some duplicate code of mongodb/claims.py
* Correct some typos
* Port Rally Task to format v2
* Retry container creation in swift
* Remove deprecated context usage
* Updated from global requirements
* Fix gate-grenade jenkins failure
* Remove iso8601\_from\_timestamp
* Remove "enabled" in CORS guide
* Reduce duplication in swift post messages
* Replace http with https for doc links
* [api-ref]Change 'queues' to required in response body
* Fix mongodb scoped\_query's param note
* Fix Py27/35 jenkins failure
* Remove usage of parameter enforce\_type
* Correct typo of zaqar index doc page
* Imported Translations from Zanata
* Update reno for stable/pike

5.0.0
-----

* Fix unconfirm for swift backend
* Fix claims for redis when enabling DLQ
* Use doc8 check
* Fix claims of MongoDB when enabling DLQ
* Add some more uwsgi options for zaqar-uwsgi
* Notification Delivery Policy
* Fix bug in mongodb backend for dead letter queue
* Update the dead letter queue's releasenote
* Fix tempest test\_set\_and\_get\_queue\_metadata
* Fix api doc in creating pre-signed url
* Remove install-guide env which is no longer effective
* Support dead letter queue for swift
* Add project info to contributor doc
* Remove Deprecation tempest Warning
* Updated from global requirements
* Migrate configuration docs
* Stop using deprecated 'message' attribute in Exception

5.0.0.0b3
---------

* Fix a small bug in dead queue patch
* Replace uuid.uuid4().hex with uuidutils.generate\_uuid()
* Replace mongodb backend pymongo deprecated API
* Update and optimize documentation links
* Fix message claim expires problem in swift storage
* Updated from global requirements
* Support dead letter queue for redis
* Refactor the doc tree
* Trivial Fix in api-ref
* Update URL home-page in documents according to document migration
* Updated from global requirements
* Support dead letter queue for MongoDB
* Fix mongodb list method's param note
* Use mongodb delete\_one method instead of deprecated remove method
* Add log hacking rules
* Minor cleanup on conf.py files
* Fix html\_last\_updated\_fmt for Python3
* Revert "Remove startup check from devstack"
* Remove obsolete user-guide
* Switch from oslosphinx to openstackdocstheme
* Add release note: allow configuration of websocket notification
* Remove unused None from dict.get()
* Allow configuration of websocket notification
* Remove unnecessary parameter in Zaqar
* Using assertFalse(A) instead of assertEqual(False, A)
* Updated from global requirements
* Fix jenkins link in running-tests
* Update subscription confirm doc
* Fix some typos in zaqar
* Updated from global requirements
* Fix the official Doc site of CORS
* API Ref bug about the mailto protocol
* Remove log translations
* modify doc of Claim messages
* Use oslo\_db for create\_engine

5.0.0.0b2
---------

* Ignore dead subscriptions in Swift backend
* Remove unused methods from the swift backend
* Simplify SQL run() method
* Support dot character in queue's name
* Optimize the link address
* Remove usage of parameter enforce\_type
* Replace deprecated oslo.messaging.get\_transport with get\_notification\_transport
* Updated from global requirements
* Updated from global requirements
* [Swift] Fix 503 error when get queue stats
* Correct the trace name for redis
* Use absolute path for executables
* Fix OSProfiler issue for Swift and Redis driver
* Revise text about py34
* Replace test.attr with decorators.attr
* Fix queues actions for pooling
* Fix swift messages posting when pooling
* Fix failure when adding swift/redis as pool
* Updated from global requirements
* Add missing modules to config generator
* swift driver: rely on keystone session for swift connection
* Add domain configuration in Swift backend
* Add .idea pattern to .gitignore
* Fix list detailed queue without reserved metadata
* Remove redundancy code
* Updated from global requirements
* Fix detailed missing in api-ref

5.0.0.0b1
---------

* Automatically get pools capabilities for flavor
* Check if swift is alive instead of hardcode
* Remove enabled option for CORS
* Using generic instead of v3 for identity
* Fix some reST field lists in docstrings
* Support CORS
* Fix the link of 'Message Queuing API v2'
* Use HostAddressOpt for opts that accept IP and hostnames
* Updated from global requirements
* Move auth and osprofiler to middleware folder
* Don't share swift client instances
* Add missing content type in Swift objects
* Fix claim ttl test
* Updated from global requirements
* Introduce Guru to Zaqar
* Switch to use stable data\_utils
* Make import clearly in i18n.py
* Use Sphinx 1.5 warning-is-error
* Fix api-ref building
* [Fix gate]Update test requirement
* Remove unused import
* Support cold migration
* Remove table creation from the SQL driver
* Set wsgi name to application
* Filter empty messages when creating claims in Swift
* Check body type when patching subscription
* Ignore conflict when deleting queues with Swift
* Updated from global requirements
* Subscription Confirmation Support-3
* Using cp insteald ln for zaqar-ui
* Address TODO in schema
* Switch to use test\_utils.call\_until\_true
* Remove the Warning log
* Replace github with git.openstack.org
* Sync the resource string
* Add the missed decorator to pools
* Remove "TODO" as already support gettext
* Using os-testr to get better report
* Fix typo
* Use auth\_type instead of auth\_plugin
* Update reno for stable/ocata
* Add Swift backend releasenote

4.0.0.0rc1
----------

* Update the purge exception catch
* Fix sqlalchemy migration
* Enable swift in the gate hook
* Updated from global requirements

4.0.0.0b3
---------

* Switch to decorators.idempotent\_id
* Add validation of limit value in querying flavors and poolss
* Add the release note for OSprofiler feature
* Remove py34 support in favour of py35
* Make the log work when deploy Zaqar with uwsgi
* Fix wrong value of require\_confirmation
* Replace assertTrue(\*>\*) with assertGreater
* Use \_LW() to wrap the warning message
* Fix typos in HACKING.rst
* Swift storage
* Fix unit test unstable issue
* don't allow get/update/delete subscirtions with wrong queue
* Removes unnecessary utf-8 encoding
* Updated from global requirements
* Use pop in message\_delete\_many
* Enable coverage report in console output
* Add tempest test for purge queue
* Update README.rst
* Fix CI failure
* Add API ref of purging queue
* insert Apache 2.0 license into source file
* Support purge queue -- websocket

4.0.0.0b2
---------

* Clean transport logs for API endpoints
* Add support for sqlalchemy migration based on alembic
* Support purge queue -- wsgi
* Convert user getting started guide to rst
* Add OSprofiler guide
* Updated from global requirements
* Updated from global requirements
* Support to query single resource with pre-signed url
* Show team and repo badges on README
* Remove startup check from devstack
* Use KEYSTONE\_IDENTITY\_BACKEND
* Consistent with the project code
* Fix SSL and verbose issue of zaqar bench
* Integrate OSprofiler with Zaqar

4.0.0.0b1
---------

* Fix an typo in flavors.inc
* Fix zaqar client gate
* Use build interval and timeout from conf
* Fix zaqar-ui clone issue
* Add Feature section in doc
* Reword description of grace
* Add missing policy check for queue stats and share
* Don't send confirmation notification if it's not enabled
* Add some descriptions for resources in API Ref
* Updated from global requirements
* Update .coveragerc after the removal of openstack directory
* Enanble Zaqar-UI in devstack
* Fix doc build if git is absent
* Enable release notes translation
* Updated from global requirements
* Fix a typo: remove redundant 'the'
* Change assertTrue(isinstance()) by optimal assert
* Subscription Confirmation Support-2
* Update reno for stable/newton
* Don't check key format in mongodb

3.0.0.0rc1
----------

* Remove small duplication from message storage
* Consolidate constants to a module
* [api-ref] Remove temporary block in conf.py
* Officially deprecate API v1.1
* Fix links in devstack/README.rst
* Disable DEPRECATED API versions by default
* Updated from global requirements
* Config logABug feature for zaqar api-ref
* Clean imports in code

3.0.0.0b3
---------

* Using oslo\_config's min attribute
* Remove openstack-doc-tools
* Use upper constraints for all jobs in tox.ini
* Get ready for os-api-ref sphinx theme change
* Subscription Confirmation Support-1
* Fix a minor error in API reference
* Add install guide for ubuntu and suse
* Remove the useless function
* Don't check for project-id if accessing the / route
* Adding Negative tests cases to Zaqar v2
* Start accepting \*\*kwargs in service clients
* Fix errors in API reference for message resource
* modify the home-page info with the developer documentation
* Add flavors api ref
* Support \`messages\` API ref
* Cleanup i18n marker functions to match Oslo usage
* Updated from global requirements
* This patch removes extra spaces from files

3.0.0.0b2
---------

* Fix falcon.HTTPConflict initialization getting error
* Add Python 3.5 classifier and venv
* Remove docs,comments,references to Oslo Incubator
* Fixes: Typo in comments
* Add a trust notifier task
* Ensure queue exists before get/update the claim
* Add pools api ref
* Disable warnerrors=true in setup.cfg
* Add \`Pre-signed queue' api ref
* Config: no need to set default=None
* Updated from global requirements
* Fix issue with subscription list in Redis
* Remove tempest smoke tests run
* (redis)Fix the 503 error when get/delete messages
* Add \`Health\` api ref
* Fix the wrong check of X-PROJECT-ID
* Add subscription api ref
* Cleanup integration tests setup
* Clean the auth app after authentication failure
* Add Claims for api ref
* Let v2 tempest base on the base tests
* Fix debug log missing issue
* Remove leftovers on websocket disconnections
* Fix config parsing in zaqar-server
* Updated from global requirements
* Fix post install jobs
* Relocates cli options registration from bootstrap to server
* Delete in tree config ref
* Add release note for bug 1590173
* Add Queues for api ref
* Add Zaqar installation guide
* Fix incorrect message when getting non-existing claim
* Make queues lazy in subscriptions
* Add release notes
* Use is\_valid\_ipv6 from oslo.utils
* Use debug instead of warning for pipeline implement error
* Fix subscription limit per queue
* Fix fail of getting claim after claim expire(Redis)
* Make the notifier max\_workers configurable
* Show 'age' field in subscriptions (redis)
* Show 'age' field in subscriptions (mongodb)
* Add reserved metadata check
* Support Tempest test as gate job
* Avoid multi-threading problem with sql backend
* Fix fail of claim after some message expire(Redis)

3.0.0.0b1
---------

* Always use pip constraints
* Init commit to add API ref in tree
* Updated from global requirements
* Define context.roles with base class
* Use run() wrapper instead of connection.execute()
* Improve the log to avoid unexpected ERROR
* Rename notification/task to notification/tasks
* Replace tempest-lib with tempest.lib
* Refactor zaqar/storage/utils.py to respect DRY
* remove verbose option in devstack plugin
* Fix falcon confilct in requirements.txt
* Fix py34 gate job
* Move to falcon middleware
* Changed assertTrue(isinstance()) to assertIsInstance
* Trivial: remove openstack/common in flake8 exclude list
* Fix cache decorator tests
* Imported Translations from Zanata
* Updated from global requirements
* Fix wrong claim error message
* Default message ttl is needed to change correctly
* Fix auth issue when accessing root path "/"
* Make sure use IPv6 sockets for Zaqar in IPv6 environment
* Show default attributes for queue
* Updated from global requirements
* Added samples in multiple languages
* Updated from global requirements
* Fix tempest tests list
* Fix service\_available opt registration and extra comma
* expires should be checked when using pre-signed url

2.0.0
-----

* Validate PUT of reserved queue attributes metadata
* Validate PUT of reserved queue attributes metadata
* Warn on upcoming deprecations for v1.0 and v1.1
* Generate oslo.cache options via tox -e genconfig
* Warn on upcoming deprecations for v1.0 and v1.1
* Update reno for stable/mitaka
* Update .gitreview for stable/mitaka

2.0.0.0rc1
----------

* Fix PATCH queue's metadata
* Remove unnecessary assert
* Don't pass debug option to websocket constructor
* Fix cleanup in metadata putting test
* Updated from global requirements
* Renew subscription on update with TTL
* Add missing /ping for v1.1 homedoc
* Forbid update subscription to duplicate
* Fix updating subscription options in Redis driver
* Updated from global requirements

2.0.0.0b3
---------

* Support metadata update issue for v2
* Fix the TTL issue of subscriptions for MongoDB
* Fix the TTL issue of subscriptions for Redis
* Fix freeze on some requests
* Add \`\_max\_messages\_post\_size\` and \`\_default\_message\_ttl\` for queue
* Use assertEqual/GreaterEqual/LessEqual
* Remove unused pngmath Sphinx extension
* Updated from global requirements
* Fix validation in websocket unit tests
* Add tempest test for wsgi v2
* Add description to 404 and 409 error responses
* Support binary messages over websocket
* Add tempest test for v1.1
* Fix the tempest test error
* Catch NoPoolFound on resource listing in transport
* Fix arguments order in error\_response (Websocket)
* Fix cross-transport error handler
* "group" word should be used instead "pool"
* Fix subscriptions\_post resource in wsgi 2 homedoc
* Updated from global requirements
* Add zaqar tempest plugin
* Update operation should return updated result
* Fix 500 error when create pools in wsgi v2
* Get non-existing queue should return 200
* Ensure hmget return right data format
* Don't return links if subscriptions are empty
* Updated from global requirements
* Add queue name to notifications
* Use Client-ID in devstack ping request to Zaqar
* Fix improperly LOG using in Zaqar
* Add the missing Client-ID check in wsgi v2
* Make error response better for patching flavors
* Use uppercase 'S' in word "OpenStack"
* Don't return links if subscriptions are empty
* Update homedoc in wsgi v2
* Updated from global requirements
* Fix typo in test\_on\_mesage\_with\_invalid\_input
* Replace six.iteritems(dict) with dict.items()
* Ensure JSON sent over websocket is a dictionary
* Imported Translations from Zanata
* Updated from global requirements
* Make enforce\_type=True in CONF.set\_override
* Remove argparse from requirements
* Use default pool for queue listing
* Add new information to zaqar-bench tutorial
* Fix zaqar-bench auth method detection

2.0.0.0b2
---------

* Add tutorial for writing storage pipeline stages
* Fix gate issues
* Fix requirements of message\_delete\_many API
* Replace deprecated library function os.popen() with subprocess
* Make zaqar-bench use credentials from os\_client\_config
* Updated from global requirements
* Make websocket.html process notifications
* Refactoring of docs during Mitaka cycle
* Make enforce\_type=True in CONF.set\_override
* Move subscription test from v1\_1 to v2
* Use assertTrue/False instead of assertEqual(T/F)
* Move in-line function outside method
* Updated from global requirements
* Replace deprecated LOG.warn with LOG.warning
* Fix the docs link
* Fix for the deprecated library function
* Changing LOG.warn to LOG.warning
* Trival: Remove unused logging import In some files
* remove python 2.6 trove classifier
* Replace assertEqual(None, \*) with assertIsNone in tests
* Deprecated tox -downloadcache option removed
* Updated from global requirements
* Imported Translations from Zanata
* Fix zaqar-bench not working
* Updated from global requirements
* Explicitly use asyncio protocol for websockets
* Put py34 first in the env order of tox

2.0.0.0b1
---------

* Fix duplicate auth\_section issue
* Fix queue create failure when metadata is {}
* Add unreleased notes for releasenotes tree
* Remove version from setup.cfg
* Drop MANIFEST.in - it's not needed with PBR
* delete python bytecode before every test run
* Updated from global requirements
* Add the default repos into devstack/settings file
* Move development.environment.rst to standard location
* Replace "MARCONI" with "ZAQAR"
* Fix typo
* Automatically drop functional tests databases
* Added CONTRIBUTING.rst file
* Remove netaddr useless requirement
* Fix write concern check for py34
* Fix tox not performing some tests in some Envs
* Updated from global requirements
* Updated from global requirements
* Add reno for release notes management
* Imported Translations from Zanata
* Use oslo\_config new type PortOpt for port options
* Some prerequisites for Fedora OS is missing in zaqar README.rst
* Fix mongodb.conf corruption during stack.sh
* Correctly stop uwsgi process during unstack.sh
* Add oslo\_config IPOpt support
* Use keystonemiddleware in config generation conf
* Fix client constructor for zaqar-bench
* Updated from global requirements
* WebSocket functional tests
* Some prerequisites for Ubuntu OS is missing in zaqar README.rst
* Make websocket run in correct logging mode
* Fix incorrect usage of assertEqual() in some tests
* FIX TypeError for list queue command
* Updated from global requirements
* Adding namespace for [drivers] section in oslo-config-generator
* Updated from global requirements
* Notifications over websocket

1.0.0
-----

* Imported Translations from Zanata
* Removes redundant debug option
* The \`subtype\` formatting arg does not exist
* Fix order of arguments in assertEqual
* Fix order of arguments in assertEqual
* Fix order of arguments in assertEqual
* Fix order of arguments in assertEqual
* Fix order of arguments in assertEqual
* Fix order of arguments in assertEqual
* Fix order of arguments in assertEqual
* Fix order of arguments in assertEqual
* Fix order of arguments in assertEqual
* Fix order of arguments in assertEqual
* Fix order of arguments in assertEqual
* Fix order of arguments in assertEqual
* Imported Translations from Zanata
* Cleanup of Translations
* Cleanup of Translations
* Use decorator instead of request logging in responders
* Switch to oslo\_cache
* Open Mitaka development

1.0.0.0rc1
----------

* Updated from global requirements
* Add note for unreliable in README.rst
* Adds max failure rate limit
* Allow running zaqarclient for gate tests
* Close backend connections in tests
* Fix few typos in headings
* Updated from global requirements
* Max failure percent not longer needed for Rally gate
* Imported Translations from Zanata
* Correct "log\_file" option in zaqar documents
* Fix devstack option to plugin zaqar
* Fix "message\_store" config option in zaqar doc
* Fix redis tests
* Fix usage of mongo test URL
* Fix duplicated queues on multi pools
* Add Redis backend docs
* RBAC support based on oslo.policy
* Use a Text field instead of Binary
* Prevent mutation of out-of-scope args
* Don't raise 404 on missing queues
* Add a global error handler for wsgi
* Ensure group exists on updates
* Fix error messages formatting
* Don't create a connection on constructor
* Rename get\_group to get\_pools\_by\_group
* Enable pooling for redis' job
* Don't set keystone as auth if not enabled
* Fix 500 error when adding pool to wrong group
* Let devstack redirect output
* Hide project id for flavor
* Fix accurate links in README.rst
* Refactoring to make Websocket driver available in v2
* Remove race condition in test\_claim\_lifecycle
* Fix typos in sqlalchemy flavor.py
* Don't register the default pool
* Fix duplicated notification
* Add email driver for notification
* Updated from global requirements
* Use thread executor all the time for notifier
* Support various paths for signed URLs
* Add unittests for Flavors in sqlalchemy
* Restore sqlalchemy unit tests

1.0.0.0b3
---------

* s/update/\_update/ sqlalchemy's pool controller
* Add support for flavors in sqlalchemy
* Updated from global requirements
* Return empty \`links\` on empty collections
* Fixed the mistake in zaqar dev-environment
* Update 'doc/source/development-environment.rst'
* Removing unused dependency: discover
* Fix GET queue metadata for sqlalchemy
* Make False as default value for \`unreliable\`
* Don't generate docs for zaqar.transport.wsgi.app
* Updated from global requirements
* Support signed URLs in WebSocket
* Fix typos in test\_suite.rst and test\_utils.py
* The link for plugin samples is updated
* Mistakes in the sentences are corrected
* Set a secret key in devstack
* Update README.rst in zaqar project
* Fix WS deauthenticate
* API v1.1 Claims endpoints
* Move \_(get|inc)\_counter out of QueueController
* Imported Translations from Transifex
* Updated from global requirements
* Fix WS endpoint registration
* Use the executor directly in notifier
* Authentication for websocket
* Refactor notification framework
* Add a websocket example
* Add configuration to run service using uwsgi
* Imported Translations from Transifex
* Updated from global requirements
* Remove unused oslo-incubator modules
* Remove mock side\_effect in test\_messages
* Imported Translations from Transifex
* Set auth\_strategy in devstack
* Enable tox debug
* Fix validation errors handling in websocket
* Move to using the futurist library for async executors

1.0.0.0b2
---------

* Skip tasks run when there is no subscriber
* Updated from global requirements
* Imported Translations from Transifex
* Decouple the queue and subscription
* Return the websocket response on invalid request
* Return subscription ID on create
* Handle missing document in subscription operations
* Send message notifications as json
* Enable message pipeline on devstack
* Fix enabling of Zaqar in Devstack plugin
* Update development documentation
* Update README
* Fix test failure linked to new mock release
* Imported Translations from Transifex
* Allow config of mongo test url using env variable
* Add signed URL verification
* Add pre-signed url generation endpoint
* Move common configs under common.configs
* Updated from global requirements
* Validate UUIDs before the driver sends them
* Update development-environment.rst guide
* Make errors preventing driver load more useful
* API v1.1 Messages endpoints
* Satisfy abstract base by adding queue\_controller

1.0.0.0b1
---------

* Add support for pymongo 3.0.2
* Move functional tests out of tests/
* Imported Translations from Transifex
* Update version for Liberty

1.0.0a0
-------

* Added 'pool name' and 'flavor name' to response
* Imported Translations from Transifex
* Move all remaining unit tests out of tests/
* Move storage unit tests out of tests/
* Move transport tests out of tests/
* Remove unused \`pooling:catalog:storage\` option
* Try to use a default pool when creating a queue
* Split \`storage\` into (message|management)\_storage
* Add missing doc files
* Remove the sqlite entry point
* Remove messages and claims from sql code
* Move transport v1 tests out of tests/
* Move transport v1\_1 tests out of tests/
* Deprecate v1
* Remove \`marconi-server\`
* Move transport v2 tests out of tests/
* Speed up message expiration tests
* Add cleanups to several tests
* Imported Translations from Transifex
* Clarify code comment in Redis backend
* s/Marconi/Zaqar/ in method docstring
* Switch from MySQL-python to PyMySQL
* Port to oslo\_log
* Drop incubating theme from docs
* Enable pooling in devstack
* Imported Translations from Transifex
* Delete unused README links
* Replace ci.o.o links with docs.o.o/infra
* Imported Translations from Transifex
* Using oslo.context
* Set unreliable True
* Fix gate\_hook and post\_test\_hook perms
* Drop use of 'oslo' namespace package
* Add gate hooks to enhance Zaqar's job
* Add function dispatchers
* Update dataplane section name
* Remove support for sqlalchemy in devstack
* Rename service to zaqar
* Install packages from plugin.sh
* Fix duplicated test cases of notifier

2015.1.0
--------

* Move devstack's Zaqar code into the codebase
* Release Import of Translations from Transifex
* update .gitreview for stable/kilo
* Updated from global requirements

2015.1.0rc1
-----------

* Updated from global requirements
* Open Liberty development
* Adds a test for queues validations
* makes uri unique to avoid duplicated pools
* Remove QueueController from data to control plane
* Fix instructions in README.rst file
* Imported Translations from Transifex
* Add websockets to our test suite
* Imported Translations from Transifex
* Implement webhook notifier driver
* Adds message processing to WebSockets driver
* Implement redis driver for notifications
* Add REST API for notification/subscriptions
* Allow for internal pipelines to be defined
* Imported Translations from Transifex
* Remove sqlalchemy from our data plane
* Correcting Instruction For Mongodb URI in README.rst

2015.1.0b3
----------

* Updates the readme.rst file to add dependencies steps
* Updated from global requirements
* Update json schema for API v1\_1
* Fix common misspellings
* Use generate\_uuid() from oslo.utils
* Make the Pipeline inmutable
* Adds a status code field to the Response
* This change adds missing help strings for claims
* Adds a representation for the Request and Response objects
* Clean up inconsistent if statement syntax in transport auth
* Adds websockets driver to Zaqar server
* Syntax enhancements
* Updated from global requirements
* Updated from global requirements
* Imported Translations from Transifex
* API handler and API v1.1 Queue endpoints
* Use pool's capabilities for flavors in v2
* Split MongoDB's default implementation from FIFO's
* Forbid adding stores with mismatching capabilities
* Replace API v1 with v1.1 in README
* Expose healt in homedoc

2015.1.0b2
----------

* Imported Translations from Transifex
* Refactor Api classes to enhance consistency
* Updated from global requirements
* Move oslo libraries out of the oslo namespace
* Check marker before accessing it
* Implement mongodb driver for notifications
* Updated from global requirements
* Fork v1\_1 of the API into v2
* Invalid grave accents at the end of the mongodb installation URLs
* Fix the last assertion in test\_impl\_mongodb.test\_message\_counter
* Version discovery for root URI
* Imported Translations from Transifex
* Use the oslo.serialization instead of openstack/common/jsonutils
* Use keystoneclient auth and register required options
* Clean up the 'queues' package for tests
* Removed includes for zaqar.config.sample from doc

2015.1.0b1
----------

* Temporally remove Sphinx from test-requirements-py3
* Updated from global requirements
* Slighty refactoring for the cross api spec
* Fix the duplicate lines in autoindex doc
* Clean up pooling meta-controllers
* Wrap abstract method with base methods
* Add capabilities property to the DataDriver
* Split Control and Data planes of Storage layer
* Expose pools and flavors in homedoc
* Imported Translations from Transifex
* Removes ZMQ directory
* Refactor code structure for notification
* Updated from global requirements
* Don't register pool endpoints when pooling=False
* Workflow documentation is now in infra-manual
* Replace sqlite with sqlalchemy
* Document mongodb's driver deployments
* Rename rally-scenarios/ to rally-jobs/
* Add rally job related files
* Add instructions on generating sample config
* Make the refrence to storage.errors to be consistent
* Used a pooled config for health
* Correctly stop functional servers
* Test message delete when claim expires
* Remove the outdated openstack common modules
* Update oslo-config-generation code
* Claim post requires a body
* pooling conf is using a wrong deprecated conf
* Remove config sample and generate it using code
* Let the test client serialize request's body
* Imported Translations from Transifex
* Always include the project id in the logs
* remove the old name conf file
* Capture the stdout and stderr based on environment
* Updated from global requirements
* Why do we increment the counter after the insert?
* Updated from global requirements
* Imported Translations from Transifex
* Put a cap on our cyclomatic complexity
* Updated from global requirements
* Fix ttl and grace story for claims updates
* Fix the exception raised when running test by nose
* Sync the zaqar.conf.sample file
* Fix the doc typo
* Imported Translations from Transifex
* Fix no links property in flavor list response
* Fix no links property in pool list response
* Queue's empty list should return 200 and an object
* Remove all traces of nose
* Reduce duration of some long-running tests
* Add support for redis-sentinel
* Fix the param name typo
* Updated from global requirements
* Fix the typo in the description of the write concern error
* Move marker field to the first position (index)
* Imported Translations from Transifex
* Add first reliability enforcement
* Move the module reference to the top
* Imported Translations from Transifex
* Open Kilo development

2014.2.rc1
----------

* Add a pool\_group to pools in v1.1
* Fix coverage testing
* Don't skip pools' functional tests
* Accept existing conf in dynamic\_conf
* Improve efficiency of the redis driver when posting messages
* Move Redis driver's claim transaction to Lua
* Fix typo in README
* Use OpenStack Incubated branding for docs
* Imported Translations from Transifex
* V1.1: Add id property to messages in claims
* Fix regression: No handlers could be found for logger when start
* Fixes docstring typos
* Don't register options in \_\_init\_\_
* Updated from global requirements
* Replace misleading max\_message\_size option name
* Enable ZAQAR\_TEST\_SLOW for py27
* Catch Exception instead of RuntimeError for driver
* Fix the msg encode/decode error
* Imported Translations from Transifex
* Added checks enforcements rule to the developers manual
* Add genconfig to tox.ini
* Add the redis opts to the generate sample shell script
* Removes claim\_id param in v1.0
* Stop using intersphinx
* Not permitted to delete pools which are used by flavor
* Workaround pypy bug on trusty when running tests
* Add \_ prefix to local variables
* Remove test\_correct\_has from our test suite
* Don't import client from the version package
* Updated from global requirements
* Remove extraneous created\_iso property
* Fix messages getting reclaimed in the Redis driver
* Imported Translations from Transifex
* Update Zaqar backend requirements
* In the Redis driver, only read the msg body when needed
* Fix: Redis driver does not fall back to default port
* Add missing flavors\_controller method to Redis driver
* Adds the API reference to the devs guide
* Remove recursive imports
* Add a dox.yml config file
* Add ZAQAR\_TEST\_EVERYTHING to enable all tests
* V1.1: Add claim\_id to message url
* Fix pep8 error in bench tool
* Use oslo.utils.encodeutils instead of strutils
* Updated from global requirements

2014.2.b3
---------

* Don't pass pipe when filtering messages in the Redis driver
* In benchmark observer worker, include claimed messages when listing
* Work toward Python 3.4 support and testing
* Switch Redis driver modules over to oslo.utils.timeutils
* Calculate stats dynamically instead of using side counters
* Removing test\_duration variable from bench/consumer.py
* Imported Translations from Transifex
* First patch and first review docs
* Adds a manual garbage collector script
* Zaqar configuration reference docs
* Make Client-ID a required header
* Don't return content\_location anymore
* Make X-Project-ID a required header
* Implement claim and message expiration logic for Redis
* Fix Redis message controller getting stuck in while loop
* Fix the help string
* Imported Translations from Transifex
* Adds partial field deprecation note in the users guide
* Ensure \`pool\` exists before creating a flavor
* Fix the flavor tests failing under py33
* Raise proper HTTP error when flavor doesn't exist
* Imported Translations from Transifex
* Adds more resources section to to developers guide
* Enhance docstrings for the config reference
* Implements queue, message and claim controllers for Redis
* Add observer role to benchmark tool
* Do not fail silently when given a bogus claim ID
* Make the catalog flavor-aware
* Updated from global requirements
* API v1.1: adding id field to messages
* Add API support for flavors
* Add the ability to benchmark across multiple queues
* Fix divide by zero bug in benchmark workers
* Imported Translations from Transifex
* Switch to oslo.utils
* Remove obsolete README for benchmark
* Fix incorrect accounting of total requests in benchmark consumer
* Make claim batch size configurable in benchmark tool
* Remove common.transport.wsgi.utils
* Report claim and delete latency separately in the benchmark tool
* Add dev docs for transport/storage dirvers
* Updated from global requirements
* Fix config generator in Zaqar
* Don't call config() in the global space
* V1.1 Homedoc
* Add separate proc and worker options for producer and consumer
* Clean up expired messages for SQLAlchemy driver
* Encapsulate top-level lists in responses in object
* Updated from global requirements
* Update use cases to align with dev guide and positioning
* Improve benchmarking tool
* feat(v1.1): Implement new /health endpoint for v1.1
* Fix .gitreview due to the repo rename/move
* Setting up a development environment docs
* Update files from oslo-incubator
* Reorganize docs folder
* Add page about tests to developer docs
* Use auth\_token from keystonemiddleware
* Improve developer guide introduction
* Make \`admin\_mode\` a cli option
* Remove user oriented documentation from the dev guide
* Add flavors support to mongodb
* Enable MongoDB tests on py27
* Require mongodb >= 2.2 or fail misserably
* Updated from global requirements
* Rename Marconi to Zaqar
* Fix py3k incompatibility with itertools.ifilter
* Fixes several failing mongo driver tests
* Mark sqlalchemy's data driver as deprecated
* Updated from global requirements
* Filter out expired messages
* Updated from global requirements
* Deprecate partial field in v1.0, remove in v1.1
* Imported Translations from Transifex

2014.2.b2
---------

* Minor cleanup of utils unit test
* Correct the test instructions in README
* API v1.1 - Encapsulate message post bodies in a JSON object
* Rename remaining 'shard' to 'pool'
* Updated from global requirements
* feat(v1.1): Default TTL and grace values
* Remove check queue exists endpoint
* Imported Translations from Transifex
* feat(benchmarking) : Producer-Consumer scenario
* Adding dependencies for redis support in marconi
* Implement POP in v1.1 API
* Drop pylint due to the huge amount of false positives
* Updated from global requirements
* Run storage unit tests in pooled context
* Make storage/pooling reflect storage/base
* Switch over oslo.i18n
* Primary key for pool in catalogue table is unreasonable
* Fix typos
* Clean up queues created for certain tests
* Use the class queue\_name and project for tests
* limit=0 in sqlalchemy means 0 instead of unlimited
* Fix broken wiki link
* Wrong param order when using errors.QueueNotMapped
* Changing default type for pooling Catalog storage driver
* Add None checking for the Mongodb uri
* Start scratching Queue Flavors api
* V1.1 Functional Tests
* Updated from global requirements
* Remove alphabetical sorting assumption in tests
* Rename shard to pool
* Fix two caching-related comments in the code
* Correct the rtype in the function docstrings
* Fix Python 3 leftovers
* Removed buggy httpretty to unblock the gate
* Sync with oslo-incubator
* Expose pymongo's SSL cert options
* Updated from global requirements
* Update hacking version and fix new violations
* Rename shards to pool
* Updated from global requirements
* Make storage.utils.keyify py3k compatible
* Implement Lazy Create Queue in v1.1 API
* Sync from oslo-incubator
* Fix tests to be Python 3 compatible

2014.2.b1
---------

* remove default=None for config options
* Decoupled Unit Tests
* Refactor auxiliary methods for MongoDB unit tests
* Re-enable tests affected by #1321840
* fix: Checking whether queue exists adds latency
* Small fixes in storage drivers for Python 3
* Prepare marconi for localization
* Removed now unnecesary workaround for PyPy
* Fix handling of request/response body for Python 3
* Updated from global requirements
* Fix sqlalchemy storage driver for Python 3
* V1 Tests JsonSchema
* Synced jsonutils from oslo-incubator
* Fixed a few typos in the codebase
* Add separate requirements file for Python 3
* Revert "Disable Metadata write operations on v1.1"
* Updated from global requirements
* Updated from global requirements
* Fixing some spelling, grammar, etc in docs
* Fixed incorrect reST syntax
* Updated from global requirements
* Removes host header from functional tests
* Synced jsonutils from oslo-incubator
* fix(metadata): Fix inconsistent error response under v1.1
* Use jsonschema for testing queue
* Updated from global requirements
* Adds command line option "daemon" to Marconi
* Disable Metadata write operations on v1.1
* Remove MySQL-python from the dependency list
* Instantiate app only when needed
* fix(sharding): deregister queue before delete
* Use config options to determine message count
* Updated from global requirements
* Update comment for version to say "Juno" instead of "Icehouse"
* Open Juno development
* Add specific doc build option and gate on warnings
* Dedup HACKING.rst and remove any pep8 or OpenStack hacking rules

2014.1.rc1
----------

* Don't fork marconi-server unless USE\_SCREEN=False
* keystone middleware version change due to a bug
* Add a help text for auth\_strategy
* Improve cleanup of test\_insert\_queue\_header\_asterisk
* Switch to oslosphinx
* keystoneclient now supports Py3K
* Add keystone\_authtoken to our config generator
* fix() : introduce division import
* Fix message.first return value for mongodb
* Updated from global requirements
* Add more unit tests for message controller
* Include full license text
* Update license headers on select source files
* Fix functional config file instructions
* Add default mongod URI to README
* Marconi Operations Document
* adds docs directory with dev guide
* fix(gate): one for pypy, one for docs
* Improve README file
* Enable the basic dev doc for Marconi
* Allow marconi-server to properly daemonize
* Rename logging.conf-sample to logging.conf.sample
* Fix config file name in README instructions
* Keep python 3.X compatibility for xrange
* Use oslo's config generator to generate sample
* Prepare Marconi to support oslo's config.generator
* Add an options module to sqlalchemy
* Updated from global requirements
* Use a colon instead of a . in setup.cfg
in setup.cfg * Sync from oslo-incubator * Add MySQL-python as a dependency * Add missing logging trace queues collection * fix(cli): Print statement in global error handler * Enable shards test for sqlalchemy * Remove unused method from timeutils * Reuse the transaction before getting messages * Set time\_zone to UTC on MySQL * MySQL requires VARCHARs to have a length * Slighty refactored url\_prefix usage in WSGI tests * Update marconi.conf-sample to deprecate sqlite * Replace \`Sqlite\` with \`Sqlalchemy\` in test names * Removed debug level messages translations * fix(MongoDB): Driver does not retry on AutoReconnect errors 2014.1.b3 --------- * Fix assertion for Testutils to check on sqlite://:memory: * Deprecate sqlite in favor of sqlalchemy * Don't raise \`DoesNotExist\` on claims deletion * Register sqlalchemy endpoints * Fix message's get and deletion * Enable pragma foreign key for sqlite * Remove termios support (and associated issues) * Updated from global requirements * fix(mongodb): Limit kwarg must be an int * test(wsgi): Refactor tests in preparation for API v1.1 * test(wsgi): Enable test\_default\_limits and fix UUID bug * Add an sqlalchemy storage to Marconi * Updated from global requirements * feat(sql/catalogue): add catalogue controller * feat(sqlalchemy): add shards controller * Replace unicode() by six.text\_type for python3 compatability * Replace basestring by six for python3 compatability * feat(v1.1): Implement /ping endpoint for v1.1 * feat(v1.1): Plumb v1.1 API in the WSGI transport * Sync common modules from olso and remove py3kcompat * doc(README): Have the user pass '-v' when starting marconi-server * Fix two small typos in marconi/queues/storage/sharding.py * feat(sql/driver): expose ControlDriver, more config * Apply six for metaclass * Use six.moves.urllib.parse instead of urlparse * Improve help strings * fix(wsgi): Cleanup limit config options * fix(shards): guard against registering bad shard URIs * Fix misspellings in marconi * test(functional): Use direct WSGI requests in lieu of a wsgiref server * chore: Fix pylint errors * test(functional): Don't use a dead test server * fix(testtools): 0.9.35 is not compatible with subunit 0.0.17 * Skip message\_get\_many schema test until we can repro reliably * Removed copyright from files without code * add copyright notices to some source files * DRY applied to class names * Add a \_config\_options function to Marconi * chore: Add pylint envs * Drop msgpack dependency * fix: Move decorator out of class definition since it is static * Sync oslo-cache from oslo-incubator * Don't use \`override\_time\` but mock instead 2014.1.b2 --------- * fix: ConnectionError no longer takes positional arguments * Remove dev1 from version, since it is added automagically by pbr * fix(mongodb): Ensure batch message posts are atomic (all or nothing) * Enforce size verification on content-length * Use testr instead of nosetest * Remove \_\_MARCONI\_SETUP\_\_ * test: Use srmock header\_dict in preparation for Falcon 0.1.8 * Added a request and response file to common * Map HTTP request codes in jsonschema * Verify response jsonschema for List Messages by ID * Change shards & catalogue db into single property * Implement DataDriver for sqlalchemy storage backend * Implement tables and unit tests for sqlalchemy storage backend * chore: Sync local (marconi) requirements with global reqs * DRY queues storage error message format * Added logging to HTTP errors * chore: Bump version to icehouse-2 dev * Return relative URIs for 
href-template of homedoc * Remove the \_\_MARCONI\_SETUP\_ global from init * Use relative import for base in tests * Don't use tuple parameters * Disable keystone client if running under Py3K * Remove gettext magic and import \_ explicitly * fix: Reduce logging severity of certain errors * chore: Remove remaining proxy cruft * fix(mongo): Delete claimed message fails * feat(sharding): Cache shard mappings * chore: Pass cache into drivers * fix: shard test failing * fix: mongodb tests consume storage like crazy * Fixes wrong hrefs when getting set of messages * Make sqlite connection lazy * cleanup: remove proxy cruft * opt: add hints to make delete/claim checks faster * fix: cache contract for memcached backend * Make consumer\_for a context manager * feat(health): make health API storage-aware * feat(validation): verify project id length * feat(shard): queue listing from multiple sources * Added empty packages and a test for the API v1 * Remove proxy code * Cleans up marconi-queues and marconi-proxy config files * Revert "Support building wheels (PEP-427)" * Isolate tests a bit more * feat: connect sharding manager to control drivers * Support building wheels (PEP-427) * refactor: drop simplejson requirement * Update openstack/common/lockutils * Add Tests for non-existing resources * feat: integrate shard storage with transport * Renamed "exceptions" module to "errors" * feat: add catalogue storage driver for queues * Sync with global requirements * feat(logging): format msg when log level met * feat: shards mongodb driver + tests * feat: shards storage controller interface * Update the Marconi readme with more detailed installation instructions * feat: add shard management resource * Don't enable\_echo when we don't have a tty * Start 2014.1 development * feat: split queues API into public/admin * fix: one less unnecessary modification * Move misplaced test under the right package * Fix Pep8 indentation errors missed by Pep8 1.4.5 * fix(bootstrap): Logging CLI options don't work * feat: Storage sharding foundation * Validation for messages returned by queue/stats * Return a consumer function instead of consuming * Use stevedore instead of importutils * fix(queues): Global config used everywhere * 'Persist' \_\_getattr\_\_ results * Setup storage pipeline in the boostrap instead of driver base * chore: Remove GC cruft from storage driver base class * Follow hacking rules about import * doc: Update conf filename used to enable mongod * fix: invalidate partition cache entry on delete * doc: Recommend installing marconi via pip * Updated Marconi ReadMe file with correct git repo address * Tests for Client-ID validation * Use the pipeline for the storage layer * Implement common pipeline class * feat: storage sharding schema for marconi-queues * fix(mongo): Negative age reported in stats * fix: proxy mongodb storage fields overspecified * fix: validate queue listing limits in proxy * Use oslo.config directly instead of common.config * Replace deprecated method aliases in tests * fix(mongo): Queue listing may include queues from other projects * fix: stream request data rather than loading it into memory * fix: allow multi-update on partition storage * feat(api): Client-ID as a real UUID * feat(api): give validation resp a different title * bug: proxy headers aren't forwarded properly * feat: add logging to proxy * fix: listing queue fails when detailed=True * fix: do not duplicate queues across partitions * chore: Designate semi-private "friend" helper methods as such * chore: Rename 
MONGODB\_TEST\_LIVE to MARCONI\_TEST\_MONGODB * feat(proxy): listing opts returns lists * Add Tests for Queue Stats * fix(wsgi): Non-ASCII characters in the queue name trigger 500 status * fix: encode keys before caching * fix: force exception content to be text\_type * feat (proxy/admin): allow partition modifications * feat: split proxy API into admin and public apps * feat: separate config for queues and proxy * proxy: mirror structure of marconi queues + bootstrap * fix(proxy): forward marconi headers * Fix queue's packages namespaces * feat(mongodb): Partition data across multiple databases * chore: Expose commit hash in version module * WSGI: added the control for non empty X-PROJECT-ID * Update oslo to latest and greatest * perf(mongodb): Combine project and queue message fields * fix(mongodb): Marker generation creates a bottleneck under heavy load * fix: age presented as negative * test(proxy): fix proxy-transport tests * proxy: adds oslo.cache, config support, & stevedore * Require oslo.config 1.2.0 final * Move queues' unittests under tests/unit/queues/ * Move test\_default\_limits to the new tests location * Move tests/unit/storage/base under m/tests/storage * proxy: mongodb storage driver * proxy: memory storage driver + tests + fixes * fix(mongodb): Remove $or clause used to claim messages * Help Bleeding Eyes * Update pbr and requirements to be current * Update HACKING file with the new structure * test(proxy): catalog tested * Fix old import paths * test(proxy): partition related endpoints tested * feat: define interfaces for proxy storage * feat: marconi-proxy forwarding * feat: marconi proxy (v1, health) * feat: marconi proxy * Change Marconi's codebase structure * Fix Functional Tests * chore: Update Falcon dep to allow version 0.1.7 * Update .gitreview for incubation org move * feat(mongo): use UNIX timestamp instead of datetime * Replace old functional http.post call * chore: Update oslo.config to version 1.2.0a4 * fix(tests): unit tests disabled due to missing \_\_ini\_\_.py * fix: Requests get slower when queues have a lot of messages * Move Unit tests under a unit package * Use format instead of % for string formatting * Move functional tests into wsgi/v1 * Implement small http client for tests * Implement embedded marconi-server execution * Run functional tests under tox * Use oslo.config for functional tests * Remove util packages * Pull actual tests out of marconi/tests * fix(transport.wsgi): JSON home doc missing GET and HEAD hints * feat(storage): configurable default paging size * fix(storage.mongodb): Race condition when creating a claim * Make tox use develop instead of sdist * fix: Claim can return 404 * chore: Update openstack.common to get latest timeutils * chore: Track the up-and-coming oslo.cache module * fix: claimed message require claim\_id to delete * chore: Update openstack.common, add lockutils * Refactor System Tests * feat(wsgi): homedoc now ships relative URIs * chore: remove queue\_id cruft in mongo driver * chore: increase coverage in some trivial ways * chore: remove unneeded statements in wsgi * Update Tests for Input Validation * chore: cleanup the unit tests with ddt * Added a PyPy env to tox * Log pymongo's exception on ConnectionFailure * fix: bad marker behaves like non-existing marker * Don't wait for ack when deleting expired messages * Use req.get\_param's store feature in lieu of utils.purge * restructure: rm -rf tools, move test requires * test: delete msg w/ invalid claim isn't deleted * fix: fetching nonexistent message/claims 
raises 503s * Ignore \*.egg dirs * Don't use claim creation time to calc claim's age * fix: rename \_payload\_uplimit into \_paging\_uplimit * feat(validation): check metadata JSON size * Convert "outgoing" literals to unicode * fix(storage/mongo): unclaimed messages not listed * Fixes unique constraint regex * doc(validation): update the sample config file * Use oslo.config 1.2 * feat(validation): validate user input * Buffer message documents when enqueuing them, in lieu of streaming * Rename transport helpers to utils * Update tests to reflect http 201 on Post Claim * Add "total", "oldest" and "newest" to queue stats * feat(wsgi): claim creation returns 201 * Add Tests for GET/HEAD queue * Update oslo-incubator requirements * feat(transport): place request size limit for JSON * Don't check for active messages * Use queue's name and project to get messages * Test for bulk delete of messages * feat(api): GET and HEAD methods on queue * Update tests to reflect queue metadata changes * Implement v1 JSON Home Document * feat(wsgi): message bulk deletion * feat(storage): message bulk deletion * Fix active index's order * Sort everything by K * Tell \_list queries what index to use * Verify headers in server response * refactor: \_basic\_message builder for mongo driver * Gate on Python 3.3 * Include queue ID in unclaim to trigger use of index * refactor: get single message in storage * Adds test for include\_claimed flag * fix(typo): Phython to Python * Tests for out of range integers * Implements new metadata endpoint * feat(storage): separate queue set\_metadata() * Exception messages are no longer marked as translatable * Add support for listing claimed messages (transport) * Add support for listing claimed messages (storage) * feat(transport): define acceptable integer range * chore: drop unused bulk message support on /queues * chore: switch to py3 print function * feat(wsgi): check for client media type support * Add Test for Health endpoint * Fix Message Tests * feat(storage): do not restrict the container type * Log all transport actions * Cleanup PATCH Claim Tests * Print caught exceptions in cli * Replace foo.next() with next(foo) * fix(wsgi): patching claims only requires "ttl" * fix(wsgi): a non-existing queue stats returns 404 * Fix route for bulk-get message by ID * Update README and make it more explicit * Handle AutoReconnect errors * Add test for bulk delete of messages * Implement grace period for claimed message lifetimes * Implement health endpoint for WSGI driver * Fix Verification for Bulk Inserts * Factor out entry point helper and apply to marconi.cmd.server * Fixed pep8 dependency conflict * Refactored storage controllers into standalone modules * System Tests - Switch from robot to nosetests * Remove project ID from the URI * Get several messages by ID using a queue filter parameter * Don't sink stdout when running unit tests * Get messages by id * Set Falcon version in requirements.txt * Edited the READMEs to improve layout and prose * Added some de-facto style guidelines to HACKING and fixed violations * Update marconi.conf-sample to support stevedore * Migrate to stevedore * Removing cliff from dependencies * Rename pip-requires / test-requires into (test-)requirements.txt * Migrate to pbr * Log retry attempts resulting from parallel message posts * Move log.setup to Bootstrap and use cfg.CONF opts * Guarantee FIFO/once-and-only-once delivery when using MongoDB * Add Verification for List Queue * Pass content length to req.stream.read() in WSGI transport * Make 
sure transport's conf are registered * Wrap transport's app with keystoneclient's middleware * Replace gunicorn with wsgiref * Add System Tests * Don't use BaseException.message * Properly handle message post with an empty iterable * Expand controller abbreviations in WSGI resources * Clean up exception handling in the WSGI transport driver * Use oslo's log * Rename bin package into cmd * perf: Decrease time to create a claim by ~1 us * refactor: Hoist helpers.expect into package namespace * style: Fix flake8 and hacking errors * Rename tenant into project * Raise NotFound when an invalid ID is passed * style: Migrate to the new hacking package on pypi * style: Enable flake8 with proper exclusions * Set messages ttl to claims ttl * Fix tests which were broken by incomplete changes * Switch to flake8 and fix errors * A helper to remove None value entries from a dict * Support list queue pagination in transport * Support list queue pagination in storage * Message listing switches to the new API * Define the new pagination API in storage base * Use ttl indexes as message scrubber * Add simplejson into dependency * Support the new claim TTL semantics in SQlite * Get rid of SQlite's own test code * Support message deletion with claim in MongoDB * A self-format exception, ClaimNotPermitted * Bugfix: Deleting a claimed msg is idempotent * Move SQlite ID tests to driver-specific tests * Let the storage tests cover the SQlite driver * All endpoints in WSGI pick up the faulty driver * Make Paste use default configs * Fix a bug in test; "href" needs to be splitted * Bugfix: Exclude expired claimed messages in stats * Count "claimed" and "free" msgs in Queue stats * Bugfix: Do not claim expired messages in SQlite * Queue mgmt picks up the faulty driver in WSGI * Delete messages when a queue is deleted * Improved mongodb's storage queries and indexes * Make use of simplejson in the transport layer * fix: Enable reading from secondaries in the MongoDB driver * Bugfix: respond 204 if no message get claimed * Log the traceback as well * Immigrate SQlite message tests to storage tests * client\_uuid is not optional to POST messages * Added total, claimed, expired to queue stats * chore: Remove bin/marconi-self-host * Queue stats returns claimed, expired, and total * Config module no longer remember cli args * Added gunicorn as server application manager * Remove unused code in storage * Implemented marconi-server entry point * Queue stats in MongoDB * Add the "href" field to listing queues * Use "href" instead of "id" in the claimed messages * Use "href" instead of "id" in the message body * Make use of the features in falcon 0.1.3 & 0.1.4 * Queue Stats in WSGI * List queues in WSGI (w/o paging, but compatible) * Delete queue in WSGI * Check for malformed JSON when creating a queue * Claim support in WSGI * ClaimMessage implementation for mongodb * Do not allow updating an expired claim in SQlite * Bugfix: exclude expired messages in Queue stats * ClaimMessage Base Tests * Return (claim\_id, message\_iter) in SQLite create() * Raise self-formatted exceptions in SQlite * Don't call super on queue deletion * Set 'Content-Location' on get-messages correctly * MessageController implementation for mongodb storage * Rename QueueResource to queues.ItemResource * Claim messages in SQlite * Message support in WSGI * split message's get method in 2 separate methods * Suppress some Coverage report output * Improve the documentation of the config module * Message support in SQlite * Use metadata instead of 
\*\*metadata for claim's updates * Keystone Authentication * test: Added mongodb and faulty storage tests, and made them pass * style(HACKING): Add notes about whitespace, naming, and wrapping * chore: Remove unused module (superseded by bootstrap) * Bootstrap now dynamically imports storage and transport modules * style: Enable pep8 warnings/errors and fix ensuing chaos * SQlite storage improved * feat(transport.wsgi): Create or update queue * chore: Remove unused test * refactor: Rename AUTHORS so that it doesn't keep getting overwritten * refactor: Rename TestSuite to TestBase * refactor: Rename Kernel class to make it less confusing * QueueController for mongodb storage * refactor(tests.util): Remove prepare method support * Implementation of the reference storage * Add py26 to tox.ini * Move .set\_cli and .load to the project() object * Metadata is not optional (to storage) * Replace create / update with upsert * Added tenant to the \*ControllerBaseTest * Updated README file with Marconi's goal * Implement some logic in abstract methods * Changed Maintainer from OpenStack LLC to OpenStack Foundation * Add gettext support * fix(nose): Comment out detailed-errors to workaround a testtools+nose bug * fix(tox): Address warnings * Decentralized configuration * Use issublcass instead of isinstance * Implements base classes for storage controllers * doc(transport.driver\_base): Add docstrings * Added hacking file * Move marconi.common.Kernel to marconi.Kernel * Switch to oslo.config * Ignore vim backup files * doc: Make file headings consistent, and update docstrings per the proposed HACKING guide * feat(Kernel): Demonstrate wiring up endpoints * test: Add test suite utility class and sample conf * chore: Stub out how the kernel will be configured, and connect transports and storage * chore: Setup directory structure * Added license to setup.py * Added oslo-config as dependency * Fixed setup, tox and requirements * Added files to gitignore * Add README * Initial import ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/HACKING.rst0000664000175100017510000001025515033040005014537 0ustar00mylesmyles======================== Zaqar style commandments ======================== - Step 1: Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/ - Step 2: Read on for Zaqar specific commandments General ------- - Optimize for readability; whitespace is your friend. - Use blank lines to group related logic. - All classes must inherit from ``object`` (explicitly). - Use single-quotes for strings unless the string contains a single-quote. - Use the double-quote character for blockquotes (``"""``, not ``'''``). - USE_ALL_CAPS_FOR_GLOBAL_CONSTANTS Comments -------- - In general use comments as "memory pegs" for those coming after you up the trail. - Guide the reader through long functions with comments introducing different sections of the code. - Choose clean, descriptive names for functions and variables to make them self-documenting. - Add ``# NOTE(termie): blah blah...`` comments to clarify your intent, or to explain a tricky algorithm, when it isn't obvious from just reading the code. Identifiers ----------- - Don't use single characters in identifiers except in trivial loop variables and mathematical algorithms. - Avoid abbreviations, especially if they are ambiguous or their meaning would not be immediately clear to the casual reader or newcomer.
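For example, prefer descriptive names over opaque abbreviations (hypothetical identifiers, shown only to illustrate the two rules above):

**INCORRECT** ::

    qm = mgr.get(q)

**CORRECT** ::

    queue_metadata = queue_manager.get(queue_name)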
Wrapping -------- Wrap long lines by using Python's implied line continuation inside parentheses, brackets and braces. Make sure to indent the continued line appropriately. The preferred place to break around a binary operator is after the operator, not before it. Example:: class Rectangle(Blob): def __init__(self, width, height, color='black', emphasis=None, highlight=0): # More indentation included to distinguish this from the rest. if (width == 0 and height == 0 and color == 'red' and emphasis == 'strong' or highlight > 100): raise ValueError('sorry, you lose') if width == 0 and height == 0 and (color == 'red' or emphasis is None): raise ValueError("I don't think so -- values are {0}, {1}".format( width, height)) msg = ('this is a very long string that goes on and on and on and ' 'on and on and on...') super(Rectangle, self).__init__(width, height, color, emphasis, highlight) Imports ------- - Classes and functions may be hoisted into a package namespace, via __init__ files, with some discretion. More Import Examples -------------------- **INCORRECT** :: import zaqar.transport.wsgi as wsgi **CORRECT** :: from zaqar.transport import wsgi Docstrings ---------- Docstrings are required for all functions and methods. Docstrings should ONLY use triple-double-quotes (``"""``). Single-line docstrings should NEVER have extraneous whitespace between enclosing triple-double-quotes. **INCORRECT** :: """ There is some whitespace between the enclosing quotes :( """ **CORRECT** :: """There is no whitespace between the enclosing quotes :)""" Docstrings should document default values for named arguments if they're not None. Docstrings that span more than one line should look like this: Example:: """Single-line summary, right after the opening triple-double-quote. If you are going to describe parameters and return values, use Sphinx; the appropriate syntax is as follows. :param foo: the foo parameter :param bar: (Default True) the bar parameter :param foo_long_bar: the foo parameter description is very long so we have to split it in multiple lines in order to keep things ordered :returns: return_type -- description of the return value :returns: description of the return value :raises ValueError: if the message_body exceeds 160 characters :raises TypeError: if the message_body is not a basestring """ **DO NOT** leave an extra newline before the closing triple-double-quote. Creating Unit Tests ------------------- NOTE: 100% coverage is required Logging ------- Use __name__ as the name of your logger and name your module-level logger objects 'LOG':: LOG = logging.getLogger(__name__) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/LICENSE0000664000175100017510000002363715033040005013756 0ustar00mylesmyles Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. 
You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5870135 zaqar-20.1.0.dev29/PKG-INFO0000644000175100017510000001326615033040026014044 0ustar00mylesmylesMetadata-Version: 2.2 Name: zaqar Version: 20.1.0.dev29 Summary: OpenStack Queuing and Notification Service Home-page: https://docs.openstack.org/zaqar/latest/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org Project-URL: Source, https://opendev.org/openstack/zaqar Project-URL: Tracker, https://bugs.launchpad.net/zaqar Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3.12 Requires-Python: >=3.10 License-File: LICENSE License-File: AUTHORS.rst Requires-Dist: pbr!=2.1.0,>=2.0.0 Requires-Dist: alembic>=0.9.6 Requires-Dist: cryptography>=2.7 Requires-Dist: falcon>=3.0.0 Requires-Dist: jsonschema>=3.2.0 Requires-Dist: keystonemiddleware>=9.1.0 Requires-Dist: msgpack>=1.0.0 Requires-Dist: python-swiftclient>=3.10.1 Requires-Dist: WebOb>=1.7.1 Requires-Dist: stevedore>=3.2.2 Requires-Dist: oslo.cache>=1.26.0 Requires-Dist: oslo.concurrency>=5.0.1 Requires-Dist: oslo.config>=8.3.2 Requires-Dist: oslo.context>=2.19.2 Requires-Dist: oslo.db>=11.0.0 Requires-Dist: oslo.i18n>=3.15.3 Requires-Dist: oslo.log>=4.6.1 Requires-Dist: oslo.messaging>=12.5.0 Requires-Dist: oslo.reports>=2.2.0 Requires-Dist: oslo.serialization>=4.2.0 Requires-Dist: oslo.upgradecheck>=1.3.0 Requires-Dist: oslo.utils>=4.12.1 Requires-Dist: 
oslo.policy>=4.5.0 Requires-Dist: osprofiler>=1.4.0 Requires-Dist: SQLAlchemy>=1.3.19 Requires-Dist: autobahn>=22.3.2 Requires-Dist: requests>=2.25.0 Requires-Dist: futurist>=1.2.0 Provides-Extra: mongodb Requires-Dist: pymongo>=3.6.0; extra == "mongodb" Provides-Extra: redis Requires-Dist: redis>=3.4.0; extra == "redis" Provides-Extra: mysql Requires-Dist: PyMySQL>=0.8.0; extra == "mysql" Provides-Extra: test Requires-Dist: hacking<6.2.0,>=6.1.0; extra == "test" Requires-Dist: redis>=3.4.0; extra == "test" Requires-Dist: pymongo>=3.6.0; extra == "test" Requires-Dist: python-swiftclient>=3.10.1; extra == "test" Requires-Dist: websocket-client>=0.44.0; extra == "test" Requires-Dist: PyMySQL>=0.8.0; extra == "test" Requires-Dist: coverage!=4.4,>=4.0; extra == "test" Requires-Dist: cryptography>=2.7; extra == "test" Requires-Dist: ddt>=1.0.1; extra == "test" Requires-Dist: doc8>=0.8.1; extra == "test" Requires-Dist: Pygments>=2.2.0; extra == "test" Requires-Dist: fixtures>=3.0.0; extra == "test" Requires-Dist: testscenarios>=0.4; extra == "test" Requires-Dist: testtools>=2.2.0; extra == "test" Requires-Dist: testresources>=2.0.0; extra == "test" Requires-Dist: oslotest>=3.2.0; extra == "test" Requires-Dist: stestr>=2.0.0; extra == "test" Requires-Dist: osprofiler>=1.4.0; extra == "test" Dynamic: author Dynamic: author-email Dynamic: classifier Dynamic: description Dynamic: home-page Dynamic: project-url Dynamic: provides-extra Dynamic: requires-dist Dynamic: requires-python Dynamic: summary ======================== Team and repository tags ======================== .. image:: https://governance.openstack.org/tc/badges/zaqar.svg :target: https://governance.openstack.org/tc/reference/tags/index.html .. Change things from this point on ===== Zaqar ===== Zaqar is a multi-tenant cloud messaging and notification service for web and mobile developers. It combines the ideas pioneered by Amazon's SQS product with additional semantics to support event broadcasting. The service features a fully RESTful API, which developers can use to send messages between various components of their SaaS and mobile applications, by using a variety of communication patterns. Underlying this API is an efficient messaging engine designed with scalability and security in mind. Other OpenStack components can integrate with Zaqar to surface events to end users and to communicate with guest agents that run in the "over-cloud" layer. Cloud operators can leverage Zaqar to provide equivalents of SQS and SNS to their customers. 
General information is available in the wiki: https://wiki.openstack.org/wiki/Zaqar The API v2.0 (stable) specification and documentation are available at: https://wiki.openstack.org/wiki/Zaqar/specs/api/v2.0 Zaqar's Documentation, the source of which is in ``doc/source/``, is available at: https://docs.openstack.org/zaqar/latest Zaqar's Release notes are available at: https://docs.openstack.org/releasenotes/zaqar/ Contributors are encouraged to join IRC (``#openstack-zaqar`` channel on ``OFTC``): https://wiki.openstack.org/wiki/IRC Information on how to run unit and functional tests is available at: https://docs.openstack.org/zaqar/latest/contributor/running_tests.html Information on how to run the benchmarking tool is available at: https://docs.openstack.org/zaqar/latest/admin/running_benchmark.html Zaqar's design specifications are tracked at: https://specs.openstack.org/openstack/zaqar-specs/ Using Zaqar ----------- If you are new to Zaqar and just want to try it, you can set up Zaqar in a development environment. Using Zaqar in a production environment: Coming soon! Using Zaqar in a development environment: Instructions are available at: https://docs.openstack.org/zaqar/latest/contributor/development.environment.html This will allow you to run a local Zaqar server with MongoDB as the database. This is the easiest, quickest and most suitable way for beginners. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/README.rst0000664000175100017510000000466215033040005014435 0ustar00mylesmyles======================== Team and repository tags ======================== .. image:: https://governance.openstack.org/tc/badges/zaqar.svg :target: https://governance.openstack.org/tc/reference/tags/index.html .. Change things from this point on ===== Zaqar ===== Zaqar is a multi-tenant cloud messaging and notification service for web and mobile developers. It combines the ideas pioneered by Amazon's SQS product with additional semantics to support event broadcasting. The service features a fully RESTful API, which developers can use to send messages between various components of their SaaS and mobile applications, by using a variety of communication patterns. Underlying this API is an efficient messaging engine designed with scalability and security in mind. Other OpenStack components can integrate with Zaqar to surface events to end users and to communicate with guest agents that run in the "over-cloud" layer. Cloud operators can leverage Zaqar to provide equivalents of SQS and SNS to their customers.
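As a quick taste of the API, the following minimal sketch posts a message to a queue over the v2 REST interface. It assumes a local server listening on the default port 8888 with ``auth_strategy = noauth``; the queue name and payload are placeholders, and the ``requests`` library is used purely for illustration::

    import uuid

    import requests

    # The v2 API requires a Client-ID header (a UUID identifying this client).
    headers = {'Client-ID': str(uuid.uuid4())}

    # Queues are lazy; posting to "demo" creates it on first use.
    resp = requests.post(
        'http://localhost:8888/v2/queues/demo/messages',
        headers=headers,
        json={'messages': [{'ttl': 300, 'body': {'event': 'hello'}}]})
    print(resp.status_code, resp.json())  # 201 and the new message hrefs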
General information is available in the wiki: https://wiki.openstack.org/wiki/Zaqar The API v2.0 (stable) specification and documentation are available at: https://wiki.openstack.org/wiki/Zaqar/specs/api/v2.0 Zaqar's Documentation, the source of which is in ``doc/source/``, is available at: https://docs.openstack.org/zaqar/latest Zaqar's Release notes are available at: https://docs.openstack.org/releasenotes/zaqar/ Contributors are encouraged to join IRC (``#openstack-zaqar`` channel on ``OFTC``): https://wiki.openstack.org/wiki/IRC Information on how to run unit and functional tests is available at: https://docs.openstack.org/zaqar/latest/contributor/running_tests.html Information on how to run the benchmarking tool is available at: https://docs.openstack.org/zaqar/latest/admin/running_benchmark.html Zaqar's design specifications are tracked at: https://specs.openstack.org/openstack/zaqar-specs/ Using Zaqar ----------- If you are new to Zaqar and just want to try it, you can set up Zaqar in a development environment. Using Zaqar in a production environment: Coming soon! Using Zaqar in a development environment: Instructions are available at: https://docs.openstack.org/zaqar/latest/contributor/development.environment.html This will allow you to run a local Zaqar server with MongoDB as the database. This is the easiest, quickest and most suitable way for beginners. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924757.0 zaqar-20.1.0.dev29/RELEASENOTES.rst0000664000175100017510000005751315033040025015376 0ustar00mylesmyles===== zaqar ===== .. _zaqar_20.0.0-18: 20.0.0-18 ========= .. _zaqar_20.0.0-18_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/remove-py39-cd35d7feff4be5fb.yaml @ b'45627cf408f1e2b20fb217be9ef109c9863babb1' - Support for Python 3.9 has been removed. Python 3.10 is now the minimum supported version. .. _zaqar_1.1.0: 1.1.0 ===== .. _zaqar_1.1.0_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/fix_subscription_limit-c3cdc9385825285a.yaml @ b'50b495007c8cc4577babb8d6af27a9d9c5bc7313' - Query for all subscriptions on a given queue by taking into account the returned marker, if any. Without this fix, only 10 subscriptions can be extracted from the database to send notifications. .. _zaqar_1.1.0_Other Notes: Other Notes ----------- .. releasenotes/notes/3841fa259c509971-start-using-reno.yaml @ b'5a2cb711bb9e9387a241c333288d11313c42c5c3' - Start using reno to manage release notes. .. _zaqar_2.0.0-10: 2.0.0-10 ======== .. _zaqar_2.0.0-10_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/fix_subscription_limit-c3cdc9385825285a.yaml @ b'cfb4a9a0585fdba5d22f9ea9b1838c81d00c5dd5' - Query for all subscriptions on a given queue by taking into account the returned marker, if any. Without this fix, only 10 subscriptions can be extracted from the database to send notifications. .. _zaqar_3.0.0: 3.0.0 ===== .. _zaqar_3.0.0_New Features: New Features ------------ .. releasenotes/notes/add-a-notifier-using-trust-271d9cd1d2b4cdeb.yaml @ b'51604b4954b14a2ae14b15b50c302d32db0e40a7' - Add a new webhook notifier using trust authentication. When using the 'trust+' URL prefix, Zaqar will create a Keystone trust for the user, and then use it when a notification happens to authenticate against Keystone and send the token to the endpoint. .. releasenotes/notes/add-a-notifier-using-trust-271d9cd1d2b4cdeb.yaml @ b'51604b4954b14a2ae14b15b50c302d32db0e40a7' - Support 'post_data' and 'post_headers' options on subscribers, allowing customization of the payload when using a webhook subscriber. The 'post_data' option supports the '$zaqar_message$' string template, which will be replaced by the serialized JSON message if specified. (A sketch of such a subscription follows these notes.) .. releasenotes/notes/lazy-queues-in-subscriptions-6bade4a1b8eca3e5.yaml @ b'4c2b7e04dbca0ae1e5d3480c8bad60dcbfac8ff8' - Queues are now lazy in subscriptions as well, so there is no need to pre-create a queue before creating a subscription for it. Zaqar will create the queue automatically on the subscription creation request. As before, all subscriptions remain active even if the corresponding queue is deleted. .. releasenotes/notes/show_default_attributes_for_queue-3d87333752484c87.yaml @ b'e9dbb19a3a103321739ef683ebfd60804b52b5eb' - Zaqar now supports more built-in/reserved queue attributes; for now there are two important ones, 'max_messages_post_size' and 'max_message_ttl'. With this feature, when a user queries queues, Zaqar shows these two attributes in the queue metadata (read from the config file if the user has not customized them) so that the user knows which values are in effect. .. releasenotes/notes/support-turnoff-deprecated-versions-44656aeb8ebb8881.yaml @ b'f38cecfdfc54c16e7cd8533c91255d07498a982b' - Currently, the v1 API is still accessible though it has been deprecated for a while, and v1.1 will be deprecated soon. To keep backward compatibility, a new config option, ``enable_deprecated_api_versions``, is added so that an operator can turn an API version off entirely or keep supporting it by adding that version to the option's list. .. releasenotes/notes/webhook_subscription_confirmation-883cb7f325885ef0.yaml @ b'69c799734bcd0d1a0e85096f687f17ee3d0743c0' - Before users send messages to subscribers through a queue, the subscribers must now be confirmed first; Zaqar only sends messages to confirmed subscribers. This feature supports "webhook" and "mailto" subscribers with the MongoDB or Redis backend. The "mailto" part will be done in the O cycle. Set "require_confirmation = True" to enable this feature. The default value is "False" for now; it will be enabled by default after one or two cycles.
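For illustration, a minimal sketch of creating a webhook subscription that uses the options above. The endpoint shape and all values are assumptions; the 'trust+' variant from the first note is shown in a comment::

    import uuid

    import requests

    subscription = {
        # 'trust+http://example.com/hook' would instead make Zaqar
        # authenticate the delivery through a Keystone trust.
        'subscriber': 'http://example.com/hook',
        'ttl': 3600,
        'options': {
            # $zaqar_message$ is replaced by the serialized JSON message.
            'post_data': '{"event": "$zaqar_message$"}',
            'post_headers': {'X-Service': 'demo'},
        },
    }
    resp = requests.post(
        'http://localhost:8888/v2/queues/demo/subscriptions',
        headers={'Client-ID': str(uuid.uuid4())},
        json=subscription)
    print(resp.status_code, resp.json())  # 201 with a subscription id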
.. _zaqar_3.0.0_Deprecation Notes: Deprecation Notes ----------------- .. releasenotes/notes/deprecate-v11-976cccc1b56a28e7.yaml @ b'b745146df42acab86fc027fd01088ab95ee309e5' - Zaqar API v2 has been released for several cycles and it is integrated as the default API version by most of the OpenStack services. So it is time to deprecate v1.1 in favor of v2. Now, in the Newton cycle, Zaqar API v1.1 is officially deprecated. .. _zaqar_3.0.0_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/fix_auth_issue_for_root_path-b15e1c4e92e4e8b1.yaml @ b'e9dbb19a3a103321739ef683ebfd60804b52b5eb' - When accessing the root path of the Zaqar service (for example: curl GET http://127.0.0.1:8888/), the user would see a 401 error, which would cause some front end proxies (like HAProxy) to complain. Now this issue has been fixed. .. releasenotes/notes/fix_subscription_limit-c3cdc9385825285a.yaml @ b'b8a70e4aeca83ebefad4b127af71b1bd125efa40' - Query for all subscriptions on a given queue by taking into account the returned marker, if any. Without this fix, only 10 subscriptions can be extracted from the database to send notifications. .. releasenotes/notes/user_ipv6_sockets-1e1b436de6b81ae3.yaml @ b'e9dbb19a3a103321739ef683ebfd60804b52b5eb' - In an IPv6 management network environment, starting the Zaqar server would run into an 'Address family for hostname not support' error when using the WSGI simple server. The root cause is that Python's TCPServer implementation is hard-coded to use IPv4, even in IPv6 environments. Now this issue has been fixed. .. _zaqar_4.0.0: 4.0.0 ===== .. _zaqar_4.0.0_New Features: New Features ------------ .. releasenotes/notes/Integrate-OSprofiler-with-zaqar-59d0dc3d0326947d.yaml @ b'ef7110c28338af76833e515d56a5d08ef0d0b26c' - OSprofiler is integrated into Zaqar in Ocata. It is a library from oslo. It aims to analyse performance bottlenecks by making it possible to generate one trace per request affecting all involved services and to build a tree of calls. .. releasenotes/notes/purge-queue-6788a249ee59d55a.yaml @ b'460c34529868d2dc330afc12b4662702bf1f982a' - A new queue action is added so that users can purge a queue quickly. That means all the messages and subscriptions will be deleted automatically but the metadata of the queue will be kept. (A sketch of the purge call follows this version's notes.) .. releasenotes/notes/sqlalchemy-migration-6b4eaebb6e02a449.yaml @ b'fb8da5d535546754904cd26f2664f455ba724acf' - Add migration support for Zaqar's sqlalchemy storage driver. .. releasenotes/notes/subscription-confirmation-support-email-0c2a56cfedc5d1e2.yaml @ b'f196f9a8a92bb7206ef64e8bba151ba659c3aab6' - This feature is the third part of the subscription confirmation feature: support for sending email to a subscriber if confirmation is needed. To use this feature, the user needs to set the config options "external_confirmation_url", "subscription_confirmation_email_template" and "unsubscribe_confirmation_email_template". The confirmation page URL is used in email subscription confirmation before notification; this page is not hosted in the Zaqar server, so users should build their own web service to provide this web page. The subscription_confirmation_email_template lets the user customize the subscription confirmation email content, including topic, body and sender. The unsubscribe_confirmation_email_template lets the user customize the unsubscribe confirmation email content, including topic, body and sender, too.
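A rough sketch of the purge action mentioned above; the endpoint and body shape are assumptions to be checked against the v2 API reference for your release::

    import uuid

    import requests

    # Purge both messages and subscriptions; queue metadata is kept.
    resp = requests.post(
        'http://localhost:8888/v2/queues/demo/purge',
        headers={'Client-ID': str(uuid.uuid4())},
        json={'resource_types': ['messages', 'subscriptions']})
    print(resp.status_code)  # 204 on success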
releasenotes/notes/user_ipv6_sockets-1e1b436de6b81ae3.yaml @ b'e9dbb19a3a103321739ef683ebfd60804b52b5eb' - In IPv6 management network environment, starting Zaqar server will run into 'Address family for hostname not support' error when use WSGI simple server. The root cause is that Python's TCPServer implementation is hard-coded to use IPv4, even in IPv6 environments. Now this issue has been fixed. .. _zaqar_4.0.0: 4.0.0 ===== .. _zaqar_4.0.0_New Features: New Features ------------ .. releasenotes/notes/Integrate-OSprofiler-with-zaqar-59d0dc3d0326947d.yaml @ b'ef7110c28338af76833e515d56a5d08ef0d0b26c' - The OSprofiler is integrated to Zaqar in Ocata. It is a library from oslo. It aims to analyse the performance bottleneck issue by making possible to generate one trace per request affecting all involved services and build a tree of calls. .. releasenotes/notes/purge-queue-6788a249ee59d55a.yaml @ b'460c34529868d2dc330afc12b4662702bf1f982a' - A new queue action is added so that users can purge a queue quickly. That means all the messages and subscriptions will be deleted automatically but the metadata of the queue will be kept. .. releasenotes/notes/sqlalchemy-migration-6b4eaebb6e02a449.yaml @ b'fb8da5d535546754904cd26f2664f455ba724acf' - Add migration support for Zaqar's sqlalchemy storage driver. .. releasenotes/notes/subscription-confirmation-support-email-0c2a56cfedc5d1e2.yaml @ b'f196f9a8a92bb7206ef64e8bba151ba659c3aab6' - This feature is the third part of subscription confirmation feature. Support to send email to subscriber if confirmation is needed. To use this feature, user need to set the config option "external_confirmation_url", "subscription_confirmation_email_template" and "unsubscribe_confirmation_email_template". The confirmation page url that will be used in email subscription confirmation before notification, this page is not hosted in Zaqar server, user should build their own web service to provide this web page. The subscription_confirmation_email_template let user to customize the subscription confimation email content, including topic, body and sender. The unsubscribe_confirmation_email_template let user to customize the unsubscribe confimation email content, including topic, body and sender too. .. _zaqar_5.0.0: 5.0.0 ===== .. _zaqar_5.0.0_New Features: New Features ------------ .. releasenotes/notes/add-swift-backend-4eb9b43913f39d18.yaml @ b'76484d883af89444d261cb2f7c4842c740a3ac75' - The new Swift storage backend is added to Zaqar in Ocata. It's experimental currently. To use this backend, you should modify the "drivers" section in the config file. [Blueprint `swift-storage-driver `_] .. releasenotes/notes/introduce-guru-to-zaqar-ac7b51c764503829.yaml @ b'5118dafc65e4ec39b782fa029ba74919700cb42b' - Introduce Guru to Zaqar. Guru is a mechanism whereby developers and system administrators can generate a report about the state of a running Zaqar executable. This report is called a *Guru Meditation Report*. Now Guru can support wsgi, websocket and uwsgi modes all. .. releasenotes/notes/subscription-confirmation-support-email-0c2a56cfedc5d1e2.yaml @ b'4778f708fa9ec86b4137eefd63d07c40ad24296e' - This feature is the third part of subscription confirmation feature. Support to send email to subscriber if confirmation is needed. To use this feature, user need to set the config option "external_confirmation_url", "subscription_confirmation_email_template" and "unsubscribe_confirmation_email_template". 
.. _zaqar_5.0.0_Critical Issues: Critical Issues --------------- .. releasenotes/notes/sql_init-c9b3883241631f24.yaml @ b'10b07c9ccf5b2925858a48e124374adda267659f' - When using the sqlalchemy driver, operators are now required to run "zaqar-sql-db-manage upgrade" before making the service available. The service previously tried to create the database on the first request, but that was prone to race conditions. .. _zaqar_5.0.0_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/allow-configuration-of-websocket-notification-fa542fbf761378d3.yaml @ b'a68a03a228732050b33c2a7f35d1caa9f3467718' - Add two configuration options for the notification endpoint of the websocket server, instead of a random port and local address: 'notification-bind', the address on which the notification server will listen, and 'notification-port', the port on which the notification server will listen. .. releasenotes/notes/fix-detailed-queue-without-reserved-metadata-b53857ed9821fe76.yaml @ b'b7141b52706c3d58393cbc5dcf6b71bb1a6472ab' - Zaqar didn't return the reserved metadata when listing detailed queues. After this fix, Zaqar returns the reserved metadata '_default_message_ttl' and '_max_messages_post_size' in the response of a detailed queue listing. .. _zaqar_6.0.0: 6.0.0 ===== .. _zaqar_6.0.0_New Features: New Features ------------ .. releasenotes/notes/support_md5_of_body-84c1cdc6809b6417.yaml @ b'f605dd9bae5c43811bcf32962825d8f65b60d6e6' - Support a checksum function over the non-URL-encoded message body; the default algorithm is MD5. Back-end support covers MongoDB, Redis and Swift. With this feature, when a user sends a message to the queue, Zaqar calculates a "checksum" value for the body of the non-URL-encoded message, which the user can then retrieve after the message is fetched or claimed. Finally, the user can use it to verify that the body of the newly obtained message is correct.
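To verify such a checksum client-side, one can recompute the digest over the message body. A sketch under the assumption that the body is hashed in its JSON-serialized form; the exact serialization is backend-defined, so treat this as illustrative only::

    import hashlib
    import json

    def body_md5(body):
        # Assumes the body is hashed as its JSON serialization.
        return hashlib.md5(json.dumps(body).encode('utf-8')).hexdigest()

    # A claimed message as returned by the API (checksum value elided).
    claimed = {'body': {'event': 'hello'}, 'checksum': 'MD5:...'}
    algo, _, digest = claimed['checksum'].partition(':')
    print(algo, body_md5(claimed['body']) == digest)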
_zaqar_7.0.0: 7.0.0 ===== .. _zaqar_7.0.0_New Features: New Features ------------ .. releasenotes/notes/queue-filter-support-b704a1c27f7473b9.yaml @ b'b4c395c79a77988a4b19fdefaa8c26676848b5c6' - Support for queue filter when queue listing. With this feature, users can add filter of name or metadata in query string parameters in queue list to filter queues. .. releasenotes/notes/remove-format-contraint-of-client-id-ab787960df6e1606.yaml @ b'fff82e7a1178238ce72a82c87e410f89e020deae' - Since some clients use different format of client id not only uuid, like user id of ldap, so Zaqar will remove the format contrain of client id. Add one option 'client_id_uuid_safe' to allow user to control the validation of client id. Add two options 'min_length_client_id' and 'max_length_client_id' to allow user to control the length of client id if not using uuid. This also requires user to ensure the client id is immutable. .. releasenotes/notes/remove_pool_group_from_zaqar-f8eafeed21779959.yaml @ b'93bd4fed93af355f3fe633253cbbfc55092a0a20' - Since we have introduced the 'pool_list' instead of pool_group in Queens, Now we will update the APIs to suggest users use new argument. .. releasenotes/notes/return_reserved_metdata_for_dead_letter_queue-da160301f6d8cfa4.yaml @ b'83300343d23051db31702d1cadbf9e250c37ef24' - Add three new reserved metdata in response body of querying queue. "_dead_letter_queue", "_dead_letter_queue_messages_ttl" and "_max_claim_count". Those metadata will help user to know better about dead letter queue. .. _zaqar_7.0.0_Other Notes: Other Notes ----------- .. releasenotes/notes/configuration-refactor-0ff219ac59c96347.yaml @ b'a7df08deb925b79a4eda6dbbd3a1ee1577d6ffff' - The code structure for configuration files are changed. This is insensitvie for end users, but the persons who work for downstream changes should pay attention. Please refactor your private configurations to ``zaqar/conf/`` folder as well. .. _zaqar_8.0.0: 8.0.0 ===== .. _zaqar_8.0.0_Prelude: Prelude ------- .. releasenotes/notes/zaqar-status-upgrade-check-framework-09caa1f741f6119d.yaml @ b'37fc134d1102adaae2d7097c47669e1f8d52afb9' Added new tool ``zaqar-status upgrade check``. .. _zaqar_8.0.0_New Features: New Features ------------ .. releasenotes/notes/delete_messages_with_claim_ids-64bb8105de3768b1.yaml @ b'be84598d3a931832869d77a6591f9456ea6c10db' - Add an new option named 'message_delete_with_claim_id', when it is True, delete messages must need claim_ids and message_ids both in request parameters. This will improve the security of the message. .. releasenotes/notes/email-notification-by-internal-tool-08910ab2247c3864.yaml @ b'e1c62707d387dc07ef8333c0bc271313d26facf3' - Currently the email subscription in Zaqar relay on the third part tools, such as "sendmail". It means that deployer should install it out of Zaqar. If he forgets, Zaqar will raise internal error. This work let Zaqar support email subscription by itself using the ``smtp`` python library. .. releasenotes/notes/introduce-topic-resource-9b40674cac06bdc2.yaml @ b'88f0dd7c8f181f2e1c0a38cf5a2933c1f5286fd3' - Introduce a new resource called Topic into Zaqar. Topic is a concept from AWS Simple Notification Service (SNS), it will has relevance with subscriptions. User can send message to a topic, and then the subscribers will get the message according to different protocols, like http, email, sms, etc. This feature will help Zaqar to split Messaging Queue Service and Notification Service clearly. .. 
releasenotes/notes/remove-pool-group-totally-062ecfccd90a6725.yaml @ b'd1a01d59190a2a30f20731411c0655a0d1d48b74' - In Queens, we supported both the old way, using pool_group, and the new way without it in Flavor. In Stein, pool_group is removed entirely and only the new way is kept in Flavor and Pool. .. releasenotes/notes/zaqar-status-upgrade-check-framework-09caa1f741f6119d.yaml @ b'37fc134d1102adaae2d7097c47669e1f8d52afb9' - A new framework for the ``zaqar-status upgrade check`` command is added. This framework allows adding various checks which can be run before a Zaqar upgrade to ensure the upgrade can be performed safely. .. _zaqar_8.0.0_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/zaqar-status-upgrade-check-framework-09caa1f741f6119d.yaml @ b'37fc134d1102adaae2d7097c47669e1f8d52afb9' - Operators can now use the new CLI tool ``zaqar-status upgrade check`` to check if a Zaqar deployment can be safely upgraded from the N-1 to the N release. .. _zaqar_10.0.0: 10.0.0 ====== .. _zaqar_10.0.0_New Features: New Features ------------ .. releasenotes/notes/support-query-quques-with-count-4453825671bb5298.yaml @ b'7aa2522e3d370a70882d07d7641741679616fa55' - Support querying queues with the filter 'with_count=true' to return the number of queues. This helps users quickly get the exact total number of queues they own. .. _zaqar_10.0.0_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/drop-py-2-7-09cf95d7d843d8f6.yaml @ b'22ade4fa7748aa7c3895aa932f65be8b516900b0' - Python 2.7 support has been dropped. The last release of Zaqar to support py2.7 is OpenStack Train. The minimum version of Python now supported by Zaqar is Python 3.6. .. _zaqar_11.0.0-7: 11.0.0-7 ======== .. _zaqar_11.0.0-7_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/update-mongo-driver-with-new-version-of-pymongo-ebd82e428bb57ebd.yaml @ b'a0c32cbe4d066dad51b0bebbba9368d1e0f1b578' - Upgrade one of the storage drivers, the mongo driver, to a new version of pymongo. Pymongo has been updated to 4.0.0, and some things are no longer supported in the new version: 1. Collection.count and Cursor.count are removed. 2. Collection.ensure_index is removed. 3. Collection.__bool__ raises NotImplementedError. 4. Binary.from_uuid should be used to handle UUID objects. The mongo driver's code has been upgraded to work with these changes. .. _zaqar_11.0.0: 11.0.0 ====== .. _zaqar_11.0.0_New Features: New Features ------------ .. releasenotes/notes/encrypted-messages-in-queue-d7438d4f185be444.yaml @ b'e12c65a369825ea5469bdb31f5c7151268d7926b' - To enhance the security of the messaging service, queues in Zaqar support encrypting messages before storing them in the storage backend, and decrypting messages when they are claimed by a consumer. To enable this feature, users just need to set "_enable_encrypt_messages=True" when creating a queue. AES-256 is used as the default encryption algorithm, and the encryption key is configurable in zaqar.conf. .. _zaqar_12.0.0: 12.0.0 ====== .. _zaqar_12.0.0_Prelude: Prelude ------- .. releasenotes/notes/victoria-release-prelude-330129ef9dfd6c03.yaml @ b'0e435a35225374a2097ec435a7fb1376babfed36' Welcome to the Victoria release of the OpenStack Message service (zaqar). In this cycle, the Zaqar team would like to bring the following points to your attention. Details may be found below. * Support encrypted messages in queue. * Fixed bugs for stability and security. .. _zaqar_12.0.0_New Features: New Features ------------ ..
releasenotes/notes/victoria-release-prelude-330129ef9dfd6c03.yaml @ b'0e435a35225374a2097ec435a7fb1376babfed36' - Encrypted Messages in Queue (Change-Id `Icecfb9a232cfeefc2f9603934696bb2dcd56bc9c `_) .. _zaqar_12.0.0_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/deprecate-json-formatted-policy-file-f2abc160715c3f9b.yaml @ b'948e88c2682b71d64ea1abbe47f03fa280b30913' - The default value of the ``[oslo_policy] policy_file`` config option has been changed from ``policy.json`` to ``policy.yaml``. Operators who are utilizing customized or previously generated static policy JSON files (which are not needed by default) should generate new policy files or convert them to YAML format. Use the `oslopolicy-convert-json-to-yaml `_ tool to convert a JSON policy file to YAML format in a backward compatible way. .. _zaqar_12.0.0_Deprecation Notes: Deprecation Notes ----------------- .. releasenotes/notes/deprecate-json-formatted-policy-file-f2abc160715c3f9b.yaml @ b'948e88c2682b71d64ea1abbe47f03fa280b30913' - Use of JSON policy files was deprecated by the ``oslo.policy`` library during the Victoria development cycle. As a result, this deprecation is being noted in the Wallaby cycle with an anticipated future removal of support by ``oslo.policy``. As such, operators will need to convert to YAML policy files. Please see the upgrade notes for details on the migration of any custom policy files. .. _zaqar_12.0.0_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/victoria-release-prelude-330129ef9dfd6c03.yaml @ b'0e435a35225374a2097ec435a7fb1376babfed36' - Fix SSLError caused by not passing the cafile (Change-Id `I176e3876f2652608aaf51b0f74f4d971d31253e2 `_) .. releasenotes/notes/victoria-release-prelude-330129ef9dfd6c03.yaml @ b'0e435a35225374a2097ec435a7fb1376babfed36' - Fix the issue that the function unpackb has no encoding option (Change-Id `bb92e983a79e5c1608f6a603816e1b88283e34c9 `_) .. _zaqar_13.0.0: 13.0.0 ====== .. _zaqar_13.0.0_New Features: New Features ------------ .. releasenotes/notes/support-extra-specs-to-subscription-confirming-edbdbebbdcd0cd74.yaml @ b'9b6edcf6ca5aca45536fb6f5038068e506c9c673' - Introduce a new request header called "EXTRA-SPEC" and a driver mechanism based on stevedore to let developers implement how this information is handled. In Wallaby, there is just an empty handler by default. .. _zaqar_14.0.0: 14.0.0 ====== .. _zaqar_14.0.0_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/update-mongo-driver-with-new-version-of-pymongo-ebd82e428bb57ebd.yaml @ b'0f6ddd57099fc80776f50858c6b3178e8ec3e011' - Upgrade one of the storage drivers, the mongo driver, to a new version of pymongo. Pymongo has been updated to 4.0.0, and some things are no longer supported in the new version: 1. Collection.count and Cursor.count are removed. 2. Collection.ensure_index is removed. 3. Collection.__bool__ raises NotImplementedError. 4. Binary.from_uuid should be used to handle UUID objects. The mongo driver's code has been upgraded to work with these changes. .. _zaqar_17.0.0: 17.0.0 ====== .. _zaqar_17.0.0_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/remove-strict-redis-e50cccbdf4a86f76.yaml @ b'928dfc618e81854da198d8615d499cd199ddbd73' - The minimum redis-py version required is now >= 3.0.0 .. _zaqar_19.0.0: 19.0.0 ====== .. _zaqar_19.0.0_New Features: New Features ------------ ..
releasenotes/notes/redis-sentinel-authentication-93fa9b1846979e41.yaml @ b'fbe83c8a7b49df25d599792817e1ad52469cf129' - The Redis driver now supports authentication with Redis Sentinel. To use this feature, add the ``redis_password`` query to the Redis URI. The ``redis_username`` can be used when the ACL feature is enabled. .. releasenotes/notes/redis-username-98a265f61fca6a1c.yaml @ b'a45f70e9386432b3d1ee2b30bcaacfa9f1b76b89' - The Redis messaging store now supports authentication with a username. .. _zaqar_20.0.0: 20.0.0 ====== .. _zaqar_20.0.0_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/remove-py38-005b0eda63232532.yaml @ b'870ea048be7e1b7593532273dcff0d3dbc2dd103' - Python 3.8 support was dropped. The minimum version of Python now supported is Python 3.9. .. _zaqar_20.0.0_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/falcon-4-e4b5aab856e3228c.yaml @ b'4543e7691ce0e164c2ceb3535f107c83bf9dbe8f' - Fixed compatibility with falcon 4.0.0 and later. ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5360136 zaqar-20.1.0.dev29/api-ref/0000775000175100017510000000000015033040026014264 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5480137 zaqar-20.1.0.dev29/api-ref/source/0000775000175100017510000000000015033040026015564 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/claims.inc0000664000175100017510000001347215033040005017533 0ustar00mylesmyles=============== Claims (claims) =============== A claim is a mechanism to mark messages so that other workers will not process the same message. Claim messages ============== .. rest_method:: POST /v2/queues/{queue_name}/claims Claims a set of messages from the specified queue. This operation claims a set of messages (up to the value of the ``limit`` parameter) from oldest to newest and skips any messages that are already claimed. If no unclaimed messages are available, the API returns a ``204 No Content`` message. When a client (worker) finishes processing a message, it should delete the message before the claim expires to ensure that the message is processed only once. As part of the delete operation, workers should specify the claim ID (which is best done by simply using the provided href). If workers perform these actions, then if a claim simply expires, the server can return an error and notify the worker of the race condition. This action gives the worker a chance to roll back its own processing of the given message because another worker can claim the message and process it. The age given for a claim is relative to the server's clock. The claim's age is useful for determining how quickly messages are getting processed and whether a given message's claim is about to expire. When a claim expires, it is released. If the original worker failed to process the message, another client worker can then claim the message. Note that claim creation is best-effort, meaning the worker may claim and return less than the requested number of messages. The ``ttl`` attribute specifies how long the server waits before releasing the claim. The ttl value must be between 60 and 43200 seconds (12 hours). You must include a value for this attribute in your request. The ``grace`` attribute specifies the message grace period in seconds. The value of ``grace`` must be between 60 and 43200 seconds (12 hours). You must include a value for this attribute in your request. To deal with workers that have stopped responding (for up to 1209600 seconds or 14 days, including claim lifetime), the server extends the lifetime of claimed messages to be at least as long as the lifetime of the claim itself, plus the specified grace period. If a claimed message would normally live longer than the claim's live period, its expiration is not adjusted.
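As an illustration of the claim workflow described above, here is a minimal sketch using the Python ``requests`` library. It is not one of the official samples; the endpoint ``http://localhost:8888``, the queue name ``demoqueue``, and the omission of an auth token are assumptions made for brevity.

.. code-block:: python

    # Claim up to 5 messages, process them, then delete each one via
    # its claim-qualified href so the delete only succeeds while the
    # claim is still valid.
    import uuid

    import requests

    BASE = 'http://localhost:8888'  # assumed Zaqar endpoint
    HEADERS = {'Client-ID': str(uuid.uuid4())}

    resp = requests.post(
        BASE + '/v2/queues/demoqueue/claims?limit=5',
        json={'ttl': 300, 'grace': 300},  # both 60..43200 seconds
        headers=HEADERS)

    if resp.status_code == 201:
        for msg in resp.json()['messages']:
            print('processing', msg['body'])
            requests.delete(BASE + msg['href'], headers=HEADERS)
    elif resp.status_code == 204:
        print('no unclaimed messages available')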
Response codes -------------- .. rest_status_code:: success status.yaml - 201 - 204 .. rest_status_code:: error status.yaml - 401 - 403 - 404 - 503 Request Parameters ------------------ .. rest_parameters:: parameters.yaml - queue_name: queue_name - limit: claim_limit - ttl: claim_ttl - grace: claim_grace **Example Claim Messages: JSON request** .. literalinclude:: samples/claim_messages_request.json :language: javascript Response Parameters ------------------- **Example Claim Messages: JSON response** .. literalinclude:: samples/claim_messages_response.json :language: javascript Query Claim =========== .. rest_method:: GET /v2/queues/{queue_name}/claims/{claim_id} Queries the specified claim for the specified queue. This operation queries the specified claim for the specified queue. Claims with malformed IDs or claims that are not found by ID are ignored. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 - 403 - 404 - 503 Request Parameters ------------------ .. rest_parameters:: parameters.yaml - queue_name: queue_name - claim_id: claim_id Response Parameters ------------------- **Example Query Claim: JSON response** .. literalinclude:: samples/claim_query_response.json :language: javascript Update(Renew) Claim =================== .. rest_method:: PATCH /v2/queues/{queue_name}/claims/{claim_id} Updates the specified claim for the specified queue. This operation updates the specified claim for the specified queue. Claims with malformed IDs or claims that are not found by ID are ignored. Clients should periodically renew claims during long-running batches of work to avoid losing a claim while processing a message. The client can renew a claim by issuing a ``PATCH`` command to a specific claim resource and including a new TTL for the claim (which can be different from the original TTL). The server resets the age of the claim and applies the new TTL. Response codes -------------- .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 401 - 403 - 404 - 503 Request Parameters ------------------ .. rest_parameters:: parameters.yaml - queue_name: queue_name - claim_id: claim_id - ttl: claim_ttl - grace: claim_grace **Example Update Claim: JSON request** .. literalinclude:: samples/claim_update_request.json :language: javascript This operation does not return a response body. Delete(Release) Claim ===================== .. rest_method:: DELETE /v2/queues/{queue_name}/claims/{claim_id} Releases the specified claim for the specified queue. This operation immediately releases a claim, making any remaining, undeleted messages that are associated with the claim available to other workers. Claims with malformed IDs or claims that are not found by ID are ignored. This operation is useful when a worker is performing a graceful shutdown, fails to process one or more messages, or is taking longer than expected to process messages, and wants to make the remainder of the messages available to other workers. Response codes -------------- ..
rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 401 - 403 - 404 - 503 Request Parameters ------------------ .. rest_parameters:: parameters.yaml - queue_name: queue_name - claim_id: claim_id This operation does not accept a request body and does not return a response body. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/conf.py0000664000175100017510000000466415033040005017072 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # nova documentation build configuration file, created by # sphinx-quickstart on Sat May 1 15:17:47 2010. # # This file is execfile()d with the current directory set to # its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. extensions = [ 'openstackdocstheme', 'os_api_ref', ] # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. copyright = u'2010-present, OpenStack Foundation' # openstackdocstheme options openstackdocs_repo_name = 'openstack/zaqar' openstackdocs_bug_project = 'zaqar' openstackdocs_bug_tag = 'api-ref' # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { "sidebar_mode": "toc", } # -- Options for LaTeX output ------------------------------------------------- # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'Nova.tex', u'OpenStack Messaging Service API Documentation', u'OpenStack Foundation', 'manual'), ] ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/flavors.inc0000664000175100017510000001236715033040005017741 0ustar00mylesmyles================= Flavors (flavors) ================= Queue flavors allow users to have different types of queues based on the storage capabilities. By using flavors, it's possible to allow consumers of the service to choose between durable storage, fast storage, etc. Flavors must be created by service administrators and they rely on the existence of pools. List flavors ============ .. 
rest_method:: GET /v2/flavors Lists flavors. This operation lists flavors for the project. The flavors are sorted alphabetically by name. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 - 403 Query Parameters ----------------- .. rest_parameters:: parameters.yaml - limit: limit - marker: marker - detailed: detailed Response Parameters ------------------- .. rest_parameters:: parameters.yaml - flavors: flavors - links: flavor_links Response Example ---------------- NOTE: Configuring pool_list instead of pool_group is suggested from the beginning of Queens. .. literalinclude:: samples/flavor-list-response-new.json :language: javascript Response Example ---------------- NOTE: pool_group is removed in the Rocky release; use pool_list instead for pools. .. literalinclude:: samples/flavor-list-response.json :language: javascript Create flavor ============= .. rest_method:: PUT /v2/flavors/{flavor_name} Creates a flavor. This operation creates a new flavor. ``flavor_name`` is the name that you give to the flavor. The name must not exceed 64 bytes in length, and it is limited to US-ASCII letters, digits, underscores, and hyphens. Response codes -------------- .. rest_status_code:: success status.yaml - 201 .. rest_status_code:: error status.yaml - 400 - 401 - 403 Request Parameters ------------------ .. rest_parameters:: parameters.yaml - flavor_name: flavor_name_path - pool_group: flavor_pool_group - pool_list: flavor_pool_list Request Example --------------- NOTE: Configuring pool_list instead of pool_group is suggested from the beginning of Queens. .. literalinclude:: samples/flavor-create-request-new.json :language: javascript Request Example --------------- NOTE: pool_group is removed in the Rocky release; use pool_list instead for pools. .. literalinclude:: samples/flavor-create-request.json :language: javascript This operation does not return a response body.
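The following minimal sketch shows what a flavor creation call can look like from Python; it is illustrative only. The endpoint, the admin token value, and the existing pool name ``testpool`` are assumptions.

.. code-block:: python

    # Create a flavor backed by an existing pool; flavors are an
    # admin-only resource, so a valid token is required.
    import requests

    resp = requests.put(
        'http://localhost:8888/v2/flavors/testflavor',
        json={'pool_list': ['testpool']},  # suggested since Queens
        headers={'X-Auth-Token': 'ADMIN_TOKEN'})  # placeholder token
    print(resp.status_code)  # 201 on success; no response body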
Update flavor ============= .. rest_method:: PATCH /v2/flavors/{flavor_name} Updates a flavor. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 503 Request Parameters ------------------ .. rest_parameters:: parameters.yaml - flavor_name: flavor_name_path - pool_group: flavor_pool_group - pool_list: flavor_pool_list Request Example --------------- NOTE: Configuring pool_list instead of pool_group is suggested from the beginning of Queens. .. literalinclude:: samples/flavor-update-request-new.json :language: javascript Response Example ---------------- NOTE: Configuring pool_list instead of pool_group is suggested from the beginning of Queens. .. literalinclude:: samples/flavor-update-response-new.json :language: javascript Request Example --------------- NOTE: pool_group is removed in the Rocky release; use pool_list instead for pools. .. literalinclude:: samples/flavor-update-request.json :language: javascript Response Example ---------------- NOTE: pool_group is removed in the Rocky release; use pool_list instead for pools. .. literalinclude:: samples/flavor-update-response.json :language: javascript Show flavor details =================== .. rest_method:: GET /v2/flavors/{flavor_name} Shows details for a flavor. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 503 Request Parameters ------------------ .. rest_parameters:: parameters.yaml - flavor_name: flavor_name_path Response Parameters ------------------- .. rest_parameters:: parameters.yaml - name: flavor_name - capabilities: capabilities - pool_group: flavor_pool_group - pool_list: flavor_pool_list - href: flavor_href Response Example ---------------- NOTE: Configuring pool_list instead of pool_group is suggested from the beginning of Queens. .. literalinclude:: samples/flavor-show-response-new.json :language: javascript Response Example ---------------- NOTE: pool_group is removed in the Rocky release; use pool_list instead for pools. .. literalinclude:: samples/flavor-show-response.json :language: javascript Delete flavor ============= .. rest_method:: DELETE /v2/flavors/{flavor_name} Deletes the specified flavor. This operation immediately deletes a flavor. ``flavor_name`` is the name that you give to the flavor. The name must not exceed 64 bytes in length, and it is limited to US-ASCII letters, digits, underscores, and hyphens. Response codes -------------- .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 401 - 403 - 503 Request Parameters ------------------ .. rest_parameters:: parameters.yaml - flavor_name: flavor_name_path This operation does not accept a request body and does not return a response body. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/health.inc0000664000175100017510000000314315033040005017522 0ustar00mylesmyles=============== Health (health) =============== With the health API, a user or operator can get a general idea about the status of the Zaqar server. This information can be used for basic validation, performance checking, etc. Ping ==== .. rest_method:: GET /v2/ping Simple health check for the end user. A request to ping the Zaqar server returns 204 when the server is working, and 503 otherwise. This is a handy API for end users to check whether the messaging service is in working status. Response codes -------------- .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 503 This operation does not accept a request body and does not return a response body. Health ====== .. rest_method:: GET /v2/health Detailed health check for the cloud operator/admin. This is an ``admin only`` API. A request to get detailed health information about the Zaqar server. The response body depends on the storage setting of the Zaqar server. By default, there is no pool created; then the response body will only contain ``catalog_reachable``. Otherwise, the response body will have ``catalog_reachable`` and the health status for each pool. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 - 403 - 503 Response Parameters ------------------- .. rest_parameters:: parameters.yaml - catalog_reachable: catalog_reachable - storage_reachable: storage_reachable - operation_status: operation_status Response Example ---------------- .. literalinclude:: samples/health-response.json :language: javascript
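Because the ping endpoint needs no body and no parameters, it works well as a liveness probe. A minimal sketch, assuming a Zaqar server at ``http://localhost:8888``:

.. code-block:: python

    # 204 means the server is working; 503 means it is not.
    import requests

    resp = requests.get('http://localhost:8888/v2/ping')
    print('alive' if resp.status_code == 204 else 'unavailable')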
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/index.rst0000664000175100017510000000063715033040005017430 0ustar00mylesmyles:tocdepth: 2 ======================== Messaging Service API v2 ======================== This is a reference for the OpenStack Messaging Service API which is provided by the Zaqar project. .. rest_expand_all:: .. include:: versions.inc .. include:: queues.inc .. include:: messages.inc .. include:: claims.inc .. include:: subscription.inc .. include:: health.inc .. include:: pools.inc .. include:: flavors.inc ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/messages.inc0000664000175100017510000002346315033040005020073 0ustar00mylesmyles=================== Messages (messages) =================== A message is sent through a queue and exists until it is deleted by a recipient or automatically by the system based on a TTL (time-to-live) value. All message-related operations require Client-Id to be included in the headers. This is to ensure that messages are not echoed back to the client that posted them unless the client explicitly requests this. Post Message ============ .. rest_method:: POST /v2/queues/{queue_name}/messages Posts the message or messages for the specified queue. This operation posts the specified message or messages. You can submit up to 10 messages in a single request, but you must always encapsulate the messages in a collection container (an array in JSON, even for a single message - without the JSON array, you receive the "Invalid request body" message). The resulting value of the Location header or response body might be used to retrieve the created messages for further processing. The client specifies only the body and TTL for the message. The server inserts metadata, such as ID and age. The response body contains a list of resource paths that correspond to each message submitted in the request, in the order of the messages. If a server-side error occurs during the processing of the submitted messages, a partial list is returned, the partial attribute is set to true, and the client can try to post the remaining messages again. If the server cannot enqueue any messages, the server returns a ``503 Service Unavailable`` error message. The ``body`` attribute specifies an arbitrary document that constitutes the body of the message being sent. The following rules apply for the maximum size: The maximum size of posted messages is the maximum size of the entire request document (rather than the sum of the individual message body field values as it was in earlier releases). On error, the client will now be notified of how much it exceeded the limit. The size is limited to 256 KB, including whitespace. The document must be valid JSON. (The Message Queuing service validates it.) The ``ttl`` attribute specifies how long the server waits before marking the message as expired and removing it from the queue. The value of ``ttl`` must be between 60 and 1209600 seconds (14 days). Note that the server might not actually delete the message until its age has reached up to (ttl + 60) seconds, to allow for flexibility in storage implementations. The ``delay`` attribute specifies how long the message will be delayed before it can be claimed. The value of ``delay`` must be between 0 and 900 seconds (15 mins). Response codes -------------- .. rest_status_code:: success status.yaml - 201 .. rest_status_code:: error status.yaml - 400 - 401 - 404 - 503 Request Parameters ------------------ .. rest_parameters:: parameters.yaml - queue_name: queue_name Request Example --------------- .. literalinclude:: samples/messages-post-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - resources: messages_resources Response Example ---------------- .. literalinclude:: samples/messages-post-response.json :language: javascript
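To make the collection-container rule above concrete, here is a minimal sketch of posting two messages with the Python ``requests`` library; the endpoint and queue name are assumptions.

.. code-block:: python

    # The messages are wrapped in a JSON array even when posting a
    # single message; each entry carries its own ttl and body.
    import uuid

    import requests

    resp = requests.post(
        'http://localhost:8888/v2/queues/demoqueue/messages',
        json={'messages': [
            {'ttl': 300, 'body': {'event': 'BackupStarted'}},
            {'ttl': 300, 'body': {'event': 'BackupProgress', 'done': 0}},
        ]},
        headers={'Client-ID': str(uuid.uuid4())})
    print(resp.status_code)          # 201 on success
    print(resp.json()['resources'])  # one resource path per message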
List Messages ============= .. rest_method:: GET /v2/queues/{queue_name}/messages Lists the messages in the specified queue. A request to list messages when the queue is not found or when messages are not found returns 204, instead of 200, because there was no information to send back. Messages with malformed IDs or messages that are not found by ID are ignored. This operation gets the message or messages in the specified queue. Message IDs and markers are opaque strings. Clients should make no assumptions about their format or length. Furthermore, clients should assume that there is no relationship between markers and message IDs (that is, one cannot be derived from the other). This allows for a wide variety of storage driver implementations. Results are ordered by age, oldest message first. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 404 - 503 Request Parameters ------------------ .. rest_parameters:: parameters.yaml - queue_name: queue_name - marker: marker - limit: limit - echo: echo - include_claimed: include_claimed - include_delayed: include_delayed Response Parameters ------------------- .. rest_parameters:: parameters.yaml - messages: messages - links: links Response Example ---------------- .. literalinclude:: samples/messages-list-response.json :language: javascript Get A Set Of Messages ===================== .. rest_method:: GET /v2/queues/{queue_name}/messages Gets a specified set of messages from the specified queue. This operation provides a more efficient way to query multiple messages compared to using a series of individual ``GET`` requests. Note that the list of IDs cannot exceed 20. If a malformed ID or a nonexistent message ID is provided, it is ignored, and the remaining messages are returned. Unlike the Get Messages operation, a client's own messages are always returned in this operation. If you use the ids parameter, the echo parameter is ignored if it is specified. The message attributes are defined as follows: ``href`` is an opaque relative URI that the client can use to uniquely identify a message resource and interact with it. ``ttl`` is the TTL that was set on the message when it was posted. The message expires after (ttl - age) seconds. ``age`` is the number of seconds relative to the server's clock. ``body`` is the arbitrary document that was submitted with the original request to post the message. ``checksum`` is the hash digest of the ``body``; the default algorithm is MD5. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 404 - 503 Request Parameters ------------------ .. rest_parameters:: parameters.yaml - queue_name: queue_name - ids: ids Response Parameters ------------------- .. rest_parameters:: parameters.yaml - messages: messages Response Example ---------------- .. literalinclude:: samples/messages-get-byids-response.json :language: javascript Delete A Set Of Messages ======================== .. rest_method:: DELETE /v2/queues/{queue_name}/messages Provides a bulk delete for messages. This operation immediately deletes the specified messages. If any of the message IDs are malformed or non-existent, they are ignored. The remaining valid message IDs are deleted. Please note that users should provide either the ``ids`` or the ``pop`` parameter; otherwise this API will delete nothing. If ``pop`` is provided, the value must be at least 1 and may not be greater than ``max_messages_per_claim_or_pop`` in conf. If ``ids`` is provided, it should contain at least one id and not more than ``max_messages_per_page`` in conf.
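A minimal sketch of both deletion modes follows; it is illustrative only, and the endpoint, queue name, and message ids are assumptions.

.. code-block:: python

    # ``pop`` and ``ids`` are mutually exclusive: pop atomically
    # claims-and-deletes messages and returns them in the body, while
    # ids deletes specific messages and returns no body.
    import uuid

    import requests

    BASE = 'http://localhost:8888/v2/queues/demoqueue/messages'
    HEADERS = {'Client-ID': str(uuid.uuid4())}

    popped = requests.delete(BASE + '?pop=1', headers=HEADERS)
    print(popped.status_code, popped.text)

    ids = '578f0055508f153f256f717e,578f0055508f153f256f717f'
    requests.delete(BASE + '?ids=' + ids, headers=HEADERS)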
Response codes -------------- .. rest_status_code:: success status.yaml - 200 - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 404 - 503 Request Parameters ------------------ This operation does not accept a request body. .. rest_parameters:: parameters.yaml - queue_name: queue_name - ids: ids - pop: pop Response Example ---------------- This operation only returns a response body when the ``pop`` query parameter is used. .. literalinclude:: samples/messages-delete-bypop-response.json :language: javascript Get A Specific Message ====================== .. rest_method:: GET /v2/queues/{queue_name}/messages/{message_id} Gets the specified message from the specified queue. This operation gets the specified message from the specified queue. If either the message ID is malformed or nonexistent, no message is returned. Message fields are defined as follows: ``href`` is an opaque relative URI that the client can use to uniquely identify a message resource and interact with it. ``ttl`` is the TTL that was set on the message when it was posted. The message expires after (ttl - age) seconds. ``age`` is the number of seconds relative to the server's clock. ``body`` is the arbitrary document that was submitted with the original request to post the message. ``checksum`` is the hash digest of the ``body``; the default algorithm is MD5. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 404 - 503 Request Parameters ------------------ .. rest_parameters:: parameters.yaml - queue_name: queue_name - message_id: message_id Response Example ---------------- .. literalinclude:: samples/messages-get-response.json :language: javascript Delete A Specific Message ========================= .. rest_method:: DELETE /v2/queues/{queue_name}/messages/{message_id} Deletes the specified message from the specified queue. This operation immediately deletes the specified message. The ``claim_id`` parameter specifies that the message is deleted only if it has the specified claim ID and that claim has not expired. This specification is useful for ensuring only one worker processes any given message. When a worker's claim expires before it can delete a message that it has processed, the worker must roll back any actions it took based on that message because another worker can now claim and process the same message. If you do not specify ``claim_id``, but the message is claimed, the operation fails. You can only delete claimed messages by providing an appropriate ``claim_id``. Response codes -------------- .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 404 - 503 Request ------- .. rest_parameters:: parameters.yaml - queue_name: queue_name - message_id: message_id This operation does not accept a request body and does not return a response body. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/parameters.yaml0000664000175100017510000004230515033040005020614 0ustar00mylesmyles# variables in header client_id: type: string in: header description: | The identification for each client instance. The format of client id is UUID by default, but Zaqar also supports a Non-UUID string by setting configuration "client_id_uuid_safe=off".
The UUID must be submitted in its canonical form (for example, 3381af92-2b9e-11e3-b191-71861300734c). The string must be longer than "min_length_client_id=20" and smaller than "max_length_client_id=300" by default. Users can control the length of the client id by using those two options. The client generates the Client-ID once. Client-ID persists between restarts of the client so the client should reuse that same Client-ID. Note: All message-related operations require the use of ``Client-ID`` in the headers to ensure that messages are not echoed back to the client that posted them, unless the client explicitly requests this. # variables in path claim_id: type: string in: path required: True description: | The id of the claim. flavor_name_path: type: string in: path required: True description: The name of the flavor. message_id: type: string in: path required: True description: | The ID of the message. pool_name_path: type: string in: path required: True description: The name of the pool. queue_name: type: string in: path required: True description: | The name of the queue. subscription_id_path: type: string in: path required: True description: | The id of the subscription. # variables in query claim_limit: type: integer in: query required: false description: | The ``limit`` specifies up to 20 messages (configurable) to claim. If not specified, limit defaults to 10. Note that claim creation is best-effort, meaning the server may claim and return less than the requested number of messages. detailed: type: boolean in: query required: false description: | The 'detailed' parameter specifies whether to show detailed information when querying queues, flavors and pools. echo: type: boolean in: query required: false description: Indicate if the messages can be echoed back to the client that posted them. ids: type: list in: query required: false description: | A list of the message ids. ``pop`` & ``ids`` parameters are mutually exclusive. Using them together in a request will result in HTTP 400. NOTE: This is not a real list; it is a string of many message ids separated by commas, for example: /messages?ids=578f0055508f153f256f717e,578f0055508f153f256f717f include_claimed: type: boolean in: query required: false description: Indicate if the messages list should include the claimed messages. include_delayed: type: boolean in: query required: false description: Indicate if the messages list should include the delayed messages. limit: type: integer in: query required: false description: | Requests a page size of items. Returns a number of items up to a limit value. Use the ``limit`` parameter to make an initial limited request and use the ID of the last-seen item from the response as the ``marker`` parameter value in a subsequent limited request. marker: type: string in: query required: false description: | The ID of the last-seen item. Use the ``limit`` parameter to make an initial limited request and use the ID of the last-seen item from the response as the ``marker`` parameter value in a subsequent limited request. name: type: string in: query required: false description: | The 'name' parameter specifies a name to filter queues by when querying queues. pop: type: integer in: query required: false description: | The ``pop`` parameter specifies how many messages will be popped from the queue. ``pop`` & ``ids`` parameters are mutually exclusive. Using them together in a request will result in HTTP 400.
with_count: type: boolean in: query required: false description: | The 'with_count' parameter specifies whether to show the number of queues when querying them. # variables in body _dead_letter_queue: type: string in: body required: False description: | The target queue the message will be moved to when the message can't be processed successfully after reaching the max claim count. It's not supported to add queue C as the dead letter queue for queue B where queue B has been set as a dead letter queue for queue A. There is no default value for this attribute. If it's not set explicitly, then there is no dead letter queue for the current queue. It is one of the ``reserved attributes`` of Zaqar queues. _dead_letter_queue_messages_ttl: type: integer in: body required: False description: | The new TTL setting for messages when moved to the dead letter queue. If it's not set, the current TTL will be kept. It is one of the ``reserved attributes`` of Zaqar queues. _dead_letter_queue_messages_ttl_response: type: integer in: body required: True description: | The new TTL setting for messages when moved to the dead letter queue. If it's not set, the current TTL will be kept. It is one of the ``reserved attributes`` of Zaqar queues. _dead_letter_queue_response: type: string in: body required: True description: | The target queue the message will be moved to when the message can't be processed successfully after reaching the max claim count. It's not supported to add queue C as the dead letter queue for queue B where queue B has been set as a dead letter queue for queue A. There is no default value for this attribute. If it's not set explicitly, then there is no dead letter queue for the current queue. It is one of the ``reserved attributes`` of Zaqar queues. _default_message_delay: type: string in: body required: False description: | The delay of messages defined for a queue. When messages are sent to the queue, they will be delayed for some time and cannot be claimed until the delay expires. The user can define a queue-level value for the delay and also a message-level one; the latter has a higher priority. It is one of the ``reserved attributes`` of Zaqar queues. _default_message_delay_response: type: string in: body required: True description: | The delay of messages defined for a queue. When messages are sent to the queue, they will be delayed for some time and cannot be claimed until the delay expires. The user can define a queue-level value for the delay and also a message-level one; the latter has a higher priority. It is one of the ``reserved attributes`` of Zaqar queues. _default_message_ttl: type: integer in: body required: True description: | The default TTL of messages defined for a queue, which applies to any message posted to the queue. So when there is no TTL defined for a message, the queue's _default_message_ttl will be used. By default, the value is the same value defined as ``max_message_ttl`` in zaqar.conf. It is one of the ``reserved attributes`` of Zaqar queues. The value will be reverted to the default value after deleting it explicitly. _enable_encrypt_messages: type: boolean in: body required: False description: | The switch for encrypting messages for a queue, which applies to any message posted to the queue. By default, the value is False. It is one of the ``reserved attributes`` of Zaqar queues. _flavor: type: string in: body required: False description: | The flavor name, which tells Zaqar which storage pool will be used to create the queue. It is one of the ``reserved attributes`` of Zaqar queues.
_max_claim_count: type: integer in: body required: False description: | The maximum number of times the message can be claimed. Generally, reaching it means the message cannot be processed successfully. There is no default value for this attribute. If it's not set, then this feature won't be enabled for the current queue. It is one of the ``reserved attributes`` of Zaqar queues. _max_claim_count_response: type: integer in: body required: True description: | The maximum number of times the message can be claimed. Generally, reaching it means the message cannot be processed successfully. There is no default value for this attribute. If it's not set, then this feature won't be enabled for the current queue. It is one of the ``reserved attributes`` of Zaqar queues. _max_messages_post_size: type: integer in: body required: True description: | The max post size of messages defined for a queue, which applies to any message posted to the queue. So users can define a queue-level cap for the post size, which can't be bigger than the max_messages_post_size defined in zaqar.conf. It is one of the ``reserved attributes`` of Zaqar queues. The value will be reverted to the default value after deleting it explicitly. capabilities: type: list in: body description: | Capabilities describe what this flavor is capable of, based on the storage capabilities. They are used to inform the final user of such capabilities. catalog_reachable: type: boolean in: body required: True description: | A boolean value to indicate if the management (catalog) database is reachable or not. claim_grace: type: integer in: body required: false description: | The ``grace`` attribute specifies the message grace period in seconds. The value of ``grace`` must be between 60 and 43200 seconds (12 hours). You must include a value for this attribute in your request. To deal with workers that have stopped responding (for up to 1209600 seconds or 14 days, including claim lifetime), the server extends the lifetime of claimed messages to be at least as long as the lifetime of the claim itself, plus the specified grace period. If a claimed message would normally live longer than the claim's live period, its expiration is not adjusted. claim_ttl: type: integer in: body required: false description: | The ``ttl`` attribute specifies how long the server waits before releasing the claim. The ttl value must be between 60 and 43200 seconds (12 hours). You must include a value for this attribute in your request. confirmed: type: boolean in: body required: true description: | The ``confirmed`` attribute specifies whether to confirm a subscription. count: type: integer in: body required: false description: | The ``count`` attribute specifies how many queues are in the current project. flavor_href: type: string in: body description: | The url of the flavor. flavor_links: type: array in: body required: true description: | Links related to the flavors. This is a list of dictionaries, each including keys ``href`` and ``rel``. flavor_name: type: string in: body required: true description: | The name of the flavor. flavor_pool_group: type: string in: body required: true description: | The ``pool_group`` attribute specifies the name of the pool group this flavor sits on top of. NOTE: pool_group is removed in the Rocky release; use pool_list instead for flavors. flavor_pool_list: type: list in: body description: | A list of pools in the flavor. NOTE: Configuring pool_list instead of pool_group is suggested from the beginning of Queens. flavors: type: list in: body description: | A list of the flavors.
links: type: array in: body required: true description: | Links related to the queues. This is a list of dictionaries, each including keys ``href`` and ``rel``. messages: type: list in: body required: True description: | A list of the messages. messages_resources: type: list in: body description: | A list of the URLs to messages. operation_status: type: dict in: body required: False description: | A dict which will contain the status for many different actions/operations. For example, post_messages, delete_messages, delete queue, etc. And each status is a dict which contains three items: ``seconds``, ``ref`` and ``succeeded``. Seconds means how long the operation took, and succeeded indicates whether the action was successful or not. Ref may contain further information if succeeded is False; otherwise it's null. pool_flavor: type: string in: body required: false description: | The ``flavor`` attribute specifies a tag given to more than one pool to remind the user of the purpose/capabilities of all pools that fall under that flavor. NOTE: Configuring flavor instead of group is suggested from the beginning of Queens. pool_group: type: string in: body required: false description: | The ``group`` attribute specifies a tag given to more than one pool to remind the user of the purpose/capabilities of all pools that fall under that group. NOTE: group is removed in the Rocky release; use flavor instead for pools. pool_href: type: string in: body description: | The url of the pool. pool_links: type: array in: body required: true description: | Links related to the pools. This is a list of dictionaries, each including keys ``href`` and ``rel``. pool_name: type: string in: body description: | The name of the pool. pool_options: type: dict in: body required: false description: | The ``options`` attribute gives storage-specific options used by storage driver implementations. The value must be a dict and valid key-value pairs come from the registered options for a given storage backend. pool_uri: type: string in: body required: true description: | The ``uri`` attribute specifies a connection string compatible with a storage client (e.g., pymongo) attempting to connect to that pool. pool_weight: type: integer in: body required: true description: | The ``weight`` attribute specifies the likelihood that this pool will be selected for the next queue allocation. The value must be an integer greater than -1. pools: type: list in: body description: | A list of the pools. pre_signed_queue_expires: type: string in: body required: False description: | The time at which the pre-signed URL will expire. pre_signed_queue_methods: type: list in: body required: False description: | A list of HTTP methods. The HTTP method(s) this URL was created for. By selecting the HTTP method, it's possible to give either read or read/write access to a specific resource. pre_signed_queue_paths: type: list in: body required: False description: | A list of paths the pre-signed queue can support. It could be a set of ``messages``, ``subscriptions``, ``claims``. pre_signed_queue_signature: type: list in: body required: True description: | The signature is generated after creating the pre-signed URL. It can be consumed by adding the below to the HTTP headers: URL-Signature: 6a63d63242ebd18c3518871dda6fdcb6273db2672c599bf985469241e9a1c799 URL-Expires: 2015-05-31T19:00:17Z project_id: type: string in: body required: True description: | The ID of the current project/tenant. queue_metadata: type: dict in: body description: | Metadata of the queue.
queues: type: list in: body required: true description: | A list of the queues. resource_types: type: list in: body required: false description: | The ``resource_types`` attribute allows users to purge a particular resource of the queue. storage_reachable: type: boolean in: body required: False description: | A boolean value to indicate if the messages (pool) database is reachable or not. subscriber: type: string in: body required: True description: | The ``subscriber`` attribute specifies the destination to which messages are notified. It has been designed to match the Internet RFC on Relative Uniform Resource Locators. Zaqar now supports two kinds of subscribers: http/https and email. The http/https subscriber should start with ``http/https``. The email subscriber should start with ``mailto``. subscription_age: type: integer in: body description: | How long the subscription has existed. subscription_id: type: string in: body description: | The id of the subscription. subscription_options: type: dict in: body required: false description: | The ``options`` attribute specifies the extra metadata for the subscription. The value must be a dict and could contain any key-value pairs. If the subscriber is "mailto", the ``options`` can contain ``from`` and ``subject`` to indicate the email's author and title. subscription_source: type: string in: body description: | The queue name which the subscription is registered on. subscription_ttl: type: integer in: body required: false description: | The ``ttl`` attribute specifies how long the subscription stays alive. The ttl value must be greater than 60 seconds. The default value is 3600 seconds. subscriptions: type: list in: body description: | A list of the subscriptions. versions: type: list in: body required: True description: | A list of supported major API versions. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/pools.inc0000664000175100017510000001231615033040005017413 0ustar00mylesmyles=============== Pools (pools) =============== If pooling is enabled, the queuing service uses multiple queue databases in order to scale horizontally. A pool (queue database) can be added any time without stopping the service. Each pool has a weight that is assigned during creation time but can be changed later. Pooling is done by queue, which means that all messages for a particular queue can be found in the same pool (queue database). List pools ========== .. rest_method:: GET /v2/pools Lists pools. This operation lists pools for the project. The pools are sorted alphabetically by name. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 404 - 401 Query Parameters ----------------- .. rest_parameters:: parameters.yaml - limit: limit - marker: marker - detailed: detailed Response Parameters ------------------- .. rest_parameters:: parameters.yaml - pools: pools - links: pool_links Response Example ---------------- NOTE: Configuring flavor instead of group is suggested from the beginning of Queens. .. literalinclude:: samples/pool-list-response-new.json :language: javascript Response Example ---------------- NOTE: group is removed in the Rocky release; use flavor instead for pools. .. literalinclude:: samples/pool-list-response.json :language: javascript Create pool ============ .. rest_method:: PUT /v2/pools/{pool_name} Creates a pool. This operation creates a new pool. ``pool_name`` is the name that you give to the pool. The name must not exceed 64 bytes in length, and it is limited to US-ASCII letters, digits, underscores, and hyphens.
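A minimal sketch of registering a pool follows. It is illustrative only: the endpoint, the MongoDB URI, the weight, the flavor tag, and the admin token are assumptions.

.. code-block:: python

    # Register a MongoDB-backed pool; this is an operator-level call.
    import requests

    resp = requests.put(
        'http://localhost:8888/v2/pools/testpool',
        json={
            'uri': 'mongodb://127.0.0.1:27017',  # storage connection
            'weight': 100,                       # selection likelihood
            'flavor': 'testflavor',              # suggested since Queens
            'options': {},
        },
        headers={'X-Auth-Token': 'ADMIN_TOKEN'})  # placeholder token
    print(resp.status_code)  # 201 on success; no response body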
Response codes -------------- .. rest_status_code:: success status.yaml - 201 .. rest_status_code:: error status.yaml - 400 - 401 - 409 Request Parameters ------------------ .. rest_parameters:: parameters.yaml - pool_name: pool_name_path - weight: pool_weight - uri: pool_uri - group: pool_group - flavor: pool_flavor - options: pool_options Request Example --------------- NOTE: Configuring flavor instead of group is suggested from the beginning of Queens. .. literalinclude:: samples/pool-create-request-new.json :language: javascript Request Example --------------- NOTE: group is removed in the Rocky release; use flavor instead for pools. .. literalinclude:: samples/pool-create-request.json :language: javascript This operation does not return a response body. Update pool ============ .. rest_method:: PATCH /v2/pools/{pool_name} Updates a pool. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 404 - 503 Request Parameters ------------------ .. rest_parameters:: parameters.yaml - pool_name: pool_name_path - weight: pool_weight - uri: pool_uri - group: pool_group - flavor: pool_flavor - options: pool_options Request Example --------------- NOTE: Configuring flavor instead of group is suggested from the beginning of Queens. .. literalinclude:: samples/pool-update-request-new.json :language: javascript Response Example ---------------- NOTE: Configuring flavor instead of group is suggested from the beginning of Queens. .. literalinclude:: samples/pool-update-response-new.json :language: javascript Request Example --------------- NOTE: group is removed in the Rocky release; use flavor instead for pools. .. literalinclude:: samples/pool-update-request.json :language: javascript Response Example ---------------- NOTE: group is removed in the Rocky release; use flavor instead for pools. .. literalinclude:: samples/pool-update-response.json :language: javascript Show pool details ================== .. rest_method:: GET /v2/pools/{pool_name} Shows details for a pool. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 503 Request Parameters ------------------ .. rest_parameters:: parameters.yaml - pool_name: pool_name_path Response Parameters ------------------- .. rest_parameters:: parameters.yaml - name: pool_name - weight: pool_weight - uri: pool_uri - group: pool_group - flavor: pool_flavor - href: pool_href Response Example ---------------- NOTE: Configuring flavor instead of group is suggested from the beginning of Queens. .. literalinclude:: samples/pool-show-response-new.json :language: javascript Response Example ---------------- NOTE: group is removed in the Rocky release; use flavor instead for pools. .. literalinclude:: samples/pool-show-response.json :language: javascript Delete pool =============== .. rest_method:: DELETE /v2/pools/{pool_name} Deletes the specified pool. This operation immediately deletes a pool. ``pool_name`` is the name that you give to the pool. The name must not exceed 64 bytes in length, and it is limited to US-ASCII letters, digits, underscores, and hyphens. Response codes -------------- .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 401 - 403 - 503 Request Parameters ------------------ ..
rest_parameters:: parameters.yaml - pool_name: pool_name_path This operation does not accept a request body and does not return a response body. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/queues.inc0000664000175100017510000002071615033040005017571 0ustar00mylesmyles=============== Queues (queues) =============== A queue is a logical entity that groups messages. Ideally, a queue is created per work type. For example, if you want to compress files, you would create a queue dedicated to this job. Any application that reads from this queue would only compress files. Nowadays, a queue in Zaqar is more like a topic, and it is created lazily. Users can post messages to a queue before creating the queue; Zaqar will create the queue/topic automatically. List queues =========== .. rest_method:: GET /v2/queues Lists queues. A request to list queues when you have no queues in your account returns 204, instead of 200, because there was no information to send back. This operation lists queues for the project. The queues are sorted alphabetically by name. When listing queues, a filter such as name or metadata can be added as a query string parameter to filter the queues. If the metadata or name of a queue matches the filter, the queue will be listed to the user; otherwise the queue will be filtered out. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 503 Request Parameters ------------------ .. rest_parameters:: parameters.yaml - limit: limit - marker: marker - detailed: detailed - name: name - with_count: with_count Response Parameters ------------------- .. rest_parameters:: parameters.yaml - queues: queues - links: links - count: count Response Example ---------------- .. literalinclude:: samples/queues-list-response.json :language: javascript Create queue ============ .. rest_method:: PUT /v2/queues/{queue_name} Creates a queue. This operation creates a new queue. The body of the request is empty. ``queue_name`` is the name that you give to the queue. The name must not exceed 64 bytes in length, and it is limited to US-ASCII letters, digits, underscores, and hyphens. When creating a queue, the user can specify metadata for the queue. Currently, Zaqar supports the following metadata: _flavor, _max_claim_count, _dead_letter_queue, _dead_letter_queue_messages_ttl and _enable_encrypt_messages. In order to support delayed queues, the metadata ``_default_message_delay`` has also been added. Response codes -------------- .. rest_status_code:: success status.yaml - 201 - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 503 Request Parameters ------------------ .. rest_parameters:: parameters.yaml - queue_name: queue_name - _dead_letter_queue: _dead_letter_queue - _dead_letter_queue_messages_ttl: _dead_letter_queue_messages_ttl - _default_message_delay: _default_message_delay - _default_message_ttl: _default_message_ttl - _flavor: _flavor - _max_claim_count: _max_claim_count - _max_messages_post_size: _max_messages_post_size - _enable_encrypt_messages: _enable_encrypt_messages Request Example --------------- .. literalinclude:: samples/queue-create-request.json :language: javascript This operation does not return a response body.
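As a concrete illustration of the reserved metadata above, here is a minimal sketch of creating a queue with a dead letter queue configured; the endpoint and queue names are assumptions.

.. code-block:: python

    # After a message has been claimed 3 times without being deleted,
    # it is moved to demoqueue_dlq with a fresh TTL of 3600 seconds.
    import uuid

    import requests

    resp = requests.put(
        'http://localhost:8888/v2/queues/demoqueue',
        json={
            '_max_claim_count': 3,
            '_dead_letter_queue': 'demoqueue_dlq',
            '_dead_letter_queue_messages_ttl': 3600,
        },
        headers={'Client-ID': str(uuid.uuid4())})
    print(resp.status_code)  # 201 created, or 204 if it already existed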
rest_status_code:: error status.yaml - 400 - 401 - 404 - 409 - 503 Request Parameters ------------------ .. rest_parameters:: parameters.yaml - queue_name: queue_name When updating a queue, the request body must be a list containing a series of JSON objects that follow https://tools.ietf.org/html/draft-ietf-appsawg-json-patch-10. .. note:: - The "Content-Type" header should be "application/openstack-messaging-v2.0-json-patch" - The ``path`` must start with /metadata; for example, if the key is ``ttl``, then the path should be /metadata/ttl Request Example --------------- .. literalinclude:: samples/queue-update-request.json :language: javascript Response Example ---------------- .. literalinclude:: samples/queue-update-response.json :language: javascript Show queue details ================== .. rest_method:: GET /v2/queues/{queue_name} Shows details for a queue. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 503 Request Parameters ------------------ .. rest_parameters:: parameters.yaml - queue_name: queue_name Response Parameters ------------------- .. rest_parameters:: parameters.yaml - _max_messages_post_size: _max_messages_post_size - _default_message_delay: _default_message_delay_response - _default_message_ttl: _default_message_ttl - _max_claim_count: _max_claim_count_response - _dead_letter_queue: _dead_letter_queue_response - _dead_letter_queue_messages_ttl: _dead_letter_queue_messages_ttl_response - _enable_encrypt_messages: _enable_encrypt_messages Response Example ---------------- .. literalinclude:: samples/queue-show-response.json :language: javascript Delete queue =============== .. rest_method:: DELETE /v2/queues/{queue_name} Deletes the specified queue. This operation immediately deletes a queue and all of its existing messages. ``queue_name`` is the name that you give to the queue. The name must not exceed 64 bytes in length, and it is limited to US-ASCII letters, digits, underscores, and hyphens. Response codes -------------- .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 503 Request Parameters ------------------ .. rest_parameters:: parameters.yaml - queue_name: queue_name This operation does not accept a request body and does not return a response body. Get queue stats =============== .. rest_method:: GET /v2/queues/{queue_name}/stats Returns statistics for the specified queue. This operation returns queue statistics, including how many messages are in the queue, categorized by status. If the value of the ``total`` attribute is 0, then ``oldest`` and ``newest`` message statistics are not included in the response. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 503 Request Parameters ------------------ .. rest_parameters:: parameters.yaml - queue_name: queue_name Response Example ---------------- .. literalinclude:: samples/queue-stats-response.json :language: javascript Pre-signed queue ================ .. rest_method:: POST /v2/queues/{queue_name}/share Creates a pre-signed URL for a given queue. .. note:: In the case of pre-signed URLs, the queue cannot be created lazily. This is to prevent cases where queues are deleted and users still have a valid URL. This is not a big issue in cases where there is just one pool.
However, in a deployment using more than one type of pool, the lazily created queue may end up in an undesired pool, and it would be possible for an attacker to attempt a DoS on that pool. Therefore, whenever a pre-signed URL is created, the queue is created if it does not already exist. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 404 - 503 Request Parameters ------------------ .. rest_parameters:: parameters.yaml - queue_name: queue_name - paths: pre_signed_queue_paths - methods: pre_signed_queue_methods - expires: pre_signed_queue_expires Request Example --------------- .. literalinclude:: samples/queue-pre-signed-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - project: project_id - paths: pre_signed_queue_paths - methods: pre_signed_queue_methods - expires: pre_signed_queue_expires - signature: pre_signed_queue_signature Response Example ---------------- .. literalinclude:: samples/queue-pre-signed-response.json :language: javascript Purge queue =========== .. rest_method:: POST /v2/queues/{queue_name}/purge Purges particular resources of the queue. .. note:: Currently, Zaqar supports purging the "messages" and "subscriptions" resources of a queue. Response codes -------------- .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 503 Request Parameters ------------------ .. rest_parameters:: parameters.yaml - queue_name: queue_name - resource_types: resource_types Request Example --------------- .. literalinclude:: samples/purge-queue-request.json :language: javascript ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5510137 zaqar-20.1.0.dev29/api-ref/source/samples/0000775000175100017510000000000015033040026017230 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/claim_messages_request.json0000664000175100017510000000004415033040005024642 0ustar00mylesmyles{ "ttl": 300, "grace": 300 }././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/claim_messages_response.json0000664000175100017510000000053515033040005025015 0ustar00mylesmyles{ "messages": [ { "body": { "event": "BackupStarted" }, "age": 239, "href": "/v2/queues/demoqueue/messages/51db6f78c508f17ddc924357?claim_id=51db7067821e727dc24df754", "ttl": 300, "checksum": "MD5:82eb2714b7c0237d373947c046cac78d" } ] } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/claim_query_response.json0000664000175100017510000000050715033040005024352 0ustar00mylesmyles{ "age": 57, "href": "/v2/queues/demoqueue/claims/51db7067821e727dc24df754", "messages": [ { "body": { "event": "BackupStarted" }, "age": 296, "href": "/v2/queues/demoqueue/messages/51db6f78c508f17ddc924357?claim_id=51db7067821e727dc24df754", "ttl": 300 } ], "ttl": 300 }././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/claim_update_request.json0000664000175100017510000000004015033040005024311 0ustar00mylesmyles{ "ttl": 300, "grace": 300 }././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0
zaqar-20.1.0.dev29/api-ref/source/samples/flavor-create-request-new.json0000664000175100017510000000005515033040005025127 0ustar00mylesmyles{ "pool_list": "[testpool1, testpool2]" }././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/flavor-create-request.json0000664000175100017510000000004115033040005024333 0ustar00mylesmyles{ "pool_group": "testgroup" }././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/flavor-list-response-new.json0000664000175100017510000000074015033040005025006 0ustar00mylesmyles{ "flavors": [ { "href": "/v2/flavors/test_flavor1", "pool_group": "", "pool_list": "[testpool1, testpool2]", "name": "test_flavor1", "pool": "testgroup" }, { "href": "/v2/flavors/test_flavor2", "pool_group": "", "pool_list": "[testpool3, testpool4]", "name": "test_flavor2", "pool": "testgroup" } ], "links": [ { "href": "/v2/flavors?marker=test_flavor2", "rel": "next" } ] }././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/flavor-list-response.json0000664000175100017510000000070615033040005024221 0ustar00mylesmyles{ "flavors": [ { "href": "/v2/flavors/test_flavor1", "pool_group": "testgroup", "pool_list": "", "name": "test_flavor1", "pool": "testgroup" }, { "href": "/v2/flavors/test_flavor2", "pool_group": "testgroup", "pool_list": "", "name": "test_flavor2", "pool": "testgroup" } ], "links": [ { "href": "/v2/flavors?marker=test_flavor2", "rel": "next" } ] }././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/flavor-show-response-new.json0000664000175100017510000000034015033040005025011 0ustar00mylesmyles{ "href": "/v2/flavors/testflavor", "capabilities": [ "FIFO", "CLAIMS", "DURABILITY", "AOD", "HIGH_THROUGHPUT" ], "pool_group": "", "pool_list": "[testpool1, testpool2]", "name": "testflavor" }././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/flavor-show-response.json0000664000175100017510000000032315033040005024221 0ustar00mylesmyles{ "href": "/v2/flavors/testflavor", "capabilities": [ "FIFO", "CLAIMS", "DURABILITY", "AOD", "HIGH_THROUGHPUT" ], "pool_group": "testgroup", "pool_list": "", "name": "testflavor" }././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/flavor-update-request-new.json0000664000175100017510000000005515033040005025146 0ustar00mylesmyles{ "pool_list": "[testpool1, testpool3]" }././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/flavor-update-request.json0000664000175100017510000000004115033040005024352 0ustar00mylesmyles{ "pool_group": "testgroup" }././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/flavor-update-response-new.json0000664000175100017510000000031515033040005025313 0ustar00mylesmyles{ "href": "/v2/flavors/testflavor", "pool_list": "[testpool1, testpool3]", "name": "testflavor", "capabilities": [ "FIFO", "CLAIMS", "DURABILITY", "AOD", "HIGH_THROUGHPUT" ] }././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0
zaqar-20.1.0.dev29/api-ref/source/samples/flavor-update-response.json0000664000175100017510000000030115033040005024517 0ustar00mylesmyles{ "href": "/v2/flavors/testflavor", "pool_group": "testgroup", "name": "testflavor", "capabilities": [ "FIFO", "CLAIMS", "DURABILITY", "AOD", "HIGH_THROUGHPUT" ] }././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/health-response.json0000664000175100017510000000257415033040005023231 0ustar00mylesmyles{ "catalog_reachable": true, "redis": { "storage_reachable": true, "operation_status": { "post_messages": { "seconds": 0.027673959732055664, "ref": null, "succeeded": true }, "delete_messages": { "seconds": 0.0028481483459472656, "ref": null, "succeeded": true }, "delete_queue": { "seconds": 0.017709016799926758, "ref": null, "succeeded": true }, "bulk_delete_messages": { "seconds": 0.03959178924560547, "ref": null, "succeeded": true }, "create_queue": { "seconds": 0.021075963973999023, "ref": null, "succeeded": true }, "list_messages": { "seconds": 0.00003504753112792969, "ref": null, "succeeded": true }, "delete_claim": { "seconds": 0.0006170272827148438, "ref": null, "succeeded": true }, "claim_messages": { "seconds": 0.008388042449951172, "ref": null, "succeeded": true } } } } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/messages-delete-bypop-response.json0000664000175100017510000000046115033040005026153 0ustar00mylesmyles{ "messages": [ { "body": { "current_bytes": "0", "event": "BackupProgress", "total_bytes": "99614720" }, "age": 443, "claim_count": 1, "claim_id": "51db7067821e727dc24df754", "id": "578f0055508f153f256f717f", "ttl": 3600 } ] }././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/messages-get-byids-response.json0000664000175100017510000000057215033040005025454 0ustar00mylesmyles{ "messages": [ { "body": { "current_bytes": "0", "event": "BackupProgress", "total_bytes": "99614720" }, "age": 443, "href": "/v2/queues/beijing/messages/578f0055508f153f256f717f", "id": "578f0055508f153f256f717f", "ttl": 3600 } ] } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/messages-get-response.json0000664000175100017510000000047715033040005024350 0ustar00mylesmyles{ "body": { "current_bytes": "0", "event": "BackupProgress", "total_bytes": "99614720" }, "age": 1110, "href": "/v2/queues/beijing/messages/578f0055508f153f256f717f", "id": "578f0055508f153f256f717f", "ttl": 3600, "checksum": "MD5:abf7213555626e29c3cb3e5dc58b3515" } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/messages-list-response.json0000664000175100017510000000174415033040005024542 0ustar00mylesmyles{ "messages": [ { "body": { "current_bytes": "0", "event": "BackupProgress", "total_bytes": "99614720" }, "age": 482, "href": "/v2/queues/beijing/messages/578edfe6508f153f256f717b", "id": "578edfe6508f153f256f717b", "ttl": 3600, "checksum": "MD5:abf7213555626e29c3cb3e5dc58b3515" }, { "body": { "current_bytes": "0", "event": "BackupProgress", "total_bytes": "99614720" }, "age": 456, "href": "/v2/queues/beijing/messages/578ee000508f153f256f717d", "id": "578ee000508f153f256f717d", "ttl": 3600, "checksum": "MD5:abf7213555626e29c3cb3e5dc58b3515" } ], "links": [ { "href": 
"/v2/queues/beijing/messages?marker=17&echo=true", "rel": "next" } ] } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/messages-post-request.json0000664000175100017510000000050615033040005024401 0ustar00mylesmyles{ "messages": [ { "ttl": 300, "delay": 20, "body": { "event": "BackupStarted", "backup_id": "c378813c-3f0b-11e2-ad92-7823d2b0f3ce" } }, { "body": { "event": "BackupProgress", "current_bytes": "0", "total_bytes": "99614720" } } ] }././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/messages-post-response.json0000664000175100017510000000022315033040005024543 0ustar00mylesmyles{ "resources": [ "/v2/queues/demoqueue/messages/51db6f78c508f17ddc924357", "/v2/queues/demoqueue/messages/51db6f78c508f17ddc924358" ] }././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/pool-create-request-new.json0000664000175100017510000000021515033040005024605 0ustar00mylesmyles{ "weight": 100, "uri": "mongodb://127.0.0.1:27017", "options":{ "max_retry_sleep": 1 }, "flavor": "poolflavor" }././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/pool-create-request.json0000664000175100017510000000021315033040005024014 0ustar00mylesmyles{ "weight": 100, "uri": "mongodb://127.0.0.1:27017", "options":{ "max_retry_sleep": 1 }, "group": "poolgroup" }././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/pool-list-response-new.json0000664000175100017510000000076215033040005024472 0ustar00mylesmyles{ "pools": [ { "href": "/v2/pools/test_pool1", "group": "", "flavor": "poolflavor", "name": "test_pool1", "weight": 60, "uri": "mongodb://192.168.1.10:27017" }, { "href": "/v2/pools/test_pool2", "group": "", "flavor": "poolflavor", "name": "test_pool2", "weight": 40, "uri": "mongodb://192.168.1.20:27017" } ], "links": [ { "href": "/v2/pools?marker=test_pool2", "rel": "next" } ] }././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/pool-list-response.json0000664000175100017510000000076015033040005023701 0ustar00mylesmyles{ "pools": [ { "href": "/v2/pools/test_pool1", "group": "poolgroup", "flavor": "", "name": "test_pool1", "weight": 60, "uri": "mongodb://192.168.1.10:27017" }, { "href": "/v2/pools/test_pool2", "group": "poolgroup", "flavor": "", "name": "test_pool2", "weight": 40, "uri": "mongodb://192.168.1.20:27017" } ], "links": [ { "href": "/v2/pools?marker=test_pool2", "rel": "next" } ] }././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/pool-show-response-new.json0000664000175100017510000000023615033040005024473 0ustar00mylesmyles{ "href": "/v2/pools/test_pool", "group": "", "flavor": "testpoolflavor", "name": "test_pool", "weight": 100, "uri": "mongodb://127.0.0.1:27017" }././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/pool-show-response.json0000664000175100017510000000023515033040005023703 0ustar00mylesmyles{ "href": "/v2/pools/test_pool", "group": "testpoolgroup", "flavor": "", "name": "test_pool", "weight": 100, "uri": "mongodb://127.0.0.1:27017" 
}././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/pool-update-request-new.json0000664000175100017510000000021715033040005024626 0ustar00mylesmyles{ "weight": 60, "uri": "mongodb://127.0.0.1:27017", "options":{ "max_retry_sleep": 1 }, "flavor": "newpoolflavor" }././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/pool-update-request.json0000664000175100017510000000021515033040005024035 0ustar00mylesmyles{ "weight": 60, "uri": "mongodb://127.0.0.1:27017", "options":{ "max_retry_sleep": 1 }, "group": "newpoolgroup" }././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/pool-update-response-new.json0000664000175100017510000000023415033040005024773 0ustar00mylesmyles{ "href": "/v2/pools/test_pool", "group": "", "flavor": "newpoolflavor", "name": "test_pool", "weight": 60, "uri": "mongodb://127.0.0.1:27017" }././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/pool-update-response.json0000664000175100017510000000021315033040005024201 0ustar00mylesmyles{ "href": "/v2/pools/test_pool", "group": "newpoolgroup", "name": "test_pool", "weight": 60, "uri": "mongodb://127.0.0.1:27017" }././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/purge-queue-request.json0000664000175100017510000000006715033040005024055 0ustar00mylesmyles{ "resource_types": ["messages", "subscriptions"] }././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/queue-create-request.json0000664000175100017510000000050415033040005024172 0ustar00mylesmyles{ "_max_messages_post_size": 262144, "_default_message_ttl": 3600, "_default_message_delay": 30, "_dead_letter_queue": "dead_letter", "_dead_letter_queue_messages_ttl": 3600, "_max_claim_count": 10, "_enable_encrypt_messages": true, "description": "Queue for international traffic billing." 
}././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/queue-pre-signed-request.json0000664000175100017510000000021015033040005024760 0ustar00mylesmyles{ "paths": ["messages", "claims", "subscriptions"], "methods": ["GET", "POST", "PUT", "PATCH"], "expires": "2016-09-01T00:00:00" }././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/queue-pre-signed-response.json0000664000175100017510000000061215033040005025132 0ustar00mylesmyles{ "project": "2887aabf368046a3bb0070f1c0413470", "paths": [ "/v2/queues/test/messages", "/v2/queues/test/claims", "/v2/queues/test/subscriptions" ], "expires": "2016-09-01T00:00:00", "methods": [ "GET", "PATCH", "POST", "PUT" ], "signature": "6a63d63242ebd18c3518871dda6fdcb6273db2672c599bf985469241e9a1c799" } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/queue-show-response.json0000664000175100017510000000042115033040005024053 0ustar00mylesmyles{ "_max_messages_post_size": 262144, "_default_message_ttl": 3600, "description": "Queue used for billing.", "_max_claim_count": 10, "_dead_letter_queue": "dead_letter", "_dead_letter_queue_messages_ttl": 3600, "_enable_encrypt_messages": true }././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/queue-stats-response.json0000664000175100017510000000013415033040005024232 0ustar00mylesmyles{ "messages":{ "claimed": 10, "total": 20, "free": 10 } }././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/queue-update-request.json0000664000175100017510000000014615033040005024213 0ustar00mylesmyles[ { "op": "replace", "path": "/metadata/max_timeout", "value": 100 } ]././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/queue-update-response.json0000664000175100017510000000003215033040005024355 0ustar00mylesmyles{ "max_timeout": 100 }././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/queues-list-response.json0000664000175100017510000000060415033040005024234 0ustar00mylesmyles{ "queues":[ { "href":"/v2/queues/beijing", "name":"beijing" }, { "href":"/v2/queues/london", "name":"london" }, { "href":"/v2/queues/wellington", "name":"wellington" } ], "links":[ { "href":"/v2/queues?marker=wellington", "rel":"next" } ], "count": 3 }././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/subscription-confirm-request.json0000664000175100017510000000003115033040005025757 0ustar00mylesmyles{ "confirmed": true }././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/subscription-create-request-http.json0000664000175100017510000000012215033040005026543 0ustar00mylesmyles{ "subscriber":"http://10.229.49.117:5679", "ttl":3600, "options":{} }././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/subscription-create-request-mail.json0000664000175100017510000000020215033040005026505 0ustar00mylesmyles{ "subscriber":"mailto:test@gmail.com",
"ttl":3600, "options":{ "from": "Jack", "subject": "Hello" } }././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/subscription-create-response.json0000664000175100017510000000006315033040005025740 0ustar00mylesmyles{ "subscription_id": "57692ab13990b48c644bb7e6" }././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/subscription-show-response.json0000664000175100017510000000025415033040005025457 0ustar00mylesmyles{ "age": 1632, "id": "576b54963990b48c644bb7e7", "subscriber": "http://10.229.49.117:5679", "source": "test", "ttl": 3600, "options": { "name": "test" } }././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/subscription-update-request.json0000664000175100017510000000015315033040005025611 0ustar00mylesmyles{ "subscriber":"http://10.229.49.117:1234", "ttl":360, "options":{ "name": "test" } }././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/subscriptions-list-response.json0000664000175100017510000000100515033040005025630 0ustar00mylesmyles{ "links": [ { "href": "/v2/queues/test/subscriptions?marker=57692ab13990b48c644bb7e6", "rel": "next" } ], "subscriptions": [ { "age": 13, "id": "57692aa63990b48c644bb7e5", "subscriber": "http://10.229.49.117:5678", "source": "test", "ttl": 360, "options": {} }, { "age": 2, "id": "57692ab13990b48c644bb7e6", "subscriber": "http://10.229.49.117:5679", "source": "test", "ttl": 360, "options": {} } ] }././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/samples/versions-list-response.json0000664000175100017510000000231415033040005024575 0ustar00mylesmyles{ "versions":[ { "status":"DEPRECATED", "updated":"2014-9-11T17:47:05Z", "media-types":[ { "base":"application/json", "type":"application/vnd.openstack.messaging-v1+json" } ], "id":"1", "links":[ { "href":"/v1/", "rel":"self" } ] }, { "status":"SUPPORTED", "updated":"2014-9-24T04:06:47Z", "media-types":[ { "base":"application/json", "type":"application/vnd.openstack.messaging-v1_1+json" } ], "id":"1.1", "links":[ { "href":"/v1.1/", "rel":"self" } ] }, { "status":"CURRENT", "updated":"2014-9-24T04:06:47Z", "media-types":[ { "base":"application/json", "type":"application/vnd.openstack.messaging-v2+json" } ], "id":"2", "links":[ { "href":"/v2/", "rel":"self" } ] } ] }././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/status.yaml0000664000175100017510000000325015033040005017770 0ustar00mylesmyles 200: default: | Request was successful. 201: default: | Request has been fulfilled and new resource created. 202: default: | Request is accepted, but processing may take some time. 203: default: | Returned information is not full set, but a subset. 204: default: | Request fulfilled but service does not return anything. 300: default: | The resource corresponds to more than one representation. 400: default: | Some content in the request was invalid. 401: default: | User must authenticate before making a request. 403: default: | Policy does not allow current user to do this operation. 404: default: | The requested resource could not be found. 405: default: | Method is not valid for this endpoint and resource. 
409: default: | This resource has an action in progress that would conflict with this request. 413: default: | This operation cannot be completed. 415: default: | The entity of the request is in a format not supported by the requested resource for the method. 422: default: | The entity of the request is in a format not processable by the requested resource for the method. 500: default: | Something went wrong with the service which prevents it from fulfilling the request. 501: default: | The service does not have the functionality required to fulfill this request. 503: default: | The service cannot handle the request right now. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/subscription.inc0000664000175100017510000001161215033040005021001 0ustar00mylesmyles============================== Subscriptions (subscriptions) ============================== Subscriptions are relationships between a queue/topic and targeted subscribers. After a subscription is created for a particular subscriber, such as an email address or a webhook, the subscriber is notified automatically whenever new messages are posted to the queue. List Subscriptions ================== .. rest_method:: GET /v2/queues/{queue_name}/subscriptions Lists a queue's subscriptions. This operation lists subscriptions for a queue. The subscriptions are sorted alphabetically by name. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 503 Request Parameters ------------------ .. rest_parameters:: parameters.yaml - queue_name: queue_name Query Parameters ~~~~~~~~~~~~~~~~ .. rest_parameters:: parameters.yaml - limit: limit - marker: marker Response Parameters ------------------- .. rest_parameters:: parameters.yaml - subscriptions: subscriptions - links: links Response Example ---------------- .. literalinclude:: samples/subscriptions-list-response.json :language: javascript Create Subscription =================== .. rest_method:: POST /v2/queues/{queue_name}/subscriptions Creates a subscription. This operation creates a new subscription. Response codes -------------- .. rest_status_code:: success status.yaml - 201 .. rest_status_code:: error status.yaml - 400 - 401 - 503 Request Parameters ------------------ .. rest_parameters:: parameters.yaml - queue_name: queue_name - subscriber: subscriber - ttl: subscription_ttl - options: subscription_options Request Example --------------- .. literalinclude:: samples/subscription-create-request-http.json :language: javascript .. literalinclude:: samples/subscription-create-request-mail.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - subscription_id: subscription_id Response Example ---------------- .. literalinclude:: samples/subscription-create-response.json :language: javascript Update Subscription =================== .. rest_method:: PATCH /v2/queues/{queue_name}/subscriptions/{subscription_id} Updates a subscription. Response codes -------------- .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 404 - 409 - 503 Request Parameters ------------------ .. rest_parameters:: parameters.yaml - queue_name: queue_name - subscription_id: subscription_id_path - subscriber: subscriber - ttl: subscription_ttl - options: subscription_options Request Example --------------- ..
literalinclude:: samples/subscription-update-request.json :language: javascript This operation does not return a response body. Show Subscription Details ========================= .. rest_method:: GET /v2/queues/{queue_name}/subscriptions/{subscription_id} Shows details for a subscription. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 503 Request Parameters ------------------ .. rest_parameters:: parameters.yaml - queue_name: queue_name - subscription_id: subscription_id_path Response Parameters ------------------- .. rest_parameters:: parameters.yaml - age: subscription_age - id: subscription_id - subscriber: subscriber - source: subscription_source - ttl: subscription_ttl - options: subscription_options Response Example ---------------- .. literalinclude:: samples/subscription-show-response.json :language: javascript Delete Subscription =================== .. rest_method:: DELETE /v2/queues/{queue_name}/subscriptions/{subscription_id} Deletes the specified subscription. Response codes -------------- .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 503 Request Parameters ------------------ .. rest_parameters:: parameters.yaml - queue_name: queue_name - subscription_id: subscription_id_path This operation does not accept a request body and does not return a response body. Confirm Subscription ==================== .. rest_method:: POST /v2/queues/{queue_name}/subscriptions/{subscription_id}/confirm Confirms a subscription. This operation can confirm or cancel a subscription. Response codes -------------- .. rest_status_code:: success status.yaml - 201 .. rest_status_code:: error status.yaml - 400 - 401 - 503 Request Parameters ------------------ .. rest_parameters:: parameters.yaml - queue_name: queue_name - subscription_id: subscription_id_path - confirmed: confirmed Request Example --------------- .. literalinclude:: samples/subscription-confirm-request.json :language: javascript This operation does not return a response body. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/api-ref/source/versions.inc0000664000175100017510000000214415033040005020125 0ustar00mylesmyles============ API Versions ============ The Zaqar API only supports ``major versions`` expressed in request URLs. List major versions =================== .. rest_method:: GET / Gets the home document. This operation gets the home document. The entire API is discoverable from a single starting point, the home document. To explore the entire API, you need to know only this one URI. This document is cacheable. The home document lets you write clients by using relational links, so clients do not have to construct their own URLs. You can click through and view the JSON doc in your browser. For more information about home documents, see `http://tools.ietf.org/html/draft-nottingham-json-home-02 <http://tools.ietf.org/html/draft-nottingham-json-home-02>`__. Response codes -------------- .. rest_status_code:: success status.yaml - 300 .. rest_status_code:: error status.yaml - 503 Response Parameters ------------------- .. rest_parameters:: parameters.yaml - versions: versions Response Example ---------------- ..
literalinclude:: samples/versions-list-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/bench-requirements.txt0000664000175100017510000000007015033040005017274 0ustar00mylesmylesgevent>=1.0.1 marktime>=0.2.0 python-zaqarclient>=1.1.0 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/bindep.txt0000664000175100017510000000102015033040005014733 0ustar00mylesmyles# This is the fallback list for packages to install. Do not add # additional packages here. Repositories should use bindep and create # their own bindep.txt files if the list below is not # working for them. redis [platform:rpm] redis-server [platform:dpkg] dev-db/redis [platform:gentoo] default-libmysqlclient-dev [platform:dpkg] libmysqlclient-devel [platform:suse] mysql-client [platform:dpkg !platform:debian] mysql-server [platform:dpkg !platform:debian] mariadb-client [platform:debian] mariadb-server [platform:debian] ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5510137 zaqar-20.1.0.dev29/devstack/0000775000175100017510000000000015033040026014545 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/devstack/README.rst0000664000175100017510000000115315033040005016231 0ustar00mylesmyles===== Zaqar ===== ====================== Enabling in Devstack ====================== 1. Download DevStack -------------------- For more info on devstack installation, follow the link below: .. code-block:: ini https://docs.openstack.org/devstack/latest/ 2. Add this repo as an external repository ------------------------------------------ .. code-block:: ini cat > /opt/stack/devstack/local.conf << END [[local|localrc]] enable_plugin zaqar https://git.openstack.org/openstack/zaqar END 3. Run devstack -------------------- .. code-block:: ini cd /opt/stack/devstack ./stack.sh ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/devstack/plugin.sh0000775000175100017510000002723515033040005016410 0ustar00mylesmyles#!/bin/bash # # lib/zaqar # Install and start **Zaqar** service # To enable a minimal set of Zaqar services, add the following to localrc: # # enable_service zaqar-websocket zaqar-wsgi # # Dependencies: # - functions # - OS_AUTH_URL for auth in api # - DEST set to the destination directory # - SERVICE_PASSWORD, SERVICE_TENANT_NAME for auth in api # - STACK_USER service user # stack.sh # --------- # install_zaqar # install_zaqarui # configure_zaqar # init_zaqar # start_zaqar # stop_zaqar # cleanup_zaqar # cleanup_zaqar_mongodb # Save trace setting XTRACE=$(set +o | grep xtrace) set +o xtrace # Functions # --------- # Test if any Zaqar services are enabled # is_zaqar_enabled function is_zaqar_enabled { [[ ,${ENABLED_SERVICES} =~ ,"zaqar" ]] && return 0 return 1 } # cleanup_zaqar() - Cleans up general things from previous # runs and storage-specific leftovers. function cleanup_zaqar { if [ "$ZAQAR_BACKEND" = 'mongodb' ] ; then cleanup_zaqar_mongodb fi remove_uwsgi_config "$ZAQAR_UWSGI_CONF" "zaqar" } # cleanup_zaqar_mongodb() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up # After mongodb 6.0, the mongo shell has been removed; mongosh is used instead. function cleanup_zaqar_mongodb { if ! timeout $SERVICE_TIMEOUT sh -c "while !
mongosh zaqar --eval 'db.dropDatabase();'; do sleep 1; done"; then die $LINENO "Mongo DB did not start" else mongo_version=$(mongosh zaqar --eval 'db.version();') required_mongo_version='6.0' if [[ $mongo_version < $required_mongo_version ]]; then die $LINENO "Zaqar needs Mongo DB version >= 6.0 to run." fi fi } # configure_zaqarclient() - Set config files, create data dirs, etc function configure_zaqarclient { setup_develop $ZAQARCLIENT_DIR } # configure_zaqar() - Set config files, create data dirs, etc function configure_zaqar { setup_develop $ZAQAR_DIR [ ! -d $ZAQAR_CONF_DIR ] && sudo mkdir -m 755 -p $ZAQAR_CONF_DIR sudo chown $USER $ZAQAR_CONF_DIR [ ! -d $ZAQAR_API_LOG_DIR ] && sudo mkdir -m 755 -p $ZAQAR_API_LOG_DIR sudo chown $USER $ZAQAR_API_LOG_DIR iniset $ZAQAR_CONF DEFAULT debug True iniset $ZAQAR_CONF DEFAULT unreliable True iniset $ZAQAR_CONF DEFAULT admin_mode True iniset $ZAQAR_CONF DEFAULT enable_deprecated_api_versions 1,1.1 iniset $ZAQAR_CONF signed_url secret_key notreallysecret if is_service_enabled key; then iniset $ZAQAR_CONF DEFAULT auth_strategy keystone fi iniset $ZAQAR_CONF storage message_pipeline zaqar.notification.notifier # Enable pooling by default for now iniset $ZAQAR_CONF DEFAULT admin_mode True iniset $ZAQAR_CONF 'drivers:transport:websocket' bind $(ipv6_unquote $ZAQAR_SERVICE_HOST) iniset $ZAQAR_CONF 'drivers:transport:websocket' port $ZAQAR_WEBSOCKET_PORT iniset $ZAQAR_CONF drivers transport websocket configure_keystone_authtoken_middleware $ZAQAR_CONF zaqar iniset $ZAQAR_CONF trustee auth_type password iniset $ZAQAR_CONF trustee auth_url $KEYSTONE_AUTH_URI iniset $ZAQAR_CONF trustee username $ZAQAR_TRUSTEE_USER iniset $ZAQAR_CONF trustee password $ZAQAR_TRUSTEE_PASSWORD iniset $ZAQAR_CONF trustee user_domain_id $ZAQAR_TRUSTEE_DOMAIN iniset $ZAQAR_CONF DEFAULT pooling True iniset $ZAQAR_CONF 'pooling:catalog' enable_virtual_pool True if [ "$ZAQAR_BACKEND" = 'mongodb' ] ; then iniset $ZAQAR_CONF drivers message_store mongodb iniset $ZAQAR_CONF 'drivers:message_store:mongodb' uri mongodb://localhost:27017/zaqar iniset $ZAQAR_CONF 'drivers:message_store:mongodb' database zaqar iniset $ZAQAR_CONF drivers management_store mongodb iniset $ZAQAR_CONF 'drivers:management_store:mongodb' uri mongodb://localhost:27017/zaqar_mgmt iniset $ZAQAR_CONF 'drivers:management_store:mongodb' database zaqar_mgmt configure_mongodb elif [ "$ZAQAR_BACKEND" = 'redis' ] ; then recreate_database zaqar iniset $ZAQAR_CONF drivers management_store sqlalchemy iniset $ZAQAR_CONF 'drivers:management_store:sqlalchemy' uri `database_connection_url zaqar` iniset $ZAQAR_CONF 'drivers:management_store:sqlalchemy' database zaqar_mgmt $ZAQAR_BIN_DIR/zaqar-sql-db-manage --config-file $ZAQAR_CONF upgrade head iniset $ZAQAR_CONF drivers message_store redis iniset $ZAQAR_CONF 'drivers:message_store:redis' uri redis://localhost:6379 iniset $ZAQAR_CONF 'drivers:message_store:redis' database zaqar configure_redis elif [ "$ZAQAR_BACKEND" = 'swift' ] ; then recreate_database zaqar iniset $ZAQAR_CONF drivers management_store sqlalchemy iniset $ZAQAR_CONF 'drivers:management_store:sqlalchemy' uri `database_connection_url zaqar` iniset $ZAQAR_CONF 'drivers:management_store:sqlalchemy' database zaqar_mgmt $ZAQAR_BIN_DIR/zaqar-sql-db-manage --config-file $ZAQAR_CONF upgrade head iniset $ZAQAR_CONF drivers message_store swift iniset $ZAQAR_CONF 'drivers:message_store:swift' auth_url $KEYSTONE_AUTH_URI iniset $ZAQAR_CONF 'drivers:message_store:swift' uri swift://zaqar:$SERVICE_PASSWORD@/service fi if 
is_service_enabled qpid || [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then iniset $ZAQAR_CONF DEFAULT notification_driver messaging iniset $ZAQAR_CONF DEFAULT control_exchange zaqar fi iniset_rpc_backend zaqar $ZAQAR_CONF DEFAULT write_uwsgi_config "$ZAQAR_UWSGI_CONF" "$ZAQAR_UWSGI" "/messaging" "" "zaqar" } function configure_redis { if is_ubuntu; then install_package redis-server pip_install redis elif is_fedora; then install_package redis pip_install redis else exit_distro_not_supported "redis installation" fi } function configure_mongodb { # Set nssize to 2GB. This increases the number of namespaces supported # per database. pip_install pymongo if is_ubuntu; then # NOTE: To fix the mongodb's issue in ubuntu 22.04/24.04 LTS ubuntu_version=$(source /etc/os-release ; echo $VERSION_ID) if [[ $ubuntu_version == '24.04' ]]; then if [[ ! -d /etc/apt/sources.list.d ]]; then sudo mkdir -p /etc/apt/sources.list.d fi wget -qO - https://www.mongodb.org/static/pgp/server-8.0.asc | sudo apt-key add - echo "deb [ arch=amd64,arm64 ] https://repo.mongodb.org/apt/ubuntu noble/mongodb-org/8.0 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-8.0.list sudo apt update install_package mongodb-org restart_service mongod sudo systemctl status mongod elif [[ $ubuntu_version == '22.04' ]]; then wget -qO - https://www.mongodb.org/static/pgp/server-7.0.asc | sudo apt-key add - echo "deb [ arch=amd64,arm64 ] https://repo.mongodb.org/apt/ubuntu jammy/mongodb-org/7.0 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-7.0.list sudo apt update install_package mongodb-org restart_service mongod sudo systemctl status mongod else install_package mongodb-server restart_service mongodb fi elif is_fedora; then fedora_version=$(source /etc/os-release ; echo $VERSION_ID) if [[ $fedora_version == '9' ]]; then cat > /etc/yum.repos.d/mongodb-org-6.0.repo << __EOF__ [mongodb-org-6.0] name=MongoDB Repository baseurl=https://repo.mongodb.org/yum/redhat/8/mongodb-org/6.0/x86_64/ gpgcheck=1 enabled=1 gpgkey=https://www.mongodb.org/static/pgp/server-6.0.asc __EOF__ install_package mongodb-org restart_service mongod else install_package mongodb install_package mongodb-server restart_service mongod fi fi } # init_zaqar() - Initialize etc. function init_zaqar { # Nothing to do : } # install_zaqar() - Collect source and prepare function install_zaqar { setup_develop $ZAQAR_DIR if is_service_enabled horizon; then install_zaqarui fi pip_install uwsgi } function install_zaqarui { git_clone $ZAQARUI_REPO $ZAQARUI_DIR $ZAQARUI_BRANCH # NOTE(flwang): Workaround for devstack bug: 1540328 # where devstack install 'test-requirements' but should not do it # for zaqar-ui project as it installs Horizon from url. # Remove following two 'mv' commands when mentioned bug is fixed. 
mv $ZAQARUI_DIR/test-requirements.txt $ZAQARUI_DIR/_test-requirements.txt setup_develop $ZAQARUI_DIR mv $ZAQARUI_DIR/_test-requirements.txt $ZAQARUI_DIR/test-requirements.txt cp -a $ZAQARUI_DIR/zaqar_ui/enabled/* $HORIZON_DIR/openstack_dashboard/local/enabled/ if [ -d $ZAQARUI_DIR/zaqar-ui/locale ]; then (cd $ZAQARUI_DIR/zaqar-ui; DJANGO_SETTINGS_MODULE=openstack_dashboard.settings ../manage.py compilemessages) fi } # install_zaqarclient() - Collect source and prepare function install_zaqarclient { git_clone $ZAQARCLIENT_REPO $ZAQARCLIENT_DIR $ZAQARCLIENT_BRANCH # NOTE(flaper87): Ideally, this should be developed, but apparently # there's a bug in devstack that skips test-requirements when using # setup_develop setup_install $ZAQARCLIENT_DIR } # start_zaqar() - Start running processes, including screen function start_zaqar { run_process zaqar-wsgi "$ZAQAR_BIN_DIR/uwsgi --ini $ZAQAR_UWSGI_CONF" run_process zaqar-websocket "$ZAQAR_BIN_DIR/zaqar-server --config-file $ZAQAR_CONF" echo "Waiting for Zaqar to start..." local www_authenticate_uri=http://${ZAQAR_SERVICE_HOST}/identity local ping_url=$ZAQAR_SERVICE_PROTOCOL://$ZAQAR_SERVICE_HOST/messaging/v2/ping token=$(openstack token issue -c id -f value --os-auth-url ${www_authenticate_uri}) if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q --header=\"Client-ID:$(uuidgen)\" --header=\"X-Auth-Token:$token\" -O- $ping_url; do sleep 1; done"; then die $LINENO "Zaqar did not start" fi } # stop_zaqar() - Stop running processes function stop_zaqar { local serv # Kill the zaqar screen windows for serv in zaqar-wsgi zaqar-websocket; do stop_process $serv done } function create_zaqar_accounts { create_service_user "zaqar" if [[ "$KEYSTONE_IDENTITY_BACKEND" = 'sql' ]]; then get_or_create_service "zaqar" "messaging" "Zaqar Service" get_or_create_endpoint "messaging" \ "$REGION_NAME" \ "$ZAQAR_SERVICE_PROTOCOL://$ZAQAR_SERVICE_HOST/messaging" get_or_create_service "zaqar-websocket" \ "messaging-websocket" "Zaqar Websocket Service" get_or_create_endpoint "messaging-websocket" \ "$REGION_NAME" \ "$ZAQAR_SERVICE_PROTOCOL://$ZAQAR_SERVICE_HOST:$ZAQAR_WEBSOCKET_PORT" fi if [ "$ZAQAR_BACKEND" = 'swift' ] ; then get_or_add_user_project_role ResellerAdmin zaqar service fi } if is_service_enabled zaqar-websocket || is_service_enabled zaqar-wsgi; then if [[ "$1" == "stack" && "$2" == "install" ]]; then echo_summary "Installing Zaqar" install_zaqarclient install_zaqar elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then echo_summary "Configuring Zaqar" configure_zaqar configure_zaqarclient if is_service_enabled key; then create_zaqar_accounts fi elif [[ "$1" == "stack" && "$2" == "extra" ]]; then echo_summary "Initializing Zaqar" init_zaqar start_zaqar fi if [[ "$1" == "unstack" ]]; then stop_zaqar fi if [[ "$1" == "clean" ]]; then cleanup_zaqar fi fi # Restore xtrace $XTRACE # Local variables: # mode: shell-script # End: ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/devstack/settings0000664000175100017510000000341015033040005016323 0ustar00mylesmyles# Set up default directories #--------------------------- ZAQAR_DIR=$DEST/zaqar ZAQARCLIENT_DIR=$DEST/python-zaqarclient ZAQAR_CONF_DIR=/etc/zaqar ZAQAR_CONF=$ZAQAR_CONF_DIR/zaqar.conf ZAQAR_POLICY_CONF=$ZAQAR_CONF_DIR/policy.yaml ZAQAR_UWSGI_CONF=$ZAQAR_CONF_DIR/uwsgi.conf ZAQAR_UWSGI=zaqar.transport.wsgi.app:application ZAQAR_API_LOG_DIR=/var/log/zaqar ZAQAR_API_LOG_FILE=$ZAQAR_API_LOG_DIR/queues.log
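# NOTE: settings declared with the ${VAR:-default} form below (for example # ZAQAR_BACKEND) can be overridden from devstack's local.conf before this # plugin is sourced; setting ZAQAR_BACKEND=redis there, for instance, would # select the Redis message store configured in plugin.sh (redis here is only # an illustrative value; the default is mongodb).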
ZAQAR_AUTH_CACHE_DIR=${ZAQAR_AUTH_CACHE_DIR:-/var/cache/zaqar} # Support potential entry-points console scripts ZAQAR_BIN_DIR=$(get_python_exec_prefix) # Set up database backend ZAQAR_BACKEND=${ZAQAR_BACKEND:-mongodb} # Set Zaqar repository ZAQAR_REPO=${ZAQAR_REPO:-${GIT_BASE}/openstack/zaqar.git} ZAQAR_BRANCH=${ZAQAR_BRANCH:-master} # Set client library repository ZAQARCLIENT_REPO=${ZAQARCLIENT_REPO:-${GIT_BASE}/openstack/python-zaqarclient.git} ZAQARCLIENT_BRANCH=${ZAQARCLIENT_BRANCH:-master} # Set Zaqar UI repository ZAQARUI_DIR=$DEST/zaqar-ui ZAQARUI_REPO=${ZAQARUI_REPO:-${GIT_BASE}/openstack/zaqar-ui.git} ZAQARUI_BRANCH=${ZAQARUI_BRANCH:-$ZAQAR_BRANCH} # Set Zaqar Connection Info ZAQAR_SERVICE_HOST=${ZAQAR_SERVICE_HOST:-$SERVICE_HOST} ZAQAR_SERVICE_PORT=${ZAQAR_SERVICE_PORT:-8888} ZAQAR_WEBSOCKET_PORT=${ZAQAR_WEBSOCKET_PORT:-9000} ZAQAR_SERVICE_PROTOCOL=${ZAQAR_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} # Set Zaqar trust configuration ZAQAR_TRUSTEE_USER=${ZAQAR_TRUSTEE_USER:-zaqar} ZAQAR_TRUSTEE_PASSWORD=${ZAQAR_TRUSTEE_PASSWORD:-$SERVICE_PASSWORD} ZAQAR_TRUSTEE_DOMAIN=${ZAQAR_TRUSTEE_DOMAIN:-default} GITREPO["zaqar-tempest-plugin"]=${ZAQARTEMPEST_REPO:-${GIT_BASE}/openstack/zaqar-tempest-plugin.git} GITBRANCH["zaqar-tempest-plugin"]=${ZAQARTEMPEST_BRANCH:-master} GITDIR["zaqar-tempest-plugin"]=$DEST/zaqar-tempest-plugin enable_service zaqar-websocket zaqar-wsgi ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5510137 zaqar-20.1.0.dev29/devstack/upgrade/0000775000175100017510000000000015033040026016174 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/devstack/upgrade/resource.sh0000775000175100017510000000257015033040005020363 0ustar00mylesmyles#!/bin/bash # # Copyright 2017 Catalyst IT Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. set -o errexit source $GRENADE_DIR/grenaderc source $GRENADE_DIR/functions source $TOP_DIR/openrc admin admin ZAQAR_DEVSTACK_DIR=$(cd $(dirname "$0")/.. && pwd) source $ZAQAR_DEVSTACK_DIR/settings set -o xtrace function create { # TODO(flwang): Create queue, create subscriptions, post messages, # delete queue : } function verify { # TODO(flwang): Get queue, get messages, get subscriptions : } function verify_noapi { : } function destroy { # TODO(flwang): Purge queue, delete queue : } # Dispatcher case $1 in "create") create ;; "verify") verify ;; "verify_noapi") verify_noapi ;; "destroy") destroy ;; "force_destroy") set +o errexit destroy ;; esac ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/devstack/upgrade/settings0000664000175100017510000000145215033040005017756 0ustar00mylesmyles# Grenade needs to know that Zaqar has a Grenade plugin. This is done in the # gate by setting GRENADE_PLUGINRC when using openstack-infra/devstack-gate. 
# That means that in the project openstack-infra/project-config we will need to # update the Zaqar grenade job(s) in jenkins/jobs/devstack-gate.yaml with # this: # export GRENADE_PLUGINRC="enable_grenade_plugin zaqar https://git.openstack.org/openstack/zaqar" # If openstack-infra/project-config is not updated then the Grenade tests will # never get run for Zaqar register_project_for_upgrade zaqar if grep -q 'management_store *= *sqlalchemy' /etc/zaqar/zaqar.conf; then register_db_to_save zaqar fi devstack_localrc base enable_service zaqar-wsgi zaqar-websocket zaqar devstack_localrc target enable_service zaqar-wsgi zaqar-websocket zaqar././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/devstack/upgrade/shutdown.sh0000775000175100017510000000077515033040005020414 0ustar00mylesmyles#!/bin/bash # # set -o errexit source $GRENADE_DIR/grenaderc source $GRENADE_DIR/functions # We need base DevStack functions for this source $BASE_DEVSTACK_DIR/functions source $BASE_DEVSTACK_DIR/stackrc # needed for status directory source $BASE_DEVSTACK_DIR/lib/tls # Keep track of the DevStack directory ZAQAR_DEVSTACK_DIR=$(dirname "$0")/.. source $ZAQAR_DEVSTACK_DIR/settings source $ZAQAR_DEVSTACK_DIR/plugin.sh set -o xtrace for serv in zaqar-wsgi zaqar-websocket; do stop_process $serv done ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/devstack/upgrade/upgrade.sh0000775000175100017510000000711115033040005020157 0ustar00mylesmyles#!/usr/bin/env bash # ``upgrade-zaqar`` echo "*********************************************************************" echo "Begin $0" echo "*********************************************************************" # Clean up any resources that may be in use cleanup() { set +o errexit echo "*********************************************************************" echo "ERROR: Abort $0" echo "*********************************************************************" # Kill ourselves to signal any calling process trap 2; kill -2 $$ } trap cleanup SIGHUP SIGINT SIGTERM # Keep track of the grenade directory RUN_DIR=$(cd $(dirname "$0") && pwd) # Source params source $GRENADE_DIR/grenaderc source $TOP_DIR/openrc admin admin # Import common functions source $GRENADE_DIR/functions # This script exits on an error so that errors don't compound and you see # only the first error that occurred. set -o errexit if grep -q '_store *= *mongodb' /etc/zaqar/zaqar.conf; then # mongo-tools is the name of the package which includes mongodump on # basically all distributions (Ubuntu, Debian, Fedora, CentOS and # openSUSE). install_package mongo-tools fi if grep -q 'management_store *= *mongodb' /etc/zaqar/zaqar.conf; then mongodump --db zaqar_mgmt --out $SAVE_DIR/zaqar-mongodb-mgmt-dump.$BASE_RELEASE fi if grep -q 'message_store *= *mongodb' /etc/zaqar/zaqar.conf; then mongodump --db zaqar --out $SAVE_DIR/zaqar-mongodb-message-dump.$BASE_RELEASE fi if grep -q 'message_store *= *redis' /etc/zaqar/zaqar.conf; then redis-cli save sudo cp /var/lib/redis/dump.rdb $SAVE_DIR/zaqar-redis-message-dump-$BASE_RELEASE.rdb fi # Upgrade Zaqar # ============= # Duplicate some setup bits from target DevStack source $TARGET_DEVSTACK_DIR/stackrc source $TARGET_DEVSTACK_DIR/lib/tls # Keep track of the DevStack directory ZAQAR_DEVSTACK_DIR=$(dirname "$0")/.. 
source $ZAQAR_DEVSTACK_DIR/settings source $ZAQAR_DEVSTACK_DIR/plugin.sh # Print the commands being run so that we can see the command that triggers # an error. It is also useful for following along as the install occurs. set -o xtrace function wait_for_keystone { local www_authenticate_uri=http://${ZAQAR_SERVICE_HOST}/identity if ! wait_for_service $SERVICE_TIMEOUT ${www_authenticate_uri}/v$IDENTITY_API_VERSION/; then die $LINENO "keystone did not start" fi } # Save current config files for posterity [[ -d $SAVE_DIR/etc.zaqar ]] || cp -pr $ZAQAR_CONF_DIR $SAVE_DIR/etc.zaqar stack_install_service zaqar if grep -q 'management_store *= *sqlalchemy' /etc/zaqar/zaqar.conf; then $ZAQAR_BIN_DIR/zaqar-sql-db-manage --config-file $ZAQAR_CONF upgrade head || die $LINENO "DB sync error" fi # calls upgrade-zaqar for specific release upgrade_project zaqar $RUN_DIR $BASE_DEVSTACK_BRANCH $TARGET_DEVSTACK_BRANCH wait_for_keystone start_zaqar # Don't succeed unless the services come up ensure_services_started zaqar-server if grep -q 'management_store *= *mongodb' /etc/zaqar/zaqar.conf; then mongodump --db zaqar_mgmt --out $SAVE_DIR/zaqar-mongodb-mgmt-dump.$TARGET_RELEASE fi if grep -q 'message_store *= *mongodb' /etc/zaqar/zaqar.conf; then mongodump --db zaqar --out $SAVE_DIR/zaqar-mongodb-message-dump.$TARGET_RELEASE fi if grep -q 'message_store *= *redis' /etc/zaqar/zaqar.conf; then redis-cli save sudo cp /var/lib/redis/dump.rdb $SAVE_DIR/zaqar-redis-message-dump-$TARGET_RELEASE.rdb fi set +o xtrace echo "*********************************************************************" echo "SUCCESS: End $0" echo "*********************************************************************" ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5510137 zaqar-20.1.0.dev29/doc/0000775000175100017510000000000015033040026013506 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/README.md0000664000175100017510000000003715033040005014762 0ustar00mylesmylesMessage-Queuing ===============././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/requirements.txt0000664000175100017510000000031615033040005016767 0ustar00mylesmylesopenstackdocstheme>=2.2.1 # Apache-2.0 sphinx>=2.0.0,!=2.1.0 # BSD sphinxcontrib-apidoc>=0.2.0 # BSD reno>=3.1.0 # Apache-2.0 os-api-ref>=1.4.0 # Apache-2.0 pymongo>=3.6.0 # Apache-2.0 redis>=2.10.0 # MIT ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5520136 zaqar-20.1.0.dev29/doc/source/0000775000175100017510000000000015033040026015006 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5520136 zaqar-20.1.0.dev29/doc/source/_static/0000775000175100017510000000000015033040026016434 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/_static/.placeholder0000664000175100017510000000000015033040005020702 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5520136 zaqar-20.1.0.dev29/doc/source/admin/0000775000175100017510000000000015033040026016076 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/admin/CORS.rst0000664000175100017510000000737315033040005017403 0ustar00mylesmyles..
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ========== CORS Guide ========== Zaqar supports Cross-Origin Resource Sharing (CORS) now. The function is provided by oslo.middleware. Please see the `Official Doc`_ and the `OpenStack Spec`_ for more detail. This guide mainly tells users how to use it in Zaqar. New Config Options ------------------ There are some new config options. **allowed_origin** Indicates whether this resource may be shared with the domain received in the request's "origin" header. Format: "<protocol>://<host>[:<port>]", no trailing slash. Example: https://horizon.example.com. **allow_credentials** Indicates that the actual request can include user credentials. The default value is True. **expose_headers** Indicates which headers are safe to expose to the API. Defaults to HTTP Simple Headers. The default value is []. **max_age** Maximum cache age of CORS preflight requests. The default value is 3600. **allow_methods** Indicates which methods can be used during the actual request. The default value is ['OPTIONS', 'GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'TRACE', 'PATCH']. **allow_headers** Indicates which header field names may be used during the actual request. The default value is []. Request and Response example ---------------------------- The CORS feature is enabled by default in Zaqar. Here is a config example:: [cors] allowed_origin = http://example allow_methods = GET The above example config options mean that Zaqar only accepts GET requests from the http://example domain. Here are some example requests: 1. Zaqar will do nothing if the request doesn't contain an "Origin" header:: # curl -I -X GET http://10.229.47.217:8888 -H "Accept: application/json" HTTP/1.1 300 Multiple Choices content-length: 668 content-type: application/json; charset=UTF-8 Connection: close 2. Zaqar will return nothing in the response headers if the "Origin" is not in ``allowed_origin``:: # curl -I -X GET http://10.229.47.217:8888 -H "Accept: application/json" -H "Origin: http://" HTTP/1.1 300 Multiple Choices content-length: 668 content-type: application/json; charset=UTF-8 Connection: close In the Zaqar log, we can see a message:: CORS request from origin 'http://' not permitted. 3. Zaqar will return CORS information if the "Origin" header is in ``allowed_origin``:: # curl -I -X GET http://10.229.47.217:8888 -H "Accept: application/json" -H "Origin: http://example" HTTP/1.1 300 Multiple Choices content-length: 668 content-type: application/json; charset=UTF-8 Vary: Origin Access-Control-Allow-Origin: http://example Access-Control-Allow-Credentials: true Connection: close 4. Zaqar will return more information if the request doesn't follow Zaqar's CORS rules:: # curl -I -X PUT http://10.229.47.217:8888 -H "Accept: application/json" -H "Origin: http://example" HTTP/1.1 405 Method Not Allowed content-length: 0 content-type: application/json; charset=UTF-8 allow: GET, OPTIONS Vary: Origin Access-Control-Allow-Origin: http://example Access-Control-Allow-Credentials: true Connection: close .. 
_Official Doc: https://docs.openstack.org/oslo.middleware/latest/reference/cors.html .. _OpenStack Spec: https://specs.openstack.org/openstack/openstack-specs/specs/cors-support.html ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/admin/OSprofiler.rst0000664000175100017510000001071715033040005020717 0ustar00mylesmyles.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================ OSprofiler Guide ================ OSprofiler is a library from oslo. It's used for performance analysis. Please see the `Official Doc`_ for more detail. Preparation ----------- OSprofiler now supports several kinds of backends, such as Ceilometer, ElasticSearch, Messaging and MongoDB. .. note:: 1. Ceilometer is only used for data collection, and Messaging is only used for data transfer. So Ceilometer only works when Messaging is enabled. 2. ElasticSearch and MongoDB support both data collection and transfer. So they can be used standalone. In this guide, we take MongoDB as an example. There are some new config options. **enabled** Enables the profiling for all services on this node. The default value is False (fully disables the profiling feature). This feature may degrade Zaqar's performance, so please disable it in production environments. **connection_string** Connection string for a notifier backend. The default value is messaging:// which sets the notifier to oslo_messaging. Here we set it to "mongodb://localhost:27017" **hmac_keys** Secret key(s) to use for encrypting context data for performance profiling. This string value should have the following format: <key1>[,<key2>,...<keyn>], where each key is some random string. A user who triggers the profiling via the REST API has to set one of these keys in the headers of the REST API call to include profiling results of this node for this particular project. **trace_wsgi_transport**, **trace_message_store** and **trace_management_store** The three layers during a user's request flow. Set each to True to enable tracing for that layer. So in this example, we should add the following config options:: [profiler] enabled = True connection_string = mongodb://localhost:27017 hmac_keys = 123 trace_wsgi_transport = True trace_message_store = True trace_management_store = True .. note:: If you want to use MQ and Ceilometer, please leave the **connection_string** empty or set it to your MQ information. And please make sure that the following config options have been set in Ceilometer.conf:: [DEFAULT] event_dispatchers = database [oslo_messaging_notifications] topics = notifications, profiler Then restart the Zaqar service. Command Line ------------ We can use OpenStack Client to analyse the user request now. For example, if we want to know the performance for "queue list", we can do it like this: 1. OpenStack Client now supports OSprofiler by default. The only thing we need to do is add ``--os-profile {hmac_keys}`` to the command:: openstack queue list --os-profile 123 "123" here is what we set in the Zaqar config file. 
After the request is done, OpenStack Client will return a trace ID like:: Trace ID: 2902c7a3-ee18-4b08-aae7-4e34388f9352 Display trace with command: osprofiler trace show --html 2902c7a3-ee18-4b08-aae7-4e34388f9352 Now the trace information has been stored in MongoDB already. 2. Use the command from the OpenStack Client's returned information. The osprofiler command uses Ceilometer for data collection by default, so we need to use ``--connection-string`` to change it to MongoDB here:: osprofiler trace show --html 2902c7a3-ee18-4b08-aae7-4e34388f9352 --connection-string mongodb://localhost:27017 Then you can see the analysis information in HTML format. It also supports JSON format:: osprofiler trace show --json 2902c7a3-ee18-4b08-aae7-4e34388f9352 --connection-string mongodb://localhost:27017 It can also save the result to a file:: osprofiler trace show --json 2902c7a3-ee18-4b08-aae7-4e34388f9352 --out list_test --connection-string mongodb://localhost:27017 Then you can open the file "list_test" to get the result. .. note:: If you used MQ for data transfer, the "--connection-string" here can be omitted or set to your Ceilometer endpoint. .. _Official Doc: https://docs.openstack.org/osprofiler/latest/user/background.html ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/admin/gmr.rst0000664000175100017510000000653515033040005017423 0ustar00mylesmyles.. Copyright (c) 2017 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ======================= Guru Meditation Reports ======================= Zaqar contains a mechanism whereby developers and system administrators can generate a report about the state of a running Zaqar executable. This report is called a *Guru Meditation Report* (*GMR* for short). Generating a GMR ---------------- For wsgi and websocket mode, a *GMR* can be generated by sending the *USR2* signal to any Zaqar process with support (see below). The *GMR* will then be output to standard error for that particular process. For example, suppose that ``zaqar-server`` has process id ``8675``, and was run with ``2>/var/log/zaqar/zaqar-server-err.log``. Then, ``kill -USR2 8675`` will trigger the Guru Meditation report to be printed to ``/var/log/zaqar/zaqar-server-err.log``. For uwsgi mode, users should add a configuration to Zaqar's conf file:: [oslo_reports] file_event_handler=['The path to a file to watch for changes to trigger ' 'the reports, instead of signals. Setting this option ' 'disables the signal trigger for the reports.'] file_event_handler_interval=['How many seconds to wait between polls when ' 'file_event_handler is set, default value ' 'is 1'] For example, you can specify "file_event_handler=/tmp/guru_report" and "file_event_handler_interval=1" in Zaqar's conf file. A *GMR* can be generated by "touch"ing the file which was specified in file_event_handler. The *GMR* will then output to standard error for that particular process. 
For example, suppose that ``zaqar-server`` was run with ``2>/var/log/zaqar/zaqar-server-err.log``, and the file path is ``/tmp/guru_report``. Then, ``touch /tmp/guru_report`` will trigger the Guru Meditation report to be printed to ``/var/log/zaqar/zaqar-server-err.log``. Structure of a GMR ------------------ The *GMR* is designed to be extensible; any particular executable may add its own sections. However, the base *GMR* consists of several sections: Package Shows information about the package to which this process belongs, including version information Threads Shows stack traces and thread ids for each of the threads within this process Green Threads Shows stack traces for each of the green threads within this process (green threads don't have thread ids) Configuration Lists all the configuration options currently accessible via the CONF object for the current process Extending the GMR ----------------- As mentioned above, additional sections can be added to the GMR for a particular executable. For more information, see the inline documentation about oslo.reports: `oslo.reports `_ ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/admin/index.rst0000664000175100017510000000030315033040005017730 0ustar00mylesmyles==================== Administration Guide ==================== .. toctree:: :maxdepth: 2 subscription_confirm OSprofiler CORS gmr running_benchmark writing_pipeline_stages ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/admin/running_benchmark.rst0000664000175100017510000001433715033040005022327 0ustar00mylesmyles.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================= Running benchmark ================= Introduction ------------ This document describes how to run the benchmarking tool. Zaqar contributors can use this tool to test how a particular code change affects Zaqar's performance. Usage ----- 1. First install and run zaqar-server. For example, you can set up Zaqar in a development environment. See :doc:`../contributor/development.environment`. 2. In your terminal cd into your local Zaqar repo and install additional requirements: .. code-block:: console $ pip install -r bench-requirements.txt 3. Copy the configuration file to ~/.zaqar: .. code-block:: console $ cp etc/zaqar-benchmark.conf.sample ~/.zaqar/zaqar-benchmark.conf 4. In this configuration file specify where zaqar-server can be found: .. code-block:: ini server_url = http://localhost:8888 5. The benchmarking tool needs a set of messages to work with. Specify the path to the file with messages in the configuration file. Alternatively, put it in the directory with the configuration file and name it ``zaqar-benchmark-messages.json``. As a starting point, you can use the sample file from the etc directory: .. code-block:: console $ cp etc/zaqar-benchmark-messages.json ~/.zaqar/ If the file is not found or no file is specified, a single hard-coded message is used for all requests. 6. 
Run the benchmarking tool using the following command: .. code-block:: console $ zaqar-bench By default, the command will run a performance test for 5 seconds, using one producer process with 10 greenlet workers, and one observer process with 5 workers. The consumer role is disabled by default. You can override these defaults in the config file or on the command line using a variety of options. For example, the following command runs a performance test for 30 seconds using 4 producer processes with 10 workers each, plus 4 consumer processes with 20 workers each. Note that the observer role is also disabled in this example by setting its number of workers to zero: .. code-block:: console $ zaqar-bench -pp 4 -pw 10 -cp 4 -cw 20 -ow 0 -t 30 By default, the results are in human-readable format. For JSON output add the ``--noverbose`` flag. The non-verbose output looks similar to the following: .. code-block:: console $ zaqar-bench --noverbose Using 'envvars' credentials Using 'keystone' authentication method Benchmarking Zaqar API v2... {"params": {"consumer": {"processes": 1, "workers": 0}, "observer": {"processes": 1, "workers": 5}, "producer": {"processes": 1, "workers": 10}}, "consumer": {"claim_total_requests": 0, "ms_per_claim": 0, "total_reqs": 0, "reqs_per_sec": 0, "successful_reqs": 0, "duration_sec": 0, "ms_per_delete": 0, "messages_processed": 0}, "producer": {"duration_sec": 8.569170951843262, "ms_per_req": 201.715140507139, "total_reqs": 29, "successful_reqs": 29, "reqs_per_sec": 3.384224700729303}, "observer": {"duration_sec": 8.481178045272827, "ms_per_req": 407.40778711107043, "total_reqs": 18, "successful_reqs": 18, "reqs_per_sec": 2.122346672115049}} By default, zaqar-bench benchmarks Zaqar API version 2. To run the benchmark against other API versions, use the ``-api`` parameter. For example: .. code-block:: console $ zaqar-bench -api 1.1 Configuring zaqar-bench to use Keystone authentication ###################################################### It's possible to use zaqar-bench with Keystone authentication, if your Zaqar is configured to use the Keystone authentication method and the Keystone service is running. For example, this is always true when running DevStack_ with unmodified ``zaqar.conf``. Let's configure zaqar-bench to use Keystone too: #. Set zaqar-bench's authentication method to Keystone. By default zaqar-bench uses the ``noauth`` method. This can be changed by setting the environment variable ``OS_AUTH_STRATEGY`` to ``keystone``. To set this environment variable: * temporarily, run: .. code-block:: console $ export OS_AUTH_STRATEGY=keystone * permanently, add this line to your ``~/.bashrc`` file: .. code-block:: bash export OS_AUTH_STRATEGY=keystone Reboot your computer or just run in the terminal where you will start zaqar-bench: .. code-block:: console $ source ~/.bashrc #. Set Keystone credentials for zaqar-bench. * If you're running Zaqar under DevStack, **you can omit this step**, because zaqar-bench will automatically get administrator or user credentials from one of the files created by DevStack: either from the ``/etc/openstack/clouds.yaml`` file or from the ``~/.config/openstack/clouds.yaml`` file, if it exists. * If you're running manually configured Zaqar with manually configured Keystone (not under DevStack): Add these lines to your ``~/.bashrc`` file and specify the valid Keystone credentials: .. 
code-block:: bash export OS_AUTH_URL="http://<your keystone endpoint>/v2.0" export OS_USERNAME="<username>" export OS_PASSWORD="<password>" export OS_PROJECT_NAME="<project name>" Reboot your computer or just run in the terminal where you will start zaqar-bench: .. code-block:: console $ source ~/.bashrc #. Run zaqar-bench as usual, for example: .. code-block:: console $ zaqar-bench If everything is properly configured, zaqar-bench must show the line ``Using 'keystone' authentication method`` and execute without authentication errors. .. _DevStack: https://docs.openstack.org/devstack/latest/ ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/admin/subscription_confirm.rst0000664000175100017510000002755615033040005023075 0ustar00mylesmyles.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================== The subscription Confirm Guide ============================== The subscription confirm feature now supports webhook and email with both the MongoDB and Redis backends. This guide shows how to use this feature: Webhook ------- .. note:: You should make sure that the message notification is enabled. By default, the ``message_pipeline`` config option in the [storage] section should be set like: message_pipeline = zaqar.notification.notifier 1. Set the config option "require_confirmation" and add the policy to the policy.yaml file. Then restart the Zaqar-wsgi service:: In the config file: [notification] require_confirmation = True In the policy.yaml file: "subscription:confirm": "" 2. Create a subscription. Here we use zaqar/samples/zaqar/subscriber_service_sample.py as the subscriber endpoint, for example. So before this step, you should start the subscriber service first. The service can be started simply with the command:: python zaqar/samples/zaqar/subscriber_service_sample.py The service's default port is 5678. If you want to use a new port, the command will be like:: python zaqar/samples/zaqar/subscriber_service_sample.py new_port_number The service will not confirm the subscription automatically by default. 
If you want to do that, the command will be like:: python zaqar/samples/zaqar/subscriber_service_sample.py --auto-confirm Then create a subscription:: curl -i -X POST http://10.229.47.217:8888/v2/queues/test/subscriptions \ -H "Content-type: application/json" \ -H "Client-ID: de305d54-75b4-431b-adb2-eb6b9e546014" \ -H "X-Auth-Token: 440b677561454ea8a7f872201dd4e2c4" \ -d '{"subscriber":"http://10.229.47.217:5678", "ttl":3600, "options":{}}' The response:: HTTP/1.1 201 Created content-length: 47 content-type: application/json; charset=UTF-8 location: http://10.229.47.217:8888/v2/queues/test/subscriptions Connection: close {"subscription_id": "576256b03990b480617b4063"} At the same time, if the subscriber sample service was not started with "--auto-confirm", you will receive a POST request in the subscriber sample service. The request looks like:: WARNING:root:{"UnsubscribeBody": {"confirmed": false}, "URL-Methods": "PUT", "X-Project-ID": "51be2c72393e457ebf0a22a668e10a64", "URL-Paths": "/v2/queues/test/subscriptions/576256b03990b480617b4063/confirm", "URL-Expires": "2016-07-06T04:35:56", "queue_name": "test", "SubscribeURL": ["/v2/queues/test/subscriptions/576256b03990b480617b4063/confirm"], "SubscribeBody": {"confirmed": true}, "URL-Signature": "d4038a40589cdb61cd13d5a6997472f5be779db441dd8fe0c597a6e465f30c41", "Message": "You have chosen to subscribe to the queue: test", "Message_Type": "SubscriptionConfirmation"} 10.229.47.217 - - [06/Jul/2016 11:35:56] "POST / HTTP/1.1" 200 - If you started the sample service with "--auto-confirm", please go to step 6 directly, because step 5 will be done by the service automatically. 3. Get the subscription. The request:: curl -i -X GET http://10.229.47.217:8888/v2/queues/test/subscriptions/576256b03990b480617b4063 \ -H "Content-type: application/json" \ -H "Client-ID: de305d54-75b4-431b-adb2-eb6b9e546014" \ -H "X-Auth-Token: 440b677561454ea8a7f872201dd4e2c4" The response:: HTTP/1.1 200 OK content-length: 154 content-type: application/json; charset=UTF-8 Connection: close {"confirmed": false, "age": 73, "id": "576256b03990b480617b4063", "subscriber": "http://10.229.47.217:5678", "source": "test", "ttl": 3600, "options": {}} You can find that the "confirmed" property is false by default. 4. Post a message to the subscription's queue. The request:: curl -i -X POST http://10.229.47.217:8888/v2/queues/test/messages \ -H "Content-type: application/json" \ -H "Client-ID: de305d54-75b4-431b-adb2-eb6b9e546014" \ -H "X-Auth-Token: 440b677561454ea8a7f872201dd4e2c4" \ -d '{"messages": [{"ttl": 3600,"body": "test123"}]}' The response:: HTTP/1.1 201 Created content-length: 68 content-type: application/json; charset=UTF-8 location: http://10.229.47.217:8888/v2/queues/test/messages?ids=57624dee3990b4634d71bb4a Connection: close {"resources": ["/v2/queues/test/messages/57624dee3990b4634d71bb4a"]} The subscriber receives nothing, and you will find an info log in zaqar-wsgi:: 2016-07-06 11:37:57.929 98400 INFO zaqar.notification.notifier [(None,)2473911afe2642c0b74d7e1200d9bba7 51be2c72393e457ebf0a22a668e10a64 - - -] The subscriber http://10.229.47.217:5678 is not confirmed. 5. 
Use the information shown in step 3 to confirm the subscription. The request:: curl -i -X PUT http://10.229.47.217:8888/v2/queues/test/subscriptions/576256b03990b480617b4063/confirm \ -H "Content-type: application/json" \ -H "Client-ID: de305d54-75b4-431b-adb2-eb6b9e546014" \ -H "URL-Methods: PUT" -H "X-Project-ID: 51be2c72393e457ebf0a22a668e10a64" \ -H "URL-Signature: d28dced4eabbb09878a73d9a7a651df3a3ce5434fcdb6c3727decf6c7078b282" \ -H "URL-Paths: /v2/queues/test/subscriptions/576256b03990b480617b4063/confirm" \ -H "URL-Expires: 2016-06-16T08:35:12" -d '{"confirmed": true}' The response:: HTTP/1.1 204 No Content location: /v2/queues/test/subscriptions/576256b03990b480617b4063/confirm Connection: close 6. Repeat step 3 to get the subscription. The request:: curl -i -X GET http://10.229.47.217:8888/v2/queues/test/subscriptions/576256b03990b480617b4063 \ -H "Content-type: application/json" \ -H "Client-ID: de305d54-75b4-431b-adb2-eb6b9e546014" \ -H "X-Auth-Token: 440b677561454ea8a7f872201dd4e2c4" The response:: HTTP/1.1 200 OK content-length: 155 content-type: application/json; charset=UTF-8 Connection: close {"confirmed": true, "age": 1370, "id": "576256b03990b480617b4063", "subscriber": "http://10.229.47.217:5678", "source": "test", "ttl": 3600, "options": {}} The subscription is confirmed now. 7. Repeat step 4 to post a new message. The request:: curl -i -X POST http://10.229.47.217:8888/v2/queues/test/messages \ -H "Content-type: application/json" \ -H "Client-ID: de305d54-75b4-431b-adb2-eb6b9e546014" \ -H "X-Auth-Token: 440b677561454ea8a7f872201dd4e2c4" \ -d '{"messages": [{"ttl": 3600,"body": "test123"}]}' The response:: HTTP/1.1 201 Created content-length: 68 content-type: application/json; charset=UTF-8 location: http://10.229.47.217:8888/v2/queues/test/messages?ids=5762526d3990b474c80d5483 Connection: close {"resources": ["/v2/queues/test/messages/5762526d3990b474c80d5483"]} Then in the subscriber sample service, you will receive a request:: WARNING:root:{"body": {"event": "BackupStarted"}, "queue_name": "test", "Message_Type": "Notification", "ttl": 3600} 10.229.47.217 - - [06/Jul/2016 13:19:07] "POST / HTTP/1.1" 200 - 8. Unsubscribe. The request:: curl -i -X PUT http://10.229.47.217:8888/v2/queues/test/subscriptions/576256b03990b480617b4063/confirm \ -H "Content-type: application/json" \ -H "Client-ID: de305d54-75b4-431b-adb2-eb6b9e546014" \ -H "URL-Methods: PUT" -H "X-Project-ID: 51be2c72393e457ebf0a22a668e10a64" \ -H "URL-Signature: d28dced4eabbb09878a73d9a7a651df3a3ce5434fcdb6c3727decf6c7078b282" \ -H "URL-Paths: /v2/queues/test/subscriptions/576256b03990b480617b4063/confirm" \ -H "URL-Expires: 2016-06-16T08:35:12" -d '{"confirmed": false}' The response:: HTTP/1.1 204 No Content location: /v2/queues/test/subscriptions/576256b03990b480617b4063/confirm Connection: close Then try to post a message. The subscriber will not receive the notification any more. 
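For reference, the webhook contract described in steps 2-5 can be satisfied by a very small HTTP service. The sketch below is only an illustration of that flow, not the actual subscriber_service_sample.py shipped with Zaqar; it assumes the sample's default port 5678 and simply logs confirmation requests instead of signing and sending the confirm PUT itself:

.. code-block:: python

   import json
   from http.server import BaseHTTPRequestHandler, HTTPServer

   class Subscriber(BaseHTTPRequestHandler):
       def do_POST(self):
           # Read the JSON body Zaqar posts to the subscriber.
           length = int(self.headers.get('Content-Length', 0))
           payload = json.loads(self.rfile.read(length) or b'{}')
           if payload.get('Message_Type') == 'SubscriptionConfirmation':
               # A real service would now PUT {"confirmed": true} to
               # payload['SubscribeURL'], passing the URL-Signature,
               # URL-Methods, URL-Paths and URL-Expires values shown
               # in step 5 as request headers.
               print('confirmation requested:', payload.get('SubscribeURL'))
           else:
               print('notification received:', payload)
           self.send_response(200)
           self.end_headers()

   if __name__ == '__main__':
       HTTPServer(('0.0.0.0', 5678), Subscriber).serve_forever()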
Email ----- 1. For email confirmation, you also need to set the config options "external_confirmation_url", "subscription_confirmation_email_template" and "unsubscribe_confirmation_email_template". The confirmation page URL will be used in the email subscription confirmation before notification. This page is not hosted on the Zaqar server; users should build their own web service to provide this web page. The subscription_confirmation_email_template option lets users customize the subscription confirmation email content, including topic, body and sender. The unsubscribe_confirmation_email_template option lets users customize the unsubscribe confirmation email content, including topic, body and sender, too:: In the config file: [notification] require_confirmation = True external_confirmation_url = http://web_service_url/ subscription_confirmation_email_template = topic:Zaqar Notification - Subscription Confirmation,\ body:'You have chosen to subscribe to the queue: {0}. This queue belongs to project: {1}. To confirm this subscription, click or visit this link below: {2}',\ sender:Zaqar Notifications unsubscribe_confirmation_email_template = topic: Zaqar Notification - Unsubscribe Confirmation,\ body:'You have unsubscribed successfully to the queue: {0}. This queue belongs to project: {1}. To resubscribe this subscription, click or visit this link below: {2}',\ sender:Zaqar Notifications In the policy.yaml file: "subscription:confirm": "" 2. Create a subscription. For email confirmation, you should create a subscription like this:: curl -i -X POST http://10.229.47.217:8888/v2/queues/test/subscriptions \ -H "Content-type: application/json" \ -H "Client-ID: de305d54-75b4-431b-adb2-eb6b9e546014" \ -H "X-Auth-Token: 440b677561454ea8a7f872201dd4e2c4" \ -d '{"subscriber":"your email address", "ttl":3600, "options":{}}' The response:: HTTP/1.1 201 Created content-length: 47 content-type: application/json; charset=UTF-8 location: http://10.229.47.217:8888/v2/queues/test/subscriptions Connection: close {"subscription_id": "576256b03990b480617b4063"} After the subscription is created, Zaqar will send an email to the subscriber's email address. The email specifies how to confirm the subscription. 3. Click the confirmation page link in the email body 4. The confirmation page will send the subscription confirmation request to the Zaqar server automatically. Users can also choose to unsubscribe by clicking the unsubscription link on this page; that will cause Zaqar to cancel this subscription and send another email to notify the user of this unsubscription action. Zaqar provides two examples of those web pages that will help users build their own pages:: zaqar/sample/html/subscriptionConfirmation.html zaqar/sample/html/unsubscriptionConfirmation.html Users can place those pages in a web server like Apache and access them in a browser, so the external_confirmation_url will look like this:: http://127.0.0.1:8080/subscriptionConfirmation.html For CORS, here we use zaqar/samples/html/confirmation_web_service_sample.py as a simple web service example; it will relay the confirmation request to the Zaqar server. So before step 3, you should start the web service first. The service can be started simply with the command:: python zaqar/samples/html/confirmation_web_service_sample.py The service's default port is 5678. If you want to use a new port, the command will be like:: python zaqar/samples/html/confirmation_web_service_sample.py new_port_number ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/admin/writing_pipeline_stages.rst0000664000175100017510000001750015033040005023546 0ustar00mylesmyles======================================== Writing stages for the storage pipelines ======================================== Introduction ~~~~~~~~~~~~ A pipeline is a set of stages needed to process a request. When a new request comes to Zaqar, first the message goes through the transport layer pipeline and then through one of the storage layer pipelines depending on the type of operation of each particular request. 
For example, if Zaqar receives a request to make a queue-related operation, the storage layer pipeline will be ``queue pipeline``. Zaqar always has the actual storage controller as the final storage layer pipeline stage. By setting the options in the ``[storage]`` section of ``zaqar.conf`` you can add additional stages to these storage layer pipelines: * **Claim pipeline** * **Message pipeline** with built-in stage available to use: * ``zaqar.notification.notifier`` - sends notifications to the queue subscribers on each incoming message to the queue, i.e. enables notifications functionality. * **Queue pipeline** * **Subscription pipeline** The storage layer pipelines options are empty by default, because additional stages can affect the performance of Zaqar. Depending on the stages, the sequence in which the option values are listed may or may not matter. You can add your own external stages to the storage layer pipelines. Things to know before writing the stage ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Stages in the pipeline must implement the storage controller methods they need to hook. You can find all the methods available to hook in the abstract classes in ``zaqar/storage/base.py``. For example, if you're looking for all methods available to hook for the queue storage layer pipeline, see the ``Queue`` class in ``zaqar/storage/base.py``. As you can see, Zaqar's built-in stage ``zaqar.notification.notifier`` implements the ``post`` method of the ``zaqar.storage.base.Message`` abstract class. A stage can halt the pipeline immediately by returning a value that is not None; otherwise, processing will continue to the next stage, ending with the actual storage controller. .. warning:: In most cases it does not matter what non-None value the storage pipeline returns, but sometimes the returned value is used by the transport layer and you have to be careful. For example, during a queue creation request, if the storage driver returns ``True``, the transport layer responds to the client with the ``201`` http response code; if ``False``, it responds with the ``204`` http response code. See: ``zaqar.transport.wsgi.v2_0.queues.ItemResource#on_put``. Zaqar finds stages and their source code through the Python entry points mechanism. All Python packages containing stages for Zaqar must register their stages under the ``zaqar.storage.stages`` entry point group during install, either in ``setup.py`` or in ``setup.cfg``. If the stage is registered, and the name of the stage's entry point is specified by the user in one of the ``zaqar.conf`` storage layer pipeline options, the stage will be loaded into the particular storage layer pipeline. Zaqar imports stages as plugins. See ``zaqar.storage.pipeline#_get_storage_pipeline``. For additional information about plugins see: `Stevedore - Creating Plugins`_ and `Stevedore - Loading the Plugins`_. Example of external stage (written outside Zaqar package) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This is an example of a small package with a stage that can process queue-related requests in Zaqar. The stage does not do anything useful, but is good as an example. File tree structure of the package:: . ├── setup.py └── ubershystages ├── __init__.py └── queues ├── __init__.py └── lovely.py 2 directories, 4 files ``setup.py``: .. 
code-block:: python from setuptools import setup, find_packages setup( name='ubershystages', version='1.0', description='Demonstration package for Zaqar with plugin pipeline stage', author='Ubershy', author_email='ubershy@gmail.com', url='', classifiers=['Development Status :: 3 - Alpha', 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.5', 'Intended Audience :: Developers', 'Environment :: Console', ], platforms=['Any'], scripts=[], packages=find_packages(), include_package_data=True, entry_points={ 'zaqar.storage.stages': [ 'ubershy.lovelyplugin = ubershystages.queues.lovely:LovelyStage', ], }, zip_safe=False, ) ``lovely.py``: .. code-block:: python class LovelyStage(object): """This stage: 1. Prints 'Lovely stage is processing request...' on each queue creation or deletion request. 2. Prints 'Oh, what a lovely day!' on each creation request of a queue named 'lovely'. 3. Prevents deletion of a queue named 'lovely' and prints 'Secretly keeping lovely queue' on such attempt. """ def __init__(self, *args, **kwargs): print("Lovely stage is loaded!") def create(self, name, metadata=None, project=None): """Stage's method which processes queue creation request. :param name: The queue name :param project: Project id """ self.printprocessing() if name == 'lovely': print("Oh, what a lovely day!") def delete(self, name, project=None): """Stage's method which processes queue deletion request. :param name: The queue name :param project: Project id :returns: Something non-None, if the queue has a name 'lovely'. It will stop further processing through the other stages of the pipeline, and the request will not reach the storage controller driver, preventing queue deletion from the database. """ self.printprocessing() if name == 'lovely': print('Secretly keeping lovely queue') something = "shhh... it's a bad practice" return something def printprocessing(self): print('Lovely stage is processing request...') To install the package to the system in the root directory of the package run: .. code-block:: console # pip install -e . In ``zaqar.conf`` add ``ubershy.lovelyplugin`` to the ``queue_pipeline`` option: .. code-block:: ini [storage] queue_pipeline = ubershy.lovelyplugin Start Zaqar: .. code-block:: console $ zaqar-server If the stage has successfully loaded to Zaqar you will see amongst terminal output lines the ``Lovely stage is loaded!`` line. Then you can try to perform queue create and queue delete operations with the queue 'lovely' and see what will happen in Zaqar's database. .. note:: You can hold multiple stages in one package, just be sure that all stages will be registered as entry points. For example, in the ``setup.py`` you can register additional ``ubershy.nastyplugin`` stage: .. code-block:: python entry_points={ 'zaqar.storage.stages': [ 'ubershy.lovelyplugin = ubershystages.queues.lovely:LovelyStage', 'ubershy.nastyplugin = ubershystages.messages.nasty:NastyStage', ], }, .. _`Stevedore - Creating Plugins`: https://docs.openstack.org/stevedore/latest/user/tutorial/creating_plugins.html .. 
_`Stevedore - Loading the Plugins`: https://docs.openstack.org/stevedore/latest/user/tutorial/loading.html ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5520136 zaqar-20.1.0.dev29/doc/source/cli/0000775000175100017510000000000015033040026015555 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/cli/index.rst0000664000175100017510000000011315033040005017406 0ustar00mylesmylesCLI Reference ============= .. toctree:: :maxdepth: 1 zaqar-status ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/cli/zaqar-status.rst0000664000175100017510000000333015033040005020742 0ustar00mylesmyles============ zaqar-status ============ Synopsis ======== :: zaqar-status [] Description =========== :program:`zaqar-status` is a tool that provides routines for checking the status of a Zaqar deployment. Options ======= The standard pattern for executing a :program:`zaqar-status` command is:: zaqar-status [] Run without arguments to see a list of available command categories:: zaqar-status Categories are: * ``upgrade`` Detailed descriptions are below. You can also run with a category argument such as ``upgrade`` to see a list of all commands in that category:: zaqar-status upgrade These sections describe the available categories and arguments for :program:`zaqar-status`. Upgrade ~~~~~~~ .. _zaqar-status-checks: ``zaqar-status upgrade check`` Performs a release-specific readiness check before restarting services with new code. This command expects to have complete configuration and access to databases and services. **Return Codes** .. list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - All upgrade readiness checks passed successfully and there is nothing to do. * - 1 - At least one check encountered an issue and requires further investigation. This is considered a warning but the upgrade may be OK. * - 2 - There was an upgrade status check failure that needs to be investigated. This should be considered something that stops an upgrade. * - 255 - An unexpected error occurred. **History of Checks** **8.0.0 (Stein)** * Placeholder to be filled in with checks as they are added in Stein. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/conf.py0000664000175100017510000001732215033040005016307 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # # This file is execfile()d with the current directory set # to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import os import sys # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. 
If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('../../')) sys.path.insert(0, os.path.abspath('../')) sys.path.insert(0, os.path.abspath('./')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. # They can be extensions coming with Sphinx (named 'sphinx.ext.*') # or your custom ones. extensions = ['sphinx.ext.coverage', 'sphinx.ext.ifconfig', 'sphinx.ext.graphviz', 'stevedore.sphinxext', 'oslo_config.sphinxext', 'sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'oslo_config.sphinxconfiggen', 'openstackdocstheme', ] config_generator_config_file = '../../etc/oslo-config-generator/zaqar.conf' sample_config_basename = '_static/zaqar' # autodoc generation is a bit aggressive and a nuisance # when doing heavy text edit cycles. Execute "export SPHINX_DEBUG=1" # in your terminal to disable todo_include_todos = True # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = 'zaqar' copyright = '2010-present, OpenStack Foundation' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # from zaqar.version import version_info # The full version, including alpha/beta/rc tags. release = version_info.release_string() # The short X.Y version. version = version_info.version_string() # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. unused_docs = [ 'api_ext/rst_extension_template', 'installer', ] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = [] # The reST default role (used for this markup: `text`) to use # for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = False # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # A list of ignored prefixes for module index sorting. modindex_common_prefix = ['zaqar.'] # -- Options for man page output ---------------------------------------------- # Grouping the document tree for man pages. # List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme_path = ["."] # html_theme = '_theme' html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. 
html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%Y-%m-%d %H:%M' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'zaqardoc' # -- Options for LaTeX output ------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'Zaqar.tex', 'Zaqar Documentation', 'Anso Labs, LLC', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. 
#latex_use_modindex = True # Options for openstackdocstheme openstackdocs_repo_name = 'openstack/zaqar' openstackdocs_bug_project = 'zaqar' openstackdocs_bug_tag = '' ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5520136 zaqar-20.1.0.dev29/doc/source/configuration/0000775000175100017510000000000015033040026017655 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/configuration/configuring.rst0000664000175100017510000001541315033040005022722 0ustar00mylesmyles.. _basic-configuration: Basic Configuration =================== The ``zaqar.conf`` configuration file is an `INI file format `_. This file is located in ``/etc/zaqar``. If there is a file ``zaqar.conf`` in the ``~/.zaqar`` directory, it is used instead of the one in the ``/etc/zaqar`` directory. When you manually install the Message service, you must generate the zaqar.conf file using the config samples generator located inside the Zaqar installation directory and customize it according to your preferences. To generate the sample configuration file ``zaqar/etc/zaqar.conf.sample``: .. code-block:: console # pip install tox $ cd zaqar $ tox -e genconfig Where :samp:`{zaqar}` is your Message service installation directory. Then copy the Message service configuration sample to the directory ``/etc/zaqar``: .. code-block:: console # cp etc/zaqar.conf.sample /etc/zaqar/zaqar.conf For a list of configuration options, see the tables in this guide. .. important:: Do not specify quotes around configuration options. Message API configuration ------------------------- The Message service has two APIs: the HTTP REST API for the WSGI transport driver, and the Websocket API for the Websocket transport driver. The Message service can use only one transport driver at the same time. The functionality and behavior of the APIs are defined by API versions. For example, the Websocket API v2 acts the same as the HTTP REST API v2. For now there are v1, v1.1 and v2 versions of the HTTP REST API and only a v2 version of the Websocket API. Permission control options in each API version: * The v1 does not have any permission options. * The v1.1 has only the ``admin_mode`` option which controls the global permission to access the pools and flavors functionality. * The v2 has only: * RBAC policy options: ``policy_default_rule``, ``policy_dirs``, ``policy_file`` which control the permissions to access each type of functionality for different types of users. .. warning:: JSON formatted policy file is deprecated since Zaqar 12.0.0 (Wallaby). This `oslopolicy-convert-json-to-yaml`__ tool will migrate your existing JSON-formatted policy file to YAML in a backward-compatible way. .. __: https://docs.openstack.org/oslo.policy/latest/cli/oslopolicy-convert-json-to-yaml.html * the ``secret_key`` option which defines a secret key to use for signing special URLs. These are called pre-signed URLs and give temporary permissions to outsiders of the system. Authentication and authorization -------------------------------- All requests to the API may only be performed by an authenticated agent. The preferred authentication system is the OpenStack Identity service, code-named keystone. Identity service authentication ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To authenticate, an agent issues an authentication request to an Identity service endpoint. 
In response to valid credentials, the Identity service responds with an authentication token and a service catalog that contains a list of all services and endpoints available for the given token. Multiple endpoints may be returned for the Message service according to physical locations and performance/availability characteristics of different deployments. Normally, Identity service middleware provides the ``X-Project-Id`` header based on the authentication token submitted by the Message service client. For this to work, clients must specify a valid authentication token in the ``X-Auth-Token`` header for each request to the Message service API. The API validates authentication tokens against the Identity service before servicing each request. No authentication ~~~~~~~~~~~~~~~~~ If authentication is not enabled, clients must provide the ``X-Project-Id`` header themselves. Notifications options --------------------- The notifications feature in the Message service can be enabled by adding the ``zaqar.notification.notifier`` stage to the message storage layer pipeline. To do it, ensure that ``zaqar.notification.notifier`` is added to the ``message_pipeline`` option in the ``[storage]`` section of ``zaqar.conf``: .. code-block:: ini [storage] message_pipeline = zaqar.notification.notifier Pooling options --------------- The Message service supports pooling. Pooling aims to make the Message service highly scalable without losing any of its flexibility by allowing users to use multiple back ends. Storage drivers options ----------------------- Storage back ends ~~~~~~~~~~~~~~~~~ The Message service supports several different storage back ends (storage drivers) for storing management information, messages and their metadata. The recommended storage back end is MongoDB. For information on how to specify the storage back ends, see the storage driver options in ``zaqar.conf``. When the storage back end is chosen, the corresponding back-end options become active. For example, if Redis is chosen as the management storage back end, the options in the ``[drivers:management_store:redis]`` section become active. Storage layer pipelines ~~~~~~~~~~~~~~~~~~~~~~~ A pipeline is a set of stages needed to process a request. When a new request comes to the Message service, first it goes through the transport layer pipeline and then through one of the storage layer pipelines depending on the type of operation of each particular request. For example, if the Message service receives a request to make a queue-related operation, the storage layer pipeline will be ``queue pipeline``. The Message service always has the actual storage controller as the final storage layer pipeline stage. By setting the options in the ``[storage]`` section of ``zaqar.conf``, you can add additional stages to these storage layer pipelines: * **Claim pipeline** * **Message pipeline** with built-in stage available to use: * ``zaqar.notification.notifier`` - sends notifications to the queue subscribers on each incoming message to the queue, in other words, enables notifications functionality. * **Queue pipeline** * **Subscription pipeline** The storage layer pipelines options are empty by default, because additional stages can affect the performance of the Message service. Depending on the stages, the sequence in which the option values are listed may or may not matter. You can add external stages to the storage layer pipelines; a short configuration sketch is shown below. For information on how to write and add your own external stages, see the `Writing stages for the storage pipelines `_ tutorial. 
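As a quick illustration, a ``[storage]`` section that both enables notifications and plugs a third-party stage into the queue pipeline could look like the following sketch (``mypackage.mystage`` is a hypothetical entry point name used only for illustration, not something shipped with Zaqar):

.. code-block:: ini

   [storage]
   message_pipeline = zaqar.notification.notifier
   queue_pipeline = mypackage.mystage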
Messaging log files ------------------- The corresponding log file of each Messaging service is stored in the ``/var/log/zaqar/`` directory of the host on which each service runs. .. list-table:: Log files used by Messaging services :widths: 35 35 :header-rows: 1 * - Log filename - Service that logs to the file * - ``server.log`` - Messaging service ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/configuration/index.rst0000664000175100017510000000060315033040005021512 0ustar00mylesmyles.. _configuring: =================== Configuration Guide =================== This section provides a list of all possible options for each configuration file. Refer to :ref:`basic-configuration` for a detailed guide in getting started with various option settings. Zaqar uses the following configuration files for its various services. .. toctree:: :glob: :maxdepth: 1 * ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/configuration/sample-configuration.rst0000664000175100017510000000077515033040005024543 0ustar00mylesmyles.. _sample-configuration: ========================== Zaqar Sample Configuration ========================== The following are sample configuration files for all Zaqar services and utilities. These are generated from code and reflect the current state of code in the Zaqar repository. Sample configuration for Zaqar API ---------------------------------- This sample configuration can also be viewed in `zaqar.conf.sample <../_static/zaqar.conf.sample>`_. .. literalinclude:: ../_static/zaqar.conf.sample ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/configuration/zaqar.rst0000664000175100017510000000017315033040005021523 0ustar00mylesmyles.. _zaqar.conf: ---------- zaqar.conf ---------- .. show-options:: :config-file: etc/oslo-config-generator/zaqar.conf ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5530136 zaqar-20.1.0.dev29/doc/source/contributor/0000775000175100017510000000000015033040026017360 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/contributor/contributing.rst0000664000175100017510000000344015033040005022617 0ustar00mylesmyles============================ So You Want to Contribute... ============================ For general information on contributing to OpenStack, please check out the `contributor guide `_ to get started. It covers all the basics that are common to all OpenStack projects: the accounts you need, the basics of interacting with our Gerrit review system, how we communicate as a community, etc. Below will cover the more project specific information you need to get started with Zaqar. Communication ~~~~~~~~~~~~~ * IRC channel #openstack-zaqar at OFTC * Mailing list (prefix subjects with ``[zaqar]`` for faster responses) http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss Contacting the Core Team ~~~~~~~~~~~~~~~~~~~~~~~~ Please refer the `zaqar Core Team `_ contacts. New Feature Planning ~~~~~~~~~~~~~~~~~~~~ zaqar features are tracked on `Launchpad `_. Task Tracking ~~~~~~~~~~~~~ We track our tasks in `Launchpad `_. If you're looking for some smaller, easier work item to pick up and get started on, search for the 'low-hanging-fruit' tag. 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/configuration/zaqar.rst0000664000175100017510000000017315033040005021523 0ustar00mylesmyles
.. _zaqar.conf:

----------
zaqar.conf
----------

.. show-options::
   :config-file: etc/oslo-config-generator/zaqar.conf

././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5530136 zaqar-20.1.0.dev29/doc/source/contributor/0000775000175100017510000000000015033040026017360 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/contributor/contributing.rst0000664000175100017510000000344015033040005022617 0ustar00mylesmyles
============================
So You Want to Contribute...
============================

For general information on contributing to OpenStack, please check out the `contributor guide `_ to get started. It covers all the basics that are common to all OpenStack projects: the accounts you need, the basics of interacting with our Gerrit review system, how we communicate as a community, etc.

The sections below cover the project-specific information you need to get started with Zaqar.

Communication
~~~~~~~~~~~~~

* IRC channel #openstack-zaqar at OFTC
* Mailing list (prefix subjects with ``[zaqar]`` for faster responses) http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss

Contacting the Core Team
~~~~~~~~~~~~~~~~~~~~~~~~

Please refer to the `zaqar Core Team `_ contacts.

New Feature Planning
~~~~~~~~~~~~~~~~~~~~

zaqar features are tracked on `Launchpad `_.

Task Tracking
~~~~~~~~~~~~~

We track our tasks in `Launchpad `_. If you're looking for a smaller, easier work item to pick up and get started on, search for the 'low-hanging-fruit' tag.

Reporting a Bug
~~~~~~~~~~~~~~~

You found an issue and want to make sure we are aware of it? You can do so on `Launchpad `_.

Getting Your Patch Merged
~~~~~~~~~~~~~~~~~~~~~~~~~

All changes proposed to the zaqar project require one or two +2 votes from zaqar core reviewers before one of the core reviewers can approve the patch by giving a ``Workflow +1`` vote.

Project Team Lead Duties
~~~~~~~~~~~~~~~~~~~~~~~~

All common PTL duties are enumerated in the `PTL guide `_.

././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/contributor/development.environment.rst0000664000175100017510000002037015033040005024776 0ustar00mylesmyles
.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

====================================
Setting up a development environment
====================================

This section describes how to set up a working Python development environment that you can use in developing Zaqar on Ubuntu or Fedora. These instructions assume that you are familiar with Git. Refer to GettingTheCode_ for additional information.

.. _GettingTheCode: https://wiki.openstack.org/wiki/Getting_The_Code

Virtual environments
--------------------

Use virtualenv_ to track and manage Python dependencies for developing and testing Zaqar. Using virtualenv_ enables you to install Python dependencies in an isolated virtual environment, instead of installing the packages at the system level.

.. _virtualenv: https://pypi.org/project/virtualenv

.. note::

   Virtualenv is useful for development purposes, but is not typically used for full integration testing or production usage. If you want to learn about production best practices, check out the `OpenStack Operations Guide`_.

   .. _`OpenStack Operations Guide`: https://wiki.openstack.org/wiki/OpsGuide

Install GNU/Linux system dependencies
#####################################

.. note::

   The instructions in this section were tested on Ubuntu 14.04 (Trusty) and Fedora-based (RHEL 6.1) distributions. Feel free to add notes and make changes according to your experiences or operating system. Learn more about contributing to Zaqar documentation in the :doc:`welcome` manual.

Install the prerequisite packages.

On Ubuntu:

.. code-block:: console

   $ sudo apt-get install gcc python-pip libxml2-dev libxslt1-dev python-dev zlib1g-dev

On Fedora-based distributions (e.g., Fedora/RHEL/CentOS):

.. code-block:: console

   $ sudo dnf install gcc python-pip libxml2-devel libxslt-devel python3-devel

Install MongoDB
###############

You also need to have MongoDB_ installed and running.

.. _MongoDB: http://www.mongodb.org

On Ubuntu, follow the instructions in the `MongoDB on Ubuntu Installation Guide`_.

.. _`MongoDB on Ubuntu installation guide`: http://docs.mongodb.org/manual/tutorial/install-mongodb-on-ubuntu/

On Fedora-based distributions, follow the instructions in the `MongoDB on Red Hat Enterprise, CentOS, Fedora, or Amazon Linux Installation Guide`_.
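Once the packages are installed, it is worth confirming that the database is actually up before configuring Zaqar. A quick sketch, assuming a systemd-based distribution; the service name (``mongod`` vs. ``mongodb``) and the shell binary (``mongo`` vs. ``mongosh``) vary with the distribution and MongoDB version:

.. code-block:: console

   $ sudo systemctl start mongod
   $ mongo --eval 'db.runCommand({ping: 1})'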
.. _`MongoDB on Red Hat Enterprise, CentOS, Fedora, or Amazon Linux installation guide`: http://docs.mongodb.org/manual/tutorial/install-mongodb-on-red-hat-centos-or-fedora-linux/

.. note::

   If you are a contributor and plan to run unit tests on Zaqar, you may want to add this line to the MongoDB configuration file (``etc/mongod.conf`` or ``etc/mongodb.conf`` depending on the distribution):

   .. code-block:: ini

      smallfiles = true

   Many of Zaqar's unit tests do not clean up their testing databases after executing, and database files consume a lot of disk space even if they do not contain any records. This behavior will be fixed soon.

Getting the code
################

Get the code from git.openstack.org to create a local repository with Zaqar:

.. code-block:: console

   $ git clone https://git.openstack.org/openstack/zaqar.git

Configuration
#############

#. From your home folder create the ``~/.zaqar`` folder. This directory holds the configuration files for Zaqar:

   .. code-block:: console

      $ mkdir ~/.zaqar

#. Generate the sample configuration file ``zaqar/etc/zaqar.conf.sample``:

   .. code-block:: console

      $ pip install tox
      $ cd zaqar
      $ tox -e genconfig

#. Copy the Zaqar configuration samples to the directory ``~/.zaqar/``:

   .. code-block:: console

      $ cp etc/zaqar.conf.sample ~/.zaqar/zaqar.conf
      $ cp etc/logging.conf.sample ~/.zaqar/logging.conf

#. Find the ``[drivers]`` section in ``~/.zaqar/zaqar.conf`` and specify ``mongodb`` as the message store and management store:

   .. code-block:: ini

      message_store = mongodb
      management_store = mongodb

#. Then find the ``[drivers:message_store:mongodb]`` and ``[drivers:management_store:mongodb]`` sections and specify the :samp:`{URI}` to point to your local MongoDB instance by adding this line to both sections:

   .. code-block:: ini

      uri = mongodb://$MONGODB_HOST:$MONGODB_PORT

   By default you will have:

   .. code-block:: ini

      uri = mongodb://127.0.0.1:27017

   This :samp:`{URI}` points to a single MongoDB node which, of course, is not reliable, so you need to set the following in the ``[default]`` section of the configuration file:

   .. code-block:: ini

      unreliable = True

   You can omit this parameter, or set it to ``False``, only if the provided :samp:`{URI}` actually points to a MongoDB Replica Set or Mongos, and the "Write concern" parameter is set to ``majority`` or to a number greater than ``1``. For example, a :samp:`{URI}` to a reliable MongoDB can look like this:

   .. code-block:: ini

      uri = mongodb://mydb0,mydb1,mydb2:27017/?replicaSet=foo&w=2

   Here ``mydb0``, ``mydb1``, ``mydb2`` are addresses of the configured MongoDB Replica Set nodes, the ``replicaSet`` (Replica Set name) parameter is set to ``foo``, and the ``w`` (Write concern) parameter is set to ``2``.

#. For logging, find the ``[handler_file]`` section in ``~/.zaqar/logging.conf`` and modify as desired:

   .. code-block:: ini

      args=('zaqar.log', 'w')

Installing and using virtualenv
###############################

#. Install virtualenv by running:

   .. code-block:: console

      $ pip install virtualenv

#. Create and activate a virtual environment:

   .. code-block:: console

      $ virtualenv zaqarenv
      $ source zaqarenv/bin/activate

#. Install Zaqar:

   .. code-block:: console

      $ pip install -e .

#. Install the required Python binding for MongoDB:

   .. code-block:: console

      $ pip install pymongo

#. Start the Zaqar server in ``info`` logging mode:

   .. code-block:: console

      $ zaqar-server -v

   Or you can start the Zaqar server in ``debug`` logging mode:

   .. code-block:: console

      $ zaqar-server -d

#. Verify Zaqar is running by creating a queue via curl. In a separate terminal run:

   .. code-block:: console

      $ curl -i -X PUT http://localhost:8888/v2/queues/samplequeue -H "Content-type: application/json" -H 'Client-ID: 123e4567-e89b-12d3-a456-426655440000' -H 'X-PROJECT-ID: 12345'

   .. note::

      ``Client-ID`` expects a valid UUID. ``X-PROJECT-ID`` expects a user-defined project identifier.
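   To check the message flow end to end, you can also post a test message to the new queue. This is a sketch based on the v2 messages API (a ``messages`` array in which each item carries a ``ttl`` and a ``body``); treat the exact payload shape as an assumption and compare it with the API reference:

   .. code-block:: console

      $ curl -i -X POST http://localhost:8888/v2/queues/samplequeue/messages -d '{"messages": [{"ttl": 300, "body": {"event": "test"}}]}' -H "Content-type: application/json" -H 'Client-ID: 123e4567-e89b-12d3-a456-426655440000' -H 'X-PROJECT-ID: 12345'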
#. Get ready to code!

.. note::

   You can run the Zaqar server in the background by passing the ``--daemon`` flag:

   .. code-block:: console

      $ zaqar-server -v --daemon

   But with this method you will not get immediate visual feedback and it will be harder to kill and restart the process.

Troubleshooting
^^^^^^^^^^^^^^^

No handlers found for zaqar.client (...)
""""""""""""""""""""""""""""""""""""""""

This happens because the current user cannot create the log file (for the default configuration in ``/var/log/zaqar/server.log``). To solve it, create the folder:

.. code-block:: console

   $ sudo mkdir /var/log/zaqar

Create the file:

.. code-block:: console

   $ sudo touch /var/log/zaqar/server.log

And try running the server again.

DevStack
--------

If you want to use Zaqar in an integrated OpenStack development environment, you can add it to your DevStack_ deployment. To do this, you first need to add the following setting to your ``local.conf``:

.. code-block:: bash

   enable_plugin zaqar https://git.openstack.org/openstack/zaqar

Then run the ``stack.sh`` script as usual.

.. _DevStack: https://docs.openstack.org/devstack/latest/
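If you also want DevStack to configure a particular storage back end for Zaqar, the plugin appears to honor a ``ZAQAR_BACKEND`` variable in ``local.conf`` (the project's CI jobs use it with ``mongodb``, ``redis``, and ``swift`` values); treat the variable name as an assumption and check the plugin's settings file:

.. code-block:: bash

   enable_plugin zaqar https://git.openstack.org/openstack/zaqar
   ZAQAR_BACKEND=mongodb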
Running tests
-------------

See :doc:`running_tests` for details.

Running the benchmarking tool
-----------------------------

See :doc:`../admin/running_benchmark` for details.

Contributing your work
----------------------

See :doc:`welcome` and :doc:`first_patch` for details.

././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/contributor/first_patch.rst0000664000175100017510000002502515033040005022421 0ustar00mylesmyles
.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

================
Your first patch
================

This section describes how to create your first patch and upload it to Gerrit_ for reviewing.

Create your contributor accounts and set up your code environment
-----------------------------------------------------------------

Accounts setup
##############

You will need to create a Launchpad_ account to log in to the Gerrit_ review system dashboard. This is also useful for automatically crediting bug fixes to you when you address them with your code commits. You will also have to sign the `Contributors License Agreement`_ and `join the OpenStack Foundation`_. It is a good idea to use the same email for all of these accounts to avoid hook errors. Visit the `Gerrit Workflow's account setup`_ section in the wiki to get more information on setting up your accounts.

.. _Launchpad: https://launchpad.net/
.. _Gerrit: https://review.opendev.org/
.. _`Contributors License Agreement`: https://docs.openstack.org/infra/manual/developers.html#account-setup
.. _`join the OpenStack Foundation`: https://www.openstack.org/join/
.. _`Gerrit Workflow's account setup`: https://docs.openstack.org/infra/manual/developers.html#account-setup

SSH setup
#########

You are going to need to create and upload an SSH key to Gerrit to be able to commit changes for review. To create an SSH key:

.. code-block:: console

   $ ssh-keygen -t rsa

You can optionally enter a password to enhance security. View and copy your SSH key:

.. code-block:: console

   $ less ~/.ssh/id_rsa.pub

Now you can `upload the SSH key to Gerrit`_.

.. _`upload the SSH key to Gerrit`: https://review.opendev.org/#/settings/ssh-keys

Git Review installation
#######################

Before you start working, make sure you have ``git-review`` installed on your system. You can install it with the following command:

.. code-block:: console

   $ pip install git-review

``Git-review`` checks if you can authenticate to Gerrit with your SSH key. It will ask you for your username. You can configure your Gerrit username so you don't have to keep re-entering it every time you want to use ``git-review``:

.. code-block:: console

   $ git config --global gitreview.username yourgerritusername

You can also save some time by entering your email and your name:

.. code-block:: console

   $ git config --global gitreview.email "yourgerritemail"
   $ git config --global gitreview.name "Firstname Lastname"

You can view your Gerrit user name on the `settings page`_.

.. _`settings page`: https://review.opendev.org/#/settings/

Project setup
#############

Clone the Zaqar repository with the following git command:

.. code-block:: console

   $ git clone https://git.openstack.org/openstack/zaqar.git

For information on how to set up the Zaqar development environment, see :doc:`development.environment`.

Before writing code, you will have to do some configuration to connect your local repository with Gerrit. You will only need to do this the first time you set up the development environment.

You can set ``git-review`` to configure the project and install the Gerrit change-id commit hook with the following commands:

.. code-block:: console

   $ cd zaqar
   $ git review -s

If you get the error "We don't know where your Gerrit is", you will need to add a new git remote. The URL should be in the error message. Copy that and create the new remote. It looks something like:

.. code-block:: console

   $ git remote add gerrit ssh://<username>@review.opendev.org:29418/openstack/zaqar.git

In the project directory you have a hidden ``.git`` directory and a ``.gitreview`` file. You can view them with the following command:

.. code-block:: console

   $ ls -la

Making a patch
--------------

Pick or report a bug
####################

You can start tackling some bugs from the `bugs list in Launchpad`_. If you find a bug you want to work on, assign yourself to it. Make sure to read the bug report. If you need more information, ask the reporter to provide more details through a comment on Launchpad or through IRC or email.

If you find a bug, look through Launchpad to see if it has been reported. If it hasn't, report the bug and ask another developer to confirm it. You can start working on it if another developer confirms the bug.
Here are some details you might want to include when filling out a bug report:

* The release, or milestone, or commit ID corresponding to the software that you are running
* The operating system and version where you've identified the bug
* Steps to reproduce the bug, including what went wrong
* Description of the expected results instead of what you saw
* Portions of your log files so that you include only relevant excerpts

In the bug comments, you can contribute instructions on how to fix a given bug, and set the status to "Triaged". You can read more about `Launchpad bugs`_ in the official guide.

.. _`bugs list in Launchpad`: https://bugs.launchpad.net/zaqar
.. _`Launchpad bugs`: https://docs.openstack.org/project-team-guide/bugs.html

Workflow
########

Make sure your repo is up to date. You can update it with the following git commands:

.. code-block:: console

   $ git remote update
   $ git checkout master
   $ git pull --ff-only origin master

Create a topic branch. You can create one with the following git command:

.. code-block:: console

   $ git checkout -b TOPIC-BRANCH

If you are working on a blueprint, name your :samp:`{TOPIC-BRANCH}` ``bp/BLUEPRINT`` where :samp:`{BLUEPRINT}` is the name of a blueprint in Launchpad (for example, "bp/authentication"). The general convention when working on bugs is to name the branch ``bug/BUG-NUMBER`` (for example, "bug/1234567").

Read more about the commit syntax in the `Gerrit workflow`_ wiki.

.. _`Gerrit workflow`: https://docs.openstack.org/infra/manual/developers.html#development-workflow

Common problems
^^^^^^^^^^^^^^^

#. You realized that you were working in master and you haven't made any commits. Solution:

   .. code-block:: console

      $ git checkout -b newbranch
      $ git commit -a -m "Edited"

   If you already created the branch, omit the ``-b``. All your changes are now in :samp:`{newbranch}`. Problem solved.

#. You realized that you were working in master and you have made commits to master. Solution:

   .. code-block:: console

      $ git branch newbranch
      $ git reset --hard HEAD~x
      $ git checkout newbranch

   where ``x`` is the number of commits you have made to master. Remember, you will lose any uncommitted work. Your commits are now in :samp:`{newbranch}`. Problem solved.

#. You made multiple commits and realized that Gerrit requires one commit per patch. Solution:

   You need to squash your previous commits. Make sure you are in your branch and follow the `squashing guide`_. Then fill in the commit message properly. You have squashed your commits. Problem solved.
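   As a quick sketch, to squash your last three commits into one, start an interactive rebase (adjust the number to match how many commits you made):

   .. code-block:: console

      $ git rebase -i HEAD~3

   In the editor that opens, keep ``pick`` on the first commit, change the following ones to ``squash``, then save and write a single, proper commit message.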
Design principles
#################

Zaqar lives by the following design principles:

* `DRY`_
* `YAGNI`_
* `KISS`_

.. _`DRY`: https://en.wikipedia.org/wiki/Don%27t_repeat_yourself
.. _`YAGNI`: https://en.wikipedia.org/wiki/YAGNI
.. _`KISS`: https://en.wikipedia.org/wiki/KISS_principle

Try to stick to these design principles when working on your patch.

Test your code
##############

It is important to test your code and follow the Python code style guidelines. See :doc:`running_tests` for details on testing.

Submitting a patch
------------------

Once you have finished coding your fix, add and commit your final changes. Your commit message should:

* Provide a brief description of the change in the first line.
* Insert a single blank line after the first line.
* Provide a detailed description of the change in the following lines, breaking paragraphs where needed.
* The first line should be limited to 50 characters and should not end with a period.
* Subsequent lines should be wrapped at 72 characters.
* Put the 'Change-id', 'Closes-Bug #NNNNN' and 'blueprint NNNNNNNNNNN' lines at the very end.

Read more about `making a good commit message`_.
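Following those rules, a hypothetical commit message could look like the sketch below (the bug number and Change-Id are made up; Gerrit's commit hook normally generates the Change-Id line for you):

.. code-block:: none

   Fix validation of queue metadata updates

   Reject metadata documents that exceed the configured size limit
   so the API returns a 400 error instead of a server error.

   Closes-Bug: #1234567
   Change-Id: I0123456789abcdef0123456789abcdef01234567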
To submit it for review, use the following git command:

.. code-block:: console

   $ git review

You will see the URL of your review page once it is successfully sent. You can also see your reviews in :guilabel:`My Changes` in Gerrit.

The first thing to watch for is a ``+1`` in the :guilabel:`Verified` column next to your patch in the server and/or client list of pending patches. If the "Jenkins" user gives you a ``-1``, you'll need to check the log it posts to find out what gate test failed, update your patch, and resubmit.

You can set your patch as a :guilabel:`work in progress` if your patch is not ready to be merged, but you would still like some feedback from other developers. To do this, leave a review on your patch setting :guilabel:`Workflow` to ``-1``.

Once the gate has verified your patch, other Zaqar developers will take a look and submit their comments. When you get two or more ``+2``'s from core reviewers, the patch will be approved and merged.

Don't be discouraged if a reviewer submits their comments with a ``-1``. Patches iterate through several updates and reviews before they are ready for merging.

To reply to feedback, save all your comments as drafts, then click on the :guilabel:`Review` button. When replying to feedback, you as the patch author can use the score of ``0``. The only exception to using the score of ``0`` is when you discover a blocking issue and you don't want your patch to be merged; in that case, you can review your own patch with a ``-2`` while you decide whether to keep, refactor, or withdraw the patch.

Professional conduct
--------------------

The Zaqar team holds reviewers accountable for promoting a positive, constructive culture within our program. If you ever feel that a reviewer is not acting professionally or is violating the OpenStack community code of conduct, please let the PTL know immediately so that he or she can help resolve the issue.

.. _`making a good commit message`: https://wiki.openstack.org/wiki/GitCommitMessages
.. _`squashing guide`: http://gitready.com/advanced/2009/02/10/squashing-commits-with-rebase.html

././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/contributor/first_review.rst0000664000175100017510000001150615033040005022622 0ustar00mylesmyles
.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

=================
Your first review
=================

The review stage is a very important part of the development process. Following are some of the reasons this stage is important:

* Getting other developers' feedback minimizes the risk of adding regressions to the code base and ensures the quality of the code being merged.
* Building the community encourages everyone to review code. Everyone appreciates having their code reviewed.
* Since developers are always learning from being exposed to the points of view of others, reviews help developers to improve their coding skills.
* Providing a review is a great way to become familiar with the code.

Everyone is encouraged to review code. You don't need to know every detail of the code base. You need to understand only what the code related to the fix does.

Step by step
------------

Go to ``review.opendev.org`` and filter by `Open Zaqar fixes`_. Select a fix from the list to review. Try to select an easy patch for your first review. That will help you to gain some confidence. Download the patch to your local repository and test it:

.. code-block:: console

   $ git review -d [review-id]

The :samp:`{review-id}` is the number in the URL (check the screenshot for more details). Example:

.. code-block:: console

   $ git review -d 92979

.. image:: images/zaqar_review_id.png
   :alt: Zaqar review id

This git command creates a branch with the author's name and enables you to test the patch in your local environment.

* Inspect the code. Use all of the best programming practices you know as you review the code.
* Give code location feedback. Do you consider that some code would be better located in another place within the file, or maybe in another file? If so, suggest this in the review comment and score with a ``-1`` if you think that it's that important.
* Give code-style feedback. Do you think that the code structure could be improved? Keep the DRY, YAGNI and KISS principles in mind.
* Give grammar and orthography feedback. Many of our contributors are not native English speakers, so it is common to find some errors of this type.
* Make sure that:

  * The commit message is formatted appropriately. Check `Git Commit Messages`_ for more information on how you should write a git commit message.
  * The coding style matches guidelines given in ``HACKING.rst``.
  * The patch is not too big. You might need to split some patches to improve cohesion and/or reduce size.
  * The patch does what the commit message promises.
  * Unit and functional tests are included and/or updated.

* If during the inspection you see a specific line you would like to bring up to discussion in the final review, leave feedback as an inline comment in Gerrit. This will make the review process easier. You can also use the prefixes described in :doc:`reviewer_guide` for Zaqar inline comments.
* Keep in mind the :doc:`reviewer_guide` and be respectful when leaving feedback.
* Hit the :guilabel:`Review` button in the web UI to publish your comments and assign a score.
* Things to consider when leaving a score:

  * You can score with a ``-1`` if you think that there are things to fix. We have to be careful not to stall the cycle just because of a few nits, so downvoting also depends on the current stage of the development cycle and the severity of the flaw you see.
  * You can score with a ``0`` if you are the author of the fix and you want to respond to the reviewers' comments, or if you are a reviewer and you want to point out some reminder for future development (e.g. the deadline is the next day and the fix needs to be merged, but you want something to be improved).
  * You can score with a ``+1`` if the fix works and you think that the code looks good; upvoting is your choice.

* Remember to leave any comment that you think is important in the comment form. When you are done, click :guilabel:`Publish Comments`.

For more details on how to do a review, check out the `Gerrit Workflow Review section`_ document.
.. _`Open Zaqar fixes`: https://review.opendev.org/#/q/status:open+zaqar,n,z
.. _`Git Commit Messages`: https://wiki.openstack.org/wiki/GitCommitMessages
.. _`Gerrit Workflow Review section`: https://docs.openstack.org/infra/manual/developers.html#code-review

././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/contributor/gerrit.rst0000664000175100017510000000220715033040005021404 0ustar00mylesmyles
.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

========================
Code reviews with Gerrit
========================

Zaqar uses the `Gerrit`_ tool to review proposed code changes. The review site is https://review.opendev.org.

Gerrit is a complete replacement for GitHub pull requests. *All GitHub pull requests to the Zaqar repository will be ignored.*

See `Development Workflow with Gerrit`_ for more detailed documentation on how to work with Gerrit.

.. _Gerrit: https://www.gerritcodereview.com/
.. _Development Workflow with Gerrit: https://docs.openstack.org/infra/manual/developers.html#development-workflow

././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5530136 zaqar-20.1.0.dev29/doc/source/contributor/images/0000775000175100017510000000000015033040026020625 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/contributor/images/zaqar_review_id.png0000664000175100017510000023016315033040005024510 0ustar00mylesmyles
[binary PNG image data omitted: ``zaqar_review_id.png``, the review-id screenshot referenced in :doc:`first_review`]

././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/contributor/index.rst0000664000175100017510000000211315033040005021213 0ustar00mylesmyles
==================
Contribution Guide
==================

.. toctree::
   :maxdepth: 2

   welcome
   project_info
   development.environment
   first_patch
   first_review
   launchpad
   gerrit
   jenkins
   reviewer_guide
   running_tests
   test_suite

Modules reference
~~~~~~~~~~~~~~~~~

Zaqar is composed of two layers:

.. toctree::
   :maxdepth: 1

   transport
   storage

The **transport drivers** are responsible for interacting with Zaqar clients. Every query made by clients is processed by the transport layer, which is in charge of passing this information to the backend and then returning the response in a format understandable by the client.

The **storage drivers** are responsible for interacting with the storage backends and, in that way, storing or retrieving the data coming from the transport layer.

In order to keep these layers decoupled, we have established that **checks should be performed in the appropriate layer**. In other words, transport drivers must guarantee that the incoming data is well-formed and storage drivers must enforce that their data model stays consistent.

././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/contributor/jenkins.rst0000664000175100017510000000260115033040005021552 0ustar00mylesmyles
.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

===================================
Continuous integration with Jenkins
===================================

Zaqar uses a `Jenkins`_ server to automate development tasks. The Jenkins front-end is at http://jenkins.openstack.org. You must have an account on `Launchpad`_ to be able to access the OpenStack Jenkins site.

Jenkins performs tasks such as running static code analysis, running unit tests, and running functional tests. For more details on the jobs being run by Jenkins, see the code reviews on https://review.opendev.org. Tests are run automatically, and comments are put on the reviews automatically with the results.

You can also get a view of the jobs that are currently running from the Zuul status dashboard, http://zuul.openstack.org/.

.. _Jenkins: http://jenkins-ci.org
.. _Launchpad: http://launchpad.net

././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/contributor/launchpad.rst0000664000175100017510000000337715033040005022056 0ustar00mylesmyles
==============================
Project hosting with Launchpad
==============================

`Launchpad`_ hosts the Zaqar project. The Zaqar project homepage on Launchpad is http://launchpad.net/zaqar.

Launchpad credentials
---------------------

Creating a login on Launchpad is important even if you don't use the Launchpad site itself, since Launchpad credentials are used for logging in on several OpenStack-related sites.
These sites include:

* `Wiki`_
* Gerrit (see :doc:`gerrit`)
* Jenkins (see :doc:`jenkins`)

Mailing list
------------

The mailing list address is ``openstack-discuss@lists.openstack.org``. This is a common mailing list across all OpenStack projects. To participate in the mailing list, subscribe at http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss

The mailing list archives are at http://lists.openstack.org/pipermail/openstack-dev (pre-December 2018) and at http://lists.openstack.org/pipermail/openstack-discuss for current messages.

Bug tracking
------------

Report Zaqar bugs at https://bugs.launchpad.net/zaqar

Feature requests (Blueprints)
-----------------------------

Zaqar uses Launchpad Blueprints to track feature requests. Blueprints are at https://blueprints.launchpad.net/zaqar.

Technical support (Answers)
---------------------------

Zaqar uses Launchpad Answers to track Zaqar technical support questions. The Zaqar Answers page is at https://answers.launchpad.net/zaqar. Note that `Ask OpenStack`_ (which is not hosted on Launchpad) can also be used for technical support requests. You can also reach us in the ``#openstack-zaqar`` IRC channel at ``OFTC``.

.. _Launchpad: https://launchpad.net
.. _Wiki: https://wiki.openstack.org
.. _Zaqar Team: https://launchpad.net/zaqar
.. _Ask OpenStack: https://ask.openstack.org/

././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/contributor/project_info.rst0000664000175100017510000001411515033040005022572 0ustar00mylesmyles
.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

.. _project_info:

============
Project Info
============

Maintainers
-----------

Project Team Lead (PTL)
~~~~~~~~~~~~~~~~~~~~~~~

+------------------------------+---------------------------------------------+
| Contact                      | Area of interest                            |
+------------------------------+---------------------------------------------+
| | Feilong Wang               | * Chief Architect                           |
| | flwang (irc)               | * Release management                        |
| | flwang@catalyst.net.nz     | * Community management                      |
| | flwang@qq.com              | * Core team management                      |
|                              | * Road Map                                  |
+------------------------------+---------------------------------------------+

| *If you would like to refactor the whole of Zaqar or have UX/community/other issues, please contact me.*

Project Core maintainers
~~~~~~~~~~~~~~~~~~~~~~~~

+------------------------------+---------------------------------------------+
| Contact                      | Area of interest                            |
+------------------------------+---------------------------------------------+
| | Xiyuan Wang                | * Zaqar related questions or bugs.          |
| | wxy (irc) UTC0200-UTC1000  | * Production problem for Zaqar.             |
| | wangxiyuan1007@gmail.com   | * Integration with container.               |
| | | | | +------------------------------+---------------------------------------------+ | | Hao Wang | * Introduce interesting and helpful | | | | features | | | wanghao (irc) | * Bug Fix and Code Optimization | | | sxmatch1986@gmail.com | * Notification Service | | | | | +------------------------------+---------------------------------------------+ | | Thomas Herve | * Websocket | | | therve (irc) | * Swift backend | | | therve@redhat.com | | | | | | +------------------------------+---------------------------------------------+ | *All cores from this list are reviewing all changes that are proposed to Zaqar. To avoid duplication of efforts, please contact them before starting work on your code.* Storage Backend Maintainers ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +------------------------------+---------------------------------------------+ | Contact | Area of interest | +------------------------------+---------------------------------------------+ | | Hao Wang | * MongoDB | | | wanghao (irc) | | | | sxmatch1986@gmail.com | | +------------------------------+---------------------------------------------+ | | gecong | * Swift | | | gecong (irc) | | | | ge.cong@zte.com.cn | | | | | | +------------------------------+---------------------------------------------+ | | gengchc2 | * Redis | | | gengchc (irc) | | | | geng.changcai2@zte.com.cn | | | | | | +------------------------------+---------------------------------------------+ | | Feilong Wang | * SqlAlchemy | | | flwang (irc) | | | | flwang@catalyst.net.nz | | | | | | +------------------------------+---------------------------------------------+ | *All cores from this list are responsible for maintaining the storage backend. To avoid duplication of efforts, please contact them before starting work on your own backends.* Useful links ------------ - `Source code`_ - `Project space`_ - `Bugs`_ - `Patches on review`_ Where can I discuss and propose changes? ---------------------------------------- - Our IRC channel: **#openstack-zaqar** on **OFTC**; - Bi-Weekly Zaqar team meeting (in IRC): **#openstack-zaqar** on **OFTC**, held on Monday at 14:00 UTC; - OpenStack mailing list: **openstack-discuss@lists.openstack.org** (see `Subscription and usage instructions`_); - `Zaqar team on Launchpad`_: Answers/Bugs/Blueprints. .. references: .. _Source code: https://github.com/openstack/zaqar .. _Project space: https://launchpad.net/zaqar .. _Bugs: https://bugs.launchpad.net/zaqar .. _Patches on review: https://review.opendev.org/#/q/status:open+zaqar,n,z .. _IRC logs: http://irclog.perlgeek.de/openstack-zaqar .. _Subscription and usage instructions: http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss .. _Zaqar team on Launchpad: https://launchpad.net/zaqar ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/contributor/reviewer_guide.rst0000664000175100017510000001416215033040005023120 0ustar00mylesmyles.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
============== Reviewer Guide ============== Overview -------- Our program follows the usual OpenStack review process, albeit with some important additions (see below). See also: :doc:`first_review`. Be Professional --------------- The PTL, with the support of the core reviewers, is ultimately responsible for holding contributors accountable for creating a positive, constructive, and productive culture. Inappropriate behavior will not be tolerated. (`Why is this important?`_) Do This: * Act professionally. * Treat others as friends and family. * Seek first to understand. * Be honest, transparent, and constructive. * Use clear, concise language. * Use prefixes to clarify the tone and intent of your comments. Don't Do This: * Use indecent, profane, or degrading language of any kind. * Hold a patch hostage for an ulterior motive, political or otherwise. * Abuse the review system to discuss big issues that would be better hashed out on the mailing list, in IRC, or during OpenStack Summit design sessions. * Engage in bullying behaviors, including but not limited to: * Belittling others' opinions * Persistent teasing or sarcasm * Insulting, threatening, or yelling at someone * Accusing someone of being incompetent * Setting someone up to fail * Humiliating someone * Isolating someone from others * Withholding information to gain an advantage * Falsely accusing someone of errors * Sabotaging someone's work Reviewing Docs -------------- When possible, enlist the help of a professional technical writer to help review each doc patch. All reviewers should familiarize themselves with the `OpenStack Documentation Contributor Guide`_. When reviewing user guide patches, please run them through Maven and proof the resulting docs before giving your ``+1`` or ``+2``. Reviewing Code -------------- When reviewing code patches, use your best judgment and seek to provide constructive feedback to the author. Compliment them on things they have done well, and highlight possible improvements. Also, dedicate as much time as necessary in order to provide a careful analysis of the code. Don't assume that someone else will catch any issues you yourself miss; in other words, pretend you are the only person reviewing a given patch. Remember, "given enough eyeballs, all bugs are shallow" ceases to be true the moment individual reviewers become complacent. Some things to check when reviewing code: * Patch aligns with project goals, and is ideally associated with a bp or bug. * Commit message is formatted appropriately and contains external references as needed. * Coding style matches guidelines given in ``HACKING.rst``. * Patch is cohesive and not too big to be reviewed in a timely manner (some patches may need to be split to improve cohesion and/or reduce size). * Patch does what the commit message promises. * Algorithms are implemented correctly, and chosen appropriately. * Data schemas follow best practices. * Unit and functional tests have been included and/or updated. * Code contains no bugs (pay special attention to edge cases that tests may have missed). Use Prefixes ------------ We encourage the use of prefixes to clarify the tone and intent of your review comments. This is one way we try to mitigate misunderstandings that can lead to bad designs, bad code, and bad blood. .. list-table:: **Prefixes** :widths: 6 80 8 :header-rows: 1 * - Prefix - What the reviewer is saying - Blocker? * - KUDO - You did a nice job here, and I wanted to point that out. Keep up the good work!
- No * - TEST - I think you are missing a test for this feature, code branch, specific data input, etc. - Yes * - BUG - I don't think this code does what it was intended to do, or I think there is a general design flaw here that we need to discuss. - Yes * - SEC - This is a serious security vulnerability and we had better address it before merging the code. - Yes * - PERF - I have a concern that this won't be fast enough or won't scale. Let's discuss the issue and benchmark alternatives. - Yes * - DSQ - I think there is something critical here that we need to discuss in IRC or on the mailing list before moving forward. - Yes * - STYLE - This doesn't seem to be consistent with other code and with ``HACKING.rst``. - Yes * - Q - I don't understand something. Can you clarify? - Yes * - DRY - This could be modified to reduce duplication of code, data, etc. See also: `Wikipedia: Don't repeat yourself`_ - Maybe * - YAGNI - This feature or flexibility probably isn't needed, or isn't worth the added complexity; if it is, we can always add the feature later. See also: `Wikipedia: You aren't gonna need it`_ - Maybe * - NIT - This is a nitpick that I can live with if we want to merge without addressing it. - No * - IMO - I'm chiming in with my opinion in response to someone else's comment, or I just wanted to share an observation. Please take what I say with a grain of salt. - No * - FYI - I just wanted to share some useful information. - No .. _`Why is this important?` : https://thoughtstreams.io/kgriffs/technical-communities/5060/ .. _`OpenStack Documentation Contributor Guide` : https://docs.openstack.org/contributor-guide/index.html .. _`Wikipedia: Don't repeat yourself` : https://en.wikipedia.org/wiki/Don't_repeat_yourself .. _`Wikipedia: You aren't gonna need it` : https://en.wikipedia.org/wiki/You_aren't_gonna_need_it ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/contributor/running_tests.rst0000664000175100017510000001253415033040005023016 0ustar00mylesmyles.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============= Running tests ============= Zaqar contains a suite of tests (both unit and functional) in the ``zaqar/tests`` directory. See :doc:`test_suite` for details. Any proposed code change is automatically rejected by the OpenStack Zuul server [#f1]_ if the change causes test failures. It is recommended for developers to run the test suite before submitting a patch for review. This allows catching errors as early as possible. Preferred way to run the tests ------------------------------ The preferred way to run the unit tests is using ``tox``. It executes tests in an isolated environment, by creating a separate virtualenv and installing dependencies from the ``requirements.txt`` and ``test-requirements.txt`` files, so the only package you need to install is ``tox`` itself: .. code-block:: console $ pip install tox See `the unit testing section of the Testing wiki page`_ for more information. Following are some simple examples. ..
note:: Zaqar unit tests currently rely on a running instance of MongoDB. You can either install and start MongoDB locally using your package manager, or run it via a container. For example: .. code-block:: shell docker run --ulimit nofile=1000000 -p 27017:27017 docker.io/mongo:latest To run the Python 3.12 tests: .. code-block:: console $ tox -e py312 To run the style tests: .. code-block:: console $ tox -e pep8 To run tests under multiple environments, separate the items with commas: .. code-block:: console $ tox -e py313,pep8 .. _the unit testing section of the Testing wiki page: https://wiki.openstack.org/wiki/Testing#Unit_Tests Running a subset of tests ^^^^^^^^^^^^^^^^^^^^^^^^^ Instead of running all tests, you can specify an individual directory, file, class or method that contains test code, i.e., filter the full names of tests by a string. To run the tests located only in the ``zaqar/tests/unit/storage`` directory use: .. code-block:: console $ tox -e py312 -- zaqar.tests.unit.storage To run the tests specific to the MongoDB driver in the ``zaqar/tests/unit/storage/test_impl_mongodb.py`` file: .. code-block:: console $ tox -e py312 -- test_impl_mongodb To run the tests in the ``MongodbMessageTests`` class in the ``tests/unit/storage/test_impl_mongodb.py`` file: .. code-block:: console $ tox -e py312 -- test_impl_mongodb.MongodbMessageTests To run the ``MongodbMessageTests.test_message_lifecycle`` test method in the ``tests/unit/storage/test_impl_mongodb.py`` file: .. code-block:: console $ tox -e py312 -- test_impl_mongodb.MongodbMessageTests.test_message_lifecycle Running functional tests ------------------------ Zaqar's functional tests treat Zaqar as a black box. In other words, the API calls attempt to simulate an actual user. Unlike unit tests, the functional tests do not use mock endpoints. Functional test modes ^^^^^^^^^^^^^^^^^^^^^ Functional tests can run in integration mode and non-integration mode. Integration mode """""""""""""""" In integration mode, functional tests are performed on Zaqar server instances running as separate processes. This is real functional testing. To run functional tests in integration mode, execute: .. code-block:: console $ tox -e integration Non-integration mode """""""""""""""""""" In non-integration mode, functional tests are performed on Zaqar server instances running as Python objects. This mode doesn't guarantee enough black-boxness for Zaqar, but the tests run 10 times faster than in integration mode. To run functional tests in non-integration mode, execute: .. code-block:: console $ tox -e py312 -- zaqar.tests.functional Using a custom MongoDB instance ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ If you need to run functional tests against a non-default MongoDB installation, you can set the ``ZAQAR_TEST_MONGODB_URL`` and ``ZAQAR_TEST_MONGODB`` environment variables. For example: .. code-block:: console $ export ZAQAR_TEST_MONGODB=True $ export ZAQAR_TEST_MONGODB_URL=mongodb://remote-server:27017 Using custom parameters ^^^^^^^^^^^^^^^^^^^^^^^ You can edit the default functional test configuration file, ``zaqar/tests/etc/functional-tests.conf``, according to your needs. For example, if you want to run the functional tests with Keystone authentication enabled, input a valid set of credentials in the ``[auth]`` section of the configuration file and set the ``auth_on`` parameter to ``True``, as in the sketch below.
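A minimal sketch of such an ``[auth]`` section follows. The ``auth_on`` option name and the ``[auth]`` section come from the description above; the credential option names and values are placeholders that you should adapt to the actual contents of ``functional-tests.conf`` in your tree:

.. code-block:: ini

   [auth]
   # Enable Keystone authentication for the functional tests.
   auth_on = True
   # Placeholder credentials; replace them with a valid user
   # known to your Identity service.
   username = ZAQARTESTUSER
   password = ZAQARTESTPASSWORD
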
Using a local MySQL database ^^^^^^^^^^^^^^^^^^^^^^^^^^ To use a testing environment with database support similar to upstream CI, you can run ``zaqar/tools/test-setup.sh`` to create the required MySQL user ``openstack_citest`` with the same password. The user is required by oslo.db's tests. Zaqar needs it because Zaqar's SQLAlchemy database migration leverages oslo.db's migration test base. .. rubric:: Footnotes .. [#f1] See https://docs.opendev.org/opendev/system-config/latest/zuul.html ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/contributor/storage.rst0000664000175100017510000000075215033040005021557 0ustar00mylesmyles--------------------------------- API reference for storage drivers --------------------------------- .. currentmodule:: zaqar.storage.base .. autoclass:: DataDriverBase :noindex: :members: .. autoclass:: ControlDriverBase :noindex: :members: .. autoclass:: Queue :noindex: :members: .. autoclass:: Message :noindex: :members: .. autoclass:: Claim :noindex: :members: -------------- MongoDB Driver -------------- .. automodule:: zaqar.storage.mongodb ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/contributor/test_suite.rst0000664000175100017510000000635615033040005022311 0ustar00mylesmyles.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==================== Test suite structure ==================== Test types ---------- There are three types of tests for Zaqar: Unit tests Unit tests check modules separately. For example, there are checks for each individual method that the storage layer provides. Functional tests Functional tests verify that the service works as expected. In particular, in Zaqar they exercise the API endpoints and validate that the API responses conform to the specs. These include positive and negative tests. Tempest tests Tempest tests are integration tests for OpenStack [#f1]_. Tempest tests for Zaqar are available in the `Tempest repository`_. Refer to the :doc:`running_tests` document for details on how to run Unit and Functional tests. Refer to the `Tempest repository`_ for details on how to run Tempest tests. Code structure -------------- The test suite lives in the ``zaqar/tests`` directory of Zaqar: * ``zaqar/tests/etc`` Contains various configuration files for Zaqar. They help to test how Zaqar works in different configurations. * ``zaqar/tests/functional`` Contains functional tests. * ``zaqar/tests/unit`` Contains unit tests. The base class of all test classes is located in the ``zaqar/tests/base.py`` file. Test invocation --------------- When you run tests via the ``tox -e py312`` command in the root directory of Zaqar: #. The tox program executes: #. Looks for the ``tox.ini`` file. #. Creates the ``.tox`` directory for storing Python environments. #. Parses this file and finds parameters for the py312 testing environment. #. Sets this environment up and activates it. #. Sets environment variables for this environment that are described in ``tox.ini`` #.
In the case of Zaqar, it invokes the Testr program in the environment. You can find more information about Tox in the `OpenStack Tox testing manual`_ and in the official `Tox documentation`_. #. The Testr (Test Repository) program executes: #. Looks for the ``testr.ini`` file. #. Parses this file and finds parameters for executing tests. #. Creates the ``.testrepository`` directory for storing statistics of executing tests. #. In the case of Zaqar, it invokes the ``Subunit`` program, which finds all tests and executes them. You can find more information about Testr in the `OpenStack Testr manual`_. .. rubric:: Footnotes .. [#f1] See https://docs.openstack.org/tempest/latest/#overview .. _`OpenStack Tox testing manual` : https://wiki.openstack.org/wiki/Testing#Unit_Testing_with_Tox .. _`Tox documentation` : https://tox.readthedocs.org/en/latest/ .. _`OpenStack Testr manual` : https://wiki.openstack.org/wiki/Testr .. _`Tempest repository` : https://git.openstack.org/cgit/openstack/tempest ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/contributor/transport.rst0000664000175100017510000000017415033040005022145 0ustar00mylesmyles========= Transport ========= .. currentmodule:: zaqar.transport.base .. autoclass:: DriverBase :noindex: :members: ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/contributor/welcome.rst0000664000175100017510000002064315033040005021547 0ustar00mylesmyles.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ======================== Welcome new contributors ======================== First Steps =========== It's great that you're interested in contributing to Zaqar. First of all, make sure you join the Zaqar communication forums: * Subscribe to the Zaqar `mailing lists`_. * Join the Zaqar team on IRC. You can chat with us directly in the ``#openstack-zaqar`` channel on ``OFTC``. If you don't know how to use IRC, you can find some directions in the `OpenStack IRC wiki`_. * Answer and ask questions on `Ask OpenStack`_. How can I contribute? ===================== There are many ways you can contribute to Zaqar. Of course coding is one, but you can also join Zaqar as a tester, documenter, designer or translator. Coding ------ Bug fixing ^^^^^^^^^^ The first area where you can help is bug fixing. ``Confirmed`` bugs are usually your best choice. ``Triaged`` bugs should even contain tips on how they should be fixed. You can find both of them on the `Zaqar's Confirmed and Triaged bugs`_ web page. Once you have selected the bug you want to work on, go ahead and assign it to yourself, branch the code, implement the fix, and propose your change for review. You can find information on how to do it in the :doc:`first_patch` manual. Some easy-to-fix bugs may be marked with the ``low-hanging-fruit`` tag: those are good targets for a beginner. Bug triaging ^^^^^^^^^^^^ You can also help Zaqar with bug triaging. Reported bugs need care: prioritizing them correctly, confirming them, making sure they don't go stale. All those tasks help immensely.
If you want to start contributing code, but you are not a hardcore developer, consider helping in this area. Bugs can be marked with different tags according to their status: * ``New`` bugs are those bugs that have been reported by a user but haven't been verified by the community yet. * ``Confirmed`` bugs are those bugs that have been reproduced by someone other than the reporter. * ``Triaged`` bugs are those bugs that have been reproduced by a core developer. * ``Incomplete`` bugs are those bugs that don't have enough information to be reproduced. * ``In Progress`` bugs are those bugs that are being fixed by some developer. This status is set automatically by the Gerrit review system once a fix is proposed by a developer. You don't need to set it manually. * ``Invalid`` bugs are those bugs that don't qualify as a bug. Usually a support request or something unrelated to the project. You can learn more about this in Launchpad's `Of Bugs and Statuses`_. You only have to worry about ``New`` bugs. If you can reproduce them, you can mark them as ``Confirmed``. If you cannot reproduce them, you can ask the reporter to provide more information and mark them as ``Incomplete``. If you consider that they aren't bugs, mark them as ``Invalid``. (Be careful with this. Asking someone else in Zaqar is always a good idea.) Also, you can contribute instructions on how to fix a given bug. Check out the `Bug Triage`_ wiki for more information. Reviewing ^^^^^^^^^ Every patch submitted to OpenStack gets reviewed before it can be approved and merged. Zaqar gets a lot of contributions and everyone can (and is encouraged to) review Zaqar's existing patches. Pick an open review and go through it, test it if possible, and leave a comment with a ``+1`` or ``-1`` vote describing what you discovered. If you're planning on submitting patches of your own, it's a great way to learn about what the community cares about and to learn about the code base. Make sure you read the :doc:`first_review` manual. Feature development ^^^^^^^^^^^^^^^^^^^ Once you get familiar with the code, you can start to contribute new features. New features get implemented every 6 months, in each `OpenStack development cycle`_. We use `Launchpad Blueprints`_ to track the design and implementation of significant features, and the Zaqar team uses Design Summits every 6 months to get together and discuss things in person with the rest of the community. Code should be proposed for inclusion before Zaqar reaches the final feature milestone of the development cycle. Testing ------- Testing efforts are highly related to coding. If you find that there are test cases missing or that some tests could be improved, you are encouraged to report it as a bug and then provide your fix. See :doc:`running_tests` and :doc:`test_suite` for information on how to run tests and how the tests are organized in Zaqar. See :doc:`first_patch` for information on how to provide your fix. Documenting ----------- You can contribute to `Zaqar's Contributor Documentation`_ which you are currently reading and to `Zaqar's Wiki`_. To fix a documentation bug, check the bugs marked with the ``doc`` tag in Zaqar's bug list. If you want to report a documentation bug, don't forget to add the ``doc`` tag to it. `Zaqar's Contributor Documentation`_ is compiled from source files in ``.rst`` (reStructuredText) format located in the ``doc/source/`` directory of the Zaqar repository. The `"openstack-manuals" project`_ houses the documentation that is published to ``docs.openstack.org``.
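For example, assuming the standard OpenStack ``docs`` environment is defined in Zaqar's ``tox.ini`` (the browser command is just an illustration), you can build and proof the documentation locally before proposing a doc patch:

.. code-block:: console

   $ tox -e docs                        # build the HTML docs from doc/source/
   $ firefox doc/build/html/index.html  # proof the generated pages locally
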
Before contributing to `Zaqar's Contributor Documentation`_, you have to read the :doc:`first_patch` manual and the `OpenStack Documentation Contributor Guide`_. Also, you can monitor `Ask OpenStack`_ to curate the best answers that can be folded into the documentation. Designing --------- Zaqar doesn't have a user interface yet. The Zaqar team is working to `integrate Zaqar to the OpenStack Dashboard (Horizon)`_. If you're a designer or usability professional, your help will be really appreciated. Whether it's reviewing upcoming features as a user and giving feedback, designing features, testing designs or features with users, or helping to build use cases and requirements, everything is useful. Translating ----------- You can translate Zaqar into a language you know. Read the `Translation wiki page`_ for more information on how OpenStack manages translations. Zaqar has adopted Zanata, and you can use the `OpenStack Zanata site`_ as a starting point to translate any of the OpenStack projects, including Zaqar. It's easier to start translating directly on the `OpenStack Zanata site`_, as there is no need to download any files or applications to get started. .. _`mailing lists` : https://wiki.openstack.org/wiki/MailingLists .. _`OpenStack IRC wiki` : https://wiki.openstack.org/wiki/IRC .. _`Ask OpenStack` : https://ask.openstack.org/ .. _`Zaqar's Confirmed and Triaged bugs` : https://bugs.launchpad.net/zaqar/+bugs?field.searchtext=&orderby=-importance&search=Search&field.status%3Alist=CONFIRMED&field.status%3Alist=TRIAGED&assignee_option=any&field.assignee=&field.bug_reporter=&field.bug_commenter=&field.subscriber=&field.structural_subscriber=&field.tag=&field.tags_combinator=ANY&field.has_cve.used=&field.omit_dupes.used=&field.omit_dupes=on&field.affects_me.used=&field.has_patch.used=&field.has_branches.used=&field.has_branches=on&field.has_no_branches.used=&field.has_no_branches=on&field.has_blueprints.used=&field.has_blueprints=on&field.has_no_blueprints.used=&field.has_no_blueprints=on .. _`Of Bugs and Statuses` : http://blog.launchpad.net/general/of-bugs-and-statuses .. _`Bug Triage` : https://wiki.openstack.org/wiki/BugTriage .. _`OpenStack development cycle` : https://wiki.openstack.org/wiki/ReleaseCycle .. _`Launchpad Blueprints` : https://wiki.openstack.org/wiki/Blueprints .. _`OpenStack Documentation Contributor Guide` : https://docs.openstack.org/contributor-guide/index.html .. _`Zaqar's Contributor Documentation` : https://docs.openstack.org/zaqar/latest/ .. _`Zaqar's Wiki` : https://wiki.openstack.org/wiki/Zaqar .. _`"openstack-manuals" project` : https://wiki.openstack.org/wiki/Documentation .. _`integrate Zaqar to the OpenStack Dashboard (Horizon)` : https://blueprints.launchpad.net/zaqar-ui/ .. _`Translation wiki page` : https://wiki.openstack.org/wiki/Translations#Translation_.26_Management .. _`OpenStack Zanata site` : https://translate.openstack.org/ ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/glossary.rst0000664000175100017510000000643015033040005017403 0ustar00mylesmyles.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License. ======== Glossary ======== Messaging Service Concepts ========================== The Messaging Service is a multi-tenant message queue implementation that utilizes a RESTful HTTP interface to provide an asynchronous communications protocol, which is one of the main requirements of today's scalable applications. .. glossary:: Queue Queue is a logical entity that groups messages. Ideally, a queue is created per work type. For example, if you want to compress files, you would create a queue dedicated for this job. Any application that reads from this queue would only compress files. Message Message is sent through a queue and exists until it is deleted by a recipient or automatically by the system based on a TTL (time-to-live) value. Claim Claim is a mechanism to mark messages so that other workers will not process the same message. Worker Worker is an application that reads one or multiple messages from the queue. Producer Producer is an application that creates messages in one or multiple queues. Publish - Subscribe Publish - Subscribe is a pattern where all worker applications have access to all messages in the queue. Workers cannot delete or update messages. Producer - Consumer Producer - Consumer is a pattern where each worker application that reads the queue has to claim the message in order to prevent duplicate processing. Later, when the work is done, the worker is responsible for deleting the message. If a message is not deleted within a predefined time (claim TTL), it can be claimed by other workers. Message TTL Message TTL is a time-to-live value and defines how long a message will be accessible. Claim TTL Claim TTL is a time-to-live value and defines how long a message will remain in the claimed state. A message can be claimed by one worker at a time. Queues Database The queues database stores the information about the queues and the messages within these queues. The storage layer has to guarantee durability and availability of the data. Pooling If pooling is enabled, the queuing service uses multiple queues databases in order to scale horizontally. A pool (queues database) can be added at any time without stopping the service. Each pool has a weight that is assigned at creation time but can be changed later. Pooling is done by queue, which means that all messages for a particular queue can be found in the same pool (queues database). Catalog Database If pooling is enabled, a catalog database has to be created. The catalog database maintains the ``queues`` to ``queues database`` mapping. The storage layer has to guarantee durability and availability of the data. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/index.rst0000664000175100017510000000757215033040005016657 0ustar00mylesmyles.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ===================================== Welcome to Zaqar's Documentation!
===================================== Zaqar is a multi-tenant cloud messaging and notification service for web and mobile developers. The service features a REST API, which developers can use to send messages between various components of their SaaS and mobile applications, by using a variety of communication patterns. Underlying this API is an efficient messaging engine designed with scalability and security in mind. The Websocket API is also available. Other OpenStack components can integrate with Zaqar to surface events to end users and to communicate with guest agents that run in the "over-cloud" layer. Key features ------------ Zaqar provides the following key features: * Choice between two communication transports. Both with Keystone support: * Firewall-friendly, **HTTP-based RESTful API**. Many of today's developers prefer a more web-friendly HTTP API. They value the simplicity and transparency of the protocol, its firewall-friendly nature, and its huge ecosystem of tools, load balancers and proxies. In addition, cloud operators appreciate the scalability aspects of the REST architectural style. * **Websocket-based API** for persistent connections. The Websocket protocol provides communication over persistent connections. Unlike HTTP, where new connections are opened for each request/response pair, Websocket can transfer multiple requests/responses over a single TCP connection. It saves a lot of network traffic and minimizes delays. * Multi-tenant queues based on Keystone project IDs. * Support for several common patterns including event broadcasting, task distribution, and point-to-point messaging. * Component-based architecture with support for custom backends and message filters. * Efficient reference implementation with an eye toward low latency and high throughput (dependent on backend). * Highly-available and horizontally scalable. * Support for subscriptions to queues. Several notification types are available: * Email notifications. * Webhook notifications. * Websocket notifications. Project scope ------------- The Zaqar API is data-oriented. That is, it does not provision message brokers and expose those directly to clients. Instead, the API acts as a bridge between the client and one or more backends. A provisioning service for message brokers, however useful, serves a somewhat different market from what Zaqar is targeting today. With that in mind, if users are interested in a broker provisioning service, the community should consider starting a new project to address that need. Design principles ----------------- Zaqar, as with all OpenStack projects, is designed with the following guidelines in mind: * **Component-based architecture.** Quickly add new behaviors * **Highly available and scalable.** Scale to very serious workloads * **Fault tolerant.** Isolated processes avoid cascading failures * **Recoverable.** Failures should be easy to diagnose, debug, and rectify * **Open standards.** Be a reference implementation for a community-driven API Contents -------- .. toctree:: :maxdepth: 2 user/index admin/index install/index configuration/index contributor/contributing contributor/index cli/index ..
toctree:: :maxdepth: 1 glossary ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5540135 zaqar-20.1.0.dev29/doc/source/install/0000775000175100017510000000000015033040026016454 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/install/get_started.rst0000664000175100017510000000516115033040005021513 0ustar00mylesmyles========================== Messaging service overview ========================== The Messaging service is multi-tenant, fast, reliable, and scalable. It allows developers to share data between distributed application components performing different tasks, without losing messages or requiring each component to be always available. The service features a RESTful API and a Websocket API, which developers can use to send messages between various components of their SaaS and mobile applications, by using a variety of communication patterns. Key features ~~~~~~~~~~~~ The Messaging service provides the following key features: * Choice between two communication transports. Both with Identity service support: * Firewall-friendly, **HTTP-based RESTful API**. Many of today's developers prefer a more web-friendly HTTP API. They value the simplicity and transparency of the protocol, its firewall-friendly nature, and its huge ecosystem of tools, load balancers and proxies. In addition, cloud operators appreciate the scalability aspects of the REST architectural style. * **Websocket-based API** for persistent connections. The Websocket protocol provides communication over persistent connections. Unlike HTTP, where new connections are opened for each request/response pair, Websocket can transfer multiple requests/responses over a single TCP connection. It saves a lot of network traffic and minimizes delays. * Multi-tenant queues based on Identity service IDs. * Support for several common patterns including event broadcasting, task distribution, and point-to-point messaging. * Component-based architecture with support for custom back ends and message filters. * Efficient reference implementation with an eye toward low latency and high throughput (dependent on back end). * Highly-available and horizontally scalable. * Support for subscriptions to queues. Several notification types are available: * Email notifications * Webhook notifications * Websocket notifications Layers of the Messaging service ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The Messaging service has the following layers: * The transport layer (Messaging application) which can provide these APIs: * HTTP RESTful API (via ``wsgi`` driver). * Websocket API (via ``websocket`` driver). * The storage layer which keeps all the data and metadata about queues and messages. It has two sub-layers: * The management store database (Catalog). Can be ``MongoDB`` database (or ``MongoDB`` replica-set) or SQL database. * The message store databases (Pools). Can be ``MongoDB`` database (or ``MongoDB`` replica-set) or ``Redis`` database. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/install/index.rst0000664000175100017510000000303415033040005020312 0ustar00mylesmyles================== Installation Guide ================== .. toctree:: get_started.rst install.rst verify.rst next-steps.rst The Messaging service is multi-tenant, fast, reliable, and scalable.
It allows developers to share data between distributed application components performing different tasks, without losing messages or requiring each component to be always available. The service features a RESTful API and a Websocket API, which developers can use to send messages between various components of their SaaS and mobile applications, by using a variety of communication patterns. This chapter assumes a working setup of OpenStack following the base Installation Guide. Ocata ~~~~~ To install Zaqar, see the Ocata Messaging service install guide for each distribution: - `Ubuntu `__ - `CentOS and RHEL `__ - `openSUSE and SUSE Linux Enterprise `__ Newton ~~~~~~ To install Zaqar, see the Newton Messaging service install guide for each distribution: - `Ubuntu `__ - `CentOS and RHEL `__ - `openSUSE and SUSE Linux Enterprise `__ ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/install/install-obs.rst0000664000175100017510000005021615033040005021436 0ustar00mylesmyles.. _install-obs: Install and configure for openSUSE and SUSE Linux Enterprise ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Messaging service for openSUSE Leap 42.1 and SUSE Linux Enterprise Server 12 SP1. This section assumes that you already have a working OpenStack environment with at least Identity service installed. Here you can find instructions and recommended settings for installing the Messaging service in a small configuration: one web server with the Messaging service configured to use a replica-set of three ``MongoDB`` database servers. Because only one web server is used, the Messaging service installed by using these instructions can't be considered highly available; see :doc:`install`. In this tutorial these server names are used as examples: * Web server with Messaging service: ``WEB0.EXAMPLE-MESSAGES.NET``. * Database servers: ``MYDB0.EXAMPLE-MESSAGES.NET``, ``MYDB1.EXAMPLE-MESSAGES.NET``, ``MYDB2.EXAMPLE-MESSAGES.NET``. * Identity service server: ``IDENTITY.EXAMPLE-MESSAGES.NET``. Prerequisites ------------- Before you install Messaging service, you must meet the following system requirements: * Installed Identity service for user and project management. * Python 2.7. Before you install and configure Messaging, you must create a ``MongoDB`` replica-set of three database servers. You also need to create service credentials and API endpoints in Identity. #. Install and configure ``MongoDB`` replica-set on database servers: #. Install ``MongoDB`` on the database servers: On each database server follow the official `MongoDB installation instructions`_. .. note:: Messaging service works with ``MongoDB`` versions >= 2.4 #. Configure ``MongoDB`` on the database servers: On each database server, edit the configuration file ``/etc/mongod.conf`` and modify it as needed: .. code-block:: ini # MongoDB sample configuration for Messaging service. # (For MongoDB version >= 2.6) # Edit according to your needs. systemLog: destination: file logAppend: true path: /var/log/mongodb/mongod.log storage: dbPath: /var/lib/mongo journal: enabled: false processManagement: fork: true # fork and run in background pidFilePath: /var/run/mongodb/mongod.pid # location of pidfile net: port: 27017 # bindIp: 127.0.0.1 # Listen to local interface only, comment to listen on all interfaces. operationProfiling: slowOpThresholdMs: 200 mode: slowOp replication: oplogSizeMB: 2048 replSetName: catalog ..
note:: In the case of older ``MongoDB`` versions (2.4 and 2.5), the configuration file should be written in a different format. For information about the format for different versions, see the official `MongoDB configuration reference`_. .. warning:: Additional steps are required to secure the ``MongoDB`` installation. You should modify this configuration for your security requirements. See the official `MongoDB security reference`_. #. Start ``MongoDB`` on the database servers: Start the ``MongoDB`` service on all database servers: .. code-block:: console # service mongod start Make the ``MongoDB`` service start automatically after reboot: .. code-block:: console # chkconfig mongod on #. Configure ``MongoDB`` Replica Set on the database servers: Once you've installed ``MongoDB`` on three servers and assuming that the primary ``MongoDB`` server hostname is ``MYDB0.EXAMPLE-MESSAGES.NET``, go to ``MYDB0`` and run these commands: .. code-block:: console # mongo local --eval "printjson(rs.initiate())" # mongo local --eval "printjson(rs.add('MYDB1.EXAMPLE-MESSAGES.NET'))" # mongo local --eval "printjson(rs.add('MYDB2.EXAMPLE-MESSAGES.NET'))" .. note:: The database servers must have access to each other and also be accessible from the Messaging service web server. Configure firewalls on all database servers to accept incoming connections to port ``27017`` from the needed source. To check if the replica-set is established, see the output of this command: .. code-block:: console # mongo local --eval "printjson(rs.status())" #. Source the ``admin`` credentials to gain access to admin-only CLI commands: .. code-block:: console $ . admin-openrc #. To create the service credentials, complete these steps: #. Create the ``zaqar`` user: .. code-block:: console $ openstack user create --domain default --password-prompt zaqar User Password: Repeat User Password: +-----------+----------------------------------+ | Field | Value | +-----------+----------------------------------+ | domain_id | default | | enabled | True | | id | 7b0ffc83097148dab6ecbef6ddcc46bf | | name | zaqar | +-----------+----------------------------------+ #. Add the ``admin`` role to the ``zaqar`` user: .. code-block:: console $ openstack role add --project service --user zaqar admin .. note:: This command provides no output. #. Create the ``zaqar`` service entity: .. code-block:: console $ openstack service create --name zaqar --description "Messaging" messaging +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | Messaging | | enabled | True | | id | b39c22818be5425ba2315dd4b10cd57c | | name | zaqar | | type | messaging | +-------------+----------------------------------+ #. Create the Messaging service API endpoints: ..
code-block:: console $ openstack endpoint create --region RegionOne messaging public http://WEB0.EXAMPLE-MESSAGES.NET:8888 +--------------+---------------------------------------+ | Field | Value | +--------------+---------------------------------------+ | enabled | True | | id | aabca78860e74c4db0bcb36167bfe106 | | interface | public | | region | RegionOne | | region_id | RegionOne | | service_id | b39c22818be5425ba2315dd4b10cd57c | | service_name | zaqar | | service_type | messaging | | url | http://WEB0.EXAMPLE-MESSAGES.NET:8888 | +--------------+---------------------------------------+ $ openstack endpoint create --region RegionOne messaging internal http://WEB0.EXAMPLE-MESSAGES.NET:8888 +--------------+---------------------------------------+ | Field | Value | +--------------+---------------------------------------+ | enabled | True | | id | 07f9524613de4fd3905e13a87f81fd3f | | interface | internal | | region | RegionOne | | region_id | RegionOne | | service_id | b39c22818be5425ba2315dd4b10cd57c | | service_name | zaqar | | service_type | messaging | | url | http://WEB0.EXAMPLE-MESSAGES.NET:8888 | +--------------+---------------------------------------+ $ openstack endpoint create --region RegionOne messaging admin http://WEB0.EXAMPLE-MESSAGES.NET:8888 +--------------+---------------------------------------+ | Field | Value | +--------------+---------------------------------------+ | enabled | True | | id | 686f7b19428f4b5aa1425667dfe4f49d | | interface | admin | | region | RegionOne | | region_id | RegionOne | | service_id | b39c22818be5425ba2315dd4b10cd57c | | service_name | zaqar | | service_type | messaging | | url | http://WEB0.EXAMPLE-MESSAGES.NET:8888 | +--------------+---------------------------------------+ Install and configure Messaging web server ------------------------------------------ Install and configure ``memcached``, ``uWSGI`` and Messaging on the web server ``WEB0.EXAMPLE-MESSAGES.NET``. #. Install ``memcached`` on the web server ``WEB0.EXAMPLE-MESSAGES.NET`` in order to cache Identity service tokens and catalog mappings: .. code-block:: console # zypper install memcached Start the ``memcached`` service: .. code-block:: console # /etc/init.d/memcached start Make the ``memcached`` service start automatically after reboot: .. code-block:: console # chkconfig memcached on #. Install the Messaging service and ``uWSGI``: .. code-block:: console # zypper install python-pip # git clone https://git.openstack.org/openstack/zaqar.git # cd zaqar # pip install . -r ./requirements.txt --upgrade --log /tmp/zaqar-pip.log # pip install --upgrade pymongo gevent uwsgi #. Create the Zaqar configuration directory ``/etc/zaqar/``: .. code-block:: console # mkdir /etc/zaqar #. Customize the policy file: .. code-block:: console # oslopolicy-sample-generator --config-file etc/zaqar-policy-generator.conf # cp etc/zaqar.policy.yaml.sample /etc/zaqar/policy.yaml Edit any items as needed in ``policy.yaml``. .. note:: By default, if you do not need a custom policy file, you do not need to perform the above steps; Zaqar will then use the default policy defined in the code. #. Create the log file: .. code-block:: console # touch /var/log/zaqar-server.log # chown ZAQARUSER:ZAQARUSER /var/log/zaqar-server.log # chmod 600 /var/log/zaqar-server.log Replace ``ZAQARUSER`` with the name of the system user under which the Messaging service will run. #. Create the ``/srv/zaqar`` folder to store ``uWSGI`` configuration files: .. code-block:: console # mkdir /srv/zaqar #. Create ``/srv/zaqar/zaqar_uwsgi.py`` with the following content: ..
code-block:: python from keystonemiddleware import auth_token from zaqar.transport.wsgi import app app = auth_token.AuthProtocol(app.app, {}) #. Increase the backlog listen limit from the default (128): .. code-block:: console # echo "net.core.somaxconn=2048" | sudo tee --append /etc/sysctl.conf #. Create the ``/srv/zaqar/uwsgi.ini`` file with the following content and modify it as needed: .. code-block:: ini [uwsgi] https = WEB0.EXAMPLE-MESSAGES.NET:8888,PATH_TO_SERVER_CRT,PATH_TO_SERVER_PRIVATE_KEY pidfile = /var/run/zaqar.pid gevent = 2000 gevent-monkey-patch = true listen = 1024 enable-threads = true chdir = /srv/zaqar module = zaqar_uwsgi:app workers = 4 harakiri = 60 add-header = Connection: close Replace ``PATH_TO_SERVER_CRT`` with the path to the server's certificate (``*.crt``) and ``PATH_TO_SERVER_PRIVATE_KEY`` with the path to the server's private key (``*.key``). .. note:: The ``uWSGI`` configuration options above can be modified for different security and performance requirements including load balancing. See the official `uWSGI configuration reference`_. #. Create the PID file: .. code-block:: console # touch /var/run/zaqar.pid # chown ZAQARUSER:ZAQARUSER /var/run/zaqar.pid Replace ``ZAQARUSER`` with the name of the system user under which the Messaging service will run. #. Create the Messaging service's configuration file ``/etc/zaqar/zaqar.conf`` with the following content: .. code-block:: ini [DEFAULT] # Show debugging output in logs (sets DEBUG log level output) #debug = False # Pooling and admin mode configs pooling = True admin_mode = True # Log to file log_file = /var/log/zaqar-server.log # This is taken care of in our custom app.py, so disable here ;auth_strategy = keystone # Modify to make it work with your Identity service. [keystone_authtoken] project_domain_name = Default user_domain_name = Default project_domain_id = default project_name = service user_domain_id = default # File path to a PEM encoded Certificate Authority to use when verifying # HTTPS connections. Defaults to system CAs if commented. cafile = PATH_TO_CA_FILE # Messaging service user name in Identity service. username = ZAQARIDENTITYUSER # Messaging service password in Identity service. password = ZAQARIDENTITYPASSWORD # Complete public Identity API endpoint (the HTTPS protocol is preferable # to HTTP). www_authenticate_uri = HTTPS://IDENTITY.EXAMPLE-MESSAGES.NET:5000 # Complete admin Identity API endpoint (the HTTPS protocol is preferable # to HTTP). auth_url = HTTPS://IDENTITY.EXAMPLE-MESSAGES.NET:5000 # Token cache time in seconds. token_cache_time = TOKEN_CACHE_TIME memcached_servers = 127.0.0.1:11211 [cache] # Dogpile.cache backend module. It is recommended that Memcache with # pooling (oslo_cache.memcache_pool) or Redis (dogpile.cache.redis) be # used in production deployments. Small workloads (single process) # like devstack can use the dogpile.cache.memory backend. (string # value) backend = dogpile.cache.memory memcache_servers = 127.0.0.1:11211 [drivers] transport = wsgi message_store = mongodb management_store = mongodb [drivers:management_store:mongodb] # MongoDB connection URI. If the SSL connection is enabled, then the # ssl_keyfile, ssl_certfile, ssl_cert_reqs and ssl_ca_certs options need # to be set accordingly. uri = mongodb://MYDB0.EXAMPLE-MESSAGES.NET,MYDB1.EXAMPLE-MESSAGES.NET,MYDB2.EXAMPLE-MESSAGES.NET:27017/?replicaSet=catalog&w=2&readPreference=secondaryPreferred # Name for the database on the MongoDB server.
database = zaqarmanagementstore # Number of databases across which to partition message data, in order # to reduce writer lock %. DO NOT change this setting after initial # deployment. It MUST remain static. Also, you should not need a large # number of partitions to improve performance, esp. if deploying # MongoDB on SSD storage. (integer value) partitions = 8 # Uncomment any options below if needed. # Maximum number of times to retry a failed operation. Currently # only used for retrying a message post. ;max_attempts = 1000 # Maximum sleep interval between retries (actual sleep time # increases linearly according to number of attempts performed). ;max_retry_sleep = 0.1 # Maximum jitter interval, to be added to the sleep interval, in # order to decrease probability that parallel requests will retry # at the same instant. ;max_retry_jitter = 0.005 # Frequency of message garbage collections, in seconds ;gc_interval = 5 * 60 # Threshold of number of expired messages to reach in a given # queue, before performing the GC. Useful for reducing frequent # locks on the DB for non-busy queues, or for worker queues # which process jobs quickly enough to keep the number of in- # flight messages low. # # Note: The higher this number, the larger the memory-mapped DB # files will be. ;gc_threshold = 1000 [drivers:message_store:mongodb] # This section has the same set of available options as the # "[drivers:management_store:mongodb]" section. # # If pooling is enabled, all pools inherit values from options in these # settings unless overridden in pool creation request. Also "uri" option # value isn't used in case of pooling. # # If the SSL connection is enabled, then the ssl_keyfile, ssl_certfile, # ssl_cert_reqs and ssl_ca_certs options need to be set accordingly. # Name for the database on the MongoDB server. database = zaqarmessagestore [transport] max_queues_per_page = 1000 max_queue_metadata = 262144 max_messages_per_page = 10 max_messages_post_size = 262144 max_message_ttl = 1209600 max_claim_ttl = 43200 max_claim_grace = 43200 [signed_url] # Secret key used to encrypt pre-signed URLs. (string value) secret_key = SOMELONGSECRETKEY Edit any options as needed, especially the options with capitalized values. #. Create a service file for the Messaging service ``/etc/systemd/system/zaqar-uwsgi.service``: .. code-block:: ini [Unit] Description=uWSGI Zaqar After=syslog.target [Service] ExecStart=/usr/bin/uwsgi --ini /srv/zaqar/uwsgi.ini # Requires systemd version 211 or newer RuntimeDirectory=uwsgi Restart=always KillSignal=SIGQUIT Type=notify StandardError=syslog NotifyAccess=all User=ZAQARUSER Group=ZAQARUSER [Install] WantedBy=multi-user.target Replace ``ZAQARUSER`` with the name of the system user under which the Messaging service will run. Finalize installation --------------------- Now that you have configured the web server and the database servers for a functional Messaging service, you need to start the service, make it start automatically with the system, and define the created ``MongoDB`` replica-set as Messaging's pool. #. Start the Messaging service on the web server: .. code-block:: console # systemctl start zaqar-uwsgi.service #. Make the Messaging service start automatically after reboot on the web server: .. code-block:: console # systemctl enable zaqar-uwsgi.service #. Configure the pool: ..
code-block:: console # curl -i -X PUT https://WEB0.EXAMPLE-MESSAGES.NET:8888/v2/pools/POOL1 \ -d '{"weight": 100, "uri": "mongodb://MYDB0.EXAMPLE-MESSAGES.NET,MYDB1.EXAMPLE-MESSAGES.NET,MYDB2.EXAMPLE-MESSAGES.NET:27017/?replicaSet=catalog&w=2&readPreference=secondaryPreferred", "options": {"partitions": 8}}' \ -H "Client-ID: CLIENT_ID" \ -H "X-Auth-Token: TOKEN" \ -H "Content-type: application/json" \ Replace the ``POOL1`` variable with the desired name of the pool. Replace the ``CLIENT_ID`` variable with a universally unique identifier (UUID), which can be generated by, for example, the ``uuidgen`` utility. Replace the ``TOKEN`` variable with an authentication token retrieved from the Identity service. If you choose not to enable Keystone authentication, you won't have to pass a token. .. note:: The ``options`` key in the curl request above overrides any options (specified in the configuration file or by default) in the ``[drivers:message_store:mongodb]`` section of the Messaging service configuration file. .. tip:: In larger deployments, there should be many load-balanced web servers. Also, the management store databases and the message store databases (pools) should be on different ``MongoDB`` replica-sets. .. _`MongoDB installation instructions`: https://docs.mongodb.org/manual/tutorial/install-mongodb-on-suse/ .. _`MongoDB configuration reference`: https://docs.mongodb.org/v3.0/reference/configuration-options/ .. _`MongoDB security reference`: https://docs.mongodb.org/manual/security/ .. _`uWSGI configuration reference`: http://uwsgi-docs.readthedocs.io/en/latest/ ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/install/install-rdo.rst0000664000175100017510000005023515033040005021440 0ustar00mylesmyles.. _install-rdo: Install and configure for Red Hat Enterprise Linux and CentOS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Messaging service, code-named ``zaqar``, for Red Hat Enterprise Linux 7 and CentOS 7. This section assumes that you already have a working OpenStack environment with at least Identity service installed. Here you can find instructions and recommended settings for installing the Messaging service in a small configuration: one web server with the Messaging service configured to use a replica-set of three ``MongoDB`` database servers. Because only one web server is used, the Messaging service installed by using these instructions can't be considered highly available; see :doc:`install`. In this tutorial these server names are used as examples: * Web server with Messaging service: ``WEB0.EXAMPLE-MESSAGES.NET``. * Database servers: ``MYDB0.EXAMPLE-MESSAGES.NET``, ``MYDB1.EXAMPLE-MESSAGES.NET``, ``MYDB2.EXAMPLE-MESSAGES.NET``. * Identity service server: ``IDENTITY.EXAMPLE-MESSAGES.NET``. Prerequisites ------------- Before you install Messaging service, you must meet the following system requirements: * Installed Identity service for user and project management. * Python 2.7. Before you install and configure Messaging, you must create a ``MongoDB`` replica-set of three database servers. You also need to create service credentials and API endpoints in Identity. #. Install and configure ``MongoDB`` replica-set on database servers: #. Install ``MongoDB`` on the database servers: On each database server follow the official `MongoDB installation instructions`_. .. note:: Messaging service works with ``MongoDB`` versions >= 2.4 #.
Configure ``MongoDB`` on the database servers: On each database server edit the configuration file ``/etc/mongod.conf`` and modify it as needed: .. code-block:: ini # MongoDB sample configuration for Messaging service. # (For MongoDB version >= 2.6) # Edit according to your needs. systemLog: destination: file logAppend: true path: /var/log/mongodb/mongod.log storage: dbPath: /var/lib/mongo journal: enabled: false processManagement: fork: true # fork and run in background pidFilePath: /var/run/mongodb/mongod.pid # location of pidfile net: port: 27017 # bindIp: 127.0.0.1 # Listen to local interface only. Comment out to listen on all interfaces. operationProfiling: slowOpThresholdMs: 200 mode: slowOp replication: oplogSizeMB: 2048 replSetName: catalog .. note:: In case of older ``MongoDB`` versions (2.4 and 2.5) the configuration file should be written in a different format. For information about the format for different versions, see the official `MongoDB configuration reference`_. .. warning:: Additional steps are required to secure the ``MongoDB`` installation. You should modify this configuration for your security requirements. See the official `MongoDB security reference`_. #. Start ``MongoDB`` on the database servers: Start the ``MongoDB`` service on all database servers: .. code-block:: console # systemctl start mongod Make the ``MongoDB`` service start automatically after reboot: .. code-block:: console # systemctl enable mongod #. Configure the ``MongoDB`` Replica Set on the database servers: Once you've installed ``MongoDB`` on three servers and assuming that the primary ``MongoDB`` server hostname is ``MYDB0.EXAMPLE-MESSAGES.NET``, go to ``MYDB0`` and run these commands: .. code-block:: console # mongo local --eval "printjson(rs.initiate())" # mongo local --eval "printjson(rs.add('MYDB1.EXAMPLE-MESSAGES.NET'))" # mongo local --eval "printjson(rs.add('MYDB2.EXAMPLE-MESSAGES.NET'))" .. note:: The database servers must have access to each other and also be accessible from the Messaging service web server. Configure firewalls on all database servers to accept incoming connections to port ``27017`` from the needed source. To check if the replica-set is established, see the output of this command: .. code-block:: console # mongo local --eval "printjson(rs.status())" #. Source the ``admin`` credentials to gain access to admin-only CLI commands: .. code-block:: console $ . admin-openrc #. To create the service credentials, complete these steps: #. Create the ``zaqar`` user: .. code-block:: console $ openstack user create --domain default --password-prompt zaqar User Password: Repeat User Password: +-----------+----------------------------------+ | Field | Value | +-----------+----------------------------------+ | domain_id | default | | enabled | True | | id | 7b0ffc83097148dab6ecbef6ddcc46bf | | name | zaqar | +-----------+----------------------------------+ #. Add the ``admin`` role to the ``zaqar`` user: .. code-block:: console $ openstack role add --project service --user zaqar admin .. note:: This command provides no output. #. Create the ``zaqar`` service entity: .. code-block:: console $ openstack service create --name zaqar --description "Messaging" messaging +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | Messaging | | enabled | True | | id | b39c22818be5425ba2315dd4b10cd57c | | name | zaqar | | type | messaging | +-------------+----------------------------------+ #. Create the Messaging service API endpoints: ..
code-block:: console $ openstack endpoint create --region RegionOne messaging public http://WEB0.EXAMPLE-MESSAGES.NET:8888 +--------------+---------------------------------------+ | Field | Value | +--------------+---------------------------------------+ | enabled | True | | id | aabca78860e74c4db0bcb36167bfe106 | | interface | public | | region | RegionOne | | region_id | RegionOne | | service_id | b39c22818be5425ba2315dd4b10cd57c | | service_name | zaqar | | service_type | messaging | | url | http://WEB0.EXAMPLE-MESSAGES.NET:8888 | +--------------+---------------------------------------+ $ openstack endpoint create --region RegionOne messaging internal http://WEB0.EXAMPLE-MESSAGES.NET:8888 +--------------+---------------------------------------+ | Field | Value | +--------------+---------------------------------------+ | enabled | True | | id | 07f9524613de4fd3905e13a87f81fd3f | | interface | internal | | region | RegionOne | | region_id | RegionOne | | service_id | b39c22818be5425ba2315dd4b10cd57c | | service_name | zaqar | | service_type | messaging | | url | http://WEB0.EXAMPLE-MESSAGES.NET:8888 | +--------------+---------------------------------------+ $ openstack endpoint create --region RegionOne messaging admin http://WEB0.EXAMPLE-MESSAGES.NET:8888 +--------------+---------------------------------------+ | Field | Value | +--------------+---------------------------------------+ | enabled | True | | id | 686f7b19428f4b5aa1425667dfe4f49d | | interface | admin | | region | RegionOne | | region_id | RegionOne | | service_id | b39c22818be5425ba2315dd4b10cd57c | | service_name | zaqar | | service_type | messaging | | url | http://WEB0.EXAMPLE-MESSAGES.NET:8888 | +--------------+---------------------------------------+ Install and configure Messaging web server ------------------------------------------ Install and configure ``memcached``, ``uWSGI`` and Messaging on the web server ``WEB0.EXAMPLE-MESSAGES.NET``. #. Install ``memcached`` on web server ``WEB0.EXAMPLE-MESSAGES.NET`` in order to cache Identity service tokens and catalog mappings: .. code-block:: console # dnf install memcached Start the ``memcached`` service: .. code-block:: console # systemctl start memcached Make the ``memcached`` service start automatically after reboot: .. code-block:: console # systemctl enable memcached #. Install the Messaging service and ``uWSGI``: .. code-block:: console # dnf install python3-pip # git clone https://git.openstack.org/openstack/zaqar.git # cd zaqar # pip install . -r ./requirements.txt --upgrade --log /tmp/zaqar-pip.log # pip install --upgrade pymongo gevent uwsgi #. Create the Zaqar configuration directory ``/etc/zaqar/``: .. code-block:: console # mkdir /etc/zaqar #. Customize the policy file: .. code-block:: console # oslopolicy-sample-generator --config-file etc/zaqar-policy-generator.conf # cp etc/zaqar.policy.yaml.sample /etc/zaqar/policy.yaml Edit any item as needed in policy.yaml. .. note:: If you do not need a custom policy file, you can skip the steps above; zaqar will then use the default policy defined in the code. #. Create the log file: .. code-block:: console # touch /var/log/zaqar-server.log # chown ZAQARUSER:ZAQARUSER /var/log/zaqar-server.log # chmod 600 /var/log/zaqar-server.log Replace ``ZAQARUSER`` with the name of the system user under which the Messaging service will run. #. Create the ``/srv/zaqar`` folder to store ``uWSGI`` configuration files: .. code-block:: console # mkdir /srv/zaqar #. Create ``/srv/zaqar/zaqar_uwsgi.py`` with the following content: ..
code-block:: python from keystonemiddleware import auth_token from zaqar.transport.wsgi import app app = auth_token.AuthProtocol(app.app, {}) #. Increase the backlog listen limit from the default (128): .. code-block:: console # echo "net.core.somaxconn=2048" | sudo tee --append /etc/sysctl.conf #. Create the ``/srv/zaqar/uwsgi.ini`` file with the following content and modify it as needed: .. code-block:: ini [uwsgi] https = WEB0.EXAMPLE-MESSAGES.NET:8888,PATH_TO_SERVER_CRT,PATH_TO_SERVER_PRIVATE_KEY pidfile = /var/run/zaqar.pid gevent = 2000 gevent-monkey-patch = true listen = 1024 enable-threads = true chdir = /srv/zaqar module = zaqar_uwsgi:app workers = 4 harakiri = 60 add-header = Connection: close Replace ``PATH_TO_SERVER_CRT`` with the path to the server's certificate (``*.crt``) and ``PATH_TO_SERVER_PRIVATE_KEY`` with the path to the server's private key (``*.key``). .. note:: The ``uWSGI`` configuration options above can be modified for different security and performance requirements, including load balancing. See the official `uWSGI configuration reference`_. #. Create the pid file: .. code-block:: console # touch /var/run/zaqar.pid # chown ZAQARUSER:ZAQARUSER /var/run/zaqar.pid Replace ``ZAQARUSER`` with the name of the system user under which the Messaging service will run. #. Create the Messaging service's configuration file ``/etc/zaqar/zaqar.conf`` with the following content: .. code-block:: ini [DEFAULT] # Show debugging output in logs (sets DEBUG log level output) #debug = False # Pooling and admin mode configs pooling = True admin_mode = True # Log to file log_file = /var/log/zaqar-server.log # This is taken care of in our custom app.py, so disable here ;auth_strategy = keystone # Modify to make it work with your Identity service. [keystone_authtoken] project_domain_name = Default user_domain_name = Default project_domain_id = default project_name = service user_domain_id = default # File path to a PEM encoded Certificate Authority to use when verifying # HTTPS connections. Defaults to system CAs if commented. cafile = PATH_TO_CA_FILE # Messaging service user name in Identity service. username = ZAQARIDENTITYUSER # Messaging service password in Identity service. password = ZAQARIDENTITYPASSWORD # Complete public Identity API endpoint (the HTTPS protocol is preferable # to HTTP). www_authenticate_uri = HTTPS://IDENTITY.EXAMPLE-MESSAGES.NET:5000 # Complete admin Identity API endpoint (the HTTPS protocol is preferable # to HTTP). identity_uri = HTTPS://IDENTITY.EXAMPLE-MESSAGES.NET:5000 # Token cache time in seconds. token_cache_time = TOKEN_CACHE_TIME memcached_servers = 127.0.0.1:11211 [cache] # Dogpile.cache backend module. It is recommended that Memcache with # pooling (oslo_cache.memcache_pool) or Redis (dogpile.cache.redis) be # used in production deployments. Small workloads (single process) # like devstack can use the dogpile.cache.memory backend. (string # value) backend = dogpile.cache.memory memcache_servers = 127.0.0.1:11211 [drivers] transport = wsgi message_store = mongodb management_store = mongodb [drivers:management_store:mongodb] # MongoDB connection URI. If SSL connection is enabled, then the ssl_keyfile, # ssl_certfile, ssl_cert_reqs, and ssl_ca_certs options need to be set # accordingly. uri = mongodb://MYDB0.EXAMPLE-MESSAGES.NET,MYDB1.EXAMPLE-MESSAGES.NET,MYDB2.EXAMPLE-MESSAGES.NET:27017/?replicaSet=catalog&w=2&readPreference=secondaryPreferred # Name for the database on MongoDB server.
database = zaqarmanagementstore # Number of databases across which to partition message data, in order # to reduce writer lock %. DO NOT change this setting after initial # deployment. It MUST remain static. Also, you should not need a large # number of partitions to improve performance, esp. if deploying # MongoDB on SSD storage. (integer value) partitions = 8 # Uncomment any options below if needed. # Maximum number of times to retry a failed operation. Currently # only used for retrying a message post. ;max_attempts = 1000 # Maximum sleep interval between retries (actual sleep time # increases linearly according to number of attempts performed). ;max_retry_sleep = 0.1 # Maximum jitter interval, to be added to the sleep interval, in # order to decrease probability that parallel requests will retry # at the same instant. ;max_retry_jitter = 0.005 # Frequency of message garbage collections, in seconds. ;gc_interval = 5 * 60 # Threshold of number of expired messages to reach in a given # queue, before performing the GC. Useful for reducing frequent # locks on the DB for non-busy queues, or for worker queues # which process jobs quickly enough to keep the number of in- # flight messages low. # # Note: The higher this number, the larger the memory-mapped DB # files will be. ;gc_threshold = 1000 [drivers:message_store:mongodb] # This section has the same set of available options as the # "[drivers:management_store:mongodb]" section. # # If pooling is enabled, all pools inherit values from options in these # settings unless overridden in the pool creation request. Also, the "uri" # option value isn't used when pooling is enabled. # # If SSL connection is enabled, then the ssl_keyfile, ssl_certfile, # ssl_cert_reqs, and ssl_ca_certs options need to be set accordingly. # Name for the database on MongoDB server. database = zaqarmessagestore [transport] max_queues_per_page = 1000 max_queue_metadata = 262144 max_messages_per_page = 10 max_messages_post_size = 262144 max_message_ttl = 1209600 max_claim_ttl = 43200 max_claim_grace = 43200 [signed_url] # Secret key used to encrypt pre-signed URLs. (string value) secret_key = SOMELONGSECRETKEY Edit any options as needed, especially the options with capitalized values. #. Create a service file for the Messaging service, ``/etc/systemd/system/zaqar.uwsgi.service``: .. code-block:: ini [Unit] Description=uWSGI Zaqar After=syslog.target [Service] ExecStart=/usr/bin/uwsgi --ini /srv/zaqar/uwsgi.ini # Requires systemd version 211 or newer RuntimeDirectory=uwsgi Restart=always KillSignal=SIGQUIT Type=notify StandardError=syslog NotifyAccess=all User=ZAQARUSER Group=ZAQARUSER [Install] WantedBy=multi-user.target Replace ``ZAQARUSER`` with the name of the system user under which the Messaging service will run. Finalize installation --------------------- Now that you have configured the web server and the database servers for a functional Messaging service, you need to start the service, make it start automatically with the system, and define the created ``MongoDB`` replica-set as Messaging's pool. #. Start the Messaging service on the web server: .. code-block:: console # systemctl start zaqar.uwsgi.service #. Make the Messaging service start automatically after reboot on the web server: .. code-block:: console # systemctl enable zaqar.uwsgi.service #. Configure pool: ..
code-block:: console # curl -i -X PUT https://WEB0.EXAMPLE-MESSAGES.NET:8888/v2/pools/POOL1 \ -d '{"weight": 100, "uri": "mongodb://MYDB0.EXAMPLE-MESSAGES.NET,MYDB1.EXAMPLE-MESSAGES.NET,MYDB2.EXAMPLE-MESSAGES.NET:27017/?replicaSet=catalog&w=2&readPreference=secondaryPreferred", "options": {"partitions": 8}}' \ -H "Client-ID: CLIENT_ID" \ -H "X-Auth-Token: TOKEN" \ -H "Content-type: application/json" Replace the ``POOL1`` variable with the desired name of a pool. Replace the ``CLIENT_ID`` variable with a universally unique identifier (UUID), which can be generated by, for example, the ``uuidgen`` utility. Replace the ``TOKEN`` variable with the authentication token retrieved from the Identity service. If you choose not to enable Keystone authentication, you won't have to pass a token. .. note:: The ``options`` key in the curl request above overrides any options (specified in the configuration file or by default) in the ``[drivers:message_store:mongodb]`` section of the Messaging service configuration file. .. tip:: In larger deployments, there should be many load balanced web servers. Also, the management store databases and the message store databases (pools) should be on different ``MongoDB`` replica-sets. .. _`MongoDB installation instructions`: https://docs.mongodb.org/manual/tutorial/install-mongodb-on-red-hat/ .. _`MongoDB configuration reference`: https://docs.mongodb.org/v3.0/reference/configuration-options/ .. _`MongoDB security reference`: https://docs.mongodb.org/manual/security/ .. _`uWSGI configuration reference`: http://uwsgi-docs.readthedocs.io/en/latest/ ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/install/install-ubuntu.rst0000664000175100017510000004727615033040005022205 0ustar00mylesmyles.. _install-ubuntu: Install and configure for Ubuntu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Messaging service for Ubuntu 14.04 (LTS). This section assumes that you already have a working OpenStack environment with at least the Identity service installed. Here you can find instructions and recommended settings for installing the Messaging service in a small configuration: one web server with the Messaging service configured to use a replica-set of three ``MongoDB`` database servers. Because only one web server is used, the Messaging service installed by using these instructions can't be considered highly available; see :doc:`install`. In this tutorial these server names are used as examples: * Web server with Messaging service: ``WEB0.EXAMPLE-MESSAGES.NET``. * Database servers: ``MYDB0.EXAMPLE-MESSAGES.NET``, ``MYDB1.EXAMPLE-MESSAGES.NET``, ``MYDB2.EXAMPLE-MESSAGES.NET``. * Identity service server: ``IDENTITY.EXAMPLE-MESSAGES.NET``. Prerequisites ------------- Before you install the Messaging service, you must meet the following system requirements: * Installed Identity service for user and project management. * Python 2.7. Before you install and configure Messaging, you must create a ``MongoDB`` replica-set of three database servers. You also need to create service credentials and API endpoints in Identity. #. Install and configure a ``MongoDB`` replica-set on the database servers: #. Install ``MongoDB`` on the database servers: On each database server follow the official `MongoDB installation instructions`_. .. note:: The Messaging service works with ``MongoDB`` versions >= 2.4 #. Configure ``MongoDB`` on the database servers: On each database server edit the configuration file ``/etc/mongod.conf`` and modify it as needed: ..
code-block:: ini # MongoDB sample configuration for Messaging service. # (For MongoDB version >= 2.6) # Edit according to your needs. systemLog: destination: file logAppend: true path: /var/log/mongodb/mongod.log storage: dbPath: /var/lib/mongo journal: enabled: false processManagement: fork: true # fork and run in background pidFilePath: /var/run/mongodb/mongod.pid # location of pidfile net: port: 27017 # bindIp: 127.0.0.1 # Listen to local interface only. Comment out to listen on all interfaces. operationProfiling: slowOpThresholdMs: 200 mode: slowOp replication: oplogSizeMB: 2048 replSetName: catalog .. note:: In case of older ``MongoDB`` versions (2.4 and 2.5) the configuration file should be written in a different format. For information about the format for different versions, see the official `MongoDB configuration reference`_. .. warning:: Additional steps are required to secure the ``MongoDB`` installation. You should modify this configuration for your security requirements. See the official `MongoDB security reference`_. #. Start ``MongoDB`` on the database servers: Start the ``MongoDB`` service on all database servers: .. code-block:: console # service mongodb start #. Configure the ``MongoDB`` Replica Set on the database servers: Once you've installed ``MongoDB`` on three servers and assuming that the primary ``MongoDB`` server hostname is ``MYDB0.EXAMPLE-MESSAGES.NET``, go to ``MYDB0`` and run these commands: .. code-block:: console # mongo local --eval "printjson(rs.initiate())" # mongo local --eval "printjson(rs.add('MYDB1.EXAMPLE-MESSAGES.NET'))" # mongo local --eval "printjson(rs.add('MYDB2.EXAMPLE-MESSAGES.NET'))" .. note:: The database servers must have access to each other and also be accessible from the Messaging service web server. Configure firewalls on all database servers to accept incoming connections to port ``27017`` from the needed source. To check if the replica-set is established, see the output of this command: .. code-block:: console # mongo local --eval "printjson(rs.status())" #. Source the ``admin`` credentials to gain access to admin-only CLI commands: .. code-block:: console $ . admin-openrc #. To create the service credentials, complete these steps: #. Create the ``zaqar`` user: .. code-block:: console $ openstack user create --domain default --password-prompt zaqar User Password: Repeat User Password: +-----------+----------------------------------+ | Field | Value | +-----------+----------------------------------+ | domain_id | default | | enabled | True | | id | 7b0ffc83097148dab6ecbef6ddcc46bf | | name | zaqar | +-----------+----------------------------------+ #. Add the ``admin`` role to the ``zaqar`` user: .. code-block:: console $ openstack role add --project service --user zaqar admin .. note:: This command provides no output. #. Create the ``zaqar`` service entity: .. code-block:: console $ openstack service create --name zaqar --description "Messaging" messaging +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | Messaging | | enabled | True | | id | b39c22818be5425ba2315dd4b10cd57c | | name | zaqar | | type | messaging | +-------------+----------------------------------+ #. Create the Messaging service API endpoints: ..
code-block:: console $ openstack endpoint create --region RegionOne messaging public http://WEB0.EXAMPLE-MESSAGES.NET:8888 +--------------+---------------------------------------+ | Field | Value | +--------------+---------------------------------------+ | enabled | True | | id | aabca78860e74c4db0bcb36167bfe106 | | interface | public | | region | RegionOne | | region_id | RegionOne | | service_id | b39c22818be5425ba2315dd4b10cd57c | | service_name | zaqar | | service_type | messaging | | url | http://WEB0.EXAMPLE-MESSAGES.NET:8888 | +--------------+---------------------------------------+ $ openstack endpoint create --region RegionOne messaging internal http://WEB0.EXAMPLE-MESSAGES.NET:8888 +--------------+---------------------------------------+ | Field | Value | +--------------+---------------------------------------+ | enabled | True | | id | 07f9524613de4fd3905e13a87f81fd3f | | interface | internal | | region | RegionOne | | region_id | RegionOne | | service_id | b39c22818be5425ba2315dd4b10cd57c | | service_name | zaqar | | service_type | messaging | | url | http://WEB0.EXAMPLE-MESSAGES.NET:8888 | +--------------+---------------------------------------+ $ openstack endpoint create --region RegionOne messaging admin http://WEB0.EXAMPLE-MESSAGES.NET:8888 +--------------+---------------------------------------+ | Field | Value | +--------------+---------------------------------------+ | enabled | True | | id | 686f7b19428f4b5aa1425667dfe4f49d | | interface | admin | | region | RegionOne | | region_id | RegionOne | | service_id | b39c22818be5425ba2315dd4b10cd57c | | service_name | zaqar | | service_type | messaging | | url | http://WEB0.EXAMPLE-MESSAGES.NET:8888 | +--------------+---------------------------------------+ Install and configure Messaging web server ------------------------------------------ Install and configure ``memcached``, ``uWSGI`` and Messaging on the web server ``WEB0.EXAMPLE-MESSAGES.NET``. #. Install ``memcached`` on web server ``WEB0.EXAMPLE-MESSAGES.NET`` in order to cache Identity service tokens and catalog mappings: .. code-block:: console # apt-get install memcached Start the ``memcached`` service: .. code-block:: console # service memcached start #. Install the Messaging service and ``uWSGI``: .. code-block:: console # apt-get install python-pip # git clone https://git.openstack.org/openstack/zaqar.git # cd zaqar # pip install . -r ./requirements.txt --upgrade --log /tmp/zaqar-pip.log # pip install --upgrade pymongo gevent uwsgi #. Create the Zaqar configuration directory ``/etc/zaqar/``: .. code-block:: console # mkdir /etc/zaqar #. Customize the policy file: .. code-block:: console # oslopolicy-sample-generator --config-file etc/zaqar-policy-generator.conf # cp etc/zaqar.policy.yaml.sample /etc/zaqar/policy.yaml Edit any item as needed in policy.yaml. .. note:: If you do not need a custom policy file, you can skip the steps above; zaqar will then use the default policy defined in the code. #. Create the log file: .. code-block:: console # touch /var/log/zaqar-server.log # chown ZAQARUSER:ZAQARUSER /var/log/zaqar-server.log # chmod 600 /var/log/zaqar-server.log Replace ``ZAQARUSER`` with the name of the system user under which the Messaging service will run. #. Create the ``/srv/zaqar`` folder to store ``uWSGI`` configuration files: .. code-block:: console # mkdir /srv/zaqar #. Create ``/srv/zaqar/zaqar_uwsgi.py`` with the following content: ..
code-block:: python from keystonemiddleware import auth_token from zaqar.transport.wsgi import app app = auth_token.AuthProtocol(app.app, {}) #. Increase the backlog listen limit from the default (128): .. code-block:: console # echo "net.core.somaxconn=2048" | sudo tee --append /etc/sysctl.conf #. Create the ``/srv/zaqar/uwsgi.ini`` file with the following content and modify it as needed: .. code-block:: ini [uwsgi] https = WEB0.EXAMPLE-MESSAGES.NET:8888,PATH_TO_SERVER_CRT,PATH_TO_SERVER_PRIVATE_KEY pidfile = /var/run/zaqar.pid gevent = 2000 gevent-monkey-patch = true listen = 1024 enable-threads = true chdir = /srv/zaqar module = zaqar_uwsgi:app workers = 4 harakiri = 60 add-header = Connection: close Replace ``PATH_TO_SERVER_CRT`` with the path to the server's certificate (``*.crt``) and ``PATH_TO_SERVER_PRIVATE_KEY`` with the path to the server's private key (``*.key``). .. note:: The ``uWSGI`` configuration options above can be modified for different security and performance requirements, including load balancing. See the official `uWSGI configuration reference`_. #. Create the pid file: .. code-block:: console # touch /var/run/zaqar.pid # chown ZAQARUSER:ZAQARUSER /var/run/zaqar.pid Replace ``ZAQARUSER`` with the name of the system user under which the Messaging service will run. #. Create the Messaging service's configuration file ``/etc/zaqar/zaqar.conf`` with the following content: .. code-block:: ini [DEFAULT] # Show debugging output in logs (sets DEBUG log level output) #debug = False # Pooling and admin mode configs pooling = True admin_mode = True # Log to file log_file = /var/log/zaqar-server.log # This is taken care of in our custom app.py, so disable here ;auth_strategy = keystone # Modify to make it work with your Identity service. [keystone_authtoken] project_domain_name = Default user_domain_name = Default project_domain_id = default project_name = service user_domain_id = default # File path to a PEM encoded Certificate Authority to use when verifying # HTTPS connections. Defaults to system CAs if commented. cafile = PATH_TO_CA_FILE # Messaging service user name in Identity service. username = ZAQARIDENTITYUSER # Messaging service password in Identity service. password = ZAQARIDENTITYPASSWORD # Complete public Identity API endpoint (the HTTPS protocol is preferable # to HTTP). www_authenticate_uri = HTTPS://IDENTITY.EXAMPLE-MESSAGES.NET:5000 # Complete admin Identity API endpoint (the HTTPS protocol is preferable # to HTTP). identity_uri = HTTPS://IDENTITY.EXAMPLE-MESSAGES.NET:5000 # Token cache time in seconds. token_cache_time = TOKEN_CACHE_TIME memcached_servers = 127.0.0.1:11211 [cache] # Dogpile.cache backend module. It is recommended that Memcache with # pooling (oslo_cache.memcache_pool) or Redis (dogpile.cache.redis) be # used in production deployments. Small workloads (single process) # like devstack can use the dogpile.cache.memory backend. (string # value) backend = dogpile.cache.memory memcache_servers = 127.0.0.1:11211 [drivers] transport = wsgi message_store = mongodb management_store = mongodb [drivers:management_store:mongodb] # MongoDB connection URI. If SSL connection is enabled, then the ssl_keyfile, # ssl_certfile, ssl_cert_reqs, and ssl_ca_certs options need to be set # accordingly. uri = mongodb://MYDB0.EXAMPLE-MESSAGES.NET,MYDB1.EXAMPLE-MESSAGES.NET,MYDB2.EXAMPLE-MESSAGES.NET:27017/?replicaSet=catalog&w=2&readPreference=secondaryPreferred # Name for the database on MongoDB server.
database = zaqarmanagementstore # Number of databases across which to partition message data, in order # to reduce writer lock %. DO NOT change this setting after initial # deployment. It MUST remain static. Also, you should not need a large # number of partitions to improve performance, esp. if deploying # MongoDB on SSD storage. (integer value) partitions = 8 # Uncomment any options below if needed. # Maximum number of times to retry a failed operation. Currently # only used for retrying a message post. ;max_attempts = 1000 # Maximum sleep interval between retries (actual sleep time # increases linearly according to number of attempts performed). ;max_retry_sleep = 0.1 # Maximum jitter interval, to be added to the sleep interval, in # order to decrease probability that parallel requests will retry # at the same instant. ;max_retry_jitter = 0.005 # Frequency of message garbage collections, in seconds. ;gc_interval = 5 * 60 # Threshold of number of expired messages to reach in a given # queue, before performing the GC. Useful for reducing frequent # locks on the DB for non-busy queues, or for worker queues # which process jobs quickly enough to keep the number of in- # flight messages low. # # Note: The higher this number, the larger the memory-mapped DB # files will be. ;gc_threshold = 1000 [drivers:message_store:mongodb] # This section has the same set of available options as the # "[drivers:management_store:mongodb]" section. # # If pooling is enabled, all pools inherit values from options in these # settings unless overridden in the pool creation request. Also, the "uri" # option value isn't used when pooling is enabled. # # If SSL connection is enabled, then the ssl_keyfile, ssl_certfile, # ssl_cert_reqs, and ssl_ca_certs options need to be set accordingly. # Name for the database on MongoDB server. database = zaqarmessagestore [transport] max_queues_per_page = 1000 max_queue_metadata = 262144 max_messages_per_page = 10 max_messages_post_size = 262144 max_message_ttl = 1209600 max_claim_ttl = 43200 max_claim_grace = 43200 [signed_url] # Secret key used to encrypt pre-signed URLs. (string value) secret_key = SOMELONGSECRETKEY Edit any options as needed, especially the options with capitalized values. #. Create an upstart config; it could be named ``/etc/init/zaqar.conf``: .. code-block:: bash description "Zaqar api server" author "Your Name " start on runlevel [2345] stop on runlevel [!2345] chdir /var/run pre-start script mkdir -p /var/run/zaqar chown zaqar:zaqar /var/run/zaqar mkdir -p /var/lock/zaqar chown zaqar:root /var/lock/zaqar end script exec /usr/bin/uwsgi --master --emperor /etc/zaqar/uwsgi Finalize installation --------------------- Now that you have configured the web server and the database servers for a functional Messaging service, you need to start the service, make it start automatically with the system, and define the created ``MongoDB`` replica-set as Messaging's pool. #. Start the Messaging service on the web server: .. code-block:: console # start zaqar #. The upstart config above already makes the Messaging service start automatically after reboot on the web server, via the ``start on runlevel [2345]`` stanza. #. Configure pool: ..
code-block:: console # curl -i -X PUT https://WEB0.EXAMPLE-MESSAGES.NET:8888/v2/pools/POOL1 \ -d '{"weight": 100, "uri": "mongodb://MYDB0.EXAMPLE-MESSAGES.NET,MYDB1.EXAMPLE-MESSAGES.NET,MYDB2.EXAMPLE-MESSAGES.NET:27017/?replicaSet=catalog&w=2&readPreference=secondaryPreferred", "options": {"partitions": 8}}' \ -H "Client-ID: CLIENT_ID" \ -H "X-Auth-Token: TOKEN" \ -H "Content-type: application/json" Replace the ``POOL1`` variable with the desired name of a pool. Replace the ``CLIENT_ID`` variable with a universally unique identifier (UUID), which can be generated by, for example, the ``uuidgen`` utility. Replace the ``TOKEN`` variable with the authentication token retrieved from the Identity service. If you choose not to enable Keystone authentication, you won't have to pass a token. .. note:: The ``options`` key in the curl request above overrides any options (specified in the configuration file or by default) in the ``[drivers:message_store:mongodb]`` section of the Messaging service configuration file. .. tip:: In larger deployments, there should be many load balanced web servers. Also, the management store databases and the message store databases (pools) should be on different ``MongoDB`` replica-sets. .. _`MongoDB installation instructions`: https://docs.mongodb.org/manual/tutorial/install-mongodb-on-ubuntu/ .. _`MongoDB configuration reference`: https://docs.mongodb.org/v3.0/reference/configuration-options/ .. _`MongoDB security reference`: https://docs.mongodb.org/manual/security/ .. _`uWSGI configuration reference`: http://uwsgi-docs.readthedocs.io/en/latest/ ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/install/install.rst0000664000175100017510000000221115033040005020650 0ustar00mylesmyles.. _install: Install and configure ~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Messaging service, code-named zaqar. This section assumes that you already have a working OpenStack environment with at least the Identity service installed. Note that installation and configuration vary by distribution. .. toctree:: install-obs.rst install-rdo.rst install-ubuntu.rst Possible Minimum Scalable HA Setup ---------------------------------- A scalable HA (high availability) setup is out of scope for this chapter. For an HA setup, a load balancer has to be placed in front of the web servers. To provide high availability with minimum administration overhead, use the ``MongoDB`` driver for storage and the ``wsgi`` driver for transport. To have a small footprint while providing HA, you can use two web servers which will host the application and three ``MongoDB`` servers (configured as a replica-set) which will host the Messaging service's management store and message store databases. At larger scale, it is advisable to host the management store database and the message store database on different ``MongoDB`` replica-sets. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/install/next-steps.rst0000664000175100017510000000035715033040005021322 0ustar00mylesmyles.. _next-steps: Next steps ~~~~~~~~~~ Your OpenStack environment now includes the Messaging service. To add additional services, see the `additional documentation on installing OpenStack `_. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/install/verify.rst0000664000175100017510000000245115033040005020511 0ustar00mylesmyles..
_verify: Verify operation ~~~~~~~~~~~~~~~~ Verify operation of the Messaging service by creating messages via the curl utility: .. code-block:: console $ curl -i -X POST http://ZAQAR_ENDPOINT:8888/v2/queues/samplequeue/messages \ -d '{"messages": [{"body": {"event": 1}, "ttl": 600}, {"body": {"event": 2}, "ttl": 600}]}' \ -H "Content-type: application/json" \ -H "Client-ID: CLIENT_ID" \ -H "X-Auth-Token: TOKEN" Replace the ``CLIENT_ID`` variable with a universally unique identifier (UUID), which can be generated by, for example, the ``uuidgen`` utility. Replace the ``TOKEN`` variable with the authentication token retrieved from the Identity service. If you choose not to enable Keystone authentication, you won't have to pass a token. Replace the ``ZAQAR_ENDPOINT`` variable with the endpoint of the Messaging service. The normal response has status code 201 and looks something like this: .. code-block:: console HTTP/1.1 201 Created content-length: 135 content-type: application/json; charset=UTF-8 location: http://ZAQAR_ENDPOINT:8888/v2/queues/samplequeue/messages?ids=575f6f2515e5c87d779a9b20,575f6f2515e5c87d779a9b21 Connection: close {"resources": ["/v2/queues/samplequeue/messages/575f6f2515e5c87d779a9b20", "/v2/queues/samplequeue/messages/575f6f2515e5c87d779a9b21"]} ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5540135 zaqar-20.1.0.dev29/doc/source/user/0000775000175100017510000000000015033040026015764 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/user/authentication_tokens.rst0000664000175100017510000000300515033040005023113 0ustar00mylesmylesGenerate an Authentication Token ================================ You can use `cURL <http://curl.haxx.se/>`__ to try the authentication process in two steps: get a token, and send the token to a service. 1. Get an authentication token by providing your user name and either your API key or your password. For example, you can request a token by providing your user name and your password: :: $ curl -X POST https://localhost:5000/v2.0/tokens -d '{"auth":{"passwordCredentials":{"username": "joecool", "password":"coolword"}, "tenantId":"5"}}' -H 'Content-type: application/json' Successful authentication returns a token which you can use as evidence that your identity has already been authenticated. To use the token, pass it to other services as an ``X-Auth-Token`` header. Authentication also returns a service catalog, listing the endpoints you can use for Cloud services. 2. Use the authentication token to send a ``GET`` to a service you would like to use. Authentication tokens are typically valid for 24 hours. Applications should be designed to re-authenticate after receiving a 401 (Unauthorized) response from a service endpoint. **Note** If you programmatically parse an authentication response, be aware that service names are stable for the life of the particular service and can be used as keys. You should also be aware that a user's service catalog can include multiple uniquely-named services that perform similar functions. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/user/getting_started.rst0000664000175100017510000002572215033040005021712 0ustar00mylesmyles===================== Getting Started Guide ===================== Overview -------- Messaging service is a RESTful API-based messaging service.
It supports distributed web applications, and is based on the OpenStack Zaqar project. Messaging service is a vital component of large, distributed web applications. You can use Messaging service for public, private, and hybrid cloud environments. As you develop distributed web applications, you often have multiple agents set up to complete sets of tasks for those applications. These tasks can be anything from creating users to deleting blocks of storage. Messaging service provides a simple interface that creates these tasks as queues, messages, and claims. The interface then posts, claims, reads, and deletes them as the tasks are needed and performed. Messaging service handles the distribution of tasks, but it does not necessarily manage the order of the tasks. Applications handle the workflow at a higher level. This guide explains how to access and start using the API so that you can begin to use Messaging service for your applications. Instructions are given for how to properly enter the necessary URLs, using cURL, to set up and use a basic set of Messaging service operations. Prerequisites for Running Examples ---------------------------------- In order to run the examples in this guide, you must have the following prerequisites: - A Cloud account - A username and password, as specified during registration - Prior knowledge of HTTP/1.1 conventions - Basic familiarity with Cloud and RESTful APIs How Messaging service Works --------------------------- Following is an overview of how Messaging service works. For definitions of Messaging service terms, see the glossary below. 1. You create a queue to which producers or publishers post messages. 2. Workers (consumers or subscribers) claim or get a message from the queue, complete the work in that message, and delete the message. If a worker will be off-line before it completes the work in a message, the worker can release the claim, putting the message back into the queue for another worker to claim. 3. Subscribers monitor the claims from these queues to track activity and help troubleshoot errors. For the majority of use cases, Messaging service is not responsible for the ordering of messages. However, if there is only a single producer, Messaging service ensures that messages are handled in a First In, First Out (FIFO) order. Messaging Patterns ------------------ The Messaging service API supports a variety of messaging patterns, including the following: - Task distribution - Event broadcasting - Point-to-point messaging Task distribution ----------------- The task distribution pattern has the following characteristics: - A producer is programmed to send messages to a queue. - Multiple workers (or consumers) are programmed to monitor a queue. - Only one worker can claim a message so that no other worker can claim the message and duplicate the work. - The worker must delete the message when work is done. - TTL restores a message to an unclaimed state if the worker never finishes. This pattern is ideal for dispatching jobs to multiple processors. Event Broadcasting ------------------ Characteristics of the event broadcasting pattern are: - The publisher sends messages to a queue. - Multiple observers (or subscribers) get the messages in the queue. - Multiple observers take action on each message. - Observers send a marker to skip messages already seen. - TTL eventually deletes messages. This pattern is ideal for notification of events to multiple observers at once.
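To make the worker side of these patterns concrete, the following is a minimal, illustrative sketch in Python of the task distribution loop (claim, process, delete), written with the third-party ``requests`` library against the v1 claim and message resources shown later in this guide. The endpoint, queue name, and token are placeholders, not values from a real deployment:

.. code-block:: python

    import requests

    BASE = 'https://queues.api.openstack.org'  # placeholder endpoint
    HEADERS = {
        'Client-ID': 'e58668fc-26eb-11e3-8270-5b3128d43830',  # any client UUID
        'X-Auth-Token': 'TOKEN',  # placeholder Keystone token
    }

    # Claim up to 5 messages; claimed messages stay invisible to other
    # workers until the claim's TTL expires.
    resp = requests.post(BASE + '/v1/queues/samplequeue/claims?limit=5',
                         json={'ttl': 300, 'grace': 300}, headers=HEADERS)

    if resp.status_code == 201:  # 204 means there was nothing to claim
        for msg in resp.json():
            print('processing', msg['body'])  # application-specific work
            # Delete before the claim expires so the task runs only once;
            # the returned href already carries the claim_id parameter.
            requests.delete(BASE + msg['href'], headers=HEADERS)

If the worker crashes before the delete, the claim eventually expires and the message returns to the queue for another worker, which is exactly the TTL-based protection described above.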
Point-to-point messaging ------------------------ Characteristics of the point-to-point messaging pattern are: - The publisher sends messages to a queue. - The consumer gets the messages in the queue. - The consumer can reply with the result of processing a message by sending another message to the same queue (queues are duplex by default). - The publisher gets replies from the queue. - The consumer sends a marker to skip messages already seen. - TTL eventually deletes messages. This pattern is ideal for communicating with a specific client, especially when a reply is desired from that client. Messaging service Operations ---------------------------- This section lists all of the operations that are available in the Messaging service API. This document uses some of the most common operations in `OpenStack API Reference `__. For details about all of the operations, see the Messaging service API v2 Reference. Home Document ~~~~~~~~~~~~~ The following operation is available for the home document: - Get Home Document Queues ~~~~~~ The following operations are available for queues: - Create Queue - List Queues - Get Queue - Update Queue - Get Queue Stats - Delete Queue Messages ~~~~~~~~ The following operations are available for messages: - Post Message - Get Messages - Get a Specific Message - Get a Set of Messages by ID - Delete Message - Delete a Set of Messages by ID Claims ~~~~~~ The following operations are available for claims: - Claim Messages - Get Claim - Update Claim - Release Claim Subscriptions ~~~~~~~~~~~~~ The following operations are available for subscriptions: - Create Subscriptions - List Subscriptions - Get Subscription - Update Subscription - Delete Subscription Pools ~~~~~ The following operations are available for Pools: - Create Pools - List Pools - Get Pool - Update Pool - Delete Pool Flavors ~~~~~~~ The following operations are available for Flavors: - Create Flavors - List Flavors - Get Flavor - Update Flavors - Delete Flavors Health ~~~~~~ The following operations are available for Health: - Ping for basic health status - Get detailed health status Use Cases --------- Queuing systems are used to coordinate tasks within an application. Here are some examples: - **Backup**: A backup application might use a queuing system to connect the actions that users do in a control panel to the customer's backup agent on a server. When a customer wants to start a backup, they simply choose "start backup" on a panel. Doing so causes the producer to put a "startBackup" message into the queue. Every few minutes, the agent on the customer's server (the worker) checks the queue to see if it has any new messages to act on. The agent claims the "startBackup" message and kicks off the backup on the customer's server. - **Storage**: Gathering statistics for a large, distributed storage system can be a long process. The storage system can use a queuing system to ensure that jobs complete, even if one initially fails. Since messages are not deleted until after the worker has completed the job, the storage system can make sure that no job goes undone. If the worker fails to complete the job, the message stays in the queue to be completed by another server. In this case, a worker claims a message to perform a statistics job, but if the claim's TTL expires because the job takes too long to complete (meaning that it most likely failed), the message is put back into the queue. By giving the claim a TTL, applications can protect themselves from workers going off-line while processing a message.
After a claim's TTL expires, the message is put back into the queue for another worker to claim. - **Email**: The team for an email application is constantly migrating customer email from old versions to newer ones, so they develop a tool to let customers do it themselves. The migrations take a long time, so they cannot be done with single API calls, or by a single server. When a user starts a migration job from their portal, the migration tool sends messages to the queue with details of how to run the migration. A set of migration engines, the consumers in this case, periodically check the queues for new migration tasks, claim the messages, perform the migration, and update a database with the migration details. This process allows a set of servers to work together to accomplish large migrations in a timely manner. Following are some generic use cases for Messaging service: - Distribute tasks among multiple workers (transactional job queues) - Forward events to data collectors (transactional event queues) - Publish events to any number of subscribers (event broadcasting) - Send commands to one or more agents (point-to-point messaging or event broadcasting) - Request an action or get information from a Remote Procedure Call (RPC) agent (point-to-point messaging) Additional Resources -------------------- For more information about using the API, see the Messaging service API v2 Reference. All you need to get started with Messaging service is the getting started guide, the reference, and your Cloud account. For information about the OpenStack Zaqar API, see `OpenStack API Reference `__. This API uses standard HTTP 1.1 response codes as documented at `www.w3.org/Protocols/rfc2616/rfc2616-sec10.html <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html>`__. Glossary -------- **Claim** The process of a worker checking out a message to perform a task. Claiming a message prevents other workers from attempting to process the same messages. **Claim TTL** Defines how long a message will be in the claimed state. A message can be claimed by one worker at a time. **Consumer** A server that claims messages from the queue. **Message** A task, a notification, or any meaningful data that a producer or publisher sends to the queue. A message exists until it is deleted by a recipient or automatically by the system based on a TTL (time-to-live) value. **Message TTL** Defines how long a message will be accessible. **Producer** A server or application that sends messages to the queue. **Producer - Consumer** A pattern where each worker application that reads the queue has to claim the message in order to prevent duplicate processing. Later, when work is done, the worker is responsible for deleting the message. If the message is not deleted within a predefined time, it can be claimed by other workers. **Publisher** A server or application that posts messages to the queue with the intent to distribute information or updates to multiple subscribers. **Publisher - Subscriber** A pattern where all worker applications have access to all messages in the queue. Workers cannot delete or update messages. **Queue** The entity that holds messages. Ideally, a queue is created per work type. For example, if you want to compress files, you would create a queue dedicated to this job. Any application that reads from this queue would only compress files. **Subscriber** An observer that watches messages like an RSS feed but does not claim any messages. **TTL** Time-to-live value. **Worker** A client that claims messages from the queue and performs actions based on those messages.
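As a counterpart to the worker sketch earlier in this guide, here is a minimal, illustrative producer in Python for the producer - consumer pattern defined above. It mirrors the v1 Post Message request shown in the companion guide on working with the API; the endpoint, queue name, and token are placeholders:

.. code-block:: python

    import requests

    BASE = 'https://queues.api.openstack.org'  # placeholder endpoint
    HEADERS = {
        'Client-ID': 'e58668fc-26eb-11e3-8270-5b3128d43830',  # any client UUID
        'X-Auth-Token': 'TOKEN',  # placeholder Keystone token
    }

    # Post two tasks; each message carries an arbitrary JSON body and a TTL
    # (in seconds) after which the server deletes it automatically.
    resp = requests.post(
        BASE + '/v1/queues/samplequeue/messages',
        json=[{'ttl': 300, 'body': {'event': 'BackupStarted'}},
              {'ttl': 300, 'body': {'event': 'BackupProgress'}}],
        headers=HEADERS,
    )
    resp.raise_for_status()
    # The response lists a resource path per message, in submission order.
    print(resp.json()['resources'])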
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/user/headers_queue_api_working.rst0000664000175100017510000003004115033040005023721 0ustar00mylesmylesCommon Headers ============== Each request to the Message Queuing API must include certain standard and extended HTTP headers (as shown in the following table). These headers provide host, agent, authentication, and other pertinent information to the server. The following table provides the common headers used by the API. .. list-table:: :widths: 50 50 :header-rows: 1 * - Header - Description * - Host - Host name of the API * - Date - Current date and time * - Accept - Media type to use. Initially, only ``application/json`` is supported. **Note: The "Accept" header is required.** * - Accept-Encoding - Specifies that the agent accepts gzip-encoded response bodies * - Content-Type - ``application/json`` * - Content-Length - For ``POST`` or ``PUT`` requests, the length in bytes of the message document being submitted * - X-Auth-Token - Authorization token * - X-Project-Id - An ID for a project to which the value of X-Auth-Token grants access. Queues are created under this project. The project ID is the same as the account ID (also sometimes called tenant ID). * - Client-ID - A UUID for each client instance. The UUID must be submitted in its canonical form (for example, 3381af92-2b9e-11e3-b191-71861300734c). The client generates the Client-ID once. Client-ID persists between restarts of the client so the client should reuse that same Client-ID. **Note: All message-related operations require the use of "Client-ID" in the headers to ensure that messages are not echoed back to the client that posted them, unless the client explicitly requests this.** Working with the Message Queuing API ==================================== This chapter contains a simple exercise with some basic Message Queuing requests that you will commonly use. Example requests are provided in cURL, followed by the response. For a complete list of operations available for Message Queuing, see :doc:`getting_started` Each operation is fully described in the `Message Queuing API v2 Reference `_. Create Queue ------------ The Create Queue operation creates a queue in the region of your choice. The body of the PUT request is empty. The template is as follows: .. code:: rest PUT {endpoint}/queues/{queue_name} The ``queue_name`` parameter specifies the name to give the queue. The name *must not* exceed 64 bytes in length and is limited to US-ASCII letters, digits, underscores, and hyphens. Following are examples of a Create Queue request and response: .. code-block:: bash curl -i -X PUT https://queues.api.openstack.org/v2/queues/samplequeue \ -H "X-Auth-Token: " \ -H "Accept: application/json" \ -H "X-Project-Id: " .. code:: rest HTTP/1.1 201 Created Content-Length: 0 Location: /v2/queues/samplequeue Post Message ------------ The Post Message operation inserts one or more messages in a queue. You can submit up to 10 messages in a single request, but you must encapsulate them in a collection container (an array in JSON, even for a single message - without the JSON array, you receive an "Invalid body request" error message). You can use the resulting value of the location header or response body to retrieve the created messages for further processing if needed. The template is as follows: .. 
code:: rest POST {endpoint}/queues/{queue_name}/messages The client specifies only the body and ttl attributes for the message. Metadata, such as id and age, is added. The response body contains a list of resource paths that correspond to each message submitted in the request, in the same order as they were submitted. If a server-side error occurs during the processing of the submitted messages, a partial list is returned. The ``partial`` attribute is set to ``true``, and the client should try to post the remaining messages again. **Important** The ``partial`` attribute has been deprecated in the v1.0 API and is not available in the v1.1 API. Drivers are now required to operate in a transactional manner. In other words, either all messages must be posted, or none of them. The ``body`` attribute specifies an arbitrary document that constitutes the body of the message being sent. The following rules apply for the maximum size: - The size is limited to 256 KB for the entire request body (as-is), including whitespace. - The maximum size of posted messages is the maximum size of the entire request document (rather than the sum of the individual message ``body`` field values as it was in earlier releases). On error, the client is notified of how much the request exceeded the limit. The document *must* be valid JSON. (The Message Queuing service validates it.) The ``ttl`` attribute specifies the lifetime of the message. When the lifetime expires, the server deletes the message and removes it from the queue. Valid values are 60 through 1209600 seconds (14 days). **Note** The server might not actually delete the message until its age reaches (ttl + 60) seconds. So there might be a delay of 60 seconds after the message expires before it is deleted. The following are examples of a Post Message request and response: .. code:: bash curl -i -X POST https://queues.api.openstack.org/v1/queues/samplequeue/messages -d \ '[{"ttl": 300,"body": {"event": "BackupStarted"}},{"ttl": 60,"body": {"play": "hockey"}}]' \ -H "Content-type: application/json" \ -H "Client-ID: e58668fc-26eb-11e3-8270-5b3128d43830" \ -H "X-Auth-Token: " \ -H "Accept: application/json" \ -H "X-Project-Id: " .. code:: rest HTTP/1.1 201 Created Content-Length: 153 Content-Type: application/json; charset=utf-8 Location: /v1/queues/samplequeue/messages?ids=51ca00a0c508f154c912b85c,51ca00a0c508f154c912b85d {"partial": false, "resources": ["/v1/queues/samplequeue/messages/51ca00a0c508f154c912b85c", "/v1/queues/samplequeue/messages/51ca00a0c508f154c912b85d"]} Claim Messages -------------- The Claim Messages operation claims a set of messages (up to the value of the ``limit`` parameter) from oldest to newest and skips any messages that are already claimed. If there are no messages available to claim, the Message Queuing service returns an HTTP ``204 No Content`` response code. The template is as follows: .. code-block:: rest POST {endpoint}/queues/{queue_name}/claims{?limit} Content-Type: application/json { "ttl": {claim_ttl}, "grace": {message_grace} } The client (worker) needs to delete the message when it has finished processing it. The client deletes the message before the claim expires to ensure that the message is processed only once. If a client needs more time, the Cloud Service provides the Update Claim operation to make changes. See the Message Queuing API v1 Reference for a description of this operation. As part of the delete operation, workers specify the claim ID (which is best done by simply using the provided href).
If workers perform these actions, then if a claim simply expires, the server can return an error and notify the worker of a possible race condition. This action gives the worker a chance to roll back its own processing of the given message because another worker can claim the message and process it. The age given for a claim is relative to the server's clock. The claim's age is useful for determining how quickly messages are getting processed and whether a given message's claim is about to expire. When a claim expires, it is released back to the queue for other workers to claim. (If the original worker failed to process the message, another client worker can then claim the message.) The ``limit`` parameter specifies the number of messages to claim. The ``limit`` parameter is configurable. The default is 20. Messages are claimed based on the number of messages available. The server might claim and return fewer than the requested number of messages. The ``ttl`` attribute specifies the lifetime of the claim. While messages are claimed, they are not available to other workers. The value must be between 60 and 43200 seconds (12 hours). The ``grace`` attribute specifies the message grace period in seconds. Valid values are between 60 and 43200 seconds (12 hours). To deal with workers that have stopped responding (for up to 1209600 seconds or 14 days, including claim lifetime), the server extends the lifetime of claimed messages to be at least as long as the lifetime of the claim itself, plus the specified grace period. If a claimed message normally lives longer than the grace period, its expiration is not adjusted. Following are examples of a Claim Messages request and response: .. code:: bash curl -i -X POST https://queues.api.openstack.org/v1/queues/samplequeue/claims -d \ '{"ttl": 300,"grace":300}' \ -H "Content-type: application/json" \ -H "Client-ID: e58668fc-26eb-11e3-8270-5b3128d43830" \ -H "X-Auth-Token: " \ -H "Accept: application/json" \ -H "X-Project-Id: " .. code-block:: rest HTTP/1.1 201 Created Content-Length: 164 Content-Type: application/json; charset=utf-8 Location: /v1/queues/samplequeue/claims/51ca011c821e7250f344efd6 X-Project-Id: [ { "body": { "event": "BackupStarted" }, "age": 124, "href": "\/v1\/queues\/samplequeue\/messages\/51ca00a0c508f154c912b85c?claim_id=51ca011c821e7250f344efd6", "ttl": 300 } ] Delete Message with Claim ID ---------------------------- The Delete Message operation deletes messages. The template is as follows: .. code:: rest DELETE {endpoint}/queues/{queue_name}/messages/{message_id}{?claim_id} The ``message_id`` parameter specifies the message to delete. The ``claim_id`` parameter specifies that the message is deleted only if it has the specified claim ID and that claim has not expired. This specification is useful for ensuring that only one worker processes any given message. When a worker's claim expires before it deletes a message that it has processed, the worker must roll back any actions it took based on that message because another worker can now claim and process the same message. Following are examples of a Delete Message request and response: .. code:: bash curl -i -X DELETE https://queues.api.openstack.org/v1/queues/samplequeue/messages/51ca00a0c508f154c912b85c?claim_id=51ca011c821e7250f344efd6 \ -H "Content-type: application/json" \ -H "X-Auth-Token: " \ -H "Client-ID: e58668fc-26eb-11e3-8270-5b3128d43830" \ -H "Accept: application/json" \ -H "X-Project-Id: " ..
Release Claim ------------- The Release Claim operation immediately releases a claim, making any remaining, undeleted messages associated with the claim available to other workers. The template is as follows: .. code:: rest DELETE {endpoint}/queues/{queue_name}/claims/{claim_id} This operation is useful when a worker is performing a graceful shutdown, fails to process one or more messages, or is taking longer than expected to process messages and wants to make the remainder of the messages available to other workers. Following are examples of a Release Claim request and response: .. code:: bash curl -i -X DELETE https://queues.api.openstack.org/v1/queues/samplequeue/claims/51ca011c821e7250f344efd6 \ -H "Content-type: application/json" \ -H "X-Auth-Token: " \ -H "Client-ID: e58668fc-26eb-11e3-8270-5b3128d43830" \ -H "Accept: application/json" \ -H "X-Project-Id: " .. code:: rest HTTP/1.1 204 No Content Delete Queue ------------ The Delete Queue operation immediately deletes a queue and all of its existing messages. The template is as follows: .. code:: rest DELETE {endpoint}/queues/{queue_name} Following are examples of a Delete Queue request and response: .. code:: bash curl -i -X DELETE https://queues.api.openstack.org/v1/queues/samplequeue \ -H "Content-type: application/json" \ -H "X-Auth-Token: " \ -H "Accept: application/json" \ -H "X-Project-Id: " .. code:: rest HTTP/1.1 204 No Content ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/user/index.rst0000664000175100017510000000027515033040005017626 0ustar00mylesmyles========== User Guide ========== .. toctree:: :maxdepth: 2 getting_started send_request_api authentication_tokens headers_queue_api_working notification_delivery_policy ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/user/notification_delivery_policy.rst0000664000175100017510000000531015033040005024462 0ustar00mylesmyles.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ====================================== The Notification Delivery Policy Guide ====================================== Zaqar supports a notification delivery policy for the webhook subscription type. It takes effect when a notification sent from Zaqar to the subscriber fails. This guide shows how to use this feature: Webhook ------- .. note:: You should make sure that the message notification is enabled. By default, the ``message_pipeline`` config option in [storage] section should be set like: message_pipeline = zaqar.notification.notifier 1. Create the queue with _retry_policy metadata like this: .. code:: json { "_retry_policy": { "retries_with_no_delay": "", "minimum_delay_retries": "", "minimum_delay": "", "maximum_delay": "", "maximum_delay_retries": "", "retry_backoff_function": "", "ignore_subscription_override": ""} } - 'minimum_delay' and 'maximum_delay' mean the delay time in seconds. - 'retry_backoff_function' means the name of the retry backoff function. There is an enum in Zaqar that contains all valid values. Zaqar now supports the retry backoff functions 'linear', 'arithmetic', 'geometric' and 'exponential'. - 'minimum_delay_retries' and 'maximum_delay_retries' mean the number of retries with the 'minimum_delay' or 'maximum_delay' delay time. If the value of retry_policy is an empty dict, Zaqar will use the following default values for those keys: - retries_with_no_delay=3 - minimum_delay_retries=3 - minimum_delay=5 - maximum_delay=30 - maximum_delay_retries=3 - retry_backoff_function=linear - ignore_subscription_override=False 2. Create a subscription with options like the queue metadata above. If the user does not set the options, Zaqar will use the retry policy in the queue's metadata. If the user does set them, Zaqar will use the retry policy in the options by default; if the user still wants to use the retry policy in the queue's metadata, they can set ignore_subscription_override = True. A concrete sketch of both steps follows.
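The following Python sketch walks through both steps with concrete values. It assumes a v2 API deployment reachable at a placeholder endpoint with placeholder credentials, and the subscriber URL is likewise hypothetical; only the retry-policy keys and defaults come from this guide:

.. code:: python

    import requests

    # Placeholder endpoint and credentials; substitute your own values.
    ENDPOINT = "http://127.0.0.1:8888/v2"
    HEADERS = {
        "Client-ID": "e58668fc-26eb-11e3-8270-5b3128d43830",
        "X-Auth-Token": "<your-token>",
    }

    # Step 1: create the queue with a concrete _retry_policy
    # (these values mirror the documented defaults).
    retry_policy = {
        "retries_with_no_delay": 3,
        "minimum_delay_retries": 3,
        "minimum_delay": 5,
        "maximum_delay": 30,
        "maximum_delay_retries": 3,
        "retry_backoff_function": "linear",
        "ignore_subscription_override": False,
    }
    requests.put(
        ENDPOINT + "/queues/sample_queue",
        json={"_retry_policy": retry_policy},
        headers=HEADERS,
    ).raise_for_status()

    # Step 2: create a webhook subscription; per-subscription retry
    # options go into "options" alongside the subscriber URL.
    requests.post(
        ENDPOINT + "/queues/sample_queue/subscriptions",
        json={
            "subscriber": "http://example.com/notifications",  # hypothetical
            "ttl": 3600,
            "options": {"minimum_delay": 10, "minimum_delay_retries": 5},
        },
        headers=HEADERS,
    ).raise_for_status()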
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/doc/source/user/send_request_api.rst0000664000175100017510000000642315033040005022052 0ustar00mylesmylesSend Requests to the API ======================== You have several options for sending requests through an API: - Developers and testers may prefer to use cURL, the command-line tool from http://curl.haxx.se/. With cURL you can send HTTP requests and receive responses back from the command line. - If you prefer a more graphical interface, the REST client for Firefox also works well for testing and trying out commands, see https://addons.mozilla.org/en-US/firefox/addon/restclient/. - You can also download and install rest-client, a Java application to test RESTful web services, from https://github.com/wiztools/rest-client. Sending API Requests Using cURL ------------------------------- cURL is a command-line tool that is available in UNIX® system-based environments and Apple Mac OS X® systems, and can be downloaded for Microsoft Windows® to interact with the REST interfaces. For more information about cURL, visit http://curl.haxx.se/. cURL enables you to transmit and receive HTTP requests and responses from the command line or from within a shell script. As a result, you can work with the REST API directly without using one of the client APIs. The following cURL command-line options are used in this guide to run the examples. .. list-table:: :widths: 50 50 :header-rows: 1 * - Option - Description * - ``-d`` - Sends the specified data in a ``POST`` request to the HTTP server. * - ``-i`` - Includes the HTTP header in the output. * - ``-H HEADER`` - Specifies an HTTP header in the request. * - ``-X`` - Specifies the request method to use when communicating with the HTTP server. The specified request is used instead of the default method, which is GET. For example, ``-X PUT`` specifies to use the ``PUT`` request method. **Note** If you have the tools, you can run the cURL JSON request examples with the following options to format the output from cURL: `` | python -mjson.tool``.
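If you would rather script requests than type them, the same headers and JSON bodies can be sent from Python with the third-party ``requests`` library. A minimal sketch, with placeholder endpoint and credentials, that also reproduces the ``json.tool``-style formatting:

.. code:: python

    import json
    import requests

    # Placeholder endpoint and credentials; substitute your own values.
    url = "https://queues.api.openstack.org/v1/queues"
    headers = {
        "Accept": "application/json",
        "Client-ID": "e58668fc-26eb-11e3-8270-5b3128d43830",
        "X-Auth-Token": "<your-token>",
        "X-Project-Id": "<your-project-id>",
    }

    resp = requests.get(url, headers=headers)
    resp.raise_for_status()
    # Pretty-print the JSON body, like piping cURL through json.tool.
    print(json.dumps(resp.json(), indent=4))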
Copying and Pasting cURL Request Examples into a Terminal Window ---------------------------------------------------------------- To run the cURL request examples shown in this guide on Linux or Mac systems, perform the following actions: 1. Copy and paste each example from the HTML version of this guide into an ASCII text editor (for example, vi or TextEdit). You can click on the small document icon to the right of each request example to select it. 2. Modify each example with your required account information and so forth, as detailed in this guide. 3. After you are finished modifying the text for the cURL request example with your information (for example, ``your_username`` and ``your_api_key``), paste it into your terminal window. 4. Press Enter to run the cURL command. **Note** The carriage returns in the cURL request examples that are part of the cURL syntax are escaped with a backslash (\\) in order to avoid prematurely terminating the command. However, you should not escape carriage returns inside the JSON message within the command. **Tip** If you have trouble copying and pasting the examples as described, try typing the entire example on one long line, removing all the backslash line continuation characters. ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5550137 zaqar-20.1.0.dev29/etc/0000775000175100017510000000000015033040026013514 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/etc/README-policy.json.sample0000664000175100017510000000050115033040005020112 0ustar00mylesmylesTo generate the sample policy.yaml file, run the following command from the top level of the zaqar directory: tox -egenpolicy Or run the command directly: oslopolicy-sample-generator --config-file etc/zaqar-policy-generator.conf The output file will be in the etc folder, named "zaqar.policy.yaml.sample" by default.././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/etc/logging.conf.sample0000664000175100017510000000161415033040005017270 0ustar00mylesmyles[loggers] keys=root,server,combined [formatters] keys=normal,normal_with_name,debug [handlers] keys=production,file,devel [logger_root] level=NOTSET handlers=devel [logger_server] level=DEBUG handlers=devel qualname=zaqar-server [logger_combined] level=DEBUG handlers=devel qualname=zaqar-combined [handler_production] class=handlers.SysLogHandler level=ERROR formatter=normal_with_name args=(('localhost', handlers.SYSLOG_UDP_PORT), handlers.SysLogHandler.LOG_USER) [handler_file] class=FileHandler level=DEBUG formatter=normal_with_name args=('zaqar.log', 'w') [handler_devel] class=StreamHandler level=NOTSET formatter=debug args=(sys.stdout,) [formatter_normal] format=%(asctime)s %(levelname)s %(message)s [formatter_normal_with_name] format=(%(name)s): %(asctime)s %(levelname)s %(message)s [formatter_debug] format=(%(name)s): %(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5550137 zaqar-20.1.0.dev29/etc/oslo-config-generator/0000775000175100017510000000000015033040026017717 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/etc/oslo-config-generator/zaqar.conf0000664000175100017510000000045215033040005021702 0ustar00mylesmyles[DEFAULT] wrap_width = 79 output_file = etc/zaqar.conf.sample namespace = zaqar namespace = keystonemiddleware.auth_token namespace = oslo.cache namespace = oslo.log namespace = oslo.messaging namespace = oslo.middleware.cors namespace = osprofiler namespace = oslo.policy namespace = oslo.reports ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/etc/uwsgi.conf0000664000175100017510000000020515033040005015513 0ustar00mylesmyles[uwsgi] strict = true http = :8888
processes = 1 threads = 4 wsgi-file = /opt/stack/zaqar/zaqar/transport/wsgi/app.py callable = app ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/etc/zaqar-benchmark-messages.json0000664000175100017510000000570715033040005021270 0ustar00mylesmyles[ { "weight": 0.8, "doc": { "ttl": 60, "body": { "id": "7FA23C90-62F7-40D2-9360-FBD5D7D61CD1", "evt": "Wakeup" } } }, { "weight": 0.1, "doc": { "ttl": 3600, "body": { "ResultSet": { "totalResultsAvailable": 1827221, "totalResultsReturned": 2, "firstResultPosition": 1, "Result": [ { "Title": "potato jpg", "Summary": "Kentang Si bungsu dari keluarga Solanum tuberosum L ini ternyata memiliki khasiat untuk mengurangi kerutan jerawat bintik hitam dan kemerahan pada kulit Gunakan seminggu sekali sebagai", "Url": "http://www.mediaindonesia.com/spaw/uploads/images/potato.jpg", "ClickUrl": "http://www.mediaindonesia.com/spaw/uploads/images/potato.jpg", "RefererUrl": "http://www.mediaindonesia.com/mediaperempuan/index.php?ar_id=Nzkw", "FileSize": 22630, "FileFormat": "jpeg", "Height": 362, "Width": 532, "Thumbnail": { "Url": "http://thm-a01.yimg.com/nimage/557094559c18f16a", "Height": 98, "Width": 145 } }, { "Title": "potato jpg", "Summary": "Introduction of puneri aloo This is a traditional potato preparation flavoured with curry leaves and peanuts and can be eaten on fasting day Preparation time 10 min", "Url": "http://www.infovisual.info/01/photo/potato.jpg", "ClickUrl": "http://www.infovisual.info/01/photo/potato.jpg", "RefererUrl": "http://sundayfood.com/puneri-aloo-indian-%20recipe", "FileSize": 119398, "FileFormat": "jpeg", "Height": 685, "Width": 1024, "Thumbnail": { "Url": "http://thm-a01.yimg.com/nimage/7fa23212efe84b64", "Height": 107, "Width": 160 } } ] } } } }, { "weight": 0.1, "doc": { "ttl": 360, "body": { "id": "7FA23C90-62F7-40D2-9360-FBD5D7D61CD1", "evt": "StartBackup", "files": [ "/foo/bar/stuff/thing.dat" ] } } } ] ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/etc/zaqar-benchmark.conf.sample0000664000175100017510000000022015033040005020700 0ustar00mylesmyles[DEFAULT] # verbose = False # server_url = http://localhost:8888 # messages_path = some/path/to/messages.json # queue_prefix = ogre-test-queue- ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/etc/zaqar-policy-generator.conf0000664000175100017510000000010715033040005020755 0ustar00mylesmyles[DEFAULT] output_file = etc/zaqar.policy.yaml.sample namespace = zaqar ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/pyproject.toml0000664000175100017510000000010515033040005015646 0ustar00mylesmyles[build-system] requires = ["pbr>=6.1.1"] build-backend = "pbr.build" ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5370138 zaqar-20.1.0.dev29/releasenotes/0000775000175100017510000000000015033040026015432 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5580137 zaqar-20.1.0.dev29/releasenotes/notes/0000775000175100017510000000000015033040026016562 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/.gitignore0000664000175100017510000000000015033040005020535 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 
xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/Integrate-OSprofiler-with-zaqar-59d0dc3d0326947d.yaml0000664000175100017510000000041515033040005027674 0ustar00mylesmyles--- features: - The OSprofiler is integrated into Zaqar in Ocata. It is a library from oslo. It aims to analyse performance bottleneck issues by making it possible to generate one trace per request affecting all involved services and build a tree of calls. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/add-a-notifier-using-trust-271d9cd1d2b4cdeb.yaml0000664000175100017510000000107515033040005027161 0ustar00mylesmyles--- features: - Add a new webhook notifier using trust authentication. When using the 'trust+' URL prefix, Zaqar will create a Keystone trust for the user, and then use it when a notification happens to authenticate against Keystone and send the token to the endpoint. - Support 'post_data' and 'post_headers' options on subscribers, allowing customization of the payload when having a webhook subscriber. The 'post_data' option supports the '$zaqar_message$' string template, which will be replaced by the serialized JSON message if specified. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/add-swift-backend-4eb9b43913f39d18.yaml0000664000175100017510000000045615033040005025065 0ustar00mylesmyles--- features: - The new Swift storage backend is added to Zaqar in Ocata. It is currently experimental. To use this backend, you should modify the "drivers" section in the config file. [Blueprint `swift-storage-driver `_] ././@PaxHeader0000000000000000000000000000021100000000000010207 xustar00115 path=zaqar-20.1.0.dev29/releasenotes/notes/allow-configuration-of-websocket-notification-fa542fbf761378d3.yaml 22 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/allow-configuration-of-websocket-notification-fa542fbf761378d30000664000175100017510000000047615033040005031772 0ustar00mylesmyles--- fixes: - Add two configuration options for the notification endpoint of the websocket server, instead of a random port and local address. One is 'notification-bind', the address on which the notification server will listen. The other is 'notification-port', the port on which the notification server will listen. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/configuration-refactor-0ff219ac59c96347.yaml0000664000175100017510000000041515033040005026264 0ustar00mylesmylesother: - | The code structure for configuration files is changed. This is transparent to end users, but people who work on downstream changes should pay attention. Please refactor your private configurations to the ``zaqar/conf/`` folder as well. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/delete_messages_with_claim_ids-64bb8105de3768b1.yaml0000664000175100017510000000034215033040005027767 0ustar00mylesmyles--- features: - Add a new option named 'message_delete_with_claim_id'; when it is True, deleting messages requires both claim_ids and message_ids in the request parameters. This will improve the security of messages.
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/deprecate-json-formatted-policy-file-f2abc160715c3f9b.yaml0000664000175100017510000000176015033040005031035 0ustar00mylesmyles--- upgrade: - | The default value of the ``[oslo_policy] policy_file`` config option has been changed from ``policy.json`` to ``policy.yaml``. Operators who are utilizing customized or previously generated static policy JSON files (which are not needed by default) should generate new policy files or convert them to YAML format. Use the `oslopolicy-convert-json-to-yaml `_ tool to convert a JSON to YAML formatted policy file in a backward compatible way. deprecations: - | Use of JSON policy files was deprecated by the ``oslo.policy`` library during the Victoria development cycle. As a result, this deprecation is being noted in the Wallaby cycle with an anticipated future removal of support by ``oslo.policy``. As such, operators will need to convert to YAML policy files. Please see the upgrade notes for details on migration of any custom policy files. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/deprecate-v11-976cccc1b56a28e7.yaml0000664000175100017510000000042615033040005024312 0ustar00mylesmyles--- deprecations: - Zaqar API v2 has been released for several cycles and it is integrated as the default API version by most of the OpenStack services. So it is time to deprecate v1.1 in favor of v2. Now in the Newton cycle, Zaqar API v1.1 is officially deprecated. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/drop-py-2-7-09cf95d7d843d8f6.yaml0000664000175100017510000000030315033040005023577 0ustar00mylesmyles--- upgrade: - | Python 2.7 support has been dropped. The last release of Zaqar to support py2.7 is OpenStack Train. The minimum version of Python now supported by Zaqar is Python 3.6. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/email-notification-by-internal-tool-08910ab2247c3864.yaml0000664000175100017510000000051015033040005030400 0ustar00mylesmyles--- features: - | Currently the email subscription in Zaqar relies on third-party tools, such as "sendmail". It means that the deployer should install them outside of Zaqar; if they forget, Zaqar will raise an internal error. This work lets Zaqar support email subscription by itself using the ``smtp`` python library. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/encrypted-messages-in-queue-d7438d4f185be444.yaml0000664000175100017510000000072115033040005027141 0ustar00mylesmyles--- features: - | To enhance the security of the messaging service, a queue in Zaqar supports encrypting messages before storing them into storage backends, and decrypting them when they are claimed by a consumer. To enable this feature, users just need to set "_enable_encrypt_messages=True" when creating the queue. AES-256 is used as the default encryption algorithm and the encryption key is configurable in zaqar.conf.
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/falcon-4-e4b5aab856e3228c.yaml0000664000175100017510000000010615033040005023336 0ustar00mylesmyles--- fixes: - | Fixed compatibility with falcon 4.0.0 and later. ././@PaxHeader0000000000000000000000000000021000000000000010206 xustar00114 path=zaqar-20.1.0.dev29/releasenotes/notes/fix-detailed-queue-without-reserved-metadata-b53857ed9821fe76.yaml 22 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/fix-detailed-queue-without-reserved-metadata-b53857ed9821fe76.0000664000175100017510000000035615033040005031521 0ustar00mylesmyles--- fixes: - Zaqar didn't return the reserved metadata when listing detailed queues. After this fix, Zaqar will return the reserved metadata '_default_message_ttl' and '_max_messages_post_size' in the response when listing detailed queues. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/fix_auth_issue_for_root_path-b15e1c4e92e4e8b1.yaml0000664000175100017510000000036515033040005027706 0ustar00mylesmyles--- fixes: - | When accessing the root path of the Zaqar service, for example: curl GET http://127.0.0.1:8888/, users would see a 401 error, which would cause some front-end proxies (like HAProxy) to complain. Now this issue has been fixed. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/fix_subscription_limit-c3cdc9385825285a.yaml0000664000175100017510000000032515033040005026400 0ustar00mylesmyles--- fixes: - Query for all subscriptions on a given queue by taking into account the returned marker, if any. Without this fix, only 10 subscriptions can be extracted from the database to send notifications. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/introduce-guru-to-zaqar-ac7b51c764503829.yaml0000664000175100017510000000045115033040005026232 0ustar00mylesmyles--- features: - Introduce Guru to Zaqar. Guru is a mechanism whereby developers and system administrators can generate a report about the state of a running Zaqar executable. This report is called a *Guru Meditation Report*. Guru now supports all of the wsgi, websocket and uwsgi modes. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/introduce-topic-resource-9b40674cac06bdc2.yaml0000664000175100017510000000065715033040005026675 0ustar00mylesmyles--- features: - Introduce a new resource called Topic into Zaqar. Topic is a concept from AWS Simple Notification Service (SNS), and it is associated with subscriptions. Users can send messages to a topic, and then the subscribers will get the messages according to different protocols, like http, email, sms, etc. This feature will help Zaqar to split the Messaging Queue Service and the Notification Service clearly. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/lazy-queues-in-subscriptions-6bade4a1b8eca3e5.yaml0000664000175100017510000000055615033040005027753 0ustar00mylesmyles--- features: - Queues now behave lazily in subscriptions as well. So there is no need for the user to pre-create a queue before creating a subscription for this queue. Zaqar will create the queue automatically on the subscription creation request.
As before, all subscriptions will continue to stay active even if the corresponding queue was deleted. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/purge-queue-6788a249ee59d55a.yaml0000664000175100017510000000032315033040005024066 0ustar00mylesmylesfeatures: - A new queue action is added so that users can purge a queue quickly. That means all the messages and subscriptions will be deleted automatically but the metadata of the queue will be kept. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/queue-filter-support-b704a1c27f7473b9.yaml0000664000175100017510000000030415033040005025717 0ustar00mylesmyles--- features: - | Support for queue filtering when listing queues. With this feature, users can add a filter of name or metadata in the query string parameters of the queue list to filter queues. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/redis-sentinel-authentication-93fa9b1846979e41.yaml0000664000175100017510000000034615033040005027511 0ustar00mylesmyles--- features: - | Now the Redis driver supports authentication with Redis Sentinel. To use this feature, add the ``redis_password`` query to the Redis URI. The ``redis_username`` can be used when the ACL feature is enabled. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/redis-username-98a265f61fca6a1c.yaml0000664000175100017510000000036315033040005024666 0ustar00mylesmyles--- features: - | The Redis messaging store now supports authentication with a username. deprecation: - | The password in the redis uri will need to be prefixed by ':' in a future release. Make sure all uri options are updated accordingly. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/remove-format-contraint-of-client-id-ab787960df6e1606.yaml0000664000175100017510000000073215033040005030646 0ustar00mylesmyles--- features: - Since some clients use formats of client id other than uuid, such as an LDAP user id, Zaqar removes the format constraint on client id. Add one option 'client_id_uuid_safe' to allow users to control the validation of client id. Add two options 'min_length_client_id' and 'max_length_client_id' to allow users to control the length of client id if not using uuid. This also requires users to ensure the client id is immutable. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/remove-pool-group-00f2e69682c48131.yaml0000664000175100017510000000034715033040005025042 0ustar00mylesmyles--- features: - Zaqar supports a new way to directly use the pool resource without pool_group when creating a Flavor. The old way will be kept in Queens and marked deprecated. Zaqar will remove the pool_group totally in Rocky. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/remove-pool-group-totally-062ecfccd90a6725.yaml0000664000175100017510000000034115033040005027020 0ustar00mylesmyles--- features: - | In Queens, we support both the old way using pool_group and the new way without it in Flavor. In Stein, we will remove the pool_group totally and only keep the new way in Flavor and Pool.
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/remove-py38-005b0eda63232532.yaml0000664000175100017510000000016615033040005023574 0ustar00mylesmyles--- upgrade: - | Python 3.8 support was dropped. The minimum version of Python now supported is Python 3.9. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/remove-py39-cd35d7feff4be5fb.yaml0000664000175100017510000000016615033040005024353 0ustar00mylesmyles--- upgrade: - | Support for Python 3.9 has been removed. Now Python 3.10 is the minimum version supported. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/remove-strict-redis-e50cccbdf4a86f76.yaml0000664000175100017510000000011515033040005026011 0ustar00mylesmyles--- upgrade: - | The minimum redis-py version required is now >= 3.0.0 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/remove_pool_group_from_zaqar-f8eafeed21779959.yaml0000664000175100017510000000024415033040005027754 0ustar00mylesmyles--- features: - | Since we have introduced the 'pool_list' instead of pool_group in Queens, Now we will update the APIs to suggest users use new argument.././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924757.0 zaqar-20.1.0.dev29/releasenotes/notes/reno.cache0000664000175100017510000007641415033040025020525 0ustar00mylesmyles--- dates: - date: 1429876242 version: 2015.1.0rc2 - date: 1449238907 version: 2.0.0.0b1 - date: 1397743766 version: '2014.1' - date: 1516937844 version: 6.0.0.0b3 - date: 1738570153 version: victoria-eol - date: 1712142449 version: 18.0.0 - date: 1664972515 version: 15.0.0 - date: 1501227338 version: 5.0.0.0b3 - date: 1435801266 version: icehouse-eol - date: 1519817416 version: 6.0.0 - date: 1587751479 version: 10.0.0.0rc1 - date: 1418922804 version: 2015.1.0b1 - date: 1449522104 version: juno-eol - date: 1430397768 version: 2015.1.0 - date: 1524593951 version: 6.0.1 - date: 1435152912 version: 1.0.0.0b1 - date: 1696418109 version: 17.0.0 - date: 1742582773 version: xena-eol - date: 1602671848 version: 11.0.0 - date: 1453382334 version: 2.0.0.0b2 - date: 1486417490 version: 4.0.0.0rc1 - date: 1663257523 version: 15.0.0.0rc1 - date: 1434554824 version: 1.0.0a0 - date: 1406216619 version: 2014.2.b2 - date: 1472716113 version: 3.0.0.0b3 - date: 1557514060 version: pike-em - date: 1427215291 version: 2015.1.0b3 - date: 1703162030 version: ussuri-eol - date: 1554898813 version: 8.0.0 - date: 1637926563 version: ussuri-em - date: 1423154503 version: 2015.1.0b2 - date: 1605783535 version: stein-em - date: 1553094266 version: 8.0.0.0rc1 - date: 1674467330 version: 16.0.0.0b1 - date: 1532359425 version: 7.0.0.0b3 - date: 1513161715 version: 6.0.0.0b2 - date: 1571229743 version: 9.0.0 - date: 1617006284 version: 12.0.0.0rc1 - date: 1481708552 version: 1.1.0 - date: 1707148376 version: yoga-eom - date: 1670243729 version: queens-eol - date: 1633519518 version: 13.0.0 - date: 1487217294 version: 4.0.0.0rc2 - date: 1479482136 version: 4.0.0.0b1 - date: 1556297767 version: 5.0.1 - date: 1444893890 version: 1.0.0 - date: 1508891795 version: newton-eol - date: 1504091457 version: 5.0.0 - date: 1533574854 version: 7.0.0.0rc1 - date: 1595238672 version: 8.0.1 - date: 1499249523 version: mitaka-eol - date: 1402587647 version: 
2014.2.b1 - date: 1681465750 version: xena-em - date: 1468522466 version: 3.0.0.0b2 - date: 1600871912 version: 11.0.0.0rc1 - date: 1592214500 version: 9.0.1 - date: 1646999687 version: 14.0.0.0rc1 - date: 1458918110 version: 2.0.0.0rc2 - date: 1474905765 version: 3.0.0.0rc2 - date: 1741874165 version: 20.0.0.0rc1 - date: 1694092890 version: stein-eol - date: 1491899658 version: 5.0.0.0b1 - date: 1394123327 version: 2014.1.b3 - date: 1714148043 version: zed-eom - date: 1429028274 version: 2015.1.0rc1 - date: 1589367612 version: 10.0.0 - date: 1441206354 version: 1.0.0.0b3 - date: 1572447201 version: queens-em - date: 1413475190 version: '2014.2' - date: 1727866426 version: 19.0.0 - date: 1412331400 version: 2014.2.rc1 - date: 1413445782 version: 2014.2.rc2 - date: 1438246411 version: 1.0.0.0b2 - date: 1524501616 version: 7.0.0.0b1 - date: 1458303503 version: 2.0.0.0rc1 - date: 1535638526 version: 7.0.0 - date: 1618398888 version: 12.0.0 - date: 1390490423 version: 2014.1.b2 - date: 1659352165 version: pike-eol - date: 1509015124 version: 6.0.0.0b1 - date: 1694786779 version: 17.0.0.0rc1 - date: 1475759795 version: 3.0.0 - date: 1742584292 version: yoga-eol - date: 1474011466 version: 3.0.0.0rc1 - date: 1631726027 version: 13.0.0.0rc1 - date: 1582754585 version: rocky-em - date: 1742407704 version: wallaby-eol - date: 1466769216 version: kilo-eol - date: 1457054496 version: 2.0.0.0b3 - date: 1555432397 version: ocata-em - date: 1444048446 version: 1.0.0.0rc2 - date: 1460015095 version: 2.0.0 - date: 1487771006 version: 4.0.0 - date: 1487306671 version: liberty-eol - date: 1620989208 version: train-em - date: 1648641420 version: 14.0.0 - date: 1703161583 version: train-eol - date: 1679488897 version: 16.0.0 - date: 1623870128 version: ocata-eol - date: 1731524280 version: 2023.1-eom - date: 1709663058 version: wallaby-eom - date: 1502439938 version: 5.0.0.0rc1 - date: 1569402429 version: 9.0.0.0rc1 - date: 1485553138 version: 4.0.0.0b3 - date: 1397635335 version: 2014.1.rc1 - date: 1710400377 version: 18.0.0.0rc1 - date: 1443002127 version: 1.0.0.0rc1 - date: 1397659999 version: 2014.1.rc2 - date: 1464937984 version: 3.0.0.0b1 - date: 1683192350 version: rocky-eol - date: 1746177001 version: 2023.2-eol - date: 1726147104 version: 19.0.0.0rc1 - date: 1709663024 version: victoria-eom - date: 1709663092 version: xena-eom - date: 1497273760 version: 5.0.0.0b2 - date: 1666798944 version: wallaby-em - date: 1518181652 version: 6.0.0.0rc1 - date: 1650287441 version: victoria-em - date: 1677591043 version: 16.0.0.0rc1 - date: 1409846741 version: 2014.2.b3 - date: 1743591471 version: 20.0.0 - date: 1481884647 version: 4.0.0.0b2 file-contents: releasenotes/notes/3841fa259c509971-start-using-reno.yaml: other: - Start using reno to manage release notes. releasenotes/notes/Integrate-OSprofiler-with-zaqar-59d0dc3d0326947d.yaml: features: - The OSprofiler is integrated to Zaqar in Ocata. It is a library from oslo. It aims to analyse the performance bottleneck issue by making possible to generate one trace per request affecting all involved services and build a tree of calls. releasenotes/notes/add-a-notifier-using-trust-271d9cd1d2b4cdeb.yaml: features: - Add a new webhook notifier using trust authentication. When using the 'trust+' URL prefix, Zaqar will create a Keystone trust for the user, and then use it when a notification happens to authenticate against Keystone and send the token to the endpoint. 
- Support 'post_data' and 'post_headers' options on subscribers, allowing customization of the payload when having a webhook subscriber. The 'post_data' option supports the '$zaqar_message$' string template, which will be replaced by the serialized JSON message if specified. releasenotes/notes/add-swift-backend-4eb9b43913f39d18.yaml: features: - The new Swift storage backend is added to Zaqar in Ocata. It's experimental currently. To use this backend, you should modify the "drivers" section in the config file. [Blueprint `swift-storage-driver `_] releasenotes/notes/allow-configuration-of-websocket-notification-fa542fbf761378d3.yaml: fixes: - Add two configurations for the notification endpoint of the websocket server, instead of a random port and local address. One is 'notification-bind', address on which the notification server will listen. Another is 'notification-port', port on which the notification server will listen. releasenotes/notes/configuration-refactor-0ff219ac59c96347.yaml: other: - 'The code structure for configuration files are changed. This is insensitvie for end users, but the persons who work for downstream changes should pay attention. Please refactor your private configurations to ``zaqar/conf/`` folder as well. ' releasenotes/notes/delete_messages_with_claim_ids-64bb8105de3768b1.yaml: features: - Add an new option named 'message_delete_with_claim_id', when it is True, delete messages must need claim_ids and message_ids both in request parameters. This will improve the security of the message. releasenotes/notes/deprecate-json-formatted-policy-file-f2abc160715c3f9b.yaml: deprecations: - 'Use of JSON policy files was deprecated by the ``oslo.policy`` library during the Victoria development cycle. As a result, this deprecation is being noted in the Wallaby cycle with an anticipated future removal of support by ``oslo.policy``. As such operators will need to convert to YAML policy files. Please see the upgrade notes for details on migration of any custom policy files. ' upgrade: - 'The default value of ``[oslo_policy] policy_file`` config option has been changed from ``policy.json`` to ``policy.yaml``. Operators who are utilizing customized or previously generated static policy JSON files (which are not needed by default), should generate new policy files or convert them in YAML format. Use the `oslopolicy-convert-json-to-yaml `_ tool to convert a JSON to YAML formatted policy file in backward compatible way. ' releasenotes/notes/deprecate-v11-976cccc1b56a28e7.yaml: deprecations: - Zaqar API v2 has been released for several cycles and it is integrated as the default API version by most of the OpenStack services. So it is time to deprecated v1.1 in favor of v2. Now in Newton cycle, Zaqar API v1.1 is officially deprecated. releasenotes/notes/drop-py-2-7-09cf95d7d843d8f6.yaml: upgrade: - 'Python 2.7 support has been dropped. Last release of Zaqar to support py2.7 is OpenStack Train. The minimum version of Python now supported by Zaqar is Python 3.6. ' releasenotes/notes/email-notification-by-internal-tool-08910ab2247c3864.yaml: features: - 'Currently the email subscription in Zaqar relay on the third part tools, such as "sendmail". It means that deployer should install it out of Zaqar. If he forgets, Zaqar will raise internal error. This work let Zaqar support email subscription by itself using the ``smtp`` python library. 
' releasenotes/notes/encrypted-messages-in-queue-d7438d4f185be444.yaml: features: - 'To enhance the security of messaging service, the queue in Zaqar supports to encrypt messages before storing them into storage backends, also could support to decrypt messages when those are claimed by consumer. To enable this feature, user just need to take "_enable_encrypt_messages=True" when creating queue. AES-256 is used as the default of encryption algorithm and encryption key is configurable in the zaqar.conf. ' releasenotes/notes/falcon-4-e4b5aab856e3228c.yaml: fixes: - 'Fixed compatibility with falcon 4.0.0 and later. ' releasenotes/notes/fix-detailed-queue-without-reserved-metadata-b53857ed9821fe76.yaml: fixes: - Zaqar didn't return the reserved metadata when listing detailed queue. After this fix, Zaqar will return reserved metadata '_default_message_ttl' and '_max_messages_post_size' in response of listing detailed queue. releasenotes/notes/fix_auth_issue_for_root_path-b15e1c4e92e4e8b1.yaml: fixes: - 'When access the root path of Zaqar service, for example: curl GET http://127.0.0.1:8888/, user will see 401 error. Which will cause some front end proxy (like HAProxy) to complain. Now this issue has been fixed. ' releasenotes/notes/fix_subscription_limit-c3cdc9385825285a.yaml: fixes: - Query for all subscriptions on a given queue by taking into account the returned marker, if any. Without this fix, only 10 subscriptions can be extracted from database to send notification. releasenotes/notes/introduce-guru-to-zaqar-ac7b51c764503829.yaml: features: - Introduce Guru to Zaqar. Guru is a mechanism whereby developers and system administrators can generate a report about the state of a running Zaqar executable. This report is called a *Guru Meditation Report*. Now Guru can support wsgi, websocket and uwsgi modes all. releasenotes/notes/introduce-topic-resource-9b40674cac06bdc2.yaml: features: - Introduce a new resource called Topic into Zaqar. Topic is a concept from AWS Simple Notification Service (SNS), it will has relevance with subscriptions. User can send message to a topic, and then the subscribers will get the message according to different protocols, like http, email, sms, etc. This feature will help Zaqar to split Messaging Queue Service and Notification Service clearly. releasenotes/notes/lazy-queues-in-subscriptions-6bade4a1b8eca3e5.yaml: features: - Queues now behave lazy in subscriptions also. So there is no need for the user to pre-create a queue before creating a subscription for this queue. Zaqar will create the queue automatically on the subscription creation request. As before, all subscriptions will continue to stay active even if the corresponding queue was deleted. releasenotes/notes/purge-queue-6788a249ee59d55a.yaml: features: - A new queue action is added so that users can purge a queue quickly. That means all the messages and subscriptions will be deleted automatically but the metadata of the queue will be kept. releasenotes/notes/queue-filter-support-b704a1c27f7473b9.yaml: features: - 'Support for queue filter when queue listing. With this feature, users can add filter of name or metadata in query string parameters in queue list to filter queues. ' releasenotes/notes/redis-sentinel-authentication-93fa9b1846979e41.yaml: features: - 'Now Redis driver supports authentication with Redis Sentinel. To use this feature, add the ``redis_password`` query to the Redis URI. The ``redis_username`` can be used when ACL feature is enabled. 
' releasenotes/notes/redis-username-98a265f61fca6a1c.yaml: deprecation: - 'Password in redis uri will need to be prefixed by '':'' in a future release. Make sure all uri options are updated accordingly. ' features: - 'Redis messaging store now supports authentication with username. ' releasenotes/notes/remove-format-contraint-of-client-id-ab787960df6e1606.yaml: features: - Since some clients use different format of client id not only uuid, like user id of ldap, so Zaqar will remove the format contrain of client id. Add one option 'client_id_uuid_safe' to allow user to control the validation of client id. Add two options 'min_length_client_id' and 'max_length_client_id' to allow user to control the length of client id if not using uuid. This also requires user to ensure the client id is immutable. releasenotes/notes/remove-pool-group-totally-062ecfccd90a6725.yaml: features: - 'In Queens, we support the old way to use pool_group and the new way without it in Flavor both. In Stein, we will remove the pool_group totally and only keep the new way in Flavor and Pool. ' releasenotes/notes/remove-py38-005b0eda63232532.yaml: upgrade: - 'Python 3.8 support was dropped. The minimum version of Python now supported is Python 3.9. ' releasenotes/notes/remove-py39-cd35d7feff4be5fb.yaml: upgrade: - 'Support for Python 3.9 has been removed. Now Python 3.10 is the minimum version supported. ' releasenotes/notes/remove-strict-redis-e50cccbdf4a86f76.yaml: upgrade: - 'The minimum redis-py version required is now >= 3.0.0 ' releasenotes/notes/remove_pool_group_from_zaqar-f8eafeed21779959.yaml: features: - 'Since we have introduced the ''pool_list'' instead of pool_group in Queens, Now we will update the APIs to suggest users use new argument.' releasenotes/notes/return_reserved_metdata_for_dead_letter_queue-da160301f6d8cfa4.yaml: features: - Add three new reserved metdata in response body of querying queue. "_dead_letter_queue", "_dead_letter_queue_messages_ttl" and "_max_claim_count". Those metadata will help user to know better about dead letter queue. releasenotes/notes/show_default_attributes_for_queue-3d87333752484c87.yaml: features: - Currently Zaqar can support more built-in/reserved attributes in queue. For now there are two important attributes 'max_messages_post_size' and 'max_message_ttl'. With this feature, when user query queues Zaqar will show those two attributes (read from config file if there is no customized value from user) in queue metadata so that user can know what value it is. releasenotes/notes/sql_init-c9b3883241631f24.yaml: critical: - 'When using the sqlalchemy driver, operators now are required to run "zaqar-sql-db-manage upgrade" before making the service available. The service previously tried to create the database on the first request, but it was bound to race conditions. ' releasenotes/notes/sqlalchemy-migration-6b4eaebb6e02a449.yaml: features: - Add migration support for Zaqar's sqlalchemy storage driver. releasenotes/notes/subscription-confirmation-support-email-0c2a56cfedc5d1e2.yaml: features: - This feature is the third part of subscription confirmation feature. Support to send email to subscriber if confirmation is needed. To use this feature, user need to set the config option "external_confirmation_url", "subscription_confirmation_email_template" and "unsubscribe_confirmation_email_template". 
The confirmation page url that will be used in email subscription confirmation before notification, this page is not hosted in Zaqar server, user should build their own web service to provide this web page. The subscription_confirmation_email_template let user to customize the subscription confimation email content, including topic, body and sender. The unsubscribe_confirmation_email_template let user to customize the unsubscribe confimation email content, including topic, body and sender too. releasenotes/notes/support-cors-af8349382a44aa0d.yaml: features: - Zaqar now supports Cross-Origin Resource Sharing (CORS). releasenotes/notes/support-dot-in-queue-name-bd2b3d523f55451f.yaml: features: - Support dot character in queue's name, like 'service.test_queue'. releasenotes/notes/support-extra-specs-to-subscription-confirming-edbdbebbdcd0cd74.yaml: features: - Introduce a new request header called "EXTRA-SPEC" and driver mechanism with stevedore to let developers to implement the task about how to deal with this informtaion. In Wallaby, there's just an empty handler by default. releasenotes/notes/support-notification-delivery-policy-fbc94083b4e6b8d0.yaml: features: - Support notificaiton delivery policy in webhook type. It will work when the notification is sent from Zaqar to the subscriber failed. User can define the retry policy in the options of subscription or metadata of queue. releasenotes/notes/support-query-quques-with-count-4453825671bb5298.yaml: features: - Support query queues with filter 'with_count=true' to return the amount of the queues. This will help users to quickly get the exact total number of queues which they own. releasenotes/notes/support-turnoff-deprecated-versions-44656aeb8ebb8881.yaml: features: - Currently, the v1 API is still accessible though it has been deprecated for a while. And we're going to deprecate v1.1 soon. To keep the backward compatibility, a new config option - ``enable_deprecated_api_versions`` is added so that operator can totally turn off an API version or still support it by adding the API version to the list of the new config option. releasenotes/notes/support_dead_letter_queue-c8b7303319e7f920.yaml: features: - 'Support for dead letter queue is added for MongoDB, Redis and Swift. With this feature, message will be moved to the specified dead letter queue if it''s claimed many times but still can''t successfully processed by a client. New reseved metadata keys of queue are added: _max_claim_count, _dead_letter_queue and _dead_letter_queue_messages_ttl. ' releasenotes/notes/support_md5_of_body-84c1cdc6809b6417.yaml: features: - 'Support non-URL encoded message body checksum function, the default algorithm is MD5. Back-end support for MongoDB, Redis and Swift. With this feature, when a user sends a message to the queue, Zaqar calculates a "checksum" value for the body of the non-URL encoded message, which the user can then get after the message is got or claimed. Finally, the user can use it to verify that the body of the newly obtained message is correct. ' releasenotes/notes/update-mongo-driver-with-new-version-of-pymongo-ebd82e428bb57ebd.yaml: upgrade: - 'Upgrade one of storage drivers, mongo driver with new version of pymongo. Pymongo has been updated to 4.0.0, there are some changes which are not supported in new version: 1. Collection.count and Cursor.count is removed. 2. Collection.ensure_index is removed. 3. Collection.__bool__ raises NotImplementedError. 4. Should use Binary.from_uuid to handle the UUID object. 
Those changes need to upgrade the mongo driver''s code to work well. ' releasenotes/notes/user_ipv6_sockets-1e1b436de6b81ae3.yaml: fixes: - In IPv6 management network environment, starting Zaqar server will run into 'Address family for hostname not support' error when use WSGI simple server. The root cause is that Python's TCPServer implementation is hard-coded to use IPv4, even in IPv6 environments. Now this issue has been fixed. releasenotes/notes/victoria-release-prelude-330129ef9dfd6c03.yaml: features: - 'Encrypted Messages in Queue (Change-Id `Icecfb9a232cfeefc2f9603934696bb2dcd56bc9c `_) ' fixes: - 'Fix SSLError caused by not passing the cafile (Change-Id `I176e3876f2652608aaf51b0f74f4d971d31253e2 `_) ' - 'Fix the issue that the function unpackb has no encoding option (Change-Id `bb92e983a79e5c1608f6a603816e1b88283e34c9 `_) ' prelude: 'Welcome to the Victoria release of the OpenStack Message service (zaqar). In this cycle, the Zaqar team would like to bring the following points to your attention. Details may be found below. * Support encrypted messages in queue. * Fixed bugs for stable and security. ' releasenotes/notes/webhook_subscription_confirmation-883cb7f325885ef0.yaml: features: - Now before users send messages to subscribers through a queue, the subscribers should be confirmed first. Zaqar only sends messages to the confirmed subscribers. This feature supports "webhook" and "mailto" subscribers with mongoDB or redis backend. The "mailto" part will be done in O cycle. Set "require_confirmation = True" to enable this feature. The default value is "False" now and we will enable it by default after one or two cycles. releasenotes/notes/zaqar-status-upgrade-check-framework-09caa1f741f6119d.yaml: features: - 'New framework for ``zaqar-status upgrade check`` command is added. This framework allows adding various checks which can be run before a Zaqar upgrade to ensure if the upgrade can be performed safely. ' prelude: 'Added new tool ``zaqar-status upgrade check``. ' upgrade: - 'Operator can now use new CLI tool ``zaqar-status upgrade check`` to check if Zaqar deployment can be safely upgraded from N-1 to N release. 
' notes: - files: - - releasenotes/notes/remove-py39-cd35d7feff4be5fb.yaml - !!binary | NDU2MjdjZjQwOGYxZTJiMjBmYjIxN2JlOWVmMTA5Yzk4NjNiYWJiMQ== version: 20.0.0-18 - files: - - releasenotes/notes/3841fa259c509971-start-using-reno.yaml - !!binary | NWEyY2I3MTFiYjllOTM4N2EyNDFjMzMzMjg4ZDExMzEzYzQyYzVjMw== - - releasenotes/notes/fix_subscription_limit-c3cdc9385825285a.yaml - !!binary | NTBiNDk1MDA3YzhjYzQ1NzdiYWJiOGQ2YWYyN2E5ZDljNWJjNzMxMw== version: 1.1.0 - files: - - releasenotes/notes/fix_subscription_limit-c3cdc9385825285a.yaml - !!binary | Y2ZiNGE5YTA1ODVmZGJhNWQyMmY5ZWE5YjE4MzhjODFkMDBjNWRkNQ== version: 2.0.0-10 - files: - - releasenotes/notes/add-a-notifier-using-trust-271d9cd1d2b4cdeb.yaml - !!binary | NTE2MDRiNDk1NGIxNGEyYWUxNGIxNWI1MGMzMDJkMzJkYjBlNDBhNw== - - releasenotes/notes/deprecate-v11-976cccc1b56a28e7.yaml - !!binary | Yjc0NTE0NmRmNDJhY2FiODZmYzAyN2ZkMDEwODhhYjk1ZWUzMDllNQ== - - releasenotes/notes/fix_auth_issue_for_root_path-b15e1c4e92e4e8b1.yaml - !!binary | ZTlkYmIxOWEzYTEwMzMyMTczOWVmNjgzZWJmZDYwODA0YjUyYjVlYg== - - releasenotes/notes/fix_subscription_limit-c3cdc9385825285a.yaml - !!binary | YjhhNzBlNGFlY2E4M2ViZWZhZDRiMTI3YWY3MWIxYmQxMjVlZmE0MA== - - releasenotes/notes/lazy-queues-in-subscriptions-6bade4a1b8eca3e5.yaml - !!binary | NGMyYjdlMDRkYmNhMGFlMWU1ZDM0ODBjOGJhZDYwZGNiZmFjOGZmOA== - - releasenotes/notes/show_default_attributes_for_queue-3d87333752484c87.yaml - !!binary | ZTlkYmIxOWEzYTEwMzMyMTczOWVmNjgzZWJmZDYwODA0YjUyYjVlYg== - - releasenotes/notes/support-turnoff-deprecated-versions-44656aeb8ebb8881.yaml - !!binary | ZjM4Y2VjZmRmYzU0YzE2ZTdjZDg1MzNjOTEyNTVkMDc0OThhOTgyYg== - - releasenotes/notes/user_ipv6_sockets-1e1b436de6b81ae3.yaml - !!binary | ZTlkYmIxOWEzYTEwMzMyMTczOWVmNjgzZWJmZDYwODA0YjUyYjVlYg== - - releasenotes/notes/webhook_subscription_confirmation-883cb7f325885ef0.yaml - !!binary | NjljNzk5NzM0YmNkMGQxYTBlODUwOTZmNjg3ZjE3ZWUzZDA3NDNjMA== version: 3.0.0 - files: - - releasenotes/notes/Integrate-OSprofiler-with-zaqar-59d0dc3d0326947d.yaml - !!binary | ZWY3MTEwYzI4MzM4YWY3NjgzM2U1MTVkNTZhNWQwOGVmMGQwYjI2Yw== - - releasenotes/notes/purge-queue-6788a249ee59d55a.yaml - !!binary | NDYwYzM0NTI5ODY4ZDJkYzMzMGFmYzEyYjQ2NjI3MDJiZjFmOTgyYQ== - - releasenotes/notes/sqlalchemy-migration-6b4eaebb6e02a449.yaml - !!binary | ZmI4ZGE1ZDUzNTU0Njc1NDkwNGNkMjZmMjY2NGY0NTViYTcyNGFjZg== - - releasenotes/notes/subscription-confirmation-support-email-0c2a56cfedc5d1e2.yaml - !!binary | ZjE5NmY5YThhOTJiYjcyMDZlZjY0ZThiYmExNTFiYTY1OWMzYWFiNg== version: 4.0.0 - files: - - releasenotes/notes/add-swift-backend-4eb9b43913f39d18.yaml - !!binary | NzY0ODRkODgzYWY4OTQ0NGQyNjFjYjJmN2M0ODQyYzc0MGEzYWM3NQ== - - releasenotes/notes/allow-configuration-of-websocket-notification-fa542fbf761378d3.yaml - !!binary | YTY4YTAzYTIyODczMjA1MGIzM2MyYTdmMzVkMWNhYTlmMzQ2NzcxOA== - - releasenotes/notes/fix-detailed-queue-without-reserved-metadata-b53857ed9821fe76.yaml - !!binary | YjcxNDFiNTI3MDZjM2Q1ODM5M2NiYzVkY2Y2YjcxYmIxYTY0NzJhYg== - - releasenotes/notes/introduce-guru-to-zaqar-ac7b51c764503829.yaml - !!binary | NTExOGRhZmM2NWU0ZWMzOWI3ODJmYTAyOWJhNzQ5MTk3MDBjYjQyYg== - - releasenotes/notes/sql_init-c9b3883241631f24.yaml - !!binary | MTBiMDdjOWNjZjViMjkyNTg1OGE0OGUxMjQzNzRhZGRhMjY3NjU5Zg== - - releasenotes/notes/subscription-confirmation-support-email-0c2a56cfedc5d1e2.yaml - !!binary | NDc3OGY3MDhmYTllYzg2YjQxMzdlZWZkNjNkMDdjNDBhZDI0Mjk2ZQ== - - releasenotes/notes/support-cors-af8349382a44aa0d.yaml - !!binary | NDlhMzk3ZWJmYjQ5NWY4NDQxMWFhOTliZGU3Yzk5NTQ5YTdhYTNiYw== - - 
releasenotes/notes/support-dot-in-queue-name-bd2b3d523f55451f.yaml - !!binary | YTg4N2Q3ODk1NzhjOGNlZGY3YmQ0MjdmZjRlMjVmMDM5MDAwMDAxZg== - - releasenotes/notes/support-notification-delivery-policy-fbc94083b4e6b8d0.yaml - !!binary | OTAwYmRiZTNkOWFhODBiM2U0NGQxOGY2Mzc3MTE2MjljYjQ1NmUzZA== - - releasenotes/notes/support_dead_letter_queue-c8b7303319e7f920.yaml - !!binary | ZjAzMmJlODEzMTQ1NjRhODg4MDZmOTYyMzFkM2Y1YTE2MTU5ZGJmZQ== version: 5.0.0 - files: - - releasenotes/notes/support_md5_of_body-84c1cdc6809b6417.yaml - !!binary | ZjYwNWRkOWJhZTVjNDM4MTFiY2YzMjk2MjgyNWQ4ZjY1YjYwZDZlNg== version: 6.0.0 - files: - - releasenotes/notes/configuration-refactor-0ff219ac59c96347.yaml - !!binary | YTdkZjA4ZGViOTI1Yjc5YTRlZGE2ZGJiZDNhMWVlMTU3N2Q2ZmZmZg== - - releasenotes/notes/queue-filter-support-b704a1c27f7473b9.yaml - !!binary | YjRjMzk1Yzc5YTc3OTg4YTRiMTlmZGVmYWE4YzI2Njc2ODQ4YjVjNg== - - releasenotes/notes/remove-format-contraint-of-client-id-ab787960df6e1606.yaml - !!binary | ZmZmODJlN2ExMTc4MjM4Y2U3MmE4MmM4N2U0MTBmODllMDIwZGVhZQ== - - releasenotes/notes/remove_pool_group_from_zaqar-f8eafeed21779959.yaml - !!binary | OTNiZDRmZWQ5M2FmMzU1ZjNmZTYzMzI1M2NiYmZjNTUwOTJhMGEyMA== - - releasenotes/notes/return_reserved_metdata_for_dead_letter_queue-da160301f6d8cfa4.yaml - !!binary | ODMzMDAzNDNkMjMwNTFkYjMxNzAyZDFjYWRiZjllMjUwYzM3ZWYyNA== version: 7.0.0 - files: - - releasenotes/notes/delete_messages_with_claim_ids-64bb8105de3768b1.yaml - !!binary | YmU4NDU5OGQzYTkzMTgzMjg2OWQ3N2E2NTkxZjk0NTZlYTZjMTBkYg== - - releasenotes/notes/email-notification-by-internal-tool-08910ab2247c3864.yaml - !!binary | ZTFjNjI3MDdkMzg3ZGMwN2VmODMzM2MwYmMyNzEzMTNkMjZmYWNmMw== - - releasenotes/notes/introduce-topic-resource-9b40674cac06bdc2.yaml - !!binary | ODhmMGRkN2M4ZjE4MWYyZTFjMGEzOGNmNWEyOTMzYzFmNTI4NmZkMw== - - releasenotes/notes/remove-pool-group-totally-062ecfccd90a6725.yaml - !!binary | ZDFhMDFkNTkxOTBhMmEzMGYyMDczMTQxMWMwNjU1YTBkMWQ0OGI3NA== - - releasenotes/notes/zaqar-status-upgrade-check-framework-09caa1f741f6119d.yaml - !!binary | MzdmYzEzNGQxMTAyYWRhYWUyZDcwOTdjNDc2NjllMWY4ZDUyYWZiOQ== version: 8.0.0 - files: - - releasenotes/notes/drop-py-2-7-09cf95d7d843d8f6.yaml - !!binary | MjJhZGU0ZmE3NzQ4YWE3YzM4OTVhYTkzMmY2NWJlOGI1MTY5MDBiMA== - - releasenotes/notes/support-query-quques-with-count-4453825671bb5298.yaml - !!binary | N2FhMjUyMmUzZDM3MGE3MDg4MmQwN2Q3NjQxNzQxNjc5NjE2ZmE1NQ== version: 10.0.0 - files: - - releasenotes/notes/update-mongo-driver-with-new-version-of-pymongo-ebd82e428bb57ebd.yaml - !!binary | YTBjMzJjYmU0ZDA2NmRhZDUxYjBiZWJiYmE5MzY4ZDFlMGYxYjU3OA== version: 11.0.0-7 - files: - - releasenotes/notes/encrypted-messages-in-queue-d7438d4f185be444.yaml - !!binary | ZTEyYzY1YTM2OTgyNWVhNTQ2OWJkYjMxZjVjNzE1MTI2OGQ3OTI2Yg== version: 11.0.0 - files: - - releasenotes/notes/deprecate-json-formatted-policy-file-f2abc160715c3f9b.yaml - !!binary | OTQ4ZTg4YzI2ODJiNzFkNjRlYTFhYmJlNDdmMDNmYTI4MGIzMDkxMw== - - releasenotes/notes/victoria-release-prelude-330129ef9dfd6c03.yaml - !!binary | MGU0MzVhMzUyMjUzNzRhMjA5N2VjNDM1YTdmYjEzNzZiYWJmZWQzNg== version: 12.0.0 - files: - - releasenotes/notes/support-extra-specs-to-subscription-confirming-edbdbebbdcd0cd74.yaml - !!binary | OWI2ZWRjZjZjYTVhY2E0NTUzNmZiNmY1MDM4MDY4ZTUwNmM5YzY3Mw== version: 13.0.0 - files: - - releasenotes/notes/update-mongo-driver-with-new-version-of-pymongo-ebd82e428bb57ebd.yaml - !!binary | MGY2ZGRkNTcwOTlmYzgwNzc2ZjUwODU4YzZiMzE3OGU4ZWMzZTAxMQ== version: 14.0.0 - files: - - releasenotes/notes/remove-strict-redis-e50cccbdf4a86f76.yaml - !!binary | 
OTI4ZGZjNjE4ZTgxODU0ZGExOThkODYxNWQ0OTljZDE5OWRkYmQ3Mw== version: 17.0.0 - files: - - releasenotes/notes/redis-sentinel-authentication-93fa9b1846979e41.yaml - !!binary | ZmJlODNjOGE3YjQ5ZGYyNWQ1OTk3OTI4MTdlMWFkNTI0NjljZjEyOQ== - - releasenotes/notes/redis-username-98a265f61fca6a1c.yaml - !!binary | YTQ1ZjcwZTkzODY0MzJiM2QxZWUyYjMwYmNhYWNmYTlmMWI3NmI4OQ== version: 19.0.0 - files: - - releasenotes/notes/falcon-4-e4b5aab856e3228c.yaml - !!binary | NDU0M2U3NjkxY2UwZTE2NGMyY2ViMzUzNWYxMDdjODNiZjlkYmU4Zg== - - releasenotes/notes/remove-py38-005b0eda63232532.yaml - !!binary | ODcwZWEwNDhiZTdlMWI3NTkzNTMyMjczZGNmZjBkM2RiYzJkZDEwMw== version: 20.0.0 ././@PaxHeader0000000000000000000000000000021100000000000010207 xustar00115 path=zaqar-20.1.0.dev29/releasenotes/notes/return_reserved_metdata_for_dead_letter_queue-da160301f6d8cfa4.yaml 22 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/return_reserved_metdata_for_dead_letter_queue-da160301f6d8cfa40000664000175100017510000000036415033040005032313 0ustar00mylesmyles--- features: - Add three new reserved metadata fields to the response body when querying a queue. "_dead_letter_queue", "_dead_letter_queue_messages_ttl" and "_max_claim_count". These metadata fields help users better understand the dead letter queue. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/show_default_attributes_for_queue-3d87333752484c87.yaml0000664000175100017510000000062115033040005030402 0ustar00mylesmyles--- features: - Currently Zaqar can support more built-in/reserved attributes in a queue. For now there are two important attributes, 'max_messages_post_size' and 'max_message_ttl'. With this feature, when users query queues, Zaqar will show those two attributes (read from the config file if there is no customized value from the user) in the queue metadata so that users can know what the values are. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/sql_init-c9b3883241631f24.yaml0000664000175100017510000000043215033040005023262 0ustar00mylesmyles--- critical: - | When using the sqlalchemy driver, operators are now required to run "zaqar-sql-db-manage upgrade" before making the service available. The service previously tried to create the database on the first request, but it was bound to race conditions. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/sqlalchemy-migration-6b4eaebb6e02a449.yaml0000664000175100017510000000011715033040005026142 0ustar00mylesmyles--- features: - Add migration support for Zaqar's sqlalchemy storage driver. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/subscription-confirmation-support-email-0c2a56cfedc5d1e2.yaml0000664000175100017510000000155015033040005032101 0ustar00mylesmyles--- features: - This feature is the third part of the subscription confirmation feature. It supports sending email to the subscriber if confirmation is needed. To use this feature, users need to set the config options "external_confirmation_url", "subscription_confirmation_email_template" and "unsubscribe_confirmation_email_template". The confirmation page URL will be used in email subscription confirmation before notification; this page is not hosted in the Zaqar server, so users should build their own web service to provide this web page.
The subscription_confirmation_email_template lets users customize the subscription confirmation email content, including topic, body and sender. The unsubscribe_confirmation_email_template lets users customize the unsubscribe confirmation email content, including topic, body and sender too. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/support-cors-af8349382a44aa0d.yaml0000664000175100017510000000011315033040005024330 0ustar00mylesmyles--- features: - Zaqar now supports Cross-Origin Resource Sharing (CORS). ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/support-dot-in-queue-name-bd2b3d523f55451f.yaml0000664000175100017510000000012415033040005026614 0ustar00mylesmyles--- features: - Support the dot character in a queue's name, like 'service.test_queue'. ././@PaxHeader0000000000000000000000000000021200000000000010210 xustar00116 path=zaqar-20.1.0.dev29/releasenotes/notes/support-extra-specs-to-subscription-confirming-edbdbebbdcd0cd74.yaml 22 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/support-extra-specs-to-subscription-confirming-edbdbebbdcd0cd70000664000175100017510000000037415033040005032611 0ustar00mylesmyles--- features: - Introduce a new request header called "EXTRA-SPEC" and a driver mechanism with stevedore to let developers implement how to deal with this information. In Wallaby, there's just an empty handler by default. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/support-more-backoff-functions-41e02a5977341576.yaml0000664000175100017510000000051515033040005027437 0ustar00mylesmyles--- features: - Support more retry backoff functions in the webhook type. It will work when Zaqar fails to send the notification to the subscriber. Users can define the retry backoff function in the metadata of the queue. There are four retry backoff functions including 'linear', 'arithmetic', 'geometric' and 'exponential'. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/support-notification-delivery-policy-fbc94083b4e6b8d0.yaml0000664000175100017510000000037215033040005031262 0ustar00mylesmyles--- features: - Support notification delivery policy in the webhook type. It will work when a notification sent from Zaqar to the subscriber fails. Users can define the retry policy in the options of the subscription or the metadata of the queue. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/support-query-quques-with-count-4453825671bb5298.yaml0000664000175100017510000000030615033040005027667 0ustar00mylesmyles--- features: - Support querying queues with the filter 'with_count=true' to return the number of queues. This will help users to quickly get the exact total number of queues which they own. ././@PaxHeader0000000000000000000000000000020700000000000010214 xustar00113 path=zaqar-20.1.0.dev29/releasenotes/notes/support-redis-as-management-storage-backend-a205e3c4c4d01584.yaml 22 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/support-redis-as-management-storage-backend-a205e3c4c4d01584.y0000664000175100017510000000034515033040005031456 0ustar00mylesmyles--- features: - Support Redis as a management storage backend to improve the performance and ease of deployment.
For the management driver, users need to enable the Redis storage options in redis.conf to persist data. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/support-turnoff-deprecated-versions-44656aeb8ebb8881.yaml0000664000175100017510000000061615033040005031037 0ustar00mylesmyles--- features: - Currently, the v1 API is still accessible though it has been deprecated for a while. And we're going to deprecate v1.1 soon. To keep backward compatibility, a new config option - ``enable_deprecated_api_versions`` is added so that operators can totally turn off an API version or still support it by adding the API version to the list of the new config option. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/support_dead_letter_queue-c8b7303319e7f920.yaml0000664000175100017510000000060015033040005026774 0ustar00mylesmyles--- features: - | Support for dead letter queues is added for MongoDB, Redis and Swift. With this feature, a message will be moved to the specified dead letter queue if it's claimed many times but still can't be successfully processed by a client. New reserved metadata keys of the queue are added: _max_claim_count, _dead_letter_queue and _dead_letter_queue_messages_ttl. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/support_delayed_queues-1babcaa3f056a39d.yaml0000664000175100017510000000042315033040005026651 0ustar00mylesmyles--- features: - | Support for delayed queues is added for MongoDB, Redis and Swift. With this feature, if the queue is a delayed queue, its messages will be delayed for some time before they can be claimed. A new reserved metadata key of the queue is added: _default_message_delay. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/support_md5_of_body-84c1cdc6809b6417.yaml0000664000175100017510000000073715033040005025576 0ustar00mylesmyles--- features: - | Support a checksum function for non-URL-encoded message bodies; the default algorithm is MD5. Back-end support covers MongoDB, Redis and Swift. With this feature, when a user sends a message to the queue, Zaqar calculates a "checksum" value for the body of the non-URL-encoded message, which the user can then get after the message is retrieved or claimed. Finally, the user can use it to verify that the body of the newly obtained message is correct. ././@PaxHeader0000000000000000000000000000021300000000000010211 xustar00117 path=zaqar-20.1.0.dev29/releasenotes/notes/support_password_configure_for_redis_connection-6f169db73ca80416.yaml 22 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/support_password_configure_for_redis_connection-6f169db73ca8040000664000175100017510000000044615033040005032451 0ustar00mylesmyles--- features: - | The Redis connection doesn't support password configuration in Zaqar, so the redis-server cannot set a password. If the Redis service doesn't set a password, it is exposed to a large number of attacks. This patch adds password configuration support for the Redis connection in Zaqar.
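Taken together, the dead letter, delayed queue, and checksum notes above are easier to follow with a short client-side sketch. Everything below is illustrative only: the endpoint, queue name, token, and numeric values are assumptions, and only the reserved metadata key names come from the notes themselves.

    # A minimal sketch, assuming a Zaqar v2 endpoint on localhost and a valid
    # Keystone token; the reserved key names are from the release notes above,
    # every other value is a made-up example.
    import requests

    ZAQAR = "http://127.0.0.1:8888"                           # assumed endpoint
    HEADERS = {
        "Client-ID": "3381af92-2b9e-11e3-b191-71861300734c",  # any client UUID
        "X-Auth-Token": "<keystone-token>",                   # assumed auth
    }
    metadata = {
        "_max_claim_count": 3,                       # claims before the move
        "_dead_letter_queue": "orders.dead_letter",  # dots in names supported
        "_dead_letter_queue_messages_ttl": 3600,     # TTL after the move (s)
        "_default_message_delay": 30,                # delayed-queue key (s)
    }
    # in the v2 API a queue can be created with a PUT that carries metadata
    requests.put(ZAQAR + "/v2/queues/orders", json=metadata, headers=HEADERS)

The MD5 body checksum described above can likewise be recomputed on the client to verify a fetched message; the "MD5:" prefix shown here is an assumption about the attribute format rather than something stated in the note.

    import hashlib

    body = b'{"event": "backup.start"}'  # the non-URL-encoded body as posted
    expected = "MD5:" + hashlib.md5(body).hexdigest()
    # compare 'expected' against the message's reported "checksum" attribute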
././@PaxHeader0000000000000000000000000000021300000000000010211 xustar00117 path=zaqar-20.1.0.dev29/releasenotes/notes/update-mongo-driver-with-new-version-of-pymongo-ebd82e428bb57ebd.yaml 22 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/update-mongo-driver-with-new-version-of-pymongo-ebd82e428bb57e0000664000175100017510000000075015033040005032041 0ustar00mylesmyles--- upgrade: - | Upgrade one of the storage drivers, the mongo driver, to the new version of pymongo. Pymongo has been updated to 4.0.0; there are some changes which are not supported in the new version: 1. Collection.count and Cursor.count are removed. 2. Collection.ensure_index is removed. 3. Collection.__bool__ raises NotImplementedError. 4. Binary.from_uuid should be used to handle UUID objects. The mongo driver's code needs to be upgraded to work well with those changes. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/user_ipv6_sockets-1e1b436de6b81ae3.yaml0000664000175100017510000000050615033040005025407 0ustar00mylesmyles--- fixes: - In an IPv6 management network environment, starting the Zaqar server would run into an 'Address family for hostname not supported' error when using the WSGI simple server. The root cause is that Python's TCPServer implementation is hard-coded to use IPv4, even in IPv6 environments. Now this issue has been fixed. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/victoria-release-prelude-330129ef9dfd6c03.yaml0000664000175100017510000000162015033040005026553 0ustar00mylesmyles--- prelude: | Welcome to the Victoria release of the OpenStack Message service (zaqar). In this cycle, the Zaqar team would like to bring the following points to your attention. Details may be found below. * Support encrypted messages in queue. * Fixed bugs for stability and security. features: - | Encrypted Messages in Queue (Change-Id `Icecfb9a232cfeefc2f9603934696bb2dcd56bc9c `_) fixes: - | Fix SSLError caused by not passing the cafile (Change-Id `I176e3876f2652608aaf51b0f74f4d971d31253e2 `_) - | Fix the issue that the function unpackb has no encoding option (Change-Id `bb92e983a79e5c1608f6a603816e1b88283e34c9 `_) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/webhook_subscription_confirmation-883cb7f325885ef0.yaml0000664000175100017510000000074415033040005030636 0ustar00mylesmyles--- features: - Now before users send messages to subscribers through a queue, the subscribers should be confirmed first. Zaqar only sends messages to the confirmed subscribers. This feature supports "webhook" and "mailto" subscribers with the MongoDB or Redis backend. The "mailto" part will be done in the O cycle. Set "require_confirmation = True" to enable this feature. The default value is "False" now and we will enable it by default after one or two cycles. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/notes/zaqar-status-upgrade-check-framework-09caa1f741f6119d.yaml0000664000175100017510000000071615033040005031012 0ustar00mylesmyles--- prelude: > Added new tool ``zaqar-status upgrade check``. features: - | A new framework for the ``zaqar-status upgrade check`` command is added. This framework allows adding various checks which can be run before a Zaqar upgrade to ensure the upgrade can be performed safely.
upgrade: - | Operator can now use new CLI tool ``zaqar-status upgrade check`` to check if Zaqar deployment can be safely upgraded from N-1 to N release. ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5590136 zaqar-20.1.0.dev29/releasenotes/source/0000775000175100017510000000000015033040026016732 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/source/2023.1.rst0000664000175100017510000000021015033040005020177 0ustar00mylesmyles=========================== 2023.1 Series Release Notes =========================== .. release-notes:: :branch: unmaintained/2023.1 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/source/2023.2.rst0000664000175100017510000000020215033040005020201 0ustar00mylesmyles=========================== 2023.2 Series Release Notes =========================== .. release-notes:: :branch: stable/2023.2 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/source/2024.1.rst0000664000175100017510000000020215033040005020201 0ustar00mylesmyles=========================== 2024.1 Series Release Notes =========================== .. release-notes:: :branch: stable/2024.1 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/source/2024.2.rst0000664000175100017510000000020215033040005020202 0ustar00mylesmyles=========================== 2024.2 Series Release Notes =========================== .. release-notes:: :branch: stable/2024.2 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/source/2025.1.rst0000664000175100017510000000020215033040005020202 0ustar00mylesmyles=========================== 2025.1 Series Release Notes =========================== .. release-notes:: :branch: stable/2025.1 ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5590136 zaqar-20.1.0.dev29/releasenotes/source/_static/0000775000175100017510000000000015033040026020360 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/source/_static/.gitignore0000664000175100017510000000000015033040005022333 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5590136 zaqar-20.1.0.dev29/releasenotes/source/_templates/0000775000175100017510000000000015033040026021067 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/source/_templates/.gitignore0000664000175100017510000000000015033040005023042 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/source/conf.py0000664000175100017510000002071315033040005020231 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'reno.sphinxext', 'openstackdocstheme', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = 'Zaqar Release Notes' copyright = '2015, Zaqar Developers' # Release notes are version independent. # The short X.Y version. version = '' # The full version, including alpha/beta/rc tags. release = '' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. 
# html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". # html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # html_last_updated_fmt = '%Y-%m-%d %H:%M' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'ZaqarReleaseNotesdoc' # -- Options for LaTeX output --------------------------------------------- # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'ZaqarReleaseNotes.tex', 'Zaqar Release Notes Documentation', 'Zaqar Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. 
# latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'zaqarreleasenotes', 'Zaqar Release Notes Documentation', ['Zaqar Developers'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'ZaqarReleaseNotes', 'Zaqar Release Notes Documentation', 'Zaqar Developers', 'ZaqarReleaseNotes', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # -- Options for Internationalization output ------------------------------ locale_dirs = ['locale/'] # Options for openstackdocstheme openstackdocs_repo_name = 'openstack/zaqar' openstackdocs_bug_project = 'zaqar' openstackdocs_bug_tag = '' ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/source/index.rst0000664000175100017510000000045715033040005020576 0ustar00mylesmyles====================== Zaqar Release Notes ====================== .. toctree:: :maxdepth: 1 unreleased 2025.1 2024.2 2024.1 2023.2 2023.1 zed yoga xena wallaby victoria ussuri train stein rocky queens pike ocata newton mitaka liberty ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/source/liberty.rst0000664000175100017510000000022215033040005021127 0ustar00mylesmyles============================== Liberty Series Release Notes ============================== .. release-notes:: :branch: origin/stable/liberty ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5380137 zaqar-20.1.0.dev29/releasenotes/source/locale/0000775000175100017510000000000015033040026020171 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5380137 zaqar-20.1.0.dev29/releasenotes/source/locale/de/0000775000175100017510000000000015033040026020561 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5590136 zaqar-20.1.0.dev29/releasenotes/source/locale/de/LC_MESSAGES/0000775000175100017510000000000015033040026022346 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/source/locale/de/LC_MESSAGES/releasenotes.po0000664000175100017510000000376215033040005025404 0ustar00mylesmyles# Robert Simai , 2018. 
#zanata msgid "" msgstr "" "Project-Id-Version: zaqar\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2019-01-08 11:33+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2018-12-03 11:03+0000\n" "Last-Translator: Robert Simai \n" "Language-Team: German\n" "Language: de\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" msgid "1.1.0" msgstr "1.1.0" msgid "2.0.0-10" msgstr "2.0.0-10" msgid "3.0.0" msgstr "3.0.0" msgid "4.0.0" msgstr "4.0.0" msgid "5.0.0" msgstr "5.0.0" msgid "6.0.0" msgstr "6.0.0" msgid "7.0.0" msgstr "7.0.0" msgid "Add migration support for Zaqar's sqlalchemy storage driver." msgstr "" "Migrationsunterstützung zu Zaqars sqlalchemy Storage Treiber hinzugefügt." msgid "Bug Fixes" msgstr "Fehlerkorrekturen" msgid "Critical Issues" msgstr "Kritische Probleme" msgid "Current Series Release Notes" msgstr "Aktuelle Serie Releasenotes" msgid "Deprecation Notes" msgstr "Ablaufwarnungen" msgid "Liberty Series Release Notes" msgstr "Liberty Serie Releasenotes" msgid "Mitaka Series Release Notes" msgstr "Mitaka Serie Releasenotes" msgid "New Features" msgstr "Neue Funktionen" msgid "Newton Series Release Notes" msgstr "Newton Serie Releasenotes" msgid "Ocata Series Release Notes" msgstr "Ocata Serie Releasenotes" msgid "Other Notes" msgstr "Andere Notizen" msgid "Pike Series Release Notes" msgstr "Pike Serie Releasenotes" msgid "Queens Series Release Notes" msgstr "Queens Serie Releasenotes" msgid "Rocky Series Release Notes" msgstr "Rocky Serie Releasenotes" msgid "Start using reno to manage release notes." msgstr "Reno wird für die Verwaltung der Releasenotes verwendet." msgid "Support dot character in queue's name, like 'service.test_queue'." msgstr "" "Unterstützung für den Punkt in Warteschlangennamen, z.B. 'service." "test_queue'." msgid "Zaqar Release Notes" msgstr "Zaqar Releasenotes" ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5380137 zaqar-20.1.0.dev29/releasenotes/source/locale/en_GB/0000775000175100017510000000000015033040026021143 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5590136 zaqar-20.1.0.dev29/releasenotes/source/locale/en_GB/LC_MESSAGES/0000775000175100017510000000000015033040026022730 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po0000664000175100017510000010155015033040005025760 0ustar00mylesmyles# Andi Chandler , 2017. #zanata # Andi Chandler , 2018. #zanata # Andreas Jaeger , 2018. #zanata # Andi Chandler , 2020. #zanata # Andi Chandler , 2022. #zanata # Andi Chandler , 2023. #zanata # Andi Chandler , 2024. 
#zanata msgid "" msgstr "" "Project-Id-Version: zaqar\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2024-10-24 14:59+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2024-10-01 01:36+0000\n" "Last-Translator: Andi Chandler \n" "Language-Team: English (United Kingdom)\n" "Language: en_GB\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" msgid "1.1.0" msgstr "1.1.0" msgid "10.0.0" msgstr "10.0.0" msgid "11.0.0" msgstr "11.0.0" msgid "11.0.0-7" msgstr "11.0.0-7" msgid "12.0.0" msgstr "12.0.0" msgid "13.0.0" msgstr "13.0.0" msgid "14.0.0" msgstr "14.0.0" msgid "17.0.0" msgstr "17.0.0" msgid "2.0.0-10" msgstr "2.0.0-10" msgid "2023.1 Series Release Notes" msgstr "2023.1 Series Release Notes" msgid "2023.2 Series Release Notes" msgstr "2023.2 Series Release Notes" msgid "2024.1 Series Release Notes" msgstr "2024.1 Series Release Notes" msgid "3.0.0" msgstr "3.0.0" msgid "3.0.0.0b2" msgstr "3.0.0.0b2" msgid "3.0.0.0b3" msgstr "3.0.0.0b3" msgid "3.0.0.0rc1" msgstr "3.0.0.0rc1" msgid "4.0.0" msgstr "4.0.0" msgid "4.0.0.0b2" msgstr "4.0.0.0b2" msgid "4.0.0.0rc1" msgstr "4.0.0.0rc1" msgid "5.0.0" msgstr "5.0.0" msgid "6.0.0" msgstr "6.0.0" msgid "7.0.0" msgstr "7.0.0" msgid "8.0.0" msgstr "8.0.0" msgid "" "A new queue action is added so that users can purge a queue quickly. That " "means all the messages and subscriptions will be deleted automatically but " "the metadata of the queue will be kept." msgstr "" "A new queue action is added so that users can purge a queue quickly. That " "means all the messages and subscriptions will be deleted automatically but " "the metadata of the queue will be kept." msgid "" "Add a new webhook notifier using trust authentication. When using the 'trust" "+' URL prefix, Zaqar will create a Keystone trust for the user, and then use " "it when a notification happens to authenticate against Keystone and send the " "token to the endpoint." msgstr "" "Add a new webhook notifier using trust authentication. When using the 'trust" "+' URL prefix, Zaqar will create a Keystone trust for the user, and then use " "it when a notification happens to authenticate against Keystone and send the " "token to the endpoint." msgid "" "Add an new option named 'message_delete_with_claim_id', when it is True, " "delete messages must need claim_ids and message_ids both in request " "parameters. This will improve the security of the message." msgstr "" "Add an new option named 'message_delete_with_claim_id', when it is True, " "delete messages must need claim_ids and message_ids both in request " "parameters. This will improve the security of the message." msgid "Add migration support for Zaqar's sqlalchemy storage driver." msgstr "Add migration support for Zaqar's sqlalchemy storage driver." msgid "" "Add three new reserved metdata in response body of querying queue. " "\"_dead_letter_queue\", \"_dead_letter_queue_messages_ttl\" and " "\"_max_claim_count\". Those metadata will help user to know better about " "dead letter queue." msgstr "" "Add three new reserved metdata in response body of querying queue. " "\"_dead_letter_queue\", \"_dead_letter_queue_messages_ttl\" and " "\"_max_claim_count\". Those metadata will help user to know better about " "dead letter queue." msgid "" "Add two configurations for the notification endpoint of the websocket " "server, instead of a random port and local address. 
One is 'notification-" "bind', address on which the notification server will listen. Another is " "'notification-port', port on which the notification server will listen." msgstr "" "Add two configurations for the notification endpoint of the websocket " "server, instead of a random port and local address. One is 'notification-" "bind', address on which the notification server will listen. Another is " "'notification-port', port on which the notification server will listen." msgid "Added new tool ``zaqar-status upgrade check``." msgstr "Added new tool ``zaqar-status upgrade check``." msgid "Bug Fixes" msgstr "Bug Fixes" msgid "Critical Issues" msgstr "Critical Issues" msgid "Current Series Release Notes" msgstr "Current Series Release Notes" msgid "" "Currently Zaqar can support more built-in/reserved attributes in queue. For " "now there are two important attributes 'max_messages_post_size' and " "'max_message_ttl'. With this feature, when user query queues Zaqar will show " "those two attributes (read from config file if there is no customized value " "from user) in queue metadata so that user can know what value it is." msgstr "" "Currently Zaqar can support more built-in/reserved attributes in queue. For " "now there are two important attributes 'max_messages_post_size' and " "'max_message_ttl'. With this feature, when user query queues Zaqar will show " "those two attributes (read from config file if there is no customised value " "from user) in queue metadata so that user can know what value it is." msgid "" "Currently the email subscription in Zaqar relay on the third part tools, " "such as \"sendmail\". It means that deployer should install it out of Zaqar. " "If he forgets, Zaqar will raise internal error. This work let Zaqar support " "email subscription by itself using the ``smtp`` python library." msgstr "" "Currently the email subscriptions in Zaqar rely on the third part tools, " "such as \"sendmail\". It means that deployer should install it out of Zaqar. " "If he forgets, Zaqar will raise internal error. This work let Zaqar support " "email subscription by itself using the ``smtp`` python library." msgid "" "Currently, the v1 API is still accessible though it has been deprecated for " "a while. And we're going to deprecate v1.1 soon. To keep the backward " "compatibility, a new config option - ``enable_deprecated_api_versions`` is " "added so that operator can totally turn off an API version or still support " "it by adding the API version to the list of the new config option." msgstr "" "Currently, the v1 API is still accessible though it has been deprecated for " "a while. And we're going to deprecate v1.1 soon. To keep the backward " "compatibility, a new config option - ``enable_deprecated_api_versions`` is " "added so that operator can totally turn off an API version or still support " "it by adding the API version to the list of the new config option." 
msgid "Deprecation Notes" msgstr "Deprecation Notes" msgid "" "Encrypted Messages in Queue (Change-Id " "`Icecfb9a232cfeefc2f9603934696bb2dcd56bc9c `_)" msgstr "" "Encrypted Messages in Queue (Change-Id " "`Icecfb9a232cfeefc2f9603934696bb2dcd56bc9c `_)" msgid "" "Fix SSLError caused by not passing the cafile (Change-Id " "`I176e3876f2652608aaf51b0f74f4d971d31253e2 `_)" msgstr "" "Fix SSLError caused by not passing the cafile (Change-Id " "`I176e3876f2652608aaf51b0f74f4d971d31253e2 `_)" msgid "" "Fix the issue that the function unpackb has no encoding option (Change-Id " "`bb92e983a79e5c1608f6a603816e1b88283e34c9 `_)" msgstr "" "Fix the issue that the function unpackb has no encoding option (Change-Id " "`bb92e983a79e5c1608f6a603816e1b88283e34c9 `_)" msgid "Fixed bugs for stable and security." msgstr "Fixed bugs for stable and security." msgid "" "In IPv6 management network environment, starting Zaqar server will run into " "'Address family for hostname not support' error when use WSGI simple server. " "The root cause is that Python's TCPServer implementation is hard-coded to " "use IPv4, even in IPv6 environments. Now this issue has been fixed." msgstr "" "In IPv6 management network environment, starting Zaqar server will run into " "'Address family for hostname not support' error when use WSGI simple server. " "The root cause is that Python's TCPServer implementation is hard-coded to " "use IPv4, even in IPv6 environments. Now this issue has been fixed." msgid "" "In Queens, we support the old way to use pool_group and the new way without " "it in Flavor both. In Stein, we will remove the pool_group totally and only " "keep the new way in Flavor and Pool." msgstr "" "In Queens, we support the old way to use pool_group and the new way without " "it in Flavour both. In Stein, we will remove the pool_group totally and only " "keep the new way in Flavour and Pool." msgid "" "Introduce Guru to Zaqar. Guru is a mechanism whereby developers and system " "administrators can generate a report about the state of a running Zaqar " "executable. This report is called a *Guru Meditation Report*. Now Guru can " "support wsgi, websocket and uwsgi modes all." msgstr "" "Introduce Guru to Zaqar. Guru is a mechanism whereby developers and system " "administrators can generate a report about the state of a running Zaqar " "executable. This report is called a *Guru Meditation Report*. Now Guru can " "support wsgi, websocket and uwsgi modes all." msgid "" "Introduce a new request header called \"EXTRA-SPEC\" and driver mechanism " "with stevedore to let developers to implement the task about how to deal " "with this informtaion. In Wallaby, there's just an empty handler by default." msgstr "" "Introduce a new request header called \"EXTRA-SPEC\" and driver mechanism " "with stevedore to let developers implement the task about how to deal with " "this information. In Wallaby, there's just an empty handler by default." msgid "" "Introduce a new resource called Topic into Zaqar. Topic is a concept from " "AWS Simple Notification Service (SNS), it will has relevance with " "subscriptions. User can send message to a topic, and then the subscribers " "will get the message according to different protocols, like http, email, " "sms, etc. This feature will help Zaqar to split Messaging Queue Service and " "Notification Service clearly." msgstr "" "Introduce a new resource called Topic into Zaqar. Topic is a concept from " "AWS Simple Notification Service (SNS), it will has relevance with " "subscriptions. 
User can send message to a topic, and then the subscribers " "will get the message according to different protocols, like http, email, " "SMS, etc. This feature will help Zaqar to split Messaging Queue Service and " "Notification Service clearly." msgid "Liberty Series Release Notes" msgstr "Liberty Series Release Notes" msgid "Mitaka Series Release Notes" msgstr "Mitaka Series Release Notes" msgid "New Features" msgstr "New Features" msgid "" "New framework for ``zaqar-status upgrade check`` command is added. This " "framework allows adding various checks which can be run before a Zaqar " "upgrade to ensure if the upgrade can be performed safely." msgstr "" "New framework for ``zaqar-status upgrade check`` command is added. This " "framework allows adding various checks which can be run before a Zaqar " "upgrade to ensure if the upgrade can be performed safely." msgid "Newton Series Release Notes" msgstr "Newton Series Release Notes" msgid "" "Now Redis driver supports authentication with Redis Sentinel. To use this " "feature, add the ``redis_password`` query to the Redis URI. The " "``redis_username`` can be used when ACL feature is enabled." msgstr "" "Now Redis driver supports authentication with Redis Sentinel. To use this " "feature, add the ``redis_password`` query to the Redis URI. The " "``redis_username`` can be used when ACL feature is enabled." msgid "" "Now before users send messages to subscribers through a queue, the " "subscribers should be confirmed first. Zaqar only sends messages to the " "confirmed subscribers. This feature supports \"webhook\" and \"mailto\" " "subscribers with mongoDB or redis backend. The \"mailto\" part will be done " "in O cycle. Set \"require_confirmation = True\" to enable this feature. The " "default value is \"False\" now and we will enable it by default after one or " "two cycles." msgstr "" "Now before users send messages to subscribers through a queue, the " "subscribers should be confirmed first. Zaqar only sends messages to the " "confirmed subscribers. This feature supports \"webhook\" and \"mailto\" " "subscribers with MongoDB or Redis backend. The \"mailto\" part will be done " "in O cycle. Set \"require_confirmation = True\" to enable this feature. The " "default value is \"False\" now and we will enable it by default after one or " "two cycles." msgid "Ocata Series Release Notes" msgstr "Ocata Series Release Notes" msgid "" "Operator can now use new CLI tool ``zaqar-status upgrade check`` to check if " "Zaqar deployment can be safely upgraded from N-1 to N release." msgstr "" "Operator can now use new CLI tool ``zaqar-status upgrade check`` to check if " "Zaqar deployment can be safely upgraded from N-1 to N release." msgid "Other Notes" msgstr "Other Notes" msgid "Pike Series Release Notes" msgstr "Pike Series Release Notes" msgid "Prelude" msgstr "Prelude" msgid "" "Python 2.7 support has been dropped. Last release of Zaqar to support py2.7 " "is OpenStack Train. The minimum version of Python now supported by Zaqar is " "Python 3.6." msgstr "" "Python 2.7 support has been dropped. Last release of Zaqar to support py2.7 " "is OpenStack Train. The minimum version of Python now supported by Zaqar is " "Python 3.6." msgid "Queens Series Release Notes" msgstr "Queens Series Release Notes" msgid "" "Query for all subscriptions on a given queue by taking into account the " "returned marker, if any. Without this fix, only 10 subscriptions can be " "extracted from database to send notification." 
msgstr "" "Query for all subscriptions on a given queue by taking into account the " "returned marker, if any. Without this fix, only 10 subscriptions can be " "extracted from database to send notification." msgid "" "Queues now behave lazy in subscriptions also. So there is no need for the " "user to pre-create a queue before creating a subscription for this queue. " "Zaqar will create the queue automatically on the subscription creation " "request. As before, all subscriptions will continue to stay active even if " "the corresponding queue was deleted." msgstr "" "Queues now behave lazy in subscriptions also. So there is no need for the " "user to pre-create a queue before creating a subscription for this queue. " "Zaqar will create the queue automatically on the subscription creation " "request. As before, all subscriptions will continue to stay active even if " "the corresponding queue was deleted." msgid "" "Redis connection doesn't support password configure in zaqar, so redis-" "server can not set a password. If redis service doesn't set a password, it " "will suffer a large number of attacks. The patch will support password " "configure for redis connection in zaqar." msgstr "" "Redis connection doesn't support password configuration in Zaqar, so Redis-" "server can not set a password. If Redis service doesn't set a password, it " "will suffer a large number of attacks. The patch will support password " "configuration for a Redis connection in Zaqar." msgid "Redis messaging store now supports authentication with username." msgstr "Redis messaging store now supports authentication with a username." msgid "Rocky Series Release Notes" msgstr "Rocky Series Release Notes" msgid "" "Since some clients use different format of client id not only uuid, like " "user id of ldap, so Zaqar will remove the format contrain of client id. Add " "one option 'client_id_uuid_safe' to allow user to control the validation of " "client id. Add two options 'min_length_client_id' and 'max_length_client_id' " "to allow user to control the length of client id if not using uuid. This " "also requires user to ensure the client id is immutable." msgstr "" "Since some clients use different format of client id not only UUID, like " "user id of LDAP, so Zaqar will remove the format contraint of the client id. " "Add one option 'client_id_uuid_safe' to allow user to control the validation " "of client id. Add two options 'min_length_client_id' and " "'max_length_client_id' to allow user to control the length of client id if " "not using UUID. This also requires user to ensure the client id is immutable." msgid "" "Since we have introduced the 'pool_list' instead of pool_group in Queens, " "Now we will update the APIs to suggest users use new argument." msgstr "" "Since we have introduced the 'pool_list' instead of pool_group in Queens, " "Now we will update the APIs to suggest users use new argument." msgid "Start using reno to manage release notes." msgstr "Start using Reno to manage release notes." msgid "Stein Series Release Notes" msgstr "Stein Series Release Notes" msgid "" "Support 'post_data' and 'post_headers' options on subscribers, allowing " "customization of the payload when having a webhook subscriber. The " "'post_data' option supports the '$zaqar_message$' string template, which " "will be replaced by the serialized JSON message if specified." msgstr "" "Support 'post_data' and 'post_headers' options on subscribers, allowing " "customisation of the payload when having a webhook subscriber. 
The " "'post_data' option supports the '$zaqar_message$' string template, which " "will be replaced by the serialised JSON message if specified." msgid "" "Support Redis as management storage backend to improve the performance and " "ease of deployment. For the management driver, user needs to enable the " "redis storage options in redis.conf to persistent data." msgstr "" "Support Redis as the management storage backend to improve the performance " "and ease of deployment. For the management driver, user needs to enable the " "Redis storage options in redis.conf to persist data." msgid "Support dot character in queue's name, like 'service.test_queue'." msgstr "Support dot character in queue's name, like 'service.test_queue'." msgid "Support encrypted messages in queue." msgstr "Support encrypted messages in queue." msgid "" "Support for dead letter queue is added for MongoDB, Redis and Swift. With " "this feature, message will be moved to the specified dead letter queue if " "it's claimed many times but still can't successfully processed by a client. " "New reseved metadata keys of queue are added: _max_claim_count, " "_dead_letter_queue and _dead_letter_queue_messages_ttl." msgstr "" "Support for dead letter queue is added for MongoDB, Redis and Swift. With " "this feature, message will be moved to the specified dead letter queue if " "it's claimed many times but still can't successfully processed by a client. " "New reserved metadata keys of queue are added: _max_claim_count, " "_dead_letter_queue and _dead_letter_queue_messages_ttl." msgid "" "Support for delayed queues is added for MongoDB, Redis and Swift. With this " "feature, if the queue is a delayed queue, its message will be delayed some " "time to be claimed. New reseved metadata key of queue is added: " "_default_message_delay." msgstr "" "Support for delayed queues is added for MongoDB, Redis and Swift. With this " "feature, if the queue is a delayed queue, its message will be delayed some " "time to be claimed. New reserved metadata key of queue is added: " "_default_message_delay." msgid "" "Support for queue filter when queue listing. With this feature, users can " "add filter of name or metadata in query string parameters in queue list to " "filter queues." msgstr "" "Support for queue filter when queue listing. With this feature, users can " "add filter of name or metadata in query string parameters in queue list to " "filter queues." msgid "" "Support more retry backoff function in webhook type. It will work when Zaqar " "failed to send the notification to the subscriber. Users can define the " "retry backoff function in metadata of queue. There are four retry backoff " "functions including 'linear', 'arithmetic', 'geometric' and 'exponential'." msgstr "" "Support more retry back-off function in webhook type. It will work when " "Zaqar failed to send the notification to the subscriber. Users can define " "the retry back-off function in metadata of queue. There are four retry back-" "off functions including 'linear', 'arithmetic', 'geometric' and " "'exponential'." msgid "" "Support non-URL encoded message body checksum function, the default " "algorithm is MD5. Back-end support for MongoDB, Redis and Swift. With this " "feature, when a user sends a message to the queue, Zaqar calculates a " "\"checksum\" value for the body of the non-URL encoded message, which the " "user can then get after the message is got or claimed. Finally, the user can " "use it to verify that the body of the newly obtained message is correct." 
msgstr "" "Support non-URL encoded message body checksum function, the default " "algorithm is MD5. Back-end support for MongoDB, Redis and Swift. With this " "feature, when a user sends a message to the queue, Zaqar calculates a " "\"checksum\" value for the body of the non-URL encoded message, which the " "user can then get after the message is got or claimed. Finally, the user can " "use it to verify that the body of the newly obtained message is correct." msgid "" "Support notificaiton delivery policy in webhook type. It will work when the " "notification is sent from Zaqar to the subscriber failed. User can define " "the retry policy in the options of subscription or metadata of queue." msgstr "" "Support notification delivery policy in webhook type. It will work when the " "notification is sent from Zaqar to the subscriber failed. User can define " "the retry policy in the options of subscription or metadata of queue." msgid "" "Support query queues with filter 'with_count=true' to return the amount of " "the queues. This will help users to quickly get the exact total number of " "queues which they own." msgstr "" "Support query queues with filter 'with_count=true' to return the amount of " "the queues. This will help users to quickly get the exact total number of " "queues which they own." msgid "" "The OSprofiler is integrated to Zaqar in Ocata. It is a library from oslo. " "It aims to analyse the performance bottleneck issue by making possible to " "generate one trace per request affecting all involved services and build a " "tree of calls." msgstr "" "The OSprofiler is integrated to Zaqar in Ocata. It is a library from Oslo. " "It aims to analyse the performance bottleneck issue by making possible to " "generate one trace per request affecting all involved services and build a " "tree of calls." msgid "" "The code structure for configuration files are changed. This is insensitvie " "for end users, but the persons who work for downstream changes should pay " "attention. Please refactor your private configurations to ``zaqar/conf/`` " "folder as well." msgstr "" "The code structure for configuration files are changed. This is transparent " "for end users, but the persons who work for downstream changes should pay " "attention. Please refactor your private configurations to ``zaqar/conf/`` " "folder as well." msgid "" "The default value of ``[oslo_policy] policy_file`` config option has been " "changed from ``policy.json`` to ``policy.yaml``. Operators who are utilizing " "customized or previously generated static policy JSON files (which are not " "needed by default), should generate new policy files or convert them in YAML " "format. Use the `oslopolicy-convert-json-to-yaml `_ tool to " "convert a JSON to YAML formatted policy file in backward compatible way." msgstr "" "The default value of ``[oslo_policy] policy_file`` config option has been " "changed from ``policy.json`` to ``policy.yaml``. Operators utilising " "customised or previously generated static policy JSON files (which are not " "needed by default), should generate new policy files or convert them into " "YAML format. Use the `oslopolicy-convert-json-to-yaml `_ " "tool to convert a JSON to YAML formatted policy file in a backward " "compatible way." msgid "The minimum redis-py version required is now >= 3.0.0" msgstr "The minimum redis-py version required is now >= 3.0.0" msgid "" "The new Swift storage backend is added to Zaqar in Ocata. It's experimental " "currently. 
To use this backend, you should modify the \"drivers\" section in " "the config file. [Blueprint `swift-storage-driver `_]" msgstr "" "The new Swift storage backend is added to Zaqar in Ocata. It's experimental " "currently. To use this backend, you should modify the \"drivers\" section in " "the config file. [Blueprint `swift-storage-driver `_]" msgid "" "This feature is the third part of subscription confirmation feature. Support " "to send email to subscriber if confirmation is needed. To use this feature, " "user need to set the config option \"external_confirmation_url\", " "\"subscription_confirmation_email_template\" and " "\"unsubscribe_confirmation_email_template\". The confirmation page url that " "will be used in email subscription confirmation before notification, this " "page is not hosted in Zaqar server, user should build their own web service " "to provide this web page. The subscription_confirmation_email_template let " "user to customize the subscription confimation email content, including " "topic, body and sender. The unsubscribe_confirmation_email_template let user " "to customize the unsubscribe confimation email content, including topic, " "body and sender too." msgstr "" "This feature is the third part of subscription confirmation feature. Support " "to send email to subscriber if confirmation is needed. To use this feature, " "user need to set the config option \"external_confirmation_url\", " "\"subscription_confirmation_email_template\" and " "\"unsubscribe_confirmation_email_template\". The confirmation page URL that " "will be used in email subscription confirmation before notification, this " "page is not hosted in Zaqar server, user should build their own web service " "to provide this web page. The subscription_confirmation_email_template let " "user to customize the subscription confirmation email content, including " "topic, body and sender. The unsubscribe_confirmation_email_template let user " "to customise the unsubscribe confirmation email content, including topic, " "body and sender too." msgid "" "To enhance the security of messaging service, the queue in Zaqar supports to " "encrypt messages before storing them into storage backends, also could " "support to decrypt messages when those are claimed by consumer. To enable " "this feature, user just need to take \"_enable_encrypt_messages=True\" when " "creating queue. AES-256 is used as the default of encryption algorithm and " "encryption key is configurable in the zaqar.conf." msgstr "" "To enhance the security of messaging service, the queue in Zaqar supports to " "encrypt messages before storing them into storage backends, also could " "support to decrypt messages when those are claimed by consumer. To enable " "this feature, user just need to take \"_enable_encrypt_messages=True\" when " "creating queue. AES-256 is used as the default of encryption algorithm and " "encryption key is configurable in the zaqar.conf." msgid "Train Series Release Notes" msgstr "Train Series Release Notes" msgid "Upgrade Notes" msgstr "Upgrade Notes" msgid "" "Upgrade one of storage drivers, mongo driver with new version of pymongo. " "Pymongo has been updated to 4.0.0, there are some changes which are not " "supported in new version: 1. Collection.count and Cursor.count is removed. " "2. Collection.ensure_index is removed. 3. Collection.__bool__ raises " "NotImplementedError. 4. Should use Binary.from_uuid to handle the UUID " "object. Those changes need to upgrade the mongo driver's code to work well." 
msgstr "" "Upgrade one of the storage drivers, the Mongo driver with new version of " "PyMongo. PyMongo has been updated to 4.0.0, there are some changes which are " "not supported in the new version: 1. Collection.count and Cursor.count is " "removed. 2. Collection.ensure_index is removed. 3. Collection.__bool__ " "raises NotImplementedError. 4. Should use Binary.from_uuid to handle the " "UUID object. Those changes need to upgrade the mongo driver's code to work " "well." msgid "" "Use of JSON policy files was deprecated by the ``oslo.policy`` library " "during the Victoria development cycle. As a result, this deprecation is " "being noted in the Wallaby cycle with an anticipated future removal of " "support by ``oslo.policy``. As such operators will need to convert to YAML " "policy files. Please see the upgrade notes for details on migration of any " "custom policy files." msgstr "" "The JSON policy files were deprecated by the ``oslo.policy`` library during " "the Victoria development cycle. As a result, this deprecation is being noted " "in the Wallaby cycle with an anticipated future removal of support by ``oslo." "policy``. As such operators will need to convert to YAML policy files. " "Please see the upgrade notes for details on the migration of any custom " "policy files." msgid "Ussuri Series Release Notes" msgstr "Ussuri Series Release Notes" msgid "Victoria Series Release Notes" msgstr "Victoria Series Release Notes" msgid "Wallaby Series Release Notes" msgstr "Wallaby Series Release Notes" msgid "" "Welcome to the Victoria release of the OpenStack Message service (zaqar). " "In this cycle, the Zaqar team would like to bring the following points to " "your attention. Details may be found below." msgstr "" "Welcome to the Victoria release of the OpenStack Message service (Zaqar). " "In this cycle, the Zaqar team would like to bring the following points to " "your attention. Details may be found below." msgid "" "When access the root path of Zaqar service, for example: curl GET " "http://127.0.0.1:8888/, user will see 401 error. Which will cause some front " "end proxy (like HAProxy) to complain. Now this issue has been fixed." msgstr "" "When access the root path of Zaqar service, for example: curl GET " "http://127.0.0.1:8888/, user will see 401 error. Which will cause some front " "end proxy (like HAProxy) to complain. Now this issue has been fixed." msgid "" "When using the sqlalchemy driver, operators now are required to run \"zaqar-" "sql-db-manage upgrade\" before making the service available. The service " "previously tried to create the database on the first request, but it was " "bound to race conditions." msgstr "" "When using the sqlalchemy driver, operators now are required to run \"zaqar-" "sql-db-manage upgrade\" before making the service available. The service " "previously tried to create the database on the first request, but it was " "bound to race conditions." msgid "Xena Series Release Notes" msgstr "Xena Series Release Notes" msgid "Yoga Series Release Notes" msgstr "Yoga Series Release Notes" msgid "" "Zaqar API v2 has been released for several cycles and it is integrated as " "the default API version by most of the OpenStack services. So it is time to " "deprecated v1.1 in favor of v2. Now in Newton cycle, Zaqar API v1.1 is " "officially deprecated." msgstr "" "Zaqar API v2 has been released for several cycles and it is integrated as " "the default API version by most of the OpenStack services. So it is time to " "deprecated v1.1 in favour of v2. 
Now in Newton cycle, Zaqar API v1.1 is " "officially deprecated." msgid "Zaqar Release Notes" msgstr "Zaqar Release Notes" msgid "" "Zaqar didn't return the reserved metadata when listing detailed queue. After " "this fix, Zaqar will return reserved metadata '_default_message_ttl' and " "'_max_messages_post_size' in response of listing detailed queue." msgstr "" "Zaqar didn't return the reserved metadata when listing detailed queue. After " "this fix, Zaqar will return reserved metadata '_default_message_ttl' and " "'_max_messages_post_size' in response of listing detailed queue." msgid "Zaqar now supports Cross-Origin Resource Sharing (CORS)." msgstr "Zaqar now supports Cross-Origin Resource Sharing (CORS)." msgid "" "Zaqar supports a new way to directly use pool resource without pool_group " "when creating Flavor. The old way will be kept in Queens and be marked " "useless. Zaqar will remove the pool_group totally in Rocky." msgstr "" "Zaqar supports a new way to directly use a pool resource without pool_group " "when creating a Flavour. The old way will be kept in Queens and be marked " "deprecated. Zaqar will remove the pool_group totally in Rocky." msgid "Zed Series Release Notes" msgstr "Zed Series Release Notes" ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5380137 zaqar-20.1.0.dev29/releasenotes/source/locale/fr/0000775000175100017510000000000015033040026020600 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5600135 zaqar-20.1.0.dev29/releasenotes/source/locale/fr/LC_MESSAGES/0000775000175100017510000000000015033040026022365 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po0000664000175100017510000000246615033040005025423 0ustar00mylesmyles# Gérald LONLAS , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: zaqar\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2024-07-10 21:01+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-10-22 06:45+0000\n" "Last-Translator: Gérald LONLAS \n" "Language-Team: French\n" "Language: fr\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n > 1)\n" msgid "3.0.0" msgstr "3.0.0" msgid "3.0.0.0b2" msgstr "3.0.0.0b2" msgid "3.0.0.0b3" msgstr "3.0.0.0b3" msgid "3.0.0.0rc1" msgstr "3.0.0.0rc1" msgid "Bug Fixes" msgstr "Corrections de bugs" msgid "Current Series Release Notes" msgstr "Note de la release actuelle" msgid "Deprecation Notes" msgstr "Notes dépréciées " msgid "Liberty Series Release Notes" msgstr "Note de release pour Liberty" msgid "Mitaka Series Release Notes" msgstr "Note de release pour Mitaka" msgid "New Features" msgstr "Nouvelles fonctionnalités" msgid "Newton Series Release Notes" msgstr "Note de release pour Newton" msgid "Other Notes" msgstr "Autres notes" msgid "Start using reno to manage release notes." 
msgstr "Commence à utiliser reno pour la gestion des notes de release" msgid "Zaqar Release Notes" msgstr "Note de release pour Zaqar" ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5380137 zaqar-20.1.0.dev29/releasenotes/source/locale/id/0000775000175100017510000000000015033040026020565 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5600135 zaqar-20.1.0.dev29/releasenotes/source/locale/id/LC_MESSAGES/0000775000175100017510000000000015033040026022352 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/source/locale/id/LC_MESSAGES/releasenotes.po0000664000175100017510000005761115033040005025412 0ustar00mylesmyles# suhartono , 2019. #zanata msgid "" msgstr "" "Project-Id-Version: zaqar\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2019-07-08 04:21+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2019-02-01 06:58+0000\n" "Last-Translator: suhartono \n" "Language-Team: Indonesian\n" "Language: id\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=1; plural=0\n" msgid "1.1.0" msgstr "1.1.0" msgid "2.0.0-10" msgstr "2.0.0-10" msgid "3.0.0" msgstr "3.0.0" msgid "4.0.0" msgstr "4.0.0" msgid "5.0.0" msgstr "5.0.0" msgid "6.0.0" msgstr "6.0.0" msgid "7.0.0" msgstr "7.0.0" msgid "" "A new queue action is added so that users can purge a queue quickly. That " "means all the messages and subscriptions will be deleted automatically but " "the metadata of the queue will be kept." msgstr "" "Tindakan antrian baru ditambahkan sehingga pengguna dapat membersihkan " "antrian dengan cepat. Itu berarti semua pesan dan langganan akan dihapus " "secara otomatis tetapi metadata antrian akan disimpan." msgid "" "Add a new webhook notifier using trust authentication. When using the 'trust" "+' URL prefix, Zaqar will create a Keystone trust for the user, and then use " "it when a notification happens to authenticate against Keystone and send the " "token to the endpoint." msgstr "" "Tambahkan notifikasi webhook baru menggunakan otentikasi kepercayaan. Saat " "menggunakan awalan URL 'trust +', Zaqar akan membuat kepercayaan Keystone " "untuk pengguna, dan kemudian menggunakannya ketika pemberitahuan terjadi " "untuk mengotentikasi terhadap Keystone dan mengirim token ke endpoint." msgid "" "Add an new option named 'message_delete_with_claim_id', when it is True, " "delete messages must need claim_ids and message_ids both in request " "parameters. This will improve the security of the message." msgstr "" "Tambahkan opsi baru bernama 'message_delete_with_claim_id', ketika itu True, " "hapus pesan harus membutuhkan claim_ids dan message_ids keduanya dalam " "parameter permintaan. Ini akan meningkatkan keamanan pesan." msgid "Add migration support for Zaqar's sqlalchemy storage driver." msgstr "Tambahkan dukungan migrasi untuk driver penyimpanan sqlalchemy Zaqar." msgid "" "Add three new reserved metdata in response body of querying queue. " "\"_dead_letter_queue\", \"_dead_letter_queue_messages_ttl\" and " "\"_max_claim_count\". Those metadata will help user to know better about " "dead letter queue." msgstr "" "Tambahkan tiga metdata cadangan baru di body respons antrian permintaan. " "\"_dead_letter_queue\", \"_dead_letter_queue_messages_ttl\" dan " "\"_max_claim_count\". 
Metadata tersebut akan membantu pengguna untuk " "mengetahui lebih baik tentang antrian surat mati (dead letter queue)." msgid "" "Add two configurations for the notification endpoint of the websocket " "server, instead of a random port and local address. One is 'notification-" "bind', address on which the notification server will listen. Another is " "'notification-port', port on which the notification server will listen." msgstr "" "Tambahkan dua konfigurasi untuk endpoint notifikasi server websocket, bukan " "port acak dan alamat lokal. Salah satunya adalah 'notifikasi-bind', alamat " "di mana server notifikasi akan mendengarkan. Lainnya adalah 'port " "notifikasi', port yang mana server notifikasi akan mendengarkan." msgid "Added new tool ``zaqar-status upgrade check``." msgstr "Menambahkan alat baru ``zaqar-status upgrade check``." msgid "Bug Fixes" msgstr "Bug Fixes" msgid "Critical Issues" msgstr "Critical Issues" msgid "Current Series Release Notes" msgstr "Catatan Rilis Seri Saat Ini" msgid "" "Currently Zaqar can support more built-in/reserved attributes in queue. For " "now there are two important attributes 'max_messages_post_size' and " "'max_message_ttl'. With this feature, when user query queues Zaqar will show " "those two attributes (read from config file if there is no customized value " "from user) in queue metadata so that user can know what value it is." msgstr "" "Saat ini Zaqar dapat mendukung lebih banyak atribut built-in/reserved dalam " "antrian. Untuk saat ini ada dua atribut penting 'max_messages_post_size' dan " "'max_message_ttl'. Dengan fitur ini, ketika ada antrian permintaan pengguna, " "Zaqar akan menampilkan dua atribut tersebut (baca dari file konfigurasi jika " "tidak ada nilai yang disesuaikan dari pengguna) dalam metadata antrian " "sehingga pengguna dapat mengetahui nilainya." msgid "" "Currently the email subscription in Zaqar relay on the third part tools, " "such as \"sendmail\". It means that deployer should install it out of Zaqar. " "If he forgets, Zaqar will raise internal error. This work let Zaqar support " "email subscription by itself using the ``smtp`` python library." msgstr "" "Saat ini langganan email di Zaqar menyampaikan pada alat bagian ketiga, " "seperti \"sendmail\". Ini berarti bahwa deployer harus menginstalnya dari " "Zaqar. Jika dia lupa, Zaqar akan meningkatkan kesalahan internal. Karya ini " "memungkinkan Zaqar mendukung langganan email dengan sendirinya menggunakan " "pustaka python ``smtp``." msgid "" "Currently, the v1 API is still accessible though it has been deprecated for " "a while. And we're going to deprecate v1.1 soon. To keep the backward " "compatibility, a new config option - ``enable_deprecated_api_versions`` is " "added so that operator can totally turn off an API version or still support " "it by adding the API version to the list of the new config option." msgstr "" "Saat ini, API v1 masih dapat diakses meskipun telah ditinggalkan untuk " "sementara waktu. Dan kita akan segera menghentikan v1.1. Untuk menjaga " "kompatibilitas ke belakang, opsi konfigurasi baru - `` " "enable_deprecated_api_versions`` ditambahkan sehingga operator dapat " "sepenuhnya menonaktifkan versi API atau masih mendukungnya dengan " "menambahkan versi API ke daftar opsi konfigurasi baru." msgid "Deprecation Notes" msgstr "Catatan Deprekasi" msgid "" "In IPv6 management network environment, starting Zaqar server will run into " "'Address family for hostname not support' error when use WSGI simple server. 
" "The root cause is that Python's TCPServer implementation is hard-coded to " "use IPv4, even in IPv6 environments. Now this issue has been fixed." msgstr "" "Dalam lingkungan jaringan manajemen IPv6, memulai server Zaqar akan " "mengalami kesalahan 'Address family for hostname not support' saat " "menggunakan server sederhana WSGI. Akar penyebabnya adalah bahwa " "implementasi TCPServer Python adalah hard-coded untuk menggunakan IPv4, " "bahkan di lingkungan IPv6. Sekarang masalah ini telah diperbaiki." msgid "" "Introduce Guru to Zaqar. Guru is a mechanism whereby developers and system " "administrators can generate a report about the state of a running Zaqar " "executable. This report is called a *Guru Meditation Report*. Now Guru can " "support wsgi, websocket and uwsgi modes all." msgstr "" "Perkenalkan Guru ke Zaqar. Guru adalah mekanisme di mana pengembang dan " "administrator sistem dapat menghasilkan laporan tentang keadaan Zaqar yang " "sedang berjalan yang dapat dieksekusi. Laporan ini disebut *Guru Meditation " "Report*. Sekarang Guru dapat mendukung mode wsgi, websocket, dan uwsgi " "semuanya." msgid "Liberty Series Release Notes" msgstr "Catatan Rilis Seri Liberty" msgid "Mitaka Series Release Notes" msgstr "Catatan Rilis Seri Mitaka" msgid "New Features" msgstr "Fitur baru" msgid "" "New framework for ``zaqar-status upgrade check`` command is added. This " "framework allows adding various checks which can be run before a Zaqar " "upgrade to ensure if the upgrade can be performed safely." msgstr "" "Kerangka kerja baru untuk perintah ``zaqar-status upgrade check`` " "ditambahkan. Kerangka kerja ini memungkinkan menambahkan berbagai " "pemeriksaan yang dapat dijalankan sebelum pemutakhiran Zaqar untuk " "memastikan apakah pemutakhiran dapat dilakukan dengan aman." msgid "Newton Series Release Notes" msgstr "Catatan Rilis Seri Newton" msgid "" "Now before users send messages to subscribers through a queue, the " "subscribers should be confirmed first. Zaqar only sends messages to the " "confirmed subscribers. This feature supports \"webhook\" and \"mailto\" " "subscribers with mongoDB or redis backend. The \"mailto\" part will be done " "in O cycle. Set \"require_confirmation = True\" to enable this feature. The " "default value is \"False\" now and we will enable it by default after one or " "two cycles." msgstr "" "Sekarang sebelum pengguna mengirim pesan ke pelanggan melalui antrian, " "pelanggan harus dikonfirmasi terlebih dahulu. Zaqar hanya mengirim pesan ke " "pelanggan yang dikonfirmasi. Fitur ini mendukung pelanggan \"webhook\" dan " "\"mailto\" dengan mongoDB atau redis backend. Bagian \"mailto\" akan " "dilakukan dalam siklus O. Setel \"require_confirmation = True\" untuk " "mengaktifkan fitur ini. Nilai default adalah \"False\" sekarang dan kami " "akan mengaktifkannya secara default setelah satu atau dua siklus." msgid "Ocata Series Release Notes" msgstr "Catatan Rilis Seri Ocata" msgid "" "Operator can now use new CLI tool ``zaqar-status upgrade check`` to check if " "Zaqar deployment can be safely upgraded from N-1 to N release." msgstr "" "Operator sekarang dapat menggunakan alat CLI baru ``zaqar-status upgrade " "check`` untuk memeriksa apakah penyebaran Zaqar dapat ditingkatkan secara " "aman dari rilis N-1 ke N." 
msgid "Other Notes" msgstr "Catatan lain" msgid "Pike Series Release Notes" msgstr "Catatan Rilis Seri Pike" msgid "Prelude" msgstr "Pendahuluan" msgid "Queens Series Release Notes" msgstr "Catatan Rilis Seri Queens" msgid "" "Query for all subscriptions on a given queue by taking into account the " "returned marker, if any. Without this fix, only 10 subscriptions can be " "extracted from database to send notification." msgstr "" "Permintaan untuk semua langganan pada antrian yang diberikan dengan " "memperhitungkan penanda yang dikembalikan, jika ada. Tanpa perbaikan ini, " "hanya 10 langganan yang dapat diekstraksi dari database untuk mengirim " "pemberitahuan." msgid "" "Queues now behave lazy in subscriptions also. So there is no need for the " "user to pre-create a queue before creating a subscription for this queue. " "Zaqar will create the queue automatically on the subscription creation " "request. As before, all subscriptions will continue to stay active even if " "the corresponding queue was deleted." msgstr "" "Antrian sekarang berperilaku malas dalam langganan juga. Jadi tidak perlu " "bagi pengguna untuk membuat antrian sebelum membuat berlangganan untuk " "antrian ini. Zaqar akan membuat antrian secara otomatis berdasarkan " "permintaan pembuatan langganan. Seperti sebelumnya, semua langganan akan " "tetap aktif meskipun antrian yang sesuai telah dihapus." msgid "" "Redis connection doesn't support password configure in zaqar, so redis-" "server can not set a password. If redis service doesn't set a password, it " "will suffer a large number of attacks. The patch will support password " "configure for redis connection in zaqar." msgstr "" "Koneksi redis tidak mendukung konfigurasi kata sandi di zaqar, jadi redis-" "server tidak dapat mengatur kata sandi. Jika layanan redis tidak menetapkan " "kata sandi, itu akan menderita sejumlah besar serangan. Patch akan mendukung " "konfigurasi kata sandi untuk koneksi redis di zaqar." msgid "Rocky Series Release Notes" msgstr "Catatan Rilis Seri Rocky" msgid "" "Since some clients use different format of client id not only uuid, like " "user id of ldap, so Zaqar will remove the format contrain of client id. Add " "one option 'client_id_uuid_safe' to allow user to control the validation of " "client id. Add two options 'min_length_client_id' and 'max_length_client_id' " "to allow user to control the length of client id if not using uuid. This " "also requires user to ensure the client id is immutable." msgstr "" "Karena beberapa klien menggunakan format berbeda dari id klien tidak hanya " "uuid, seperti id pengguna ldap, maka Zaqar akan menghapus format format id " "klien. Tambahkan satu opsi 'client_id_uuid_safe' untuk memungkinkan pengguna " "mengontrol validasi id klien. Tambahkan dua opsi 'min_length_client_id' dan " "'max_length_client_id' untuk memungkinkan pengguna mengontrol panjang id " "klien jika tidak menggunakan uuid. Ini juga mengharuskan pengguna untuk " "memastikan id klien tidak dapat diubah." msgid "" "Since we have introduced the 'pool_list' instead of pool_group in Queens, " "Now we will update the APIs to suggest users use new argument." msgstr "" "Karena kami telah memperkenalkan 'pool_list' sebagai ganti pool_group di " "Queens, Sekarang kami akan memperbarui API untuk menyarankan pengguna " "menggunakan argumen baru." msgid "Start using reno to manage release notes." msgstr "Mulai gunakan reno untuk mengelola catatan rilis." 
msgid "" "Support 'post_data' and 'post_headers' options on subscribers, allowing " "customization of the payload when having a webhook subscriber. The " "'post_data' option supports the '$zaqar_message$' string template, which " "will be replaced by the serialized JSON message if specified." msgstr "" "Mendukung opsi 'post_data' dan 'post_headers' pada pelanggan, memungkinkan " "kustomisasi payload ketika memiliki pelanggan webhook. Opsi 'post_data' " "mendukung templat string '$zaqar_message $', yang akan diganti dengan pesan " "JSON bersambung jika ditentukan." msgid "" "Support Redis as management storage backend to improve the performance and " "ease of deployment. For the management driver, user needs to enable the " "redis storage options in redis.conf to persistent data." msgstr "" "Mendukung Redis sebagai backend penyimpanan manajemen untuk meningkatkan " "kinerja dan kemudahan penyebaran. Untuk driver manajemen, pengguna perlu " "mengaktifkan opsi penyimpanan redis di redis.conf ke data persisten." msgid "Support dot character in queue's name, like 'service.test_queue'." msgstr "" "Mendukung karakter titik (dot) dalam nama antrian, seperti 'service." "test_queue'." msgid "" "Support for dead letter queue is added for MongoDB, Redis and Swift. With " "this feature, message will be moved to the specified dead letter queue if " "it's claimed many times but still can't successfully processed by a client. " "New reseved metadata keys of queue are added: _max_claim_count, " "_dead_letter_queue and _dead_letter_queue_messages_ttl." msgstr "" "Dukungan untuk antrian surat mati (dead letter queue) ditambahkan untuk " "MongoDB, Redis dan Swift. Dengan fitur ini, pesan akan dipindahkan ke " "antrian huruf mati (dead letter queue) yang ditentukan jika diklaim berulang " "kali tetapi masih tidak berhasil diproses oleh klien. Kunci metadata reseved " "baru dari antrian (reseved metadata keys of queue) ditambahkan ditambahkan: " "_max_claim_count, _dead_letter_queue dan _dead_letter_queue_messages_ttl." msgid "" "Support for delayed queues is added for MongoDB, Redis and Swift. With this " "feature, if the queue is a delayed queue, its message will be delayed some " "time to be claimed. New reseved metadata key of queue is added: " "_default_message_delay." msgstr "" "Dukungan untuk antrian tertunda ditambahkan untuk MongoDB, Redis dan Swift. " "Dengan fitur ini, jika antrian adalah antrian tertunda, pesannya akan " "ditunda beberapa saat untuk diklaim. Kunci metadata reseved baru dari " "antrian ditambahkan: _default_message_delay." msgid "" "Support for queue filter when queue listing. With this feature, users can " "add filter of name or metadata in query string parameters in queue list to " "filter queues." msgstr "" "Dukungan untuk filter antrian ketika daftar antrian. Dengan fitur ini, " "pengguna dapat menambahkan filter nama atau metadata dalam parameter string " "kueri dalam daftar antrian ke filter antrian." msgid "" "Support more retry backoff function in webhook type. It will work when Zaqar " "failed to send the notification to the subscriber. Users can define the " "retry backoff function in metadata of queue. There are four retry backoff " "functions including 'linear', 'arithmetic', 'geometric' and 'exponential'." msgstr "" "Mendukung lebih banyak fungsi retry backoff dalam tipe webhook. Ini akan " "berfungsi ketika Zaqar gagal mengirim pemberitahuan ke pelanggan. Pengguna " "dapat menentukan fungsi retry backoff dalam metadata antrian. 
Ada empat " "fungsi retry backoff termasuk 'linear', 'arithmetic', 'geometric' dan " "'exponential'." msgid "" "Support non-URL encoded message body checksum function, the default " "algorithm is MD5. Back-end support for MongoDB, Redis and Swift. With this " "feature, when a user sends a message to the queue, Zaqar calculates a " "\"checksum\" value for the body of the non-URL encoded message, which the " "user can then get after the message is got or claimed. Finally, the user can " "use it to verify that the body of the newly obtained message is correct." msgstr "" "Mendukung fungsi checksum isi pesan yang tidak disandikan URL, algoritma " "defaultnya adalah MD5. Dukungan back-end untuk MongoDB, Redis dan Swift. " "Dengan fitur ini, ketika pengguna mengirim pesan ke antrian, Zaqar " "menghitung nilai \"checksum\" untuk isi pesan yang dikodekan bukan URL, yang " "kemudian bisa diterima pengguna setelah pesan diterima atau diklaim. " "Akhirnya, pengguna dapat menggunakannya untuk memverifikasi bahwa isi pesan " "yang baru diperoleh sudah benar." msgid "" "Support notificaiton delivery policy in webhook type. It will work when the " "notification is sent from Zaqar to the subscriber failed. User can define " "the retry policy in the options of subscription or metadata of queue." msgstr "" "Mendukung kebijakan pengiriman pemberitahuan dalam tipe webhook. Ini akan " "berfungsi ketika notifikasi yang dikirim dari Zaqar ke pelanggan gagal. " "Pengguna dapat menentukan kebijakan retry dalam opsi langganan atau metadata " "antrian." msgid "" "The OSprofiler is integrated to Zaqar in Ocata. It is a library from oslo. " "It aims to analyse the performance bottleneck issue by making possible to " "generate one trace per request affecting all involved services and build a " "tree of calls." msgstr "" "OSprofiler terintegrasi ke Zaqar di Ocata. Ini adalah perpustakaan dari " "oslo. Ini bertujuan untuk menganalisis masalah bottleneck kinerja dengan " "memungkinkan untuk menghasilkan satu jejak per permintaan (one trace per " "request) yang mempengaruhi semua layanan yang terlibat dan membangun pohon " "panggilan (tree of call)." msgid "" "The code structure for configuration files are changed. This is insensitvie " "for end users, but the persons who work for downstream changes should pay " "attention. Please refactor your private configurations to ``zaqar/conf/`` " "folder as well." msgstr "" "Struktur kode untuk file konfigurasi diubah. Ini tidak masuk akal bagi " "pengguna akhir, tetapi orang yang bekerja untuk perubahan hilir harus " "memperhatikan. Silakan refactor konfigurasi pribadi Anda ke folder ``zaqar/" "conf/`` juga." msgid "" "The new Swift storage backend is added to Zaqar in Ocata. It's experimental " "currently. To use this backend, you should modify the \"drivers\" section in " "the config file. [Blueprint `swift-storage-driver `_]" msgstr "" "Backend penyimpanan Swift baru ditambahkan ke Zaqar di Ocata. Ini masih " "eksperimental. Untuk menggunakan backend ini, Anda harus memodifikasi bagian " "\"drivers\" di file konfigurasi. [Blueprint `swift-storage-driver `_]" msgid "" "This feature is the third part of subscription confirmation feature. Support " "to send email to subscriber if confirmation is needed. To use this feature, " "user need to set the config option \"external_confirmation_url\", " "\"subscription_confirmation_email_template\" and " "\"unsubscribe_confirmation_email_template\". 
The confirmation page url that " "will be used in email subscription confirmation before notification, this " "page is not hosted in Zaqar server, user should build their own web service " "to provide this web page. The subscription_confirmation_email_template let " "user to customize the subscription confimation email content, including " "topic, body and sender. The unsubscribe_confirmation_email_template let user " "to customize the unsubscribe confimation email content, including topic, " "body and sender too." msgstr "" "Fitur ini adalah bagian ketiga dari fitur konfirmasi berlangganan. Dukungan " "untuk mengirim email ke pelanggan jika konfirmasi diperlukan. Untuk " "menggunakan fitur ini, pengguna perlu mengatur opsi konfigurasi " "\"external_confirmation_url\", \"subscription_confirmation_email_template\" " "dan \"unsubscribe_confirmation_email_template\". Url halaman konfirmasi " "yang akan digunakan dalam konfirmasi berlangganan email sebelum " "pemberitahuan, halaman ini tidak di-host di server Zaqar, pengguna harus " "membangun layanan web mereka sendiri untuk menyediakan halaman web ini. The " "subscription_confirmation_email_template memungkinkan pengguna untuk " "menyesuaikan konten email konfigurasi langganan, termasuk topik, isi dan " "pengirim. The unsubscribe_confirmation_email_template memungkinkan pengguna " "untuk menyesuaikan konten email konfigurasi berhenti berlangganan, termasuk " "topik, body, dan pengirim juga." msgid "Upgrade Notes" msgstr "Catatan Upgrade" msgid "" "When access the root path of Zaqar service, for example: curl GET " "http://127.0.0.1:8888/, user will see 401 error. Which will cause some front " "end proxy (like HAProxy) to complain. Now this issue has been fixed." msgstr "" "Saat mengakses jalur root layanan Zaqar, misalnya: curl GET " "http://127.0.0.1:8888/, pengguna akan melihat 401 kesalahan. Hal ini akan " "menyebabkan beberapa proxy front end (seperti HAProxy) mengeluh. Sekarang " "masalah ini telah diperbaiki." msgid "" "When using the sqlalchemy driver, operators now are required to run \"zaqar-" "sql-db-manage upgrade\" before making the service available. The service " "previously tried to create the database on the first request, but it was " "bound to race conditions." msgstr "" "Saat menggunakan driver sqlalchemy, operator sekarang diharuskan menjalankan " "\"zaqar-sql-db-manage upgrade\" sebelum menyediakan layanan. Layanan " "sebelumnya mencoba membuat database berdasarkan permintaan pertama, tetapi " "terikat pada kondisi race." msgid "" "Zaqar API v2 has been released for several cycles and it is integrated as " "the default API version by most of the OpenStack services. So it is time to " "deprecated v1.1 in favor of v2. Now in Newton cycle, Zaqar API v1.1 is " "officially deprecated." msgstr "" "Zaqar API v2 telah dirilis selama beberapa siklus dan terintegrasi sebagai " "versi API default oleh sebagian besar layanan OpenStack. Jadi sekarang " "saatnya untuk v1.1 usang yang mendukung v2. Sekarang dalam siklus Newton, " "Zaqar API v1.1 secara resmi tidak digunakan lagi." msgid "Zaqar Release Notes" msgstr "Catatan Rilis Zaqar" msgid "" "Zaqar didn't return the reserved metadata when listing detailed queue. After " "this fix, Zaqar will return reserved metadata '_default_message_ttl' and " "'_max_messages_post_size' in response of listing detailed queue." msgstr "" "Zaqar tidak mengembalikan metadata yang dipesan saat daftar antrian " "terperinci. 
Setelah perbaikan ini, Zaqar akan mengembalikan metadata " "reserved '_default_message_ttl' dan '_max_messages_post_size' sebagai " "respons dari daftar antrian terperinci." msgid "Zaqar now supports Cross-Origin Resource Sharing (CORS)." msgstr "Zaqar sekarang mendukung Cross-Origin Resource Sharing (CORS)." msgid "" "Zaqar supports a new way to directly use pool resource without pool_group " "when creating Flavor. The old way will be kept in Queens and be marked " "useless. Zaqar will remove the pool_group totally in Rocky." msgstr "" "Zaqar mendukung cara baru untuk secara langsung menggunakan sumber daya pool " "tanpa pool_group saat membuat Flavour. Cara lama akan disimpan di Queens dan " "ditandai tidak berguna. Zaqar akan menghapus pool_group sepenuhnya di Rocky." ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/source/mitaka.rst0000664000175100017510000000023215033040005020724 0ustar00mylesmyles=================================== Mitaka Series Release Notes =================================== .. release-notes:: :branch: origin/stable/mitaka ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/source/newton.rst0000664000175100017510000000023215033040005020770 0ustar00mylesmyles=================================== Newton Series Release Notes =================================== .. release-notes:: :branch: origin/stable/newton ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/source/ocata.rst0000664000175100017510000000023015033040005020543 0ustar00mylesmyles=================================== Ocata Series Release Notes =================================== .. release-notes:: :branch: origin/stable/ocata ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/source/pike.rst0000664000175100017510000000021715033040005020411 0ustar00mylesmyles=================================== Pike Series Release Notes =================================== .. release-notes:: :branch: stable/pike ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/source/queens.rst0000664000175100017510000000022315033040005020756 0ustar00mylesmyles=================================== Queens Series Release Notes =================================== .. release-notes:: :branch: stable/queens ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/source/rocky.rst0000664000175100017510000000022115033040005020603 0ustar00mylesmyles=================================== Rocky Series Release Notes =================================== .. release-notes:: :branch: stable/rocky ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/source/stein.rst0000664000175100017510000000022115033040005020576 0ustar00mylesmyles=================================== Stein Series Release Notes =================================== .. release-notes:: :branch: stable/stein ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/source/train.rst0000664000175100017510000000017615033040005020602 0ustar00mylesmyles========================== Train Series Release Notes ========================== .. 
release-notes:: :branch: stable/train ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/source/unreleased.rst0000664000175100017510000000016015033040005021605 0ustar00mylesmyles============================== Current Series Release Notes ============================== .. release-notes:: ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/source/ussuri.rst0000664000175100017510000000020215033040005021005 0ustar00mylesmyles=========================== Ussuri Series Release Notes =========================== .. release-notes:: :branch: stable/ussuri ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/source/victoria.rst0000664000175100017510000000022015033040005021273 0ustar00mylesmyles============================= Victoria Series Release Notes ============================= .. release-notes:: :branch: unmaintained/victoria ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/source/wallaby.rst0000664000175100017510000000021415033040005021111 0ustar00mylesmyles============================ Wallaby Series Release Notes ============================ .. release-notes:: :branch: unmaintained/wallaby ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/source/xena.rst0000664000175100017510000000020015033040005020404 0ustar00mylesmyles========================= Xena Series Release Notes ========================= .. release-notes:: :branch: unmaintained/xena ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/source/yoga.rst0000664000175100017510000000020015033040005020410 0ustar00mylesmyles========================= Yoga Series Release Notes ========================= .. release-notes:: :branch: unmaintained/yoga ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/releasenotes/source/zed.rst0000664000175100017510000000017415033040005020245 0ustar00mylesmyles======================== Zed Series Release Notes ======================== .. release-notes:: :branch: unmaintained/zed ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/requirements.txt0000664000175100017510000000211715033040005016223 0ustar00mylesmyles# Requirements lower bounds listed here are our best effort to keep them up to # date but we do not test them so no guarantee of having them all correct. If # you find any incorrect lower bounds, let us know or propose a fix. 
pbr!=2.1.0,>=2.0.0 # Apache-2.0
alembic>=0.9.6 # MIT
cryptography>=2.7 # BSD/Apache-2.0
falcon>=3.0.0 # Apache-2.0
jsonschema>=3.2.0 # MIT
keystonemiddleware>=9.1.0 # Apache-2.0
msgpack>=1.0.0 # Apache-2.0
python-swiftclient>=3.10.1 # Apache-2.0
WebOb>=1.7.1 # MIT
stevedore>=3.2.2 # Apache-2.0
oslo.cache>=1.26.0 # Apache-2.0
oslo.concurrency>=5.0.1 # Apache-2.0
oslo.config>=8.3.2 # Apache-2.0
oslo.context>=2.19.2 # Apache-2.0
oslo.db>=11.0.0 # Apache-2.0
oslo.i18n>=3.15.3 # Apache-2.0
oslo.log>=4.6.1 # Apache-2.0
oslo.messaging>=12.5.0 # Apache-2.0
oslo.reports>=2.2.0 # Apache-2.0
oslo.serialization>=4.2.0 # Apache-2.0
oslo.upgradecheck>=1.3.0 # Apache-2.0
oslo.utils>=4.12.1 # Apache-2.0
oslo.policy>=4.5.0 # Apache-2.0
osprofiler>=1.4.0 # Apache-2.0
SQLAlchemy>=1.3.19 # MIT
autobahn>=22.3.2 # MIT License
requests>=2.25.0 # Apache-2.0
futurist>=1.2.0 # Apache-2.0
././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5390136 zaqar-20.1.0.dev29/samples/0000775000175100017510000000000015033040026014405 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5600135 zaqar-20.1.0.dev29/samples/html/0000775000175100017510000000000015033040026015351 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/samples/html/confirmation_web_service_sample.py0000664000175100017510000000574415033040005024340 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.

import logging
import sys

from oslo_serialization import jsonutils
from oslo_utils import uuidutils
import requests

try:
    import SimpleHTTPServer
    import SocketServer
except Exception:
    from http import server as SimpleHTTPServer
    import socketserver as SocketServer

if len(sys.argv) > 2:
    PORT = int(sys.argv[2])
elif len(sys.argv) > 1:
    PORT = int(sys.argv[1])
else:
    PORT = 5678


class ServerHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    """This is the sample service for email subscription confirmation.
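
    It listens on PORT (default 5678, or the port given on the command
    line), answers the browser's CORS preflight (OPTIONS) for the
    confirmation page, and on PUT relays a {"confirmed": true/false}
    request back to Zaqar using the pre-signed URL headers it receives.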
""" def do_OPTIONS(self): logging.warning('=================== OPTIONS =====================') self.send_response(200) self.send_header('Access-Control-Allow-Origin', self.headers['origin']) self.send_header('Access-Control-Allow-Methods', 'PUT') self.send_header('Access-Control-Allow-Headers', 'client-id,confirmation-url,content-type,url-expires,' 'url-methods,url-paths,url-signature,x-project-id,' 'confirm') self.end_headers() logging.warning(self.headers) return def do_PUT(self): logging.warning('=================== PUT =====================') self._send_confirm_request() self.send_response(200) self.send_header('Access-Control-Allow-Origin', self.headers['origin']) self.end_headers() message = "{\"message\": \"ok\"}" self.wfile.write(message) logging.warning(self.headers) return def _send_confirm_request(self): url = self.headers['confirmation-url'] confirmed_value = True try: if self.headers['confirm'] == "false": confirmed_value = False except KeyError: pass headers = { 'Accept': 'application/json', 'Content-Type': 'application/json', 'X-Project-ID': self.headers['x-project-id'], 'Client-ID': uuidutils.generate_uuid(), 'URL-Methods': self.headers['url-methods'], 'URL-Signature': self.headers['url-signature'], 'URL-Paths': self.headers['url-paths'], 'URL-Expires': self.headers['url-expires'], } data = {'confirmed': confirmed_value} requests.put(url=url, data=jsonutils.dumps(data), headers=headers) Handler = ServerHandler httpd = SocketServer.TCPServer(("", PORT), Handler) httpd.serve_forever() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/samples/html/subscriptionConfirmation.html0000664000175100017510000001177515033040005023344 0ustar00mylesmyles

[subscriptionConfirmation.html: page markup and scripts were stripped in extraction; the only recoverable visible text is "Confirming subscription...".]
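Since the page's script did not survive extraction, the following is only a rough hypothetical sketch (the function and parameter names are assumptions, not the original file's contents) of what such a confirmation page does: it reads the pre-signed URL parameters out of the confirmation link and PUTs a confirmed flag back to Zaqar, mirroring what confirmation_web_service_sample.py above does server-side.

// Hypothetical sketch only; names are assumptions, not the original page.
function confirmSubscription(p, confirmed) {
  return fetch(p.confirmationUrl, {
    method: 'PUT',
    headers: {
      'Accept': 'application/json',
      'Content-Type': 'application/json',
      'X-Project-ID': p.projectId,
      'Client-ID': p.clientId,
      'URL-Methods': p.urlMethods,
      'URL-Signature': p.urlSignature,
      'URL-Paths': p.urlPaths,
      'URL-Expires': p.urlExpires,
    },
    // confirmed=true confirms the subscription; false unsubscribes.
    body: JSON.stringify({confirmed: confirmed}),
  });
}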

././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/samples/html/unsubscriptionConfirmation.html0000664000175100017510000001103715033040005023676 0ustar00mylesmyles

[unsubscriptionConfirmation.html: page markup and scripts were stripped in extraction; the only recoverable visible text is "Removing subscription...".]

././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5380137 zaqar-20.1.0.dev29/samples/java-api-for-websocket/0000775000175100017510000000000015033040026020645 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5600135 zaqar-20.1.0.dev29/samples/java-api-for-websocket/receive_message/0000775000175100017510000000000015033040026023773 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/samples/java-api-for-websocket/receive_message/JsonDecoder.java0000664000175100017510000000222515033040005027033 0ustar00mylesmyles/*
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy
 * of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package org.openstack.zaqar.sample;

import java.io.StringReader;

import javax.json.Json;
import javax.json.JsonObject;
import javax.websocket.Decoder;
import javax.websocket.EndpointConfig;

public final class JsonDecoder implements Decoder.Text<JsonObject> {

    @Override
    public JsonObject decode(final String s) {
        return Json.createReader(new StringReader(s)).readObject();
    }

    @Override
    public void destroy() {
    }

    @Override
    public void init(final EndpointConfig config) {
    }

    @Override
    public boolean willDecode(final String s) {
        return true;
    }
}
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/samples/java-api-for-websocket/receive_message/SampleZaqarEndpoint.java0000664000175100017510000000404415033040005030556 0ustar00mylesmyles/*
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy
 * of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
*/ import static java.lang.System.out; import java.io.IOException; import javax.json.JsonObject; import javax.websocket.ClientEndpoint; import javax.websocket.OnMessage; import javax.websocket.OnOpen; import javax.websocket.RemoteEndpoint; import javax.websocket.Session; @ClientEndpoint(decoders = JsonDecoder.class) public final class SampleZaqarEndpoint { @OnMessage public void onMessage(final JsonObject msg) { if (msg.getJsonObject("body").getJsonArray("messages") != null) out.println(msg.getJsonObject("body").getJsonArray("messages") .getJsonObject(0).getString("body")); } @OnOpen public void onOpen(final Session sess) throws IOException { final RemoteEndpoint.Basic remote = sess.getBasicRemote(); final String authenticateMsg = "{\"action\":\"authenticate\"," + "\"headers\":{\"X-Auth-Token\":" + "\"8444886dd9b04a1b87ddb502b508261c\",\"X-Project-ID\":" + "\"7530fad032ca431e9dc8ed4a5de5d99c\"}}"; // refer to bug // #1553398 remote.sendText(authenticateMsg); final String claimCreateMsg = "{\"action\":\"claim_create\",\"body\":" + "{\"queue_name\":\"SampleQueue\"},\"headers\":{\"Client-ID\":" + "\"355186cd-d1e8-4108-a3ac-a2183697232a\",\"X-Project-ID\":" + "\"7530fad032ca431e9dc8ed4a5de5d99c\"}}"; remote.sendText(claimCreateMsg); } } ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5600135 zaqar-20.1.0.dev29/samples/java-api-for-websocket/send_message/0000775000175100017510000000000015033040026023302 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/samples/java-api-for-websocket/send_message/SampleZaqarEndpoint.java0000664000175100017510000000331215033040005030062 0ustar00mylesmyles/* * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy * of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. 
*/ import java.io.IOException; import javax.websocket.ClientEndpoint; import javax.websocket.OnOpen; import javax.websocket.RemoteEndpoint; import javax.websocket.Session; @ClientEndpoint public final class SampleZaqarEndpoint { @OnOpen public void onOpen(final Session sess) throws IOException { final RemoteEndpoint.Basic remote = sess.getBasicRemote(); final String authenticateMsg = "{\"action\":\"authenticate\"," + "\"headers\":{\"X-Auth-Token\":" + "\"8444886dd9b04a1b87ddb502b508261c\",\"X-Project-ID\":" + "\"7530fad032ca431e9dc8ed4a5de5d99c\"}}"; // refer to bug // #1553398 remote.sendText(authenticateMsg); final String messagePostMsg = "{\"action\":\"message_post\",\"body\":" + "{\"messages\":[{\"body\":\"Zaqar Sample\"}],\"queue_name\":" + "\"SampleQueue\"},\"headers\":{\"Client-ID\":" + "\"355186cd-d1e8-4108-a3ac-a2183697232a\",\"X-Project-ID\":" + "\"7530fad032ca431e9dc8ed4a5de5d99c\"}}"; remote.sendText(messagePostMsg); } } ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5600135 zaqar-20.1.0.dev29/samples/javascript/0000775000175100017510000000000015033040026016553 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5600135 zaqar-20.1.0.dev29/samples/javascript/receive_message/0000775000175100017510000000000015033040026021701 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/samples/javascript/receive_message/zaqar_sample.js0000664000175100017510000000226015033040005024713 0ustar00mylesmyles/* * Licensed under the Apache License, Version 2.0 (the 'License'); you may not * use this file except in compliance with the License. You may obtain a copy * of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an 'AS IS' BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ const ws = new WebSocket('ws://localhost:9000'); ws.onmessage = (e) => { const msg = JSON.parse(e.data); if (msg.body.messages) console.log(msg.body.messages[0].body); }; ws.onopen = () => { ws.send('{"action": "authenticate", "headers": {"X-Auth-Token": \ "8444886dd9b04a1b87ddb502b508261c", "X-Project-ID": \ "7530fad032ca431e9dc8ed4a5de5d99c"}}'); // refer to bug #1553398 ws.send('{"action": "claim_create", "body": {"queue_name": "SampleQueue"}, \ "headers": {"Client-ID": "355186cd-d1e8-4108-a3ac-a2183697232a", \ "X-Project-ID": "7530fad032ca431e9dc8ed4a5de5d99c"}}'); }; ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5600135 zaqar-20.1.0.dev29/samples/javascript/send_message/0000775000175100017510000000000015033040026021210 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/samples/javascript/send_message/zaqar_sample.js0000664000175100017510000000213015033040005024216 0ustar00mylesmyles/* * Licensed under the Apache License, Version 2.0 (the 'License'); you may not * use this file except in compliance with the License. 
You may obtain a copy * of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an 'AS IS' BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ const ws = new WebSocket('ws://localhost:9000'); ws.onopen = () => { ws.send('{"action": "authenticate", "headers": {"X-Auth-Token": \ "8444886dd9b04a1b87ddb502b508261c", "X-Project-ID": \ "7530fad032ca431e9dc8ed4a5de5d99c"}}'); // refer to bug #1553398 ws.send('{"action": "message_post", "body": {"messages": [{"body": \ "Zaqar Sample"}], "queue_name": "SampleQueue"}, "headers": \ {"Client-ID": "355186cd-d1e8-4108-a3ac-a2183697232a", "X-Project-ID": \ "7530fad032ca431e9dc8ed4a5de5d99c"}}'); }; ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/samples/javascript/websocket.html0000664000175100017510000003026715033040005021434 0ustar00mylesmyles Zaqar WebSocket example

[websocket.html body: page markup and scripts were stripped in extraction; the recoverable visible text is the page heading "Zaqar WebSocket example" and the panels "Queues", "Messages" (columns: Age, Body, TTL) and "Logs".]

././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5390136 zaqar-20.1.0.dev29/samples/jaxrs/0000775000175100017510000000000015033040026015534 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5600135 zaqar-20.1.0.dev29/samples/jaxrs/receive_message/0000775000175100017510000000000015033040026020662 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/samples/jaxrs/receive_message/SampleZaqarServlet.java0000664000175100017510000000365215033040005025315 0ustar00mylesmyles/*
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy
 * of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

import java.io.IOException;

import javax.servlet.annotation.WebServlet;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.client.Client;
import javax.ws.rs.client.ClientBuilder;
import javax.ws.rs.client.Entity;
import javax.ws.rs.core.MultivaluedHashMap;
import javax.ws.rs.core.MultivaluedMap;
import javax.ws.rs.core.Response;

@SuppressWarnings("serial")
@WebServlet(name = "SampleServlet", value = "/")
public final class SampleZaqarServlet extends HttpServlet {

    @Override
    protected void doGet(final HttpServletRequest req,
            final HttpServletResponse resp) throws IOException {
        final Client client = ClientBuilder.newClient();
        final MultivaluedMap<String, Object> headers =
                new MultivaluedHashMap<String, Object>();
        headers.putSingle("Client-ID", "355186cd-d1e8-4108-a3ac-a2183697232a");
        headers.putSingle("X-Auth-Token", "8444886dd9b04a1b87ddb502b508261c");
        headers.putSingle("X-Project-Id", "7530fad032ca431e9dc8ed4a5de5d99c");
        final Response res = client
                .target("http://localhost:8888/v2/queues/SampleQueue/claims")
                .request().headers(headers).post(Entity.json(""));
        resp.getWriter().println(res.readEntity(String.class));
        client.close();
    }
}
././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5600135 zaqar-20.1.0.dev29/samples/jaxrs/send_message/0000775000175100017510000000000015033040026020171 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/samples/jaxrs/send_message/SampleZaqarServlet.java0000664000175100017510000000361715033040005024625 0ustar00mylesmyles/*
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy
 * of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

import javax.servlet.annotation.WebServlet;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.client.Client;
import javax.ws.rs.client.ClientBuilder;
import javax.ws.rs.client.Entity;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.MultivaluedHashMap;
import javax.ws.rs.core.MultivaluedMap;

@SuppressWarnings("serial")
@WebServlet(name = "SampleZaqarServlet", value = "/")
public final class SampleZaqarServlet extends HttpServlet {

    @Override
    protected void doGet(final HttpServletRequest req,
            final HttpServletResponse resp) {
        final Client client = ClientBuilder.newClient();
        final MultivaluedMap<String, Object> headers =
                new MultivaluedHashMap<String, Object>();
        headers.putSingle("Client-ID", "355186cd-d1e8-4108-a3ac-a2183697232a");
        headers.putSingle("X-Auth-Token", "8444886dd9b04a1b87ddb502b508261c");
        headers.putSingle("X-Project-Id", "7530fad032ca431e9dc8ed4a5de5d99c");
        client.target("http://localhost:8888/v2/queues/SampleQueue/messages")
                .request(MediaType.APPLICATION_JSON_TYPE).headers(headers)
                .post(Entity
                        .json("{\"messages\":[{\"body\":\"Zaqar Sample\"}]}"));
        client.close();
    }
}
././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5390136 zaqar-20.1.0.dev29/samples/nodejs/0000775000175100017510000000000015033040026015667 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5600135 zaqar-20.1.0.dev29/samples/nodejs/receive_message/0000775000175100017510000000000015033040026021015 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/samples/nodejs/receive_message/zaqar_sample.js0000664000175100017510000000234115033040005024027 0ustar00mylesmyles/* * Licensed under the Apache License, Version 2.0 (the 'License'); you may not * use this file except in compliance with the License. You may obtain a copy * of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an 'AS IS' BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License.
*/ const WebSocket = require('ws'); const ws = new WebSocket('ws://localhost:9000'); ws.on('message', (data, flags) => { const msg = JSON.parse(data); if (msg.body.messages) console.log(msg.body.messages[0].body); }); ws.on('open', () => { ws.send('{"action": "authenticate", "headers": {"X-Auth-Token": \ "8444886dd9b04a1b87ddb502b508261c", "X-Project-ID": \ "7530fad032ca431e9dc8ed4a5de5d99c"}}'); // refer to bug #1553398 ws.send('{"action": "claim_create", "body": {"queue_name": "SampleQueue"}, \ "headers": {"Client-ID": "355186cd-d1e8-4108-a3ac-a2183697232a", \ "X-Project-ID": "7530fad032ca431e9dc8ed4a5de5d99c"}}'); }); ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5600135 zaqar-20.1.0.dev29/samples/nodejs/send_message/0000775000175100017510000000000015033040026020324 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/samples/nodejs/send_message/zaqar_sample.js0000664000175100017510000000217415033040005023342 0ustar00mylesmyles/* * Licensed under the Apache License, Version 2.0 (the 'License'); you may not * use this file except in compliance with the License. You may obtain a copy * of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an 'AS IS' BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ const WebSocket = require('ws'); const ws = new WebSocket('ws://localhost:9000'); ws.on('open', () => { ws.send('{"action": "authenticate", "headers": {"X-Auth-Token": \ "8444886dd9b04a1b87ddb502b508261c", "X-Project-ID": \ "7530fad032ca431e9dc8ed4a5de5d99c"}}'); // refer to bug #1553398 ws.send('{"action": "message_post", "body": {"messages": [{"body": \ "Zaqar Sample"}], "queue_name": "SampleQueue"}, "headers": \ {"Client-ID": "355186cd-d1e8-4108-a3ac-a2183697232a", "X-Project-ID": \ "7530fad032ca431e9dc8ed4a5de5d99c"}}'); }); ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5390136 zaqar-20.1.0.dev29/samples/python-zaqarclient/0000775000175100017510000000000015033040026020241 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5600135 zaqar-20.1.0.dev29/samples/python-zaqarclient/receive_message/0000775000175100017510000000000015033040026023367 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/samples/python-zaqarclient/receive_message/zaqar_sample.py0000664000175100017510000000210315033040005026411 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. 
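# The sample below claims messages from SampleQueue and prints them. A
# hedged follow-up sketch: once a message has been processed, it is
# usually acknowledged by deleting it (method name per
# python-zaqarclient's message resource):
#
#     for msg in claim:
#         print(msg)
#         msg.delete()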
from zaqarclient.queues.v2 import client client = client.Client('http://localhost:8888', conf={ 'auth_opts': { 'options': { 'client_uuid': '355186cd-d1e8-4108-a3ac-a2183697232a', 'os_auth_token': '8444886dd9b04a1b87ddb502b508261c', 'os_auth_url': 'http://localhost:5000/v3.0/', 'os_project_id': '7530fad032ca431e9dc8ed4a5de5d99c' } } }) queue = client.queue('SampleQueue') claim = queue.claim(ttl=600, grace=600) # refer to bug #1553387 for msg in claim: print(msg) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5610135 zaqar-20.1.0.dev29/samples/python-zaqarclient/send_message/0000775000175100017510000000000015033040026022676 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/samples/python-zaqarclient/send_message/zaqar_sample.py0000664000175100017510000000200715033040005025723 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. from zaqarclient.queues.v2 import client client = client.Client('http://localhost:8888', conf={ 'auth_opts': { 'options': { 'client_uuid': '355186cd-d1e8-4108-a3ac-a2183697232a', 'os_auth_token': '8444886dd9b04a1b87ddb502b508261c', 'os_auth_url': 'http://localhost:5000/v3.0/', 'os_project_id': '7530fad032ca431e9dc8ed4a5de5d99c' } } }) queue = client.queue('SampleQueue') queue.post([{'body': 'Zaqar Sample'}]) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5610135 zaqar-20.1.0.dev29/samples/zaqar/0000775000175100017510000000000015033040026015523 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/samples/zaqar/sendmail.py0000664000175100017510000001753515033040005017701 0ustar00mylesmyles# Copyright (c) 2018 Ustack, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. from email.mime.text import MIMEText from email.parser import Parser import smtplib import sys from keystoneauth1 import loading from keystoneauth1 import session as ks_session from oslo_config import cfg from oslo_serialization import jsonutils import requests import retrying KUNKA_SERVICE_TYPE = 'portal' """KUNKA_CORP_INFO_PATH is an API for obtaining information from the database (such as /api/email/corporation-info), and the returned information is the value of the field in the mail template. It is connected after the ip:port which is the database connection information. 
ip:port is e.g. 127.0.0.1:3306.""" KUNKA_CORP_INFO_PATH = 'Your API address' # This information is relatively sensitive; consider encrypting it in # transit, or storing it in the database and returning it from there. When # "use_ssl" is False the "port" is 25; otherwise, with SSL, the "port" is 465. mail_info = { "from": "youremail@youremail.com", "hostname": "yourSMTP_serverAddr", "username": "yourSMTP_server-username", "password": "Authorization_code", "port": 25, "use_ssl": False } # It's an HTML mail template, and can be changed as needed mail_body = u"""

/////////////////// Respected {corp_name} user ///////////////////

{confirm_or_alarm}

{corp_name}— {home_link}

""" mail_confirm_link = u""" Your mailbox will be used for receiving system notifications. If you confirm, click the following link: Activation link """ mail_alarm_info = u""" Your alarm information is as follows:{reason}
Alarm level:{severity}
Alarm name:{alarm_name}
Alarm ID :{alarm_id} """ def prepare_conf(): cfg.CONF(project='zaqar') loading.register_auth_conf_options(cfg.CONF, 'keystone_authtoken') def get_admin_session(): auth_plugin = \ loading.load_auth_from_conf_options(cfg.CONF, 'keystone_authtoken') return ks_session.Session(auth=auth_plugin) def get_endpoint(session, service_type, interface='internal'): return session.get_endpoint(service_type=service_type, interface=interface) @retrying.retry(stop_max_attempt_number=3) def get_corp_info(session): kunka_endpoint = get_endpoint(session, KUNKA_SERVICE_TYPE) kunka_url = kunka_endpoint + KUNKA_CORP_INFO_PATH res = None res = requests.get(kunka_url) corp_info = res.json() return {"corp_name": corp_info['corporationName'], "logo_url": corp_info['emailLogoUrl'], "home_link": corp_info['homeUrl'], "from": corp_info['from']} def generate_msg(subbody, to, from_, subject, **kwargs): payload = mail_body.format(confirm_or_alarm=subbody, **kwargs) msg = MIMEText(payload.encode('utf-8'), 'html', 'utf-8') msg['subject'] = subject msg['from'] = from_ msg['to'] = to return msg def generate_subbody(subbody, **kwargs): return subbody.format(**kwargs) def get_confirm_link(str_): return str_.split('below: ')[-1] def prepare_msg(msg_str): headers = Parser().parsestr(msg_str) payload = headers.get_payload() msg_subject = headers['subject'] if not headers['subject']: alarm_info = jsonutils.loads(payload)['body'] subject = msg_subject + alarm_info['alarm_name'] template = generate_subbody(mail_alarm_info, reason=alarm_info['reason'], severity=alarm_info['severity'], alarm_name=alarm_info['alarm_name'], alarm_id=alarm_info['alarm_id']) else: subject = msg_subject template = generate_subbody(mail_confirm_link, confirm_link=get_confirm_link(payload)) session = get_admin_session() corp_info = get_corp_info(session) msg = generate_msg( template, headers['to'], corp_info['from'], subject, logo_url=corp_info['logo_url'], corp_name=corp_info['corp_name'], home_link=corp_info['home_link']) return msg @retrying.retry(stop_max_attempt_number=3) def send_it(msg): # if "use_ssl" is True, the "port" is 465 in using SSL type, # or other SSL port. if mail_info['use_ssl']: sender = smtplib.SMTP_SSL(mail_info["hostname"], mail_info['port']) else: sender = smtplib.SMTP(mail_info["hostname"], mail_info['port']) sender.set_debuglevel(1) sender.ehlo(mail_info["hostname"]) try: sender.login(mail_info["username"], mail_info["password"]) except smtplib.SMTPException: print("Error: Failed to connect to the SMTP service") sender.sendmail(msg['from'], msg['to'], msg.as_string()) def send_email(msg_str): prepare_conf() send_it(prepare_msg(msg_str)) if __name__ == '__main__': send_email(''.join(sys.stdin.readlines())) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/samples/zaqar/subscriber_service_sample.py0000664000175100017510000000474315033040005023326 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. 
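# The server below acts as a webhook endpoint for Zaqar's wsgi
# subscriptions. A hedged sketch of pointing a subscription at it through
# the v2 REST API (endpoint, port and credentials are illustrative):
#
#     import requests
#     requests.post(
#         'http://localhost:8888/v2/queues/SampleQueue/subscriptions',
#         json={'subscriber': 'http://localhost:5678',
#               'ttl': 3600, 'options': {}},
#         headers={'Client-ID': '355186cd-d1e8-4108-a3ac-a2183697232a',
#                  'X-Auth-Token': '<token>',
#                  'X-Project-ID': '<project>'})
#
# Note that under Python 3 ``wfile.write`` expects bytes, so the 'OK'
# reply in do_POST may need an explicit ``message.encode()``.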
import logging from oslo_serialization import jsonutils from oslo_utils import uuidutils import requests import sys try: import SimpleHTTPServer import SocketServer except Exception: from http import server as SimpleHTTPServer import socketserver as SocketServer _AUTO_CONFIRM = False for arg in sys.argv: if arg == '--auto-confirm': _AUTO_CONFIRM = True sys.argv.remove(arg) break if len(sys.argv) > 2: PORT = int(sys.argv[2]) elif len(sys.argv) > 1: PORT = int(sys.argv[1]) else: PORT = 5678 class ServerHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): """This is the sample service for wsgi subscription. """ # TODO(wangxiyuan): support websocket. def do_POST(self): logging.warning('=================== POST =====================') data_string = str( self.rfile.read(int(self.headers['Content-Length']))) self.data = jsonutils.loads(data_string) if _AUTO_CONFIRM: self._send_confirm_request() message = 'OK' self.send_response(200) self.end_headers() self.wfile.write(message) logging.warning(self.headers) logging.warning(self.data) return def _send_confirm_request(self): url = self.data['WSGISubscribeURL'] headers = { 'Accept': 'application/json', 'Content-Type': 'application/json', 'X-Project-ID': self.data['X-Project-ID'], 'Client-ID': uuidutils.generate_uuid(), 'URL-Methods': self.data['URL-Methods'], 'URL-Signature': self.data['URL-Signature'], 'URL-Paths': self.data['URL-Paths'], 'URL-Expires': self.data['URL-Expires'], } data = {'confirmed': True} requests.put(url=url, data=jsonutils.dumps(data), headers=headers) Handler = ServerHandler httpd = SocketServer.TCPServer(("", PORT), Handler) httpd.serve_forever() ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5880134 zaqar-20.1.0.dev29/setup.cfg0000664000175100017510000000630615033040026014567 0ustar00mylesmyles[metadata] name = zaqar summary = OpenStack Queuing and Notification Service description_file = README.rst author = OpenStack author_email = openstack-discuss@lists.openstack.org home_page = https://docs.openstack.org/zaqar/latest/ python_requires = >=3.10 classifier = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: Implementation :: CPython Programming Language :: Python :: 3 :: Only Programming Language :: Python :: 3 Programming Language :: Python :: 3.10 Programming Language :: Python :: 3.11 Programming Language :: Python :: 3.12 project_urls = Source=https://opendev.org/openstack/zaqar Tracker=https://bugs.launchpad.net/zaqar [files] packages = zaqar [entry_points] console_scripts = zaqar-bench = zaqar.bench.conductor:main zaqar-server = zaqar.cmd.server:run zaqar-gc = zaqar.cmd.gc:run zaqar-sql-db-manage = zaqar.storage.sqlalchemy.migration.cli:main zaqar-status = zaqar.cmd.status:main zaqar.data.storage = mongodb = zaqar.storage.mongodb.driver:DataDriver mongodb.fifo = zaqar.storage.mongodb.driver:FIFODataDriver redis = zaqar.storage.redis.driver:DataDriver swift = zaqar.storage.swift.driver:DataDriver faulty = zaqar.tests.faulty_storage:DataDriver zaqar.control.storage = sqlalchemy = zaqar.storage.sqlalchemy.driver:ControlDriver mongodb = zaqar.storage.mongodb.driver:ControlDriver redis = zaqar.storage.redis.driver:ControlDriver faulty = zaqar.tests.faulty_storage:ControlDriver zaqar.transport = wsgi = zaqar.transport.wsgi.driver:Driver websocket = 
zaqar.transport.websocket.driver:Driver oslo.config.opts = zaqar = zaqar.conf.opts:list_opts zaqar.storage.stages = zaqar.notification.notifier = zaqar.notification.notifier:NotifierDriver zaqar.storage.mongodb.driver.queue.stages = message_queue_handler = zaqar.storage.mongodb.messages:MessageQueueHandler zaqar.storage.redis.driver.queue.stages = message_queue_handler = zaqar.storage.redis.messages:MessageQueueHandler zaqar.storage.swift.driver.queue.stages = message_queue_handler = zaqar.storage.swift.messages:MessageQueueHandler zaqar.storage.mongodb.driver.topic.stages = message_queue_handler = zaqar.storage.mongodb.topic_messages:MessageTopicHandler zaqar.storage.redis.driver.topic.stages = message_queue_handler = zaqar.storage.redis.messages:MessageTopicHandler zaqar.storage.swift.driver.topic.stages = message_queue_handler = zaqar.storage.swift.messages:MessageTopicHandler zaqar.notification.tasks = http = zaqar.notification.tasks.webhook:WebhookTask https = zaqar.notification.tasks.webhook:WebhookTask mailto = zaqar.notification.tasks.mailto:MailtoTask trust+http = zaqar.notification.tasks.trust:TrustTask trust+https = zaqar.notification.tasks.trust:TrustTask zaqar.extraspec.tasks = messagecode = zaqar.extraspec.tasks.messagecode:MessageCodeAuthentication oslo.policy.policies = zaqar = zaqar.common.policies:list_rules [extras] mongodb = pymongo>=3.6.0 # Apache-2.0 redis = redis>=3.4.0 # MIT mysql = PyMySQL>=0.8.0 # MIT License [egg_info] tag_build = tag_date = 0 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/setup.py0000664000175100017510000000127115033040005014451 0ustar00mylesmyles# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
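# All package metadata (console scripts, driver entry points, classifiers)
# lives in setup.cfg and is consumed by pbr at build time. A hedged sketch
# of listing one installed entry-point group with the standard library
# (Python 3.10+, matching python_requires in setup.cfg):
#
#     from importlib.metadata import entry_points
#     for ep in entry_points(group='zaqar.data.storage'):
#         print(ep.name, '->', ep.value)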
import setuptools setuptools.setup( setup_requires=['pbr>=2.0.0'], pbr=True) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/test-requirements.txt0000664000175100017510000000107415033040005017201 0ustar00mylesmyleshacking>=6.1.0,<6.2.0 # Apache-2.0 # Backends redis>=3.4.0 # MIT pymongo>=3.6.0 # Apache-2.0 python-swiftclient>=3.10.1 # Apache-2.0 websocket-client>=0.44.0 # LGPLv2+ PyMySQL>=0.8.0 # MIT License # Unit testing coverage!=4.4,>=4.0 # Apache-2.0 cryptography>=2.7 # BSD/Apache-2.0 ddt>=1.0.1 # MIT doc8>=0.8.1 # Apache-2.0 Pygments>=2.2.0 # BSD license fixtures>=3.0.0 # Apache-2.0/BSD testscenarios>=0.4 # Apache-2.0/BSD testtools>=2.2.0 # MIT testresources>=2.0.0 # Apache-2.0/BSD oslotest>=3.2.0 # Apache-2.0 stestr>=2.0.0 #OSprofiler osprofiler>=1.4.0 # Apache-2.0 ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5610135 zaqar-20.1.0.dev29/tools/0000775000175100017510000000000015033040026014101 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5610135 zaqar-20.1.0.dev29/tools/doc/0000775000175100017510000000000015033040026014646 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/tools/doc/find_autodoc_modules.sh0000775000175100017510000000075215033040005021374 0ustar00mylesmyles#!/bin/bash ZAQAR_DIR='../../zaqar/' # include trailing slash DOCS_DIR='source' modules='' for x in `find ${ZAQAR_DIR} -name '*.py' | grep -v zaqar/tests | grep -v zaqar/bench`; do if [ `basename ${x} .py` == "__init__" ] ; then continue fi relative=zaqar.`echo ${x} | sed -e 's$^'${ZAQAR_DIR}'$$' -e 's/.py$//' -e 's$/$.$g'` modules="${modules} ${relative}" done for mod in ${modules} ; do if [ ! -f "${DOCS_DIR}/${mod}.rst" ]; then echo ${mod} fi done././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/tools/doc/generate_autodoc_index.sh0000775000175100017510000000177415033040005021712 0ustar00mylesmyles#!/bin/sh SOURCEDIR=../../doc/source/api if [ ! -d ${SOURCEDIR} ] ; then mkdir -p ${SOURCEDIR} fi for x in `./find_autodoc_modules.sh`; do echo "Generating ${SOURCEDIR}/${x}.rst" echo "${SOURCEDIR}/${x}.rst" >> .autogenerated heading="The :mod:\`${x}\` module" # Figure out how long the heading is # and make sure to emit that many '=' under # it to avoid heading format errors # in Sphinx. heading_len=$(echo "$heading" | wc -c) underline=$(head -c $heading_len < /dev/zero | tr '\0' '=') ( cat < ${SOURCEDIR}/${x}.rst done if [ ! -f ${SOURCEDIR}/autoindex.rst ] ; then cat > ${SOURCEDIR}/autoindex.rst <> ${SOURCEDIR}/autoindex.rst done echo ${SOURCEDIR}/autoindex.rst >> .autogenerated fi././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/tools/test-setup.sh0000775000175100017510000000622115033040005016553 0ustar00mylesmyles#!/bin/bash -xe # This script will be run by OpenStack CI before unit tests are run, # it sets up the test system as needed. # Developers should setup their test systems in a similar way. # This setup needs to be run as a user that can run sudo. # The root password for the MySQL database; pass it in via # MYSQL_ROOT_PW. DB_ROOT_PW=${MYSQL_ROOT_PW:-insecure_slave} # This user and its password are used by the tests, if you change it, # your tests might fail. 
DB_USER=openstack_citest DB_PW=openstack_citest sudo -H mysqladmin -u root password $DB_ROOT_PW # It's best practice to remove anonymous users from the database. If # a anonymous user exists, then it matches first for connections and # other connections from that host will not work. sudo -H mysql -u root -p$DB_ROOT_PW -h localhost -e " DELETE FROM mysql.user WHERE User=''; FLUSH PRIVILEGES; CREATE USER '$DB_USER'@'%' IDENTIFIED BY '$DB_PW'; GRANT ALL PRIVILEGES ON *.* TO '$DB_USER'@'%' WITH GRANT OPTION;" # Now create our database. mysql -u $DB_USER -p$DB_PW -h 127.0.0.1 -e " SET default_storage_engine=MYISAM; DROP DATABASE IF EXISTS openstack_citest; CREATE DATABASE openstack_citest CHARACTER SET utf8;" # TO fix the mongodb issue in ubuntu 22.04 ubuntu_version=$(source /etc/os-release ; echo $VERSION_ID) if [[ $ubuntu_version == '24.04' ]]; then if [[ ! -d /etc/apt/sources.list.d ]]; then sudo mkdir /etc/apt/sources.list.d fi wget -qO - https://www.mongodb.org/static/pgp/server-8.0.asc | sudo apt-key add - echo "deb [ arch=amd64,arm64 ] https://repo.mongodb.org/apt/ubuntu noble/mongodb-org/8.0 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-8.0.list sudo apt update sudo apt install -y mongodb-org sudo systemctl restart mongod sudo systemctl status mongod elif [[ $ubuntu_version == '22.04' ]]; then wget -qO - https://www.mongodb.org/static/pgp/server-7.0.asc | sudo apt-key add - echo "deb [ arch=amd64,arm64 ] https://repo.mongodb.org/apt/ubuntu jammy/mongodb-org/7.0 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-7.0.list sudo apt update sudo apt install -y mongodb-org sudo systemctl restart mongod sudo systemctl status mongod elif [[ $ubuntu_version == '20.04' ]]; then wget -qO - https://www.mongodb.org/static/pgp/server-7.0.asc | sudo apt-key add - echo "deb [ arch=amd64,arm64 ] https://repo.mongodb.org/apt/ubuntu focal/mongodb-org/7.0 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-7.0.list sudo apt update sudo apt install -y mongodb-org sudo systemctl restart mongod sudo systemctl status mongod elif [[ $ubuntu_version == '12' ]]; then wget -qO - https://www.mongodb.org/static/pgp/server-7.0.asc | sudo apt-key add - echo "deb [ arch=amd64,arm64 ] https://repo.mongodb.org/apt/ubuntu jammy/mongodb-org/7.0 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-7.0.list sudo apt update sudo apt install -y mongodb-org sudo systemctl restart mongod sudo systemctl status mongod else sudo apt-get install -y mongodb sudo systemctl restart mongodb fi sudo apt install -y pip python3-setuptools sudo python3 setup.py install ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/tox.ini0000664000175100017510000000613715033040005014260 0ustar00mylesmyles[tox] minversion = 4.6 envlist = py3,pep8 [testenv] usedevelop = true setenv = ZAQAR_TESTS_CONFIGS_DIR={toxinidir}/zaqar/tests/etc/ ZAQAR_TEST_MONGODB=1 ZAQAR_TEST_SLOW=1 OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_TEST_TIMEOUT=60 deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/test-requirements.txt -r{toxinidir}/requirements.txt commands = stestr run --serial --slowest {posargs} [testenv:integration] setenv = {[testenv]setenv} ZAQAR_TEST_INTEGRATION=1 OS_TEST_PATH=./zaqar/tests/functional [testenv:pep8] commands = doc8 doc/source flake8 [testenv:genconfig] commands = oslo-config-generator --config-file etc/oslo-config-generator/zaqar.conf [testenv:genpolicy] commands = oslopolicy-sample-generator 
--config-file etc/zaqar-policy-generator.conf [testenv:cover] setenv = {[testenv]setenv} PYTHON=coverage run --source zaqar --parallel-mode commands = {[testenv]commands} coverage combine coverage html -d cover coverage xml -o cover/coverage.xml [testenv:venv] commands = {posargs} [testenv:docs] deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/requirements.txt -r{toxinidir}/doc/requirements.txt commands = sphinx-build -W -b html doc/source doc/build/html [testenv:api-ref] # This environment is called from CI scripts to test and publish # the API Ref to docs.openstack.org. # allowlist_externals = bash rm deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/doc/requirements.txt commands = rm -rf api-ref/build sphinx-build -W -b html -d api-ref/build/doctrees api-ref/source api-ref/build/html [testenv:debug] commands = oslo_debug_helper {posargs} [testenv:releasenotes] deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/doc/requirements.txt commands = doc8 releasenotes/source releasenotes/notes sphinx-build -a -E -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html [flake8] exclude = .venv*,.git,.tox,dist,doc,*lib/python*,*.egg,.update-venv # NOTE(flaper87): Our currently max-complexity is 20. Not sure what the ideal complexity # for Zaqar should be but lets keep it to the minimum possible. max-complexity = 20 # [H904] Delay string interpolations at logging calls. enable-extensions=H904 # Ignored extensions, might be enabled again after review: # E123 closing bracket does not match indentation of opening bracket's line # E226 missing whitespace around arithmetic operator # E241 multiple spaces after ',' # E402 module level import not at top of file # E731 do not assign a lambda expression, use a def # W503 line break before binary operator # W504 line break after binary operator ignore = E123,E226,E241,E402,E731,W503,W504 [doc8] # File extensions to check extensions = .rst, .yaml # Maximal line length should be 80 but we have some overlong lines. # Let's not get far more in. max-line-length = 80 [flake8:local-plugins] extension = N537 = checks:no_translate_logs paths = ./zaqar/hacking ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5610135 zaqar-20.1.0.dev29/zaqar/0000775000175100017510000000000015033040026014057 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/__init__.py0000664000175100017510000000134715033040005016172 0ustar00mylesmyles# Copyright (c) 2013 Rackspace Hosting, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
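# A minimal usage sketch (hedged; assumes the package is installed):
#
#     import zaqar
#     print(zaqar.__version__)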
import zaqar.bootstrap import zaqar.version Bootstrap = zaqar.bootstrap.Bootstrap __version__ = zaqar.version.version_info.cached_version_string() ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5620136 zaqar-20.1.0.dev29/zaqar/api/0000775000175100017510000000000015033040026014630 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/api/__init__.py0000664000175100017510000000000015033040005016724 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/api/handler.py0000664000175100017510000001135315033040005016617 0ustar00mylesmyles# Copyright (c) 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. from zaqar.api.v2 import endpoints from zaqar.api.v2 import request as schema_validator from zaqar.common.api import request from zaqar.common.api import response from zaqar.common import consts from zaqar.common import errors from zaqar.common import urls class Handler(object): """Defines API handler The handler validates and process the requests """ _actions_mapping = { consts.MESSAGE_LIST: 'GET', consts.MESSAGE_GET: 'GET', consts.MESSAGE_GET_MANY: 'GET', consts.MESSAGE_POST: 'POST', consts.MESSAGE_DELETE: 'DELETE', consts.MESSAGE_DELETE_MANY: 'DELETE' } def __init__(self, storage, control, validate, defaults): self.v2_endpoints = endpoints.Endpoints(storage, control, validate, defaults) self._subscription_factory = None def set_subscription_factory(self, factory): self._subscription_factory = factory def clean_subscriptions(self, subscriptions): for resp in subscriptions: body = {'queue_name': resp._request._body.get('queue_name'), 'subscription_id': resp._body.get('subscription_id')} payload = {'body': body, 'headers': resp._request._headers} req = self.create_request(payload) self.v2_endpoints.subscription_delete(req) def process_request(self, req, protocol): # FIXME(vkmc): Control API version if req._action == consts.SUBSCRIPTION_CREATE: subscriber = req._body.get('subscriber') if not subscriber: # Default to the connected websocket as subscriber subscriber = self._subscription_factory.get_subscriber( protocol) return self.v2_endpoints.subscription_create(req, subscriber) return getattr(self.v2_endpoints, req._action)(req) @staticmethod def validate_request(payload, req): """Validate a request and its payload against a schema. :return: a Response object if validation failed, None otherwise. 
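        A hedged example of the payload shape being validated (the action
        name and values are illustrative):

            {'action': 'queue_create',
             'headers': {'Client-ID': '<uuid>',
                         'X-Project-ID': '<project>'},
             'body': {'queue_name': 'SampleQueue'}}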
""" try: action = payload.get('action') validator = schema_validator.RequestSchema() is_valid = validator.validate(action=action, body=payload) except errors.InvalidAction as ex: body = {'error': str(ex)} headers = {'status': 400} return response.Response(req, body, headers) else: if not is_valid: body = {'error': 'Schema validation failed.'} headers = {'status': 400} return response.Response(req, body, headers) def create_response(self, code, body, req=None): if req is None: req = self.create_request() headers = {'status': code} return response.Response(req, body, headers) @staticmethod def create_request(payload=None, env=None): if payload is None: payload = {} action = payload.get('action') body = payload.get('body', {}) headers = payload.get('headers') return request.Request(action=action, body=body, headers=headers, api="v2", env=env) def get_defaults(self): return self.v2_endpoints._defaults def verify_signature(self, key, payload): action = payload.get('action') method = self._actions_mapping.get(action) headers = payload.get('headers', {}) project = headers.get('X-Project-ID') expires = headers.get('URL-Expires') methods = headers.get('URL-Methods') paths = headers.get('URL-Paths') signature = headers.get('URL-Signature') if not method or method not in methods: return False try: verified = urls.verify_signed_headers_data(key, paths, project=project, methods=methods, expires=expires, signature=signature) except ValueError: return False return verified ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5620136 zaqar-20.1.0.dev29/zaqar/api/v1_1/0000775000175100017510000000000015033040026015376 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/api/v1_1/__init__.py0000664000175100017510000000000015033040005017472 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/api/v1_1/request.py0000664000175100017510000005041415033040005017441 0ustar00mylesmyles# Copyright (c) 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from zaqar.common.api import api from zaqar.common import consts class RequestSchema(api.Api): headers = { 'User-Agent': {'type': 'string'}, 'Date': {'type': 'string'}, 'Accept': {'type': 'string'}, 'Client-ID': {'type': 'string'}, 'X-Project-ID': {'type': 'string'}, 'X-Auth-Token': {'type': 'string'} } schema = { # Base 'get_home_doc': { 'properties': { 'action': {'enum': ['get_home_doc']}, 'headers': { 'type': 'object', 'properties': headers, } }, 'required': ['action', 'headers'], 'admin': True, }, 'check_node_health': { 'properties': { 'action': {'enum': ['check_node_health']}, 'headers': { 'type': 'object', 'properties': headers, } }, 'required': ['action', 'headers'], 'admin': True, }, 'ping_node': { 'properties': { 'action': {'enum': ['ping_node']}, 'headers': { 'type': 'object', 'properties': headers, } }, 'required': ['action', 'headers'], 'admin': True, }, 'authenticate': { 'properties': { 'action': {'enum': ['authenticate']}, 'headers': { 'type': 'object', 'properties': headers, 'required': ['X-Project-ID', 'X-Auth-Token'] } }, 'required': ['action', 'headers'], }, # Queues consts.QUEUE_LIST: { 'properties': { 'action': {'enum': [consts.QUEUE_LIST]}, 'headers': { 'type': 'object', 'properties': headers, 'required': ['Client-ID', 'X-Project-ID'] }, 'body': { 'type': 'object', 'properties': { 'marker': {'type': 'string'}, 'limit': {'type': 'integer'}, 'detailed': {'type': 'boolean'} } } }, 'required': ['action', 'headers'] }, consts.QUEUE_CREATE: { 'properties': { 'action': {'enum': [consts.QUEUE_CREATE]}, 'headers': { 'type': 'object', 'properties': headers, 'required': ['Client-ID', 'X-Project-ID']}, 'body': { 'type': 'object', 'properties': { 'queue_name': {'type': 'string'}, }, 'required': ['queue_name'], } }, 'required': ['action', 'headers', 'body'] }, consts.QUEUE_DELETE: { 'properties': { 'action': {'enum': [consts.QUEUE_DELETE]}, 'headers': { 'type': 'object', 'properties': headers, 'required': ['Client-ID', 'X-Project-ID'] }, 'body': { 'type': 'object', 'properties': { 'queue_name': {'type': 'string'}, }, 'required': ['queue_name'] } }, 'required': ['action', 'headers', 'body'] }, consts.QUEUE_GET: { 'properties': { 'action': {'enum': [consts.QUEUE_GET]}, 'headers': { 'type': 'object', 'properties': headers, 'required': ['Client-ID', 'X-Project-ID'] }, 'body': { 'type': 'object', 'properties': { 'queue_name': {'type': 'string'}, }, 'required': ['queue_name'], } }, 'required': ['action', 'headers', 'body'] }, consts.QUEUE_GET_STATS: { 'properties': { 'action': {'enum': [consts.QUEUE_GET_STATS]}, 'headers': { 'type': 'object', 'properties': headers, 'required': ['Client-ID', 'X-Project-ID'] }, 'body': { 'type': 'object', 'properties': { 'queue_name': {'type': 'string'}, }, 'required': ['queue_name'], } }, 'required': ['action', 'headers', 'body'], 'admin': True }, # Messages consts.MESSAGE_LIST: { 'properties': { 'action': {'enum': [consts.MESSAGE_LIST]}, 'headers': { 'type': 'object', 'properties': headers, 'required': ['Client-ID', 'X-Project-ID'] }, 'body': { 'type': 'object', 'properties': { 'queue_name': {'type': 'string'}, 'marker': {'type': 'string'}, 'limit': {'type': 'integer'}, 'echo': {'type': 'boolean'}, 'include_claimed': {'type': 'boolean'}, }, 'required': ['queue_name'], } }, 'required': ['action', 'headers', 'body'] }, consts.MESSAGE_GET: { 'properties': { 'action': {'enum': [consts.MESSAGE_GET]}, 'headers': { 'type': 'object', 'properties': headers, 'required': ['Client-ID', 'X-Project-ID'] }, 'body': { 'type': 'object', 'properties': { 'queue_name': 
{'type': 'string'}, 'message_id': {'type': 'string'}, }, 'required': ['queue_name', 'message_id'], } }, 'required': ['action', 'headers', 'body'] }, consts.MESSAGE_GET_MANY: { 'properties': { 'action': {'enum': [consts.MESSAGE_GET_MANY]}, 'headers': { 'type': 'object', 'properties': headers, 'required': ['Client-ID', 'X-Project-ID'] }, 'body': { 'type': 'object', 'properties': { 'queue_name': {'type': 'string'}, 'message_ids': {'type': 'array'}, }, 'required': ['queue_name', 'message_ids'], } }, 'required': ['action', 'headers', 'body'] }, consts.MESSAGE_POST: { 'properties': { 'action': {'enum': [consts.MESSAGE_POST]}, 'headers': { 'type': 'object', 'properties': headers, 'required': ['Client-ID', 'X-Project-ID'] }, 'body': { 'type': 'object', 'properties': { 'queue_name': {'type': 'string'}, 'messages': {'type': 'array'}, }, 'required': ['queue_name', 'messages'], } }, 'required': ['action', 'headers', 'body'] }, consts.MESSAGE_DELETE: { 'properties': { 'action': {'enum': [consts.MESSAGE_DELETE]}, 'headers': { 'type': 'object', 'properties': headers, 'required': ['Client-ID', 'X-Project-ID'] }, 'body': { 'type': 'object', 'properties': { 'queue_name': {'type': 'string'}, 'message_id': {'type': 'string'}, 'claim_id': {'type': 'string'} }, 'required': ['queue_name', 'message_id'], } }, 'required': ['action', 'headers', 'body'] }, consts.MESSAGE_DELETE_MANY: { 'properties': { 'action': {'enum': [consts.MESSAGE_DELETE_MANY]}, 'headers': { 'type': 'object', 'properties': headers, 'required': ['Client-ID', 'X-Project-ID'] }, 'body': { 'type': 'object', 'properties': { 'queue_name': {'type': 'string'}, 'message_ids': {'type': 'array'}, 'claim_ids': {'type': 'array'}, 'pop': {'type': 'integer'} }, 'required': ['queue_name'], } }, 'required': ['action', 'headers', 'body'] }, # Claims consts.CLAIM_CREATE: { 'properties': { 'action': {'enum': [consts.CLAIM_CREATE]}, 'headers': { 'type': 'object', 'properties': headers, 'required': ['Client-ID', 'X-Project-ID'] }, 'body': { 'type': 'object', 'properties': { 'queue_name': {'type': 'string'}, 'limit': {'type': 'integer'}, 'ttl': {'type': 'integer'}, 'grace': {'type': 'integer'} }, 'required': ['queue_name'], } }, 'required': ['action', 'headers', 'body'] }, consts.CLAIM_GET: { 'properties': { 'action': {'enum': [consts.CLAIM_GET]}, 'headers': { 'type': 'object', 'properties': headers, 'required': ['Client-ID', 'X-Project-ID'] }, 'body': { 'type': 'object', 'properties': { 'queue_name': {'type': 'string'}, 'claim_id': {'type': 'string'} }, 'required': ['queue_name', 'claim_id'], } }, 'required': ['action', 'headers', 'body'] }, consts.CLAIM_UPDATE: { 'properties': { 'action': {'enum': [consts.CLAIM_UPDATE]}, 'headers': { 'type': 'object', 'properties': headers, 'required': ['Client-ID', 'X-Project-ID'] }, 'body': { 'type': 'object', 'properties': { 'queue_name': {'type': 'string'}, 'claim_id': {'type': 'string'}, 'ttl': {'type': 'integer'} }, 'required': ['queue_name', 'claim_id'], } }, 'required': ['action', 'headers', 'body'] }, consts.CLAIM_DELETE: { 'properties': { 'action': {'enum': [consts.CLAIM_DELETE]}, 'headers': { 'type': 'object', 'properties': headers, 'required': ['Client-ID', 'X-Project-ID'] }, 'body': { 'type': 'object', 'properties': { 'queue_name': {'type': 'string'}, 'claim_id': {'type': 'string'} }, 'required': ['queue_name', 'claim_id'], } }, 'required': ['action', 'headers', 'body'] }, # Pools consts.POOL_LIST: { 'properties': { 'action': {'enum': [consts.POOL_LIST]}, 'headers': { 'type': 'object', 'properties': headers, 
'required': ['Client-ID', 'X-Project-ID'] }, 'body': { 'type': 'object', 'properties': { 'pool_name': {'type': 'string'}, 'limit': {'type': 'integer'}, 'marker': {'type': 'string'} }, 'required': ['pool_name'], } }, 'required': ['action', 'headers', 'body'], 'admin': True, }, consts.POOL_CREATE: { 'properties': { 'action': {'enum': [consts.POOL_CREATE]}, 'headers': { 'type': 'object', 'properties': headers, 'required': ['Client-ID', 'X-Project-ID'] }, 'body': { 'type': 'object', 'properties': { 'pool_name': {'type': 'string'}, 'weight': {'type': 'integer'}, 'uri': {'type': 'string'}, 'options': {'type': 'object'}, }, 'required': ['pool_name'], } }, 'required': ['action', 'headers', 'body'], 'admin': True, }, consts.POOL_UPDATE: { 'properties': { 'action': {'enum': [consts.POOL_UPDATE]}, 'headers': { 'type': 'object', 'properties': headers, 'required': ['Client-ID', 'X-Project-ID'] }, 'body': { 'type': 'object', 'properties': { 'pool_name': {'type': 'string'}, 'weight': {'type': 'integer'}, 'uri': {'type': 'string'}, 'options': {'type': 'object'}, }, 'required': ['pool_name'], } }, 'required': ['action', 'headers', 'body'], 'admin': True, }, consts.POOL_GET: { 'properties': { 'action': {'enum': [consts.POOL_GET]}, 'headers': { 'type': 'object', 'properties': headers, 'required': ['Client-ID', 'X-Project-ID'] }, 'body': { 'type': 'object', 'properties': { 'pool_name': {'type': 'string'}, 'detailed': {'type': 'boolean'} }, 'required': ['pool_name'], } }, 'required': ['action', 'headers', 'body'], 'admin': True, }, consts.POOL_DELETE: { 'properties': { 'action': {'enum': [consts.POOL_DELETE]}, 'headers': { 'type': 'object', 'properties': headers, 'required': ['Client-ID', 'X-Project-ID'] }, 'body': { 'type': 'object', 'properties': { 'pool_name': {'type': 'string'} }, 'required': ['pool_name'], } }, 'required': ['action', 'headers', 'body'], 'admin': True, }, # Flavors consts.FLAVOR_LIST: { 'properties': { 'action': {'enum': [consts.FLAVOR_LIST]}, 'headers': { 'type': 'object', 'properties': headers, 'required': ['Client-ID', 'X-Project-ID'] }, 'body': { 'type': 'object', 'properties': { 'flavor_name': {'type': 'string'}, 'limit': {'type': 'integer'}, 'marker': {'type': 'string'} }, 'required': ['flavor_name'], } }, 'required': ['action', 'headers', 'body'], 'admin': True, }, consts.FLAVOR_CREATE: { 'properties': { 'action': {'enum': [consts.FLAVOR_CREATE]}, 'headers': { 'type': 'object', 'properties': headers, 'required': ['Client-ID', 'X-Project-ID'] }, 'body': { 'type': 'object', 'properties': { 'flavor_name': {'type': 'string'}, 'pool_name': {'type': 'string'}, 'capabilities': {'type': 'object'}, }, 'required': ['flavor_name', 'pool_name'], } }, 'required': ['action', 'headers', 'body'], 'admin': True, }, consts.FLAVOR_UPDATE: { 'properties': { 'action': {'enum': [consts.FLAVOR_UPDATE]}, 'headers': { 'type': 'object', 'properties': headers, 'required': ['Client-ID', 'X-Project-ID'] }, 'body': { 'type': 'object', 'properties': { 'flavor_name': {'type': 'string'}, 'pool_name': {'type': 'string'}, 'capabilities': {'type': 'object'}, }, 'required': ['flavor_name'], } }, 'required': ['action', 'headers', 'body'], 'admin': True, }, consts.FLAVOR_GET: { 'properties': { 'action': {'enum': [consts.FLAVOR_GET]}, 'headers': { 'type': 'object', 'properties': headers, 'required': ['Client-ID', 'X-Project-ID'] }, 'body': { 'type': 'object', 'properties': { 'flavor_name': {'type': 'string'}, 'detailed': {'type': 'boolean'} }, 'required': ['flavor_name'], } }, 'required': ['action', 'headers', 'body'], 
'admin': True, }, consts.FLAVOR_DELETE: { 'properties': { 'action': {'enum': [consts.FLAVOR_DELETE]}, 'headers': { 'type': 'object', 'properties': headers, 'required': ['Client-ID', 'X-Project-ID'] }, 'body': { 'type': 'object', 'properties': { 'flavor_name': {'type': 'string'} }, 'required': ['flavor_name'], } }, 'required': ['action', 'headers', 'body'], 'admin': True, }, } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/api/v1_1/response.py0000664000175100017510000003637415033040005017620 0ustar00mylesmyles# Copyright (c) 2013 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from zaqar.common.api import api from zaqar.common import consts class ResponseSchema(api.Api): """Define validation schema for json response.""" def __init__(self, limits): self.limits = limits age = { "type": "number", "minimum": 0 } message = { "type": "object", "properties": { "id": { "type": "string", }, "href": { "type": "string", "pattern": r"^(/v1\.1/queues/[a-zA-Z0-9_-]{1,64}" r"/messages/[a-zA-Z0-9_-]+)(\?claim_id=[a-zA-Z0-9_-]+)?$" }, "age": age, "ttl": { "type": "number", "minimum": 1, "maximum": self.limits.max_message_ttl }, "body": { "type": "object" }, "checksum": { "type": "string", }, }, "required": ["href", "ttl", "age", "body", "id"], "additionalProperties": False, } claim_href = { "type": "string", "pattern": r"^(/v1\.1/queues/[a-zA-Z0-9_-]{1,64}" r"/messages/[a-zA-Z0-9_-]+)" r"\?claim_id=[a-zA-Z0-9_-]+$" } flavor = { 'type': 'object', 'properties': { 'href': { 'type': 'string', 'pattern': r'^/v1\.1/flavors/[a-zA-Z0-9_-]{1,64}$' }, 'pool': { 'type': 'string', }, 'project': { 'type': 'string' }, 'capabilities': { 'type': 'object', 'additionalProperties': True } }, 'required': ['href', 'pool', 'project'], 'additionalProperties': False, } self.schema = { consts.MESSAGE_GET_MANY: { 'type': 'object', 'properties': { 'messages': { "type": "array", "items": message, "minItems": 1, "maxItems": self.limits.max_messages_per_page } }, 'required': ['messages'], 'additionalProperties': False, }, consts.QUEUE_LIST: { 'type': 'object', 'properties': { 'links': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'rel': { 'type': 'string', 'enum': ['next'], }, 'href': { 'type': 'string', "pattern": r"^/v1\.1/queues\?", } }, 'required': ['rel', 'href'], 'additionalProperties': False, }, 'minItems': 1, 'maxItems': 1, }, 'queues': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'name': { 'type': 'string', 'pattern': '^[a-zA-Z0-9_-]{1,64}$' }, 'href': { 'type': 'string', 'pattern': r'^/v1\.1/queues/' r'[a-zA-Z0-9_-]{1,64}$', }, 'metadata': { 'type': 'object', } }, 'required': ['name', 'href'], 'additionalProperties': False, }, 'minItems': 1, 'maxItems': self.limits.max_queues_per_page, } }, 'required': ['links', 'queues'], 'additionalProperties': False, }, consts.QUEUE_GET_STATS: { 'type': 'object', 'properties': { 'messages': { 'type': 'object', 'properties': { 'free': { 'type': 'number', 'minimum': 0 }, 'claimed': { 
'type': 'number', 'minimum': 0 }, 'total': { 'type': 'number', 'minimum': 0 }, 'oldest': { 'type': 'object' }, 'newest': { 'type': 'object' } }, 'required': ['free', 'claimed', 'total'], 'additionalProperties': False } }, 'required': ['messages'], 'additionalProperties': False }, consts.POOL_LIST: { 'type': 'object', 'properties': { 'links': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'rel': { 'type': 'string' }, 'href': { 'type': 'string', 'pattern': r'^/v1\.1/pools\?' } }, 'required': ['rel', 'href'], 'additionalProperties': False } }, 'pools': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'href': { 'type': 'string', 'pattern': r'^/v1\.1/' r'pools/[a-zA-Z0-9_-]{1,64}$' }, 'weight': { 'type': 'number', 'minimum': -1 }, 'name': { 'type': 'string' }, 'uri': { 'type': 'string' }, 'flavor': { 'type': ['string', 'null'] }, 'options': { 'type': 'object', 'additionalProperties': True } }, 'required': ['href', 'weight', 'uri'], 'additionalProperties': False, }, } }, 'required': ['links', 'pools'], 'additionalProperties': False }, consts.MESSAGE_LIST: { 'type': 'object', 'properties': { 'links': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'rel': { 'type': 'string' }, 'href': { 'type': 'string', 'pattern': r'^/v1\.1/queues/[a-zA-Z0-9_-]+' r'/messages\?(.)*$' } }, 'required': ['rel', 'href'], 'additionalProperties': False } }, 'messages': { "type": "array", "items": message, "minItems": 0, "maxItems": self.limits.max_messages_per_claim_or_pop } } }, consts.POOL_GET_DETAIL: { 'type': 'object', 'properties': { 'name': { 'type': 'string' }, 'uri': { 'type': 'string' }, 'flavor': { 'type': ['string', 'null'] }, 'weight': { 'type': 'number', 'minimum': -1 }, 'href': { 'type': 'string', 'pattern': r'^/v1\.1/pools/' r'[a-zA-Z0-9_\-]+$' }, 'options': { 'type': 'object', 'additionalProperties': True } }, 'required': ['uri', 'weight', 'href'], 'additionalProperties': False }, consts.CLAIM_CREATE: { 'type': 'object', 'properties': { 'messages': { "type": "array", "items": { "type": "object", "properties": { "id": { "type": "string", }, "href": claim_href, "ttl": { "type": "number", "minimum": 1, "maximum": self.limits.max_message_ttl }, "age": age, "body": { "type": "object" }, "checksum": { "type": "string", }, }, "required": ["href", "ttl", "age", "body", "id"], "additionalProperties": False, }, "minItems": 1, "maxItems": self.limits.max_messages_per_page } }, 'required': ['messages'], 'additionalProperties': False }, consts.CLAIM_GET: { 'type': 'object', 'properties': { 'age': age, 'ttl': { 'type': 'number', 'minimum': 0, 'maximum': self.limits.max_claim_ttl }, 'href': { 'type': 'string', 'pattern': r'^/v1\.1/queues/[a-zA-Z0-9_-]+' r'/claims/[a-zA-Z0-9_-]+$' }, 'messages': { "type": "array", "items": { "type": "object", "properties": { "id": { "type": "string", }, "href": claim_href, "ttl": { "type": "number", "minimum": 1, "maximum": self.limits.max_message_ttl }, "age": age, "body": { "type": "object" } }, "required": ["href", "ttl", "age", "body", "id"], "additionalProperties": False, }, "minItems": 1, "maxItems": self.limits.max_messages_per_page } }, 'required': ['age', 'ttl', 'messages', 'href'], 'additionalProperties': False }, consts.FLAVOR_LIST: { 'type': 'object', 'properties': { 'links': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'rel': { 'type': 'string' }, 'href': { 'type': 'string', 'pattern': r'^/v1\.1/flavors\?' 
} }, 'required': ['rel', 'href'], 'additionalProperties': False } }, 'flavors': { 'type': 'array', 'items': flavor, } }, 'required': ['links', 'flavors'], 'additionalProperties': False } } ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5620136 zaqar-20.1.0.dev29/zaqar/api/v2/0000775000175100017510000000000015033040026015157 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/api/v2/__init__.py0000664000175100017510000000000015033040005017253 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/api/v2/endpoints.py0000664000175100017510000011200315033040005017526 0ustar00mylesmyles# Copyright (c) 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. from stevedore import driver from oslo_log import log as logging from oslo_utils import netutils from zaqar.common.api import errors as api_errors from zaqar.common.api import response from zaqar.common.api import utils as api_utils from zaqar.i18n import _ from zaqar.storage import errors as storage_errors from zaqar.transport import validation LOG = logging.getLogger(__name__) class Endpoints(object): """v2 API Endpoints.""" def __init__(self, storage, control, validate, defaults): self._queue_controller = storage.queue_controller self._message_controller = storage.message_controller self._claim_controller = storage.claim_controller self._subscription_controller = storage.subscription_controller self._pools_controller = control.pools_controller self._flavors_controller = control.flavors_controller self._validate = validate self._defaults = defaults self._subscription_url = None # Queues @api_utils.on_exception_sends_500 def queue_list(self, req): """Gets a list of queues :param req: Request instance ready to be sent. :type req: `api.common.Request` :return: resp: Response instance :type: resp: `api.common.Response` """ project_id = req._headers.get('X-Project-ID') LOG.debug('Queue list - project: %(project)s', {'project': project_id}) try: kwargs = api_utils.get_headers(req) self._validate.queue_listing(**kwargs) results = self._queue_controller.list( project=project_id, **kwargs) # Buffer list of queues. Can raise NoPoolFound error. queues = list(next(results)) except (ValueError, validation.ValidationFailed) as ex: LOG.debug(ex) headers = {'status': 400} return api_utils.error_response(req, ex, headers) except storage_errors.ExceptionBase as ex: error = 'Queues could not be listed.' headers = {'status': 503} LOG.exception(error) return api_utils.error_response(req, ex, headers, error) # Got some. Prepare the response. body = {'queues': queues} headers = {'status': 200} return response.Response(req, body, headers) @api_utils.on_exception_sends_500 def queue_create(self, req): """Creates a queue :param req: Request instance ready to be sent. 
:type req: `api.common.Request` :return: resp: Response instance :type: resp: `api.common.Response` """ project_id = req._headers.get('X-Project-ID') queue_name = req._body.get('queue_name') metadata = req._body.get('metadata', {}) LOG.debug('Queue create - queue: %(queue)s, project: %(project)s', {'queue': queue_name, 'project': project_id}) try: self._validate.queue_identification(queue_name, project_id) self._validate.queue_metadata_length(len(str(metadata))) self._validate.queue_metadata_putting(metadata) created = self._queue_controller.create(queue_name, metadata=metadata, project=project_id) except validation.ValidationFailed as ex: LOG.debug(ex) headers = {'status': 400} return api_utils.error_response(req, ex, headers) except storage_errors.ExceptionBase as ex: error = _('Queue %s could not be created.') % queue_name headers = {'status': 503} LOG.exception(error) return api_utils.error_response(req, ex, headers, error) else: body = _('Queue %s created.') % queue_name headers = {'status': 201} if created else {'status': 204} return response.Response(req, body, headers) @api_utils.on_exception_sends_500 def queue_delete(self, req): """Deletes a queue :param req: Request instance ready to be sent. :type req: `api.common.Request` :return: resp: Response instance :type: resp: `api.common.Response` """ project_id = req._headers.get('X-Project-ID') queue_name = req._body.get('queue_name') LOG.debug('Queue delete - queue: %(queue)s, project: %(project)s', {'queue': queue_name, 'project': project_id}) try: self._queue_controller.delete(queue_name, project=project_id) except storage_errors.ExceptionBase as ex: error = _('Queue %s could not be deleted.') % queue_name headers = {'status': 503} LOG.exception(error) return api_utils.error_response(req, ex, headers, error) else: body = _('Queue %s removed.') % queue_name headers = {'status': 204} return response.Response(req, body, headers) @api_utils.on_exception_sends_500 def queue_get(self, req): """Gets a queue :param req: Request instance ready to be sent. :type req: `api.common.Request` :return: resp: Response instance :type: resp: `api.common.Response` """ project_id = req._headers.get('X-Project-ID') queue_name = req._body.get('queue_name') LOG.debug('Queue get - queue: %(queue)s, ' 'project: %(project)s', {'queue': queue_name, 'project': project_id}) try: resp_dict = self._queue_controller.get(queue_name, project=project_id) except storage_errors.DoesNotExist as ex: LOG.debug(ex) error = _('Queue %s does not exist.') % queue_name headers = {'status': 404} return api_utils.error_response(req, ex, headers, error) except storage_errors.ExceptionBase as ex: headers = {'status': 503} error = _('Cannot retrieve queue %s.') % queue_name LOG.exception(error) return api_utils.error_response(req, ex, headers, error) else: body = resp_dict headers = {'status': 200} return response.Response(req, body, headers) @api_utils.on_exception_sends_500 def queue_get_stats(self, req): """Gets queue stats :param req: Request instance ready to be sent. 
:type req: `api.common.Request` :return: resp: Response instance :type: resp: `api.common.Response` """ project_id = req._headers.get('X-Project-ID') queue_name = req._body.get('queue_name') LOG.debug('Get queue stats - queue: %(queue)s, ' 'project: %(project)s', {'queue': queue_name, 'project': project_id}) try: resp_dict = self._queue_controller.stats(queue_name, project=project_id) body = resp_dict except storage_errors.QueueDoesNotExist: LOG.exception('Queue "%s" does not exist', queue_name) resp_dict = { 'messages': { 'claimed': 0, 'free': 0, 'total': 0 } } body = resp_dict headers = {'status': 404} return response.Response(req, body, headers) except storage_errors.ExceptionBase as ex: error = _('Cannot retrieve queue %s stats.') % queue_name headers = {'status': 503} LOG.exception(error) return api_utils.error_response(req, ex, headers, error) else: headers = {'status': 200} return response.Response(req, body, headers) @api_utils.on_exception_sends_500 def queue_purge(self, req): """Purge queue :param req: Request instance ready to be sent. :type req: `api.common.Request` :return: resp: Response instance :type: resp: `api.common.Response` """ project_id = req._headers.get('X-Project-ID') queue_name = req._body.get('queue_name') resource_types = req._body.get('resource_types', ["messages", "subscriptions"]) LOG.debug('Purge queue - queue: %(queue)s, ' 'project: %(project)s', {'queue': queue_name, 'project': project_id}) try: pop_limit = 100 if "messages" in resource_types: LOG.debug("Purge all messages under queue %s", queue_name) resp = self._pop_messages(req, queue_name, project_id, pop_limit) while resp.get_response()['body']['messages']: resp = self._pop_messages(req, queue_name, project_id, pop_limit) if "subscriptions" in resource_types: LOG.debug("Purge all subscriptions under queue %s", queue_name) resp = self._subscription_controller.list(queue_name, project=project_id) subscriptions = list(next(resp)) for sub in subscriptions: self._subscription_controller.delete(queue_name, sub['id'], project=project_id) except storage_errors.QueueDoesNotExist as ex: LOG.exception('Queue "%s" does not exist', queue_name) headers = {'status': 404} return api_utils.error_response(req, ex, headers) except storage_errors.ExceptionBase as ex: LOG.exception('Error deleting queue "%s".', queue_name) headers = {'status': 503} return api_utils.error_response(req, ex, headers) else: headers = {'status': 204} return response.Response(req, {}, headers) # Messages @api_utils.on_exception_sends_500 def message_list(self, req): """Gets a list of messages on a queue :param req: Request instance ready to be sent. 
:type req: `api.common.Request` :return: resp: Response instance :type: resp: `api.common.Response` """ project_id = req._headers.get('X-Project-ID') queue_name = req._body.get('queue_name') LOG.debug('Message list - queue: %(queue)s, ' 'project: %(project)s', {'queue': queue_name, 'project': project_id}) try: kwargs = api_utils.get_headers(req) self._validate.client_id_uuid_safe(req._headers.get('Client-ID')) client_uuid = api_utils.get_client_uuid(req) self._validate.message_listing(**kwargs) results = self._message_controller.list( queue_name, project=project_id, client_uuid=client_uuid, **kwargs) # Buffer messages cursor = next(results) messages = list(cursor) except (ValueError, api_errors.BadRequest, validation.ValidationFailed) as ex: LOG.debug(ex) headers = {'status': 400} return api_utils.error_response(req, ex, headers) except storage_errors.DoesNotExist as ex: LOG.debug(ex) headers = {'status': 404} return api_utils.error_response(req, ex, headers) if messages: # Found some messages, so prepare the response kwargs['marker'] = next(results) messages = [api_utils.format_message(message) for message in messages] headers = {'status': 200} body = {'messages': messages} return response.Response(req, body, headers) @api_utils.on_exception_sends_500 def message_get(self, req): """Gets a message from a queue :param req: Request instance ready to be sent. :type req: `api.common.Request` :return: resp: Response instance :type: resp: `api.common.Response` """ project_id = req._headers.get('X-Project-ID') queue_name = req._body.get('queue_name') message_id = req._body.get('message_id') LOG.debug('Message get - message: %(message)s, ' 'queue: %(queue)s, project: %(project)s', {'message': message_id, 'queue': queue_name, 'project': project_id}) try: message = self._message_controller.get( queue_name, message_id, project=project_id) except storage_errors.DoesNotExist as ex: LOG.debug(ex) headers = {'status': 404} return api_utils.error_response(req, ex, headers) # Prepare response message = api_utils.format_message(message) headers = {'status': 200} body = {'messages': message} return response.Response(req, body, headers) @api_utils.on_exception_sends_500 def message_get_many(self, req): """Gets a set of messages from a queue :param req: Request instance ready to be sent. :type req: `api.common.Request` :return: resp: Response instance :type: resp: `api.common.Response` """ project_id = req._headers.get('X-Project-ID') queue_name = req._body.get('queue_name') message_ids = list(req._body.get('message_ids')) LOG.debug('Message get - queue: %(queue)s, ' 'project: %(project)s', {'queue': queue_name, 'project': project_id}) try: self._validate.message_listing(limit=len(message_ids)) messages = self._message_controller.bulk_get( queue_name, message_ids=message_ids, project=project_id) except validation.ValidationFailed as ex: LOG.debug(ex) headers = {'status': 400} return api_utils.error_response(req, ex, headers) # Prepare response messages = list(messages) messages = [api_utils.format_message(message) for message in messages] headers = {'status': 200} body = {'messages': messages} return response.Response(req, body, headers) @api_utils.on_exception_sends_500 def message_post(self, req): """Post a set of messages to a queue :param req: Request instance ready to be sent. 
:type req: `api.common.Request` :return: resp: Response instance :type: resp: `api.common.Response` """ project_id = req._headers.get('X-Project-ID') queue_name = req._body.get('queue_name') LOG.debug('Messages post - queue: %(queue)s, ' 'project: %(project)s', {'queue': queue_name, 'project': project_id}) messages = req._body.get('messages') if messages is None: ex = _('Invalid request.') error = _('No messages were found in the request body.') headers = {'status': 400} return api_utils.error_response(req, ex, headers, error) try: # NOTE(flwang): Replace 'exists' with 'get_metadata' won't impact # the performance since both of them will call # collection.find_one() queue_meta = None try: queue_meta = self._queue_controller.get_metadata(queue_name, project_id) except storage_errors.DoesNotExist: self._validate.queue_identification(queue_name, project_id) self._queue_controller.create(queue_name, project=project_id) # NOTE(flwang): Queue is created in lazy mode, so no metadata # set. queue_meta = {} queue_max_msg_size = queue_meta.get('_max_messages_post_size', None) queue_default_ttl = queue_meta.get('_default_message_ttl') if queue_default_ttl: _message_post_spec = (('ttl', int, queue_default_ttl), ('body', '*', None),) else: _message_post_spec = (('ttl', int, self._defaults.message_ttl), ('body', '*', None),) # Place JSON size restriction before parsing self._validate.message_length(len(str(messages)), max_msg_post_size=queue_max_msg_size) except validation.ValidationFailed as ex: LOG.debug(ex) headers = {'status': 400} return api_utils.error_response(req, ex, headers) try: messages = api_utils.sanitize(messages, _message_post_spec, doctype=list) except api_errors.BadRequest as ex: LOG.debug(ex) headers = {'status': 400} return api_utils.error_response(req, ex, headers) try: self._validate.client_id_uuid_safe(req._headers.get('Client-ID')) client_uuid = api_utils.get_client_uuid(req) self._validate.message_posting(messages) message_ids = self._message_controller.post( queue_name, messages=messages, project=project_id, client_uuid=client_uuid) except (ValueError, api_errors.BadRequest, validation.ValidationFailed) as ex: LOG.debug(ex) headers = {'status': 400} return api_utils.error_response(req, ex, headers) except storage_errors.DoesNotExist as ex: LOG.debug(ex) headers = {'status': 404} return api_utils.error_response(req, ex, headers) except storage_errors.MessageConflict as ex: error = _('No messages could be enqueued.') headers = {'status': 500} LOG.exception(error) return api_utils.error_response(req, ex, headers, error) # Prepare the response headers = {'status': 201} body = {'message_ids': message_ids} return response.Response(req, body, headers) @api_utils.on_exception_sends_500 def message_delete(self, req): """Delete a message from a queue :param req: Request instance ready to be sent. 
:type req: `api.common.Request` :return: resp: Response instance :type: resp: `api.common.Response` """ project_id = req._headers.get('X-Project-ID') queue_name = req._body.get('queue_name') message_id = req._body.get('message_id') LOG.debug('Messages item DELETE - message: %(message)s, ' 'queue: %(queue)s, project: %(project)s', {'message': message_id, 'queue': queue_name, 'project': project_id}) claim_id = req._body.get('claim_id') try: self._message_controller.delete( queue_name, message_id=message_id, project=project_id, claim=claim_id) except storage_errors.MessageNotClaimed as ex: LOG.debug(ex) error = _('A claim was specified, but the message ' 'is not currently claimed.') headers = {'status': 400} return api_utils.error_response(req, ex, headers, error) except storage_errors.ClaimDoesNotExist as ex: LOG.debug(ex) error = _('The specified claim does not exist or ' 'has expired.') headers = {'status': 400} return api_utils.error_response(req, ex, headers, error) except storage_errors.NotPermitted as ex: LOG.debug(ex) error = _('This message is claimed; it cannot be ' 'deleted without a valid claim ID.') headers = {'status': 403} return api_utils.error_response(req, ex, headers, error) headers = {'status': 204} body = {} return response.Response(req, body, headers) @api_utils.on_exception_sends_500 def message_delete_many(self, req): """Deletes a set of messages from a queue :param req: Request instance ready to be sent. :type req: `api.common.Request` :return: resp: Response instance :type: resp: `api.common.Response` """ project_id = req._headers.get('X-Project-ID') queue_name = req._body.get('queue_name') message_ids = req._body.get('message_ids') claim_ids = None if self._validate.get_limit_conf_value('message_delete_with_claim_id'): claim_ids = req._body.get('claim_ids') pop_limit = req._body.get('pop') LOG.debug('Messages collection DELETE - queue: %(queue)s,' 'project: %(project)s, messages: %(message_ids)s', {'queue': queue_name, 'project': project_id, 'message_ids': message_ids}) try: self._validate.message_deletion(message_ids, pop_limit, claim_ids) except validation.ValidationFailed as ex: LOG.debug(ex) headers = {'status': 400} return api_utils.error_response(req, ex, headers) if message_ids: return self._delete_messages_by_id(req, queue_name, message_ids, project_id, claim_ids) elif pop_limit: return self._pop_messages(req, queue_name, project_id, pop_limit) @api_utils.on_exception_sends_500 def _delete_messages_by_id(self, req, queue_name, ids, project_id, claim_ids=None): self._message_controller.bulk_delete(queue_name, message_ids=ids, project=project_id, claim_ids=claim_ids) headers = {'status': 204} body = {} return response.Response(req, body, headers) @api_utils.on_exception_sends_500 def _pop_messages(self, req, queue_name, project_id, pop_limit): LOG.debug('Pop messages - queue: %(queue)s, project: %(project)s', {'queue': queue_name, 'project': project_id}) messages = self._message_controller.pop( queue_name, project=project_id, limit=pop_limit) # Prepare response if not messages: messages = [] headers = {'status': 200} body = {'messages': messages} return response.Response(req, body, headers) # Claims @api_utils.on_exception_sends_500 def claim_create(self, req): """Creates a claim :param req: Request instance ready to be sent. 
:type req: `api.common.Request` :return: resp: Response instance :type: resp: `api.common.Response` """ project_id = req._headers.get('X-Project-ID') queue_name = req._body.get('queue_name') LOG.debug('Claims create - queue: %(queue)s, ' 'project: %(project)s', {'queue': queue_name, 'project': project_id}) self._claim_post_spec = ( ('ttl', int, self._defaults.claim_ttl), ('grace', int, self._defaults.claim_grace), ) # Claim some messages # NOTE(vkmc): We build a dict with the ttl and grace # This is the metadata the storage is waiting for kwargs = api_utils.get_headers(req) # Read claim metadata (e.g., ttl) and raise appropriate # errors as needed. metadata = api_utils.sanitize(kwargs, self._claim_post_spec) limit = (None if kwargs.get('limit') is None else kwargs.get('limit')) claim_options = {} if limit is None else {'limit': limit} try: self._validate.claim_creation(metadata, limit=limit) except (ValueError, validation.ValidationFailed) as ex: LOG.debug(ex) headers = {'status': 400} return api_utils.error_response(req, ex, headers) cid, msgs = self._claim_controller.create( queue_name, metadata=metadata, project=project_id, **claim_options) # Buffer claimed messages # TODO(vkmc): optimize, along with serialization (below) resp_msgs = list(msgs) # Serialize claimed messages, if any. This logic assumes # the storage driver returned well-formed messages. if len(resp_msgs) != 0: resp_msgs = [api_utils.format_message(msg, cid) for msg in resp_msgs] headers = {'status': 201} body = {'claim_id': cid, 'messages': resp_msgs} else: headers = {'status': 204} body = {'claim_id': cid} return response.Response(req, body, headers) @api_utils.on_exception_sends_500 def claim_get(self, req): """Gets a claim :param req: Request instance ready to be sent. :type req: `api.common.Request` :return: resp: Response instance :type: resp: `api.common.Response` """ project_id = req._headers.get('X-Project-ID') queue_name = req._body.get('queue_name') claim_id = req._body.get('claim_id') LOG.debug('Claim get - claim: %(claim_id)s, ' 'queue: %(queue_name)s, project: %(project_id)s', {'queue_name': queue_name, 'project_id': project_id, 'claim_id': claim_id}) try: meta, msgs = self._claim_controller.get( queue_name, claim_id=claim_id, project=project_id) # Buffer claimed messages # TODO(vkmc): Optimize along with serialization (see below) meta['messages'] = list(msgs) except storage_errors.DoesNotExist as ex: LOG.debug(ex) error = _('Claim %s does not exist.') % claim_id headers = {'status': 404} return api_utils.error_response(req, ex, headers, error) # Serialize claimed messages # TODO(vkmc): Optimize meta['messages'] = [api_utils.format_message(msg, claim_id) for msg in meta['messages']] del meta['id'] headers = {'status': 200} body = meta return response.Response(req, body, headers) @api_utils.on_exception_sends_500 def claim_update(self, req): """Updates a claim :param req: Request instance ready to be sent. :type req: `api.common.Request` :return: resp: Response instance :type: resp: `api.common.Response` """ project_id = req._headers.get('X-Project-ID') queue_name = req._body.get('queue_name') claim_id = req._body.get('claim_id') LOG.debug('Claim update - claim: %(claim_id)s, ' 'queue: %(queue_name)s, project:%(project_id)s', {'queue_name': queue_name, 'project_id': project_id, 'claim_id': claim_id}) self._claim_patch_spec = ( ('ttl', int, self._defaults.claim_ttl), ('grace', int, self._defaults.claim_grace), ) # Read claim metadata (e.g., TTL) and raise appropriate # HTTP errors as needed. 
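        # For illustration only (hypothetical values): given a request
        # body such as
        #
        #     {'queue_name': 'q1', 'claim_id': 'abc123', 'ttl': 300}
        #
        # sanitize() keeps only the fields named in _claim_patch_spec,
        # coercing each to int and falling back to the configured
        # defaults for anything missing, so the resulting metadata here
        # would be {'ttl': 300, 'grace': <default claim grace>}.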
metadata = api_utils.sanitize(req._body, self._claim_patch_spec) try: self._validate.claim_updating(metadata) self._claim_controller.update(queue_name, claim_id=claim_id, metadata=metadata, project=project_id) headers = {'status': 204} body = _('Claim %s updated.') % claim_id return response.Response(req, body, headers) except validation.ValidationFailed as ex: LOG.debug(ex) headers = {'status': 400} return api_utils.error_response(req, ex, headers) except storage_errors.DoesNotExist as ex: LOG.debug(ex) error = _('Claim %s does not exist.') % claim_id headers = {'status': 404} return api_utils.error_response(req, ex, headers, error) @api_utils.on_exception_sends_500 def claim_delete(self, req): """Deletes a claim :param req: Request instance ready to be sent. :type req: `api.common.Request` :return: resp: Response instance :type: resp: `api.common.Response` """ project_id = req._headers.get('X-Project-ID') queue_name = req._body.get('queue_name') claim_id = req._body.get('claim_id') LOG.debug('Claim delete - claim: %(claim_id)s, ' 'queue: %(queue_name)s, project: %(project_id)s', {'queue_name': queue_name, 'project_id': project_id, 'claim_id': claim_id}) self._claim_controller.delete(queue_name, claim_id=claim_id, project=project_id) headers = {'status': 204} body = _('Claim %s deleted.') % claim_id return response.Response(req, body, headers) # Subscriptions @api_utils.on_exception_sends_500 def subscription_list(self, req): """List all subscriptions for a queue. :param req: Request instance ready to be sent. :type req: `api.common.Request` :return: resp: Response instance :type: resp: `api.common.Response` """ project_id = req._headers.get('X-Project-ID') queue_name = req._body.get('queue_name') LOG.debug('Subscription list - project: %(project)s', {'project': project_id}) try: kwargs = api_utils.get_headers(req) self._validate.subscription_listing(**kwargs) results = self._subscription_controller.list( queue_name, project=project_id, **kwargs) # Buffer list of subscriptions. Can raise NoPoolFound error. subscriptions = list(next(results)) except (ValueError, validation.ValidationFailed) as ex: LOG.debug(ex) headers = {'status': 400} return api_utils.error_response(req, ex, headers) except storage_errors.ExceptionBase as ex: error = 'Subscriptions could not be listed.' headers = {'status': 503} LOG.exception(error) return api_utils.error_response(req, ex, headers, error) # Got some. Prepare the response. body = {'subscriptions': subscriptions} headers = {'status': 200} return response.Response(req, body, headers) @api_utils.on_exception_sends_500 def subscription_create(self, req, subscriber): """Create a subscription for a queue. :param req: Request instance ready to be sent. 
:type req: `api.common.Request` :return: resp: Response instance :type: resp: `api.common.Response` """ project_id = req._headers.get('X-Project-ID') queue_name = req._body.get('queue_name') options = req._body.get('options', {}) ttl = req._body.get('ttl', self._defaults.subscription_ttl) LOG.debug( 'Subscription create - queue: %(queue)s, project: %(project)s', {'queue': queue_name, 'project': project_id}) try: url = netutils.urlsplit(subscriber) mgr = driver.DriverManager('zaqar.notification.tasks', url.scheme, invoke_on_load=True) req_data = req._env.copy() mgr.driver.register(subscriber, options, ttl, project_id, req_data) data = {'subscriber': subscriber, 'options': options, 'ttl': ttl} self._validate.subscription_posting(data) self._validate.queue_identification(queue_name, project_id) if not self._queue_controller.exists(queue_name, project_id): self._queue_controller.create(queue_name, project=project_id) created = self._subscription_controller.create(queue_name, subscriber, data['ttl'], data['options'], project=project_id) except validation.ValidationFailed as ex: LOG.debug(ex) headers = {'status': 400} return api_utils.error_response(req, ex, headers) except storage_errors.ExceptionBase as ex: error = _('Subscription %s could not be created.') % queue_name headers = {'status': 503} LOG.exception(error) return api_utils.error_response(req, ex, headers, error) else: if created: msg = _('Subscription %s created.') % queue_name body = {'subscription_id': str(created), 'message': msg} headers = {'status': 201} else: body = _('Subscription %s not created.') % queue_name headers = {'status': 409} return response.Response(req, body, headers) @api_utils.on_exception_sends_500 def subscription_delete(self, req): """Delete a specific subscription by ID. :param req: Request instance ready to be sent. :type req: `api.common.Request` :return: resp: Response instance :type: resp: `api.common.Response` """ project_id = req._headers.get('X-Project-ID') queue_name = req._body.get('queue_name') subscription_id = req._body.get('subscription_id') LOG.debug( 'Subscription delete - queue: %(queue)s, project: %(project)s', {'queue': queue_name, 'project': project_id}) try: self._subscription_controller.delete(queue_name, subscription_id, project=project_id) except storage_errors.ExceptionBase as ex: error = _('Subscription %(subscription)s for queue %(queue)s ' 'could not be deleted.') % { 'subscription': subscription_id, 'queue': queue_name} headers = {'status': 503} LOG.exception(error) return api_utils.error_response(req, ex, headers, error) else: body = _('Subscription %s removed.') % subscription_id headers = {'status': 204} return response.Response(req, body, headers) @api_utils.on_exception_sends_500 def subscription_get(self, req): """Retrieve details about an existing subscription. :param req: Request instance ready to be sent. 
:type req: `api.common.Request` :return: resp: Response instance :type: resp: `api.common.Response` """ project_id = req._headers.get('X-Project-ID') queue_name = req._body.get('queue_name') subscription_id = req._body.get('subscription_id') LOG.debug('Subscription get - queue: %(queue)s, ' 'project: %(project)s', {'queue': queue_name, 'project': project_id}) try: resp_dict = self._subscription_controller.get(queue_name, subscription_id, project=project_id) except storage_errors.DoesNotExist as ex: LOG.debug(ex) error = _('Subscription %(subscription)s for queue %(queue)s ' 'does not exist.') % { 'subscription': subscription_id, 'queue': queue_name} headers = {'status': 404} return api_utils.error_response(req, ex, headers, error) except storage_errors.ExceptionBase as ex: headers = {'status': 503} error = _('Cannot retrieve subscription %s.') % subscription_id LOG.exception(error) return api_utils.error_response(req, ex, headers, error) else: body = resp_dict headers = {'status': 200} return response.Response(req, body, headers) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/api/v2/request.py0000664000175100017510000001050115033040005017213 0ustar00mylesmyles# Copyright (c) 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
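# NOTE: Illustrative only -- a hypothetical request document that would
# satisfy the SUBSCRIPTION_CREATE schema defined below (header and body
# values are made up):
#
#     {
#         'action': 'subscription_create',
#         'headers': {'Client-ID': '<uuid>', 'X-Project-ID': '<project>'},
#         'body': {
#             'queue_name': 'my-queue',
#             'subscriber': 'http://example.com/hook',
#             'ttl': 3600,
#             'options': {},
#         },
#     }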
from zaqar.api.v1_1 import request as v1_1 from zaqar.common import consts class RequestSchema(v1_1.RequestSchema): headers = v1_1.RequestSchema.headers schema = v1_1.RequestSchema.schema schema.update({ # Subscriptions consts.SUBSCRIPTION_LIST: { 'properties': { 'action': {'enum': [consts.SUBSCRIPTION_LIST]}, 'headers': { 'type': 'object', 'properties': headers, 'required': ['Client-ID', 'X-Project-ID'] }, 'body': { 'type': 'object', 'properties': { 'queue_name': {'type': 'string'}, }, 'required': ['queue_name'], } }, 'required': ['action', 'headers', 'body'] }, consts.SUBSCRIPTION_CREATE: { 'properties': { 'action': {'enum': [consts.SUBSCRIPTION_CREATE]}, 'headers': { 'type': 'object', 'properties': headers, 'required': ['Client-ID', 'X-Project-ID']}, 'body': { 'type': 'object', 'properties': { 'queue_name': {'type': 'string'}, 'subscriber': {'type': 'string'}, 'ttl': {'type': 'integer'}, 'options': {'type': 'object'}, }, 'required': ['queue_name', ], } }, 'required': ['action', 'headers', 'body'] }, consts.SUBSCRIPTION_DELETE: { 'properties': { 'action': {'enum': [consts.SUBSCRIPTION_DELETE]}, 'headers': { 'type': 'object', 'properties': headers, 'required': ['Client-ID', 'X-Project-ID'] }, 'body': { 'type': 'object', 'properties': { 'queue_name': {'type': 'string'}, 'subscription_id': {'type': 'string'}, }, 'required': ['queue_name', 'subscription_id'] } }, 'required': ['action', 'headers', 'body'] }, consts.SUBSCRIPTION_GET: { 'properties': { 'action': {'enum': [consts.SUBSCRIPTION_GET]}, 'headers': { 'type': 'object', 'properties': headers, 'required': ['Client-ID', 'X-Project-ID'] }, 'body': { 'type': 'object', 'properties': { 'queue_name': {'type': 'string'}, 'subscription_id': {'type': 'string'}, }, 'required': ['queue_name', 'subscription_id'], } }, 'required': ['action', 'headers', 'body'] }, consts.QUEUE_PURGE: { 'properties': { 'action': {'enum': [consts.QUEUE_PURGE]}, 'headers': { 'type': 'object', 'properties': headers, 'required': ['Client-ID', 'X-Project-ID']}, 'body': { 'type': 'object', 'properties': { 'queue_name': {'type': 'string'}, 'resource_types': {'type': 'array'}, }, 'required': ['queue_name'], } }, 'required': ['action', 'headers', 'body'] }, }) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/api/v2/response.py0000664000175100017510000003633515033040005017376 0ustar00mylesmyles# Copyright (c) 2013 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
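# NOTE: Minimal usage sketch (assumes ``jsonschema`` is available and
# that ``limits`` carries the transport limit options, as elsewhere in
# this tree); validating a response payload against one of the schemas
# built below might look like:
#
#     import jsonschema
#
#     schema = ResponseSchema(limits).schema[consts.QUEUE_LIST]
#     jsonschema.validate(payload, schema)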
from zaqar.common.api import api from zaqar.common import consts class ResponseSchema(api.Api): """Define validation schema for json response.""" def __init__(self, limits): self.limits = limits age = { "type": "number", "minimum": 0 } message = { "type": "object", "properties": { "id": { "type": "string", }, "href": { "type": "string", "pattern": r"^(/v1/queues/[a-zA-Z0-9_-]{1,64}" r"/messages/[a-zA-Z0-9_-]+)(\?claim_id=[a-zA-Z0-9_-]+)?$" }, "age": age, "ttl": { "type": "number", "minimum": 1, "maximum": self.limits.max_message_ttl }, "body": { "type": "object" }, "checksum": { "type": "string", }, }, "required": ["href", "ttl", "age", "body", "id"], "additionalProperties": False, } claim_href = { "type": "string", "pattern": r"^(/v2/queues/[a-zA-Z0-9_-]{1,64}" r"/messages/[a-zA-Z0-9_-]+)" r"\?claim_id=[a-zA-Z0-9_-]+$" } flavor = { 'type': 'object', 'properties': { 'href': { 'type': 'string', 'pattern': r'^/v2/flavors/[a-zA-Z0-9_-]{1,64}$' }, 'pool': { 'type': 'string', }, 'project': { 'type': 'string' }, 'capabilities': { 'type': 'object', 'additionalProperties': True } }, 'required': ['href', 'pool', 'project'], 'additionalProperties': False, } self.schema = { consts.MESSAGE_GET_MANY: { 'type': 'object', 'properties': { 'messages': { "type": "array", "items": message, "minItems": 1, "maxItems": self.limits.max_messages_per_page } }, 'required': ['messages'], 'additionalProperties': False, }, consts.QUEUE_LIST: { 'type': 'object', 'properties': { 'links': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'rel': { 'type': 'string', 'enum': ['next'], }, 'href': { 'type': 'string', "pattern": r"^/v2/queues\?", } }, 'required': ['rel', 'href'], 'additionalProperties': False, }, 'minItems': 1, 'maxItems': 1, }, 'queues': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'name': { 'type': 'string', 'pattern': r'^[a-zA-Z0-9_-]{1,64}$' }, 'href': { 'type': 'string', 'pattern': r'^/v2/queues/' r'[a-zA-Z0-9_-]{1,64}$', }, 'metadata': { 'type': 'object', } }, 'required': ['name', 'href'], 'additionalProperties': False, }, 'minItems': 1, 'maxItems': self.limits.max_queues_per_page, } }, 'required': ['links', 'queues'], 'additionalProperties': False, }, consts.QUEUE_GET_STATS: { 'type': 'object', 'properties': { 'messages': { 'type': 'object', 'properties': { 'free': { 'type': 'number', 'minimum': 0 }, 'claimed': { 'type': 'number', 'minimum': 0 }, 'total': { 'type': 'number', 'minimum': 0 }, 'oldest': { 'type': 'object' }, 'newest': { 'type': 'object' } }, 'required': ['free', 'claimed', 'total'], 'additionalProperties': False } }, 'required': ['messages'], 'additionalProperties': False }, consts.POOL_LIST: { 'type': 'object', 'properties': { 'links': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'rel': { 'type': 'string' }, 'href': { 'type': 'string', 'pattern': r'^/v2/pools\?' 
} }, 'required': ['rel', 'href'], 'additionalProperties': False } }, 'pools': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'href': { 'type': 'string', 'pattern': r'^/v2/' r'pools/[a-zA-Z0-9_-]{1,64}$' }, 'weight': { 'type': 'number', 'minimum': -1 }, 'name': { 'type': 'string' }, 'uri': { 'type': 'string' }, 'flavor': { 'type': ['string', 'null'] }, 'options': { 'type': 'object', 'additionalProperties': True } }, 'required': ['href', 'weight', 'uri'], 'additionalProperties': False, }, } }, 'required': ['links', 'pools'], 'additionalProperties': False }, consts.MESSAGE_LIST: { 'type': 'object', 'properties': { 'links': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'rel': { 'type': 'string' }, 'href': { 'type': 'string', 'pattern': r'^/v2/queues/[a-zA-Z0-9_-]+' r'/messages\?(.)*$' } }, 'required': ['rel', 'href'], 'additionalProperties': False } }, 'messages': { "type": "array", "items": message, "minItems": 0, "maxItems": self.limits.max_messages_per_claim_or_pop } } }, consts.POOL_GET_DETAIL: { 'type': 'object', 'properties': { 'name': { 'type': 'string' }, 'uri': { 'type': 'string' }, 'flavor': { 'type': ['string', 'null'] }, 'weight': { 'type': 'number', 'minimum': -1 }, 'href': { 'type': 'string', 'pattern': r'^/v2/pools/' r'[a-zA-Z0-9_\-]+$' }, 'options': { 'type': 'object', 'additionalProperties': True } }, 'required': ['uri', 'weight', 'href'], 'additionalProperties': False }, consts.CLAIM_CREATE: { 'type': 'object', 'properties': { 'messages': { "type": "array", "items": { "type": "object", "properties": { "id": { "type": "string", }, "href": claim_href, "ttl": { "type": "number", "minimum": 1, "maximum": self.limits.max_message_ttl }, "age": age, "body": { "type": "object" }, "checksum": { "type": "string", }, }, "required": ["href", "ttl", "age", "body", "id"], "additionalProperties": False, }, "minItems": 1, "maxItems": self.limits.max_messages_per_page } }, 'required': ['messages'], 'additionalProperties': False }, consts.CLAIM_GET: { 'type': 'object', 'properties': { 'age': age, 'ttl': { 'type': 'number', 'minimum': 0, 'maximum': self.limits.max_claim_ttl }, 'href': { 'type': 'string', 'pattern': r'^/v2/queues/[a-zA-Z0-9_-]+' r'/claims/[a-zA-Z0-9_-]+$' }, 'messages': { "type": "array", "items": { "type": "object", "properties": { "id": { "type": "string", }, "href": claim_href, "ttl": { "type": "number", "minimum": 1, "maximum": self.limits.max_message_ttl }, "age": age, "body": { "type": "object" } }, "required": ["href", "ttl", "age", "body", "id"], "additionalProperties": False, }, "minItems": 1, "maxItems": self.limits.max_messages_per_page } }, 'required': ['age', 'ttl', 'messages', 'href'], 'additionalProperties': False }, consts.FLAVOR_LIST: { 'type': 'object', 'properties': { 'links': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'rel': { 'type': 'string' }, 'href': { 'type': 'string', 'pattern': r'^/v2/flavors\?' 
} }, 'required': ['rel', 'href'], 'additionalProperties': False } }, 'flavors': { 'type': 'array', 'items': flavor, } }, 'required': ['links', 'flavors'], 'additionalProperties': False } } ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5630136 zaqar-20.1.0.dev29/zaqar/bench/0000775000175100017510000000000015033040026015136 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/bench/__init__.py0000664000175100017510000000000015033040005017232 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/bench/conductor.py0000664000175100017510000000570415033040005017513 0ustar00mylesmyles# Copyright (c) 2014 Rackspace, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import multiprocessing as mp import os from oslo_serialization import jsonutils # NOTE(Eva-i): See https://github.com/gevent/gevent/issues/349. Let's keep # it until the new stable version of gevent(>=1.1) will be released. os.environ["GEVENT_RESOLVER"] = "ares" from zaqar.bench import config from zaqar.bench import consumer from zaqar.bench import helpers from zaqar.bench import observer from zaqar.bench import producer CONF = config.conf def _print_debug_stats(name, stats): print(name.capitalize()) print('=' * len(name)) values = sorted(stats.items(), key=lambda v: v[0]) formatted_vals = ['{}: {:.1f}'.format(*v) for v in values] print('\n'.join(formatted_vals)) print() # Blank line def _reset_queues(): cli = helpers.get_new_client() for queue_name in helpers.queue_names: queue = cli.queue(queue_name) queue.delete() def main(): CONF(project='zaqar', prog='zaqar-benchmark') # NOTE(kgriffs): Reset queues since last time. We don't # clean them up after the performance test, in case # the user wants to examine the state of the system. 
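    # For example (hypothetical invocation), the reset can be skipped
    # via the CLI option registered in zaqar.bench.config:
    #
    #     zaqar-benchmark --skip_queue_reset -t 10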
if not CONF.skip_queue_reset: if CONF.debug: print('Resetting queues...') _reset_queues() downstream_queue = mp.Queue() procs = [mp.Process(target=worker.run, args=(downstream_queue,)) for worker in [producer, consumer, observer]] for each_proc in procs: each_proc.start() for each_proc in procs: each_proc.join() stats = {} for each_proc in procs: stats.update(downstream_queue.get_nowait()) if CONF.debug: print() for name in ('producer', 'observer', 'consumer'): stats_group = stats[name] # Skip disabled workers if not stats_group['duration_sec']: continue _print_debug_stats(name, stats_group) else: stats['params'] = { 'producer': { 'processes': CONF.producer_processes, 'workers': CONF.producer_workers }, 'consumer': { 'processes': CONF.consumer_processes, 'workers': CONF.consumer_workers }, 'observer': { 'processes': CONF.observer_processes, 'workers': CONF.observer_workers }, } print(jsonutils.dumps(stats)) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/bench/config.py0000664000175100017510000000477315033040005016765 0ustar00mylesmyles# Copyright (c) 2014 Rackspace, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg conf = cfg.CONF _CLI_OPTIONS = ( cfg.IntOpt( 'producer_processes', short='pp', default=1, help='Number of Producer Processes'), cfg.IntOpt( 'producer_workers', short='pw', default=10, help='Number of Producer Workers'), cfg.IntOpt( 'consumer_processes', short='cp', default=1, help='Number of Consumer Processes'), cfg.IntOpt( 'consumer_workers', short='cw', default=0, help='Number of Consumer Workers'), cfg.IntOpt( 'observer_processes', short='op', default=1, help='Number of Observer Processes'), cfg.IntOpt( 'observer_workers', short='ow', default=5, help='Number of Observer Workers'), cfg.BoolOpt('debug', default=True, help=('Tag to indicate if print the details of running.')), cfg.FloatOpt('api_version', short='api', default='2', help='Zaqar API version to use'), cfg.IntOpt('messages_per_claim', short='cno', default=5, help=('Number of messages the consumer will attempt to ' 'claim at a time')), cfg.IntOpt('messages_per_list', short='lno', default=5, help=('Number of messages the observer will attempt to ' 'list at a time')), cfg.IntOpt('time', short='t', default=5, help="Duration of the performance test, in seconds"), cfg.StrOpt('server_url', short='s', default='http://localhost:8888'), cfg.StrOpt('queue_prefix', short='q', default='ogre-test-queue'), cfg.IntOpt('num_queues', short='qno', default=4), cfg.StrOpt('messages_path', short='m'), cfg.BoolOpt('skip_queue_reset', default=False, help=('Do not reset queues before running' 'the performance test')), ) conf.register_cli_opts(_CLI_OPTIONS) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/bench/consumer.py0000664000175100017510000001370315033040005017344 0ustar00mylesmyles# Copyright (c) 2014 Rackspace, Inc. 
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import multiprocessing as mp import random import sys import time from gevent import monkey as curious_george curious_george.patch_all(thread=False, select=False) import gevent import marktime from zaqarclient.transport import errors from zaqar.bench import config from zaqar.bench import helpers CONF = config.conf def claim_delete(queues, stats, test_duration, ttl, grace, limit): """Consumer Worker The Consumer Worker continuously claims and deletes messages for the specified duration. The time taken for each claim and delete is recorded for calculating throughput and latency. """ end = time.time() + test_duration claim_total_elapsed = 0 delete_total_elapsed = 0 total_failed_requests = 0 claim_total_requests = 0 delete_total_requests = 0 while time.time() < end: # NOTE(kgriffs): Distribute requests across all queues evenly. queue = random.choice(queues) try: marktime.start('claim_message') claim = queue.claim(ttl=ttl, grace=grace, limit=limit) claim_total_elapsed += marktime.stop('claim_message').seconds claim_total_requests += 1 except errors.TransportError as ex: sys.stderr.write("Could not claim messages : {0}\n".format(ex)) total_failed_requests += 1 else: for msg in claim: try: marktime.start('delete_message') msg.delete() elapsed = marktime.stop('delete_message').seconds delete_total_elapsed += elapsed delete_total_requests += 1 except errors.TransportError as ex: msg = "Could not delete messages: {0}\n".format(ex) sys.stderr.write(msg) total_failed_requests += 1 total_requests = (claim_total_requests + delete_total_requests + total_failed_requests) stats.put({ 'total_requests': total_requests, 'claim_total_requests': claim_total_requests, 'delete_total_requests': delete_total_requests, 'claim_total_elapsed': claim_total_elapsed, 'delete_total_elapsed': delete_total_elapsed, }) def load_generator(stats, num_workers, num_queues, test_duration, url, ttl, grace, limit): cli = helpers.get_new_client() queues = [] for queue_name in helpers.queue_names: queues.append(cli.queue(queue_name)) gevent.joinall([ gevent.spawn(claim_delete, queues, stats, test_duration, ttl, grace, limit) for _ in range(num_workers) ]) def crunch(stats): total_requests = 0 claim_total_elapsed = 0.0 delete_total_elapsed = 0.0 claim_total_requests = 0 delete_total_requests = 0 while not stats.empty(): entry = stats.get_nowait() total_requests += entry['total_requests'] claim_total_elapsed += entry['claim_total_elapsed'] delete_total_elapsed += entry['delete_total_elapsed'] claim_total_requests += entry['claim_total_requests'] delete_total_requests += entry['delete_total_requests'] return (total_requests, claim_total_elapsed, delete_total_elapsed, claim_total_requests, delete_total_requests) def run(upstream_queue): num_procs = CONF.consumer_processes num_workers = CONF.consumer_workers num_queues = CONF.num_queues # Stats that will be reported duration = 0 total_requests = 0 successful_requests = 0 claim_total_requests = 0 delete_total_requests = 0 throughput = 0 claim_latency = 0 
delete_latency = 0 # Performance test if num_procs and num_workers: stats = mp.Queue() # TODO(TheSriram) : Make ttl and grace configurable args = (stats, num_workers, num_queues, CONF.time, CONF.server_url, 300, 200, CONF.messages_per_claim) procs = [mp.Process(target=load_generator, args=args) for _ in range(num_procs)] if CONF.debug: print('\nStarting consumers (cp={0}, cw={1})...'.format( num_procs, num_workers)) start = time.time() for each_proc in procs: each_proc.start() for each_proc in procs: each_proc.join() (total_requests, claim_total_elapsed, delete_total_elapsed, claim_total_requests, delete_total_requests) = crunch(stats) successful_requests = claim_total_requests + delete_total_requests duration = time.time() - start # NOTE(kgriffs): Duration should never be zero throughput = successful_requests / duration if claim_total_requests: claim_latency = (1000 * claim_total_elapsed / claim_total_requests) if delete_total_requests: delete_latency = (1000 * delete_total_elapsed / delete_total_requests) upstream_queue.put({ 'consumer': { 'duration_sec': duration, 'total_reqs': total_requests, 'claim_total_requests': claim_total_requests, 'successful_reqs': successful_requests, 'messages_processed': delete_total_requests, 'reqs_per_sec': throughput, 'ms_per_claim': claim_latency, 'ms_per_delete': delete_latency, } }) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/bench/helpers.py0000664000175100017510000001117615033040005017155 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # NOTE(Eva-i): Some code was taken from python-zaqarclient. import os import sys import openstack.config from zaqarclient.queues import client from zaqar.bench import config CONF = config.conf def _get_credential_args(): """Retrieves credential arguments for keystone Credentials are either read via os-client-config from the environment or from a config file ('clouds.yaml'). Config file variables override those from the environment variables. devstack produces a clouds.yaml with two named clouds - one named 'devstack' which has user privs and one named 'devstack-admin' which has admin privs. This function will default to getting the credentials from environment variables. If not all required credentials present in environment variables, it tries to get credentials for 'devstack-admin' cloud in clouds.yaml. If no 'devstack-admin' cloud found, it tried to get credentials for 'devstack' cloud. If no 'devstack' cloud found, throws an error and stops the application. 
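
    For example (hypothetical values), the environment-variable path
    could be satisfied with::

        export OS_USERNAME=admin
        export OS_PASSWORD=secret
        export OS_AUTH_URL=http://keystone.example.com/identity/v3
        export OS_PROJECT_NAME=admin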
""" os_cfg = openstack.config.OpenStackConfig() cloud = os_cfg.get_one_cloud() cred_args = cloud.get_auth_args() cred_args['insecure'] = cloud.auth.get('insecure') cred_args['cacert'] = cloud.auth.get('cacert') cred_args['token'] = cloud.auth.get('token') required_options = ['username', 'password', 'auth_url', 'project_name'] if not all(arg in cred_args for arg in required_options): try: cloud = os_cfg.get_one_cloud(cloud='devstack-admin') except Exception: try: cloud = os_cfg.get_one_cloud(cloud='devstack') except Exception: print("Insufficient amount of credentials found for keystone " "authentication. Credentials should reside either in " "environment variables or in 'clouds.yaml' file. If " "both present, the ones in environment variables will " "be preferred. Exiting.") sys.exit() cred_args = cloud.get_auth_args() print("Using '{}' credentials".format(cloud.name)) return cred_args def _generate_client_conf(): auth_strategy = os.environ.get('OS_AUTH_STRATEGY', 'noauth') if auth_strategy == 'keystone': args = _get_credential_args() conf = { 'auth_opts': { 'backend': 'keystone', 'options': { 'os_username': args.get('username'), 'os_password': args.get('password'), 'os_project_name': args['project_name'], 'os_auth_url': args['auth_url'], 'insecure': args.get('insecure'), 'cacert': args.get('cacert'), 'auth_token': args.get('token') }, }, } else: conf = { 'auth_opts': { 'backend': 'noauth', 'options': { 'os_project_id': 'my-lovely-benchmark', }, }, } print("Using '{0}' authentication method".format(conf['auth_opts'] ['backend'])) return conf class LazyAPIVersion(object): def __init__(self): self.api_version = None @property def get(self): if self.api_version is None: conversion_map = { 1.0: 1, 1.1: 1.1, 2.0: 2, } try: self.api_version = conversion_map[CONF.api_version] except KeyError: print("Unknown Zaqar API version: '{}'. Exiting...".format( CONF.api_version)) sys.exit() print("Benchmarking Zaqar API v{0}...".format(self.api_version)) return self.api_version client_conf = _generate_client_conf() client_api = LazyAPIVersion() queue_names = [] for i in range(CONF.num_queues): queue_names.append((CONF.queue_prefix + '-' + str(i))) def get_new_client(): return client.Client(CONF.server_url, client_api.get, conf=client_conf) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/bench/observer.py0000664000175100017510000001134415033040005017337 0ustar00mylesmyles# Copyright (c) 2014 Rackspace, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import multiprocessing as mp import random import sys import time from gevent import monkey as curious_george curious_george.patch_all(thread=False, select=False) import gevent import marktime import urllib from zaqarclient.transport import errors from zaqar.bench import config from zaqar.bench import helpers CONF = config.conf # # TODO(kgriffs): Factor out the common code from producer, consumer # and worker (DRY all the things!) 
# def _extract_marker(links): for link in links: if link['rel'] == 'next': href = link['href'] break query = urllib.parse.urlparse(href).query params = urllib.parse.parse_qs(query) return params['marker'][0] def observer(queues, stats, test_duration, limit): """Observer Worker The observer lists messages without claiming them. """ end = time.time() + test_duration total_elapsed = 0 total_succeeded = 0 total_failed = 0 queues = [{'q': q, 'm': None} for q in queues] while time.time() < end: # NOTE(kgriffs): Distribute requests across all queues evenly. queue = random.choice(queues) try: marktime.start('list_messages') cursor = queue['q'].messages(limit=limit, marker=queue['m'], include_claimed=True) total_elapsed += marktime.stop('list_messages').seconds total_succeeded += 1 messages = list(cursor) if messages: # TODO(kgriffs): Figure out a less hacky way to do this # while preserving the ability to measure elapsed time # per request. queue['m'] = _extract_marker(cursor._links) except errors.TransportError as ex: sys.stderr.write("Could not list messages : {0}\n".format(ex)) total_failed += 1 total_requests = total_succeeded + total_failed stats.put({ 'total_requests': total_requests, 'total_succeeded': total_succeeded, 'total_elapsed': total_elapsed, }) def load_generator(stats, num_workers, num_queues, test_duration, limit): cli = helpers.get_new_client() queues = [] for queue_name in helpers.queue_names: queues.append(cli.queue(queue_name)) gevent.joinall([ gevent.spawn(observer, queues, stats, test_duration, limit) for _ in range(num_workers) ]) def crunch(stats): total_requests = 0 total_succeeded = 0 total_elapsed = 0.0 while not stats.empty(): entry = stats.get_nowait() total_requests += entry['total_requests'] total_succeeded += entry['total_succeeded'] total_elapsed += entry['total_elapsed'] return total_requests, total_succeeded, total_elapsed def run(upstream_queue): num_procs = CONF.observer_processes num_workers = CONF.observer_workers num_queues = CONF.num_queues # Stats that will be reported duration = 0 total_requests = 0 total_succeeded = 0 throughput = 0 latency = 0 # Performance test if num_procs and num_workers: test_duration = CONF.time stats = mp.Queue() args = (stats, num_workers, num_queues, test_duration, CONF.messages_per_list) procs = [mp.Process(target=load_generator, args=args) for _ in range(num_procs)] if CONF.debug: print('\nStarting observer (op={0}, ow={1})...'.format( num_procs, num_workers)) start = time.time() for each_proc in procs: each_proc.start() for each_proc in procs: each_proc.join() (total_requests, total_succeeded, total_elapsed) = crunch(stats) duration = time.time() - start throughput = total_succeeded / duration if total_succeeded: latency = (1000 * total_elapsed / total_succeeded) upstream_queue.put({ 'observer': { 'duration_sec': duration, 'total_reqs': total_requests, 'successful_reqs': total_succeeded, 'reqs_per_sec': throughput, 'ms_per_req': latency, } }) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/bench/producer.py0000664000175100017510000001246215033040005017335 0ustar00mylesmyles# Copyright (c) 2014 Rackspace, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import multiprocessing as mp import random import sys import time from gevent import monkey as curious_george curious_george.patch_all(thread=False, select=False) import gevent import marktime from oslo_serialization import jsonutils from zaqarclient.transport import errors from zaqar.bench import config from zaqar.bench import helpers CONF = config.conf def choose_message(message_pool): """Choose a message from our pool of possibilities.""" # Assume message_pool is sorted by weight, ascending position = random.random() accumulator = 0.00 for each_message in message_pool: accumulator += each_message['weight'] if position < accumulator: return each_message['doc'] assert False def load_messages(): default_file_name = 'zaqar-benchmark-messages.json' messages_path = CONF.messages_path or CONF.find_file(default_file_name) if messages_path: with open(messages_path) as f: message_pool = jsonutils.load(f) message_pool.sort(key=lambda msg: msg['weight']) return message_pool else: return [{"weight": 1.0, "doc": {"ttl": 60, "body": {"id": "7FA23C90-62F7-40D2-9360-FBD5D7D61CD1", "evt": "Single"}}}] def producer(queues, message_pool, stats, test_duration): """Producer Worker The Producer Worker continuously post messages for the specified duration. The time taken for each post is recorded for calculating throughput and latency. """ total_requests = 0 successful_requests = 0 total_elapsed = 0 end = time.time() + test_duration while time.time() < end: queue = random.choice(queues) try: marktime.start('post_message') queue.post(choose_message(message_pool)) total_elapsed += marktime.stop('post_message').seconds successful_requests += 1 except errors.TransportError as ex: sys.stderr.write("Could not post a message : {0}\n".format(ex)) total_requests += 1 stats.put({ 'successful_requests': successful_requests, 'total_requests': total_requests, 'total_elapsed': total_elapsed }) # TODO(TheSriram): make distributed across multiple machines # TODO(TheSriram): post across several queues (which workers to which queues? # weight them, so can have some busy queues, some not.) 
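# For illustration (hypothetical pool): with a message pool sorted by
# ascending weight, as load_messages() guarantees,
#
#     pool = [{'weight': 0.2, 'doc': {'ttl': 60, 'body': {'id': 'a'}}},
#             {'weight': 0.8, 'doc': {'ttl': 60, 'body': {'id': 'b'}}}]
#
# choose_message(pool) returns the first doc about 20% of the time and
# the second about 80%, since the accumulated weights partition [0, 1)
# and random.random() picks a point in it.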
def load_generator(stats, num_workers, num_queues, test_duration): cli = helpers.get_new_client() queues = [] for queue_name in helpers.queue_names: queues.append(cli.queue(queue_name)) message_pool = load_messages() gevent.joinall([ gevent.spawn(producer, queues, message_pool, stats, test_duration) for _ in range(num_workers) ]) def crunch(stats): total_requests = 0 total_latency = 0.0 successful_requests = 0 while not stats.empty(): entry = stats.get_nowait() total_requests += entry['total_requests'] total_latency += entry['total_elapsed'] successful_requests += entry['successful_requests'] return successful_requests, total_requests, total_latency def run(upstream_queue): num_procs = CONF.producer_processes num_workers = CONF.producer_workers num_queues = CONF.num_queues duration = 0 total_requests = 0 successful_requests = 0 throughput = 0 latency = 0 if num_procs and num_workers: test_duration = CONF.time stats = mp.Queue() args = (stats, num_workers, num_queues, test_duration) # TODO(TheSriram): Multiple test runs, vary num workers and # drain/delete queues in between each run. Plot these on a # graph, with concurrency as the X axis. procs = [ mp.Process(target=load_generator, args=args) for _ in range(num_procs) ] if CONF.debug: print('\nStarting producer (pp={0}, pw={1})...'.format( num_procs, num_workers)) start = time.time() for each_proc in procs: each_proc.start() for each_proc in procs: each_proc.join() successful_requests, total_requests, total_latency = crunch(stats) duration = time.time() - start # NOTE(kgriffs): Duration should never be zero throughput = successful_requests / duration if successful_requests: latency = 1000 * total_latency / successful_requests upstream_queue.put({ 'producer': { 'duration_sec': duration, 'total_reqs': total_requests, 'successful_reqs': successful_requests, 'reqs_per_sec': throughput, 'ms_per_req': latency } }) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/bootstrap.py0000664000175100017510000001115415033040005016445 0ustar00mylesmyles# Copyright (c) 2013 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import socket from oslo_log import log from osprofiler import opts as profiler_opts from osprofiler import profiler from stevedore import driver from zaqar.api import handler from zaqar.common import cache as oslo_cache from zaqar.common import consts from zaqar.common import decorators from zaqar.common import errors from zaqar.conf import drivers as driver_opts from zaqar.conf import opts as opts_tool from zaqar.storage import pipeline from zaqar.storage import pooling from zaqar.storage import utils as storage_utils from zaqar.transport import base from zaqar.transport.middleware import profile from zaqar.transport import validation LOG = log.getLogger(__name__) class Bootstrap(object): """Defines the Zaqar bootstrapper. The bootstrap loads up drivers per a given configuration, and manages their lifetimes. 
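
    Usage sketch (mirroring zaqar.cmd.server; assumes ``cfg.CONF`` has
    been populated)::

        conf = cfg.CONF
        conf(project='zaqar', prog='zaqar-server')
        server = Bootstrap(conf)
        server.run()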
""" def __init__(self, conf): self.conf = conf for group, opts in opts_tool.list_opts_by_group(): self.conf.register_opts(opts, group=group) profiler_opts.set_defaults(self.conf) # TODO(wangxiyuan): Now the OSprofiler feature in Zaqar only support # wsgi. Websocket part will be added in the future. profile.setup(self.conf, 'Zaqar-server', socket.gethostname()) self.driver_conf = self.conf[driver_opts.GROUP_NAME] @decorators.lazy_property(write=False) def api(self): LOG.debug('Loading API handler') validate = validation.Validator(self.conf) defaults = base.ResourceDefaults(self.conf) return handler.Handler(self.storage, self.control, validate, defaults) @decorators.lazy_property(write=False) def storage(self): LOG.debug('Loading storage driver') if self.conf.pooling: LOG.debug('Storage pooling enabled') storage_driver = pooling.DataDriver(self.conf, self.cache, self.control) if self.conf.profiler.enabled: storage_driver = profiler.trace_cls("pooling_data_" "driver")(storage_driver) else: storage_driver = storage_utils.load_storage_driver( self.conf, self.cache, control_driver=self.control) LOG.debug('Loading storage pipeline') return pipeline.DataDriver(self.conf, storage_driver, self.control) @decorators.lazy_property(write=False) def control(self): LOG.debug('Loading storage control driver') return storage_utils.load_storage_driver(self.conf, self.cache, control_mode=True) @decorators.lazy_property(write=False) def cache(self): LOG.debug('Loading proxy cache driver') try: oslo_cache.register_config(self.conf) return oslo_cache.get_cache(self.conf) except RuntimeError as exc: LOG.exception('Error loading proxy cache.') raise errors.InvalidDriver(exc) @decorators.lazy_property(write=False) def transport(self): transport_name = self.driver_conf.transport LOG.debug('Loading transport driver: %s', transport_name) if transport_name == consts.TRANSPORT_WEBSOCKET: args = [self.conf, self.api, self.cache] else: args = [ self.conf, self.storage, self.cache, self.control, ] try: mgr = driver.DriverManager('zaqar.transport', transport_name, invoke_on_load=True, invoke_args=args) return mgr.driver except RuntimeError as exc: LOG.exception('Failed to load transport driver zaqar.transport.' '%(driver)s with args %(args)s', {'driver': transport_name, 'args': args}) raise errors.InvalidDriver(exc) def run(self): self.transport.listen() ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5630136 zaqar-20.1.0.dev29/zaqar/cmd/0000775000175100017510000000000015033040026014622 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/cmd/__init__.py0000664000175100017510000000000015033040005016716 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/cmd/gc.py0000664000175100017510000000206615033040005015566 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_config import cfg from oslo_log import log from zaqar import bootstrap from zaqar.common import cli LOG = log.getLogger(__name__) # In this first approach it's the responsibility of the operator # to call the garbage collector manually. Using crontab or a similar # tool is advised. @cli.runnable def run(): # Use the global CONF instance conf = cfg.CONF conf(project='zaqar', prog='zaqar-gc') server = bootstrap.Bootstrap(conf) LOG.debug('Calling the garbage collector') server.storage.gc() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/cmd/server.py0000664000175100017510000000564015033040005016504 0ustar00mylesmyles# Copyright (c) 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os from oslo_config import cfg from oslo_log import log from oslo_reports import guru_meditation_report as gmr from oslo_reports import opts as gmr_opts from zaqar import bootstrap from zaqar.common import cli from zaqar.conf import default from zaqar import version # NOTE(eggmaster): define command line options for zaqar-server _CLI_OPTIONS = ( default.admin_mode, cfg.BoolOpt('daemon', default=False, help='Run Zaqar server in the background.'), ) @cli.runnable def run(): # Use the global CONF instance conf = cfg.CONF gmr_opts.set_defaults(conf) # NOTE(eggmaster): register command line options for zaqar-server conf.register_cli_opts(_CLI_OPTIONS) log.register_options(conf) # NOTE(jeffrey4l): Overwrite the default value for # logging_context_format_string. Add project_id into it. conf.set_default('logging_context_format_string', '%(asctime)s.%(msecs)03d %(process)d %(levelname)s' ' %(name)s [%(request_id)s %(user_identity)s]' ' [project_id:%(project_id)s] %(message)s') conf(project='zaqar', prog='zaqar-server') log.setup(conf, 'zaqar') gmr.TextGuruMeditation.setup_autorun(version, conf=conf) server = bootstrap.Bootstrap(conf) # The following code is to daemonize zaqar-server to avoid # an issue with wsgiref writing to stdout/stderr when we don't # want it to. This is specifically needed to allow zaqar to # run under devstack, but it may also be useful for other scenarios. # Open /dev/zero and /dev/null for redirection. # Daemonizing zaqar-server is needed *just* when running under devstack # and when zaqar is invoked with the `daemon` command line option. if conf.daemon: zerofd = os.open('/dev/zero', os.O_RDONLY) nullfd = os.open('/dev/null', os.O_WRONLY) # Close the stdthings and reassociate them with a non terminal os.dup2(zerofd, 0) os.dup2(nullfd, 1) os.dup2(nullfd, 2) # Detach process context, this requires 2 forks. try: pid = os.fork() if pid > 0: os._exit(0) except OSError: os._exit(1) try: pid = os.fork() if pid > 0: os._exit(0) except OSError: os._exit(2) server.run() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/cmd/status.py0000664000175100017510000000321715033040005016517 0ustar00mylesmyles# Copyright (c) 2018 NEC, Corp.
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from oslo_config import cfg from oslo_upgradecheck import common_checks from oslo_upgradecheck import upgradecheck from zaqar.i18n import _ class Checks(upgradecheck.UpgradeCommands): """Upgrade checks for the zaqar-status upgrade check command Upgrade checks should be added as separate methods in this class and added to _upgrade_checks tuple. """ # The format of the check functions is to return an # oslo_upgradecheck.upgradecheck.Result # object with the appropriate # oslo_upgradecheck.upgradecheck.Code and details set. # If the check hits warnings or failures then those should be stored # in the returned Result's "details" attribute. The # summary will be rolled up at the end of the check() method. _upgrade_checks = ( (_('policy File JSON to YAML Migration'), (common_checks.check_policy_json, {'conf': cfg.CONF})), ) def main(): return upgradecheck.main( cfg.CONF, project='zaqar', upgrade_command=Checks()) if __name__ == '__main__': sys.exit(main()) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5640135 zaqar-20.1.0.dev29/zaqar/common/0000775000175100017510000000000015033040026015347 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/common/__init__.py0000664000175100017510000000117615033040005017462 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Code common to Zaqar""" from zaqar.common import pipeline Pipeline = pipeline.Pipeline ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/common/access.py0000664000175100017510000000137315033040005017163 0ustar00mylesmyles# Copyright (c) 2013 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
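# NOTE(editor): minimal sketch, not part of the Zaqar tree. Per the Checks
# docstring in zaqar/cmd/status.py above, a new upgrade check is just another
# method that returns an oslo.upgradecheck Result and is listed in the
# _upgrade_checks tuple. The check below is hypothetical.
from oslo_upgradecheck import upgradecheck


class ExampleChecks(upgradecheck.UpgradeCommands):

    def _check_example(self):
        # Inspect deployment state here; return Code.WARNING or Code.FAILURE
        # with details when something needs operator attention.
        return upgradecheck.Result(upgradecheck.Code.SUCCESS)

    _upgrade_checks = (
        ('Example check', _check_example),
    )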
# TODO(cpp-cabrera): port to enum34 when that becomes available class Access(object): """An enumeration to represent access levels for APIs.""" public = 1 admin = 2 ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5650134 zaqar-20.1.0.dev29/zaqar/common/api/0000775000175100017510000000000015033040026016120 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/common/api/__init__.py0000664000175100017510000000000015033040005020214 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/common/api/api.py0000664000175100017510000000441415033040005017243 0ustar00mylesmyles# Copyright (c) 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import jsonschema from jsonschema import validators from oslo_log import log from zaqar.common import errors from zaqar.i18n import _ LOG = log.getLogger(__name__) class Api(object): schema = {} validators = {} def get_schema(self, action): """Returns the schema for an action :param action: Action for which params need to be validated. :type action: `str` :returns: Action's schema :rtype: dict :raises InvalidAction: if the action does not exist """ try: return self.schema[action] except KeyError: msg = _('{0} is not a valid action').format(action) raise errors.InvalidAction(msg) def validate(self, action, body): """Validates the request data This method relies on jsonschema and exists just as a way for third-party transports to validate the request. It's not recommended to validate every request since they are already validated server side. :param action: Action for which the body needs to be validated. :type action: `str` :param body: Params to validate :type body: dict :returns: True if the schema is valid, False otherwise :raises InvalidAction: if the action does not exist """ if action not in self.validators: schema = self.get_schema(action) self.validators[action] = validators.Draft4Validator(schema) try: self.validators[action].validate(body) except jsonschema.ValidationError as ex: LOG.debug('Schema validation failed. %s.', str(ex)) return False return True ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/common/api/errors.py0000664000175100017510000000234515033040005020007 0ustar00mylesmyles# Copyright (c) 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License.
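# NOTE(editor): minimal sketch, not part of the Zaqar tree. Subclassing the
# Api class from zaqar/common/api/api.py above with one hypothetical action
# schema; validate() compiles a Draft4Validator lazily and caches it per
# action in the class-level validators dict.
from zaqar.common.api.api import Api


class ExampleApi(Api):
    schema = {
        'queue_create': {
            'type': 'object',
            'properties': {'queue_name': {'type': 'string'}},
            'required': ['queue_name'],
        },
    }
    validators = {}


api = ExampleApi()
assert api.validate('queue_create', {'queue_name': 'demo'})
assert not api.validate('queue_create', {'queue_name': 42})
# api.get_schema('no_such_action') raises errors.InvalidAction.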
class ExceptionBase(Exception): msg_format = '' def __init__(self, **kwargs): msg = self.msg_format.format(**kwargs) super(ExceptionBase, self).__init__(msg) class BadRequest(ExceptionBase): """Raised when an invalid request is received.""" msg_format = 'Bad request. {description}' def __init__(self, description): """Initializes the error with contextual information. :param description: Error description """ super(BadRequest, self).__init__(description=description) class DocumentTypeNotSupported(ExceptionBase): """Raised when the content of a request has an unsupported format.""" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/common/api/request.py0000664000175100017510000000312415033040005020157 0ustar00mylesmyles# Copyright (c) 2013 Rackspace, Inc. # Copyright (c) 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. class Request(object): """General data for a Zaqar request Transport will generate a request object and send to this the API to be processed. :param action: Action to identify the API call being processed, i.e: 'get_queues', 'get_messages' :type action: str :param body: Request's body. Default: None :type body: str :param headers: Request headers. Default: None :type headers: dict :param api: Api entry point. i.e: 'queues.v1' :type api: `str`. :param env: Request environment. Default: None :type env: dict """ def __init__(self, action, body=None, headers=None, api=None, env=None): self._action = action self._body = body self._headers = headers or {} self._api = api self._env = env or {} def get_request(self): return {'action': self._action, 'body': self._body, 'headers': self._headers, 'api': self._api} ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/common/api/response.py0000664000175100017510000000261215033040005020326 0ustar00mylesmyles# Copyright (c) 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. class Response(object): """Common response class for Zaqar. All `zaqar.transport.base.Transport` implementations will return this to the higher level API which will then build an object out of it. :param request: The request sent to the server. :type request: `zaqar.transport.request.Request` :param body: Response's body :type body: `str` :param headers: Optional headers returned in the response. 
:type headers: dict """ __slots__ = ('_request', '_body', '_headers') def __init__(self, request, body, headers=None): self._request = request self._body = body self._headers = headers or {} def get_response(self): return {'request': self._request.get_request(), 'body': self._body, 'headers': self._headers} ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5660136 zaqar-20.1.0.dev29/zaqar/common/api/schemas/0000775000175100017510000000000015033040026017543 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/common/api/schemas/__init__.py0000664000175100017510000000000015033040005021637 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/common/api/schemas/flavors.py0000664000175100017510000000315715033040005021574 0ustar00mylesmyles# Copyright (c) 2013 Rackspace Hosting, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """flavors: JSON schema for zaqar-queues flavors resources.""" # NOTE(flaper87): capabilities can be anything. These will be unique to # each storage driver, so we don't perform any further validation at # the transport layer. patch_capabilities = { 'type': 'object', 'properties': { 'capabilities': { 'type': 'object' } } } # NOTE(gengchc): Add pool_list in flavor creation for removing pool_group patch_pool_list = { 'type': 'object', 'properties': { 'pool_list': { 'type': 'array' }, 'additionalProperties': False } } create = { 'type': 'object', 'properties': { 'pool_list': patch_pool_list['properties']['pool_list'], 'capabilities': patch_capabilities['properties']['capabilities'] }, # NOTE(flaper87): capabilities need not be present. Storage drivers # must provide reasonable defaults. # NOTE(wanghao): remove pool in Newton release. 'oneOf': [{'required': ['pool_list']}], 'additionalProperties': False } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/common/api/schemas/pools.py0000664000175100017510000000406615033040005021254 0ustar00mylesmyles# Copyright (c) 2013 Rackspace Hosting, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """pools: JSON schema for zaqar-queues pools resources.""" # NOTE(cpp-cabrera): options can be anything. These will be unique to # each storage driver, so we don't perform any further validation at # the transport layer. 
patch_options = { 'type': 'object', 'properties': { 'options': { 'type': 'object' } } } patch_uri = { 'type': 'object', 'properties': { 'uri': { 'type': 'string', 'minLength': 0, 'maxLength': 255, 'format': 'uri' }, 'additionalProperties': False } } # NOTE(gengchc): remove pool_group add flavor patch_flavor = { 'type': 'object', 'properties': { 'flavor': { 'type': 'string', 'minLength': 0, 'maxLength': 255 }, 'additionalProperties': False } } patch_weight = { 'type': 'object', 'properties': { 'weight': { 'type': 'integer', 'minimum': 0, 'maximum': 2**32 - 1 }, 'additionalProperties': False } } create = { 'type': 'object', 'properties': { 'weight': patch_weight['properties']['weight'], 'flavor': patch_flavor['properties']['flavor'], 'uri': patch_uri['properties']['uri'], 'options': patch_options['properties']['options'] }, # NOTE(cpp-cabrera): options need not be present. Storage drivers # must provide reasonable defaults. 'required': ['uri', 'weight'], 'additionalProperties': False } ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5660136 zaqar-20.1.0.dev29/zaqar/common/api/schemas/v1_1/0000775000175100017510000000000015033040026020311 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/common/api/schemas/v1_1/__init__.py0000664000175100017510000000000015033040005022405 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/common/api/schemas/v1_1/flavors.py0000664000175100017510000000245715033040005022344 0ustar00mylesmyles# Copyright (c) 2013 Rackspace Hosting, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """flavors: JSON schema for zaqar-queues flavors resources.""" # NOTE(flaper87): capabilities can be anything. These will be unique to # each storage driver, so we don't perform any further validation at # the transport layer. patch_capabilities = { 'type': 'object', 'properties': { 'capabilities': { 'type': 'object' } } } create = { 'type': 'object', 'properties': { 'capabilities': patch_capabilities['properties']['capabilities'] }, # NOTE(flaper87): capabilities need not be present. Storage drivers # must provide reasonable defaults. # NOTE(wanghao): remove the whole folder when we remove the 1.1 API # totally. 'additionalProperties': True } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/common/api/utils.py0000664000175100017510000001627415033040005017641 0ustar00mylesmyles# Copyright (c) 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. 
You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. import functools import uuid from oslo_log import log as logging from oslo_utils import strutils import zaqar.common.api.errors as api_errors import zaqar.common.api.response as response from zaqar.i18n import _ LOG = logging.getLogger(__name__) def sanitize(document, spec=None, doctype=dict): """Validates a document and drops undesired fields. :param document: A dict to verify according to `spec`. :param spec: (Default None) Iterable describing expected fields, yielding tuples with the form of: (field_name, value_type, default_value) Note that value_type may either be a Python type, or the special string '*' to accept any type. default_value is the default to give the field if it is missing, or None to require that the field be present. If spec is None, the incoming documents will not be validated. :param doctype: type of document to expect; must be either JSONObject or JSONArray. :raises DocumentTypeNotSupported: if document type is not supported :raises TypeError: if document type is neither a JSONObject nor JSONArray :returns: A sanitized, filtered version of the document. If the document is a list of objects, each object will be filtered and returned in a new list. If, on the other hand, the document is expected to contain a single object, that object's fields will be filtered and the resulting object will be returned. """ if doctype is dict: if not isinstance(document, dict): raise api_errors.DocumentTypeNotSupported() return document if spec is None else filter_fields(document, spec) if doctype is list: if not isinstance(document, list): raise api_errors.DocumentTypeNotSupported() if spec is None: return document return [filter_fields(obj, spec) for obj in document] raise TypeError(_('Doctype must be either a JSONObject or JSONArray')) def filter_fields(document, spec): """Validates and retrieves typed fields from a single document. Sanitizes a dict-like document by checking it against a list of field specs, and returning only those fields specified. :param document: dict-like object :param spec: iterable describing expected fields, yielding tuples with the form of: (field_name, value_type, default_value). Note that value_type may either be a Python type, or the special string '*' to accept any type. :raises BadRequest: if a required field is missing or a field is not an instance of the specified type :returns: A filtered dict containing only the fields listed in the spec """ filtered = {} for name, value_type, default_value in spec: filtered[name] = get_checked_field(document, name, value_type, default_value) return filtered def get_checked_field(document, name, value_type, default_value): """Validates and retrieves a typed field from a document. This function attempts to look up doc[name], and raises appropriate errors if the field is missing or not an instance of the given type. :param document: dict-like object :param name: field name :param value_type: expected value type, or '*' to accept any type :param default_value: Default value to use if the value is missing, or None to make the value required.
:raises BadRequest: if the field is missing or not an instance of value_type :returns: value obtained from doc[name] """ try: value = document[name] except KeyError: if default_value is not None: value = default_value else: description = _('Missing "{name}" field.').format(name=name) raise api_errors.BadRequest(description) # PERF(kgriffs): We do our own little spec thing because it is way # faster than jsonschema. if value_type == '*' or isinstance(value, value_type): return value description = _('The value of the "{name}" field must be a {vtype}.') description = description.format(name=name, vtype=value_type.__name__) raise api_errors.BadRequest(description) def get_client_uuid(req): """Read a required Client-ID from a request. :param req: Request object :returns: A UUID object or A string of client id """ try: return uuid.UUID(req._headers.get('Client-ID')) except ValueError: return req._headers.get('Client-ID') def get_headers(req): kwargs = {} # TODO(vkmc) We should add a control here to make sure # that the headers/request combination is possible # e.g. we cannot have messages_post with grace if req._body.get('marker') is not None: kwargs['marker'] = req._body.get('marker') if req._body.get('limit') is not None: kwargs['limit'] = int(req._body.get('limit')) if req._body.get('detailed') is not None: kwargs['detailed'] = strutils.bool_from_string( req._body.get('detailed')) if req._body.get('echo') is not None: kwargs['echo'] = strutils.bool_from_string(req._body.get('echo')) if req._body.get('include_claimed') is not None: kwargs['include_claimed'] = strutils.bool_from_string( req._body.get('include_claimed')) if req._body.get('include_delayed') is not None: kwargs['include_delayed'] = strutils.bool_from_string( req._body.get('include_delayed')) if req._body.get('ttl') is not None: kwargs['ttl'] = int(req._body.get('ttl')) if req._body.get('grace') is not None: kwargs['grace'] = int(req._body.get('grace')) return kwargs def on_exception_sends_500(func): """Handles generic Exceptions in API endpoints This decorator catches generic Exceptions and returns a generic Response. """ @functools.wraps(func) def wrapper(*args, **kwargs): try: return func(*args, **kwargs) except Exception as ex: error = _("Unexpected error.") headers = {'status': 500} # args[0] - Endpoints object, args[1] - Request object. req = args[1] LOG.exception(error) return error_response(req, ex, headers, error) return wrapper def error_response(req, exception, headers=None, error=None): body = {'exception': str(exception), 'error': error} resp = response.Response(req, body, headers) return resp def format_message(message, claim_id=None): return { 'id': message['id'], 'claim_id': claim_id, 'ttl': message['ttl'], 'age': message['age'], 'body': message['body'], } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/common/auth.py0000664000175100017510000000654215033040005016666 0ustar00mylesmyles# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
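# NOTE(editor): minimal sketch, not part of the Zaqar tree. The sanitize()/
# filter_fields() helpers in zaqar/common/api/utils.py above consume a spec of
# (field_name, value_type, default_value) triples; a default of None marks a
# field as required. Field names below are illustrative only.
from zaqar.common.api import utils as api_utils

spec = (
    ('queue_name', str, None),  # required: no default
    ('limit', int, 10),         # optional: falls back to 10
)

doc = {'queue_name': 'demo', 'extra': 'silently dropped'}
assert api_utils.sanitize(doc, spec) == {'queue_name': 'demo', 'limit': 10}
# api_utils.sanitize({'limit': 5}, spec) raises BadRequest, because the
# required "queue_name" field is missing.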
from keystoneauth1 import loading from keystoneauth1 import session from keystoneclient.v3 import client from oslo_config import cfg PASSWORD_PLUGIN = 'password' TRUSTEE_CONF_GROUP = 'trustee' KEYSTONE_AUTHTOKEN_GROUP = 'keystone_authtoken' loading.register_auth_conf_options(cfg.CONF, TRUSTEE_CONF_GROUP) loading.register_session_conf_options(cfg.CONF, TRUSTEE_CONF_GROUP) loading.register_auth_conf_options(cfg.CONF, KEYSTONE_AUTHTOKEN_GROUP) _ZAQAR_ENDPOINTS = {} def _config_options(): trustee_opts = loading.get_auth_common_conf_options() trustee_opts.extend(loading.get_auth_plugin_conf_options(PASSWORD_PLUGIN)) yield TRUSTEE_CONF_GROUP, trustee_opts def get_trusted_token(trust_id): """Return a Keystone token using the given trust_id.""" auth_plugin = loading.load_auth_from_conf_options( cfg.CONF, TRUSTEE_CONF_GROUP, trust_id=trust_id) trust_session = loading.load_session_from_conf_options( cfg.CONF, TRUSTEE_CONF_GROUP, auth=auth_plugin) return trust_session.auth.get_access(trust_session).auth_token def _get_admin_session(conf_group): auth_plugin = loading.load_auth_from_conf_options( cfg.CONF, conf_group) return session.Session( auth=auth_plugin, verify=getattr(cfg.CONF, conf_group).cafile) def _get_user_client(auth_plugin): sess = loading.load_session_from_conf_options( cfg.CONF, TRUSTEE_CONF_GROUP, auth=auth_plugin) return client.Client(session=sess, interface='public') def create_trust_id(auth_plugin, trustor_user_id, trustor_project_id, roles, expires_at): """Create a trust with the given user for the configured trustee user.""" admin_session = _get_admin_session(TRUSTEE_CONF_GROUP) trustee_user_id = admin_session.get_user_id() client = _get_user_client(auth_plugin) trust = client.trusts.create(trustor_user=trustor_user_id, trustee_user=trustee_user_id, project=trustor_project_id, impersonation=True, role_names=roles, expires_at=expires_at) return trust.id def get_public_endpoint(): """Get Zaqar's public endpoint from keystone""" global _ZAQAR_ENDPOINTS if _ZAQAR_ENDPOINTS: return _ZAQAR_ENDPOINTS zaqar_session = _get_admin_session(KEYSTONE_AUTHTOKEN_GROUP) auth = zaqar_session.auth if not auth: return _ZAQAR_ENDPOINTS catalogs = auth.get_auth_ref(zaqar_session).service_catalog try: _ZAQAR_ENDPOINTS['zaqar'] = catalogs.url_for(service_name='zaqar') except Exception: pass try: _ZAQAR_ENDPOINTS['zaqar-websocket'] = catalogs.url_for( service_name='zaqar-websocket') except Exception: pass return _ZAQAR_ENDPOINTS ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/common/cache.py0000664000175100017510000000143615033040005016765 0ustar00mylesmyles# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
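# NOTE(editor): minimal sketch, not part of the Zaqar tree. The helpers in
# zaqar/common/auth.py above implement the usual Keystone trust flow: the
# trustee credentials come from the [trustee] section of zaqar.conf, and given
# a trust_id previously returned by create_trust_id(), a later process can
# obtain a token that impersonates the trustor:
#
#     from zaqar.common import auth
#
#     token = auth.get_trusted_token(trust_id)  # trust_id assumed to be given
#
# get_public_endpoint() is independent of trusts: it authenticates with the
# [keystone_authtoken] credentials and caches Zaqar's public endpoints from
# the service catalog in the module-level _ZAQAR_ENDPOINTS dict.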
from oslo_cache import core def register_config(conf): core.configure(conf) def get_cache(conf): region = core.create_region() return core.configure_cache_region(conf, region) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/common/cli.py0000664000175100017510000000266715033040005016500 0ustar00mylesmyles# Copyright (c) 2013 Rackspace Hosting, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. import functools import sys from oslo_config import cfg from oslo_log import log as logging CONF = cfg.CONF LOG = logging.getLogger(__name__) def _fail(returncode, ex): """Handles terminal errors. :param returncode: process return code to pass to sys.exit :param ex: the error that occurred """ print(ex, file=sys.stderr) LOG.exception('Exception encountered:') sys.exit(returncode) def runnable(func): """Entry point wrapper. Note: This call blocks until the process is killed or interrupted. """ @functools.wraps(func) def _wrapper(): try: logging.register_options(CONF) logging.setup(CONF, 'zaqar') func() except KeyboardInterrupt: LOG.info('Terminating') except Exception as ex: _fail(1, ex) return _wrapper ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/common/consts.py0000664000175100017510000000445615033040005017240 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
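# NOTE(editor): minimal sketch, not part of the Zaqar tree. common/cache.py
# above is a thin wrapper over oslo.cache; the object it returns is a
# dogpile.cache region, so get/set/delete work as below once the [cache]
# section of the configuration enables a real backend.
from oslo_config import cfg

from zaqar.common import cache as oslo_cache

conf = cfg.CONF
oslo_cache.register_config(conf)    # registers the [cache] options
cache = oslo_cache.get_cache(conf)  # configured dogpile.cache region

cache.set('greeting', b'hello')
assert cache.get('greeting') == b'hello'
cache.delete('greeting')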
TRANSPORT_DRIVERS = ( TRANSPORT_WSGI, TRANSPORT_WEBSOCKET, ) = ( 'wsgi', 'websocket', ) MESSAGE_STORE = ( MSG_STORE_MONGODB, ) = ( 'mongodb', ) MANAGEMENT_STORE = ( MGMT_STORE_MONGODB, ) = ( 'mongodb', ) SUBSCRIPTION_OPS = ( SUBSCRIPTION_CREATE, SUBSCRIPTION_LIST, SUBSCRIPTION_GET, SUBSCRIPTION_DELETE, ) = ( 'subscription_create', 'subscription_list', 'subscription_get', 'subscription_delete', ) MESSAGE_OPS = ( MESSAGE_POST, MESSAGE_LIST, MESSAGE_GET, MESSAGE_GET_MANY, MESSAGE_DELETE, MESSAGE_DELETE_MANY, ) = ( 'message_post', 'message_list', 'message_get', 'message_get_many', 'message_delete', 'message_delete_many', ) QUEUE_OPS = ( QUEUE_CREATE, QUEUE_LIST, QUEUE_GET, QUEUE_DELETE, QUEUE_GET_STATS, QUEUE_PURGE ) = ( 'queue_create', 'queue_list', 'queue_get', 'queue_delete', 'queue_get_stats', 'queue_purge' ) CLAIM_OPS = ( CLAIM_CREATE, CLAIM_GET, CLAIM_UPDATE, CLAIM_DELETE, ) = ( 'claim_create', 'claim_get', 'claim_update', 'claim_delete', ) POOL_OPS = ( POOL_CREATE, POOL_LIST, POOL_GET, POOL_GET_DETAIL, POOL_UPDATE, POOL_DELETE, ) = ( 'pool_create', 'pool_list', 'pool_get', 'pool_get_detail', 'pool_update', 'pool_delete', ) FLAVOR_OPS = ( FLAVOR_CREATE, FLAVOR_LIST, FLAVOR_GET, FLAVOR_UPDATE, FLAVOR_DELETE, ) = ( 'flavor_create', 'flavor_list', 'flavor_get', 'flavor_update', 'flavor_delete', ) RETRY_OPS = ( RETRIES_WITH_NO_DELAY, MINIMUM_DELAY_RETRIES, MINIMUM_DELAY, MAXIMUM_DELAY, MAXIMUM_DELAY_RETRIES, LINEAR_INTERVAL, ) = ( 3, 3, 5, 30, 3, 5, ) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/common/decorators.py0000664000175100017510000001612315033040005020066 0ustar00mylesmyles# Copyright (c) 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import functools import msgpack from oslo_cache import core from oslo_log import log as logging from oslo_serialization import jsonutils LOG = logging.getLogger(__name__) class TransportLog(object): """Standard logging for transport driver responders This class implements a logging decorator that the transport driver responders can use for standard logging """ def __init__(self, resource_type): self.resource_type = resource_type def __call__(self, func): @functools.wraps(func) def wrapper(*args, **kwargs): # The below line takes function names like 'on_get' and 'on_patch' # and returns 'GET' and 'PATCH' respectively, so we do not need # the name of the HTTP method to be passed. method = func.__name__[3:].upper() LOG.debug('%(type)s %(method)s: %(arguments)s', {'type': self.resource_type, 'method': method, 'arguments': jsonutils.dumps(kwargs)}) return func(*args, **kwargs) return wrapper def memoized_getattr(meth): """Memoizes attributes returned by __getattr__ It can be used to remember the results from __getattr__ and reduce the debt of calling it again when the same attribute is accessed. This decorator memoizes attributes by setting them on the object itself. The wrapper returned by this decorator won't alter the returned value. 
:returns: A wrapper around the decorated method. """ @functools.wraps(meth) def wrapper(self, method_name): attr = meth(self, method_name) setattr(self, method_name, attr) return attr return wrapper def caches(keygen, ttl, cond=None): """Flags a getter method as being cached using oslo_cache. It is assumed that the containing class defines an attribute named `_cache` that is an instance of an oslo_cache backend. The getter should raise an exception if the value can't be loaded, which will skip the caching step. Otherwise, the getter must return a value that can be encoded with msgpack. Note that you can also flag a remover method such that it will purge an associated item from the cache, e.g.:: def project_cache_key(user, project=None): return user + ':' + str(project) class Project(object): def __init__(self, db, cache): self._db = db self._cache = cache @decorators.caches(project_cache_key, 60) def get_project(self, user, project=None): return self._db.get_project(user, project) @get_project.purges def del_project(self, user, project=None): self._db.delete_project(user, project) :param keygen: A static key generator function. This function must accept the same arguments as the getter, sans `self`. :param ttl: TTL for the cache entry, in seconds. :param cond: Conditional for whether or not to cache the value. Must be a function that takes a single value, and returns True or False. """ def purges_prop(remover): @functools.wraps(remover) def wrapper(self, *args, **kwargs): # First, purge from cache key = keygen(*args, **kwargs) self._cache.delete(key) # Remove/delete from origin remover(self, *args, **kwargs) return wrapper def prop(getter): @functools.wraps(getter) def wrapper(self, *args, **kwargs): key = keygen(*args, **kwargs) packed_value = self._cache.get(key, expiration_time=ttl) if packed_value is core.NO_VALUE: value = getter(self, *args, **kwargs) # Cache new value if desired if cond is None or cond(value): # NOTE(kgriffs): Setting use_bin_type is essential # for being able to distinguish between Unicode # and binary strings when decoding; otherwise, # both types are normalized to the MessagePack # str format family. packed_value = msgpack.packb(value, use_bin_type=True) self._cache.set(key, packed_value) else: # NOTE(kgriffs): unpackb does not default to UTF-8, # so we have to explicitly ask for it. value = msgpack.unpackb(packed_value) return value wrapper.purges = purges_prop return wrapper return prop def lazy_property(write=False, delete=True): """Creates a lazy property. :param write: Whether this property is "writable" :param delete: Whether this property can be deleted. """ def wrapper(fn): attr_name = '_lazy_' + fn.__name__ def getter(self): if not hasattr(self, attr_name): setattr(self, attr_name, fn(self)) return getattr(self, attr_name) def setter(self, value): setattr(self, attr_name, value) def deleter(self): delattr(self, attr_name) return property(fget=getter, fset=write and setter, fdel=delete and deleter, doc=fn.__doc__) return wrapper def api_version_manager(version_info): """Manage API versions based on their status This decorator disables `DEPRECATED` APIs by default unless the user explicitly enables it by adding it to the `enable_deprecated_api_versions` configuration option. :param version_info: Dictionary containing the API version info. 
""" api_version = version_info['id'] api_updated = version_info['updated'] deprecated = version_info['status'] == 'DEPRECATED' def wrapper(fn): @functools.wraps(fn) def register_api(driver, conf): if (deprecated and api_version not in conf.enable_deprecated_api_versions): return None if deprecated: LOG.warning('Enabling API version %(version)s. ' 'This version was marked as deprecated in ' '%(updated)s. Using it may expose security ' 'issues, unexpected behavior or damage your ' 'data.', {'version': api_version, 'updated': api_updated}) return fn(driver, conf) return register_api return wrapper ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/common/errors.py0000664000175100017510000000170015033040005017230 0ustar00mylesmyles# Copyright (c) 2013 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. class InvalidDriver(Exception): """A driver was not found or loaded.""" class PatternNotFound(Exception): """A string did not match the expected pattern or regex.""" class InvalidAction(Exception): """Raised when attempted a non existent action.""" class ConfigurationError(Exception): """An invalid value was used for a Zaqar configuration option.""" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/common/pipeline.py0000664000175100017510000000755015033040005017532 0ustar00mylesmyles# Copyright (c) 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. """This module implements a common Pipeline object. The pipeline can be used to enhance the storage layer with filtering, routing, multiplexing and the like. For example: >>> stages = [MessageFilter(), EncryptionFilter(), QueueController()] >>> pipeline = Pipeline(stages) Every stage has to implement the method it wants to hook into. This method will be called when the pipeline consumption gets to that point - stage ordering matters - and will continue unless the method call returns a value that is not None. At least one of the stages has to implement the calling method. If none of them do, an AttributeError exception will be raised. 
""" import contextlib from oslo_log import log as logging from zaqar.common import decorators from zaqar.i18n import _ LOG = logging.getLogger(__name__) class Pipeline(object): def __init__(self, pipeline=None): self._pipeline = pipeline and list(pipeline) or [] @decorators.memoized_getattr def __getattr__(self, name): with self.consumer_for(name) as consumer: return consumer @contextlib.contextmanager def consumer_for(self, method): """Creates a closure for `method` This method creates a closure to consume the pipeline for `method`. :params method: The method name to call on each stage :type method: `str` :returns: A callable to consume the pipeline. """ def consumer(*args, **kwargs): """Consumes the pipeline for `method` This function walks through the pipeline and calls `method` for each of the items in the pipeline. A warning will be logged for each stage not implementing `method` and an Attribute error will be raised if none of the stages do. :param args: Positional arguments to pass to the call. :param kwargs: Keyword arguments to pass to the call. :raises AttributeError: if none of the stages implement `method` """ # NOTE(flaper87): Used as a way to verify # the requested method exists in at least # one of the stages, otherwise AttributeError # will be raised. target = None result = None for stage in self._pipeline: try: target = getattr(stage, method) except AttributeError: sstage = str(stage) msgtmpl = _(u"Stage %(stage)s does not " "implement %(method)s") LOG.debug(msgtmpl, {'stage': sstage, 'method': method}) continue tmp = target(*args, **kwargs) # NOTE(flaper87): preserve the last, not None, result if tmp is not None: result = tmp # NOTE(flaper87): Will keep going forward # through the stageline unless the call returns # something. if result is not None: return result if target is None: msg = _('Method %s not found in any of ' 'the registered stages') % method LOG.error(msg) raise AttributeError(msg) yield consumer ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5680134 zaqar-20.1.0.dev29/zaqar/common/policies/0000775000175100017510000000000015033040026017156 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/common/policies/__init__.py0000664000175100017510000000235415033040005021270 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import itertools from zaqar.common.policies import base from zaqar.common.policies import claims from zaqar.common.policies import flavors from zaqar.common.policies import health from zaqar.common.policies import messages from zaqar.common.policies import pools from zaqar.common.policies import queues from zaqar.common.policies import subscription from zaqar.common.policies import topics def list_rules(): return itertools.chain( base.list_rules(), claims.list_rules(), flavors.list_rules(), health.list_rules(), messages.list_rules(), pools.list_rules(), queues.list_rules(), subscription.list_rules(), topics.list_rules(), ) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/common/policies/base.py0000664000175100017510000000164215033040005020442 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy ROLE_ADMIN = 'role:admin' RULE_ADMIN_OR_OWNER = 'is_admin:True or project_id:%(project_id)s' UNPROTECTED = '' rules = [ policy.RuleDefault( name='context_is_admin', check_str=ROLE_ADMIN ), policy.RuleDefault( name='admin_or_owner', check_str=RULE_ADMIN_OR_OWNER ) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/common/policies/claims.py0000664000175100017510000000401415033040005020774 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
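# NOTE(editor): minimal sketch, not part of the Zaqar tree. list_rules() in
# zaqar/common/policies/__init__.py above chains every module's defaults so
# they can be registered with an oslo.policy enforcer in one call; the target
# and creds mentioned below are placeholders.
from oslo_config import cfg
from oslo_policy import policy

from zaqar.common import policies

enforcer = policy.Enforcer(cfg.CONF)
enforcer.register_defaults(policies.list_rules())
# enforcer.authorize('queues:create', target, creds) then consults these
# defaults unless an operator overrides a rule in policy.yaml.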
from oslo_policy import policy from zaqar.common.policies import base CLAIMS = 'claims:%s' rules = [ policy.DocumentedRuleDefault( name=CLAIMS % 'create', check_str=base.UNPROTECTED, description='Claims a set of messages from the specified queue.', operations=[ { 'path': '/v2/queues/{queue_name}/claims', 'method': 'POST' } ] ), policy.DocumentedRuleDefault( name=CLAIMS % 'get', check_str=base.UNPROTECTED, description='Queries the specified claim for the specified queue.', operations=[ { 'path': '/v2/queues/{queue_name}/claims/{claim_id}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=CLAIMS % 'delete', check_str=base.UNPROTECTED, description='Releases the specified claim for the specified queue.', operations=[ { 'path': '/v2/queues/{queue_name}/claims/{claim_id}', 'method': 'DELETE' } ] ), policy.DocumentedRuleDefault( name=CLAIMS % 'update', check_str=base.UNPROTECTED, description='Updates the specified claim for the specified queue.', operations=[ { 'path': '/v2/queues/{queue_name}/claims/{claim_id}', 'method': 'PATCH' } ] ) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/common/policies/flavors.py0000664000175100017510000000417415033040005021207 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from zaqar.common.policies import base FLAVORS = 'flavors:%s' rules = [ policy.DocumentedRuleDefault( name=FLAVORS % 'get_all', check_str=base.UNPROTECTED, description='Lists flavors.', operations=[ { 'path': '/v2/flavors', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=FLAVORS % 'create', check_str=base.ROLE_ADMIN, description='Creates a new flavor.', operations=[ { 'path': '/v2/flavors/{flavor_name}', 'method': 'PUT' } ] ), policy.DocumentedRuleDefault( name=FLAVORS % 'get', check_str=base.UNPROTECTED, description='Shows details for a flavor.', operations=[ { 'path': '/v2/flavors/{flavor_name}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=FLAVORS % 'delete', check_str=base.ROLE_ADMIN, description='Deletes the specified flavor.', operations=[ { 'path': '/v2/flavors/{flavor_name}', 'method': 'DELETE' } ] ), policy.DocumentedRuleDefault( name=FLAVORS % 'update', check_str=base.ROLE_ADMIN, description='Update flavor.', operations=[ { 'path': '/v2/flavors/{flavor_name}', 'method': 'PATCH' } ] ) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/common/policies/health.py0000664000175100017510000000243115033040005020772 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from zaqar.common.policies import base PING = 'ping:%s' HEALTH = 'health:%s' rules = [ policy.DocumentedRuleDefault( name=PING % 'get', check_str=base.UNPROTECTED, description='Simple health check for end user(ping).', operations=[ { 'path': '/v2/ping', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=HEALTH % 'get', check_str=base.ROLE_ADMIN, description='Detailed health check for cloud operator/admin.', operations=[ { 'path': '/v2/health', 'method': 'GET' } ] ) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/common/policies/messages.py0000664000175100017510000000447715033040005021350 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from zaqar.common.policies import base MESSAGES = 'messages:%s' rules = [ policy.DocumentedRuleDefault( name=MESSAGES % 'get_all', check_str=base.UNPROTECTED, description='List all message in a message queue.', operations=[ { 'path': '/v2/queues/{queue_name}/messages', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=MESSAGES % 'create', check_str=base.UNPROTECTED, description='Create a message in a message queue.', operations=[ { 'path': '/v2/queues/{queue_name}/messages', 'method': 'POST' } ] ), policy.DocumentedRuleDefault( name=MESSAGES % 'get', check_str=base.UNPROTECTED, description='Retrieve a specific message from a message queue.', operations=[ { 'path': '/v2/queues/{queue_name}/messages/{message_id}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=MESSAGES % 'delete', check_str=base.UNPROTECTED, description='Delete a specific message from a message queue.', operations=[ { 'path': '/v2/queues/{queue_name}/messages/{message_id}', 'method': 'DELETE' } ] ), policy.DocumentedRuleDefault( name=MESSAGES % 'delete_all', check_str=base.UNPROTECTED, description='Delete all messages from a message queue.', operations=[ { 'path': '/v2/queues/{queue_name}/messages', 'method': 'DELETE' } ] ) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/common/policies/pools.py0000664000175100017510000000407515033040005020667 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from zaqar.common.policies import base POOLS = 'pools:%s' rules = [ policy.DocumentedRuleDefault( name=POOLS % 'get_all', check_str=base.ROLE_ADMIN, description='Lists pools.', operations=[ { 'path': '/v2/pools', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=POOLS % 'create', check_str=base.ROLE_ADMIN, description='Creates a pool.', operations=[ { 'path': '/v2/pools/{pool_name}', 'method': 'PUT' } ] ), policy.DocumentedRuleDefault( name=POOLS % 'get', check_str=base.ROLE_ADMIN, description='Shows details for a pool.', operations=[ { 'path': '/v2/pools/{pool_name}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=POOLS % 'delete', check_str=base.ROLE_ADMIN, description='Delete pool.', operations=[ { 'path': '/v2/pools/{pool_name}', 'method': 'DELETE' } ] ), policy.DocumentedRuleDefault( name=POOLS % 'update', check_str=base.ROLE_ADMIN, description='Update pool.', operations=[ { 'path': '/v2/pools/{pool_name}', 'method': 'PATCH' } ] ) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/common/policies/queues.py0000664000175100017510000000614715033040005021044 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
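# NOTE: a minimal, illustrative sketch of how these policy modules are consumed. Enforcer, register_defaults() and authorize() are standard oslo.policy API; the target and credential values are assumptions for the example, not part of this module. # # from oslo_config import cfg # from oslo_policy import policy as oslo_policy # from zaqar.common.policies import queues as queues_policies # # enforcer = oslo_policy.Enforcer(cfg.CONF) # enforcer.register_defaults(queues_policies.list_rules()) # allowed = enforcer.authorize('queues:create', target={'project_id': 'demo'}, creds={'project_id': 'demo', 'roles': ['member']})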
from oslo_policy import policy from zaqar.common.policies import base QUEUES = 'queues:%s' rules = [ policy.DocumentedRuleDefault( name=QUEUES % 'get_all', check_str=base.UNPROTECTED, description='List all message queues.', operations=[ { 'path': '/v2/queues', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=QUEUES % 'create', check_str=base.UNPROTECTED, description='Create a message queue.', operations=[ { 'path': '/v2/queues/{queue_name}', 'method': 'PUT' } ] ), policy.DocumentedRuleDefault( name=QUEUES % 'get', check_str=base.UNPROTECTED, description='Get details about a specific message queue.', operations=[ { 'path': '/v2/queues/{queue_name}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=QUEUES % 'delete', check_str=base.UNPROTECTED, description='Delete a message queue.', operations=[ { 'path': '/v2/queues/{queue_name}', 'method': 'DELETE' } ] ), policy.DocumentedRuleDefault( name=QUEUES % 'update', check_str=base.UNPROTECTED, description='Update a message queue.', operations=[ { 'path': '/v2/queues/{queue_name}', 'method': 'PATCH' } ] ), policy.DocumentedRuleDefault( name=QUEUES % 'stats', check_str=base.UNPROTECTED, description='Get statistics about a specific message queue.', operations=[ { 'path': '/v2/queues/{queue_name}/stats', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=QUEUES % 'share', check_str=base.UNPROTECTED, description='Create a pre-signed URL for a given message queue.', operations=[ { 'path': '/v2/queues/{queue_name}/share', 'method': 'POST' } ] ), policy.DocumentedRuleDefault( name=QUEUES % 'purge', check_str=base.UNPROTECTED, description='Purge resources from a particular message queue.', operations=[ { 'path': '/v2/queues/{queue_name}/purge', 'method': 'POST' } ] ) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/common/policies/subscription.py0000664000175100017510000000542515033040005022257 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
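# NOTE: the DocumentedRuleDefault entries in these modules also feed oslo.policy's sample-file tooling; assuming Zaqar's usual 'zaqar' entry-point namespace, an operator can render the effective defaults with: # # oslopolicy-sample-generator --namespace zaqar --output-file policy.yaml.sample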
from oslo_policy import policy from zaqar.common.policies import base SUBSCRIPTIONS = 'subscription:%s' rules = [ policy.DocumentedRuleDefault( name=SUBSCRIPTIONS % 'get_all', check_str=base.UNPROTECTED, description='Lists a queue\'s subscriptions.', operations=[ { 'path': '/v2/queues/{queue_name}/subscriptions', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=SUBSCRIPTIONS % 'create', check_str=base.UNPROTECTED, description='Creates a subscription.', operations=[ { 'path': '/v2/queues/{queue_name}/subscriptions', 'method': 'POST' } ] ), policy.DocumentedRuleDefault( name=SUBSCRIPTIONS % 'get', check_str=base.UNPROTECTED, description='Shows details for a subscription.', operations=[ { 'path': '/v2/queues/{queue_name}/subscriptions' '/{subscription_id}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=SUBSCRIPTIONS % 'delete', check_str=base.UNPROTECTED, description='Deletes the specified subscription.', operations=[ { 'path': '/v2/queues/{queue_name}/subscriptions' '/{subscription_id}', 'method': 'DELETE' } ] ), policy.DocumentedRuleDefault( name=SUBSCRIPTIONS % 'update', check_str=base.UNPROTECTED, description='Updates a subscription.', operations=[ { 'path': '/v2/queues/{queue_name}/subscriptions' '/{subscription_id}', 'method': 'PATCH' } ] ), policy.DocumentedRuleDefault( name=SUBSCRIPTIONS % 'confirm', check_str=base.UNPROTECTED, description='Confirms a subscription.', operations=[ { 'path': '/v2/queues/{queue_name}/subscriptions' '/{subscription_id}/confirm', 'method': 'PUT' } ] ) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/common/policies/topics.py0000664000175100017510000000534315033040005021033 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
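# NOTE: any of these defaults can be overridden in the operator's policy.yaml; for example (illustrative only), restricting topic purging to admins: # # "topics:purge": "role:admin"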
from oslo_policy import policy from zaqar.common.policies import base TOPICS = 'topics:%s' rules = [ policy.DocumentedRuleDefault( name=TOPICS % 'get_all', check_str=base.UNPROTECTED, description='List all topics.', operations=[ { 'path': '/v2/topics', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=TOPICS % 'create', check_str=base.UNPROTECTED, description='Create a topic.', operations=[ { 'path': '/v2/topics/{topic_name}', 'method': 'PUT' } ] ), policy.DocumentedRuleDefault( name=TOPICS % 'get', check_str=base.UNPROTECTED, description='Get details about a specific topic.', operations=[ { 'path': '/v2/topics/{topic_name}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=TOPICS % 'delete', check_str=base.UNPROTECTED, description='Delete a topic.', operations=[ { 'path': '/v2/topics/{topic_name}', 'method': 'DELETE' } ] ), policy.DocumentedRuleDefault( name=TOPICS % 'update', check_str=base.UNPROTECTED, description='Update a topic.', operations=[ { 'path': '/v2/topics/{topic_name}', 'method': 'PATCH' } ] ), policy.DocumentedRuleDefault( name=TOPICS % 'stats', check_str=base.UNPROTECTED, description='Get statistics about a specific topic.', operations=[ { 'path': '/v2/topics/{topic_name}/stats', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=TOPICS % 'purge', check_str=base.UNPROTECTED, description='Purge resources from a particular topic.', operations=[ { 'path': '/v2/topics/{topic_name}/purge', 'method': 'POST' } ] ) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5680134 zaqar-20.1.0.dev29/zaqar/common/storage/0000775000175100017510000000000015033040026017013 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/common/storage/__init__.py0000664000175100017510000000000015033040005021107 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/common/storage/select.py0000664000175100017510000000335115033040005020643 0ustar00mylesmyles# Copyright (c) 2013 Rackspace Hosting, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """select: a collection of algorithms for choosing an entry from a collection.""" import random def weighted(objs, key='weight', generator=random.randint): """Perform a weighted select given a list of objects. :param objs: a list of objects containing at least the field `key` :type objs: [dict] :param key: the field in each obj that corresponds to weight :type key: str :param generator: a number generator taking two ints :type generator: function(int, int) -> int :return: an object :rtype: dict """ acc = 0 lookup = [] # construct weighted spectrum for o in objs: # NOTE(cpp-cabrera): skip objs with 0 weight if o[key] <= 0: continue acc += o[key] lookup.append((o, acc)) # no objects were found if not lookup: return None # NOTE(cpp-cabrera): select an object from the lookup table.
If # the selector lands in the interval [lower, upper), then choose # it. gen = generator selector = gen(0, acc - 1) lower = 0 for obj, upper in lookup: if lower <= selector < upper: return obj lower = upper ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5680134 zaqar-20.1.0.dev29/zaqar/common/transport/0000775000175100017510000000000015033040026017403 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/common/transport/__init__.py0000664000175100017510000000000015033040005021477 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5680134 zaqar-20.1.0.dev29/zaqar/common/transport/wsgi/0000775000175100017510000000000015033040026020354 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/common/transport/wsgi/__init__.py0000664000175100017510000000000015033040005022450 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/common/transport/wsgi/helpers.py0000664000175100017510000003044115033040005022367 0ustar00mylesmyles# Copyright (c) 2013 Rackspace Hosting, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """wsgi transport helpers.""" import re from stevedore import driver import uuid import falcon from oslo_log import log as logging from oslo_utils import versionutils from zaqar.common import urls from zaqar import context from zaqar.i18n import _ from zaqar.transport import validation LOG = logging.getLogger(__name__) def verify_pre_signed_url(key, req, resp, params): headers = req.headers project = headers.get('X-PROJECT-ID') expires = headers.get('URL-EXPIRES') methods = headers.get('URL-METHODS', '').split(',') paths = headers.get('URL-PATHS', '').split(',') signature = headers.get('URL-SIGNATURE') if not signature: return if req.method not in methods: raise falcon.HTTPNotFound() # Support to query single resource with pre-signed url if not any([p for p in paths if re.search(p, req.path)]): raise falcon.HTTPNotFound() try: verified = urls.verify_signed_headers_data(key, paths, project=project, methods=methods, expires=expires, signature=signature) except ValueError: raise falcon.HTTPNotFound() if not verified: raise falcon.HTTPNotFound() def get_client_uuid(req): """Read a required Client-ID from a request. :param req: A falcon.Request object :returns: A UUID object or A string of client id """ try: return uuid.UUID(req.get_header('Client-ID', required=True)) except ValueError: return req.get_header('Client-ID', required=True) def extract_project_id(req, resp, params): """Adds `project_id` to the list of params for all responders Meant to be used as a `before` hook. 
:param req: request sent :type req: falcon.request.Request :param resp: response object to return :type resp: falcon.response.Response :param params: additional parameters passed to responders :type params: dict :rtype: None """ api_version_string = req.path.split('/')[1] params['project_id'] = req.get_header('X-PROJECT-ID') if not api_version_string: # NOTE(jaosorior): The versions resource is public and shouldn't need # a check for the project-id. return if params['project_id'] == "": raise falcon.HTTPBadRequest( title='Empty project header not allowed', description=_('X-PROJECT-ID cannot be an empty string. Specify ' 'the right header X-PROJECT-ID and retry.')) if not params['project_id'] and versionutils.is_compatible( 'v1.1', api_version_string, same_major=False): raise falcon.HTTPBadRequest( title='Project-Id Missing', description=_('The header X-PROJECT-ID was missing')) def require_client_id(validate, req, resp, params): """Makes sure the header `Client-ID` is present in the request Use as a before hook. :param validate: A validator function that will be used to check the format of client id against configured limits. :param req: request sent :type req: falcon.request.Request :param resp: response object to return :type resp: falcon.response.Response :param params: additional parameters passed to responders :type params: dict :rtype: None """ if req.path.startswith('/v1.1/') or req.path.startswith('/v2/'): try: validate(req.get_header('Client-ID', required=True)) except ValueError: description = _('Malformed hexadecimal UUID.') raise falcon.HTTPBadRequest( title='Wrong UUID value', description=description) except validation.ValidationFailed as ex: raise falcon.HTTPBadRequest(title=str(ex)) else: # NOTE(wanghao): Since we changed the get_client_uuid to support # other format of client id, so need to check the uuid here for # v1 API. try: client_id = req.get_header('Client-ID') if client_id or client_id == '': uuid.UUID(client_id) except ValueError: description = _('Malformed hexadecimal UUID.') raise falcon.HTTPBadRequest( title='Wrong UUID value', description=description) def validate_queue_identification(validate, req, resp, params): """Hook for validating the queue name and project id in requests. The queue name validation is short-circuited if 'queue_name' does not exist in `params`. This hook depends on the `get_project` hook, which must be installed upstream. :param validate: A validator function that will be used to check the queue name against configured limits. functools.partial or a closure must be used to set this first arg, and expose the remaining ones as a Falcon hook interface. :param req: Falcon request object :param resp: Falcon response object :param params: Responder params dict """ try: validate(params['queue_name'], params['project_id']) except KeyError: # NOTE(kgriffs): queue_name not in params, so nothing to do pass except validation.ValidationFailed: project = params['project_id'] queue = params['queue_name'] LOG.debug('Invalid queue name "%(queue)s" submitted for ' 'project: %(project)s', {'queue': queue, 'project': project}) raise falcon.HTTPBadRequest( title=_('Invalid queue identification'), description=_('The format of the submitted queue ' 'name or project id is not valid.')) def require_accepts_json(req, resp, params): """Raises an exception if the request does not accept JSON Meant to be used as a `before` hook. 
:param req: request sent :type req: falcon.request.Request :param resp: response object to return :type resp: falcon.response.Response :param params: additional parameters passed to responders :type params: dict :rtype: None :raises HTTPNotAcceptable: if the request does not accept JSON """ if not req.client_accepts('application/json'): raise falcon.HTTPNotAcceptable( description='Endpoint only serves `application/json`; ' 'specify client-side media type support with ' 'the "Accept" header.', href='http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html', href_text='14.1 Accept, Hypertext Transfer Protocol -- HTTP/1.1') def require_content_type_be_non_urlencoded(req, resp, params): """Raises an exception on "x-www-form-urlencoded" content type of request. If request has body and "Content-Type" header has "application/x-www-form-urlencoded" value (case-insensitive), this function raises falcon.HTTPBadRequest exception. This strange function exists only to prevent bug/1547100 in a backward compatible way. Meant to be used as a `before` hook. :param req: request sent :type req: falcon.request.Request :param resp: response object to return :type resp: falcon.response.Response :param params: additional parameters passed to responders :type params: dict :rtype: None :raises HTTPBadRequest: if request has body and "Content-Type" header has "application/x-www-form-urlencoded" value """ if req.content_length is None: return if req.content_type and (req.content_type.lower() == 'application/x-www-form-urlencoded'): title = _('Invalid Content-Type') description = _('Endpoint does not accept ' '`application/x-www-form-urlencoded` content; ' 'currently supported media type is ' '`application/json`; specify proper client-side ' 'media type with the "Content-Type" header.') raise falcon.HTTPBadRequest(title=title, description=description) def inject_context(req, resp, params): """Inject context value into request environment. :param req: request sent :type req: falcon.request.Request :param resp: response object :type resp: falcon.response.Response :param params: additional parameters passed to responders :type params: dict :rtype: None """ client_id = req.get_header('Client-ID') request_id = req.headers.get('X-Openstack-Request-ID') auth_token = req.headers.get('X-AUTH-TOKEN') project_id = params.get('project_id') user_id = req.headers.get('X-USER-ID') domain_id = req.headers.get('X-DOMAIN-ID') project_domain_id = req.headers.get('X-PROJECT-DOMAIN-ID') user_domain_id = req.headers.get('X-USER-DOMAIN-ID') roles = req.headers.get('X-ROLES') roles = roles and roles.split(',') or [] ctxt = context.RequestContext(project_id=project_id, client_id=client_id, request_id=request_id, auth_token=auth_token, user_id=user_id, roles=roles, domain_id=domain_id, project_domain_id=project_domain_id, user_domain_id=user_domain_id) req.env['zaqar.context'] = ctxt def validate_topic_identification(validate, req, resp, params): """Hook for validating the topic name and project id in requests. The queue name validation is short-circuited if 'topic_name' does not exist in `params`. This hook depends on the `get_project` hook, which must be installed upstream. :param validate: A validator function that will be used to check the topic name against configured limits. functools.partial or a closure must be used to set this first arg, and expose the remaining ones as a Falcon hook interface. 
:param req: Falcon request object :param resp: Falcon response object :param params: Responder params dict """ try: validate(params['topic_name'], params['project_id']) except KeyError: # NOTE(kgriffs): topic not in params, so nothing to do pass except validation.ValidationFailed: project = params['project_id'] topic = params['topic_name'] LOG.debug('Invalid topic name "%(topic)s" submitted for ' 'project: %(project)s', {'topic': topic, 'project': project}) raise falcon.HTTPBadRequest( title=_('Invalid topic identification'), description=_('The format of the submitted topic ' 'name or project id is not valid.')) def verify_extra_spec(req, resp, params): """Extract `extra_spec` from request and verify it. Meant to be used as a `before` hook. :param req: request sent :type req: falcon.request.Request :param resp: response object to return :type resp: falcon.response.Response :param params: additional parameters passed to responders :type params: dict :rtype: None """ extra_spec = req.get_header('EXTRA-SPEC') if not extra_spec: return if extra_spec == "": raise falcon.HTTPBadRequest( title='Empty extra spec not allowed', description=_('Extra spec cannot be empty ' 'if the header is specified.')) extra_spec_schema = extra_spec.split(':')[0] if extra_spec_schema: mgr = driver.DriverManager('zaqar.extraspec.tasks', extra_spec_schema, invoke_on_load=True) mgr.driver.execute(extra_spec) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/common/urls.py0000664000175100017510000001004315033040005016701 0ustar00mylesmyles# Copyright (c) 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import hashlib import hmac from oslo_utils import timeutils _DATE_FORMAT = '%Y-%m-%dT%H:%M:%S' def create_signed_url(key, paths, project=None, expires=None, methods=None): """Creates a signed URL for the specified paths This function will create a pre-signed URL for `paths` using the specified options or the default ones. The signature will be the hex value of the hmac created using `key` :param key: A string to use as a `key` for the hmac generation. :param paths: A list of strings representing URL paths. :param project: (Default None) The ID of the project this URL belongs to. :param methods: (Default ['GET']) A list of methods that will be supported by the generated URL. :param expires: (Default time() + 86400) The expiration date for the generated URL. """ methods = methods or ['GET'] if key is None: raise ValueError('The `key` can\'t be None') if not isinstance(paths, list) or not paths: raise ValueError('`paths` must be a non-empty list') if not isinstance(methods, list): raise ValueError('`methods` should be a list') # NOTE(flaper87): The default expiration time is 1day # Evaluate whether this should be configurable. We may # also want to have a "maximum" expiration time. Food # for thought. if expires is not None: # NOTE(flaper87): Verify if the format is correct # and normalize the value to UTC.
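# The int() probe below intentionally rejects integer-like values (e.g. epoch timestamps), so only ISO 8601 date strings such as 2016-01-01T00:00:00 are accepted and handed to parse_isotime().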
check_expires = None try: check_expires = int(expires) except ValueError: pass if check_expires: raise ValueError('`expires` should be date format, ' 'for example 2016-01-01T00:00:00, ' 'not integer value: %s' % check_expires) parsed = timeutils.parse_isotime(expires) expires = timeutils.normalize_time(parsed) else: delta = datetime.timedelta(days=1) expires = timeutils.utcnow() + delta if expires <= timeutils.utcnow(): raise ValueError('`expires` is lower than the current time') methods = sorted(methods) paths = sorted(paths) expires_str = expires.strftime(_DATE_FORMAT) hmac_body = (r'%(paths)s\n%(methods)s\n%(project)s\n%(expires)s' % {'paths': ','.join(paths), 'methods': ','.join(methods), 'project': project, 'expires': expires_str}).encode('utf-8') if not isinstance(key, bytes): key = bytes(key.encode('utf-8')) return {'paths': paths, 'methods': methods, 'project': project, 'expires': expires_str, 'signature': hmac.new(key, hmac_body, hashlib.sha256).hexdigest()} def verify_signed_headers_data(key, paths, project, signature, methods, expires): """Verify that `signature` matches for the given values :param key: A string to use as a `key` for the hmac generation. :param paths: A list of strings representing URL paths. :param project: The ID of the project this URL belongs to. :param signature: The pre-generated signature :param methods: A list of methods that will be supported by the generated URL. :params expires: The expiration date for the generated URL. """ generated = create_signed_url(key, paths, project=project, methods=methods, expires=expires) return signature == generated['signature'] ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/common/utils.py0000664000175100017510000000370715033040005017065 0ustar00mylesmyles# Copyright (c) 2013 Rackspace Hosting, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """utils: general-purpose utilities.""" from oslo_config import cfg def fields(d, names, pred=lambda x: True, key_transform=lambda x: x, value_transform=lambda x: x): """Returns the entries in this dictionary with keys appearing in names. :type d: dict :type names: [a] :param pred: a filter that is applied to the values of the dictionary. 
:type pred: (a -> bool) :param key_transform: a transform to apply to the key before returning it :type key_transform: a -> a :param value_transform: a transform to apply to the value before returning it :type value_transform: a -> a :rtype: dict """ return dict((key_transform(k), value_transform(v)) for k, v in d.items() if k in names and pred(v)) _pytype_to_cfgtype = { str: cfg.StrOpt, int: cfg.IntOpt, bool: cfg.BoolOpt, float: cfg.FloatOpt, list: cfg.ListOpt, dict: cfg.DictOpt } def dict_to_conf(options): """Converts a python dictionary to a list of oslo_config.cfg.Opt :param options: The python dictionary to convert :type options: dict :returns: a list of options compatible with oslo_config :rtype: [oslo_config.cfg.Opt] """ opts = [] for k, v in options.items(): opt_type = _pytype_to_cfgtype[type(v)] opts.append(opt_type(name=k, default=v)) return opts ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5690136 zaqar-20.1.0.dev29/zaqar/conf/0000775000175100017510000000000015033040026015004 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/conf/__init__.py0000664000175100017510000000411315033040005017111 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
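# NOTE: a minimal, illustrative bootstrap sequence for this package; the names used are the ones defined in this module, and the printed option is just an example from zaqar.conf.default. # # from zaqar import conf # conf.configure() # registers every module in conf_modules # conf.setup_logging() # print(conf.CONF.pooling) # a [DEFAULT] option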
import logging from oslo_cache import core as cache from oslo_config import cfg from oslo_log import log from zaqar.conf import default from zaqar.conf import drivers from zaqar.conf import drivers_management_store_mongodb from zaqar.conf import drivers_management_store_redis from zaqar.conf import drivers_management_store_sqlalchemy from zaqar.conf import drivers_message_store_mongodb from zaqar.conf import drivers_message_store_redis from zaqar.conf import drivers_message_store_swift from zaqar.conf import drivers_transport_websocket from zaqar.conf import drivers_transport_wsgi from zaqar.conf import notification from zaqar.conf import pooling_catalog from zaqar.conf import profiler from zaqar.conf import signed_url from zaqar.conf import storage from zaqar.conf import transport CONF = cfg.CONF conf_modules = [ default, drivers, drivers_management_store_mongodb, drivers_management_store_redis, drivers_management_store_sqlalchemy, drivers_message_store_mongodb, drivers_message_store_redis, drivers_message_store_swift, drivers_transport_websocket, drivers_transport_wsgi, notification, pooling_catalog, profiler, signed_url, storage, transport ] def setup_logging(): """Set up logging for the zaqar package.""" log.setup(CONF, 'zaqar') logging.captureWarnings(True) def configure(conf=None): if conf is None: conf = CONF for module in conf_modules: module.register_opts(conf) # add oslo.cache related config options cache.configure(conf) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/conf/default.py0000664000175100017510000000372515033040005017006 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg admin_mode = cfg.BoolOpt( 'admin_mode', default=False, help='Activate privileged endpoints.') pooling = cfg.BoolOpt( 'pooling', default=False, help=('Enable pooling across multiple storage backends. ' 'If pooling is enabled, the storage driver ' 'configuration is used to determine where the ' 'catalogue/control plane data is kept.'), deprecated_opts=[cfg.DeprecatedOpt('sharding')]) unreliable = cfg.BoolOpt( 'unreliable', default=False, help='Disable all reliability constraints.') enable_deprecated_api_versions = cfg.ListOpt( 'enable_deprecated_api_versions', default=[], item_type=cfg.types.String(choices=('1', '1.1')), help='List of deprecated API versions to enable.') enable_checksum = cfg.BoolOpt( 'enable_checksum', default=False, help='Enable a checksum for message body. The default value is False.') auth_strategy = cfg.StrOpt( 'auth_strategy', default='', help=('Backend to use for authentication. ' 'For no auth, keep it empty. ' 'Existing strategies: keystone.
' 'See also the keystone_authtoken section below')) GROUP_NAME = 'DEFAULT' ALL_OPTS = [ admin_mode, pooling, unreliable, enable_deprecated_api_versions, enable_checksum, auth_strategy ] def register_opts(conf): conf.register_opts(ALL_OPTS) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/conf/drivers.py0000664000175100017510000000227315033040005017035 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg transport = cfg.StrOpt( 'transport', default='wsgi', help='Transport driver to use.') message_store = cfg.StrOpt( 'message_store', default='mongodb', deprecated_opts=[cfg.DeprecatedOpt('storage')], help='Storage driver to use as the messaging store.') management_store = cfg.StrOpt( 'management_store', default='mongodb', help='Storage driver to use as the management store.') GROUP_NAME = 'drivers' ALL_OPTS = [ transport, message_store, management_store ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/conf/drivers_management_store_mongodb.py0000664000175100017510000001157215033040005024154 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg _deprecated_group = 'drivers:storage:mongodb' ssl_keyfile = cfg.StrOpt( 'ssl_keyfile', deprecated_opts=[cfg.DeprecatedOpt( 'ssl_keyfile', group=_deprecated_group), ], help='The private keyfile used to identify the local ' 'connection against mongod. If included with ' 'the ``certfile`` then only the ``ssl_certfile``' ' is needed.') ssl_certfile = cfg.StrOpt( 'ssl_certfile', deprecated_opts=[cfg.DeprecatedOpt( 'ssl_certfile', group=_deprecated_group), ], help='The certificate file used to identify the ' 'local connection against mongod.') ssl_cert_reqs = cfg.StrOpt( 'ssl_cert_reqs', default='CERT_REQUIRED', deprecated_opts=[cfg.DeprecatedOpt( 'ssl_cert_reqs', group=_deprecated_group), ], help='Specifies whether a certificate is required ' 'from the other side of the connection, and ' 'whether it will be validated if provided. It ' 'must be one of the three values ``CERT_NONE``' '(certificates ignored), ``CERT_OPTIONAL``' '(not required, but validated if provided), or' ' ``CERT_REQUIRED``(required and validated).
' 'If the value of this parameter is not ' '``CERT_NONE``, then the ``ssl_ca_cert`` ' 'parameter must point to a file of CA ' 'certificates.') ssl_ca_certs = cfg.StrOpt( 'ssl_ca_certs', deprecated_opts=[cfg.DeprecatedOpt( 'ssl_ca_certs', group=_deprecated_group), ], help='The ca_certs file contains a set of concatenated ' '"certification authority" certificates, which are ' 'used to validate certificates passed from the other ' 'end of the connection.') uri = cfg.StrOpt( 'uri', secret=True, deprecated_opts=[cfg.DeprecatedOpt( 'uri', group=_deprecated_group), ], help='Mongodb Connection URI. If ssl connection enabled, ' 'then ``ssl_keyfile``, ``ssl_certfile``, ' '``ssl_cert_reqs``, ``ssl_ca_certs`` need to be set ' 'accordingly.') database = cfg.StrOpt( 'database', default='zaqar', deprecated_opts=[cfg.DeprecatedOpt( 'database', group=_deprecated_group), ], help='Database name.') max_attempts = cfg.IntOpt( 'max_attempts', min=0, default=1000, deprecated_opts=[cfg.DeprecatedOpt( 'max_attempts', group=_deprecated_group), ], help=('Maximum number of times to retry a failed operation. ' 'Currently only used for retrying a message post.')) max_retry_sleep = cfg.FloatOpt( 'max_retry_sleep', default=0.1, deprecated_opts=[cfg.DeprecatedOpt( 'max_retry_sleep', group=_deprecated_group), ], help=('Maximum sleep interval between retries ' '(actual sleep time increases linearly ' 'according to number of attempts performed).')) max_retry_jitter = cfg.FloatOpt( 'max_retry_jitter', default=0.005, deprecated_opts=[cfg.DeprecatedOpt( 'max_retry_jitter', group=_deprecated_group), ], help=('Maximum jitter interval, to be added to the ' 'sleep interval, in order to decrease probability ' 'that parallel requests will retry at the ' 'same instant.')) max_reconnect_attempts = cfg.IntOpt( 'max_reconnect_attempts', default=10, deprecated_opts=[cfg.DeprecatedOpt( 'max_reconnect_attempts', group=_deprecated_group), ], help=('Maximum number of times to retry an operation that ' 'failed due to a primary node failover.')) reconnect_sleep = cfg.FloatOpt( 'reconnect_sleep', default=0.020, deprecated_opts=[cfg.DeprecatedOpt( 'reconnect_sleep', group=_deprecated_group), ], help=('Base sleep interval between attempts to reconnect ' 'after a primary node failover. ' 'The actual sleep time increases exponentially (power ' 'of 2) each time the operation is retried.')) GROUP_NAME = 'drivers:management_store:mongodb' ALL_OPTS = [ ssl_keyfile, ssl_certfile, ssl_cert_reqs, ssl_ca_certs, uri, database, max_attempts, max_retry_sleep, max_retry_jitter, max_reconnect_attempts, reconnect_sleep ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/conf/drivers_management_store_redis.py0000664000175100017510000000634615033040005023640 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
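# NOTE: an illustrative zaqar.conf snippet exercising the options below; the host, password and timing values are placeholders, not recommendations. # # [drivers:management_store:redis] # uri = redis://:secrete@127.0.0.1:6379?socket_timeout=0.2 # max_reconnect_attempts = 5 # reconnect_sleep = 2.0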
from oslo_config import cfg _deprecated_group = 'drivers:storage:redis' uri = cfg.StrOpt( 'uri', default="redis://127.0.0.1:6379", secret=True, deprecated_opts=[cfg.DeprecatedOpt( 'uri', group=_deprecated_group), ], help=('Redis connection URI, taking one of three forms. ' 'For a direct connection to a Redis server, use ' 'the form "redis://[:password]@host[:port][?options]", ' 'where password is redis-server\'s password, when' 'redis-server is set password, the password option' 'needs to be set. port defaults to 6379 if not' 'specified. For an HA master-slave Redis cluster using' ' Redis Sentinel, use the form ' '"redis://[:password]@host1[:port1]' '[,host2[:port2],...,hostN[:portN]][?options]", ' 'where each host specified corresponds to an ' 'instance of redis-sentinel. In this form, the ' 'name of the Redis master used in the Sentinel ' 'configuration must be included in the query ' 'string as "master=". Finally, to connect ' 'to a local instance of Redis over a unix socket, ' 'you may use the form ' '"redis://[:password]@/path/to/redis.sock[?options]".' ' In all forms, the "socket_timeout" option may be' 'specified in the query string. Its value is ' 'given in seconds. If not provided, ' '"socket_timeout" defaults to 0.1 seconds.' 'There are multiple database instances in redis ' 'database, for example in the /etc/redis/redis.conf, ' 'if the parameter is "database 16", there are 16 ' 'database instances. By default, the data is stored ' 'in db = 0 database, if you want to use db = 1 ' 'database, you can use the following form: ' '"redis://host[:port][?dbid=1]".')) max_reconnect_attempts = cfg.IntOpt( 'max_reconnect_attempts', default=10, deprecated_opts=[cfg.DeprecatedOpt( 'max_reconnect_attempts', group=_deprecated_group), ], help=('Maximum number of times to retry an operation that ' 'failed due to a redis node failover.')) reconnect_sleep = cfg.FloatOpt( 'reconnect_sleep', default=1.0, deprecated_opts=[cfg.DeprecatedOpt( 'reconnect_sleep', group=_deprecated_group), ], help=('Base sleep interval between attempts to reconnect ' 'after a redis node failover. ')) GROUP_NAME = 'drivers:management_store:redis' ALL_OPTS = [ uri, max_reconnect_attempts, reconnect_sleep ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/conf/drivers_management_store_sqlalchemy.py0000664000175100017510000000201315033040005024657 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
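# NOTE: an illustrative zaqar.conf snippet for this driver; the MySQL URL below is a placeholder, and any SQLAlchemy-supported URL works. # # [drivers:management_store:sqlalchemy] # uri = mysql+pymysql://zaqar:secret@203.0.113.10/zaqar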
from oslo_config import cfg _deprecated_group = 'drivers:storage:sqlalchemy' uri = cfg.StrOpt( 'uri', default='sqlite:///:memory:', secret=True, deprecated_opts=[cfg.DeprecatedOpt( 'uri', group=_deprecated_group), ], help='An SQLAlchemy URL') GROUP_NAME = 'drivers:management_store:sqlalchemy' ALL_OPTS = [ uri ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/conf/drivers_message_store_mongodb.py0000664000175100017510000001265615033040005023470 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg _deprecated_group = 'drivers:storage:mongodb' ssl_keyfile = cfg.StrOpt( 'ssl_keyfile', deprecated_opts=[cfg.DeprecatedOpt( 'ssl_keyfile', group=_deprecated_group), ], help='The private keyfile used to identify the local ' 'connection against mongod. If included with ' 'the ``certfile`` then only the ``ssl_certfile``' ' is needed.') ssl_certfile = cfg.StrOpt( 'ssl_certfile', deprecated_opts=[cfg.DeprecatedOpt( 'ssl_certfile', group=_deprecated_group), ], help='The certificate file used to identify the ' 'local connection against mongod.') ssl_cert_reqs = cfg.StrOpt( 'ssl_cert_reqs', default='CERT_REQUIRED', deprecated_opts=[cfg.DeprecatedOpt( 'ssl_cert_reqs', group=_deprecated_group), ], help='Specifies whether a certificate is required ' 'from the other side of the connection, and ' 'whether it will be validated if provided. It ' 'must be one of the three values ``CERT_NONE``' '(certificates ignored), ``CERT_OPTIONAL``' '(not required, but validated if provided), or' ' ``CERT_REQUIRED``(required and validated). ' 'If the value of this parameter is not ' '``CERT_NONE``, then the ``ssl_ca_cert`` ' 'parameter must point to a file of CA ' 'certificates.') ssl_ca_certs = cfg.StrOpt( 'ssl_ca_certs', deprecated_opts=[cfg.DeprecatedOpt( 'ssl_ca_certs', group=_deprecated_group), ], help='The ca_certs file contains a set of concatenated ' '"certification authority" certificates, which are ' 'used to validate certificates passed from the other ' 'end of the connection.') uri = cfg.StrOpt( 'uri', secret=True, deprecated_opts=[cfg.DeprecatedOpt( 'uri', group=_deprecated_group), ], help='Mongodb Connection URI. If ssl connection enabled, ' 'then ``ssl_keyfile``, ``ssl_certfile``, ' '``ssl_cert_reqs``, ``ssl_ca_certs`` need to be set ' 'accordingly.') database = cfg.StrOpt( 'database', default='zaqar', deprecated_opts=[cfg.DeprecatedOpt( 'database', group=_deprecated_group), ], help='Database name.') max_attempts = cfg.IntOpt( 'max_attempts', min=0, default=1000, deprecated_opts=[cfg.DeprecatedOpt( 'max_attempts', group=_deprecated_group), ], help=('Maximum number of times to retry a failed operation.
' 'Currently only used for retrying a message post.')) max_retry_sleep = cfg.FloatOpt( 'max_retry_sleep', default=0.1, deprecated_opts=[cfg.DeprecatedOpt( 'max_retry_sleep', group=_deprecated_group), ], help=('Maximum sleep interval between retries ' '(actual sleep time increases linearly ' 'according to number of attempts performed).')) max_retry_jitter = cfg.FloatOpt( 'max_retry_jitter', default=0.005, deprecated_opts=[cfg.DeprecatedOpt( 'max_retry_jitter', group=_deprecated_group), ], help=('Maximum jitter interval, to be added to the ' 'sleep interval, in order to decrease probability ' 'that parallel requests will retry at the ' 'same instant.')) max_reconnect_attempts = cfg.IntOpt( 'max_reconnect_attempts', default=10, deprecated_opts=[cfg.DeprecatedOpt( 'max_reconnect_attempts', group=_deprecated_group), ], help=('Maximum number of times to retry an operation that ' 'failed due to a primary node failover.')) reconnect_sleep = cfg.FloatOpt( 'reconnect_sleep', default=0.020, deprecated_opts=[cfg.DeprecatedOpt( 'reconnect_sleep', group=_deprecated_group), ], help=('Base sleep interval between attempts to reconnect ' 'after a primary node failover. ' 'The actual sleep time increases exponentially (power ' 'of 2) each time the operation is retried.')) partitions = cfg.IntOpt( 'partitions', default=2, deprecated_opts=[cfg.DeprecatedOpt( 'partitions', group=_deprecated_group), ], help=('Number of databases across which to ' 'partition message data, in order to ' 'reduce writer lock %. DO NOT change ' 'this setting after initial deployment. ' 'It MUST remain static. Also, you ' 'should not need a large number of partitions ' 'to improve performance, esp. if deploying ' 'MongoDB on SSD storage.')) GROUP_NAME = 'drivers:message_store:mongodb' ALL_OPTS = [ ssl_keyfile, ssl_certfile, ssl_cert_reqs, ssl_ca_certs, uri, database, max_attempts, max_retry_sleep, max_retry_jitter, max_reconnect_attempts, reconnect_sleep, partitions ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/conf/drivers_message_store_redis.py0000664000175100017510000000634715033040005023151 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg _deprecated_group = 'drivers:storage:redis' uri = cfg.StrOpt( 'uri', default="redis://127.0.0.1:6379", secret=True, deprecated_opts=[cfg.DeprecatedOpt( 'uri', group=_deprecated_group), ], help=('Redis connection URI, taking one of three forms. ' 'For a direct connection to a Redis server, use ' 'the form "redis://[:password]@host[:port][?options]", ' 'where password is redis-server\'s password, when' 'redis-server is set password, the password option' 'needs to be set. port defaults to 6379 if not' 'specified. 
For an HA master-slave Redis cluster using' ' Redis Sentinel, use the form ' '"redis://[:password]@host1[:port1]' '[,host2[:port2],...,hostN[:portN]][?options]", ' 'where each host specified corresponds to an ' 'instance of redis-sentinel. In this form, the ' 'name of the Redis master used in the Sentinel ' 'configuration must be included in the query ' 'string as "master=". Finally, to connect ' 'to a local instance of Redis over a unix socket, ' 'you may use the form ' '"redis://[:password]@/path/to/redis.sock[?options]".' ' In all forms, the "socket_timeout" option may be' 'specified in the query string. Its value is ' 'given in seconds. If not provided, ' '"socket_timeout" defaults to 0.1 seconds.' 'There are multiple database instances in redis ' 'database, for example in the /etc/redis/redis.conf, ' 'if the parameter is "database 16", there are 16 ' 'database instances. By default, the data is stored ' 'in db = 0 database, if you want to use db = 1 ' 'database, you can use the following form: ' '"redis://host[:port][?dbid=1]".')) max_reconnect_attempts = cfg.IntOpt( 'max_reconnect_attempts', default=10, deprecated_opts=[cfg.DeprecatedOpt( 'max_reconnect_attempts', group=_deprecated_group), ], help=('Maximum number of times to retry an operation that ' 'failed due to a redis node failover.')) reconnect_sleep = cfg.FloatOpt( 'reconnect_sleep', default=1.0, deprecated_opts=[cfg.DeprecatedOpt( 'reconnect_sleep', group=_deprecated_group), ], help=('Base sleep interval between attempts to reconnect ' 'after a redis node failover. ')) GROUP_NAME = 'drivers:message_store:redis' ALL_OPTS = [ uri, max_reconnect_attempts, reconnect_sleep ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/conf/drivers_message_store_swift.py0000664000175100017510000000361215033040005023167 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
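# NOTE: an illustrative zaqar.conf snippet for the Swift message store, following the same form as the defaults below; credentials and endpoints are placeholders. # # [drivers:message_store:swift] # uri = swift://zaqar:password@/service # auth_url = http://203.0.113.5:5000/v3/ # region_name = RegionOne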
from oslo_config import cfg auth_url = cfg.StrOpt( "auth_url", default="http://127.0.0.1:5000/v3/", help="URI of Keystone endpoint to discover Swift") uri = cfg.StrOpt( "uri", default="swift://demo:nomoresecrete@/demo", secret=True, help="Custom URI describing the swift connection.") insecure = cfg.StrOpt( "insecure", default=False, help="Don't check SSL certificate") project_domain_id = cfg.StrOpt( "project_domain_id", help="Domain ID containing project") project_domain_name = cfg.StrOpt( "project_domain_name", default='Default', help="Domain name containing project") user_domain_id = cfg.StrOpt( "user_domain_id", help="User's domain id") user_domain_name = cfg.StrOpt( "user_domain_name", default='Default', help="User's domain name") region_name = cfg.StrOpt( "region_name", help="Region name") interface = cfg.StrOpt( "interface", default="publicURL", help="The default interface for endpoint URL " "discovery.") GROUP_NAME = 'drivers:message_store:swift' ALL_OPTS = [ auth_url, uri, insecure, project_domain_id, project_domain_name, user_domain_id, user_domain_name, region_name, interface ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/conf/drivers_transport_websocket.py0000664000175100017510000000272615033040005023222 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg bind = cfg.HostAddressOpt( 'bind', default='127.0.0.1', help='Address on which the self-hosting server will ' 'listen.') port = cfg.PortOpt( 'port', default=9000, help='Port on which the self-hosting server will listen.') external_port = cfg.PortOpt( 'external-port', help='Port on which the service is provided to the user.') notification_bind = cfg.HostAddressOpt( 'notification-bind', help='Address on which the notification server will ' 'listen.') notification_port = cfg.PortOpt( 'notification-port', default=0, help='Port on which the notification server will listen.') GROUP_NAME = 'drivers:transport:websocket' ALL_OPTS = [ bind, port, external_port, notification_bind, notification_port ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/conf/drivers_transport_wsgi.py0000664000175100017510000000200515033040005022173 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg bind = cfg.HostAddressOpt( 'bind', default='127.0.0.1', help='Address on which the self-hosting server will ' 'listen.') port = cfg.PortOpt( 'port', default=8888, help='Port on which the self-hosting server will listen.') GROUP_NAME = 'drivers:transport:wsgi' ALL_OPTS = [ bind, port ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/conf/notification.py0000664000175100017510000001071515033040005020045 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg smtp_mode = cfg.StrOpt( 'smtp_mode', default='third_part', choices=('third_part', 'self_local'), help='There are two values that can be chosen: third_part or ' 'self_local. third_part means Zaqar will use the tools ' 'from config option smtp_command. self_local means the ' 'smtp Python library will be used.') smtp_host = cfg.HostAddressOpt( 'smtp_host', help='The host IP for the email system. It should be ' 'set when smtp_mode is set to self_local.') smtp_port = cfg.PortOpt( 'smtp_port', help='The port for the email system. It should be set when ' 'smtp_mode is set to self_local.') smtp_user_name = cfg.StrOpt( 'smtp_user_name', help='The user name for the email system to login. It should ' 'be set when smtp_mode is set to self_local.') smtp_user_password = cfg.StrOpt( 'smtp_user_password', help='The user password for the email system to login. It ' 'should be set when smtp_mode is set to self_local.') smtp_command = cfg.StrOpt( 'smtp_command', default='/usr/sbin/sendmail -t -oi', help=( 'The smtp command used to send email. The format is ' '"command_name arg1 arg2".')) max_notifier_workers = cfg.IntOpt( 'max_notifier_workers', default=10, help='The maximum number of notification workers.') require_confirmation = cfg.BoolOpt( 'require_confirmation', default=False, help='Whether the http/https/email subscription needs to be confirmed ' 'before notification.') external_confirmation_url = cfg.StrOpt( 'external_confirmation_url', help='The confirmation page url that will be used in email subscription ' 'confirmation before notification.') subscription_confirmation_email_template = cfg.DictOpt( "subscription_confirmation_email_template", default={'topic': 'Zaqar Notification - Subscription ' 'Confirmation', 'body': 'You have chosen to subscribe to the ' 'queue: {0}. This queue belongs to ' 'project: {1}. ' 'To confirm this subscription, ' 'click or visit the link below: {2}', 'sender': 'Zaqar Notifications ' ''}, help="Defines the set of subscription confirmation email content, " "including topic, body and sender. The mapping is: " "{0} -> queue name, {1} -> project id, {2} -> confirm url in the body " "string. Users can use any of the three values.
but no more than three.") unsubscribe_confirmation_email_template = cfg.DictOpt( "unsubscribe_confirmation_email_template", default={'topic': 'Zaqar Notification - ' 'Unsubscribe Confirmation', 'body': 'You have successfully unsubscribed ' 'from the queue: {0}. This queue belongs to ' 'project: {1}. ' 'To resubscribe, ' 'click or visit the link below: {2}', 'sender': 'Zaqar Notifications ' '<no-reply@openstack.org>'}, help="Defines the set of unsubscribe confirmation email content, " "including topic, body and sender. The mapping in the body string " "is {0} -> queue name, {1} -> project id, {2} -> confirm url. " "Users can use any of the three values, but no more than three.") GROUP_NAME = 'notification' ALL_OPTS = [ smtp_mode, smtp_host, smtp_port, smtp_user_name, smtp_user_password, smtp_command, max_notifier_workers, require_confirmation, external_confirmation_url, subscription_confirmation_email_template, unsubscribe_confirmation_email_template ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/conf/opts.py0000664000175100017510000000704715033040005016350 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Single point of entry to generate the sample configuration file. This module collects all the necessary info from the other modules in this package. It is assumed that: * Every other module in this package has a 'list_opts' function which returns a dict where: * The keys are strings which are the group names. * The value of each key is a list of config options for that group. * The conf package doesn't have further packages with config options. * This module is only used in the context of sample file generation. """ import collections import importlib import os import pkgutil LIST_OPTS_FUNC_NAME = 'list_opts' IGNORED_MODULES = ('opts', 'constants', 'utils') def list_opts(): opts = collections.defaultdict(list) module_names = _list_module_names() imported_modules = _import_modules(module_names) _append_config_options(imported_modules, opts) return _tupleize(opts) def list_opts_by_group(): opts = [] module_names = _list_module_names() imported_modules = _import_modules(module_names) for module in imported_modules: configs = module.list_opts() group_name = (module.GROUP_NAME if module.GROUP_NAME != 'DEFAULT' else None) opts.append((group_name, configs[module.GROUP_NAME])) return opts def _tupleize(d): """Convert a dict of options to the 2-tuple format.""" return [(key, value) for key, value in d.items()] def _list_module_names(): module_names = [] package_path = os.path.dirname(os.path.abspath(__file__)) for _, module_name, ispkg in pkgutil.iter_modules(path=[package_path]): if module_name in IGNORED_MODULES or ispkg: # Skip this module.
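# For reference, a minimal sketch of the contract this module assumes each
# sibling conf module honours (the module below is hypothetical; 'example'
# and 'example_opt' are made-up names, not real Zaqar options):
#
#   from oslo_config import cfg
#
#   example_opt = cfg.StrOpt('example_opt', help='An example option.')
#
#   GROUP_NAME = 'example'
#   ALL_OPTS = [example_opt]
#
#   def register_opts(conf):
#       conf.register_opts(ALL_OPTS, group=GROUP_NAME)
#
#   def list_opts():
#       return {GROUP_NAME: ALL_OPTS}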
continue else: module_names.append(module_name) return module_names def _import_modules(module_names): imported_modules = [] for module_name in module_names: full_module_path = '.'.join(__name__.split('.')[:-1] + [module_name]) module = importlib.import_module(full_module_path) if not hasattr(module, LIST_OPTS_FUNC_NAME): raise Exception( "The module '%s' should have a '%s' function which " "returns the config options." % ( full_module_path, LIST_OPTS_FUNC_NAME)) else: imported_modules.append(module) return imported_modules def _process_old_opts(configs): """Convert old-style 2-tuple configs to dicts.""" if isinstance(configs, tuple): configs = [configs] return {label: options for label, options in configs} def _append_config_options(imported_modules, config_options): for module in imported_modules: configs = module.list_opts() # TODO(markus_z): Remove this compatibility shim once all list_opts() # functions have been updated to return dicts. if not isinstance(configs, dict): configs = _process_old_opts(configs) for key, val in configs.items(): config_options[key].extend(val) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/conf/pooling_catalog.py0000664000175100017510000000167715033040005020527 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg enable_virtual_pool = cfg.BoolOpt( 'enable_virtual_pool', default=False, help='If enabled, the message_store will be used as the storage for the ' 'virtual pool.') GROUP_NAME = 'pooling:catalog' ALL_OPTS = [ enable_virtual_pool ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/conf/profiler.py0000664000175100017510000000244715033040005017204 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg trace_wsgi_transport = cfg.BoolOpt( "trace_wsgi_transport", default=False, help="If False doesn't trace any transport requests." 
"Please note that it doesn't work for websocket now.") trace_message_store = cfg.BoolOpt( "trace_message_store", default=False, help="If False doesn't trace any message store requests.") trace_management_store = cfg.BoolOpt( "trace_management_store", default=False, help="If False doesn't trace any management store requests.") GROUP_NAME = 'profiler' ALL_OPTS = [ trace_wsgi_transport, trace_message_store, trace_management_store ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/conf/signed_url.py0000664000175100017510000000162415033040005017511 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg secret_key = cfg.StrOpt('secret_key', secret=True, help='Secret key used to encrypt pre-signed URLs.') GROUP_NAME = 'signed_url' ALL_OPTS = [ secret_key ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/conf/storage.py0000664000175100017510000000413015033040005017015 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from zaqar.i18n import _ queue_pipeline = cfg.ListOpt( 'queue_pipeline', default=[], help=_('Pipeline to use for processing queue operations. This pipeline ' 'will be consumed before calling the storage driver\'s controller ' 'methods.')) message_pipeline = cfg.ListOpt( 'message_pipeline', default=[], help=_('Pipeline to use for processing message operations. This pipeline ' 'will be consumed before calling the storage driver\'s controller ' 'methods.')) claim_pipeline = cfg.ListOpt( 'claim_pipeline', default=[], help=_('Pipeline to use for processing claim operations. This pipeline ' 'will be consumed before calling the storage driver\'s controller ' 'methods.')) subscription_pipeline = cfg.ListOpt( 'subscription_pipeline', default=[], help=_('Pipeline to use for processing subscription operations. This ' 'pipeline will be consumed before calling the storage driver\'s ' 'controller methods.')) topic_pipeline = cfg.ListOpt( 'topic_pipeline', default=[], help=_('Pipeline to use for processing topic operations. 
This ' 'pipeline will be consumed before calling the storage driver\'s ' 'controller methods.')) GROUP_NAME = 'storage' ALL_OPTS = [ queue_pipeline, message_pipeline, claim_pipeline, subscription_pipeline, topic_pipeline ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/conf/transport.py0000664000175100017510000001405715033040005017416 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg default_message_ttl = cfg.IntOpt( 'default_message_ttl', default=3600, help='Defines how long a message will be accessible.') default_message_delay = cfg.IntOpt( 'default_message_delay', default=0, help=('Defines the default value, in seconds, of the message delay. ' 'A value of 0 means the delayed messages feature is disabled.')) default_claim_ttl = cfg.IntOpt( 'default_claim_ttl', default=300, help='Defines how long a message will be in claimed state.') default_claim_grace = cfg.IntOpt( 'default_claim_grace', default=60, help='Defines the message grace period in seconds.') default_subscription_ttl = cfg.IntOpt( 'default_subscription_ttl', default=3600, help='Defines how long a subscription will be available.') max_queues_per_page = cfg.IntOpt( 'max_queues_per_page', default=20, deprecated_name='queue_paging_uplimit', deprecated_group='limits:transport', help='Defines the maximum number of queues per page.') max_messages_per_page = cfg.IntOpt( 'max_messages_per_page', default=20, deprecated_name='message_paging_uplimit', deprecated_group='limits:transport', help='Defines the maximum number of messages per page.') max_subscriptions_per_page = cfg.IntOpt( 'max_subscriptions_per_page', default=20, deprecated_name='subscription_paging_uplimit', deprecated_group='limits:transport', help='Defines the maximum number of subscriptions per page.') max_messages_per_claim_or_pop = cfg.IntOpt( 'max_messages_per_claim_or_pop', default=20, deprecated_name='max_messages_per_claim', help='The maximum number of messages that can be claimed (OR) ' 'popped in a single request.') max_queue_metadata = cfg.IntOpt( 'max_queue_metadata', default=64 * 1024, deprecated_name='metadata_size_uplimit', deprecated_group='limits:transport', help='Defines the maximum amount of metadata in a queue.') max_messages_post_size = cfg.IntOpt( 'max_messages_post_size', default=256 * 1024, deprecated_name='message_size_uplimit', deprecated_group='limits:transport', deprecated_opts=[cfg.DeprecatedOpt('max_message_size')], help='Defines the maximum size of message posts.') max_message_ttl = cfg.IntOpt( 'max_message_ttl', default=1209600, deprecated_name='message_ttl_max', deprecated_group='limits:transport', help='Maximum amount of time a message will be available.') max_message_delay = cfg.IntOpt( 'max_message_delay', default=900, help='Maximum delay, in seconds, before messages can be claimed.') max_claim_ttl = cfg.IntOpt( 'max_claim_ttl', default=43200,
deprecated_name='claim_ttl_max', deprecated_group='limits:transport', help='Maximum amount of time a message can remain in the claimed state.') max_claim_grace = cfg.IntOpt( 'max_claim_grace', default=43200, deprecated_name='claim_grace_max', deprecated_group='limits:transport', help='Defines the maximum message grace period in seconds.') subscriber_types = cfg.ListOpt( 'subscriber_types', default=['http', 'https', 'mailto', 'trust+http', 'trust+https'], help='Defines supported subscriber types.') max_flavors_per_page = cfg.IntOpt( 'max_flavors_per_page', default=20, help='Defines the maximum number of flavors per page.') max_pools_per_page = cfg.IntOpt( 'max_pools_per_page', default=20, help='Defines the maximum number of pools per page.') client_id_uuid_safe = cfg.StrOpt( 'client_id_uuid_safe', default='strict', choices=['strict', 'off'], help='Defines the format of the client id; the value can be ' '"strict" or "off". "strict" means the format of the client id' ' must be a uuid, "off" means the restriction is removed.') min_length_client_id = cfg.IntOpt( 'min_length_client_id', default=10, help='Defines the minimum length of the client id when the ' 'uuid restriction is removed. Default is 10.') max_length_client_id = cfg.IntOpt( 'max_length_client_id', default=36, help='Defines the maximum length of the client id when the ' 'uuid restriction is removed. Default is 36.') message_delete_with_claim_id = cfg.BoolOpt( 'message_delete_with_claim_id', default=False, help='When enabled, messages can only be deleted with their claim ' 'IDs. This improves security by preventing messages from being' ' deleted before they are claimed and handled.') message_encryption_algorithms = cfg.StrOpt( 'message_encryption_algorithms', default='AES256', choices=['AES256', 'RSA'], help='Defines the encryption algorithm for messages; only ' '"AES256" is supported for now.') message_encryption_key = cfg.StrOpt( 'message_encryption_key', default='AES256', help='Defines the encryption key used by the algorithm.') GROUP_NAME = 'transport' ALL_OPTS = [ default_message_ttl, default_message_delay, default_claim_ttl, default_claim_grace, default_subscription_ttl, max_queues_per_page, max_messages_per_page, max_subscriptions_per_page, max_messages_per_claim_or_pop, max_queue_metadata, max_messages_post_size, max_message_ttl, max_message_delay, max_claim_ttl, max_claim_grace, subscriber_types, max_flavors_per_page, max_pools_per_page, client_id_uuid_safe, min_length_client_id, max_length_client_id, message_delete_with_claim_id, message_encryption_algorithms, message_encryption_key ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/context.py0000664000175100017510000000402515033040005016113 0ustar00mylesmyles# Copyright 2011 OpenStack Foundation # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
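# A minimal sketch, assumed for illustration rather than taken from this
# tree, of how the conf modules above are consumed through oslo.config:
#
#   from oslo_config import cfg
#   from zaqar.conf import transport
#
#   conf = cfg.ConfigOpts()
#   transport.register_opts(conf)
#   conf([])  # parse an empty argv so the defaults take effect
#   assert conf['transport'].default_message_ttl == 3600
#   assert conf['transport'].max_claim_ttl == 43200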
See the # License for the specific language governing permissions and limitations # under the License. """RequestContext: context for requests that persist through all of zaqar.""" from oslo_context import context class RequestContext(context.RequestContext): def __init__(self, project_id=None, client_id=None, overwrite=True, auth_token=None, user_id=None, domain_id=None, user_domain_id=None, project_domain_id=None, is_admin=False, read_only=False, request_id=None, roles=None, **kwargs): super(RequestContext, self).__init__( auth_token=auth_token, user_id=user_id, project_id=project_id, domain_id=domain_id, user_domain_id=user_domain_id, project_domain_id=project_domain_id, is_admin=is_admin, read_only=read_only, show_deleted=False, request_id=request_id, roles=roles) self.client_id = client_id if overwrite or not hasattr(context._request_store, 'context'): self.update_store() def update_store(self): context._request_store.context = self def to_dict(self): ctx = super(RequestContext, self).to_dict() ctx.update({ 'project_id': self.project_id, 'client_id': self.client_id }) return ctx ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5700135 zaqar-20.1.0.dev29/zaqar/extraspec/0000775000175100017510000000000015033040026016055 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/extraspec/__init__.py0000664000175100017510000000000015033040005020151 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5700135 zaqar-20.1.0.dev29/zaqar/extraspec/tasks/0000775000175100017510000000000015033040026017202 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/extraspec/tasks/__init__.py0000664000175100017510000000000015033040005021276 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/extraspec/tasks/messagecode.py0000664000175100017510000000145215033040005022032 0ustar00mylesmyles# Copyright (c) 2021 Fiberhome Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log as logging LOG = logging.getLogger(__name__) class MessageCodeAuthentication(object): def execute(self, extra_spec): # NOTE(wanghao): This needs to be implemented by developers.
pass ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5700135 zaqar-20.1.0.dev29/zaqar/hacking/0000775000175100017510000000000015033040026015463 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/hacking/__init__.py0000664000175100017510000000000015033040005017557 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/hacking/checks.py0000664000175100017510000000314215033040005017272 0ustar00mylesmyles# Copyright (c) 2017 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re from hacking import core _all_log_levels = {'critical', 'error', 'exception', 'info', 'warning', 'debug'} # Since the _Lx() functions have been removed, we just need to check _() _all_hints = {'_'} _log_translation_hint = re.compile( r".*LOG\.(%(levels)s)\(\s*(%(hints)s)\(" % { 'levels': '|'.join(_all_log_levels), 'hints': '|'.join(_all_hints), }) @core.flake8ext def no_translate_logs(logical_line): """N537 - Don't translate logs. Check for 'LOG.*(_(' Translators don't provide translations for log messages, and operators asked not to translate them. * This check assumes that 'LOG' is a logger. :param logical_line: The logical line to check. :returns: None if the logical line passes the check; otherwise a tuple is yielded that contains the offending index in the logical line and a message describing the check validation failure. """ if _log_translation_hint.match(logical_line): yield (0, "N537: Log messages should not be translated!") ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/i18n.py0000664000175100017510000000161215033040005015205 0ustar00mylesmyles# Copyright 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """oslo.i18n integration module. See https://docs.openstack.org/oslo.i18n/latest/user/usage.html .
""" import oslo_i18n as i18n _translators = i18n.TranslatorFactory(domain='zaqar') # The primary translation function using the well-known name "_" _ = _translators.primary ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5410137 zaqar-20.1.0.dev29/zaqar/locale/0000775000175100017510000000000015033040026015316 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5400136 zaqar-20.1.0.dev29/zaqar/locale/de/0000775000175100017510000000000015033040026015706 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5700135 zaqar-20.1.0.dev29/zaqar/locale/de/LC_MESSAGES/0000775000175100017510000000000015033040026017473 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/locale/de/LC_MESSAGES/zaqar.po0000664000175100017510000006741715033040005021165 0ustar00mylesmyles# Frank Kloeker , 2018. #zanata # Robert Simai , 2018. #zanata # Andreas Jaeger , 2019. #zanata # Robert Simai , 2019. #zanata msgid "" msgstr "" "Project-Id-Version: zaqar VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2023-12-29 01:48+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2019-09-26 06:27+0000\n" "Last-Translator: Andreas Jaeger \n" "Language-Team: German\n" "Language: de\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" #, python-format msgid "" "%(attempts)d attempt(s) required to post %(num_messages)d messages to queue " "\"%(queue)s\" under project %(project)s" msgstr "" "%(attempts)d Versuche erforderlich, um %(num_messages)d Nachrichten in die " "Warteschlange \"%(queue)s\" unter Projekt %(project)s zu stellen" #, python-format msgid "" "%(attempts)d attempt(s) required to post %(num_messages)d messages to queue " "\"%(topic)s\" under project %(project)s" msgstr "" "%(attempts)d Versuche erforderlich, um %(num_messages)d Nachrichten in die " "Warteschlange \"%(topic)s\" unter Projekt %(project)s zu stellen" msgid "A claim was specified, but the message is not currently claimed." msgstr "" "Ein Auftrag wurde angegeben, aber die Nachricht wird derzeit nicht in " "Anspruch genommen." #, python-format msgid "Accepted media type for PATCH: %s." msgstr "Akzeptierter Medientyp für PATCH: %s." msgid "Can't make confirmation email body, need a valid confirm url." msgstr "" "Bestätigungs-E-Mail-Nachricht kann nicht erstellt werden, Sie benötigen eine " "gültige Bestätigungs-URL." #, python-format msgid "Can't remove non-existent object %s." msgstr "Das nicht vorhandene Objekt %s kann nicht entfernt werden." #, python-format msgid "Can't replace non-existent object %s." msgstr "Das nicht vorhandene Objekt %s kann nicht ersetzt werden." #, python-format msgid "Cannot retrieve queue %s stats." msgstr "Die Statistiken der Warteschlange %s können nicht abgerufen werden." #, python-format msgid "Cannot retrieve queue %s." msgstr "Warteschlange %s kann nicht abgerufen werden." #, python-format msgid "Cannot retrieve subscription %s." msgstr "Abonnement %s kann nicht abgerufen werden." 
#, python-format msgid "CatalogueController:_update %(prj)s:%(queue)s:%(pool)s" msgstr "CatalogueController: _update %(prj)s: %(queue)s: %(pool)s" #, python-format msgid "CatalogueController:_update %(prj)s:%(queue)s:%(pool)s failed" msgstr "" "CatalogueController: _update %(prj)s: %(queue)s: %(pool)s ist fehlgeschlagen" #, python-format msgid "CatalogueController:delete %(prj)s:%(queue)s failed" msgstr "CatalogueController: delete %(prj)s: %(queue)s ist fehlgeschlagen" #, python-format msgid "CatalogueController:delete %(prj)s:%(queue)s success" msgstr "CatalogueController: Löschen %(prj)s: %(queue)s Erfolg" #, python-format msgid "CatalogueController:insert %(prj)s:%(queue)s %(pool)s failed" msgstr "" "CatalogueController: %(prj)s einfügen: %(queue)s %(pool)s ist fehlgeschlagen" #, python-format msgid "CatalogueController:insert %(prj)s:%(queue)s:%(pool)s, success" msgstr "CatalogueController: %(prj)s einfügen: %(queue)s: %(pool)s, Erfolg" #, python-format msgid "Claim %s deleted." msgstr "Auftrag %s gelöscht" #, python-format msgid "Claim %s does not exist." msgstr "Auftrag %s existiert nicht." #, python-format msgid "Claim %s updated." msgstr "Auftrag %s aktualisiert" msgid "Claim could not be created." msgstr "Auftrag konnte nicht erstellt werden." msgid "Claim could not be deleted." msgstr "Auftrag konnte nicht gelöscht werden." msgid "Claim could not be queried." msgstr "Auftrag konnte nicht abgefragt werden." msgid "Claim could not be updated." msgstr "Auftrag konnte nicht aktualisiert werden." msgid "Doctype must be either a JSONObject or JSONArray" msgstr "Doctype muss entweder ein JSONObject oder JSONArray sein" msgid "Document type not supported." msgstr "Dokumenttyp wird nicht unterstützt." msgid "" "Either a replica set or a mongos is required to guarantee message delivery" msgstr "" "Entweder ist ein Replikat oder ein Mongo erforderlich, um die Zustellung der " "Nachricht zu gewährleisten" msgid "" "Endpoint does not accept `application/x-www-form-urlencoded` content; " "currently supported media type is `application/json`; specify proper client-" "side media type with the \"Content-Type\" header." msgstr "" "Endpoint akzeptiert keine \"application/x-www-form-urlencoded\" Inhalte; Der " "aktuell unterstützte Medientyp ist `application/json`; Geben Sie den " "korrekten clientseitigen Medientyp mit dem Header \"Content-Type\" an." 
#, python-format msgid "" "Failed to increment the message counter for queue %(name)s and project " "%(project)s" msgstr "" "Fehler beim Inkrementieren des Nachrichtenzählers für die Warteschlangen " "%(name)s und Projekt %(project)s" #, python-format msgid "" "Failed to increment the message counter for topic %(name)s and project " "%(project)s" msgstr "" "Fehler beim Inkrementieren des Nachrichtenzählers für Topic %(name)s und " "Projekt %(project)s" #, python-format msgid "" "First attempt failed while adding messages to queue \"%(queue)s\" under " "project %(project)s" msgstr "" "Der erste Versuch ist beim Hinzufügen von Nachrichten zur Warteschlange " "\"%(queue)s\" unter Projekt %(project)s fehlgeschlagen" #, python-format msgid "" "First attempt failed while adding messages to topic \"%(topic)s\" under " "project %(project)s" msgstr "" "Der erste Versuch ist beim Hinzufügen von Nachrichten zum Topic \"%(topic)s" "\" unter Projekt %(project)s fehlgeschlagen" #, python-format msgid "Flavor %(flavor)s cant be updated, error:%(msg)s" msgstr "Variante %(flavor)s kann nicht aktualisiert werden, Fehler: %(msg)s" #, python-format msgid "Flavor %(flavor)s could not be created, error:%(msg)s" msgstr "Variante %(flavor)s konnte nicht erstellt werden, Fehler: %(msg)s" #, python-format msgid "Flavor %(flavor)s could not be created. " msgstr "Variante %(flavor)s konnte nicht erstellt werden." #, python-format msgid "Flavor %(flavor)s could not be deleted." msgstr "Variante %(flavor)s konnte nicht gelöscht werden." #, python-format msgid "Flavor %(flavor)s could not be updated, error:%(msg)s" msgstr "Variante %(flavor)s konnte nicht aktualisiert werden, Fehler: %(msg)s" msgid "Health status could not be read." msgstr "Gesundheitszustand konnte nicht gelesen werden." msgid "Invalid API request" msgstr "Ungültige API-Anfrage" msgid "Invalid Content-Type" msgstr "Ungültiger Inhaltstyp" #, python-format msgid "Invalid JSON pointer for this resource: '/%s, e.g /metadata/key'" msgstr "Ungültiger JSON-Zeiger für diese Ressource: '/%s, z.B. /metadata/key'" #, python-format msgid "" "Invalid operation: `%(op)s`. It must be one of the following: %(available)s." msgstr "" "Ungültige Operation: `%(op)s`. Es muss einer der folgenden sein: " "%(available)s." msgid "Invalid queue identification" msgstr "Ungültige Warteschlangenidentifikation" msgid "Invalid request body" msgstr "Ungültiger Anfragetext" msgid "Invalid request." msgstr "Ungültige Anfrage." msgid "Invalid scheme in Redis URI" msgstr "Ungültiges Schema in Redis URI" msgid "Invalid topic identification" msgstr "Ungültige Themenidentifikation" msgid "JSON contains integer that is too large." msgstr "JSON enthält eine Ganzzahl, die zu groß ist." msgid "Length of client id must be at least {0} and no greater than {1}." msgstr "" "Die Länge der Client-ID muss mindestens {0} und nicht größer als {1} sein." msgid "Limit must be at least 1 and may not be greater than {0}." msgstr "Limit muss mindestens 1 sein und darf nicht größer als {0} sein." msgid "Limit must be at least 1 and no greater than {0}." msgstr "Limit muss mindestens 1 und nicht größer als {0} sein." msgid "Malformed Redis URI" msgstr "Fehlerhafter Redis-URI" msgid "Malformed hexadecimal UUID." msgstr "Fehlerhafte hexadezimale UUID." msgid "Message collection size is too large. Max size {0}" msgstr "Die Größe der Nachrichtensammlung ist zu groß. Maximale Größe {0}" msgid "" "Message collection size is too large. The max size for current queue is {0}. 
" "It is calculated by max size = min(max_messages_post_size_config: {1}, " "max_messages_post_size_queue: {2})." msgstr "" "Die Größe der Nachrichtensammlung ist zu groß. Die maximale Größe für die " "aktuelle Warteschlange ist {0}. Es wird mit max size = min berechnet " "(max_messages_post_size_config: {1}, max_messages_post_size_queue: {2})." msgid "Message could not be deleted." msgstr "Nachricht konnte nicht gelöscht werden." msgid "Message could not be retrieved." msgstr "Die Nachricht konnte nicht abgerufen werden." msgid "Messages could not be deleted." msgstr "Nachrichten konnten nicht gelöscht werden." msgid "Messages could not be enqueued." msgstr "Nachrichten konnten nicht in die Warteschlange eingereiht werden." msgid "Messages could not be listed." msgstr "Nachrichten konnten nicht aufgelistet werden." msgid "Messages could not be popped." msgstr "Nachrichten konnten nicht abgerufen werden." msgid "Metadata could not be updated." msgstr "Metadaten konnten nicht aktualisiert werden." #, python-format msgid "Method %s not found in any of the registered stages" msgstr "Methode %s wurde in keiner der registrierten Phasen gefunden" msgid "Missing \"{name}\" field." msgstr "Fehlendes Feld \"{name}\"" msgid "Missing host name in Redis URI" msgstr "Fehlender Hostname in Redis URI" #, python-format msgid "Missing parameter %s in body." msgstr "Fehlender Parameter %s im Body." msgid "Missing path in Redis URI" msgstr "Fehlender Pfad in Redis URI" msgid "No messages could be enqueued." msgstr "Keine Nachrichten konnten in die Warteschlange eingereiht werden." msgid "No messages to enqueu." msgstr "Keine Nachrichten an Enqueu." msgid "No messages were found in the request body." msgstr "Im Anfragetext wurden keine Nachrichten gefunden." msgid "" "No messages with IDs: {ids} found in the queue {queue} for project {project}." msgstr "" "Keine Nachrichten mit IDs: {ids} in der Warteschlange {queue} für Projekt " "{project} gefunden." msgid "No subscription to create." msgstr "Kein Abonnement zum Erstellen." msgid "Not authorized" msgstr "Nicht berechtigt" msgid "Not found" msgstr "Nicht gefunden" msgid "Operation \"{0}\" requires a member named \"value\"." msgstr "Die Operation \"{0}\" benötigt ein Member namens \"value\"." msgid "Operations must be JSON objects." msgstr "Operationen müssen JSON-Objekte sein." msgid "Options must be a dict." msgstr "Optionen müssen ein Dictionary sein." msgid "PATCH body could not be empty for update." msgstr "Der PATCH-Body konnte nicht leer sein für das Update." msgid "" "Pipeline to use for processing claim operations. This pipeline will be " "consumed before calling the storage driver's controller methods." msgstr "" "Pipeline, die für die Verarbeitung von Forderungsoperationen verwendet " "werden soll. Diese Pipeline wird verbraucht, bevor die Controller-Methoden " "des Speichertreibers aufgerufen werden." msgid "" "Pipeline to use for processing message operations. This pipeline will be " "consumed before calling the storage driver's controller methods." msgstr "" "Pipeline zur Verarbeitung von Nachrichtenoperationen. Diese Pipeline wird " "verbraucht, bevor die Controller-Methoden des Speichertreibers aufgerufen " "werden." msgid "" "Pipeline to use for processing queue operations. This pipeline will be " "consumed before calling the storage driver's controller methods." msgstr "" "Pipeline für die Verarbeitung von Warteschlangenoperationen. Diese Pipeline " "wird verbraucht, bevor die Controller-Methoden des Speichertreibers " "aufgerufen werden." 
msgid "" "Pipeline to use for processing subscription operations. This pipeline will " "be consumed before calling the storage driver's controller methods." msgstr "" "Pipeline für die Verarbeitung von Abonnementvorgängen. Diese Pipeline wird " "verbraucht, bevor die Controller-Methoden des Speichertreibers aufgerufen " "werden." msgid "" "Pipeline to use for processing topic operations. This pipeline will be " "consumed before calling the storage driver's controller methods." msgstr "" "Pipeline für die Verarbeitung von Topicorgängen. Diese Pipeline wird " "verbraucht, bevor die Controller-Methoden des Speichertreibers aufgerufen " "werden." msgid "Please try again in a few seconds." msgstr "Bitte versuchen Sie es in einigen Sekunden erneut." #, python-format msgid "Pointer `%s` contains \"~\" not part of a recognized escape sequence." msgstr "Zeiger `%s` enthält \"~\" nicht Teil einer erkannten Escape-Sequenz." #, python-format msgid "Pointer `%s` contains adjacent \"/\"." msgstr "Zeiger `%s` enthält benachbartes \"/\"." #, python-format msgid "Pointer `%s` does not contains valid token." msgstr "Der Zeiger `%s` enthält kein gültiges Token." #, python-format msgid "Pointer `%s` does not start with \"/\"." msgstr "Zeiger `%s` beginnt nicht mit \"/\"." #, python-format msgid "Pointer `%s` end with \"/\"." msgstr "Der Zeiger `%s` endet mit \"/\"." msgid "Pop value must be at least 1 and may not be greater than {0}." msgstr "" "Der Pop-Wert muss mindestens 1 sein und darf nicht größer als {0} sein." msgid "Post body must contain key \"resource_types\"." msgstr "Der Post-Body muss den Schlüssel \"resource_types\" enthalten." msgid "Project ids may not be more than {0} characters long." msgstr "Projekt-IDs dürfen nicht länger als {0} Zeichen lang sein." #, python-format msgid "Queue %s could not be created." msgstr "Warteschlange %s konnte nicht erstellt werden." #, python-format msgid "Queue %s could not be deleted." msgstr "Warteschlange %s konnte nicht gelöscht werden." #, python-format msgid "Queue %s created." msgstr "Warteschlange %s erstellt." #, python-format msgid "Queue %s does not exist." msgstr "Warteschlange %s existiert nicht." #, python-format msgid "Queue %s removed." msgstr "Warteschlange %s wurde entfernt." msgid "Queue could not be created." msgstr "Die Warteschlange konnte nicht erstellt werden." msgid "Queue could not be deleted." msgstr "Die Warteschlange konnte nicht gelöscht werden." msgid "Queue could not be purged." msgstr "Die Warteschlange konnte nicht gelöscht werden." msgid "Queue could not be updated." msgstr "Die Warteschlange konnte nicht aktualisiert werden." msgid "Queue metadata could not be retrieved." msgstr "Warteschlangenmetadaten konnten nicht abgerufen werden." msgid "Queue metadata is too large. Max size: {0}" msgstr "Warteschlangenmetadaten sind zu groß. Maximale Größe: {0}" msgid "Queue names may not be more than {0} characters long." msgstr "Warteschlangennamen dürfen nicht länger als {0} Zeichen lang sein." msgid "" "Queue names may only contain ASCII letters, digits, underscores, and dashes." msgstr "" "Warteschlangennamen dürfen nur ASCII-Buchstaben, Ziffern, Unterstriche und " "Bindestriche enthalten." msgid "Queue stats could not be read." msgstr "Warteschlangenstatistiken konnten nicht gelesen werden." msgid "Queues could not be listed." msgstr "Warteschlangen konnten nicht aufgelistet werden." msgid "Request body can not be empty" msgstr "Der Anfragetext darf nicht leer sein" msgid "Request body could not be parsed." 
msgstr "Der Anfragetext konnte nicht analysiert werden." msgid "Request body could not be read." msgstr "Der Anfragetext konnte nicht gelesen werden." msgid "Request body must be a JSON array of operation objects." msgstr "Der Anfragetext muss ein JSON-Array von Operationsobjekten sein." msgid "" "Reserved queue attributes in metadata (which names start with \"_\") can not " "be set in API v1." msgstr "" "Reservierte Warteschlangenattribute in Metadaten (deren Namen mit \"_\" " "beginnen) können in API v1 nicht festgelegt werden." msgid "Resource conflict" msgstr "Ressourcenkonflikt" msgid "Resource types must be a sub set of {0}." msgstr "Ressourcentypen müssen eine Untergruppe von {0} sein." #, python-format msgid "Retry policy: %s must be a integer." msgstr "Wiederholungsrichtlinie: %s muss eine ganze Zahl sein." msgid "Service temporarily unavailable" msgstr "Dienst vorübergehend nicht verfügbar" #, python-format msgid "Serving on host %(bind)s:%(port)s" msgstr "Serving auf Host %(bind)s:%(port)s" #, python-format msgid "Stage %(stage)s does not implement %(method)s" msgstr "Stage %(stage)s hat keine Methode %(method)s implementiert" #, python-format msgid "Subscription %(subscription)s for queue %(queue)s could not be deleted." msgstr "" "Abonnement %(subscription)s für Warteschlange %(queue)s konnte nicht " "gelöscht werden." #, python-format msgid "Subscription %(subscription)s for queue %(queue)s does not exist." msgstr "" "Abonnement %(subscription)s für Warteschlange %(queue)s ist nicht vorhanden." #, python-format msgid "Subscription %(subscription_id)s could not be confirmed." msgstr "Abonnement %(subscription_id)s konnte nicht bestätigt werden." #, python-format msgid "Subscription %(subscription_id)s could not be updated." msgstr "Abonnement %(subscription_id)s konnte nicht aktualisiert werden." #, python-format msgid "Subscription %s could not be created." msgstr "Abonnement %s konnte nicht erstellt werden." #, python-format msgid "Subscription %s created." msgstr "Abonnement %s erstellt." #, python-format msgid "Subscription %s not created." msgstr "Abonnement %s wurde nicht erstellt." #, python-format msgid "Subscription %s removed." msgstr "Abonnement %s wurde entfernt." msgid "Subscription could not be created." msgstr "Abonnement konnte nicht erstellt werden." msgid "Subscription could not be deleted." msgstr "Abonnement konnte nicht gelöscht werden." msgid "Subscription could not be retrieved." msgstr "Abonnement konnte nicht abgerufen werden." msgid "Subscriptions could not be listed." msgstr "Abonnements konnten nicht aufgelistet werden." msgid "Subscriptions must be a dict." msgstr "Abonnements müssen ein Dictionary sein." msgid "" "Such subscription already exists.Subscriptions are unique by project + queue " "+ subscriber URI." msgstr "" "Eine solche Subskription ist bereits vorhanden. Subskriptionen sind nach " "Projekt + Warteschlange + Subskribenten-URI eindeutig." msgid "TTL must be an integer." msgstr "TTL muss eine Ganzzahl sein." msgid "The 'confirmed' should be boolean." msgstr "Das 'confirmed' sollte boolesch sein." msgid "" "The Delay TTL for a message may not exceed {0} seconds,and must be at least " "{1} seconds long." msgstr "" "Die Verzögerungs-TTL für eine Nachricht darf {0} Sekunden nicht " "überschreiten und muss mindestens {1} Sekunden lang sein." msgid "" "The Redis URI specifies multiple sentinel hosts, but is missing the \"master" "\" query string parameter. 
Please set \"master\" to the name of the Redis " "master server as specified in the sentinel configuration file." msgstr "" "Der Redis-URI gibt mehrere Sentinel-Hosts an, es fehlt jedoch der " "Abfragezeichenfolgenparameter \"Master\". Bitte setzen Sie \"Master\" auf " "den Namen des Redis-Master-Servers, wie in der Sentinel-Konfigurationsdatei " "angegeben." msgid "The Redis configuration URI contains an invalid port" msgstr "Der Redis-Konfigurations-URI enthält einen ungültigen Port" msgid "The Redis configuration URI does not define any sentinel hosts" msgstr "Der Redis-Konfigurations-URI definiert keine Sentinel-Hosts" #, python-format msgid "The Redis driver requires redis-server>=2.6, %s found" msgstr "Der Redis-Treiber benötigt den Redis-Server >=2.6, %s gefunden" msgid "" "The TTL can not exceed {0} seconds, and must be at least {1} seconds long." msgstr "" "Die TTL darf nicht länger als {0} Sekunden sein und muss mindestens {1} " "Sekunden lang sein." msgid "" "The TTL for a claim may not exceed {0} seconds, and must be at least {1} " "seconds long." msgstr "" "Die TTL für eine Anforderung darf {0} Sekunden nicht überschreiten und muss " "mindestens {1} Sekunden lang sein." msgid "" "The TTL for a message may not exceed {0} seconds, and must be at least {1} " "seconds long." msgstr "" "Die TTL für eine Nachricht darf nicht länger als {0} Sekunden sein und muss " "mindestens {1} Sekunden lang sein." msgid "The TTL for a subscription must be at least {0} seconds long." msgstr "Die TTL für ein Abonnement muss mindestens {0} Sekunden betragen." msgid "" "The TTL seconds for a subscription plus current time must be less than {0}." msgstr "" "Die TTL-Sekunden für ein Abonnement plus die aktuelle Zeit müssen kleiner " "als {0} sein." msgid "The format of the submitted queue name or project id is not valid." msgstr "" "Das Format des gesendeten Warteschlangennamens oder der Projekt-ID ist nicht " "gültig." msgid "The format of the submitted topic name or project id is not valid." msgstr "" "Das Format des gesendetenTopic-Namens oder der Projekt-ID ist nicht gültig." msgid "" "The grace for a claim may not exceed {0} seconds, and must be at least {1} " "seconds long." msgstr "" "Die Gnadenfrist für eine Forderung darf nicht länger als {0} Sekunden und " "muss mindestens {1} Sekunden betragen." msgid "The header X-PROJECT-ID was missing" msgstr "Die Kopfzeile X-PROJECT-ID fehlte" #, python-format msgid "The mongodb driver requires mongodb>=2.2, %s found" msgstr "Der mongodb-Treiber benötigt mongodb>= 2.2, %s gefunden" msgid "" "The request should have both \"ids\" and \"claim_ids\" parameter in the " "request when message_delete_with_claim_id is True." msgstr "" "Die Anfrage sollte beide \"ids\" und \"claim_ids\" Parameter enthalten, wenn " "message_delete_with_claim_id zutrifft." msgid "" "The request should have either \"ids\" or \"pop\" parameter in the request, " "to be able to delete." msgstr "" "Die Anfrage sollte entweder \"ids\" oder \"pop\" -Parameter in der Anfrage " "haben, um löschen zu können." msgid "The root of path must be metadata, e.g /metadata/key." msgstr "Die Wurzel des Pfades muss Metadaten sein, z.B. /metadata/key." msgid "The specified claim does not exist or has expired." msgstr "Der angegebene Auftrag existiert nicht oder ist abgelaufen." msgid "The subscriber type of subscription must be supported in the list {0}." msgstr "" "Der Subskribentyp des Abonnements muss in der Liste {0} unterstützt werden." msgid "The value of the \"{name}\" field must be a {vtype}." 
msgstr "Der Wert des Feldes \"{name}\" muss ein {vtype} sein." msgid "This message is claimed; it cannot be deleted without a valid claim ID." msgstr "" "Diese Nachricht wird beauftragt; Es kann nicht ohne eine gültige Auftrags-ID " "gelöscht werden." msgid "This pool is used by flavors {flavor}; It cannot be deleted." msgstr "" "Dieser Pool wird von Variante {flavor} verwendet. Es kann nicht gelöscht " "werden." msgid "Topic could not be created." msgstr "Topic konnte nicht erstellt werden." msgid "Topic could not be deleted." msgstr "Topic konnte nicht gelöscht werden." msgid "Topic could not be purged." msgstr "Topic konnte nicht gelöscht werden." msgid "Topic could not be updated." msgstr "Topic konnte nicht aktualisiert werden." msgid "Topic metadata could not be retrieved." msgstr "Topic-Metadaten konnten nicht abgerufen werden." msgid "Topic names may not be more than {0} characters long." msgstr "Topic-Namen dürfen nicht länger als {0} Zeichen lang sein." msgid "" "Topic names may only contain ASCII letters, digits, underscores, and dashes." msgstr "" "Topic-Namen dürfen nur ASCII-Buchstaben, Ziffern, Unterstriche und " "Bindestriche enthalten." msgid "Topic stats could not be read." msgstr "Topicstatistiken konnten nicht gelesen werden." msgid "Topics could not be listed." msgstr "Topi konnten nicht aufgelistet werden." msgid "Unable to confirm subscription" msgstr "Das Abonnement konnte nicht bestätigt werden" msgid "Unable to create" msgstr "Konnte nicht erstellen" msgid "Unable to create pool" msgstr "Pool konnte nicht erstellt werden" msgid "Unable to delete" msgstr "Kann nicht gelöscht werden" #, python-format msgid "Unable to find '%s' in JSON Schema change" msgstr "\"%s\" konnte in JSON-Schemaänderung nicht gefunden werden" #, python-format msgid "" "Unable to find `op` in JSON Schema change. It must be one of the following: " "%(available)s." msgstr "" "\"Op\" konnte in JSON-Schema nicht gefunden werden. Es muss einer der " "folgenden sein: %(available)s." msgid "Unable to update subscription" msgstr "Das Abonnement konnte nicht aktualisiert werden" msgid "Unexpected error." msgstr "Unerwarteter Fehler." msgid "Unrecognized JSON Schema draft version" msgstr "Nicht erkannte JSON-Schema-Entwurfsversion" msgid "" "Using a write concern other than `majority` or > 2 makes the service " "unreliable. Please use a different write concern or set `unreliable` to True " "in the config file." msgstr "" "Die Verwendung eines Schreibvorgangs, der nicht \"Majority\" oder \"2\" ist, " "macht den Dienst unzuverlässig. Bitte verwenden Sie einen anderen " "Schreibvorgang oder setzen Sie \"unreliable\" in der Konfigurationsdatei auf " "True." msgid "" "X-PROJECT-ID cannot be an empty string. Specify the right header X-PROJECT-" "ID and retry." msgstr "" "X-PROJECT-ID darf keine leere Zeichenfolge sein. Geben Sie die richtige " "Header X-PROJECT-ID an und versuchen Sie es erneut." msgid "You are not authorized to complete this action." msgstr "Sie sind nicht berechtigt, diese Aktion abzuschließen." msgid "_dead_letter_queue_messages_ttl must be integer." msgstr "_dead_letter_queue_messages_ttl muss Integer sein." msgid "_default_message_delay must be integer." msgstr "_default_message_delay muss Integer sein." msgid "" "_default_message_ttl can not exceed {0} seconds, and must be at least {1} " "seconds long." msgstr "" "_default_message_ttl kann {0} Sekunden nicht überschreiten und muss " "mindestens {1} Sekunden lang sein." msgid "_default_message_ttl must be integer." 
msgstr "_default_message_ttl muss Integer sein." msgid "_max_claim_count must be integer." msgstr "_max_claim_count muss Integer sein." msgid "" "_max_messages_post_size can not exceed {0}, and must be at least greater " "than 0." msgstr "" "_max_messages_post_size darf nicht größer als {0} sein und muss mindestens " "größer als 0 sein." msgid "_max_messages_post_size must be integer." msgstr "_max_messages_post_size muss Integer sein." msgid "ids parameter should have at least 1 and not greater than {0} values." msgstr "" "Der Parameter ids sollte mindestens 1 und nicht mehr als {0} Werte haben." msgid "ignore_subscription_override must be a boolean." msgstr "ignore_subscription_override muss ein boolescher Wert sein." msgid "invalid minimum_delay and maximum_delay." msgstr "ungültige minimum_delay und maximum_delay." msgid "invalid retry_backoff_function." msgstr "ungültige retry_backoff_function." msgid "minimum_delay must less than maximum_delay." msgstr "minimum_delay muss kleiner als maximum_delay sein." msgid "pop and id params cannot be present together in the delete request." msgstr "" "Pop- und ID-Parameter können nicht zusammen in der Löschanforderung " "vorhanden sein." msgid "register queue to pool: new flavor: None" msgstr "Registriere Warteschlange zu Pool: neuer Variante: Keine" #, python-format msgid "register queue to pool: new flavor:%(flavor)s" msgstr "registriere Warteschlange zu Pool: neue Variante:%(flavor)s" #, python-format msgid "" "register queue to pool: old flavor: %(oldflavor)s , new flavor: %(flavor)s" msgstr "" "Registrere Warteschlange zu Pool: alte Variante: %(oldflavor)s, neue " "Variante: %(flavor)s" #, python-format msgid "register queue: project:%(project)s queue:%(queue)s pool:%(pool)s" msgstr "" "Registriere Warteschlange: Projekt: %(project)s Warteschlange: %(queue)s " "Pool: %(pool)s" msgid "retry_backoff_function must be a string." msgstr "retry_backoff_function muss eine Zeichenfolge sein." msgid "retry_policy must be a dict." msgstr "retry_policy muss ein dict sein." msgid "updatefail" msgstr "Update fehlgeschlagen" msgid "{0} is not a valid action" msgstr "{0} ist keine gültige Aktion" ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5400136 zaqar-20.1.0.dev29/zaqar/locale/en_GB/0000775000175100017510000000000015033040026016270 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5700135 zaqar-20.1.0.dev29/zaqar/locale/en_GB/LC_MESSAGES/0000775000175100017510000000000015033040026020055 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/locale/en_GB/LC_MESSAGES/zaqar.po0000664000175100017510000006346115033040005021542 0ustar00mylesmyles# Andi Chandler , 2017. #zanata # Andi Chandler , 2018. #zanata # Andi Chandler , 2020. #zanata # Andi Chandler , 2022. 
#zanata msgid "" msgstr "" "Project-Id-Version: zaqar VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2023-12-29 01:48+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2022-05-31 08:39+0000\n" "Last-Translator: Andi Chandler \n" "Language-Team: English (United Kingdom)\n" "Language: en_GB\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" #, python-format msgid "" "%(attempts)d attempt(s) required to post %(num_messages)d messages to queue " "\"%(queue)s\" under project %(project)s" msgstr "" "%(attempts)d attempt(s) required to post %(num_messages)d messages to queue " "\"%(queue)s\" under project %(project)s" #, python-format msgid "" "%(attempts)d attempt(s) required to post %(num_messages)d messages to queue " "\"%(topic)s\" under project %(project)s" msgstr "" "%(attempts)d attempt(s) required to post %(num_messages)d messages to queue " "\"%(topic)s\" under project %(project)s" msgid "A claim was specified, but the message is not currently claimed." msgstr "A claim was specified, but the message is not currently claimed." #, python-format msgid "Accepted media type for PATCH: %s." msgstr "Accepted media type for PATCH: %s." msgid "Can't make confirmation email body, need a valid confirm url." msgstr "Can't make confirmation email body, need a valid confirm URL." #, python-format msgid "Can't remove non-existent object %s." msgstr "Can't remove non-existent object %s." #, python-format msgid "Can't replace non-existent object %s." msgstr "Can't replace non-existent object %s." #, python-format msgid "Cannot retrieve queue %s stats." msgstr "Cannot retrieve queue %s stats." #, python-format msgid "Cannot retrieve queue %s." msgstr "Cannot retrieve queue %s." #, python-format msgid "Cannot retrieve subscription %s." msgstr "Cannot retrieve subscription %s." #, python-format msgid "CatalogueController:_update %(prj)s:%(queue)s:%(pool)s" msgstr "CatalogueController:_update %(prj)s:%(queue)s:%(pool)s" #, python-format msgid "CatalogueController:_update %(prj)s:%(queue)s:%(pool)s failed" msgstr "CatalogueController:_update %(prj)s:%(queue)s:%(pool)s failed" #, python-format msgid "CatalogueController:delete %(prj)s:%(queue)s failed" msgstr "CatalogueController:delete %(prj)s:%(queue)s failed" #, python-format msgid "CatalogueController:delete %(prj)s:%(queue)s success" msgstr "CatalogueController:delete %(prj)s:%(queue)s success" #, python-format msgid "CatalogueController:insert %(prj)s:%(queue)s %(pool)s failed" msgstr "CatalogueController:insert %(prj)s:%(queue)s %(pool)s failed" #, python-format msgid "CatalogueController:insert %(prj)s:%(queue)s:%(pool)s, success" msgstr "CatalogueController:insert %(prj)s:%(queue)s:%(pool)s, success" #, python-format msgid "Claim %s deleted." msgstr "Claim %s deleted." #, python-format msgid "Claim %s does not exist." msgstr "Claim %s does not exist." #, python-format msgid "Claim %s updated." msgstr "Claim %s updated." msgid "Claim could not be created." msgstr "Claim could not be created." msgid "Claim could not be deleted." msgstr "Claim could not be deleted." msgid "Claim could not be queried." msgstr "Claim could not be queried." msgid "Claim could not be updated." msgstr "Claim could not be updated." msgid "Doctype must be either a JSONObject or JSONArray" msgstr "Doctype must be either a JSONObject or JSONArray" msgid "Document type not supported." msgstr "Document type not supported." 
msgid "" "Either a replica set or a mongos is required to guarantee message delivery" msgstr "" "Either a replica set or a mongos is required to guarantee message delivery" msgid "Encrypted data appears to be corrupted." msgstr "Encrypted data appears to be corrupted." msgid "" "Endpoint does not accept `application/x-www-form-urlencoded` content; " "currently supported media type is `application/json`; specify proper client-" "side media type with the \"Content-Type\" header." msgstr "" "Endpoint does not accept `application/x-www-form-urlencoded` content; " "currently supported media type is `application/json`; specify proper client-" "side media type with the \"Content-Type\" header." msgid "Extra spec cannot be an empty if specify the header." msgstr "Extra spec cannot be an empty if specify the header." #, python-format msgid "" "Failed to increment the message counter for queue %(name)s and project " "%(project)s" msgstr "" "Failed to increment the message counter for queue %(name)s and project " "%(project)s" #, python-format msgid "" "Failed to increment the message counter for topic %(name)s and project " "%(project)s" msgstr "" "Failed to increment the message counter for topic %(name)s and project " "%(project)s" #, python-format msgid "" "First attempt failed while adding messages to queue \"%(queue)s\" under " "project %(project)s" msgstr "" "First attempt failed while adding messages to queue \"%(queue)s\" under " "project %(project)s" #, python-format msgid "" "First attempt failed while adding messages to topic \"%(topic)s\" under " "project %(project)s" msgstr "" "First attempt failed while adding messages to topic \"%(topic)s\" under " "project %(project)s" #, python-format msgid "Flavor %(flavor)s cant be updated, error:%(msg)s" msgstr "Flavour %(flavor)s cant be updated, error:%(msg)s" #, python-format msgid "Flavor %(flavor)s could not be created, error:%(msg)s" msgstr "Flavour %(flavor)s could not be created, error:%(msg)s" #, python-format msgid "Flavor %(flavor)s could not be created. " msgstr "Flavour %(flavor)s could not be created. " #, python-format msgid "Flavor %(flavor)s could not be deleted." msgstr "Flavour %(flavor)s could not be deleted." #, python-format msgid "Flavor %(flavor)s could not be updated, error:%(msg)s" msgstr "Flavour %(flavor)s could not be updated, error:%(msg)s" msgid "Health status could not be read." msgstr "Health status could not be read." msgid "Invalid API request" msgstr "Invalid API request" msgid "Invalid Content-Type" msgstr "Invalid Content-Type" #, python-format msgid "Invalid JSON pointer for this resource: '/%s, e.g /metadata/key'" msgstr "Invalid JSON pointer for this resource: '/%s, e.g /metadata/key'" #, python-format msgid "" "Invalid operation: `%(op)s`. It must be one of the following: %(available)s." msgstr "" "Invalid operation: `%(op)s`. It must be one of the following: %(available)s." msgid "Invalid queue identification" msgstr "Invalid queue identification" msgid "Invalid request body" msgstr "Invalid request body" msgid "Invalid request." msgstr "Invalid request." msgid "Invalid scheme in Redis URI" msgstr "Invalid scheme in Redis URI" msgid "Invalid topic identification" msgstr "Invalid topic identification" msgid "JSON contains integer that is too large." msgstr "JSON contains integer that is too large." msgid "Length of client id must be at least {0} and no greater than {1}." msgstr "Length of client id must be at least {0} and no greater than {1}." 
msgid "Limit must be at least 1 and may not be greater than {0}." msgstr "Limit must be at least 1 and may not be greater than {0}." msgid "Limit must be at least 1 and no greater than {0}." msgstr "Limit must be at least 1 and no greater than {0}." msgid "Malformed Redis URI" msgstr "Malformed Redis URI" msgid "Malformed hexadecimal UUID." msgstr "Malformed hexadecimal UUID." msgid "Message collection size is too large. Max size {0}" msgstr "Message collection size is too large. Max size {0}" msgid "" "Message collection size is too large. The max size for current queue is {0}. " "It is calculated by max size = min(max_messages_post_size_config: {1}, " "max_messages_post_size_queue: {2})." msgstr "" "Message collection size is too large. The max size for current queue is {0}. " "It is calculated by max size = min(max_messages_post_size_config: {1}, " "max_messages_post_size_queue: {2})." msgid "Message could not be deleted." msgstr "Message could not be deleted." msgid "Message could not be retrieved." msgstr "Message could not be retrieved." msgid "Messages could not be deleted." msgstr "Messages could not be deleted." msgid "Messages could not be enqueued." msgstr "Messages could not be enqueued." msgid "Messages could not be listed." msgstr "Messages could not be listed." msgid "Messages could not be popped." msgstr "Messages could not be popped." msgid "Metadata could not be updated." msgstr "Metadata could not be updated." #, python-format msgid "Method %s not found in any of the registered stages" msgstr "Method %s not found in any of the registered stages" msgid "Missing \"{name}\" field." msgstr "Missing \"{name}\" field." msgid "Missing host name in Redis URI" msgstr "Missing host name in Redis URI" #, python-format msgid "Missing parameter %s in body." msgstr "Missing parameter %s in body." msgid "Missing path in Redis URI" msgstr "Missing path in Redis URI" msgid "No messages could be enqueued." msgstr "No messages could be enqueued." msgid "No messages to enqueu." msgstr "No messages to enqueue." msgid "No messages were found in the request body." msgstr "No messages were found in the request body." msgid "" "No messages with IDs: {ids} found in the queue {queue} for project {project}." msgstr "" "No messages with IDs: {ids} found in the queue {queue} for project {project}." msgid "No subscription to create." msgstr "No subscription to create." msgid "Not authorized" msgstr "Not authorised" msgid "Not found" msgstr "Not found" msgid "Now Zaqar only support AES-256 and need to specify thekey." msgstr "Now Zaqar only supports AES-256 and need to specify the key." msgid "Operation \"{0}\" requires a member named \"value\"." msgstr "Operation \"{0}\" requires a member named \"value\"." msgid "Operations must be JSON objects." msgstr "Operations must be JSON objects." msgid "Options must be a dict." msgstr "Options must be a dict." msgid "PATCH body could not be empty for update." msgstr "PATCH body could not be empty for update." msgid "" "Pipeline to use for processing claim operations. This pipeline will be " "consumed before calling the storage driver's controller methods." msgstr "" "Pipeline to use for processing claim operations. This pipeline will be " "consumed before calling the storage driver's controller methods." msgid "" "Pipeline to use for processing message operations. This pipeline will be " "consumed before calling the storage driver's controller methods." msgstr "" "Pipeline to use for processing message operations. 
This pipeline will be " "consumed before calling the storage driver's controller methods." msgid "" "Pipeline to use for processing queue operations. This pipeline will be " "consumed before calling the storage driver's controller methods." msgstr "" "Pipeline to use for processing queue operations. This pipeline will be " "consumed before calling the storage driver's controller methods." msgid "" "Pipeline to use for processing subscription operations. This pipeline will " "be consumed before calling the storage driver's controller methods." msgstr "" "Pipeline to use for processing subscription operations. This pipeline will " "be consumed before calling the storage driver's controller methods." msgid "" "Pipeline to use for processing topic operations. This pipeline will be " "consumed before calling the storage driver's controller methods." msgstr "" "Pipeline to use for processing topic operations. This pipeline will be " "consumed before calling the storage driver's controller methods." msgid "Please try again in a few seconds." msgstr "Please try again in a few seconds." #, python-format msgid "Pointer `%s` contains \"~\" not part of a recognized escape sequence." msgstr "Pointer `%s` contains \"~\" not part of a recognised escape sequence." #, python-format msgid "Pointer `%s` contains adjacent \"/\"." msgstr "Pointer `%s` contains adjacent \"/\"." #, python-format msgid "Pointer `%s` does not contains valid token." msgstr "Pointer `%s` does not contains valid token." #, python-format msgid "Pointer `%s` does not start with \"/\"." msgstr "Pointer `%s` does not start with \"/\"." #, python-format msgid "Pointer `%s` end with \"/\"." msgstr "Pointer `%s` end with \"/\"." msgid "Pop value must be at least 1 and may not be greater than {0}." msgstr "Pop value must be at least 1 and may not be greater than {0}." msgid "Post body must contain key \"resource_types\"." msgstr "Post body must contain key \"resource_types\"." msgid "Project ids may not be more than {0} characters long." msgstr "Project ids may not be more than {0} characters long." #, python-format msgid "Queue %s could not be created." msgstr "Queue %s could not be created." #, python-format msgid "Queue %s could not be deleted." msgstr "Queue %s could not be deleted." #, python-format msgid "Queue %s created." msgstr "Queue %s created." #, python-format msgid "Queue %s does not exist." msgstr "Queue %s does not exist." #, python-format msgid "Queue %s removed." msgstr "Queue %s removed." msgid "Queue could not be created." msgstr "Queue could not be created." msgid "Queue could not be deleted." msgstr "Queue could not be deleted." msgid "Queue could not be purged." msgstr "Queue could not be purged." msgid "Queue could not be updated." msgstr "Queue could not be updated." msgid "Queue metadata could not be retrieved." msgstr "Queue metadata could not be retrieved." msgid "Queue metadata is too large. Max size: {0}" msgstr "Queue metadata is too large. Max size: {0}" msgid "Queue names may not be more than {0} characters long." msgstr "Queue names may not be more than {0} characters long." msgid "" "Queue names may only contain ASCII letters, digits, underscores, and dashes." msgstr "" "Queue names may only contain ASCII letters, digits, underscores, and dashes." msgid "Queue stats could not be read." msgstr "Queue stats could not be read." msgid "Queues could not be listed." msgstr "Queues could not be listed." 
msgid "Request body can not be empty" msgstr "Request body can not be empty" msgid "Request body could not be parsed." msgstr "Request body could not be parsed." msgid "Request body could not be read." msgstr "Request body could not be read." msgid "Request body must be a JSON array of operation objects." msgstr "Request body must be a JSON array of operation objects." msgid "" "Reserved queue attributes in metadata (which names start with \"_\") can not " "be set in API v1." msgstr "" "Reserved queue attributes in metadata (which names start with \"_\") can not " "be set in API v1." msgid "Resource conflict" msgstr "Resource conflict" msgid "Resource types must be a sub set of {0}." msgstr "Resource types must be a sub set of {0}." #, python-format msgid "Retry policy: %s must be a integer." msgstr "Retry policy: %s must be a integer." msgid "Service temporarily unavailable" msgstr "Service temporarily unavailable" #, python-format msgid "Serving on host %(bind)s:%(port)s" msgstr "Serving on host %(bind)s:%(port)s" #, python-format msgid "Stage %(stage)s does not implement %(method)s" msgstr "Stage %(stage)s does not implement %(method)s" #, python-format msgid "Subscription %(subscription)s for queue %(queue)s could not be deleted." msgstr "" "Subscription %(subscription)s for queue %(queue)s could not be deleted." #, python-format msgid "Subscription %(subscription)s for queue %(queue)s does not exist." msgstr "Subscription %(subscription)s for queue %(queue)s does not exist." #, python-format msgid "Subscription %(subscription_id)s could not be confirmed." msgstr "Subscription %(subscription_id)s could not be confirmed." #, python-format msgid "Subscription %(subscription_id)s could not be updated." msgstr "Subscription %(subscription_id)s could not be updated." #, python-format msgid "Subscription %s could not be created." msgstr "Subscription %s could not be created." #, python-format msgid "Subscription %s created." msgstr "Subscription %s created." #, python-format msgid "Subscription %s not created." msgstr "Subscription %s not created." #, python-format msgid "Subscription %s removed." msgstr "Subscription %s removed." msgid "Subscription could not be created." msgstr "Subscription could not be created." msgid "Subscription could not be deleted." msgstr "Subscription could not be deleted." msgid "Subscription could not be retrieved." msgstr "Subscription could not be retrieved." msgid "Subscriptions could not be listed." msgstr "Subscriptions could not be listed." msgid "Subscriptions must be a dict." msgstr "Subscriptions must be a dict." msgid "" "Such subscription already exists.Subscriptions are unique by project + queue " "+ subscriber URI." msgstr "" "Such subscription already exists.Subscriptions are unique by project + queue " "+ subscriber URI." msgid "TTL must be an integer." msgstr "TTL must be an integer." msgid "The 'confirmed' should be boolean." msgstr "The 'confirmed' should be boolean." msgid "" "The Delay TTL for a message may not exceed {0} seconds,and must be at least " "{1} seconds long." msgstr "" "The Delay TTL for a message may not exceed {0} seconds,and must be at least " "{1} seconds long." msgid "" "The Redis URI specifies multiple sentinel hosts, but is missing the \"master" "\" query string parameter. Please set \"master\" to the name of the Redis " "master server as specified in the sentinel configuration file." msgstr "" "The Redis URI specifies multiple sentinel hosts, but is missing the \"master" "\" query string parameter. 
Please set \"master\" to the name of the Redis " "master server as specified in the sentinel configuration file." msgid "The Redis configuration URI contains an invalid port" msgstr "The Redis configuration URI contains an invalid port" msgid "The Redis configuration URI does not define any sentinel hosts" msgstr "The Redis configuration URI does not define any sentinel hosts" #, python-format msgid "The Redis driver requires redis-server>=2.6, %s found" msgstr "The Redis driver requires redis-server>=2.6, %s found" msgid "" "The TTL can not exceed {0} seconds, and must be at least {1} seconds long." msgstr "" "The TTL can not exceed {0} seconds, and must be at least {1} seconds long." msgid "" "The TTL for a claim may not exceed {0} seconds, and must be at least {1} " "seconds long." msgstr "" "The TTL for a claim may not exceed {0} seconds, and must be at least {1} " "seconds long." msgid "" "The TTL for a message may not exceed {0} seconds, and must be at least {1} " "seconds long." msgstr "" "The TTL for a message may not exceed {0} seconds, and must be at least {1} " "seconds long." msgid "The TTL for a subscription must be at least {0} seconds long." msgstr "The TTL for a subscription must be at least {0} seconds long." msgid "" "The TTL seconds for a subscription plus current time must be less than {0}." msgstr "" "The TTL seconds for a subscription plus current time must be less than {0}." msgid "The format of the submitted queue name or project id is not valid." msgstr "The format of the submitted queue name or project id is not valid." msgid "The format of the submitted topic name or project id is not valid." msgstr "The format of the submitted topic name or project id is not valid." msgid "" "The grace for a claim may not exceed {0} seconds, and must be at least {1} " "seconds long." msgstr "" "The grace for a claim may not exceed {0} seconds, and must be at least {1} " "seconds long." msgid "The header X-PROJECT-ID was missing" msgstr "The header X-PROJECT-ID was missing" #, python-format msgid "The mongodb driver requires mongodb>=2.2, %s found" msgstr "The MongoDB driver requires mongodb>=2.2, %s found" msgid "" "The request should have both \"ids\" and \"claim_ids\" parameter in the " "request when message_delete_with_claim_id is True." msgstr "" "The request should have both \"ids\" and \"claim_ids\" parameter in the " "request when message_delete_with_claim_id is True." msgid "" "The request should have either \"ids\" or \"pop\" parameter in the request, " "to be able to delete." msgstr "" "The request should have either \"ids\" or \"pop\" parameter in the request, " "to be able to delete." msgid "The root of path must be metadata, e.g /metadata/key." msgstr "The root of path must be metadata, e.g /metadata/key." msgid "The specified claim does not exist or has expired." msgstr "The specified claim does not exist or has expired." msgid "The subscriber type of subscription must be supported in the list {0}." msgstr "The subscriber type of subscription must be supported in the list {0}." msgid "The value of the \"{name}\" field must be a {vtype}." msgstr "The value of the \"{name}\" field must be a {vtype}." msgid "This message is claimed; it cannot be deleted without a valid claim ID." msgstr "" "This message is claimed; it cannot be deleted without a valid claim ID." msgid "This pool is used by flavors {flavor}; It cannot be deleted." msgstr "This pool is used by flavours {flavor}; It cannot be deleted." msgid "Topic could not be created." 
msgstr "Topic could not be created." msgid "Topic could not be deleted." msgstr "Topic could not be deleted." msgid "Topic could not be purged." msgstr "Topic could not be purged." msgid "Topic could not be updated." msgstr "Topic could not be updated." msgid "Topic metadata could not be retrieved." msgstr "Topic metadata could not be retrieved." msgid "Topic names may not be more than {0} characters long." msgstr "Topic names may not be more than {0} characters long." msgid "" "Topic names may only contain ASCII letters, digits, underscores, and dashes." msgstr "" "Topic names may only contain ASCII letters, digits, underscores, and dashes." msgid "Topic stats could not be read." msgstr "Topic stats could not be read." msgid "Topics could not be listed." msgstr "Topics could not be listed." msgid "Unable to confirm subscription" msgstr "Unable to confirm subscription" msgid "Unable to create" msgstr "Unable to create" msgid "Unable to create pool" msgstr "Unable to create pool" msgid "Unable to delete" msgstr "Unable to delete" #, python-format msgid "Unable to find '%s' in JSON Schema change" msgstr "Unable to find '%s' in JSON Schema change" #, python-format msgid "" "Unable to find `op` in JSON Schema change. It must be one of the following: " "%(available)s." msgstr "" "Unable to find `op` in JSON Schema change. It must be one of the following: " "%(available)s." msgid "Unable to update subscription" msgstr "Unable to update subscription" msgid "Unexpected error." msgstr "Unexpected error." msgid "Unrecognized JSON Schema draft version" msgstr "Unrecognised JSON Schema draft version" msgid "" "Using a write concern other than `majority` or > 2 makes the service " "unreliable. Please use a different write concern or set `unreliable` to True " "in the config file." msgstr "" "Using a write concern other than `majority` or > 2 makes the service " "unreliable. Please use a different write concern or set `unreliable` to True " "in the config file." msgid "" "X-PROJECT-ID cannot be an empty string. Specify the right header X-PROJECT-" "ID and retry." msgstr "" "X-PROJECT-ID cannot be an empty string. Specify the right header X-PROJECT-" "ID and retry." msgid "You are not authorized to complete this action." msgstr "You are not authorised to complete this action." msgid "_dead_letter_queue_messages_ttl must be integer." msgstr "_dead_letter_queue_messages_ttl must be integer." msgid "_default_message_delay must be integer." msgstr "_default_message_delay must be integer." msgid "" "_default_message_ttl can not exceed {0} seconds, and must be at least {1} " "seconds long." msgstr "" "_default_message_ttl can not exceed {0} seconds, and must be at least {1} " "seconds long." msgid "_default_message_ttl must be integer." msgstr "_default_message_ttl must be integer." msgid "_enable_encrypt_messages must be boolean." msgstr "_enable_encrypt_messages must be boolean." msgid "_max_claim_count must be integer." msgstr "_max_claim_count must be integer." msgid "" "_max_messages_post_size can not exceed {0}, and must be at least greater " "than 0." msgstr "" "_max_messages_post_size can not exceed {0}, and must be at least greater " "than 0." msgid "_max_messages_post_size must be integer." msgstr "_max_messages_post_size must be integer." msgid "ids parameter should have at least 1 and not greater than {0} values." msgstr "ids parameter should have at least 1 and not greater than {0} values." msgid "ignore_subscription_override must be a boolean." 
msgstr "ignore_subscription_override must be a boolean." msgid "invalid minimum_delay and maximum_delay." msgstr "invalid minimum_delay and maximum_delay." msgid "invalid retry_backoff_function." msgstr "invalid retry_backoff_function." msgid "minimum_delay must less than maximum_delay." msgstr "minimum_delay must less than maximum_delay." msgid "policy File JSON to YAML Migration" msgstr "policy File JSON to YAML Migration" msgid "pop and id params cannot be present together in the delete request." msgstr "pop and id params cannot be present together in the delete request." msgid "register queue to pool: new flavor: None" msgstr "register queue to pool: new flavour: None" #, python-format msgid "register queue to pool: new flavor:%(flavor)s" msgstr "register queue to pool: new flavour:%(flavor)s" #, python-format msgid "" "register queue to pool: old flavor: %(oldflavor)s , new flavor: %(flavor)s" msgstr "" "register queue to pool: old flavour: %(oldflavor)s , new flavour: %(flavor)s" #, python-format msgid "register queue: project:%(project)s queue:%(queue)s pool:%(pool)s" msgstr "register queue: project:%(project)s queue:%(queue)s pool:%(pool)s" msgid "retry_backoff_function must be a string." msgstr "retry_backoff_function must be a string." msgid "retry_policy must be a dict." msgstr "retry_policy must be a dict." msgid "updatefail" msgstr "update fail" msgid "{0} is not a valid action" msgstr "{0} is not a valid action" ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5410137 zaqar-20.1.0.dev29/zaqar/locale/es/0000775000175100017510000000000015033040026015725 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5700135 zaqar-20.1.0.dev29/zaqar/locale/es/LC_MESSAGES/0000775000175100017510000000000015033040026017512 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/locale/es/LC_MESSAGES/zaqar.po0000664000175100017510000002734015033040005021173 0ustar00mylesmyles# Translations template for zaqar. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the zaqar project. # # Translators: # Adriana Chisco Landazábal , 2015 # Milton Mazzarri , 2014 # Pablo Sanchez , 2015 # Victoria Martínez de la Cruz , 2014 # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: zaqar VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2018-09-17 06:59+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 06:53+0000\n" "Last-Translator: Copied by Zanata \n" "Language: es\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Spanish\n" #, python-format msgid "" "%(attempts)d attempt(s) required to post %(num_messages)d messages to queue " "\"%(queue)s\" under project %(project)s" msgstr "" "%(attempts)d intento(s) requerido para publicar %(num_messages)d mensajes a " "la cola \"%(queue)s\" del proyecto %(project)s" msgid "A claim was specified, but the message is not currently claimed." msgstr "Se pudo especificar una solicitud pero no se ha solicitado mensaje." #, python-format msgid "Cannot retrieve queue %s stats." msgstr "No se puede recuperar estadísticas de cola %s." #, python-format msgid "Cannot retrieve queue %s." msgstr "No se puede recuperar cola %s." 
msgid "Claim could not be created." msgstr "No se pudo crear solicitud." msgid "Claim could not be deleted." msgstr "No se pudo eliminar solicitud." msgid "Claim could not be queried." msgstr "No se pudo realizar la solicitud." msgid "Claim could not be updated." msgstr "No se pudo actualizar solicitud." msgid "Doctype must be either a JSONObject or JSONArray" msgstr "Doctype debe ser un JSONObject o un JSONObject " msgid "Document type not supported." msgstr "Tipo de documento no soportado." msgid "" "Either a replica set or a mongos is required to guarantee message delivery" msgstr "" "Se requiere un conjunto de réplica o mongos para garantizar que el mensaje " "sea enviado" #, python-format msgid "" "Failed to increment the message counter for queue %(name)s and project " "%(project)s" msgstr "" "Fallo al incrementar el contador de mensajes para la cola %(name)s y " "proyecto %(project)s" #, python-format msgid "" "First attempt failed while adding messages to queue \"%(queue)s\" under " "project %(project)s" msgstr "" "Fallo el primer intento mientras agregaba mensajes a la cola \"%(queue)s\" " "del proyecto %(project)s" msgid "Health status could not be read." msgstr "No se pudo leer el estado de salud." msgid "Invalid API request" msgstr "Solicitud API no válida" msgid "Invalid queue identification" msgstr "Identificador de cola inválido" msgid "Invalid request body" msgstr "Cuerpo de solicitud es inválido" msgid "Invalid request." msgstr "Solicitud no válida" msgid "Invalid scheme in Redis URI" msgstr "Esquema en Redis URI no válido" msgid "JSON contains integer that is too large." msgstr "JSON contiene entero muy largo." msgid "Limit must be at least 1 and may not be greater than {0}." msgstr "Límite debe ser al menos 1 y no mayor que {0}." msgid "Limit must be at least 1 and no greater than {0}." msgstr "Límite debe ser al menos 1 y no mayor que {0}." msgid "Malformed Redis URI" msgstr "URI de Redis incorrecta" msgid "Malformed hexadecimal UUID." msgstr "Valor hexadecimal UUID mal formado." msgid "Message collection size is too large. Max size {0}" msgstr "El tamaño de la colección de mensajes es muy largo. Tamaño máximo: {0}" msgid "Message could not be deleted." msgstr "No se pudo eliminar mensaje." msgid "Message could not be retrieved." msgstr "No se pudo recuperar mensaje." msgid "Messages could not be deleted." msgstr "No se pudo eliminar mensajes." msgid "Messages could not be enqueued." msgstr "Np se pudo poner mensajes en cola." msgid "Messages could not be listed." msgstr "No se pudo listar mensajes." msgid "Messages could not be popped." msgstr "Los mensajes no se pudieron dirigir." msgid "Metadata could not be updated." msgstr "No se pudo actualizar metadatos." #, python-format msgid "Method %s not found in any of the registered stages" msgstr "El metodo %s no se ha encontrado en ninguna de las etapas registradas" msgid "Missing \"{name}\" field." msgstr "Campo \"{name}\" no presente." msgid "Missing host name in Redis URI" msgstr "Hace falta el nombre de host en Redis URI" #, python-format msgid "Missing parameter %s in body." msgstr "Falta parámetro %s en el cuerpo." msgid "Missing path in Redis URI" msgstr "Hace falta la ruta en Redis URI" msgid "No messages could be enqueued." msgstr "No hay mensajes para poner en cola." msgid "No messages to enqueu." msgstr "No hay mensajes para colocar en la cola." msgid "No messages were found in the request body." msgstr "No se encontraron mensajes en el cuerpo de solicitud." msgid "No subscription to create." 
msgstr "No hay suscripción para crear." msgid "Options must be a dict." msgstr "Las opciones deben ser un dict." msgid "Please try again in a few seconds." msgstr "Por favor intente de nuevo en unos segundos." msgid "Pop value must be at least 1 and may not be greater than {0}." msgstr "El valor pop debe ser al menos 1 y no debe ser mayor que {0}." msgid "Project ids may not be more than {0} characters long." msgstr "Los ids de proyecto no deben ocupar más de {0} caracteres. " #, python-format msgid "Queue %s could not be created." msgstr "No se pudo crear cola %s." #, python-format msgid "Queue %s could not be deleted." msgstr "No se pudo eliminar cola %s." #, python-format msgid "Queue %s created." msgstr "Se ha creado cola %s." #, python-format msgid "Queue %s does not exist." msgstr "No existe cola %s." #, python-format msgid "Queue %s removed." msgstr "Se ha eliminado cola %s." msgid "Queue could not be created." msgstr "No se pudo crear cola." msgid "Queue could not be deleted." msgstr "No se pudo eliminar cola." msgid "Queue metadata could not be retrieved." msgstr "No se pudo recuperar metadatos en cola." msgid "Queue metadata is too large. Max size: {0}" msgstr "Los metadatos de la cola son muy largos. Tamaño máximo: {0}" msgid "Queue names may not be more than {0} characters long." msgstr "Los nombres de colas no deben ocupar más de {0} caracteres." msgid "" "Queue names may only contain ASCII letters, digits, underscores, and dashes." msgstr "" "Los nombres de colas solo pueden contener caracteres ASCII, números, guiones " "bajos y guiones." msgid "Queue stats could not be read." msgstr "No se pueden leer las estadísticas de cola." msgid "Queues could not be listed." msgstr "No se pudo listar colas." msgid "Request body can not be empty" msgstr "Cuerpo de la solicitud no puede estar vacío" msgid "Request body could not be parsed." msgstr "Cuerpo de la solicitud no pudo ser analizado." msgid "Request body could not be read." msgstr "El cuerpo de la petición no pudo ser leído." msgid "Service temporarily unavailable" msgstr "Servicio no disponible temporalmente" #, python-format msgid "Serving on host %(bind)s:%(port)s" msgstr "Sirviendo en la máquina %(bind)s:%(port)s" #, python-format msgid "Stage %(stage)s does not implement %(method)s" msgstr "La etapa %(stage)s no implementa %(method)s" msgid "Subscription could not be created." msgstr "No se pudo crear suscripción." msgid "Subscription could not be deleted." msgstr "No se puede eliminar suscripción." msgid "Subscription could not be retrieved." msgstr "No se puede recuperar suscripción." msgid "Subscriptions could not be listed." msgstr "No se pueden listar suscripciones." msgid "TTL must be an integer." msgstr "TTL debe ser un entero." msgid "" "The Redis URI specifies multiple sentinel hosts, but is missing the \"master" "\" query string parameter. Please set \"master\" to the name of the Redis " "master server as specified in the sentinel configuration file." msgstr "" "El Redis URI especifica múltiples hosts de sentinel, pero hace falta el " "parámetro de secuencia de consulta \"master\". Por favor configure \"master" "\" en el nombre del servidor maestro Redis como se especifica en el fichero " "de configuración sentinel." 
msgid "The Redis configuration URI contains an invalid port" msgstr "La configuración URI de Redis contiene un puerto no válido" msgid "The Redis configuration URI does not define any sentinel hosts" msgstr "La configuración URI de Redis no define hosts de sentinel alguno" #, python-format msgid "The Redis driver requires redis-server>=2.6, %s found" msgstr "El controlador de Redis requiere edis-server>=2.6, %s encontrado" msgid "" "The TTL for a claim may not exceed {0} seconds, and must be at least {1} " "seconds long." msgstr "" "El TTL para una solicitud no debe exceder {0} segundos y debe durar al menos " "{1} segundos." msgid "" "The TTL for a message may not exceed {0} seconds, and must be at least {1} " "seconds long." msgstr "" "El TTL para un mensaje no debe exceder {0} segundos, y debe ser al menos de " "{1} segundos de largo." msgid "The format of the submitted queue name or project id is not valid." msgstr "" "El formato del nombre de la cola propuesto o el id del proyecto no es válido." msgid "" "The grace for a claim may not exceed {0} seconds, and must be at least {1} " "seconds long." msgstr "" "El periodo de gracia para una solicitud no debe exceder {0} segundos y debe " "durar al menos {1} segundos." msgid "The header X-PROJECT-ID was missing" msgstr "Faltaba la cabecera X-PROJECT-ID" #, python-format msgid "The mongodb driver requires mongodb>=2.2, %s found" msgstr "El driver mongodb requiere mongodb>=2.2, %s encontrado" msgid "" "The request should have either \"ids\" or \"pop\" parameter in the request, " "to be able to delete." msgstr "" "La solicitud debe contener parámetro \"ids\" o \"pop\" para que sea posible " "la eliminación." msgid "The specified claim does not exist or has expired." msgstr "La solicitud especificada no existe o ha expirado." msgid "The subscriber type of subscription must be supported in the list {0}." msgstr "" "El tipo de suscriptor en la suscripción debe ser soportado en la lista {0}." msgid "The value of the \"{name}\" field must be a {vtype}." msgstr "El valor del campo \"{name}\" debe ser {vtype}." msgid "This message is claimed; it cannot be deleted without a valid claim ID." msgstr "" "El mensaje está solicitado; no se puede borrar sin un identificador de " "solicitud válido." msgid "This pool is used by flavors {flavor}; It cannot be deleted." msgstr "Los tipos {flavor} usan este pool; no puede eliminarse." msgid "Unable to create" msgstr "No se puede crear" msgid "Unable to delete" msgstr "No se puede eliminar" msgid "Unable to update subscription" msgstr "No se puede actualizar suscripción" msgid "Unexpected error." msgstr "Error inesperado." msgid "" "Using a write concern other than `majority` or > 2 makes the service " "unreliable. Please use a different write concern or set `unreliable` to True " "in the config file." msgstr "" "Emplear un asunto escrito diferente a `majority` o > 2 hace que el servicio " "sea desconfiable. Por favor emplee un asunto diferente o configure " "`unreliable` como True en el fichero de configuración." msgid "ids parameter should have at least 1 and not greater than {0} values." msgstr "" "el parámetro de identificador debe tener al menos entre 1 y {0} valores. " msgid "pop and id params cannot be present together in the delete request." msgstr "" "Los parámetros pop e identificador no pueden estar presentes en la solicitud " "de eliminación." 
msgid "{0} is not a valid action" msgstr "{0} no es una acción valida" ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5410137 zaqar-20.1.0.dev29/zaqar/locale/id/0000775000175100017510000000000015033040026015712 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5700135 zaqar-20.1.0.dev29/zaqar/locale/id/LC_MESSAGES/0000775000175100017510000000000015033040026017477 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/locale/id/LC_MESSAGES/zaqar.po0000664000175100017510000005713415033040005021164 0ustar00mylesmyles# suhartono , 2018. #zanata # Frank Kloeker , 2019. #zanata # suhartono , 2019. #zanata msgid "" msgstr "" "Project-Id-Version: zaqar VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2023-12-29 01:48+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2019-04-15 05:52+0000\n" "Last-Translator: Frank Kloeker \n" "Language-Team: Indonesian\n" "Language: id\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=1; plural=0\n" #, python-format msgid "" "%(attempts)d attempt(s) required to post %(num_messages)d messages to queue " "\"%(queue)s\" under project %(project)s" msgstr "" "%(attempts)d attempt(s) wajib memposting %(num_messages)d pesan ke antrian " "\"%(queue)s\" di bawah proyek %(project)s" msgid "A claim was specified, but the message is not currently claimed." msgstr "Klaim telah ditentukan, tetapi pesan saat ini tidak diklaim." #, python-format msgid "Accepted media type for PATCH: %s." msgstr "Tipe media yang diterima untuk PATCH: %s." msgid "Can't make confirmation email body, need a valid confirm url." msgstr "" "Tidak dapat membuat badan email konfirmasi (confirmation email body), " "memerlukan url konfirmasi yang valid." #, python-format msgid "Can't remove non-existent object %s." msgstr "Tidak dapat dihapus non-existent object %s." #, python-format msgid "Can't replace non-existent object %s." msgstr "Tidak dapat mengganti non-existent object %s." #, python-format msgid "Cannot retrieve queue %s stats." msgstr "Tidak dapat mengambil antrian %s stats." #, python-format msgid "Cannot retrieve queue %s." msgstr "Tidak dapat mengambil antrian %s." #, python-format msgid "Cannot retrieve subscription %s." msgstr "Tidak dapat mengambil subscription %s." #, python-format msgid "CatalogueController:_update %(prj)s:%(queue)s:%(pool)s" msgstr "CatalogueController:_update %(prj)s:%(queue)s:%(pool)s" #, python-format msgid "CatalogueController:_update %(prj)s:%(queue)s:%(pool)s failed" msgstr "CatalogueController:_update %(prj)s:%(queue)s:%(pool)s gagal" #, python-format msgid "CatalogueController:delete %(prj)s:%(queue)s failed" msgstr "CatalogueController:delete %(prj)s:%(queue)s gagal" #, python-format msgid "CatalogueController:delete %(prj)s:%(queue)s success" msgstr "CatalogueController:delete %(prj)s:%(queue)s sucks" #, python-format msgid "CatalogueController:insert %(prj)s:%(queue)s %(pool)s failed" msgstr "CatalogueController:insert %(prj)s:%(queue)s %(pool)s gagal" #, python-format msgid "CatalogueController:insert %(prj)s:%(queue)s:%(pool)s, success" msgstr "CatalogueController:insert %(prj)s:%(queue)s:%(pool)s, sukses" #, python-format msgid "Claim %s deleted." msgstr "Klaim %s dihapus." #, python-format msgid "Claim %s does not exist." 
msgstr "Klaim %s tidak ada." #, python-format msgid "Claim %s updated." msgstr "Klaim %s diperbarui." msgid "Claim could not be created." msgstr "Klaim tidak dapat dibuat." msgid "Claim could not be deleted." msgstr "Klaim tidak dapat dihapus." msgid "Claim could not be queried." msgstr "Klaim tidak dapat ditanyakan." msgid "Claim could not be updated." msgstr "Klaim tidak dapat diperbarui." msgid "Doctype must be either a JSONObject or JSONArray" msgstr "Doctype harus berupa JSONObject atau JSONArray" msgid "Document type not supported." msgstr "Tipe dokumen tidak didukung." msgid "" "Either a replica set or a mongos is required to guarantee message delivery" msgstr "" "Salah satu set replika atau mongo diperlukan untuk menjamin pengiriman pesan" msgid "" "Endpoint does not accept `application/x-www-form-urlencoded` content; " "currently supported media type is `application/json`; specify proper client-" "side media type with the \"Content-Type\" header." msgstr "" "Endpoint tidak menerima konten `application/x-www-form-urlencoded`; jenis " "media yang saat ini didukung adalah `aplikasi/json`; tentukan jenis media " "sisi klien yang tepat dengan header \"Content-Type\"." #, python-format msgid "" "Failed to increment the message counter for queue %(name)s and project " "%(project)s" msgstr "" "Gagal menaikkan penghitung pesan untuk antrean %(name)s dan proyek " "%(project)s" #, python-format msgid "" "First attempt failed while adding messages to queue \"%(queue)s\" under " "project %(project)s" msgstr "" "Upaya pertama gagal saat menambahkan pesan ke antrean \"%(queue)s\" di bawah " "proyek %(project)s" #, python-format msgid "Flavor %(flavor)s cant be updated, error:%(msg)s" msgstr "Flavor %(flavor)s tidak dapat diperbarui, error:%(msg)s" #, python-format msgid "Flavor %(flavor)s could not be created, error:%(msg)s" msgstr "Flavor %(flavor)s tidak dapat dibuat, error:%(msg)s" #, python-format msgid "Flavor %(flavor)s could not be deleted." msgstr "Flavor %(flavor)s tidak bisa dihapus." #, python-format msgid "Flavor %(flavor)s could not be updated, error:%(msg)s" msgstr "Flavor %(flavor)s tidak dapat diperbarui, error:%(msg)s" msgid "Health status could not be read." msgstr "Status kesehatan tidak bisa dibaca." msgid "Invalid API request" msgstr "Permintaan API tidak valid" msgid "Invalid Content-Type" msgstr "Content-Type tidak Valid" #, python-format msgid "Invalid JSON pointer for this resource: '/%s, e.g /metadata/key'" msgstr "" "Pointer JSON tidak valid untuk sumber daya ini: '/%s, e.g /metadata/key'" #, python-format msgid "" "Invalid operation: `%(op)s`. It must be one of the following: %(available)s." msgstr "" "Operasi tidak valid: `%(op)s`. Itu harus salah satu dari yang berikut: " "%(available)s." msgid "Invalid queue identification" msgstr "Identifikasi antrian tidak valid" msgid "Invalid request body" msgstr "Body permintaan tidak valid" msgid "Invalid request." msgstr "Permintaan tidak valid." msgid "Invalid scheme in Redis URI" msgstr "Skema tidak valid dalam Redis URI" msgid "JSON contains integer that is too large." msgstr "JSON berisi bilangan integer yang terlalu besar." msgid "Length of client id must be at least {0} and no greater than {1}." msgstr "Panjang id klien harus setidaknya {0} dan tidak lebih besar dari {1}." msgid "Limit must be at least 1 and may not be greater than {0}." msgstr "Batas harus minimal 1 dan mungkin tidak lebih besar dari {0}." msgid "Limit must be at least 1 and no greater than {0}." 
msgstr "Batas harus minimal 1 dan tidak lebih besar dari {0}." msgid "Malformed Redis URI" msgstr "Redis URI berubah bentuk" msgid "Malformed hexadecimal UUID." msgstr "UUID hexadecimal salah." msgid "Message collection size is too large. Max size {0}" msgstr "Ukuran koleksi pesan terlalu besar. Ukuran maksimum {0}" msgid "" "Message collection size is too large. The max size for current queue is {0}. " "It is calculated by max size = min(max_messages_post_size_config: {1}, " "max_messages_post_size_queue: {2})." msgstr "" "Ukuran koleksi pesan terlalu besar. Ukuran maksimal untuk antrean saat ini " "adalah {0}. Ini dihitung dengan ukuran max = " "min(max_messages_post_size_config: {1}, max_messages_post_size_queue: {2})." msgid "Message could not be deleted." msgstr "Pesan tidak dapat dihapus." msgid "Message could not be retrieved." msgstr "Pesan tidak dapat diambil." msgid "Messages could not be deleted." msgstr "Pesan tidak dapat dihapus." msgid "Messages could not be enqueued." msgstr "Pesan tidak bisa diantisipasi." msgid "Messages could not be listed." msgstr "Pesan tidak dapat dicantumkan." msgid "Messages could not be popped." msgstr "Pesan tidak bisa muncul." msgid "Metadata could not be updated." msgstr "Metadata tidak dapat diperbarui." #, python-format msgid "Method %s not found in any of the registered stages" msgstr "Metode %s tidak ditemukan di salah satu tahapan yang terdaftar" msgid "Missing \"{name}\" field." msgstr "Field \"{name}\" tidak ada." msgid "Missing host name in Redis URI" msgstr "Nama host tidak ada di Redis URI" #, python-format msgid "Missing parameter %s in body." msgstr "Tidak ada parameter %s dalam body." msgid "Missing path in Redis URI" msgstr "Path tidak ada di Redis URI" msgid "No messages could be enqueued." msgstr "Tidak ada pesan yang bisa dimasukkan." msgid "No messages to enqueu." msgstr "Tidak ada pesan untuk enqueue." msgid "No messages were found in the request body." msgstr "Tidak ada pesan yang ditemukan di badan permintaan (request body)" msgid "" "No messages with IDs: {ids} found in the queue {queue} for project {project}." msgstr "" "Tidak ada pesan dengan IDs: {ids} ditemukan di antrian {queue} untuk proyek " "{proyek}." msgid "No subscription to create." msgstr "Tidak ada langganan untuk dibuat." msgid "Not authorized" msgstr "Tidak diizinkan" msgid "Not found" msgstr "Tidak ditemukan" msgid "Operation \"{0}\" requires a member named \"value\"." msgstr "Operation \"{0}\" membutuhkan anggota bernama \"value\"." msgid "Operations must be JSON objects." msgstr "Operasi harus menjadi objek JSON." msgid "Options must be a dict." msgstr "Pilihan harus berupa dict." msgid "PATCH body could not be empty for update." msgstr "PATCH body tidak boleh kosong untuk pembaruan." msgid "" "Pipeline to use for processing claim operations. This pipeline will be " "consumed before calling the storage driver's controller methods." msgstr "" "Pipeline digunakan untuk memproses operasi klaim. Pipeline ini akan " "dikonsumsi sebelum memanggil metode pengendali driver penyimpanan (storage " "driver's controller methods)." msgid "" "Pipeline to use for processing message operations. This pipeline will be " "consumed before calling the storage driver's controller methods." msgstr "" "Pipeline digunakan untuk memproses operasi pesan. Pipeline ini akan " "dikonsumsi sebelum memanggil metode pengendali driver penyimpanan (storage " "driver's controller methods)." msgid "" "Pipeline to use for processing queue operations. 
This pipeline will be " "consumed before calling the storage driver's controller methods." msgstr "" "Pipeline digunakan untuk memproses operasi antrian. Pipeline ini akan " "dikonsumsi sebelum memanggil metode pengendali driver penyimpanan (storage " "driver's controller methods)." msgid "" "Pipeline to use for processing subscription operations. This pipeline will " "be consumed before calling the storage driver's controller methods." msgstr "" "Pipeline digunakan untuk memproses operasi langganan. Pipeline ini akan " "dikonsumsi sebelum memanggil metode pengendali driver penyimpanan (storage " "driver's controller methods)." msgid "Please try again in a few seconds." msgstr "Silakan coba lagi dalam beberapa detik." #, python-format msgid "Pointer `%s` contains \"~\" not part of a recognized escape sequence." msgstr "Pointer `%s` berisi \"~\" bukan bagian dari urutan pelarian yang syah." #, python-format msgid "Pointer `%s` contains adjacent \"/\"." msgstr "Pointer `%s` berisi yang berdekatan \"/\"." #, python-format msgid "Pointer `%s` does not contains valid token." msgstr "Pointer `%s` tidak mengandung token yang valid." #, python-format msgid "Pointer `%s` does not start with \"/\"." msgstr "Pointer `%s` tidak dimulai dengan \"/\"." #, python-format msgid "Pointer `%s` end with \"/\"." msgstr "Pointer `%s` berakhir dengan \"/\"." msgid "Pop value must be at least 1 and may not be greater than {0}." msgstr "Nilai Pop minimal harus 1 dan mungkin tidak lebih besar dari {0}." msgid "Post body must contain key \"resource_types\"." msgstr "Isi postingan harus berisi key \"resource_types\"." msgid "Project ids may not be more than {0} characters long." msgstr "ID proyek tidak boleh lebih dari {0} karakter." #, python-format msgid "Queue %s could not be created." msgstr "Antrean %s tidak dapat dibuat." #, python-format msgid "Queue %s could not be deleted." msgstr "Antrean %s tidak dapat dihapus." #, python-format msgid "Queue %s created." msgstr "Antrean %s dibuat." #, python-format msgid "Queue %s does not exist." msgstr "Antrean %s tidak ada." #, python-format msgid "Queue %s removed." msgstr "Antrean %s dihapus." msgid "Queue could not be created." msgstr "Antrean tidak dapat dibuat." msgid "Queue could not be deleted." msgstr "Antrean tidak dapat dihapus." msgid "Queue could not be purged." msgstr "Antrean tidak dapat dibersihkan." msgid "Queue could not be updated." msgstr "Antrean tidak dapat diperbarui." msgid "Queue metadata could not be retrieved." msgstr "Metadata antrian tidak dapat diambil." msgid "Queue metadata is too large. Max size: {0}" msgstr "Queue metadata terlalu besar. Ukuran maksimum: {0}" msgid "Queue names may not be more than {0} characters long." msgstr "Nama antrean tidak boleh lebih dari {0} karakter." msgid "" "Queue names may only contain ASCII letters, digits, underscores, and dashes." msgstr "" "Nama antrean hanya boleh berisi huruf ASCII, angka, underscore, dan tanda " "hubung." msgid "Queue stats could not be read." msgstr "Statistik antrian tidak dapat dibaca." msgid "Queues could not be listed." msgstr "Antrean tidak dapat dicantumkan." msgid "Request body can not be empty" msgstr "Body permintaan tidak boleh kosong" msgid "Request body could not be parsed." msgstr "Body permintaan tidak dapat diuraikan." msgid "Request body could not be read." msgstr "Body permintaan tidak dapat dibaca." msgid "Request body must be a JSON array of operation objects." msgstr "Request body harus merupakan JSON array dari objek operasi." 
msgid "" "Reserved queue attributes in metadata (which names start with \"_\") can not " "be set in API v1." msgstr "" "Atribut antrian yang dicadangkan dalam metadata (yang namanya dimulai dengan " "\"_\") tidak dapat disetel di API v1." msgid "Resource conflict" msgstr "Konflik sumber daya" msgid "Resource types must be a sub set of {0}." msgstr "Tipe sumber daya harus berupa sub-set {0}." #, python-format msgid "Retry policy: %s must be a integer." msgstr "Retry policy: %s harus berupa integer" msgid "Service temporarily unavailable" msgstr "Layanan sementara tidak tersedia" #, python-format msgid "Serving on host %(bind)s:%(port)s" msgstr "Melayani di host %(bind)s:%(port)s" #, python-format msgid "Stage %(stage)s does not implement %(method)s" msgstr "Tahap %(stage)s tidak mengimplementasikan %(method)s" #, python-format msgid "Subscription %(subscription)s for queue %(queue)s could not be deleted." msgstr "" "Berlangganan %(subscription)s untuk antrean %(queue)s tidak dapat dihapus." #, python-format msgid "Subscription %(subscription)s for queue %(queue)s does not exist." msgstr "Berlangganan %(subscription)s untuk antrian %(queue)s tidak ada." #, python-format msgid "Subscription %(subscription_id)s could not be confirmed." msgstr "Berlangganan %(subscription_id)s tidak dapat dikonfirmasi." #, python-format msgid "Subscription %(subscription_id)s could not be updated." msgstr "Langganan %(subscription_id)s tidak bisa diperbarui." #, python-format msgid "Subscription %s could not be created." msgstr "Langganan %s tidak dapat dibuat." #, python-format msgid "Subscription %s created." msgstr "Langganan %s dibuat." #, python-format msgid "Subscription %s not created." msgstr "Berlangganan %s tidak dibuat." #, python-format msgid "Subscription %s removed." msgstr "Langganan %s dihapus." msgid "Subscription could not be created." msgstr "Langganan tidak dapat dibuat." msgid "Subscription could not be deleted." msgstr "Langganan tidak dapat dihapus." msgid "Subscription could not be retrieved." msgstr "Langganan tidak dapat diambil." msgid "Subscriptions could not be listed." msgstr "Langganan tidak dapat dicantumkan." msgid "Subscriptions must be a dict." msgstr "Langganan harus berupa dict." msgid "" "Such subscription already exists.Subscriptions are unique by project + queue " "+ subscriber URI." msgstr "" "Langganan semacam itu sudah ada. Langganan adalah unik dengan proyek + " "antrian + URI pelanggan." msgid "TTL must be an integer." msgstr "TTL harus berupa bilangan integer." msgid "The 'confirmed' should be boolean." msgstr "Yang 'dikonfirmasi' harus boolean." msgid "" "The Delay TTL for a message may not exceed {0} seconds,and must be at least " "{1} seconds long." msgstr "" "Delay TTLn untuk pesan tidak boleh lebih dari {0} detik, dan setidaknya " "harus sepanjang {1} detik." msgid "" "The Redis URI specifies multiple sentinel hosts, but is missing the \"master" "\" query string parameter. Please set \"master\" to the name of the Redis " "master server as specified in the sentinel configuration file." msgstr "" "Redis URI menentukan beberapa host sentinel, tetapi tidak memiliki parameter " "string kueri \"master\". Silakan set \"master\" ke nama server master Redis " "seperti yang ditentukan dalam file konfigurasi sentinel." 
msgid "The Redis configuration URI contains an invalid port" msgstr "URI konfigurasi Redis berisi port yang tidak valid" msgid "The Redis configuration URI does not define any sentinel hosts" msgstr "URI konfigurasi Redis tidak mendefinisikan host sentinel apa pun" #, python-format msgid "The Redis driver requires redis-server>=2.6, %s found" msgstr "Driver Redis membutuhkan redis-server>=2.6, %s ditemukan" msgid "" "The TTL can not exceed {0} seconds, and must be at least {1} seconds long." msgstr "" "TTL tidak dapat melebihi {0} detik, dan setidaknya harus sepanjang {1} detik." msgid "" "The TTL for a claim may not exceed {0} seconds, and must be at least {1} " "seconds long." msgstr "" "TTL untuk klaim tidak boleh melebihi {0} detik, dan setidaknya harus " "sepanjang {1} detik." msgid "" "The TTL for a message may not exceed {0} seconds, and must be at least {1} " "seconds long." msgstr "" "TTL untuk pesan tidak boleh melebihi {0} detik, dan setidaknya harus " "sepanjang {1} detik." msgid "The TTL for a subscription must be at least {0} seconds long." msgstr "TTL untuk berlangganan harus setidaknya berdurasi {0} detik." msgid "" "The TTL seconds for a subscription plus current time must be less than {0}." msgstr "" "TTL detik untuk langganan ditambah waktu saat ini harus kurang dari {0}." msgid "The format of the submitted queue name or project id is not valid." msgstr "Format nama antrian yang dimasukkan atau id proyek tidak valid." msgid "" "The grace for a claim may not exceed {0} seconds, and must be at least {1} " "seconds long." msgstr "" "Grace (kelonggaran) untuk klaim tidak boleh melebihi {0} detik, dan " "setidaknya harus sepanjang {1} detik." msgid "The header X-PROJECT-ID was missing" msgstr "Header X-PROJECT-ID hilang" #, python-format msgid "The mongodb driver requires mongodb>=2.2, %s found" msgstr "Driver mongodb membutuhkan mongodb>=2.2, %s ditemukan" msgid "" "The request should have both \"ids\" and \"claim_ids\" parameter in the " "request when message_delete_with_claim_id is True." msgstr "" "Permintaan harus memiliki parameter \"ids\" dan \"claim_ids\" dalam " "permintaan ketika message_delete_with_claim_id True." msgid "" "The request should have either \"ids\" or \"pop\" parameter in the request, " "to be able to delete." msgstr "" "Permintaan harus memiliki parameter \"id\" atau \"pop\" dalam permintaan, " "agar dapat menghapus." msgid "The root of path must be metadata, e.g /metadata/key." msgstr "Root of path harus berupa metadata, e.g /metadata/key." msgid "The specified claim does not exist or has expired." msgstr "Klaim yang ditentukan tidak ada atau telah kedaluwarsa." msgid "The subscriber type of subscription must be supported in the list {0}." msgstr "Tipe langganan pelanggan harus didukung dalam daftar {0}." msgid "The value of the \"{name}\" field must be a {vtype}." msgstr "Nilai field \"{name}\" harus {vtype}." msgid "This message is claimed; it cannot be deleted without a valid claim ID." msgstr "Pesan ini diklaim; tidak dapat dihapus tanpa ID klaim yang valid." msgid "This pool is used by flavors {flavor}; It cannot be deleted." msgstr "Pool ini digunakan oleh flavors {flavor}; Itu tidak bisa dihapus." 
msgid "Unable to confirm subscription" msgstr "Tidak dapat mengonfirmasi langganan" msgid "Unable to create" msgstr "Tidak dapat membuat" msgid "Unable to create pool" msgstr "Tidak dapat membuat pool" msgid "Unable to delete" msgstr "Tidak dapat menghapus" #, python-format msgid "Unable to find '%s' in JSON Schema change" msgstr "Tidak dapat menemukan '%s' di perubahan JSON Schema" #, python-format msgid "" "Unable to find `op` in JSON Schema change. It must be one of the following: " "%(available)s." msgstr "" "Tidak dapat menemukan `op` di perubahan JSON Schema. Itu harus salah satu " "dari yang berikut: %(available)s." msgid "Unable to update subscription" msgstr "Tidak dapat memperbarui langganan" msgid "Unexpected error." msgstr "Kesalahan tak terduga." msgid "Unrecognized JSON Schema draft version" msgstr "Versi draf JSON Schema tidak dikenali" msgid "" "Using a write concern other than `majority` or > 2 makes the service " "unreliable. Please use a different write concern or set `unreliable` to True " "in the config file." msgstr "" "Menggunakan kekhawatiran tulis selain dari `majority` atau > 2 membuat " "layanan tidak dapat diandalkan. Harap gunakan kekhawatiran tulis yang " "berbeda atau setel `unreliable` ke True di file konfigurasi." msgid "" "X-PROJECT-ID cannot be an empty string. Specify the right header X-PROJECT-" "ID and retry." msgstr "" "X-PROJECT-ID tidak dapat berupa string kosong. Tentukan header kanan X-" "PROJECT-ID dan coba lagi." msgid "You are not authorized to complete this action." msgstr "Anda tidak berwenang untuk menyelesaikan tindakan ini." msgid "_dead_letter_queue_messages_ttl must be integer." msgstr "_dead_letter_queue_messages_ttl harus berupa bilangan integer." msgid "_default_message_delay must be integer." msgstr "_default_message_delay harus berupa bilangan integer." msgid "" "_default_message_ttl can not exceed {0} seconds, and must be at least {1} " "seconds long." msgstr "" "_default_message_ttl tidak dapat melebihi {0} detik, dan setidaknya harus " "sepanjang {1} detik." msgid "_default_message_ttl must be integer." msgstr "_default_message_ttl harus berupa integer." msgid "_max_claim_count must be integer." msgstr "_max_claim_count harus integer." msgid "" "_max_messages_post_size can not exceed {0}, and must be at least greater " "than 0." msgstr "" "_max_messages_post_size tidak boleh melebihi {0}, dan harus setidaknya lebih " "besar dari 0." msgid "_max_messages_post_size must be integer." msgstr "_max_messages_post_size harus integer." msgid "ids parameter should have at least 1 and not greater than {0} values." msgstr "" "Parameter id harus memiliki setidaknya 1 dan tidak lebih besar dari {0} " "nilai." msgid "ignore_subscription_override must be a boolean." msgstr "ignore_subscription_override harus berupa boolean." msgid "invalid minimum_delay and maximum_delay." msgstr "minimum_delaydan maksimum_delay tidak valid." msgid "invalid retry_backoff_function." msgstr "retry_backoff_function tidak valid." msgid "minimum_delay must less than maximum_delay." msgstr "minimum_delay harus kurang dari maximum_delay." msgid "pop and id params cannot be present together in the delete request." msgstr "pop dan id params tidak dapat hadir bersama dalam permintaan delete." 
msgid "register queue to pool: new flavor: None" msgstr "daftar antrian ke pool: new flavor: None" #, python-format msgid "" "register queue to pool: old flavor: %(oldflavor)s , new flavor: %(flavor)s" msgstr "" "daftar antrian ke pool: old flavor: %(oldflavor)s , new flavor: %(flavor)s" #, python-format msgid "register queue: project:%(project)s queue:%(queue)s pool:%(pool)s" msgstr "daftar antrian: project:%(project)s queue:%(queue)s pool:%(pool)s" msgid "retry_backoff_function must be a string." msgstr "retry_backoff_function harus berupa string." msgid "retry_policy must be a dict." msgstr "retry_policy harus berupa perintah." msgid "updatefail" msgstr "updatefail" msgid "{0} is not a valid action" msgstr "{0} bukan tindakan yang valid" ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5710135 zaqar-20.1.0.dev29/zaqar/notification/0000775000175100017510000000000015033040026016545 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/notification/__init__.py0000664000175100017510000000000015033040005020641 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/notification/notifier.py0000664000175100017510000001603415033040005020737 0ustar00mylesmyles# Copyright (c) 2015 Catalyst IT Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import enum from stevedore import driver import futurist from oslo_log import log as logging from urllib import parse as urllib_parse from zaqar.common import auth from zaqar.common import urls from zaqar.storage import pooling LOG = logging.getLogger(__name__) @enum.unique class MessageType(enum.IntEnum): """Enum of message type.""" SubscriptionConfirmation = 1 UnsubscribeConfirmation = 2 Notification = 3 class NotifierDriver(object): """Notifier which is responsible for sending messages to subscribers. 
""" def __init__(self, *args, **kwargs): self.subscription_controller = kwargs.get('subscription_controller') max_workers = kwargs.get('max_notifier_workers', 10) self.executor = futurist.ThreadPoolExecutor(max_workers=max_workers) self.require_confirmation = kwargs.get('require_confirmation', False) self.queue_controller = kwargs.get('queue_controller') def post(self, queue_name, messages, client_uuid, project=None): """Send messages to the subscribers.""" if self.subscription_controller: if not isinstance(self.subscription_controller, pooling.SubscriptionController): marker = None queue_metadata = self.queue_controller.get(queue_name, project) retry_policy = queue_metadata.get('_retry_policy', {}) while True: subscribers = self.subscription_controller.list( queue_name, project, marker=marker) for sub in next(subscribers): LOG.debug("Notifying subscriber %r", (sub,)) s_type = urllib_parse.urlparse( sub['subscriber']).scheme # If the subscriber doesn't contain 'confirmed', it # means that this kind of subscriber was created before # the confirm feature be introduced into Zaqar. We # should allow them be subscribed. if (self.require_confirmation and not sub.get('confirmed', True)): LOG.info('The subscriber %s is not ' 'confirmed.', sub['subscriber']) continue for msg in messages: msg['Message_Type'] = MessageType.Notification.name self._execute(s_type, sub, messages, retry_policy=retry_policy) marker = next(subscribers) if not marker: break else: LOG.error('Failed to get subscription controller.') def send_confirm_notification(self, queue, subscription, conf, project=None, expires=None, api_version=None, is_unsubscribed=False): # NOTE(flwang): If the confirmation feature isn't enabled, just do # nothing. Here we're getting the require_confirmation from conf # object instead of using self.require_confirmation, because the # variable from self object really depends on the kwargs when # initializing the NotifierDriver object. See bug 1655812 for more # information. if not conf.notification.require_confirmation: return key = conf.signed_url.secret_key if not key: LOG.error("Can't send confirm notification due to the value of" " secret_key option is None") return url = "/%s/queues/%s/subscriptions/%s/confirm" % (api_version, queue, subscription['id']) pre_url = urls.create_signed_url(key, [url], project=project, expires=expires, methods=['PUT']) message = None if is_unsubscribed: message_type = MessageType.UnsubscribeConfirmation.name message = ('You have unsubscribed successfully to the queue: %s, ' 'you can resubscribe it by using confirmed=True.' 
% queue) else: message_type = MessageType.SubscriptionConfirmation.name message = 'You have chosen to subscribe to the queue: %s' % queue messages = {} endpoint_dict = auth.get_public_endpoint() if endpoint_dict: wsgi_endpoint = endpoint_dict.get('zaqar') if wsgi_endpoint: wsgi_subscribe_url = urllib_parse.urljoin( wsgi_endpoint, url) messages['WSGISubscribeURL'] = wsgi_subscribe_url websocket_endpoint = endpoint_dict.get('zaqar-websocket') if websocket_endpoint: websocket_subscribe_url = urllib_parse.urljoin( websocket_endpoint, url) messages['WebSocketSubscribeURL'] = websocket_subscribe_url messages.update({'Message_Type': message_type, 'Message': message, 'URL-Signature': pre_url['signature'], 'URL-Methods': pre_url['methods'][0], 'URL-Paths': pre_url['paths'][0], 'X-Project-ID': pre_url['project'], 'URL-Expires': pre_url['expires'], 'SubscribeBody': {'confirmed': True}, 'UnsubscribeBody': {'confirmed': False}}) s_type = urllib_parse.urlparse(subscription['subscriber']).scheme LOG.info('Begin to send %(type)s confirm/unsubscribe notification.' ' The request body is %(messages)s', {'type': s_type, 'messages': messages}) self._execute(s_type, subscription, [messages], conf) def _execute(self, s_type, subscription, messages, conf=None, retry_policy=None): if self.subscription_controller: data_driver = self.subscription_controller.driver conf = data_driver.conf else: conf = conf mgr = driver.DriverManager('zaqar.notification.tasks', s_type, invoke_on_load=True) self.executor.submit(mgr.driver.execute, subscription, messages, conf=conf, queue_retry_policy=retry_policy) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5710135 zaqar-20.1.0.dev29/zaqar/notification/tasks/0000775000175100017510000000000015033040026017672 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/notification/tasks/__init__.py0000664000175100017510000000000015033040005021766 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/notification/tasks/mailto.py0000664000175100017510000001400415033040005021525 0ustar00mylesmyles# Copyright (c) 2015 Catalyst IT Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
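# NOTE(editor): a minimal sketch (not part of the original module; the
# address and subject are illustrative) of the subscriber URI shape that
# MailtoTask.execute() below consumes. The recipient comes from the URI
# path, and optional per-subscriber settings come from the query string:
#
#   from urllib import parse as urllib_parse
#
#   subscriber = urllib_parse.urlparse('mailto:ops@example.com?subject=alert')
#   subscriber.scheme                        # 'mailto' -> selects this task
#   subscriber.path                          # 'ops@example.com' -> "to" header
#   urllib_parse.parse_qs(subscriber.query)  # {'subject': ['alert']}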
from email.mime import text import smtplib from urllib import parse as urllib_parse from oslo_concurrency import processutils from oslo_log import log as logging from oslo_serialization import jsonutils from zaqar.i18n import _ from zaqar.notification.notifier import MessageType LOG = logging.getLogger(__name__) class MailtoTask(object): def _make_confirm_string(self, conf_n, message, queue_name): confirm_url = conf_n.external_confirmation_url if confirm_url is None: msg = _("Can't make confirmation email body, need a valid " "confirm url.") LOG.error(msg) raise Exception(msg) param_string_signature = '?Signature=' + message.get('URL-Signature', '') param_string_methods = '&Methods=' + message.get('URL-Methods', '') param_string_paths = '&Paths=' + message.get('URL-Paths', '') param_string_project = '&Project=' + message.get('X-Project-ID', '') param_string_expires = '&Expires=' + message.get('URL-Expires', '') param_string_confirm_url = '&Url=' + message.get('WSGISubscribeURL', '') param_string_queue = '&Queue=' + queue_name confirm_url_string = (confirm_url + param_string_signature + param_string_methods + param_string_paths + param_string_project + param_string_expires + param_string_confirm_url + param_string_queue) return confirm_url_string def _make_confirmation_email(self, body, subscription, message, conf_n): queue_name = subscription['source'] confirm_url = self._make_confirm_string(conf_n, message, queue_name) email_body = "" if body is not None: email_body = body.format(queue_name, message['X-Project-ID'], confirm_url) return text.MIMEText(email_body) def execute(self, subscription, messages, **kwargs): subscriber = urllib_parse.urlparse(subscription['subscriber']) params = urllib_parse.parse_qs(subscriber.query) params = dict((k.lower(), v) for k, v in params.items()) conf_n = kwargs.get('conf').notification try: for message in messages: # Send confirmation email to subscriber. if (message.get('Message_Type') == MessageType.SubscriptionConfirmation.name): content = conf_n.subscription_confirmation_email_template msg = self._make_confirmation_email(content['body'], subscription, message, conf_n) msg["to"] = subscriber.path msg["from"] = content['sender'] msg["subject"] = content['topic'] elif (message.get('Message_Type') == MessageType.UnsubscribeConfirmation.name): content = conf_n.unsubscribe_confirmation_email_template msg = self._make_confirmation_email(content['body'], subscription, message, conf_n) msg["to"] = subscriber.path msg["from"] = content['sender'] msg["subject"] = content['topic'] else: # NOTE(Eva-i): Unfortunately this will add 'queue_name' key # to our original messages(dicts) which will be later # consumed in the storage controller. It seems safe though. 
message['queue_name'] = subscription['source'] msg = text.MIMEText(jsonutils.dumps(message)) msg["to"] = subscriber.path msg["from"] = subscription['options'].get('from', '') subject_opt = subscription['options'].get('subject', '') msg["subject"] = params.get('subject', subject_opt) if conf_n.smtp_mode == 'third_part': cmd = conf_n.smtp_command.split(' ') processutils.execute(*cmd, process_input=msg.as_string()) elif conf_n.smtp_mode == 'self_local': sender = smtplib.SMTP_SSL(conf_n.smtp_host, conf_n.smtp_port) sender.set_debuglevel(1) sender.ehlo(conf_n.smtp_host) try: sender.login(conf_n.smtp_user_name, conf_n.smtp_user_password) except smtplib.SMTPException: LOG.error("Failed to connect to the SMTP service") continue sender.sendmail(msg['from'], msg['to'], msg.as_string()) LOG.debug("Send mail successfully: %s", msg.as_string()) except OSError as err: LOG.exception('Failed to create process for sendmail, ' 'because %s.', str(err)) except Exception as exc: LOG.exception('Failed to send email because %s.', str(exc)) def register(self, subscriber, options, ttl, project_id, request_data): pass ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/notification/tasks/trust.py0000664000175100017510000000434015033040005021423 0ustar00mylesmyles# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import datetime from oslo_utils import timeutils from zaqar.common import auth from zaqar.notification.tasks import webhook class TrustTask(webhook.WebhookTask): """A webhook using trust authentication. This webhook will use the trust stored in the subscription to ask for a token, which will then be passed to the notified service. 
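For example, a subscriber stored as ``trust+http://host/notify`` has the
six-character ``trust+`` prefix stripped, and the notification is then
delivered to ``http://host/notify`` with the trust-scoped token passed in
the ``X-Auth-Token`` header.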
""" def execute(self, subscription, messages, **kwargs): subscription = copy.deepcopy(subscription) subscriber = subscription['subscriber'] trust_id = subscription['options']['trust_id'] token = auth.get_trusted_token(trust_id) subscription['subscriber'] = subscriber[6:] headers = {'X-Auth-Token': token, 'Content-Type': 'application/json'} super(TrustTask, self).execute(subscription, messages, headers, **kwargs) def register(self, subscriber, options, ttl, project_id, request_data): if 'trust_id' not in options: # We have a trust subscriber without a trust ID, # create it trustor_user_id = request_data.get('X-USER-ID') roles = request_data.get('X-ROLES', '') if roles: roles = roles.split(',') else: roles = [] auth_plugin = request_data.get('keystone.token_auth') expires_at = None if ttl: expires_at = timeutils.utcnow() + datetime.timedelta( seconds=ttl) trust_id = auth.create_trust_id( auth_plugin, trustor_user_id, project_id, roles, expires_at) options['trust_id'] = trust_id ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/notification/tasks/webhook.py0000664000175100017510000001552315033040005021705 0ustar00mylesmyles# Copyright (c) 2015 Catalyst IT Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import math import time from oslo_log import log as logging from oslo_serialization import jsonutils import requests from zaqar.common import consts LOG = logging.getLogger(__name__) def _Linear_function(minimum_delay, maximum_delay, times): return range(minimum_delay, maximum_delay, times) def _Geometric_function(minimum_delay, maximum_delay, times): x_max = int((maximum_delay - minimum_delay) / times) k = math.pow(10, math.log10(maximum_delay/minimum_delay)/(x_max-1)) xarray = range(1, x_max+1) return [int(minimum_delay*math.pow(k, a-1)) for a in xarray] def _Exponential_function(minimum_delay, maximum_delay, times): x_max = int((maximum_delay - minimum_delay) / times) k = math.pow(10, math.log10(maximum_delay/minimum_delay)/(x_max-1)) p = minimum_delay/k xarray = range(1, x_max+1) return [int(p*math.pow(k, a)) for a in xarray] def _Arithmetic_function(minimum_delay, maximum_delay, times): x_max = int((maximum_delay - minimum_delay) / times) d = 2.0 * (maximum_delay - minimum_delay) / (x_max * (x_max - 1)) xarray = range(1, x_max+1) return [int(minimum_delay+(a-1)*a*d/2) for a in xarray] RETRY_BACKOFF_FUNCTION_MAP = {'linear': _Linear_function, 'arithmetic': _Arithmetic_function, 'geometric': _Geometric_function, 'exponential': _Exponential_function} class WebhookTask(object): def _post_request_success(self, subscriber, data, headers): try: response = requests.post(subscriber, data=data, headers=headers) if response and (response.status_code in range(200, 500)): return True except Exception as e: LOG.exception('post request got exception in retry: %s.', str(e)) return False def _retry_post(self, sub_retry_policy, queue_retry_policy, subscriber, data, headers): retry_policy = None if sub_retry_policy.get('ignore_subscription_override') or \ queue_retry_policy.get('ignore_subscription_override'): retry_policy = queue_retry_policy or {} else: retry_policy = sub_retry_policy or queue_retry_policy or {} # Immediate Retry Phase for retry_with_no_delay in range( 0, retry_policy.get('retries_with_no_delay', consts.RETRIES_WITH_NO_DELAY)): LOG.debug('Retry with no delay, count: %s', retry_with_no_delay) if self._post_request_success(subscriber, data, headers): return # Pre-Backoff Phase for minimum_delay_retry in range( 0, retry_policy.get('minimum_delay_retries', consts.MINIMUM_DELAY_RETRIES)): LOG.debug('Retry with minimum delay, count: %s', minimum_delay_retry) time.sleep(retry_policy.get('minimum_delay', consts.MINIMUM_DELAY)) if self._post_request_success(subscriber, data, headers): return # Now we support linear,arithmetic, # exponential and geometric retry backoff function. 
retry_function = retry_policy.get('retry_backoff_function', 'linear') backoff_function = RETRY_BACKOFF_FUNCTION_MAP[retry_function] for i in backoff_function(retry_policy.get('minimum_delay', consts.MINIMUM_DELAY), retry_policy.get('maximum_delay', consts.MAXIMUM_DELAY), consts.LINEAR_INTERVAL): LOG.debug('Retry with function:%s, sleep: %s seconds', retry_function, i) time.sleep(i) if self._post_request_success(subscriber, data, headers): return # Post-Backoff Phase for maximum_delay_retries in range( 0, retry_policy.get('maximum_delay_retries', consts.MAXIMUM_DELAY_RETRIES)): LOG.debug('Retry with maximum delay, count: %s', maximum_delay_retries) time.sleep(retry_policy.get('maximum_delay', consts.MAXIMUM_DELAY)) if self._post_request_success(subscriber, data, headers): return LOG.debug('Send request retries are all failed.') def execute(self, subscription, messages, headers=None, **kwargs): if headers is None: headers = {'Content-Type': 'application/json'} headers.update(subscription['options'].get('post_headers', {})) try: for msg in messages: # NOTE(Eva-i): Unfortunately this will add 'queue_name' key to # our original messages(dicts) which will be later consumed in # the storage controller. It seems safe though. msg['queue_name'] = subscription['source'] if 'post_data' in subscription['options']: data = subscription['options']['post_data'] data = data.replace('"$zaqar_message$"', jsonutils.dumps(msg)) else: data = jsonutils.dumps(msg) response = requests.post(subscription['subscriber'], data=data, headers=headers) if response and (response.status_code not in range(200, 500)): LOG.info("Response is %s, begin to retry", response.status_code) self._retry_post( subscription['options'].get('_retry_policy', {}), kwargs.get('queue_retry_policy'), subscription['subscriber'], data, headers) except Exception as e: LOG.exception('webhook task got exception: %s.', str(e)) self._retry_post(subscription['options'].get('_retry_policy', {}), kwargs.get('queue_retry_policy'), subscription['subscriber'], data, headers) def register(self, subscriber, options, ttl, project_id, request_data): pass ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5710135 zaqar-20.1.0.dev29/zaqar/storage/0000775000175100017510000000000015033040026015523 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/__init__.py0000664000175100017510000000252015033040005017630 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
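# NOTE(editor): a small sketch (the helper function is hypothetical) of how
# the names hoisted below are typically consumed: callers import the
# abstract bases and constants from the package namespace rather than from
# zaqar.storage.base directly:
#
#   from zaqar import storage
#
#   def supports_fifo(data_driver):
#       # Capabilities is hoisted from zaqar.storage.base below
#       return storage.Capabilities.FIFO in data_driver.capabilities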
"""Zaqar Storage Drivers""" from zaqar.storage import base from zaqar.storage import errors # NOQA # Hoist classes into package namespace Capabilities = base.Capabilities ControlDriverBase = base.ControlDriverBase DataDriverBase = base.DataDriverBase CatalogueBase = base.CatalogueBase Claim = base.Claim Message = base.Message Queue = base.Queue Subscription = base.Subscription PoolsBase = base.PoolsBase FlavorsBase = base.FlavorsBase Topic = base.Topic DEFAULT_QUEUES_PER_PAGE = base.DEFAULT_QUEUES_PER_PAGE DEFAULT_MESSAGES_PER_PAGE = base.DEFAULT_MESSAGES_PER_PAGE DEFAULT_POOLS_PER_PAGE = base.DEFAULT_POOLS_PER_PAGE DEFAULT_SUBSCRIPTIONS_PER_PAGE = base.DEFAULT_SUBSCRIPTIONS_PER_PAGE DEFAULT_TOPICS_PER_PAGE = base.DEFAULT_TOPICS_PER_PAGE DEFAULT_MESSAGES_PER_CLAIM = base.DEFAULT_MESSAGES_PER_CLAIM ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/base.py0000664000175100017510000011573415033040005017017 0ustar00mylesmyles# Copyright (c) 2013 Red Hat, Inc. # Copyright 2014 Catalyst IT Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Implements the DriverBase abstract class for Zaqar storage drivers.""" import abc import functools import time import enum from oslo_config import cfg from oslo_log import log as logging from oslo_utils import uuidutils from zaqar.common import decorators from zaqar.storage import errors from zaqar.storage import utils DEFAULT_QUEUES_PER_PAGE = 10 DEFAULT_MESSAGES_PER_PAGE = 10 DEFAULT_POOLS_PER_PAGE = 10 DEFAULT_SUBSCRIPTIONS_PER_PAGE = 10 DEFAULT_TOPICS_PER_PAGE = 10 DEFAULT_MESSAGES_PER_CLAIM = 10 LOG = logging.getLogger(__name__) @enum.unique class Capabilities(enum.IntEnum): """Enum of storage capabilities.""" FIFO = 1 CLAIMS = 2 DURABILITY = 3 AOD = 4 # At least once delivery HIGH_THROUGHPUT = 5 class DriverBase(object, metaclass=abc.ABCMeta): """Base class for both data and control plane drivers :param conf: Configuration containing options for this driver. :type conf: `oslo_config.ConfigOpts` :param cache: Cache instance to use for reducing latency for certain lookups. :type cache: `dogpile.cache.region.CacheRegion` """ _DRIVER_OPTIONS = [] def __init__(self, conf, cache): self.conf = conf self.cache = cache self._register_opts() def _register_opts(self): for group, options in self._DRIVER_OPTIONS: for opt in options: try: self.conf.register_opt(opt, group=group) except cfg.DuplicateOptError: pass class DataDriverBase(DriverBase, metaclass=abc.ABCMeta): """Interface definition for storage drivers. Data plane storage drivers are responsible for implementing the core functionality of the system. Connection information and driver-specific options are loaded from the config file or the pool catalog. :param conf: Configuration containing options for this driver. :type conf: `oslo_config.ConfigOpts` :param cache: Cache instance to use for reducing latency for certain lookups. 
:type cache: `dogpile.cache.region.CacheRegion` """ BASE_CAPABILITIES = [] def __init__(self, conf, cache, control_driver): super(DataDriverBase, self).__init__(conf, cache) # creating ControlDriver instance for accessing QueueController's # data from DataDriver self.control_driver = control_driver @abc.abstractmethod def is_alive(self): """Check whether the storage is ready.""" raise NotImplementedError @property @abc.abstractmethod def capabilities(self): """Returns storage's capabilities.""" return self.BASE_CAPABILITIES def health(self): """Return the health status of service.""" overall_health = {} # NOTE(flwang): KPI extracted from different storage backends, # _health() will be implemented by different storage drivers. backend_health = self._health() if backend_health: overall_health.update(backend_health) return overall_health @abc.abstractmethod def _health(self): """Return the health status based on different backends.""" raise NotImplementedError @abc.abstractmethod def close(self): """Close connections to the backend.""" raise NotImplementedError def _get_operation_status(self): op_status = {} status_template = lambda s, t, r: {'succeeded': s, 'seconds': t, 'ref': r} project = uuidutils.generate_uuid() queue = uuidutils.generate_uuid() client = uuidutils.generate_uuid() msg_template = lambda s: {'ttl': 600, 'body': {'event': 'p_%s' % s}} messages = [msg_template(i) for i in range(100)] claim_metadata = {'ttl': 60, 'grace': 300} # NOTE (flwang): Using time.time() instead of timeit since timeit will # make the method calling be complicated. def _handle_status(operation_type, callable_operation): succeeded = True ref = None result = None try: start = time.time() result = callable_operation() except Exception: ref = uuidutils.generate_uuid() LOG.exception('Error calling operation.', extra={'instance_uuid': ref}) succeeded = False status = status_template(succeeded, time.time() - start, ref) op_status[operation_type] = status return succeeded, result # create queue func = functools.partial(self.queue_controller.create, queue, project=project) succeeded, _ = _handle_status('create_queue', func) # post messages if succeeded: func = functools.partial(self.message_controller.post, queue, messages, client, project=project) _, msg_ids = _handle_status('post_messages', func) # claim messages if msg_ids: func = functools.partial(self.claim_controller.create, queue, claim_metadata, project=project) _, (claim_id, claim_msgs) = _handle_status('claim_messages', func) # list messages func = functools.partial(self.message_controller.list, queue, project, echo=True, client_uuid=client, include_claimed=True) _handle_status('list_messages', func) # delete messages if claim_id and claim_msgs: for message in claim_msgs: func = functools.partial(self. message_controller.delete, queue, message['id'], project, claim=claim_id) succeeded, _ = _handle_status('delete_messages', func) if not succeeded: break # delete claim func = functools.partial(self.claim_controller.delete, queue, claim_id, project) _handle_status('delete_claim', func) # delete queue func = functools.partial(self.message_controller.bulk_delete, queue, msg_ids, project=project) _handle_status('bulk_delete_messages', func) func = functools.partial(self.queue_controller.delete, queue, project=project) _handle_status('delete_queue', func) return op_status def gc(self): """Perform manual garbage collection of claims and messages. 
This method can be overridden in order to provide a trigger that can be called by so-called "garbage collection" scripts that are required by some drivers. By default, this method does nothing. """ pass @decorators.lazy_property(write=False) def queue_controller(self): return self.control_driver.queue_controller @property @abc.abstractmethod def message_controller(self): """Returns the driver's message controller.""" raise NotImplementedError @property @abc.abstractmethod def claim_controller(self): """Returns the driver's claim controller.""" raise NotImplementedError @property @abc.abstractmethod def subscription_controller(self): """Returns the driver's subscription controller.""" raise NotImplementedError @decorators.lazy_property(write=False) def topic_controller(self): """Returns the driver's topic controller.""" return self.control_driver.topic_controller class ControlDriverBase(DriverBase, metaclass=abc.ABCMeta): """Interface definition for control plane storage drivers. Storage drivers that work at the control plane layer allow one to modify aspects of the functionality of the system. This is ideal for administrative purposes. Allows access to the pool registry through a catalogue and a pool controller. :param conf: Configuration containing options for this driver. :type conf: `oslo_config.ConfigOpts` :param cache: Cache instance to use for reducing latency for certain lookups. :type cache: `dogpile.cache.region.CacheRegion` """ @property @abc.abstractmethod def catalogue_controller(self): """Returns the driver's catalogue controller.""" raise NotImplementedError @property @abc.abstractmethod def pools_controller(self): """Returns storage's pool management controller.""" raise NotImplementedError @property @abc.abstractmethod def flavors_controller(self): """Returns storage's flavor management controller.""" raise NotImplementedError @property @abc.abstractmethod def queue_controller(self): """Returns the driver's queue controller.""" raise NotImplementedError @property @abc.abstractmethod def topic_controller(self): """Returns the driver's topic controller.""" raise NotImplementedError @abc.abstractmethod def close(self): """Close connections to the backend.""" raise NotImplementedError class ControllerBase(object): """Top-level class for controllers. :param driver: Instance of the driver instantiating this controller. """ def __init__(self, driver): self.driver = driver class Queue(ControllerBase, metaclass=abc.ABCMeta): """This class is responsible for managing queues. Queue operations include CRUD, monitoring, etc. Storage driver implementations of this class should be capable of handling high workloads and huge numbers of queues. """ def list(self, project=None, kfilter={}, marker=None, limit=DEFAULT_QUEUES_PER_PAGE, detailed=False, name=None): """Base method for listing queues. :param project: Project id :param kfilter: The key-value of metadata which user want to filter :param marker: The last queue name :param limit: (Default 10) Max number of queues to return :param detailed: Whether metadata is included :param name: The queue name which user want to filter :returns: An iterator giving a sequence of queues and the marker of the next page. """ return self._list(project, kfilter, marker, limit, detailed, name) _list = abc.abstractmethod(lambda x: None) def get(self, name, project=None): """Base method for queue metadata retrieval. 
:param name: The queue name :param project: Project id :returns: Dictionary containing queue metadata :raises DoesNotExist: if queue metadata does not exist """ return self._get(name, project) _get = abc.abstractmethod(lambda x: None) def get_metadata(self, name, project=None): """Base method for queue metadata retrieval. :param name: The queue name :param project: Project id :returns: Dictionary containing queue metadata :raises DoesNotExist: if queue metadata does not exist """ raise NotImplementedError def set_metadata(self, name, metadata, project=None): """Base method for updating a queue metadata. :param name: The queue name :param metadata: Queue metadata as a dict :param project: Project id :raises DoesNotExist: if queue metadata can not be updated """ raise NotImplementedError def create(self, name, metadata=None, project=None): """Base method for queue creation. :param name: The queue name :param project: Project id :returns: True if a queue was created and False if it was updated. """ return self._create(name, metadata, project) _create = abc.abstractmethod(lambda x: None) def exists(self, name, project=None): """Base method for testing queue existence. :param name: The queue name :param project: Project id :returns: True if a queue exists and False if it does not. """ return self._exists(name, project) _exists = abc.abstractmethod(lambda x: None) def delete(self, name, project=None): """Base method for deleting a queue. :param name: The queue name :param project: Project id """ return self._delete(name, project) _delete = abc.abstractmethod(lambda x: None) def stats(self, name, project=None): """Base method for queue stats. :param name: The queue name :param project: Project id :returns: Dictionary with the queue stats """ return self._stats(name, project) _stats = abc.abstractmethod(lambda x: None) def calculate_resource_count(self, project=None): """Base method for calculate queues amount. :param project: Project id :returns: The number of queues. """ return self._calculate_resource_count(project) _calculate_resource_count = abc.abstractmethod(lambda x: None) class Message(ControllerBase, metaclass=abc.ABCMeta): """This class is responsible for managing message CRUD.""" @abc.abstractmethod def list(self, queue, project=None, marker=None, limit=DEFAULT_MESSAGES_PER_PAGE, echo=False, client_uuid=None, include_claimed=False, include_delayed=False): """Base method for listing messages. :param queue: Name of the queue to get the message from. :param project: Project id :param marker: Tail identifier :param limit: (Default 10) Max number of messages to return. :type limit: Maybe int :param echo: (Default False) Boolean expressing whether or not this client should receive its own messages. :param client_uuid: A UUID object. Required when echo=False. :param include_claimed: omit claimed messages from listing? :type include_claimed: bool :param include_delayed: omit delayed messages from listing :type include_delayed: bool :returns: An iterator giving a sequence of messages and the marker of the next page. """ raise NotImplementedError @abc.abstractmethod def first(self, queue, project=None, sort=1): """Get first message in the queue (including claimed). :param queue: Name of the queue to list :param sort: (Default 1) Sort order for the listing. Pass 1 for ascending (oldest message first), or -1 for descending (newest message first). 
:returns: First message in the queue, or None if the queue is empty """ raise NotImplementedError @abc.abstractmethod def get(self, queue, message_id, project=None): """Base method for getting a message. :param queue: Name of the queue to get the message from. :param project: Project id :param message_id: Message ID :returns: Dictionary containing message data :raises DoesNotExist: if message data can not be got """ raise NotImplementedError @abc.abstractmethod def bulk_get(self, queue, message_ids, project=None): """Base method for getting multiple messages. :param queue: Name of the queue to get the message from. :param project: Project id :param message_ids: A sequence of message IDs. :returns: An iterable, yielding dicts containing message details """ raise NotImplementedError @abc.abstractmethod def post(self, queue, messages, client_uuid, project=None): """Base method for posting one or more messages. Implementations of this method should guarantee and preserve the order, in the returned list, of incoming messages. :param queue: Name of the queue to post message to. :param messages: Messages to post to queue, an iterable yielding 1 or more elements. An empty iterable results in undefined behavior. :param client_uuid: A UUID object. :param project: Project id :returns: List of message ids """ raise NotImplementedError @abc.abstractmethod def delete(self, queue, message_id, project=None, claim=None): """Base method for deleting a single message. :param queue: Name of the queue to post message to. :param message_id: Message to be deleted :param project: Project id :param claim: Claim this message belongs to. When specified, claim must be valid and message_id must belong to it. """ raise NotImplementedError @abc.abstractmethod def bulk_delete(self, queue, message_ids, project=None, claim_ids=None): """Base method for deleting multiple messages. :param queue: Name of the queue to post message to. :param message_ids: A sequence of message IDs to be deleted. :param project: Project id :param claim_ids: claim IDs passed in by the delete request """ raise NotImplementedError @abc.abstractmethod def pop(self, queue, limit, project=None): """Base method for popping messages. :param queue: Name of the queue to pop message from. :param limit: Number of messages to pop. :param project: Project id """ raise NotImplementedError class Claim(ControllerBase, metaclass=abc.ABCMeta): @abc.abstractmethod def get(self, queue, claim_id, project=None): """Base method for getting a claim. :param queue: Name of the queue this claim belongs to. :param claim_id: The claim id :param project: Project id :returns: (Claim's metadata, claimed messages) :raises DoesNotExist: if claimed messages can not be got """ raise NotImplementedError @abc.abstractmethod def create(self, queue, metadata, project=None, limit=DEFAULT_MESSAGES_PER_CLAIM): """Base method for creating a claim. :param queue: Name of the queue this claim belongs to. :param metadata: Claim's parameters to be stored. :param project: Project id :param limit: (Default 10) Max number of messages to claim. :returns: (Claim ID, claimed messages) """ raise NotImplementedError @abc.abstractmethod def update(self, queue, claim_id, metadata, project=None): """Base method for updating a claim. :param queue: Name of the queue this claim belongs to. :param claim_id: Claim to be updated :param metadata: Claim's parameters to be updated. 
:param project: Project id """ raise NotImplementedError @abc.abstractmethod def delete(self, queue, claim_id, project=None): """Base method for deleting a claim. :param queue: Name of the queue this claim belongs to. :param claim_id: Claim to be deleted :param project: Project id """ raise NotImplementedError class Subscription(ControllerBase, metaclass=abc.ABCMeta): """This class is responsible for managing subscriptions of notification. """ @abc.abstractmethod def list(self, queue, project=None, marker=None, limit=DEFAULT_SUBSCRIPTIONS_PER_PAGE): """Base method for listing subscriptions. :param queue: Name of the queue to get the subscriptions from. :type queue: str :param project: Project this subscription belongs to. :type project: str :param marker: used to determine which subscription to start with :type marker: str :param limit: (Default 10) Max number of results to return :type limit: int :returns: An iterator giving a sequence of subscriptions and the marker of the next page. :rtype: [{}] """ raise NotImplementedError @abc.abstractmethod def get(self, queue, subscription_id, project=None): """Returns a single subscription entry. :param queue: Name of the queue subscription belongs to. :type queue: str :param subscription_id: ID of this subscription :type subscription_id: str :param project: Project this subscription belongs to. :type project: str :returns: Dictionary containing subscription data :rtype: {} :raises SubscriptionDoesNotExist: if not found """ raise NotImplementedError @abc.abstractmethod def create(self, queue, subscriber, ttl, options, project=None): """Create a new subscription. :param queue:The source queue for notifications :type queue: str :param subscriber: The subscriber URI :type subscriber: str :param ttl: time to live for this subscription :type ttl: int :param options: Options used to configure this subscription :type options: dict :param project: Project id :type project: str :returns: True if a subscription was created and False if it is failed. :rtype: boolean """ raise NotImplementedError @abc.abstractmethod def update(self, queue, subscription_id, project=None, **kwargs): """Updates the weight, uris, and/or options of this subscription :param queue: Name of the queue subscription belongs to. :type queue: str :param name: ID of the subscription :type name: text :param kwargs: one of: `source`, `subscriber`, `ttl`, `options` :type kwargs: dict :raises SubscriptionDoesNotExist: if not found :raises SubscriptionAlreadyExists: if attempt to update in a way to create duplicate subscription """ raise NotImplementedError @abc.abstractmethod def exists(self, queue, subscription_id, project=None): """Base method for testing subscription existence. :param queue: Name of the queue subscription belongs to. :type queue: str :param subscription_id: ID of subscription :type subscription_id: str :param project: Project id :type project: str :returns: True if a subscription exists and False if it does not. """ raise NotImplementedError @abc.abstractmethod def delete(self, queue, subscription_id, project=None): """Base method for deleting a subscription. :param queue: Name of the queue subscription belongs to. :type queue: str :param subscription_id: ID of the subscription to be deleted. :type subscription_id: str :param project: Project id :type project: str """ raise NotImplementedError @abc.abstractmethod def get_with_subscriber(self, queue, subscriber, project=None): """Base method for get a subscription with the subscriber. 
:param queue: Name of the queue subscription belongs to. :type queue: str :param subscriber: link of the subscription to be notified. :type subscriber: str :param project: Project id :type project: str :returns: Dictionary containing subscription data :rtype: dict """ raise NotImplementedError @abc.abstractmethod def confirm(self, queue, subscription_id, project=None, confirmed=True): """Base method for confirming a subscription. :param queue: Name of the queue subscription belongs to. :type queue: str :param subscription_id: ID of the subscription to be deleted. :type subscription_id: str :param project: Project id :type project: str :param confirmed: Confirm a subscription or cancel the confirmation of a subscription. :type confirmed: boolean """ raise NotImplementedError class PoolsBase(ControllerBase, metaclass=abc.ABCMeta): """A controller for managing pools.""" def _check_capabilities(self, uri, flavor=None, name=None): default_store = self.driver.conf.drivers.message_store pool_caps = self.capabilities(flavor=flavor, name=name) if not pool_caps: return True new_store = utils.load_storage_impl(uri, default_store=default_store) # NOTE(flaper87): Since all pools in a pool flavor # are assumed to have the same capabilities, it's # fine to check against just 1 return pool_caps == new_store.BASE_CAPABILITIES def capabilities(self, flavor=None, name=None): """Gets the set of capabilities for this flavor/name :param flavor: The pool flavor to get capabilities for :type flavor: str :param name: The pool name to get capabilities for :type name: str """ pllt = [] if name: pool = self.get(name) pllt.append(pool) else: pllt = list(self._get_pools_by_flavor(flavor)) if not len(pllt) > 0: return () default_store = self.driver.conf.drivers.message_store pool_store = utils.load_storage_impl(pllt[0]['uri'], default_store=default_store) return pool_store.BASE_CAPABILITIES def list(self, marker=None, limit=DEFAULT_POOLS_PER_PAGE, detailed=False): """Lists all registered pools. :param marker: used to determine which pool to start with :type marker: str :param limit: (Default 10) Max number of results to return :type limit: int :param detailed: whether to include options :type detailed: bool :returns: A list of pools - name, weight, uri :rtype: [{}] """ return self._list(marker, limit, detailed) _list = abc.abstractmethod(lambda x: None) def create(self, name, weight, uri, flavor=None, options=None): """Registers a pool entry. :param name: The name of this pool :type name: str :param weight: the likelihood that this pool will be used :type weight: int :param uri: A URI that can be used by a storage client (e.g., pymongo) to access this pool. :type uri: str :param flavor: The flavor of this pool :type flavor: str :param options: Options used to configure this pool :type options: dict """ flavor_obj = {} if flavor is not None: flavor_obj["name"] = flavor if not self._check_capabilities(uri, flavor=flavor_obj): raise errors.PoolCapabilitiesMismatch() return self._create(name, weight, uri, flavor, options) _create = abc.abstractmethod(lambda x: None) def get_pools_by_flavor(self, flavor=None, detailed=False): """Returns a pool list filtered by given pool flavor. :param flavor: The flavor to filter on. `None` returns pools that are not assigned to any pool flavor. :type flavor: str :param detailed: Should the options data be included? 
:type detailed: bool :returns: weight, uri, and options for this pool :rtype: {} :raises PoolDoesNotExist: if not found """ return self._get_pools_by_flavor(flavor, detailed) _get_pools_by_flavor = abc.abstractmethod(lambda x: None) def get(self, name, detailed=False): """Returns a single pool entry. :param name: The name of this pool :type name: str :param detailed: Should the options data be included? :type detailed: bool :returns: weight, uri, and options for this pool :rtype: {} :raises PoolDoesNotExist: if not found """ return self._get(name, detailed) _get = abc.abstractmethod(lambda x: None) def exists(self, name): """Returns a single pool entry. :param name: The name of this pool :type name: str :returns: True if the pool exists :rtype: bool """ return self._exists(name) _exists = abc.abstractmethod(lambda x: None) def delete(self, name): """Removes a pool entry. :param name: The name of this pool :type name: str :rtype: None """ return self._delete(name) _delete = abc.abstractmethod(lambda x: None) def update(self, name, **kwargs): """Updates the weight, uris, and/or options of this pool :param name: Name of the pool :type name: text :param kwargs: one of: `uri`, `weight`, `options` :type kwargs: dict :raises PoolDoesNotExist: if not found """ uri = kwargs.get('uri') if uri and not self._check_capabilities(uri, name=name): raise errors.PoolCapabilitiesMismatch() return self._update(name, **kwargs) _update = abc.abstractmethod(lambda x: None) def drop_all(self): """Deletes all pools from storage.""" return self._drop_all() _drop_all = abc.abstractmethod(lambda x: None) class CatalogueBase(ControllerBase, metaclass=abc.ABCMeta): """A controller for managing the catalogue. The catalogue is responsible for maintaining a mapping between project.queue entries to their pool. """ @abc.abstractmethod def list(self, project): """Get a list of queues from the catalogue. :param project: The project to use when filtering through queue entries. :type project: str :returns: [{'project': ..., 'queue': ..., 'pool': ...},] :rtype: [dict] """ raise NotImplementedError @abc.abstractmethod def get(self, project, queue): """Returns the pool identifier for the given queue. :param project: Namespace to search for the given queue :type project: str :param queue: The name of the queue to search for :type queue: str :returns: {'pool': ...} :rtype: dict :raises QueueNotMapped: if queue is not mapped """ raise NotImplementedError @abc.abstractmethod def exists(self, project, queue): """Determines whether the given queue exists under project. :param project: Namespace to check. :type project: str :param queue: str - Particular queue to check for :type queue: str :return: True if the queue exists under this project :rtype: bool """ @abc.abstractmethod def insert(self, project, queue, pool): """Creates a new catalogue entry, or updates it if it already exists. :param project: str - Namespace to insert the given queue into :type project: str :param queue: str - The name of the queue to insert :type queue: str :param pool: pool identifier to associate this queue with :type pool: str """ raise NotImplementedError @abc.abstractmethod def delete(self, project, queue): """Removes this entry from the catalogue. :param project: The namespace to search for this queue :type project: str :param queue: The queue name to remove :type queue: str """ raise NotImplementedError @abc.abstractmethod def update(self, project, queue, pools=None): """Updates the pool identifier for this queue. 
:param project: Namespace to search :type project: str :param queue: The name of the queue :type queue: str :param pools: The name of the pool where this project/queue lives. :type pools: str :raises QueueNotMapped: if queue is not mapped """ raise NotImplementedError @abc.abstractmethod def drop_all(self): """Drops all catalogue entries from storage.""" raise NotImplementedError class FlavorsBase(ControllerBase, metaclass=abc.ABCMeta): """A controller for managing flavors.""" @abc.abstractmethod def list(self, project=None, marker=None, limit=10): """Lists all registered flavors. :param project: Project this flavor belongs to. :type project: str :param marker: used to determine which flavor to start with :type marker: str :param limit: (Default 10) Max number of results to return :type limit: int :returns: A list of flavors - name, project, flavor :rtype: [{}] """ raise NotImplementedError @abc.abstractmethod def create(self, name, project=None, capabilities=None): """Registers a flavor entry. :param name: The name of this flavor :type name: str :param project: Project this flavor belongs to. :type project: str :param pool: The name of the pool to use for this flavor. :type pool: str :param capabilities: Flavor capabilities :type capabilities: dict """ raise NotImplementedError @abc.abstractmethod def get(self, name, project=None): """Returns a single flavor entry. :param name: The name of this flavor :type name: str :param project: Project this flavor belongs to. :type project: str :rtype: {} :raises FlavorDoesNotExist: if not found """ raise NotImplementedError @abc.abstractmethod def exists(self, name, project=None): """Verifies whether the flavor exists. :param name: The name of this flavor :type name: str :param project: Project this flavor belongs to. :type project: str :returns: True if the flavor exists :rtype: bool """ raise NotImplementedError @abc.abstractmethod def delete(self, name, project=None): """Removes a flavor entry. :param name: The name of this flavor :type name: str :param project: Project this flavor belongs to. :type project: str :rtype: None """ raise NotImplementedError @abc.abstractmethod def update(self, name, project=None, **kwargs): """Updates the flavor and/or capabilities of this flavor :param name: Name of the flavor :type name: text :param project: Project this flavor belongs to. :type project: str :param kwargs: one of: `uri`, `weight`, `options` :type kwargs: dict :raises FlavorDoesNotExist: if not found """ raise NotImplementedError @abc.abstractmethod def drop_all(self): """Deletes all flavors from storage.""" raise NotImplementedError class Topic(ControllerBase, metaclass=abc.ABCMeta): """This class is responsible for managing topics. Topic operations include CRUD, etc. Storage driver implementations of this class should be capable of handling high workloads and huge numbers of topics. """ def list(self, project=None, kfilter={}, marker=None, limit=DEFAULT_TOPICS_PER_PAGE, detailed=False, name=None): """Base method for listing topics. :param project: Project id :param kfilter: The key-value of metadata which user want to filter :param marker: The last topic name :param limit: (Default 10) Max number of topics to return :param detailed: Whether metadata is included :param name: The topic name which user want to filter :returns: An iterator giving a sequence of topics and the marker of the next page. 
""" return self._list(project, kfilter, marker, limit, detailed, name) _list = abc.abstractmethod(lambda x: None) def get(self, name, project=None): """Base method for topic metadata retrieval. :param name: The topic name :param project: Project id :returns: Dictionary containing topic metadata :raises DoesNotExist: if topic metadata does not exist """ return self._get(name, project) _get = abc.abstractmethod(lambda x: None) def get_metadata(self, name, project=None): """Base method for topic metadata retrieval. :param name: The topic name :param project: Project id :returns: Dictionary containing topic metadata :raises DoesNotExist: if topic metadata does not exist """ raise NotImplementedError def set_metadata(self, name, metadata, project=None): """Base method for updating a topic metadata. :param name: The topic name :param metadata: Topic metadata as a dict :param project: Project id :raises DoesNotExist: if topic metadata can not be updated """ raise NotImplementedError def create(self, name, metadata=None, project=None): """Base method for topic creation. :param name: The topic name :param project: Project id :returns: True if a topic was created and False if it was updated. """ return self._create(name, metadata, project) _create = abc.abstractmethod(lambda x: None) def exists(self, name, project=None): """Base method for testing topic existence. :param name: The topic name :param project: Project id :returns: True if a topic exists and False if it does not. """ return self._exists(name, project) _exists = abc.abstractmethod(lambda x: None) def delete(self, name, project=None): """Base method for deleting a topic. :param name: The topic name :param project: Project id """ return self._delete(name, project) _delete = abc.abstractmethod(lambda x: None) def stats(self, name, project=None): """Base method for topic stats. :param name: The topic name :param project: Project id :returns: Dictionary with the queue stats """ return self._stats(name, project) _stats = abc.abstractmethod(lambda x: None) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/configuration.py0000664000175100017510000000313115033040005020737 0ustar00mylesmyles# Copyright (c) 2016 HuaWei, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_config import cfg class Configuration(object): def __init__(self, conf): """Initialize configuration.""" self.local_conf = conf def register_opts(self, volume_opts, group=None): self.local_conf.register_opts(volume_opts, group=group) def set_override(self, name, override, group=None): self.local_conf.set_override(name, override, group=group) def safe_get(self, value): try: return self.__getattr__(value) except cfg.NoSuchOptError: return None def __contains__(self, key): """Return True if key is in local_conf.""" return key in self.local_conf def __getattr__(self, value): # Don't use self.local_conf to avoid reentrant call to __getattr__() local_conf = object.__getattribute__(self, 'local_conf') return getattr(local_conf, value) def __getitem__(self, key): """Look up an option value and perform string substitution.""" return self.local_conf.__getitem__(key) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/errors.py0000664000175100017510000001521615033040005017413 0ustar00mylesmyles# Copyright (c) 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. class ExceptionBase(Exception): msg_format = '' def __init__(self, **kwargs): msg = self.msg_format.format(**kwargs) super(ExceptionBase, self).__init__(msg) class ConnectionError(ExceptionBase): """Raised when the connection with the back-end was lost.""" class DoesNotExist(ExceptionBase): """Resource does not exist.""" class NotPermitted(ExceptionBase): """Operation not permitted.""" class Conflict(ExceptionBase): """Resource could not be created due to a conflict.""" class MessageConflict(Conflict): msg_format = ('Message could not be enqueued due to a conflict ' 'with one or more other messages that are already in ' 'queue {queue} for project {project}') def __init__(self, queue, project): """Initializes the error with contextual information. :param queue: name of the queue to which the message was posted :param project: name of the project to which the queue belongs """ super(MessageConflict, self).__init__(queue=queue, project=project) class ClaimConflict(Conflict): msg_format = ('Messages could not be claimed due to a conflict ' 'with another parallel claim that is already in ' 'queue {queue} for project {project}') def __init__(self, queue, project): """Initializes the error with contextual information. 
:param queue: name of the queue on which the claim was attempted :param project: name of the project to which the queue belongs """ super(ClaimConflict, self).__init__(queue=queue, project=project) class QueueDoesNotExist(DoesNotExist): msg_format = 'Queue {name} does not exist for project {project}' def __init__(self, name, project): super(QueueDoesNotExist, self).__init__(name=name, project=project) class QueueIsEmpty(ExceptionBase): msg_format = 'Queue {name} in project {project} is empty' def __init__(self, name, project): super(QueueIsEmpty, self).__init__(name=name, project=project) class MessageDoesNotExist(DoesNotExist): msg_format = ('Message {mid} does not exist in ' 'queue {queue} for project {project}') def __init__(self, mid, queue, project): super(MessageDoesNotExist, self).__init__(mid=mid, queue=queue, project=project) class ClaimDoesNotExist(DoesNotExist): msg_format = ('Claim {cid} does not exist in ' 'queue {queue} for project {project}') def __init__(self, cid, queue, project): super(ClaimDoesNotExist, self).__init__(cid=cid, queue=queue, project=project) class ClaimDoesNotMatch(ExceptionBase): msg_format = ('Claim {cid} does not exist in the claim_ids parameter in ' 'queue {queue} for project {project}') def __init__(self, cid, queue, project): super(ClaimDoesNotMatch, self).__init__(cid=cid, queue=queue, project=project) class MessageIsClaimed(NotPermitted): msg_format = 'Message {mid} is claimed' def __init__(self, mid): super(MessageIsClaimed, self).__init__(mid=mid) class MessageNotClaimed(NotPermitted): msg_format = 'Message {mid} is no longer claimed' def __init__(self, mid): super(MessageNotClaimed, self).__init__(mid=mid) class MessageNotClaimedBy(NotPermitted): msg_format = 'Message {mid} is not claimed by {cid}' def __init__(self, mid, cid): super(MessageNotClaimedBy, self).__init__(cid=cid, mid=mid) class QueueNotMapped(DoesNotExist): msg_format = ('No pool found for ' 'queue {queue} for project {project}') def __init__(self, queue, project): super(QueueNotMapped, self).__init__(queue=queue, project=project) class PoolDoesNotExist(DoesNotExist): msg_format = 'Pool {pool} does not exist' def __init__(self, pool): super(PoolDoesNotExist, self).__init__(pool=pool) class PoolGroupDoesNotExist(DoesNotExist): msg_format = 'Pool group {pool_group} does not exist' def __init__(self, pool_group): super(PoolGroupDoesNotExist, self).__init__(pool_group=pool_group) class FlavorDoesNotExist(DoesNotExist): msg_format = 'Flavor {flavor} does not exist' def __init__(self, flavor): super(FlavorDoesNotExist, self).__init__(flavor=flavor) class NoPoolFound(ExceptionBase): msg_format = 'No pools registered' def __init__(self): super(NoPoolFound, self).__init__() class PoolInUseByFlavor(NotPermitted): msg_format = 'Pool {pid} is in use by flavor {fid}' def __init__(self, pid, fid): super(PoolInUseByFlavor, self).__init__(pid=pid, fid=fid) self._flavor = fid @property def flavor(self): return self._flavor class SubscriptionDoesNotExist(DoesNotExist): msg_format = 'Subscription {subscription_id} does not exist' def __init__(self, subscription_id): super(SubscriptionDoesNotExist, self).__init__(subscription_id=subscription_id) class PoolCapabilitiesMismatch(ExceptionBase): msg_format = ('The pool being added does not ' 'support the minimum set of capabilities') class PoolAlreadyExists(Conflict): msg_format = 'The database URI is in use by another pool.' 
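# NOTE(editor): a short sketch (the queue and project names are
# illustrative) of how the errors above render their messages: each
# subclass feeds its keyword arguments through msg_format.format() in
# ExceptionBase.__init__:
#
#   try:
#       raise QueueDoesNotExist(name='orders', project='demo')
#   except DoesNotExist as ex:
#       assert str(ex) == 'Queue orders does not exist for project demo'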
class PoolRedisNotSupportGroup(ExceptionBase): msg_format = ('Redis does not support pool_group, please use flavor') class SubscriptionAlreadyExists(Conflict): msg_format = ('Such subscription already exists. Subscriptions ' 'are unique by project + queue + subscriber URI.') class TopicDoesNotExist(DoesNotExist): msg_format = 'Topic {name} does not exist for project {project}' def __init__(self, name, project): super(TopicDoesNotExist, self).__init__(name=name, project=project) class TopicIsEmpty(ExceptionBase): msg_format = 'Topic {name} in project {project} is empty' def __init__(self, name, project): super(TopicIsEmpty, self).__init__(name=name, project=project) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5730135 zaqar-20.1.0.dev29/zaqar/storage/mongodb/0000775000175100017510000000000015033040026017150 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/mongodb/__init__.py0000664000175100017510000000374715033040005021271 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. r""" MongoDB Storage Driver for Zaqar. About the store --------------- MongoDB is a NoSQL, eventually consistent, reliable database that supports horizontal scaling and can handle different levels of throughput. Supported Features ------------------ - FIFO - Unlimited horizontal-scaling [1]_ - Reliability [2]_ .. [1] This is only possible in a sharded environment .. [2] Write concern must be equal to or higher than 2 Supported Deployments --------------------- MongoDB can be deployed in 3 different ways. The first and simplest one is to deploy a standalone `mongod` node. The second one is to use a replica set, which gives a master-slave deployment but cannot be scaled indefinitely. The third and last one is a sharded cluster. The second and third methods are the ones recommended for production environments where durability and scalability are a must-have. The driver itself forces operators to use such environments by checking whether it is talking to a replica set or a sharded cluster. Such enforcement can be disabled by running Zaqar in an unreliable mode. Replica Sets ------------ When running on a replica set, Zaqar won't try to be smart and will rely as much as possible on the database and pymongo. Sharded Cluster --------------- TBD """ from zaqar.storage.mongodb import driver # Hoist classes into package namespace ControlDriver = driver.ControlDriver DataDriver = driver.DataDriver FIFODataDriver = driver.FIFODataDriver ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/mongodb/catalogue.py0000664000175100017510000000640215033040005021465 0ustar00mylesmyles# Copyright (c) 2013 Rackspace Hosting, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """MongoDB storage controller for the queues catalogue. Serves to construct an association between a project + queue -> pool. :: { 'p_q': project_queue :: str, 's': pool_identifier :: str } """ from zaqar.storage import base from zaqar.storage import errors from zaqar.storage.mongodb import utils PRIMARY_KEY = utils.PROJ_QUEUE_KEY CATALOGUE_INDEX = [ (PRIMARY_KEY, 1) ] class CatalogueController(base.CatalogueBase): def __init__(self, *args, **kwargs): super(CatalogueController, self).__init__(*args, **kwargs) self._col = self.driver.database.catalogue self._col.create_index(CATALOGUE_INDEX, unique=True) @utils.raises_conn_error def _insert(self, project, queue, pool, upsert): key = utils.scope_queue_name(queue, project) return self._col.update_one({PRIMARY_KEY: key}, {'$set': {'s': pool}}, upsert=upsert) @utils.raises_conn_error def list(self, project): fields = {'_id': 0} query = utils.scoped_query(None, project) ntotal = self._col.count_documents(query) return utils.HookedCursor(self._col.find(query, fields), _normalize, ntotal=ntotal) @utils.raises_conn_error def get(self, project, queue): fields = {'_id': 0} key = utils.scope_queue_name(queue, project) entry = self._col.find_one({PRIMARY_KEY: key}, projection=fields) if entry is None: raise errors.QueueNotMapped(queue, project) return _normalize(entry) @utils.raises_conn_error def exists(self, project, queue): key = utils.scope_queue_name(queue, project) return self._col.find_one({PRIMARY_KEY: key}) is not None def insert(self, project, queue, pool): # NOTE(cpp-cabrera): _insert handles conn_error self._insert(project, queue, pool, upsert=True) @utils.raises_conn_error def delete(self, project, queue): self._col.delete_one({ PRIMARY_KEY: utils.scope_queue_name(queue, project)}) def update(self, project, queue, pool=None): # NOTE(cpp-cabrera): _insert handles conn_error res = self._insert(project, queue, pool, upsert=False) if res.matched_count == 0: raise errors.QueueNotMapped(queue, project) @utils.raises_conn_error def drop_all(self): self._col.drop() self._col.create_index(CATALOGUE_INDEX, unique=True) def _normalize(entry): project, queue = utils.parse_scoped_project_queue(entry[PRIMARY_KEY]) return { 'queue': queue, 'project': project, 'pool': entry['s'] } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/mongodb/claims.py0000664000175100017510000003404115033040005020771 0ustar00mylesmyles# Copyright (c) 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Implements the MongoDB storage controller for claims. 
Field Mappings: In order to reduce the disk / memory space used, field names will be, most of the time, the first letter of their long name. """ import datetime from bson import objectid from oslo_log import log as logging from oslo_utils import timeutils from pymongo.collection import ReturnDocument from zaqar import storage from zaqar.storage import errors from zaqar.storage.mongodb import utils LOG = logging.getLogger(__name__) def _messages_iter(msg_iter): """Used to iterate through messages.""" try: msg = next(msg_iter) yield msg.pop('claim') yield msg # Smoke it! for msg in msg_iter: del msg['claim'] yield msg except StopIteration: return class ClaimController(storage.Claim): """Implements claim resource operations using MongoDB. No dedicated collection is being used for claims. Claims are created in the messages collection and live within messages, that is, in the c field. This implementation certainly uses more space on disk but reduces the number of queries to be executed and the time needed to retrieve claims and claimed messages. As for the memory usage, this implementation requires less memory since a single index is required. The index is a compound index between the claim id and its expiration timestamp. """ @utils.raises_conn_error @utils.retries_on_autoreconnect def get(self, queue, claim_id, project=None): msg_ctrl = self.driver.message_controller # Base query, always check expire time now = timeutils.utcnow_ts() cid = utils.to_oid(claim_id) if cid is None: raise errors.ClaimDoesNotExist(claim_id, queue, project) try: # Let's get the claim's data # from the first message # in the iterator msgs = _messages_iter(msg_ctrl._claimed(queue, cid, now, project=project)) claim = next(msgs) update_time = claim['e'] - claim['t'] age = now - update_time claim_meta = { 'age': int(age), 'ttl': claim['t'], 'id': str(claim['id']), } except StopIteration: raise errors.ClaimDoesNotExist(cid, queue, project) return claim_meta, msgs # NOTE(kgriffs): If we get an autoreconnect or any other connection error, # the worst that can happen is you get an orphaned claim, but it will # expire eventually and free up those messages to be claimed again. We # might consider setting a "claim valid" flag similar to how posting # messages works, in order to avoid this situation if it turns out to # be a real problem for users. @utils.raises_conn_error @utils.retries_on_autoreconnect def create(self, queue, metadata, project=None, limit=storage.DEFAULT_MESSAGES_PER_CLAIM): """Creates a claim. This implementation was done in a best-effort fashion. In order to create a claim we need to get a list of messages that can be claimed. Once we have that list we execute a query filtering by the ids returned by the previous query. Since there's a lot of room for race conditions here, we'll check if the number of updated records is equal to the max number of messages to claim. If the number of updated messages is lower than limit we'll try to claim the remaining number of messages. These two queries are required because there is, for the time being, no way to execute an update on a limited number of records.
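For illustration only (the limit and variable names below are
hypothetical), the two queries look roughly like::

    msgs = msg_ctrl._active(queue, limit=10)       # 1. claimable ids
    ids = [m['_id'] for m in msgs]
    collection.update_many({'_id': {'$in': ids},
                            'c.e': {'$lte': now}},
                           {'$set': {'c': meta}})  # 2. claim them

If fewer than ``limit`` documents were modified, the difference was
claimed in parallel by competing requests.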
""" msg_ctrl = self.driver.message_controller queue_ctrl = self.driver.queue_controller # Get the maxClaimCount, deadLetterQueue and DelayTTL # from current queue's meta queue_meta = queue_ctrl.get(queue, project=project) ttl = metadata['ttl'] grace = metadata['grace'] oid = objectid.ObjectId() now = timeutils.utcnow_ts() claim_expires = now + ttl claim_expires_dt = datetime.datetime.fromtimestamp( claim_expires, tz=datetime.timezone.utc).replace(tzinfo=None) message_ttl = ttl + grace message_expiration = datetime.datetime.fromtimestamp( claim_expires + grace, tz=datetime.timezone.utc).replace( tzinfo=None) meta = { 'id': oid, 't': ttl, 'e': claim_expires, 'c': 0 # NOTE(flwang): A placeholder which will be updated later } # NOTE(cdyangzhenyu): If the ``_default_message_delay`` is 0 means # queue is not delayed queue, So we don't filter for delay messages. include_delayed = False if queue_meta.get('_default_message_delay', 0) else True # Get a list of active, not claimed nor expired # messages that could be claimed. msgs = msg_ctrl._active(queue, projection={'_id': 1, 'c': 1}, project=project, limit=limit, include_delayed=include_delayed) messages = iter([]) be_claimed = [(msg['_id'], msg['c'].get('c', 0)) for msg in msgs] ids = [_id for _id, _ in be_claimed] if len(ids) == 0: return None, messages # NOTE(kgriffs): Set the claim field for # the active message batch, while also # filtering out any messages that happened # to get claimed just now by one or more # parallel requests. # # Filtering by just 'c.e' works because # new messages have that field initialized # to the current time when the message is # posted. There is no need to check whether # 'c' exists or 'c.id' is None. collection = msg_ctrl._collection(queue, project) updated = collection.update_many({'_id': {'$in': ids}, 'c.e': {'$lte': now}}, {'$set': {'c': meta}}, upsert=False) # NOTE(flaper87): Dirty hack! # This sets the expiration time to # `expires` on messages that would # expire before claim. new_values = {'e': message_expiration, 't': message_ttl} collection.update_many({'p_q': utils.scope_queue_name(queue, project), 'e': {'$lt': claim_expires_dt}, 'c.id': oid}, {'$set': new_values}, upsert=False) msg_count_moved_to_DLQ = 0 if ('_max_claim_count' in queue_meta and '_dead_letter_queue' in queue_meta): LOG.debug(u"The list of messages being claimed: %(be_claimed)s", {"be_claimed": be_claimed}) for _id, claimed_count in be_claimed: # NOTE(flwang): We have claimed the message above, but we will # update the claim count below. So that means, when the # claimed_count equals queue_meta['_max_claim_count'], the # message has met the threshold. And Zaqar will move it to the # DLQ. if claimed_count < queue_meta['_max_claim_count']: # 1. Save the new max claim count for message collection.update_one({'_id': _id, 'c.id': oid}, {'$set': {'c.c': claimed_count + 1}}, upsert=False) LOG.debug(u"Message %(id)s has been claimed %(count)d " u"times.", {"id": str(_id), "count": claimed_count + 1}) else: # 2. Check if the message's claim count has exceeded the # max claim count defined in the queue, if so, move the # message to the dead letter queue. # NOTE(flwang): We're moving message directly. That means, # the queue and dead letter queue must be created on the # same storage pool. It's a technical tradeoff, because if # we re-send the message to the dead letter queue by # message controller, then we will lost all the claim # information. 
dlq_name = queue_meta['_dead_letter_queue'] new_msg = {'c.c': claimed_count, 'p_q': utils.scope_queue_name(dlq_name, project)} dlq_ttl = queue_meta.get("_dead_letter_queue_messages_ttl") if dlq_ttl: new_msg['t'] = dlq_ttl kwargs = {"return_document": ReturnDocument.AFTER} msg = collection.find_one_and_update({'_id': _id, 'c.id': oid}, {'$set': new_msg}, **kwargs) dlq_collection = msg_ctrl._collection(dlq_name, project) if dlq_collection is None: LOG.warning(u"Failed to find the message collection " u"for queue %(dlq_name)s", {"dlq_name": dlq_name}) return None, iter([]) # NOTE(flwang): If the dead letter queue and the queue are in the # same partition, the message has already been # modified. if collection != dlq_collection: result = dlq_collection.insert_one(msg) if result.inserted_id: collection.delete_one({'_id': _id}) LOG.debug(u"Message %(id)s has met the max claim count " u"%(count)d, now it has been moved to dead " u"letter queue %(dlq_name)s.", {"id": str(_id), "count": claimed_count, "dlq_name": dlq_name}) msg_count_moved_to_DLQ += 1 if updated.modified_count != 0: # NOTE(kgriffs): This extra step is necessary because # in between having gotten a list of active messages # and updating them, some of them may have been # claimed by a parallel request. Therefore, we need # to find out which messages were actually tagged # with the claim ID successfully. if msg_count_moved_to_DLQ < updated.modified_count: claim, messages = self.get(queue, oid, project=project) else: # NOTE(flwang): Though messages were claimed, all of them # have met the max claim count and have been moved to the DLQ. return None, iter([]) return str(oid), messages @utils.raises_conn_error @utils.retries_on_autoreconnect def update(self, queue, claim_id, metadata, project=None): cid = utils.to_oid(claim_id) if cid is None: raise errors.ClaimDoesNotExist(claim_id, queue, project) now = timeutils.utcnow_ts() grace = metadata['grace'] ttl = metadata['ttl'] claim_expires = now + ttl claim_expires_dt = datetime.datetime.fromtimestamp( claim_expires, tz=datetime.timezone.utc).replace(tzinfo=None) message_ttl = ttl + grace message_expires = datetime.datetime.fromtimestamp( claim_expires + grace, tz=datetime.timezone.utc).replace( tzinfo=None) msg_ctrl = self.driver.message_controller claimed = msg_ctrl._claimed(queue, cid, expires=now, limit=1, project=project) try: next(claimed) except StopIteration: raise errors.ClaimDoesNotExist(claim_id, queue, project) meta = { 'id': cid, 't': ttl, 'e': claim_expires, } # TODO(kgriffs): Create methods for these so we don't interact # with the messages collection directly (loose coupling) scope = utils.scope_queue_name(queue, project) collection = msg_ctrl._collection(queue, project) collection.update_many({'p_q': scope, 'c.id': cid}, {'$set': {'c': meta}}, upsert=False) # NOTE(flaper87): Dirty hack! # This sets the expiration time to # `expires` on messages that would # expire before the claim. collection.update_many({'p_q': scope, 'e': {'$lt': claim_expires_dt}, 'c.id': cid}, {'$set': {'e': message_expires, 't': message_ttl}}, upsert=False) @utils.raises_conn_error @utils.retries_on_autoreconnect def delete(self, queue, claim_id, project=None): msg_ctrl = self.driver.message_controller msg_ctrl._unclaim(queue, claim_id, project=project) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/mongodb/controllers.py0000664000175100017510000000313115033040005022063 0ustar00mylesmyles# Copyright (c) 2013 Red Hat, Inc.
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Exports MongoDB storage controllers. Field Mappings: In order to reduce the disk / memory space used, field names will be, most of the time, the first letter of their long name. Field mappings will be updated and documented in each controller class. """ from zaqar.storage.mongodb import catalogue from zaqar.storage.mongodb import claims from zaqar.storage.mongodb import flavors from zaqar.storage.mongodb import messages from zaqar.storage.mongodb import pools from zaqar.storage.mongodb import queues from zaqar.storage.mongodb import subscriptions from zaqar.storage.mongodb import topics CatalogueController = catalogue.CatalogueController ClaimController = claims.ClaimController FlavorsController = flavors.FlavorsController MessageController = messages.MessageController FIFOMessageController = messages.FIFOMessageController QueueController = queues.QueueController PoolsController = pools.PoolsController SubscriptionController = subscriptions.SubscriptionController TopicController = topics.TopicController ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/mongodb/driver.py0000664000175100017510000002757315033040005021020 0ustar00mylesmyles# Copyright (c) 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License.
"""Mongodb storage driver implementation.""" import ssl from osprofiler import profiler import pymongo import pymongo.errors from zaqar.common import decorators from zaqar.conf import drivers_management_store_mongodb from zaqar.conf import drivers_message_store_mongodb from zaqar.i18n import _ from zaqar import storage from zaqar.storage.mongodb import controllers def _connection(conf): # NOTE(flaper87): remove possible zaqar specific # schemes like: mongodb.fifo uri = conf.uri if conf.uri: uri = "mongodb://%s" % (conf.uri.split("://")[-1]) if conf.uri and 'replicaSet' in conf.uri: MongoClient = pymongo.MongoReplicaSetClient else: MongoClient = pymongo.MongoClient if conf.uri and 'ssl=true' in conf.uri.lower(): kwargs = {'connect': False} # Default to CERT_REQUIRED ssl_cert_reqs = ssl.CERT_REQUIRED if conf.ssl_cert_reqs == 'CERT_OPTIONAL': ssl_cert_reqs = ssl.CERT_OPTIONAL if conf.ssl_cert_reqs == 'CERT_NONE': ssl_cert_reqs = ssl.CERT_NONE kwargs['ssl_cert_reqs'] = ssl_cert_reqs if conf.ssl_keyfile: kwargs['ssl_keyfile'] = conf.ssl_keyfile if conf.ssl_certfile: kwargs['ssl_certfile'] = conf.ssl_certfile if conf.ssl_ca_certs: kwargs['ssl_ca_certs'] = conf.ssl_ca_certs return MongoClient(uri, **kwargs) return MongoClient(uri, connect=False) class DataDriver(storage.DataDriverBase): BASE_CAPABILITIES = tuple(storage.Capabilities) _DRIVER_OPTIONS = [(drivers_management_store_mongodb.GROUP_NAME, drivers_management_store_mongodb.ALL_OPTS), (drivers_message_store_mongodb.GROUP_NAME, drivers_message_store_mongodb.ALL_OPTS)] _COL_SUFIX = "_messages_p" def __init__(self, conf, cache, control_driver): super(DataDriver, self).__init__(conf, cache, control_driver) self.mongodb_conf = self.conf[drivers_message_store_mongodb.GROUP_NAME] conn = self.connection server_info = conn.server_info()['version'] self.server_version = tuple(map(int, server_info.split('.'))) if self.server_version < (2, 2): raise RuntimeError(_('The mongodb driver requires mongodb>=2.2, ' '%s found') % server_info) if not len(conn.nodes) > 1 and not conn.is_mongos: if not self.conf.unreliable: raise RuntimeError(_('Either a replica set or a mongos is ' 'required to guarantee message delivery')) else: _mongo_wc = conn.write_concern.document.get('w') # NOTE(flwang): mongo client is using None as the default value of # write concern. But in Python 3.x we can't compare by order # different types of operands like in Python 2.x. # And we can't set the write concern value when create the # connection since it will fail with norepl if mongodb version # below 2.6. Besides it doesn't make sense to create the # connection again after getting the version. durable = (_mongo_wc is not None and (_mongo_wc == 'majority' or _mongo_wc >= 2) ) if not self.conf.unreliable and not durable: raise RuntimeError(_('Using a write concern other than ' '`majority` or > 2 makes the service ' 'unreliable. 
Please use a different ' 'write concern or set `unreliable` ' 'to True in the config file.')) # FIXME(flaper87): Make this dynamic self._capabilities = self.BASE_CAPABILITIES @property def capabilities(self): return self._capabilities def is_alive(self): try: # NOTE(zyuan): Requires admin access to mongodb return 'ok' in self.connection.admin.command('ping') except pymongo.errors.PyMongoError: return False def close(self): self.connection.close() def _health(self): KPI = {} KPI['storage_reachable'] = self.is_alive() KPI['operation_status'] = self._get_operation_status() message_volume = {'free': 0, 'claimed': 0, 'total': 0} for msg_col in [db.messages for db in self.message_databases]: msg_count_claimed = msg_col.count_documents({'c.id': {'$ne': None}}) message_volume['claimed'] += msg_count_claimed msg_count_total = msg_col.count_documents({}) message_volume['total'] += msg_count_total message_volume['free'] = (message_volume['total'] - message_volume['claimed']) KPI['message_volume'] = message_volume return KPI @decorators.lazy_property(write=False) def message_databases(self): """List of message databases, ordered by partition number.""" kwargs = {} if not self.server_version < (2, 6): # NOTE(flaper87): Skip mongodb versions below 2.6 when # setting the write concern on the database. pymongo 3.0 # fails with norepl when creating indexes. doc = self.connection.write_concern.document.copy() doc.setdefault('w', 'majority') doc.setdefault('j', False) kwargs['write_concern'] = pymongo.WriteConcern(**doc) name = self.mongodb_conf.database partitions = self.mongodb_conf.partitions databases = [] for p in range(partitions): db_name = name + self._COL_SUFIX + str(p) databases.append(self.connection.get_database(db_name, **kwargs)) return databases @decorators.lazy_property(write=False) def subscriptions_database(self): """Database dedicated to the "subscription" collection.""" name = self.mongodb_conf.database + '_subscriptions' return self.connection[name] @decorators.lazy_property(write=False) def connection(self): """MongoDB client connection instance.""" return _connection(self.mongodb_conf) @decorators.lazy_property(write=False) def message_controller(self): controller = controllers.MessageController(self) if (self.conf.profiler.enabled and self.conf.profiler.trace_message_store): return profiler.trace_cls("mongodb_message_controller")(controller) else: return controller @decorators.lazy_property(write=False) def claim_controller(self): controller = controllers.ClaimController(self) if (self.conf.profiler.enabled and self.conf.profiler.trace_message_store): return profiler.trace_cls("mongodb_claim_controller")(controller) else: return controller @decorators.lazy_property(write=False) def subscription_controller(self): controller = controllers.SubscriptionController(self) if (self.conf.profiler.enabled and self.conf.profiler.trace_message_store): return profiler.trace_cls("mongodb_subscription_" "controller")(controller) else: return controller class FIFODataDriver(DataDriver): BASE_CAPABILITIES = (storage.Capabilities.DURABILITY, storage.Capabilities.CLAIMS, storage.Capabilities.AOD, storage.Capabilities.HIGH_THROUGHPUT) _COL_SUFIX = "_messages_fifo_p" @decorators.lazy_property(write=False) def message_controller(self): controller = controllers.FIFOMessageController(self) if (self.conf.profiler.enabled and self.conf.profiler.trace_message_store): return profiler.trace_cls("mongodb_message_controller")(controller) else: return controller class ControlDriver(storage.ControlDriverBase): def 
__init__(self, conf, cache): super(ControlDriver, self).__init__(conf, cache) self.conf.register_opts( drivers_management_store_mongodb.ALL_OPTS, group=drivers_management_store_mongodb.GROUP_NAME) self.mongodb_conf = self.conf[ drivers_management_store_mongodb.GROUP_NAME] def close(self): self.connection.close() @decorators.lazy_property(write=False) def connection(self): """MongoDB client connection instance.""" return _connection(self.mongodb_conf) @decorators.lazy_property(write=False) def database(self): name = self.mongodb_conf.database return self.connection[name] @decorators.lazy_property(write=False) def queues_database(self): """Database dedicated to the "queues" collection. The queues collection is separated out into its own database to avoid writer lock contention with the messages collections. """ name = self.mongodb_conf.database + '_queues' return self.connection[name] @decorators.lazy_property(write=False) def topics_database(self): """Database dedicated to the "topics" collection. The topics collection is separated out into its own database to avoid writer lock contention with the messages collections. """ name = self.mongodb_conf.database + '_topics' return self.connection[name] @decorators.lazy_property(write=False) def queue_controller(self): controller = controllers.QueueController(self) if (self.conf.profiler.enabled and (self.conf.profiler.trace_message_store or self.conf.profiler.trace_management_store)): return profiler.trace_cls("mongodb_queues_controller")(controller) else: return controller @property def pools_controller(self): controller = controllers.PoolsController(self) if (self.conf.profiler.enabled and self.conf.profiler.trace_management_store): return profiler.trace_cls("mongodb_pools_controller")(controller) else: return controller @property def catalogue_controller(self): controller = controllers.CatalogueController(self) if (self.conf.profiler.enabled and self.conf.profiler.trace_management_store): return profiler.trace_cls("mongodb_catalogue_" "controller")(controller) else: return controller @property def flavors_controller(self): controller = controllers.FlavorsController(self) if (self.conf.profiler.enabled and self.conf.profiler.trace_management_store): return profiler.trace_cls("mongodb_flavors_controller")(controller) else: return controller @decorators.lazy_property(write=False) def topic_controller(self): controller = controllers.TopicController(self) if (self.conf.profiler.enabled and (self.conf.profiler.trace_message_store or self.conf.profiler.trace_management_store)): return profiler.trace_cls("mongodb_topics_controller")(controller) else: return controller ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/mongodb/flavors.py0000664000175100017510000001160015033040005021171 0ustar00mylesmyles# Copyright (c) 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. 
""" Schema: 'n': name :: str 'p': project :: str 's': storage pool_group :: str 'c': capabilities :: dict """ from zaqar.storage import base from zaqar.storage import errors from zaqar.storage.mongodb import utils FLAVORS_INDEX = [ ('p', 1), ('n', 1), ] FLAVORS_STORAGE_POOL_INDEX = [ ('s', 1) ] # NOTE(cpp-cabrera): used for get/list operations. There's no need to # show the marker or the _id - they're implementation details. OMIT_FIELDS = (('_id', False),) def _field_spec(detailed=False): return dict(OMIT_FIELDS + (() if detailed else (('c', False),))) class FlavorsController(base.FlavorsBase): def __init__(self, *args, **kwargs): super(FlavorsController, self).__init__(*args, **kwargs) # To avoid creating unique index twice flavors_index_str = '_'.join( map(lambda x: '%s_%s' % (x[0], x[1]), FLAVORS_INDEX) ) self._col = self.driver.database.flavors indexes = self._col.index_information().keys() if flavors_index_str and flavors_index_str not in indexes: self._col.create_index(FLAVORS_INDEX, background=True, name='flavors_name', unique=True) self._col.create_index(FLAVORS_STORAGE_POOL_INDEX, background=True, name='flavors_storage_pool_group_name') self._pools_ctrl = self.driver.pools_controller @utils.raises_conn_error def list(self, project=None, marker=None, limit=10, detailed=False): query = {'p': project} if marker is not None: query['n'] = {'$gt': marker} cursor = self._col.find(query, projection=_field_spec(detailed), limit=limit).sort('n', 1) ntotal = self._col.count_documents(query) marker_name = {} def normalizer(flavor): marker_name['next'] = flavor['n'] return _normalize(flavor, detailed=detailed) yield utils.HookedCursor(cursor, normalizer, ntotal=ntotal) yield marker_name and marker_name['next'] @utils.raises_conn_error def get(self, name, project=None, detailed=False): res = self._col.find_one({'n': name, 'p': project}, _field_spec(detailed)) if not res: raise errors.FlavorDoesNotExist(name) return _normalize(res, detailed) @utils.raises_conn_error def create(self, name, project=None, capabilities=None): # NOTE(flaper87): Check if there are pools in this group. # Should there be a `group_exists` method? # NOTE(wanghao): Since we didn't pass the group name just pool name, # so we don't need to get the pool by group. # NOTE(gengchc2): If you do not use the removal group scheme to # configure flavor, pool_group can be None.. capabilities = {} if capabilities is None else capabilities self._col.update_one({'n': name, 'p': project}, {'$set': {'c': capabilities}}, upsert=True) @utils.raises_conn_error def exists(self, name, project=None): return self._col.find_one({'n': name, 'p': project}) is not None @utils.raises_conn_error def update(self, name, project=None, capabilities=None): fields = {} if capabilities is not None: fields['c'] = capabilities # NOTE(gengchc2): If you do not use the removal group scheme to # configure flavor, pool_group can be None, pool_group can be remove. 
assert fields, '`capabilities` not found in kwargs' res = self._col.update_one({'n': name, 'p': project}, {'$set': fields}, upsert=False) if res.matched_count == 0: raise errors.FlavorDoesNotExist(name) @utils.raises_conn_error def delete(self, name, project=None): self._col.delete_one({'n': name, 'p': project}) @utils.raises_conn_error def drop_all(self): self._col.drop() self._col.create_index(FLAVORS_INDEX, unique=True) def _normalize(flavor, detailed=False): ret = { 'name': flavor['n'], } if detailed: ret['capabilities'] = flavor['c'] return ret ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/mongodb/messages.py0000664000175100017510000012661215033040005021336 0ustar00mylesmyles# Copyright (c) 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Implements the MongoDB storage controller for messages. Field Mappings: In order to reduce the disk / memory space used, field names will be, most of the time, the first letter of their long name. """ import datetime import time import uuid from bson import binary from bson import objectid from oslo_log import log as logging from oslo_utils import timeutils import pymongo.errors import pymongo.read_preferences from zaqar.i18n import _ from zaqar import storage from zaqar.storage import errors from zaqar.storage.mongodb import utils from zaqar.storage import utils as s_utils LOG = logging.getLogger(__name__) # NOTE(kgriffs): This value, in seconds, should be less than the # minimum allowed TTL for messages (60 seconds). Make it 45 to allow for # some fudge room. MAX_RETRY_POST_DURATION = 45 # NOTE(kgriffs): It is extremely unlikely that all workers would somehow hang # for more than 5 seconds, without a single one being able to succeed in # posting some messages and incrementing the counter, thus allowing the other # producers to succeed in turn. COUNTER_STALL_WINDOW = 5 # For hinting ID_INDEX_FIELDS = [('_id', 1)] # For removing expired messages TTL_INDEX_FIELDS = [ ('e', 1), ] # NOTE(cpp-cabrera): to unify use of project/queue across mongodb # storage impls. PROJ_QUEUE = utils.PROJ_QUEUE_KEY # NOTE(kgriffs): This index is for listing messages, usually # filtering out claimed ones. ACTIVE_INDEX_FIELDS = [ (PROJ_QUEUE, 1), # Project will be unique, so put first ('k', 1), # Used for sorting and paging, must come before range queries ('c.e', 1), # Used for filtering out claimed messages # NOTE(kgriffs): We do not include 'u' and 'tx' here on # purpose. It was found experimentally that adding 'u' did # not improve performance, and so it was left out in order # to reduce index size and make updating the index # faster. When 'tx' was added, it was assumed that it would # follow a similar performance pattern to 'u', since by # the time you traverse the index down past the fields # listed above, there is very little left to scan, esp. # considering all queries are limited (limit=) to a fairly # small number.
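# For illustration, a typical query this index is meant to serve
# (the scope, marker, and timestamp values are hypothetical) consumes
# the fields in index order:
#
#     collection.find({'p_q': 'demo/orders',
#                      'k': {'$gt': marker},
#                      'c.e': {'$lte': now}}).sort('k', 1)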
# # TODO(kgriffs): The extrapolation wrt 'tx' needs to be # proven empirically. ] # For counting COUNTING_INDEX_FIELDS = [ (PROJ_QUEUE, 1), # Project will be unique, so put first ('c.e', 1), # Used for filtering out claimed messages ] # Index used for claims CLAIMED_INDEX_FIELDS = [ (PROJ_QUEUE, 1), ('c.id', 1), ('k', 1), ('c.e', 1), ] # This index is meant to be used as a shard-key and to ensure # uniqueness for markers. # # As for other compound indexes, order matters. The marker `k` # gives enough cardinality to ensure chunks are evenly distributed, # whereas the `p_q` field helps keep chunks from the same project # and queue together. # # In a sharded environment, uniqueness of this index is still guaranteed # because it's used as a shard key. MARKER_INDEX_FIELDS = [ ('k', 1), (PROJ_QUEUE, 1), ] TRANSACTION_INDEX_FIELDS = [ ('tx', 1), ] class MessageController(storage.Message): """Implements message resource operations using MongoDB. Messages are scoped by project + queue. :: Messages: Name Field ------------------------- scope -> p_q ttl -> t expires -> e marker -> k body -> b claim -> c client uuid -> u transaction -> tx delay -> d checksum -> cs """ def __init__(self, *args, **kwargs): super(MessageController, self).__init__(*args, **kwargs) # Cache for convenience and performance self._num_partitions = self.driver.mongodb_conf.partitions self._queue_ctrl = self.driver.queue_controller self._retry_range = range(self.driver.mongodb_conf.max_attempts) # Create a list of 'messages' collections, one for each database # partition, ordered by partition number. # # NOTE(kgriffs): Order matters, since it is used to look up the # collection by partition number. For example, self._collections[2] # would provide access to zaqar_p2.messages (partition numbers are # zero-based). self._collections = [db.messages for db in self.driver.message_databases] # Ensure indexes are initialized before any queries are performed for collection in self._collections: self._ensure_indexes(collection) # ---------------------------------------------------------------------- # Helpers # ---------------------------------------------------------------------- def _ensure_indexes(self, collection): """Ensures that all indexes are created.""" collection.create_index(TTL_INDEX_FIELDS, name='ttl', expireAfterSeconds=0, background=True) collection.create_index(ACTIVE_INDEX_FIELDS, name='active', background=True) collection.create_index(CLAIMED_INDEX_FIELDS, name='claimed', background=True) collection.create_index(COUNTING_INDEX_FIELDS, name='counting', background=True) collection.create_index(MARKER_INDEX_FIELDS, name='queue_marker', background=True) collection.create_index(TRANSACTION_INDEX_FIELDS, name='transaction', background=True) def _collection(self, queue_name, project=None): """Get a partitioned collection instance.""" return self._collections[utils.get_partition(self._num_partitions, queue_name, project)] def _backoff_sleep(self, attempt): """Sleep between retries using a jitter algorithm. Mitigates thrashing between multiple parallel requests, and creates backpressure on clients to slow down the rate at which they submit requests. :param attempt: current attempt number, zero-based """ conf = self.driver.mongodb_conf seconds = utils.calculate_backoff(attempt, conf.max_attempts, conf.max_retry_sleep, conf.max_retry_jitter) time.sleep(seconds) def _purge_queue(self, queue_name, project=None): """Removes all messages from the queue.
Warning: Only use this when deleting the queue; otherwise you can cause a side effect of resetting the marker counter, which can cause clients to miss tons of messages. If the queue does not exist, this method fails silently. :param queue_name: name of the queue to purge :param project: ID of the project to which the queue belongs """ scope = utils.scope_queue_name(queue_name, project) collection = self._collection(queue_name, project) collection.delete_many({PROJ_QUEUE: scope}) def _list(self, queue_name, project=None, marker=None, echo=False, client_uuid=None, projection=None, include_claimed=False, include_delayed=False, sort=1, limit=None, count=False): """Message document listing helper. :param queue_name: Name of the queue to list :param project: (Default None) Project `queue_name` belongs to. If not specified, queries the "global" namespace/project. :param marker: (Default None) Message marker from which to start iterating. If not specified, starts with the first message available in the queue. :param echo: (Default False) Whether to return messages that match client_uuid :param client_uuid: (Default None) UUID for the client that originated this request :param projection: (Default None) a list of field names that should be returned in the result set or a dict specifying the fields to include or exclude :param include_claimed: (Default False) Whether to include claimed messages, not just active ones :param include_delayed: (Default False) Whether to include delayed messages, not just active ones :param sort: (Default 1) Sort order for the listing. Pass 1 for ascending (oldest message first), or -1 for descending (newest message first). :param limit: (Default None) The maximum number of messages to list. The results may include fewer messages than the requested `limit` if not enough are available. If limit is not specified, all matching messages are listed. :param count: (Default False) Whether to also return the matching message count :returns: Generator yielding up to `limit` messages. """ if sort not in (1, -1): raise ValueError('sort must be either 1 (ascending) ' 'or -1 (descending)') now = timeutils.utcnow_ts() query = { # Messages must belong to this queue and project. PROJ_QUEUE: utils.scope_queue_name(queue_name, project), # NOTE(kgriffs): Messages must be finalized (i.e., must not # be part of an unfinalized transaction). # # See also the note wrt 'tx' within the definition # of ACTIVE_INDEX_FIELDS. 'tx': None, } if not echo: if (client_uuid is not None) and not isinstance(client_uuid, uuid.UUID): client_uuid = uuid.UUID(client_uuid) client_uuid = binary.Binary.from_uuid(client_uuid) elif isinstance(client_uuid, uuid.UUID): client_uuid = binary.Binary.from_uuid(client_uuid) query['u'] = {'$ne': client_uuid} if marker is not None: query['k'] = {'$gt': marker} collection = self._collection(queue_name, project) if not include_claimed: # Only include messages that are not part of # any claim, or are part of an expired claim. query['c.e'] = {'$lte': now} if not include_delayed: # NOTE(cdyangzhenyu): Only include messages that are not # part of any delay, or are part of an expired delay. If # the message has no 'd' attribute, it is also returned. # This is for compatibility with old data.
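# For illustration (timestamp hypothetical): with now == 1700000000,
# the clause built below matches messages whose delay has elapsed
# (d <= now) as well as legacy messages that have no 'd' field at all:
#
#     {'$or': [{'d': {'$lte': 1700000000}},
#              {'d': {'$exists': False}}]}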
query['$or'] = [{'d': {'$lte': now}}, {'d': {'$exists': False}}] # Construct the request cursor = collection.find(query, projection=projection, sort=[('k', sort)]) ntotal = None if count: ntotal = collection.count_documents(query) if limit is not None: cursor.limit(limit) if count: ntotal = collection.count_documents(query, limit=limit) # NOTE(flaper87): Suggest the index to use for this query to # ensure the most performant one is chosen. if count: return cursor.hint(ACTIVE_INDEX_FIELDS), ntotal return cursor.hint(ACTIVE_INDEX_FIELDS) # ---------------------------------------------------------------------- # "Friends" interface # ---------------------------------------------------------------------- def _count(self, queue_name, project=None, include_claimed=False): """Return total number of messages in a queue. This method is designed to very quickly count the number of messages in a given queue. Expired messages are not counted, of course. If the queue does not exist, the count will always be 0. Note: Some expired messages may be included in the count if they haven't been GC'd yet. This is done for performance. """ query = { # Messages must belong to this queue and project. PROJ_QUEUE: utils.scope_queue_name(queue_name, project), # NOTE(kgriffs): Messages must be finalized (i.e., must not # be part of an unfinalized transaction). # # See also the note wrt 'tx' within the definition # of ACTIVE_INDEX_FIELDS. 'tx': None, } if not include_claimed: # Exclude messages that are claimed query['c.e'] = {'$lte': timeutils.utcnow_ts()} collection = self._collection(queue_name, project) return collection.count_documents(filter=query, hint=COUNTING_INDEX_FIELDS) def _active(self, queue_name, marker=None, echo=False, client_uuid=None, projection=None, project=None, limit=None, include_delayed=False): return self._list(queue_name, project=project, marker=marker, echo=echo, client_uuid=client_uuid, projection=projection, include_claimed=False, include_delayed=include_delayed, limit=limit) def _claimed(self, queue_name, claim_id, expires=None, limit=None, project=None): if claim_id is None: claim_id = {'$ne': None} query = { PROJ_QUEUE: utils.scope_queue_name(queue_name, project), 'c.id': claim_id, 'c.e': {'$gt': expires or timeutils.utcnow_ts()}, } kwargs = {} collection = self._collection(queue_name, project) # NOTE(kgriffs): Claimed messages must be queried from # the primary to avoid a race condition caused by the # multi-phased "create claim" algorithm. # NOTE(flaper87): In pymongo 3.0 PRIMARY is the default and # `read_preference` is read only. We'd need to set it when the # client is created.
msgs = collection.find(query, sort=[('k', 1)], **kwargs).hint( CLAIMED_INDEX_FIELDS) ntotal = collection.count_documents(query) if limit is not None: msgs = msgs.limit(limit) ntotal = collection.count_documents(query, limit=limit) now = timeutils.utcnow_ts() def denormalizer(msg): doc = _basic_message(msg, now) doc['claim'] = msg['c'] return doc return utils.HookedCursor(msgs, denormalizer, ntotal=ntotal) def _unclaim(self, queue_name, claim_id, project=None): cid = utils.to_oid(claim_id) # NOTE(cpp-cabrera): early abort - avoid a DB query if we're handling # an invalid ID if cid is None: return # NOTE(cpp-cabrera): unclaim by setting the claim ID to None # and the claim expiration time to now now = timeutils.utcnow_ts() scope = utils.scope_queue_name(queue_name, project) collection = self._collection(queue_name, project) collection.update_many({PROJ_QUEUE: scope, 'c.id': cid}, {'$set': {'c': {'id': None, 'e': now}}}, upsert=False) def _inc_counter(self, queue_name, project=None, amount=1, window=None): """Increments the message counter and returns the new value. :param queue_name: Name of the queue to which the counter is scoped :param project: Queue's project name :param amount: (Default 1) Amount by which to increment the counter :param window: (Default None) A time window, in seconds, that must have elapsed since the counter was last updated, in order to increment the counter. :returns: Updated message counter value, or None if window was specified, and the counter has already been updated within the specified time period. :raises QueueDoesNotExist: if not found """ # NOTE(flaper87): If this `if` is True, it means we're # using a mongodb in the control plane. To avoid breaking # environments doing so already, we'll keep using the counter # in the mongodb queue_controller rather than the one in the # message_controller. This should go away, eventually if hasattr(self._queue_ctrl, '_inc_counter'): return self._queue_ctrl._inc_counter(queue_name, project, amount, window) now = timeutils.utcnow_ts() update = {'$inc': {'c.v': amount}, '$set': {'c.t': now}} query = _get_scoped_query(queue_name, project) if window is not None: threshold = now - window query['c.t'] = {'$lt': threshold} while True: try: collection = self._collection(queue_name, project).stats doc = collection.find_one_and_update( query, update, return_document=pymongo.ReturnDocument.AFTER, projection={'c.v': 1, '_id': 0}) break except pymongo.errors.AutoReconnect: LOG.exception('Auto reconnect error') if doc is None: if window is None: # NOTE(kgriffs): Since we did not filter by a time window, # the queue should have been found and updated. Perhaps # the queue has been deleted? message = ('Failed to increment the message ' 'counter for queue %(name)s and ' 'project %(project)s') message %= dict(name=queue_name, project=project) LOG.warning(message) raise errors.QueueDoesNotExist(queue_name, project) # NOTE(kgriffs): Assume the queue existed, but the counter # was recently updated, causing the range query on 'c.t' to # exclude the record. return None return doc['c']['v'] def _get_counter(self, queue_name, project=None): """Retrieves the current message counter value for a given queue. This helper is used to generate monotonic pagination markers that are saved as part of the message document. Note 1: Markers are scoped per-queue and so are *not* globally unique or globally ordered. Note 2: If two or more requests to this method are made in parallel, this method will return the same counter value. 
This is done intentionally so that the caller can detect a parallel message post, allowing it to mitigate race conditions between producer and observer clients. :param queue_name: Name of the queue to which the counter is scoped :param project: Queue's project :returns: current message counter as an integer """ # NOTE(flaper87): If this `if` is True, it means we're # using a mongodb in the control plane. To avoid breaking # environments doing so already, we'll keep using the counter # in the mongodb queue_controller rather than the one in the # message_controller. This should go away, eventually if hasattr(self._queue_ctrl, '_get_counter'): return self._queue_ctrl._get_counter(queue_name, project) update = {'$inc': {'c.v': 0, 'c.t': 0}} query = _get_scoped_query(queue_name, project) try: collection = self._collection(queue_name, project).stats doc = collection.find_one_and_update( query, update, upsert=True, return_document=pymongo.ReturnDocument.AFTER, projection={'c.v': 1, '_id': 0}) return doc['c']['v'] except pymongo.errors.AutoReconnect: LOG.exception('Auto reconnect error') # ---------------------------------------------------------------------- # Public interface # ---------------------------------------------------------------------- def list(self, queue_name, project=None, marker=None, limit=storage.DEFAULT_MESSAGES_PER_PAGE, echo=False, client_uuid=None, include_claimed=False, include_delayed=False): if marker is not None: try: marker = int(marker) except ValueError: yield iter([]) messages, ntotal = self._list(queue_name, project=project, marker=marker, client_uuid=client_uuid, echo=echo, include_claimed=include_claimed, include_delayed=include_delayed, limit=limit, count=True) marker_id = {} now = timeutils.utcnow_ts() # NOTE (kgriffs) @utils.raises_conn_error not needed on this # function, since utils.HookedCursor already has it. def denormalizer(msg): marker_id['next'] = msg['k'] return _basic_message(msg, now) yield utils.HookedCursor(messages, denormalizer, ntotal=ntotal) yield str(marker_id['next']) @utils.raises_conn_error @utils.retries_on_autoreconnect def first(self, queue_name, project=None, sort=1): cursor = self._list(queue_name, project=project, include_claimed=True, sort=sort, limit=1) try: message = next(cursor) except StopIteration: raise errors.QueueIsEmpty(queue_name, project) now = timeutils.utcnow_ts() return _basic_message(message, now) @utils.raises_conn_error @utils.retries_on_autoreconnect def get(self, queue_name, message_id, project=None): mid = utils.to_oid(message_id) if mid is None: raise errors.MessageDoesNotExist(message_id, queue_name, project) now = timeutils.utcnow_ts() query = { '_id': mid, PROJ_QUEUE: utils.scope_queue_name(queue_name, project), } collection = self._collection(queue_name, project) message = list(collection.find(query).limit(1).hint(ID_INDEX_FIELDS)) if not message: raise errors.MessageDoesNotExist(message_id, queue_name, project) return _basic_message(message[0], now) @utils.raises_conn_error @utils.retries_on_autoreconnect def bulk_get(self, queue_name, message_ids, project=None): message_ids = [mid for mid in map(utils.to_oid, message_ids) if mid] if not message_ids: return iter([]) now = timeutils.utcnow_ts() # Base query, always check expire time query = { '_id': {'$in': message_ids}, PROJ_QUEUE: utils.scope_queue_name(queue_name, project), } collection = self._collection(queue_name, project) # NOTE(flaper87): Should this query # be sorted? 
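# (Illustrative sketch of the query built above; the ids and scope are
# placeholders.) Invalid ids were already dropped by utils.to_oid(),
# so a partial result simply omits them:
#
#     {'_id': {'$in': [ObjectId('...'), ObjectId('...')]},
#      'p_q': '<project>/<queue>'}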
messages = collection.find(query).hint(ID_INDEX_FIELDS) ntotal = collection.count_documents(query) def denormalizer(msg): return _basic_message(msg, now) return utils.HookedCursor(messages, denormalizer, ntotal=ntotal) @utils.raises_conn_error @utils.retries_on_autoreconnect def post(self, queue_name, messages, client_uuid, project=None): # NOTE(flaper87): This method should be safe to retry on # autoreconnect, since we've a 2-step insert for messages. # The worst-case scenario is that we'll increase the counter # several times and we'd end up with some non-active messages. if not self._queue_ctrl.exists(queue_name, project): raise errors.QueueDoesNotExist(queue_name, project) if (client_uuid is not None) and not isinstance(client_uuid, uuid.UUID): client_uuid = uuid.UUID(client_uuid) client_uuid = binary.Binary.from_uuid(client_uuid) elif isinstance(client_uuid, uuid.UUID): client_uuid = binary.Binary.from_uuid(client_uuid) # NOTE(flaper87): Make sure the counter exists. This method # is an upsert. self._get_counter(queue_name, project) now = timeutils.utcnow_ts() now_dt = datetime.datetime.fromtimestamp( now, tz=datetime.timezone.utc).replace(tzinfo=None) collection = self._collection(queue_name, project) messages = list(messages) msgs_n = len(messages) next_marker = self._inc_counter(queue_name, project, amount=msgs_n) - msgs_n prepared_messages = [] for index, message in enumerate(messages): msg = { PROJ_QUEUE: utils.scope_queue_name(queue_name, project), 't': message['ttl'], 'e': now_dt + datetime.timedelta(seconds=message['ttl']), 'u': client_uuid, 'c': {'id': None, 'e': now, 'c': 0}, 'd': now + message.get('delay', 0), 'b': message['body'] if 'body' in message else {}, 'k': next_marker + index, 'tx': None } if self.driver.conf.enable_checksum: msg['cs'] = s_utils.get_checksum(message.get('body', None)) prepared_messages.append(msg) res = collection.insert_many(prepared_messages, bypass_document_validation=True) return [str(id_) for id_ in res.inserted_ids] @utils.raises_conn_error @utils.retries_on_autoreconnect def delete(self, queue_name, message_id, project=None, claim=None): # NOTE(cpp-cabrera): return early - this is an invalid message # id so we won't be able to find it any way mid = utils.to_oid(message_id) if mid is None: return collection = self._collection(queue_name, project) query = { '_id': mid, PROJ_QUEUE: utils.scope_queue_name(queue_name, project), } cid = utils.to_oid(claim) if cid is None: raise errors.ClaimDoesNotExist(claim, queue_name, project) now = timeutils.utcnow_ts() cursor = collection.find(query).hint(ID_INDEX_FIELDS) try: message = next(cursor) except StopIteration: return if claim is None: if _is_claimed(message, now): raise errors.MessageIsClaimed(message_id) else: if message['c']['id'] != cid: kwargs = {} # NOTE(flaper87): In pymongo 3.0 PRIMARY is the default and # `read_preference` is read only. We'd need to set it when the # client is created. # NOTE(kgriffs): Read from primary in case the message # was just barely claimed, and claim hasn't made it to # the secondary. 
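# (Hedged aside, not original code.) With pymongo 3.x, forcing a
# primary read for a single lookup would typically be done by deriving
# a collection with an explicit read preference, e.g.:
#
#     primary_coll = collection.with_options(
#         read_preference=pymongo.read_preferences.ReadPreference.PRIMARY)
#
# The code here simply relies on the client-wide default instead.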
message = collection.find_one(query, **kwargs) if message['c']['id'] != cid: if _is_claimed(message, now): raise errors.MessageNotClaimedBy(message_id, claim) raise errors.MessageNotClaimed(message_id) collection.delete_one(query) @utils.raises_conn_error @utils.retries_on_autoreconnect def bulk_delete(self, queue_name, message_ids, project=None, claim_ids=None): message_ids = [mid for mid in map(utils.to_oid, message_ids) if mid] if claim_ids: claim_ids = [cid for cid in map(utils.to_oid, claim_ids) if cid] query = { '_id': {'$in': message_ids}, PROJ_QUEUE: utils.scope_queue_name(queue_name, project), } collection = self._collection(queue_name, project) if claim_ids: message_claim_ids = [] messages = collection.find(query).hint(ID_INDEX_FIELDS) for message in messages: message_claim_ids.append(message['c']['id']) for cid in claim_ids: if cid not in message_claim_ids: raise errors.ClaimDoesNotExist(cid, queue_name, project) collection.delete_many(query) @utils.raises_conn_error @utils.retries_on_autoreconnect def pop(self, queue_name, limit, project=None): query = { PROJ_QUEUE: utils.scope_queue_name(queue_name, project), } # Only include messages that are not part of # any claim, or are part of an expired claim. now = timeutils.utcnow_ts() query['c.e'] = {'$lte': now} collection = self._collection(queue_name, project) projection = {'_id': 1, 't': 1, 'b': 1, 'c.id': 1} messages = (collection.find_one_and_delete(query, projection=projection) for _ in range(limit)) final_messages = [_basic_message(message, now) for message in messages if message] return final_messages class FIFOMessageController(MessageController): def _ensure_indexes(self, collection): """Ensures that all indexes are created.""" collection.create_index(TTL_INDEX_FIELDS, name='ttl', expireAfterSeconds=0, background=True) collection.create_index(ACTIVE_INDEX_FIELDS, name='active', background=True) collection.create_index(CLAIMED_INDEX_FIELDS, name='claimed', background=True) collection.create_index(COUNTING_INDEX_FIELDS, name='counting', background=True) # NOTE(kgriffs): This index must be unique so that # inserting a message with the same marker to the # same queue will fail; this is used to detect a # race condition which can cause an observer client # to miss a message when there is more than one # producer posting messages to the same queue, in # parallel. collection.create_index(MARKER_INDEX_FIELDS, name='queue_marker', unique=True, background=True) collection.create_index(TRANSACTION_INDEX_FIELDS, name='transaction', background=True) @utils.raises_conn_error @utils.retries_on_autoreconnect def post(self, queue_name, messages, client_uuid, project=None): # NOTE(flaper87): This method should be safe to retry on # autoreconnect, since we've a 2-step insert for messages. # The worst-case scenario is that we'll increase the counter # several times and we'd end up with some non-active messages. if not self._queue_ctrl.exists(queue_name, project): raise errors.QueueDoesNotExist(queue_name, project) # NOTE(flaper87): Make sure the counter exists. This method # is an upsert. self._get_counter(queue_name, project) now = timeutils.utcnow_ts() now_dt = datetime.datetime.fromtimestamp( now, tz=datetime.timezone.utc).replace(tzinfo=None) collection = self._collection(queue_name, project) # Set the next basis marker for the first attempt. 
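# To make the counter race concrete (marker numbers are hypothetical):
# if the counter reads 5 and two parallel posts both prepare messages
# at marker 5, the unique (k, p_q) index lets only one insert succeed;
# the loser catches DuplicateKeyError below, waits for the winner to
# bump the counter, and retries with fresh markers, so a paging reader
# never skips a message.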
# # Note that we don't increment the counter right away because # if 2 concurrent posts happen and the one with the higher counter # ends before the one with the lower counter, there's a window # where a client paging through the queue may get the messages # with the higher counter and skip the previous ones. This would # make our FIFO guarantee unsound. next_marker = self._get_counter(queue_name, project) # Unique transaction ID to facilitate atomic batch inserts transaction = objectid.ObjectId() if (client_uuid is not None) and not isinstance(client_uuid, uuid.UUID): client_uuid = uuid.UUID(client_uuid) client_uuid = binary.Binary.from_uuid(client_uuid) elif isinstance(client_uuid, uuid.UUID): client_uuid = binary.Binary.from_uuid(client_uuid) prepared_messages = [] for index, message in enumerate(messages): msg = { PROJ_QUEUE: utils.scope_queue_name(queue_name, project), 't': message['ttl'], 'e': now_dt + datetime.timedelta(seconds=message['ttl']), 'u': client_uuid, 'c': {'id': None, 'e': now, 'c': 0}, 'd': now + message.get('delay', 0), 'b': message['body'] if 'body' in message else {}, 'k': next_marker + index, 'tx': None } if self.driver.conf.enable_checksum: msg['cs'] = s_utils.get_checksum(message.get('body', None)) prepared_messages.append(msg) # NOTE(kgriffs): Don't take the time to do a 2-phase insert # if there is no way for it to partially succeed. if len(prepared_messages) == 1: transaction = None prepared_messages[0]['tx'] = None # Use a retry range for sanity, although we expect # to rarely, if ever, reach the maximum number of # retries. # # NOTE(kgriffs): With the default configuration (100 ms # max sleep, 1000 max attempts), the max stall time # before the operation is abandoned is 49.95 seconds. for attempt in self._retry_range: try: res = collection.insert_many(prepared_messages, bypass_document_validation=True) # Log a message if we retried, for debugging perf issues if attempt != 0: msgtmpl = _('%(attempts)d attempt(s) required to post ' '%(num_messages)d messages to queue ' '"%(queue)s" under project %(project)s') LOG.debug(msgtmpl, dict(queue=queue_name, attempts=attempt + 1, num_messages=len(res.inserted_ids), project=project)) # Update the counter in preparation for the next batch # # NOTE(kgriffs): Due to the unique index on the messages # collection, competing inserts will fail as a whole, # and keep retrying until the counter is incremented # such that the competing marker's will start at a # unique number, 1 past the max of the messages just # inserted above. self._inc_counter(queue_name, project, amount=len(res.inserted_ids)) # NOTE(kgriffs): Finalize the insert once we can say that # all the messages made it. This makes bulk inserts # atomic, assuming queries filter out any non-finalized # messages. if transaction is not None: collection.update_many({'tx': transaction}, {'$set': {'tx': None}}, upsert=False) return [str(id_) for id_ in res.inserted_ids] except (pymongo.errors.DuplicateKeyError, pymongo.errors.BulkWriteError): # TODO(kgriffs): Record stats of how often retries happen, # and how many attempts, on average, are required to insert # messages. # NOTE(kgriffs): This can be used in conjunction with the # log line, above, that is emitted after all messages have # been posted, to gauge how long it is taking for messages # to be posted to a given queue, or overall. 
# # TODO(kgriffs): Add transaction ID to help match up loglines if attempt == 0: msgtmpl = _('First attempt failed while ' 'adding messages to queue ' '"%(queue)s" under project %(project)s') LOG.debug(msgtmpl, dict(queue=queue_name, project=project)) # NOTE(kgriffs): Never retry past the point that competing # messages expire and are GC'd, since once they are gone, # the unique index no longer protects us from getting out # of order, which could cause an observer to miss this # message. The code below provides a sanity-check to ensure # this situation can not happen. elapsed = timeutils.utcnow_ts() - now if elapsed > MAX_RETRY_POST_DURATION: msgtmpl = ('Exceeded maximum retry duration for queue ' '"%(queue)s" under project %(project)s') LOG.warning(msgtmpl, dict(queue=queue_name, project=project)) break # Chill out for a moment to mitigate thrashing/thundering self._backoff_sleep(attempt) # NOTE(kgriffs): Perhaps we failed because a worker crashed # after inserting messages, but before incrementing the # counter; that would cause all future requests to stall, # since they would keep getting the same base marker that is # conflicting with existing messages, until the messages that # "won" expire, at which time we would end up reusing markers, # and that could make some messages invisible to an observer # that is querying with a marker that is large than the ones # being reused. # # To mitigate this, we apply a heuristic to determine whether # a counter has stalled. We attempt to increment the counter, # but only if it hasn't been updated for a few seconds, which # should mean that nobody is left to update it! # # Note that we increment one at a time until the logjam is # broken, since we don't know how many messages were posted # by the worker before it crashed. next_marker = self._inc_counter( queue_name, project, window=COUNTER_STALL_WINDOW) # Retry the entire batch with a new sequence of markers. # # NOTE(kgriffs): Due to the unique index, and how # MongoDB works with batch requests, we will never # end up with a partially-successful update. The first # document in the batch will fail to insert, and the # remainder of the documents will not be attempted. if next_marker is None: # NOTE(kgriffs): Usually we will end up here, since # it should be rare that a counter becomes stalled. next_marker = self._get_counter( queue_name, project) else: msgtmpl = ('Detected a stalled message counter ' 'for queue "%(queue)s" under ' 'project %(project)s.' 
                                ' The counter was incremented to %(value)d.')
                    LOG.warning(msgtmpl,
                                dict(queue=queue_name,
                                     project=project,
                                     value=next_marker))

                for index, message in enumerate(prepared_messages):
                    message['k'] = next_marker + index

            except Exception:
                LOG.exception('Error parsing document')
                raise

        msgtmpl = ('Hit maximum number of attempts (%(max)s) for queue '
                   '"%(queue)s" under project %(project)s')

        LOG.warning(msgtmpl,
                    dict(max=self.driver.mongodb_conf.max_attempts,
                         queue=queue_name,
                         project=project))

        raise errors.MessageConflict(queue_name, project)


def _is_claimed(msg, now):
    return (msg['c']['id'] is not None and
            msg['c']['e'] > now)


def _basic_message(msg, now):
    oid = msg['_id']
    age = now - utils.oid_ts(oid)
    res = {
        'id': str(oid),
        'age': int(age),
        'ttl': msg['t'],
        'claim_count': msg['c'].get('c', 0),
        'body': msg['b'],
        'claim_id': str(msg['c']['id']) if msg['c']['id'] else None
    }
    if msg.get('cs'):
        res['checksum'] = msg.get('cs')

    return res


class MessageQueueHandler(object):

    def __init__(self, driver, control_driver):
        self.driver = driver
        self._cache = self.driver.cache
        self.queue_controller = self.driver.queue_controller
        self.message_controller = self.driver.message_controller

    def delete(self, queue_name, project=None):
        self.message_controller._purge_queue(queue_name, project)

    @utils.raises_conn_error
    @utils.retries_on_autoreconnect
    def stats(self, name, project=None):
        if not self.queue_controller.exists(name, project=project):
            raise errors.QueueDoesNotExist(name, project)

        controller = self.message_controller

        active = controller._count(name, project=project,
                                   include_claimed=False)
        total = controller._count(name, project=project,
                                  include_claimed=True)

        message_stats = {
            'claimed': total - active,
            'free': active,
            'total': total,
        }

        try:
            oldest = controller.first(name, project=project, sort=1)
            newest = controller.first(name, project=project, sort=-1)
        except errors.QueueIsEmpty:
            pass
        else:
            now = timeutils.utcnow_ts()
            message_stats['oldest'] = utils.stat_message(oldest, now)
            message_stats['newest'] = utils.stat_message(newest, now)

        return {'messages': message_stats}


def _get_scoped_query(name, project):
    return {'p_q': utils.scope_queue_name(name, project)}
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/mongodb/pools.py0000664000175100017510000001510315033040005020653 0ustar00mylesmyles# Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""pools: an implementation of the pool management storage
controller for mongodb.

Schema:

'n': name :: str
'u': uri :: str
'w': weight :: int
'o': options :: dict
'f': flavor :: str
"""

import functools

from oslo_log import log as logging
from pymongo import errors as mongo_error

from zaqar.common import utils as common_utils
from zaqar.storage import base
from zaqar.storage import errors
from zaqar.storage.mongodb import utils

POOLS_INDEX = [
    ('n', 1)
]

LOG = logging.getLogger(__name__)

URI_INDEX = [
    ('u', 1)
]

# NOTE(cpp-cabrera): used for get/list operations.
# There's no need to show the marker or the _id - they're implementation
# details.
OMIT_FIELDS = (('_id', False),)


def _field_spec(detailed=False):
    return dict(OMIT_FIELDS + (() if detailed else (('o', False),)))


class PoolsController(base.PoolsBase):

    def __init__(self, *args, **kwargs):
        super(PoolsController, self).__init__(*args, **kwargs)

        # To avoid creating the unique indexes twice
        pools_index_str = '_'.join(
            map(lambda x: '%s_%s' % (x[0], x[1]), POOLS_INDEX)
        )
        uri_index_str = '_'.join(
            map(lambda x: '%s_%s' % (x[0], x[1]), URI_INDEX)
        )

        self._col = self.driver.database.pools
        indexes = self._col.index_information().keys()

        if pools_index_str and pools_index_str not in indexes:
            self._col.create_index(POOLS_INDEX,
                                   background=True,
                                   name='pools_name',
                                   unique=True)

        if uri_index_str and uri_index_str not in indexes:
            self._col.create_index(URI_INDEX,
                                   background=True,
                                   name='pools_uri',
                                   unique=True)

    @utils.raises_conn_error
    def _list(self, marker=None, limit=10, detailed=False):
        query = {}
        if marker is not None:
            query['n'] = {'$gt': marker}

        cursor = self._col.find(query, projection=_field_spec(detailed),
                                limit=limit).sort('n')
        marker_name = {}

        def normalizer(pool):
            marker_name['next'] = pool['n']

            return _normalize(pool, detailed=detailed)

        yield utils.HookedCursor(cursor, normalizer)
        yield marker_name and marker_name['next']

    @utils.raises_conn_error
    def _get(self, name, detailed=False):
        res = self._col.find_one({'n': name},
                                 _field_spec(detailed))
        if not res:
            raise errors.PoolDoesNotExist(name)

        return _normalize(res, detailed)

    @utils.raises_conn_error
    def _get_pools_by_flavor(self, flavor=None, detailed=False):
        query = {}
        if flavor is None:
            query = {'f': None}
        elif flavor.get('name') is not None:
            query = {'f': flavor.get('name')}
        cursor = self._col.find(query,
                                projection=_field_spec(detailed))
        ntotal = self._col.count_documents(query)
        normalizer = functools.partial(_normalize, detailed=detailed)
        return utils.HookedCursor(cursor, normalizer, ntotal=ntotal)

    @utils.raises_conn_error
    def _create(self, name, weight, uri, flavor=None, options=None):
        options = {} if options is None else options
        try:
            self._col.update_one({'n': name},
                                 {'$set': {'n': name,
                                           'w': weight,
                                           'u': uri,
                                           'f': flavor,
                                           'o': options}},
                                 upsert=True)
        except mongo_error.DuplicateKeyError:
            LOG.exception('Pool "%s" already exists', name)
            raise errors.PoolAlreadyExists()

    @utils.raises_conn_error
    def _exists(self, name):
        return self._col.find_one({'n': name}) is not None

    @utils.raises_conn_error
    def _update(self, name, **kwargs):
        names = ('uri', 'weight', 'flavor', 'options')
        fields = common_utils.fields(kwargs, names,
                                     pred=lambda x: x is not None,
                                     key_transform=lambda x: x[0])
        assert fields, ('`uri`, `weight`, `flavor`, '
                        'or `options` not found in kwargs')

        flavor = fields.get('f')
        if flavor is not None and len(flavor) == 0:
            fields['f'] = None

        res = self._col.update_one({'n': name},
                                   {'$set': fields},
                                   upsert=False)

        if res.matched_count == 0:
            raise errors.PoolDoesNotExist(name)

    @utils.raises_conn_error
    def _delete(self, name):
        # NOTE(wpf): Initializing the Flavors controller here instead of
        # doing so in __init__ is required to avoid falling into a maximum
        # recursion error.
        try:
            pool = self.get(name)
            pools_in_flavor = []
            flavor = pool.get("flavor", None)
            if flavor is not None:
                # NOTE(gengchc2): If this is the only pool in the flavor,
                # and the flavor is still in use, don't allow the pool to
                # be deleted.
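                # NOTE(editor): Behavioral sketch, assuming the public
                # create()/delete() wrappers mirror the underscored
                # methods above:
                #
                #     ctrl.create('p1', 100, 'mongodb://127.0.0.1:27017',
                #                 flavor='gold')
                #     ctrl.delete('p1')  # raises errors.PoolInUseByFlavor,
                #                        # 'p1' being the flavor's only pool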
flavor1 = {} flavor1['name'] = flavor pools_in_flavor = self.get_pools_by_flavor(flavor=flavor1) if len(pools_in_flavor) == 1: raise errors.PoolInUseByFlavor(name, flavor) self._col.delete_one({'n': name}) except errors.PoolDoesNotExist: pass @utils.raises_conn_error def _drop_all(self): self._col.drop() self._col.create_index(POOLS_INDEX, unique=True) def _normalize(pool, detailed=False): ret = { 'name': pool['n'], 'flavor': pool['f'], 'uri': pool['u'], 'weight': pool['w'], } if detailed: ret['options'] = pool['o'] return ret ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/mongodb/queues.py0000664000175100017510000002556615033040005021044 0ustar00mylesmyles# Copyright (c) 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Implements the MongoDB storage controller for queues. Field Mappings: In order to reduce the disk / memory space used, field names will be, most of the time, the first letter of their long name. """ from oslo_log import log as logging from oslo_utils import timeutils from pymongo.collection import ReturnDocument import pymongo.errors from zaqar.common import decorators from zaqar.i18n import _ from zaqar import storage from zaqar.storage import errors from zaqar.storage.mongodb import utils LOG = logging.getLogger(__name__) # NOTE(kgriffs): E.g.: 'queuecontroller:exists:5083853/my-queue' _QUEUE_CACHE_PREFIX = 'queuecontroller:' # NOTE(kgriffs): This causes some race conditions, but they are # harmless. If a queue was deleted, but we are still returning # that it exists, some messages may get inserted without the # client getting an error. In this case, those messages would # be orphaned and expire eventually according to their TTL. # # What this means for the client is that they have a bug; they # deleted a queue and then immediately tried to post messages # to it. If they keep trying to use the queue, they will # eventually start getting an error, once the cache entry # expires, which should clue them in on what happened. # # TODO(kgriffs): Make dynamic? _QUEUE_CACHE_TTL = 5 def _queue_exists_key(queue, project=None): # NOTE(kgriffs): Use string concatenation for performance, # also put project first since it is guaranteed to be # unique, which should reduce lookup time. return _QUEUE_CACHE_PREFIX + 'exists:' + str(project) + '/' + queue class QueueController(storage.Queue): """Implements queue resource operations using MongoDB. Queues are scoped by project, which is prefixed to the queue name. :: Queues: Name Field --------------------- name -> p_q msg counter -> c metadata -> m Message Counter: Name Field ------------------- value -> v modified ts -> t """ def __init__(self, *args, **kwargs): super(QueueController, self).__init__(*args, **kwargs) self._cache = self.driver.cache self._collection = self.driver.queues_database.queues # NOTE(flaper87): This creates a unique index for # project and name. Using project as the prefix # allows for querying by project and project+name. 
# This is also useful for retrieving the queues list for # a specific project, for example. Order matters! # NOTE(wanghao): pymongo has removed the ensure_index since 4.0.0. # So we need to update ensure_index to create_index. self._collection.create_index([('p_q', 1)], unique=True) # ---------------------------------------------------------------------- # Helpers # ---------------------------------------------------------------------- def _get_counter(self, name, project=None): """Retrieves the current message counter value for a given queue. This helper is used to generate monotonic pagination markers that are saved as part of the message document. Note 1: Markers are scoped per-queue and so are *not* globally unique or globally ordered. Note 2: If two or more requests to this method are made in parallel, this method will return the same counter value. This is done intentionally so that the caller can detect a parallel message post, allowing it to mitigate race conditions between producer and observer clients. :param name: Name of the queue to which the counter is scoped :param project: Queue's project :returns: current message counter as an integer """ doc = self._collection.find_one(_get_scoped_query(name, project), projection={'c.v': 1, '_id': 0}) if doc is None: raise errors.QueueDoesNotExist(name, project) return doc['c']['v'] def _inc_counter(self, name, project=None, amount=1, window=None): """Increments the message counter and returns the new value. :param name: Name of the queue to which the counter is scoped :param project: Queue's project name :param amount: (Default 1) Amount by which to increment the counter :param window: (Default None) A time window, in seconds, that must have elapsed since the counter was last updated, in order to increment the counter. :returns: Updated message counter value, or None if window was specified, and the counter has already been updated within the specified time period. :raises QueueDoesNotExist: if not found """ now = timeutils.utcnow_ts() update = {'$inc': {'c.v': amount}, '$set': {'c.t': now}} query = _get_scoped_query(name, project) if window is not None: threshold = now - window query['c.t'] = {'$lt': threshold} while True: try: doc = self._collection.find_one_and_update( query, update, return_document=ReturnDocument.AFTER, projection={'c.v': 1, '_id': 0}) break except pymongo.errors.AutoReconnect: LOG.exception('Auto reconnect failure') if doc is None: if window is None: # NOTE(kgriffs): Since we did not filter by a time window, # the queue should have been found and updated. Perhaps # the queue has been deleted? message = _('Failed to increment the message ' 'counter for queue %(name)s and ' 'project %(project)s') message %= dict(name=name, project=project) LOG.warning(message) raise errors.QueueDoesNotExist(name, project) # NOTE(kgriffs): Assume the queue existed, but the counter # was recently updated, causing the range query on 'c.t' to # exclude the record. 
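                # NOTE(editor): Worked example (hypothetical timestamps) of
                # the window check above: with window=5 and a counter last
                # touched at c.t == 100, a call at now == 106 matches the
                # query (100 < 106 - 5) and increments the counter, while a
                # call at now == 103 matches nothing and falls through to
                # the None return below.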
return None return doc['c']['v'] # ---------------------------------------------------------------------- # Interface # ---------------------------------------------------------------------- def _get(self, name, project=None): try: return self.get_metadata(name, project) except errors.QueueDoesNotExist: return {} def _list(self, project=None, kfilter={}, marker=None, limit=storage.DEFAULT_QUEUES_PER_PAGE, detailed=False, name=None): query = utils.scoped_query(marker, project, name, kfilter) projection = {'p_q': 1, '_id': 0} if detailed: projection['m'] = 1 cursor = self._collection.find(query, projection=projection) cursor = cursor.limit(limit).sort('p_q') marker_name = {} ntotal = self._collection.count_documents(query, limit=limit) def normalizer(record): queue = {'name': utils.descope_queue_name(record['p_q'])} marker_name['next'] = queue['name'] if detailed: queue['metadata'] = record['m'] return queue yield utils.HookedCursor(cursor, normalizer, ntotal=ntotal) yield marker_name and marker_name['next'] @utils.raises_conn_error @utils.retries_on_autoreconnect def get_metadata(self, name, project=None): queue = self._collection.find_one(_get_scoped_query(name, project), projection={'m': 1, '_id': 0}) if queue is None: raise errors.QueueDoesNotExist(name, project) return queue.get('m', {}) @utils.raises_conn_error # @utils.retries_on_autoreconnect def _create(self, name, metadata=None, project=None): # NOTE(flaper87): If the connection fails after it was called # and we retry to insert the queue, we could end up returning # `False` because of the `DuplicatedKeyError` although the # queue was indeed created by this API call. # # TODO(kgriffs): Commented out `retries_on_autoreconnect` for # now due to the above issue, since creating a queue is less # important to make super HA. try: # NOTE(kgriffs): Start counting at 1, and assume the first # message ever posted will succeed and set t to a UNIX # "modified at" timestamp. counter = {'v': 1, 't': 0} scoped_name = utils.scope_queue_name(name, project) self._collection.insert_one( {'p_q': scoped_name, 'm': metadata or {}, 'c': counter}) except pymongo.errors.DuplicateKeyError: return False else: return True # NOTE(kgriffs): Only cache when it exists; if it doesn't exist, and # someone creates it, we want it to be immediately visible. 
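    # NOTE(editor): Interaction sketch (assumes a constructed controller);
    # per the predicate ``lambda v: v`` below, only truthy results are
    # cached, and a delete purges the entry:
    #
    #     ctrl._exists('q1', project='p')  # miss -> MongoDB; cached ~5 sec
    #     ctrl._delete('q1', project='p')  # @_exists.purges drops the key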
@utils.raises_conn_error @utils.retries_on_autoreconnect @decorators.caches(_queue_exists_key, _QUEUE_CACHE_TTL, lambda v: v) def _exists(self, name, project=None): query = _get_scoped_query(name, project) return self._collection.find_one(query) is not None @utils.raises_conn_error @utils.retries_on_autoreconnect def set_metadata(self, name, metadata, project=None): rst = self._collection.update_one(_get_scoped_query(name, project), {'$set': {'m': metadata}}) if rst.matched_count == 0: raise errors.QueueDoesNotExist(name, project) @utils.raises_conn_error @utils.retries_on_autoreconnect @_exists.purges def _delete(self, name, project=None): self._collection.delete_one(_get_scoped_query(name, project)) @utils.raises_conn_error @utils.retries_on_autoreconnect def _stats(self, name, project=None): pass @utils.raises_conn_error @utils.retries_on_autoreconnect def _calculate_resource_count(self, project=None): query = utils.scoped_query(None, project, None, {}) return self._collection.count_documents(query) def _get_scoped_query(name, project): return {'p_q': utils.scope_queue_name(name, project)} ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/mongodb/subscriptions.py0000664000175100017510000001664615033040005022443 0ustar00mylesmyles# Copyright (c) 2014 Catalyst IT Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. import datetime from oslo_utils import timeutils import pymongo.errors from zaqar.common import utils as common_utils from zaqar import storage from zaqar.storage import base from zaqar.storage import errors from zaqar.storage.mongodb import utils ID_INDEX_FIELDS = [('_id', 1)] SUBSCRIPTIONS_INDEX = [ ('s', 1), ('u', 1), ('p', 1), ] # For removing expired subscriptions TTL_INDEX_FIELDS = [ ('e', 1), ] class SubscriptionController(base.Subscription): """Implements subscription resource operations using MongoDB. Subscriptions are unique by project + queue/topic + subscriber. Schema: 's': source :: str 'u': subscriber:: str 't': ttl:: int 'e': expires: datetime.datetime 'o': options :: dict 'p': project :: str 'c': confirmed :: boolean """ def __init__(self, *args, **kwargs): super(SubscriptionController, self).__init__(*args, **kwargs) self._collection = self.driver.subscriptions_database.subscriptions self._collection.create_index(SUBSCRIPTIONS_INDEX, unique=True) # NOTE(flwang): MongoDB will automatically delete the subscription # from the subscriptions collection when the subscription's 'e' value # is older than the number of seconds specified in expireAfterSeconds, # i.e. 0 seconds older in this case. As such, the data expires at the # specified 'e' value. 
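        # NOTE(editor): For reference only, a roughly equivalent TTL index
        # created from the mongo shell would be (collection name assumed):
        #
        #     db.subscriptions.createIndex(
        #         { e: 1 },
        #         { expireAfterSeconds: 0, background: true })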
self._collection.create_index(TTL_INDEX_FIELDS, name='ttl', expireAfterSeconds=0, background=True) @utils.raises_conn_error def list(self, queue, project=None, marker=None, limit=storage.DEFAULT_SUBSCRIPTIONS_PER_PAGE): query = {'s': queue, 'p': project} if marker is not None: query['_id'] = {'$gt': utils.to_oid(marker)} projection = {'s': 1, 'u': 1, 't': 1, 'p': 1, 'o': 1, '_id': 1, 'c': 1} cursor = self._collection.find(query, projection=projection) cursor = cursor.limit(limit).sort('_id') marker_name = {} ntotal = self._collection.count_documents(query, limit=limit) now = timeutils.utcnow_ts() def normalizer(record): marker_name['next'] = record['_id'] return _basic_subscription(record, now) yield utils.HookedCursor(cursor, normalizer, ntotal=ntotal) yield marker_name and marker_name['next'] @utils.raises_conn_error def get(self, queue, subscription_id, project=None): res = self._collection.find_one({'_id': utils.to_oid(subscription_id), 'p': project, 's': queue}) if not res: raise errors.SubscriptionDoesNotExist(subscription_id) now = timeutils.utcnow_ts() return _basic_subscription(res, now) @utils.raises_conn_error def create(self, queue, subscriber, ttl, options, project=None): source = queue now = timeutils.utcnow_ts() now_dt = datetime.datetime.fromtimestamp( now, tz=datetime.timezone.utc).replace(tzinfo=None) expires = now_dt + datetime.timedelta(seconds=ttl) confirmed = False try: res = self._collection.insert_one({'s': source, 'u': subscriber, 't': ttl, 'e': expires, 'o': options, 'p': project, 'c': confirmed}) return res.inserted_id except pymongo.errors.DuplicateKeyError: return None @utils.raises_conn_error def exists(self, queue, subscription_id, project=None): return self._collection.find_one({'_id': utils.to_oid(subscription_id), 'p': project}) is not None @utils.raises_conn_error def update(self, queue, subscription_id, project=None, **kwargs): names = ('subscriber', 'ttl', 'options') key_transform = lambda x: 'u' if x == 'subscriber' else x[0] fields = common_utils.fields(kwargs, names, pred=lambda x: x is not None, key_transform=key_transform) assert fields, ('`subscriber`, `ttl`, ' 'or `options` not found in kwargs') new_ttl = fields.get('t') if new_ttl is not None: now = timeutils.utcnow_ts() now_dt = datetime.datetime.fromtimestamp( now, tz=datetime.timezone.utc).replace(tzinfo=None) expires = now_dt + datetime.timedelta(seconds=new_ttl) fields['e'] = expires try: res = self._collection.update_one( {'_id': utils.to_oid(subscription_id), 'p': project, 's': queue}, {'$set': fields}, upsert=False) except pymongo.errors.DuplicateKeyError: raise errors.SubscriptionAlreadyExists() if res.matched_count == 0: raise errors.SubscriptionDoesNotExist(subscription_id) @utils.raises_conn_error def delete(self, queue, subscription_id, project=None): self._collection.delete_one({'_id': utils.to_oid(subscription_id), 'p': project, 's': queue}) @utils.raises_conn_error def get_with_subscriber(self, queue, subscriber, project=None): res = self._collection.find_one({'u': subscriber, 's': queue, 'p': project}) now = timeutils.utcnow_ts() return _basic_subscription(res, now) @utils.raises_conn_error def confirm(self, queue, subscription_id, project=None, confirmed=True): res = self._collection.update_one( {'_id': utils.to_oid(subscription_id), 'p': project}, {'$set': {'c': confirmed}}, upsert=False) if res.matched_count == 0: raise errors.SubscriptionDoesNotExist(subscription_id) def _basic_subscription(record, now): # NOTE(Eva-i): unused here record's field 'e' (expires) has changed 
it's # format from int (timestamp) to datetime since patch # 1d122b1671792aff0055ed5396111cd441fb8269. Any future change about # starting using 'e' field should make sure support both of the formats. oid = record['_id'] age = now - utils.oid_ts(oid) confirmed = record.get('c', True) return { 'id': str(oid), 'source': record['s'], 'subscriber': record['u'], 'ttl': record['t'], 'age': int(age), 'options': record['o'], 'confirmed': confirmed, } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/mongodb/topic_messages.py0000664000175100017510000011670115033040005022532 0ustar00mylesmyles# Copyright (c) 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Implements MongoDB the storage controller for messages. Field Mappings: In order to reduce the disk / memory space used, field names will be, most of the time, the first letter of their long name. """ import datetime import time import uuid from bson import binary from bson import objectid from oslo_log import log as logging from oslo_utils import timeutils import pymongo.errors import pymongo.read_preferences from zaqar.i18n import _ from zaqar import storage from zaqar.storage import errors from zaqar.storage.mongodb import utils from zaqar.storage import utils as s_utils LOG = logging.getLogger(__name__) # NOTE(kgriffs): This value, in seconds, should be at least less than the # minimum allowed TTL for messages (60 seconds). Make it 45 to allow for # some fudge room. MAX_RETRY_POST_DURATION = 45 # NOTE(kgriffs): It is extremely unlikely that all workers would somehow hang # for more than 5 seconds, without a single one being able to succeed in # posting some messages and incrementing the counter, thus allowing the other # producers to succeed in turn. COUNTER_STALL_WINDOW = 5 # For hinting ID_INDEX_FIELDS = [('_id', 1)] # For removing expired messages TTL_INDEX_FIELDS = [ ('e', 1), ] # to unify use of project/topic across mongodb # storage impls. PROJ_TOPIC = utils.PROJ_TOPIC_KEY # NOTE(kgriffs): This index is for listing messages, usually # filtering out claimed ones. ACTIVE_INDEX_FIELDS = [ (PROJ_TOPIC, 1), # Project will be unique, so put first ('k', 1), # Used for sorting and paging, must come before range queries ] # For counting COUNTING_INDEX_FIELDS = [ (PROJ_TOPIC, 1), # Project will be unique, so put first ] # This index is meant to be used as a shard-key and to ensure # uniqueness for markers. # # As for other compound indexes, order matters. The marker `k` # gives enough cardinality to ensure chunks are evenly distributed, # whereas the `p_q` field helps keeping chunks from the same project # and queue together. # # In a sharded environment, uniqueness of this index is still guaranteed # because it's used as a shard key. MARKER_INDEX_FIELDS = [ ('k', 1), (PROJ_TOPIC, 1), ] TRANSACTION_INDEX_FIELDS = [ ('tx', 1), ] class MessageController(storage.Message): """Implements message resource operations using MongoDB. Messages are scoped by project + topic. 
    ::

        Messages:
            Name           Field
            -------------------------
            scope       ->   p_t
            ttl         ->     t
            expires     ->     e
            marker      ->     k
            body        ->     b
            client uuid ->     u
            transaction ->    tx
            delay       ->     d
            checksum    ->    cs

    """

    def __init__(self, *args, **kwargs):
        super(MessageController, self).__init__(*args, **kwargs)

        # Cache for convenience and performance
        self._num_partitions = self.driver.mongodb_conf.partitions
        self._topic_ctrl = self.driver.topic_controller
        self._retry_range = range(self.driver.mongodb_conf.max_attempts)

        # Create a list of 'messages' collections, one for each database
        # partition, ordered by partition number.
        #
        # NOTE(kgriffs): Order matters, since it is used to look up the
        # collection by partition number. For example, self._collections[2]
        # would provide access to zaqar_p2.messages (partition numbers are
        # zero-based).
        self._collections = [db.messages
                             for db in self.driver.message_databases]

        # Ensure indexes are initialized before any queries are performed
        for collection in self._collections:
            self._ensure_indexes(collection)

    # ----------------------------------------------------------------------
    # Helpers
    # ----------------------------------------------------------------------

    def _ensure_indexes(self, collection):
        """Ensures that all indexes are created."""

        collection.create_index(TTL_INDEX_FIELDS,
                                name='ttl',
                                expireAfterSeconds=0,
                                background=True)

        collection.create_index(ACTIVE_INDEX_FIELDS,
                                name='active',
                                background=True)

        collection.create_index(COUNTING_INDEX_FIELDS,
                                name='counting',
                                background=True)

        collection.create_index(MARKER_INDEX_FIELDS,
                                name='queue_marker',
                                background=True)

        collection.create_index(TRANSACTION_INDEX_FIELDS,
                                name='transaction',
                                background=True)

    def _collection(self, topic_name, project=None):
        """Get a partitioned collection instance."""
        return self._collections[utils.get_partition(self._num_partitions,
                                                     topic_name, project)]

    def _backoff_sleep(self, attempt):
        """Sleep between retries using a jitter algorithm.

        Mitigates thrashing between multiple parallel requests, and
        creates backpressure on clients to slow down the rate
        at which they submit requests.

        :param attempt: current attempt number, zero-based
        """
        conf = self.driver.mongodb_conf
        seconds = utils.calculate_backoff(attempt, conf.max_attempts,
                                          conf.max_retry_sleep,
                                          conf.max_retry_jitter)

        time.sleep(seconds)

    def _purge_topic(self, topic_name, project=None):
        """Removes all messages from the topic.

        Warning: Only use this when deleting the topic; otherwise
        you can cause a side-effect of resetting the marker counter,
        which can cause clients to miss a large number of messages.

        If the topic does not exist, this method fails silently.

        :param topic_name: name of the topic to purge
        :param project: ID of the project to which the topic belongs
        """
        scope = utils.scope_queue_name(topic_name, project)
        collection = self._collection(topic_name, project)
        collection.delete_many({PROJ_TOPIC: scope})

    def _list(self, topic_name, project=None, marker=None,
              echo=False, client_uuid=None, projection=None,
              include_claimed=False, include_delayed=False,
              sort=1, limit=None, count=False):
        """Message document listing helper.

        :param topic_name: Name of the topic to list
        :param project: (Default None) Project `topic_name` belongs to. If
            not specified, queries the "global" namespace/project.
        :param marker: (Default None) Message marker from which to start
            iterating. If not specified, starts with the first message
            available in the topic.
:param echo: (Default False) Whether to return messages that match client_uuid :param client_uuid: (Default None) UUID for the client that originated this request :param projection: (Default None) a list of field names that should be returned in the result set or a dict specifying the fields to include or exclude :param include_claimed: (Default False) Whether to include claimed messages, not just active ones :param include_delayed: (Default False) Whether to include delayed messages, not just active ones :param sort: (Default 1) Sort order for the listing. Pass 1 for ascending (oldest message first), or -1 for descending (newest message first). :param limit: (Default None) The maximum number of messages to list. The results may include fewer messages than the requested `limit` if not enough are available. If limit is not specified :param count: (Default False) If return the count number of cursor :returns: Generator yielding up to `limit` messages. """ if sort not in (1, -1): raise ValueError('sort must be either 1 (ascending) ' 'or -1 (descending)') now = timeutils.utcnow_ts() query = { # Messages must belong to this topic and project. PROJ_TOPIC: utils.scope_queue_name(topic_name, project), # NOTE(kgriffs): Messages must be finalized (i.e., must not # be part of an unfinalized transaction). # # See also the note wrt 'tx' within the definition # of ACTIVE_INDEX_FIELDS. 'tx': None, } if not echo: if (client_uuid is not None) and not isinstance(client_uuid, uuid.UUID): client_uuid = uuid.UUID(client_uuid) client_uuid = binary.Binary.from_uuid(client_uuid) elif isinstance(client_uuid, uuid.UUID): client_uuid = binary.Binary.from_uuid(client_uuid) query['u'] = {'$ne': client_uuid} if marker is not None: query['k'] = {'$gt': marker} collection = self._collection(topic_name, project) if not include_delayed: # NOTE(cdyangzhenyu): Only include messages that are not # part of any delay, or are part of an expired delay. if # the message has no attribute 'd', it will also be obtained. # This is for compatibility with old data. query['$or'] = [{'d': {'$lte': now}}, {'d': {'$exists': False}}] # Construct the request cursor = collection.find(query, projection=projection, sort=[('k', sort)]) ntotal = None if count: ntotal = collection.count_documents(query) if limit is not None: cursor.limit(limit) if count: ntotal = collection.count_documents(query, limit=limit) # NOTE(flaper87): Suggest the index to use for this query to # ensure the most performant one is chosen. if count: return cursor.hint(ACTIVE_INDEX_FIELDS), ntotal return cursor.hint(ACTIVE_INDEX_FIELDS) # ---------------------------------------------------------------------- # "Friends" interface # ---------------------------------------------------------------------- def _count(self, topic_name, project=None, include_claimed=False): """Return total number of messages in a topic. This method is designed to very quickly count the number of messages in a given topic. Expired messages are not counted, of course. If the queue does not exist, the count will always be 0. Note: Some expired messages may be included in the count if they haven't been GC'd yet. This is done for performance. """ query = { # Messages must belong to this queue and project. PROJ_TOPIC: utils.scope_queue_name(topic_name, project), # NOTE(kgriffs): Messages must be finalized (i.e., must not # be part of an unfinalized transaction). # # See also the note wrt 'tx' within the definition # of ACTIVE_INDEX_FIELDS. 
'tx': None, } collection = self._collection(topic_name, project) return collection.count_documents(filter=query, hint=COUNTING_INDEX_FIELDS) def _active(self, topic_name, marker=None, echo=False, client_uuid=None, projection=None, project=None, limit=None, include_delayed=False): return self._list(topic_name, project=project, marker=marker, echo=echo, client_uuid=client_uuid, projection=projection, include_claimed=False, include_delayed=include_delayed, limit=limit) def _inc_counter(self, topic_name, project=None, amount=1, window=None): """Increments the message counter and returns the new value. :param topic_name: Name of the topic to which the counter is scoped :param project: Queue's project name :param amount: (Default 1) Amount by which to increment the counter :param window: (Default None) A time window, in seconds, that must have elapsed since the counter was last updated, in order to increment the counter. :returns: Updated message counter value, or None if window was specified, and the counter has already been updated within the specified time period. :raises QueueDoesNotExist: if not found """ # NOTE(flaper87): If this `if` is True, it means we're # using a mongodb in the control plane. To avoid breaking # environments doing so already, we'll keep using the counter # in the mongodb topic_controller rather than the one in the # message_controller. This should go away, eventually if hasattr(self._topic_ctrl, '_inc_counter'): return self._topic_ctrl._inc_counter(topic_name, project, amount, window) now = timeutils.utcnow_ts() update = {'$inc': {'c.v': amount}, '$set': {'c.t': now}} query = _get_scoped_query(topic_name, project) if window is not None: threshold = now - window query['c.t'] = {'$lt': threshold} while True: try: collection = self._collection(topic_name, project).stats doc = collection.find_one_and_update( query, update, return_document=pymongo.ReturnDocument.AFTER, projection={'c.v': 1, '_id': 0}) break except pymongo.errors.AutoReconnect: LOG.exception('Auto reconnect error.') if doc is None: if window is None: # NOTE(kgriffs): Since we did not filter by a time window, # the topic should have been found and updated. Perhaps # the topic has been deleted? message = ('Failed to increment the message ' 'counter for topic %(name)s and ' 'project %(project)s') message %= dict(name=topic_name, project=project) LOG.warning(message) raise errors.TopicDoesNotExist(topic_name, project) # NOTE(kgriffs): Assume the queue existed, but the counter # was recently updated, causing the range query on 'c.t' to # exclude the record. return None return doc['c']['v'] def _get_counter(self, topic_name, project=None): """Retrieves the current message counter value for a given topic. This helper is used to generate monotonic pagination markers that are saved as part of the message document. Note 1: Markers are scoped per-queue and so are *not* globally unique or globally ordered. Note 2: If two or more requests to this method are made in parallel, this method will return the same counter value. This is done intentionally so that the caller can detect a parallel message post, allowing it to mitigate race conditions between producer and observer clients. :param topic_name: Name of the topic to which the counter is scoped :param project: Topic's project :returns: current message counter as an integer """ # NOTE(flaper87): If this `if` is True, it means we're # using a mongodb in the control plane. 
To avoid breaking # environments doing so already, we'll keep using the counter # in the mongodb queue_controller rather than the one in the # message_controller. This should go away, eventually if hasattr(self._topic_ctrl, '_get_counter'): return self._topic_ctrl._get_counter(topic_name, project) update = {'$inc': {'c.v': 0, 'c.t': 0}} query = _get_scoped_query(topic_name, project) try: collection = self._collection(topic_name, project).stats doc = collection.find_one_and_update( query, update, upsert=True, return_document=pymongo.ReturnDocument.AFTER, projection={'c.v': 1, '_id': 0}) return doc['c']['v'] except pymongo.errors.AutoReconnect: LOG.exception('Auto reconnect error.') # ---------------------------------------------------------------------- # Public interface # ---------------------------------------------------------------------- def list(self, topic_name, project=None, marker=None, limit=storage.DEFAULT_MESSAGES_PER_PAGE, echo=False, client_uuid=None, include_claimed=False, include_delayed=False): if marker is not None: try: marker = int(marker) except ValueError: yield iter([]) messages, ntotal = self._list(topic_name, project=project, marker=marker, client_uuid=client_uuid, echo=echo, include_claimed=include_claimed, include_delayed=include_delayed, limit=limit, count=True) marker_id = {} now = timeutils.utcnow_ts() # NOTE (kgriffs) @utils.raises_conn_error not needed on this # function, since utils.HookedCursor already has it. def denormalizer(msg): marker_id['next'] = msg['k'] return _basic_message(msg, now) yield utils.HookedCursor(messages, denormalizer, ntotal=ntotal) yield str(marker_id['next']) @utils.raises_conn_error @utils.retries_on_autoreconnect def first(self, topic_name, project=None, sort=1): cursor = self._list(topic_name, project=project, include_claimed=True, sort=sort, limit=1) try: message = next(cursor) except StopIteration: raise errors.TopicIsEmpty(topic_name, project) now = timeutils.utcnow_ts() return _basic_message(message, now) @utils.raises_conn_error @utils.retries_on_autoreconnect def get(self, topic_name, message_id, project=None): mid = utils.to_oid(message_id) if mid is None: raise errors.MessageDoesNotExist(message_id, topic_name, project) now = timeutils.utcnow_ts() query = { '_id': mid, PROJ_TOPIC: utils.scope_queue_name(topic_name, project), } collection = self._collection(topic_name, project) message = list(collection.find(query).limit(1).hint(ID_INDEX_FIELDS)) if not message: raise errors.MessageDoesNotExist(message_id, topic_name, project) return _basic_message(message[0], now) @utils.raises_conn_error @utils.retries_on_autoreconnect def bulk_get(self, topic_name, message_ids, project=None): message_ids = [mid for mid in map(utils.to_oid, message_ids) if mid] if not message_ids: return iter([]) now = timeutils.utcnow_ts() # Base query, always check expire time query = { '_id': {'$in': message_ids}, PROJ_TOPIC: utils.scope_queue_name(topic_name, project), } collection = self._collection(topic_name, project) # NOTE(flaper87): Should this query # be sorted? messages = collection.find(query).hint(ID_INDEX_FIELDS) ntotal = collection.count_documents(query) def denormalizer(msg): return _basic_message(msg, now) return utils.HookedCursor(messages, denormalizer, ntotal=ntotal) @utils.raises_conn_error @utils.retries_on_autoreconnect def post(self, topic_name, messages, client_uuid, project=None): # NOTE(flaper87): This method should be safe to retry on # autoreconnect, since we've a 2-step insert for messages. 
# The worst-case scenario is that we'll increase the counter # several times and we'd end up with some non-active messages. if not self._topic_ctrl.exists(topic_name, project): raise errors.TopicDoesNotExist(topic_name, project) # NOTE(flaper87): Make sure the counter exists. This method # is an upsert. self._get_counter(topic_name, project) now = timeutils.utcnow_ts() now_dt = datetime.datetime.fromtimestamp( now, tz=datetime.timezone.utc).replace(tzinfo=None) collection = self._collection(topic_name, project) messages = list(messages) msgs_n = len(messages) next_marker = self._inc_counter(topic_name, project, amount=msgs_n) - msgs_n if (client_uuid is not None) and not isinstance(client_uuid, uuid.UUID): client_uuid = uuid.UUID(client_uuid) client_uuid = binary.Binary.from_uuid(client_uuid) elif isinstance(client_uuid, uuid.UUID): client_uuid = binary.Binary.from_uuid(client_uuid) prepared_messages = [] for index, message in enumerate(messages): msg = { PROJ_TOPIC: utils.scope_queue_name(topic_name, project), 't': message['ttl'], 'e': now_dt + datetime.timedelta(seconds=message['ttl']), 'u': client_uuid, 'd': now + message.get('delay', 0), 'b': message['body'] if 'body' in message else {}, 'k': next_marker + index, 'tx': None } if self.driver.conf.enable_checksum: msg['cs'] = s_utils.get_checksum(message.get('body', None)) prepared_messages.append(msg) res = collection.insert_many(prepared_messages, bypass_document_validation=True) return [str(id_) for id_ in res.inserted_ids] @utils.raises_conn_error @utils.retries_on_autoreconnect def delete(self, topic_name, message_id, project=None, claim=None): # NOTE(cpp-cabrera): return early - this is an invalid message # id so we won't be able to find it any way mid = utils.to_oid(message_id) if mid is None: return collection = self._collection(topic_name, project) query = { '_id': mid, PROJ_TOPIC: utils.scope_queue_name(topic_name, project), } cid = utils.to_oid(claim) if cid is None: raise errors.ClaimDoesNotExist(claim, topic_name, project) now = timeutils.utcnow_ts() cursor = collection.find(query).hint(ID_INDEX_FIELDS) try: message = next(cursor) except StopIteration: return if claim is None: if _is_claimed(message, now): raise errors.MessageIsClaimed(message_id) else: if message['c']['id'] != cid: kwargs = {} # NOTE(flaper87): In pymongo 3.0 PRIMARY is the default and # `read_preference` is read only. We'd need to set it when the # client is created. # NOTE(kgriffs): Read from primary in case the message # was just barely claimed, and claim hasn't made it to # the secondary. 
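                # NOTE(editor): Hedged sketch (not driver code) of setting
                # the read preference at client-creation time, as the note
                # above suggests; the database name here is assumed:
                #
                #     from pymongo import MongoClient
                #     from pymongo.read_preferences import ReadPreference
                #
                #     client = MongoClient(uri)
                #     db = client.get_database(
                #         'zaqar_p0',
                #         read_preference=ReadPreference.PRIMARY)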
message = collection.find_one(query, **kwargs) if message['c']['id'] != cid: if _is_claimed(message, now): raise errors.MessageNotClaimedBy(message_id, claim) raise errors.MessageNotClaimed(message_id) collection.delete_one(query) @utils.raises_conn_error @utils.retries_on_autoreconnect def bulk_delete(self, topic_name, message_ids, project=None, claim_ids=None): message_ids = [mid for mid in map(utils.to_oid, message_ids) if mid] if claim_ids: claim_ids = [cid for cid in map(utils.to_oid, claim_ids) if cid] query = { '_id': {'$in': message_ids}, PROJ_TOPIC: utils.scope_queue_name(topic_name, project), } collection = self._collection(topic_name, project) if claim_ids: message_claim_ids = [] messages = collection.find(query).hint(ID_INDEX_FIELDS) for message in messages: message_claim_ids.append(message['c']['id']) for cid in claim_ids: if cid not in message_claim_ids: raise errors.ClaimDoesNotExist(cid, topic_name, project) collection.delete_many(query) @utils.raises_conn_error @utils.retries_on_autoreconnect def pop(self, topic_name, limit, project=None): query = { PROJ_TOPIC: utils.scope_queue_name(topic_name, project), } # Only include messages that are not part of # any claim, or are part of an expired claim. now = timeutils.utcnow_ts() query['c.e'] = {'$lte': now} collection = self._collection(topic_name, project) projection = {'_id': 1, 't': 1, 'b': 1, 'c.id': 1} messages = (collection.find_one_and_delete(query, projection=projection) for _ in range(limit)) final_messages = [_basic_message(message, now) for message in messages if message] return final_messages class FIFOMessageController(MessageController): def _ensure_indexes(self, collection): """Ensures that all indexes are created.""" collection.create_index(TTL_INDEX_FIELDS, name='ttl', expireAfterSeconds=0, background=True) collection.create_index(ACTIVE_INDEX_FIELDS, name='active', background=True) collection.create_index(COUNTING_INDEX_FIELDS, name='counting', background=True) # NOTE(kgriffs): This index must be unique so that # inserting a message with the same marker to the # same queue will fail; this is used to detect a # race condition which can cause an observer client # to miss a message when there is more than one # producer posting messages to the same queue, in # parallel. collection.create_index(MARKER_INDEX_FIELDS, name='queue_marker', unique=True, background=True) collection.create_index(TRANSACTION_INDEX_FIELDS, name='transaction', background=True) @utils.raises_conn_error @utils.retries_on_autoreconnect def post(self, topic_name, messages, client_uuid, project=None): # NOTE(flaper87): This method should be safe to retry on # autoreconnect, since we've a 2-step insert for messages. # The worst-case scenario is that we'll increase the counter # several times and we'd end up with some non-active messages. if not self._topic_ctrl.exists(topic_name, project): raise errors.TopicDoesNotExist(topic_name, project) # NOTE(flaper87): Make sure the counter exists. This method # is an upsert. self._get_counter(topic_name, project) now = timeutils.utcnow_ts() now_dt = datetime.datetime.fromtimestamp( now, tz=datetime.timezone.utc).replace(tzinfo=None) collection = self._collection(topic_name, project) # Set the next basis marker for the first attempt. 
# # Note that we don't increment the counter right away because # if 2 concurrent posts happen and the one with the higher counter # ends before the one with the lower counter, there's a window # where a client paging through the queue may get the messages # with the higher counter and skip the previous ones. This would # make our FIFO guarantee unsound. next_marker = self._get_counter(topic_name, project) # Unique transaction ID to facilitate atomic batch inserts transaction = objectid.ObjectId() if (client_uuid is not None) and not isinstance(client_uuid, uuid.UUID): client_uuid = uuid.UUID(client_uuid) client_uuid = binary.Binary.from_uuid(client_uuid) elif isinstance(client_uuid, uuid.UUID): client_uuid = binary.Binary.from_uuid(client_uuid) prepared_messages = [] for index, message in enumerate(messages): msg = { PROJ_TOPIC: utils.scope_queue_name(topic_name, project), 't': message['ttl'], 'e': now_dt + datetime.timedelta(seconds=message['ttl']), 'u': client_uuid, 'd': now + message.get('delay', 0), 'b': message['body'] if 'body' in message else {}, 'k': next_marker + index, 'tx': None } if self.driver.conf.enable_checksum: msg['cs'] = s_utils.get_checksum(message.get('body', None)) prepared_messages.append(msg) # NOTE(kgriffs): Don't take the time to do a 2-phase insert # if there is no way for it to partially succeed. if len(prepared_messages) == 1: transaction = None prepared_messages[0]['tx'] = None # Use a retry range for sanity, although we expect # to rarely, if ever, reach the maximum number of # retries. # # NOTE(kgriffs): With the default configuration (100 ms # max sleep, 1000 max attempts), the max stall time # before the operation is abandoned is 49.95 seconds. for attempt in self._retry_range: try: res = collection.insert_many(prepared_messages, bypass_document_validation=True) # Log a message if we retried, for debugging perf issues if attempt != 0: msgtmpl = _('%(attempts)d attempt(s) required to post ' '%(num_messages)d messages to queue ' '"%(topic)s" under project %(project)s') LOG.debug(msgtmpl, dict(topic=topic_name, attempts=attempt + 1, num_messages=len(res.inserted_ids), project=project)) # Update the counter in preparation for the next batch # # NOTE(kgriffs): Due to the unique index on the messages # collection, competing inserts will fail as a whole, # and keep retrying until the counter is incremented # such that the competing marker's will start at a # unique number, 1 past the max of the messages just # inserted above. self._inc_counter(topic_name, project, amount=len(res.inserted_ids)) # NOTE(kgriffs): Finalize the insert once we can say that # all the messages made it. This makes bulk inserts # atomic, assuming queries filter out any non-finalized # messages. if transaction is not None: collection.update_many({'tx': transaction}, {'$set': {'tx': None}}, upsert=False) return [str(id_) for id_ in res.inserted_ids] except (pymongo.errors.DuplicateKeyError, pymongo.errors.BulkWriteError): # TODO(kgriffs): Record stats of how often retries happen, # and how many attempts, on average, are required to insert # messages. # NOTE(kgriffs): This can be used in conjunction with the # log line, above, that is emitted after all messages have # been posted, to gauge how long it is taking for messages # to be posted to a given topic, or overall. 
# # TODO(kgriffs): Add transaction ID to help match up loglines if attempt == 0: msgtmpl = _('First attempt failed while ' 'adding messages to topic ' '"%(topic)s" under project %(project)s') LOG.debug(msgtmpl, dict(topic=topic_name, project=project)) # NOTE(kgriffs): Never retry past the point that competing # messages expire and are GC'd, since once they are gone, # the unique index no longer protects us from getting out # of order, which could cause an observer to miss this # message. The code below provides a sanity-check to ensure # this situation can not happen. elapsed = timeutils.utcnow_ts() - now if elapsed > MAX_RETRY_POST_DURATION: msgtmpl = ('Exceeded maximum retry duration for topic ' '"%(topic)s" under project %(project)s') LOG.warning(msgtmpl, dict(topic=topic_name, project=project)) break # Chill out for a moment to mitigate thrashing/thundering self._backoff_sleep(attempt) # NOTE(kgriffs): Perhaps we failed because a worker crashed # after inserting messages, but before incrementing the # counter; that would cause all future requests to stall, # since they would keep getting the same base marker that is # conflicting with existing messages, until the messages that # "won" expire, at which time we would end up reusing markers, # and that could make some messages invisible to an observer # that is querying with a marker that is large than the ones # being reused. # # To mitigate this, we apply a heuristic to determine whether # a counter has stalled. We attempt to increment the counter, # but only if it hasn't been updated for a few seconds, which # should mean that nobody is left to update it! # # Note that we increment one at a time until the logjam is # broken, since we don't know how many messages were posted # by the worker before it crashed. next_marker = self._inc_counter( topic_name, project, window=COUNTER_STALL_WINDOW) # Retry the entire batch with a new sequence of markers. # # NOTE(kgriffs): Due to the unique index, and how # MongoDB works with batch requests, we will never # end up with a partially-successful update. The first # document in the batch will fail to insert, and the # remainder of the documents will not be attempted. if next_marker is None: # NOTE(kgriffs): Usually we will end up here, since # it should be rare that a counter becomes stalled. next_marker = self._get_counter( topic_name, project) else: msgtmpl = ('Detected a stalled message counter ' 'for topic "%(topic)s" under ' 'project %(project)s.' 
'The counter was incremented to %(value)d.') LOG.warning(msgtmpl, dict(topic=topic_name, project=project, value=next_marker)) for index, message in enumerate(prepared_messages): message['k'] = next_marker + index except Exception: LOG.exception('Error parsing document.') raise msgtmpl = ('Hit maximum number of attempts (%(max)s) for topic ' '"%(topic)s" under project %(project)s') LOG.warning(msgtmpl, dict(max=self.driver.mongodb_conf.max_attempts, topic=topic_name, project=project)) raise errors.MessageConflict(topic_name, project) def _is_claimed(msg, now): return (msg['c']['id'] is not None and msg['c']['e'] > now) def _basic_message(msg, now): oid = msg['_id'] age = now - utils.oid_ts(oid) res = { 'id': str(oid), 'age': int(age), 'ttl': msg['t'], 'body': msg['b'] } if msg.get('cs'): res['checksum'] = msg.get('cs') return res class MessageTopicHandler(object): def __init__(self, driver, control_driver): self.driver = driver self._cache = self.driver.cache self.topic_controller = self.driver.topic_controller self.message_controller = self.driver.message_controller def delete(self, topic_name, project=None): self.message_controller._purge_queue(topic_name, project) @utils.raises_conn_error @utils.retries_on_autoreconnect def stats(self, name, project=None): if not self.topic_controller.exists(name, project=project): raise errors.TopicDoesNotExist(name, project) controller = self.message_controller total = controller._count(name, project=project, include_claimed=True) message_stats = { 'total': total, } try: oldest = controller.first(name, project=project, sort=1) newest = controller.first(name, project=project, sort=-1) except errors.QueueIsEmpty: pass else: now = timeutils.utcnow_ts() message_stats['oldest'] = utils.stat_message(oldest, now) message_stats['newest'] = utils.stat_message(newest, now) return {'messages': message_stats} def _get_scoped_query(name, project): return {'p_t': utils.scope_queue_name(name, project)} ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/mongodb/topics.py0000664000175100017510000002364115033040005021026 0ustar00mylesmyles# Copyright (c) 2019 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Implements the MongoDB storage controller for topics. Field Mappings: In order to reduce the disk / memory space used, field names will be, most of the time, the first letter of their long name. 
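As an aside on `_basic_message()` and the stats handler above: a message's age never needs a stored timestamp, because MongoDB ObjectIds embed their creation time. A small illustration (not Zaqar code):

    import datetime
    from bson import objectid

    oid = objectid.ObjectId()              # embeds a creation timestamp
    created = oid.generation_time          # tz-aware UTC datetime
    now = datetime.datetime.now(datetime.timezone.utc)
    age = int((now - created).total_seconds())   # the 'age' stats field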
""" from oslo_log import log as logging from oslo_utils import timeutils from pymongo.collection import ReturnDocument import pymongo.errors from zaqar.common import decorators from zaqar.i18n import _ from zaqar import storage from zaqar.storage import errors from zaqar.storage.mongodb import utils LOG = logging.getLogger(__name__) # NOTE(wanghao): Keep this as same as queues' _TOPIC_CACHE_PREFIX = 'topiccontroller:' _TOPIC_CACHE_TTL = 5 def _topic_exists_key(topic, project=None): # NOTE(kgriffs): Use string concatenation for performance, # also put project first since it is guaranteed to be # unique, which should reduce lookup time. return _TOPIC_CACHE_PREFIX + 'exists:' + str(project) + '/' + topic class TopicController(storage.Topic): """Implements Topic resource operations using MongoDB. Topics are scoped by project, which is prefixed to the topic name. :: Topic: Name Field --------------------- name -> p_t msg counter -> c metadata -> m Message Counter: Name Field ------------------- value -> v modified ts -> t """ def __init__(self, *args, **kwargs): super(TopicController, self).__init__(*args, **kwargs) self._cache = self.driver.cache self._collection = self.driver.topics_database.topics # NOTE(flaper87): This creates a unique index for # project and name. Using project as the prefix # allows for querying by project and project+name. # This is also useful for retrieving the queues list for # a specific project, for example. Order matters! self._collection.create_index([('p_t', 1)], unique=True) # ---------------------------------------------------------------------- # Helpers # ---------------------------------------------------------------------- def _get_counter(self, name, project=None): """Retrieves the current message counter value for a given topic. This helper is used to generate monotonic pagination markers that are saved as part of the message document. Note 1: Markers are scoped per-topic and so are *not* globally unique or globally ordered. Note 2: If two or more requests to this method are made in parallel, this method will return the same counter value. This is done intentionally so that the caller can detect a parallel message post, allowing it to mitigate race conditions between producer and observer clients. :param name: Name of the queue to which the counter is scoped :param project: Topic's project :returns: current message counter as an integer """ doc = self._collection.find_one(_get_scoped_query(name, project), projection={'c.v': 1, '_id': 0}) if doc is None: raise errors.TopicDoesNotExist(name, project) return doc['c']['v'] def _inc_counter(self, name, project=None, amount=1, window=None): """Increments the message counter and returns the new value. :param name: Name of the topic to which the counter is scoped :param project: Topic's project name :param amount: (Default 1) Amount by which to increment the counter :param window: (Default None) A time window, in seconds, that must have elapsed since the counter was last updated, in order to increment the counter. :returns: Updated message counter value, or None if window was specified, and the counter has already been updated within the specified time period. 
:raises TopicDoesNotExist: if not found """ now = timeutils.utcnow_ts() update = {'$inc': {'c.v': amount}, '$set': {'c.t': now}} query = _get_scoped_query(name, project) if window is not None: threshold = now - window query['c.t'] = {'$lt': threshold} while True: try: doc = self._collection.find_one_and_update( query, update, return_document=ReturnDocument.AFTER, projection={'c.v': 1, '_id': 0}) break except pymongo.errors.AutoReconnect: LOG.exception('Auto reconnect failure') if doc is None: if window is None: # NOTE(kgriffs): Since we did not filter by a time window, # the topic should have been found and updated. Perhaps # the topic has been deleted? message = _('Failed to increment the message ' 'counter for topic %(name)s and ' 'project %(project)s') message %= dict(name=name, project=project) LOG.warning(message) raise errors.TopicDoesNotExist(name, project) # NOTE(kgriffs): Assume the topic existed, but the counter # was recently updated, causing the range topic on 'c.t' to # exclude the record. return None return doc['c']['v'] # ---------------------------------------------------------------------- # Interface # ---------------------------------------------------------------------- def _get(self, name, project=None): try: return self.get_metadata(name, project) except errors.TopicDoesNotExist: return {} def _list(self, project=None, kfilter={}, marker=None, limit=storage.DEFAULT_TOPICS_PER_PAGE, detailed=False, name=None): query = utils.scoped_query(marker, project, name, kfilter, key_value='p_t') projection = {'p_t': 1, '_id': 0} if detailed: projection['m'] = 1 cursor = self._collection.find(query, projection=projection) cursor = cursor.limit(limit).sort('p_t') marker_name = {} ntotal = self._collection.count_documents(query, limit=limit) def normalizer(record): topic = {'name': utils.descope_queue_name(record['p_t'])} marker_name['next'] = topic['name'] if detailed: topic['metadata'] = record['m'] return topic yield utils.HookedCursor(cursor, normalizer, ntotal=ntotal) yield marker_name and marker_name['next'] @utils.raises_conn_error @utils.retries_on_autoreconnect def get_metadata(self, name, project=None): queue = self._collection.find_one(_get_scoped_query(name, project), projection={'m': 1, '_id': 0}) if queue is None: raise errors.TopicDoesNotExist(name, project) return queue.get('m', {}) @utils.raises_conn_error # @utils.retries_on_autoreconnect def _create(self, name, metadata=None, project=None): # NOTE(flaper87): If the connection fails after it was called # and we retry to insert the topic, we could end up returning # `False` because of the `DuplicatedKeyError` although the # topic was indeed created by this API call. # # TODO(kgriffs): Commented out `retries_on_autoreconnect` for # now due to the above issue, since creating a topic is less # important to make super HA. try: # NOTE(kgriffs): Start counting at 1, and assume the first # message ever posted will succeed and set t to a UNIX # "modified at" timestamp. counter = {'v': 1, 't': 0} scoped_name = utils.scope_queue_name(name, project) self._collection.insert_one( {'p_t': scoped_name, 'm': metadata or {}, 'c': counter}) except pymongo.errors.DuplicateKeyError: return False else: return True # NOTE(kgriffs): Only cache when it exists; if it doesn't exist, and # someone creates it, we want it to be immediately visible. 
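A minimal sketch of the stall-window increment that `_inc_counter()` performs above, assuming a pymongo collection holding the `{'c': {'v': ..., 't': ...}}` counter document (illustrative only):

    from oslo_utils import timeutils
    from pymongo.collection import ReturnDocument

    def inc_counter(collection, query, amount=1, window=None):
        now = timeutils.utcnow_ts()
        update = {'$inc': {'c.v': amount}, '$set': {'c.t': now}}
        if window is not None:
            # Only increment when nobody has updated the counter within
            # `window` seconds; otherwise nothing matches and None returns.
            query = dict(query, **{'c.t': {'$lt': now - window}})
        doc = collection.find_one_and_update(
            query, update,
            return_document=ReturnDocument.AFTER,
            projection={'c.v': 1, '_id': 0})
        return doc and doc['c']['v']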
@utils.raises_conn_error @utils.retries_on_autoreconnect @decorators.caches(_topic_exists_key, _TOPIC_CACHE_TTL, lambda v: v) def _exists(self, name, project=None): query = _get_scoped_query(name, project) return self._collection.find_one(query) is not None @utils.raises_conn_error @utils.retries_on_autoreconnect def set_metadata(self, name, metadata, project=None): rst = self._collection.update_one(_get_scoped_query(name, project), {'$set': {'m': metadata}}) if rst.matched_count == 0: raise errors.TopicDoesNotExist(name, project) @utils.raises_conn_error @utils.retries_on_autoreconnect @_exists.purges def _delete(self, name, project=None): self._collection.delete_one(_get_scoped_query(name, project)) @utils.raises_conn_error @utils.retries_on_autoreconnect def _stats(self, name, project=None): pass def _get_scoped_query(name, project): return {'p_t': utils.scope_queue_name(name, project)} ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/mongodb/utils.py0000664000175100017510000002525515033040005020670 0ustar00mylesmyles# Copyright (c) 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import binascii import collections import datetime import functools import random import time from bson import errors as berrors from bson import objectid from bson import tz_util from oslo_log import log as logging from oslo_utils import timeutils from pymongo import errors from zaqar.storage import errors as storage_errors # BSON ObjectId gives TZ-aware datetime, so we generate a # TZ-aware UNIX epoch for convenience. EPOCH = datetime.datetime.fromtimestamp( 0, tz=datetime.timezone.utc).replace(tzinfo=tz_util.utc) # NOTE(cpp-cabrera): the authoritative form of project/queue keys. PROJ_QUEUE_KEY = 'p_q' PROJ_TOPIC_KEY = 'p_t' LOG = logging.getLogger(__name__) def cached_gen(iterable): """Converts the iterable into a caching generator. Returns a proxy that yields each item of iterable, while at the same time caching those items in a deque. :param iterable: an iterable to wrap in a caching generator :returns: (proxy(iterable), cached_items) """ cached_items = collections.deque() def generator(iterable): for item in iterable: cached_items.append(item) yield item return generator(iterable), cached_items def calculate_backoff(attempt, max_attempts, max_sleep, max_jitter=0): """Calculates backoff time, in seconds, when retrying an operation. This function calculates a simple linear backoff time with optional jitter, useful for retrying a request under high concurrency. The result may be passed directly into time.sleep() in order to mitigate stampeding herd syndrome and introduce backpressure towards the clients, slowing them down. :param attempt: current value of the attempt counter (zero-based) :param max_attempts: maximum number of attempts that will be tried :param max_sleep: maximum sleep value to apply before jitter, assumed to be seconds. Fractional seconds are supported to 1 ms granularity. 
:param max_jitter: maximum jitter value to add to the baseline sleep time. Actual value will be chosen randomly. :raises ValueError: if the parameter is not invalid :returns: float representing the number of seconds to sleep, within the interval [0, max_sleep), determined linearly according to the ratio attempt / max_attempts, with optional jitter. """ if max_sleep < 0: raise ValueError('max_sleep must be >= 0') if max_jitter < 0: raise ValueError('max_jitter must be >= 0') if not (0 <= attempt < max_attempts): raise ValueError('attempt value is out of range') ratio = attempt / max_attempts backoff_sec = ratio * max_sleep jitter_sec = random.random() * max_jitter return backoff_sec + jitter_sec def to_oid(obj): """Creates a new ObjectId based on the input. Returns None when TypeError or berrors.InvalidId is raised by the ObjectId class. :param obj: Anything that can be passed as an input to `objectid.ObjectId` """ try: return objectid.ObjectId(obj) except (TypeError, berrors.InvalidId): return None def oid_ts(oid): """Converts an ObjectId to a UNIX timestamp. :raises TypeError: if oid isn't an ObjectId """ try: return timeutils.delta_seconds(EPOCH, oid.generation_time) except AttributeError: raise TypeError('Expected ObjectId and got %s' % type(oid)) def stat_message(message, now): """Creates a stat document from the given message, relative to now.""" msg_id = message['id'] created = oid_ts(to_oid(msg_id)) age = now - created created_iso = datetime.datetime.fromtimestamp( created, tz=datetime.timezone.utc).replace(tzinfo=None).strftime( '%Y-%m-%dT%H:%M:%SZ') return { 'id': msg_id, 'age': int(age), 'created': created_iso, } def normalize_none_str(string_or_none): """Returns '' IFF given value is None, passthrough otherwise. This function normalizes None to the empty string to facilitate string concatenation when a variable could be None. """ return '' if string_or_none is None else string_or_none def scope_queue_name(queue=None, project=None): """Returns a scoped name for a queue based on project and queue. If only the project name is specified, a scope signifying "all queues" for that project is returned. If neither queue nor project are specified, a scope for "all global queues" is returned, which is to be interpreted as excluding queues scoped by project. :param queue: name of queue to seek :type queue: str :param project: namespace :type project: str :returns: '{project}/{queue}' if project and queue are given, '{project}/' if ONLY project is given, '/{queue}' if ONLY queue is given, and '/' if neither are given. """ # NOTE(kgriffs): Concatenation is faster than format, and # put project first since it is guaranteed to be unique. return normalize_none_str(project) + '/' + normalize_none_str(queue) def descope_queue_name(scoped_name): """Returns the unscoped queue name, given a fully-scoped name.""" # NOTE(kgriffs): scoped_name can be either '/', '/global-queue-name', # or 'project-id/queue-name'. return scoped_name.partition('/')[2] or None def parse_scoped_project_queue(scoped_name): """Returns the project and queue name for a scoped catalogue entry. :param scoped_name: a project/queue as given by :scope_queue_name: :type scoped_name: str :returns: (project, queue) :rtype: (str, six.text_type) """ return scoped_name.split('/') def scoped_query(queue, project, name=None, kfilter={}, key_value=PROJ_QUEUE_KEY): """Returns a dict usable for querying for scoped project/queues. 
:param queue: name of queue to seek :type queue: str :param project: namespace :type project: str :returns: query to issue :rtype: dict """ key = key_value query = {} scoped_name = scope_queue_name(queue, project) if not scoped_name.startswith('/'): # NOTE(kgriffs): scoped queue, e.g., 'project-id/queue-name' if name: project_prefix = '^' + project + '/.*' + name + '.*' else: project_prefix = '^' + project + '/' query[key] = {'$regex': project_prefix, '$gt': scoped_name} elif scoped_name == '/': # NOTE(kgriffs): list global queues, but exclude scoped ones if name: query[key] = {'$regex': '^/.*' + name + '.*'} else: query[key] = {'$regex': '^/'} else: # NOTE(kgriffs): unscoped queue, e.g., '/my-global-queue' if name: query[key] = {'$regex': '^/.*' + name + '.*', '$gt': scoped_name} else: query[key] = {'$regex': '^/', '$gt': scoped_name} # Handler the metadata filter in request. for key, value in kfilter.items(): key = 'm.' + key query[key] = {'$eq': value} return query def get_partition(num_partitions, queue, project=None): """Get the partition number for a given queue and project. Hashes the queue to a partition number. The hash is stable, meaning given the same queue name and project ID, the same partition number will always be returned. Note also that queues will be uniformly distributed across partitions. The number of partitions is taken from the "partitions" property in the config file, under the [drivers:storage:mongodb] section. """ name = project + queue if project is not None else queue # NOTE(kgriffs): For small numbers of partitions, crc32 will # provide a uniform distribution. This was verified experimentally # with up to 100 partitions. return binascii.crc32(name.encode('utf-8')) % num_partitions def raises_conn_error(func): """Handles the MongoDB ConnectionFailure error. This decorator catches MongoDB's ConnectionFailure error and raises Zaqar's ConnectionError instead. """ @functools.wraps(func) def wrapper(*args, **kwargs): try: return func(*args, **kwargs) except errors.ConnectionFailure: LOG.exception('Connection failure.') raise storage_errors.ConnectionError() return wrapper def retries_on_autoreconnect(func): """Causes the wrapped function to be re-called on AutoReconnect. This decorator catches MongoDB's AutoReconnect error and retries the function call. .. Note:: Assumes that the decorated function has defined self.driver.mongodb_conf so that `max_reconnect_attempts` and `reconnect_sleep` can be taken into account. .. Warning:: The decorated function must be idempotent. 
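To visualize the linear backoff-plus-jitter schedule that `calculate_backoff()` computes (and that callers feed into `time.sleep()` between retries), here is a short self-contained demo restating the formula; the parameter values are illustrative:

    import random

    def backoff(attempt, max_attempts=10, max_sleep=0.1, max_jitter=0.005):
        # Same formula as calculate_backoff() above.
        return (attempt / max_attempts) * max_sleep + random.random() * max_jitter

    for attempt in range(10):
        print(attempt, round(backoff(attempt), 4))
    # Sleep times grow linearly toward max_sleep, with up to max_jitter
    # of random noise added to break up thundering herds.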
""" @functools.wraps(func) def wrapper(self, *args, **kwargs): # TODO(kgriffs): Figure out a way to not have to rely on the # presence of `mongodb_conf` max_attemps = self.driver.mongodb_conf.max_reconnect_attempts sleep_sec = self.driver.mongodb_conf.reconnect_sleep last_ex = None for attempt in range(max_attemps): try: return func(self, *args, **kwargs) break except errors.AutoReconnect as ex: LOG.warning('Caught AutoReconnect, retrying the ' 'call to %s', func.__name__) last_ex = ex time.sleep(sleep_sec * (2 ** attempt)) else: LOG.error('Caught AutoReconnect, maximum attempts ' 'to %s exceeded.', func.__name__) raise last_ex return wrapper class HookedCursor(object): def __init__(self, cursor, denormalizer, ntotal=None): self.cursor = cursor self.denormalizer = denormalizer self.ntotal = ntotal def __getattr__(self, attr): return getattr(self.cursor, attr) def __iter__(self): return self def __len__(self): return self.ntotal @raises_conn_error def next(self): try: item = next(self.cursor) except errors.InvalidOperation: raise StopIteration() return self.denormalizer(item) def __next__(self): return self.next() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/pipeline.py0000664000175100017510000001453515033040005017707 0ustar00mylesmyles# Copyright (c) 2013 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. from oslo_log import log as logging from osprofiler import profiler from stevedore import driver from stevedore import extension from zaqar import common from zaqar.common import decorators from zaqar.conf import storage from zaqar.storage import base LOG = logging.getLogger(__name__) def _get_storage_pipeline(resource_name, conf, *args, **kwargs): """Constructs and returns a storage resource pipeline. This is a helper function for any service supporting pipelines for the storage layer. The function returns a pipeline based on the `{resource_name}_pipeline` config option. Stages in the pipeline implement controller methods that they want to hook. A stage can halt the pipeline immediate by returning a value that is not None; otherwise, processing will continue to the next stage, ending with the actual storage controller. :param conf: Configuration instance. :type conf: `cfg.ConfigOpts` :returns: A pipeline to use. :rtype: `Pipeline` """ conf.register_opts(storage.ALL_OPTS, group=storage.GROUP_NAME) storage_conf = conf[storage.GROUP_NAME] pipeline = [] for ns in storage_conf[resource_name + '_pipeline']: try: mgr = driver.DriverManager('zaqar.storage.stages', ns, invoke_args=args, invoke_kwds=kwargs, invoke_on_load=True) pipeline.append(mgr.driver) except RuntimeError as exc: LOG.warning('Stage %(stage)s could not be imported: %(ex)s', {'stage': ns, 'ex': str(exc)}) continue return pipeline def _get_builtin_entry_points(resource_name, storage, control_driver, conf): # Load builtin stages builtin_entry_points = [] # NOTE(flaper87): The namespace will look like: # `zaqar.storage.$STORAGE.driver.stages`. 
For now, # the builtin stages are bound to a single store and # are not applied to every store. namespace = '%s.%s.stages' % (storage.__module__, resource_name) extensions = extension.ExtensionManager(namespace, invoke_on_load=True, invoke_args=[storage, control_driver]) if len(extensions.extensions) == 0: return [] for ext in extensions.extensions: builtin_entry_points.append(ext.obj) if conf.profiler.enabled and conf.profiler.trace_message_store: return (profiler.trace_cls("stages_controller") (builtin_entry_points)) return builtin_entry_points class DataDriver(base.DataDriverBase): """Meta-driver for injecting pipelines in front of controllers. :param conf: Configuration from which to load pipeline settings :param storage: Storage driver that will service requests as the last step in the pipeline """ def __init__(self, conf, storage, control_driver): # NOTE(kgriffs): Pass None for cache since it won't ever # be referenced. super(DataDriver, self).__init__(conf, None, control_driver) self._storage = storage @property def capabilities(self): return self._storage.capabilities() def close(self): self._storage.close() def is_alive(self): return self._storage.is_alive() def _health(self): return self._storage._health() @decorators.lazy_property(write=False) def queue_controller(self): stages = _get_builtin_entry_points('queue', self._storage, self.control_driver, self.conf) stages.extend(_get_storage_pipeline('queue', self.conf)) stages.append(self._storage.queue_controller) return common.Pipeline(stages) @decorators.lazy_property(write=False) def message_controller(self): stages = _get_builtin_entry_points('message', self._storage, self.control_driver, self.conf) kwargs = {'subscription_controller': self._storage.subscription_controller, 'max_notifier_workers': self.conf.notification.max_notifier_workers, 'require_confirmation': self.conf.notification.require_confirmation, 'queue_controller': self._storage.queue_controller} stages.extend(_get_storage_pipeline('message', self.conf, **kwargs)) stages.append(self._storage.message_controller) return common.Pipeline(stages) @decorators.lazy_property(write=False) def claim_controller(self): stages = _get_builtin_entry_points('claim', self._storage, self.control_driver, self.conf) stages.extend(_get_storage_pipeline('claim', self.conf)) stages.append(self._storage.claim_controller) return common.Pipeline(stages) @decorators.lazy_property(write=False) def subscription_controller(self): stages = _get_builtin_entry_points('subscription', self._storage, self.control_driver, self.conf) stages.extend(_get_storage_pipeline('subscription', self.conf)) stages.append(self._storage.subscription_controller) return common.Pipeline(stages) @decorators.lazy_property(write=False) def topic_controller(self): stages = _get_builtin_entry_points('topic', self._storage, self.control_driver, self.conf) stages.extend(_get_storage_pipeline('topic', self.conf)) stages.append(self._storage.topic_controller) return common.Pipeline(stages) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/pooling.py0000664000175100017510000010220715033040005017543 0ustar00mylesmyles# Copyright (c) 2013 Rackspace, Inc. # Copyright 2014 Catalyst IT Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. 
You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. import heapq import itertools from oslo_log import log from osprofiler import profiler from zaqar.common import decorators from zaqar.common import errors as cerrors from zaqar.common.storage import select from zaqar.conf import pooling_catalog from zaqar.i18n import _ from zaqar import storage from zaqar.storage import errors from zaqar.storage import pipeline from zaqar.storage import utils LOG = log.getLogger(__name__) # NOTE(kgriffs): E.g.: 'zaqar-pooling:5083853/my-queue' _POOL_CACHE_PREFIX = 'pooling:' # TODO(kgriffs): If a queue is migrated, everyone's # caches need to have the relevant entry invalidated # before "unfreezing" the queue, rather than waiting # on the TTL. # # TODO(kgriffs): Make configurable? _POOL_CACHE_TTL = 10 def _pool_cache_key(queue, project=None): # NOTE(kgriffs): Use string concatenation for performance, # also put project first since it is guaranteed to be # unique, which should reduce lookup time. return _POOL_CACHE_PREFIX + str(project) + '/' + queue class DataDriver(storage.DataDriverBase): """Pooling meta-driver for routing requests to multiple backends. :param conf: Configuration from which to read pooling options :param cache: Cache instance that will be passed to individual storage driver instances that correspond to each pool. will also be used by the pool controller to reduce latency for some operations. """ BASE_CAPABILITIES = tuple(storage.Capabilities) def __init__(self, conf, cache, control, control_driver=None): super(DataDriver, self).__init__(conf, cache, control_driver) catalog = Catalog(conf, cache, control) if self.conf.profiler.enabled: catalog = profiler.trace_cls("pooling_catalogue_" "controller")(catalog) self._pool_catalog = catalog @property def capabilities(self): # NOTE(flaper87): We can't know the capabilities # of this driver because pools are loaded based on # the queue and project of the request. Therefore, # we will just assume all capabilities are supported. # This shouldn't be an issue because the pooling driver # is neither used for pools creation nor flavor creation. 
return self.BASE_CAPABILITIES def close(self): cursor = self._pool_catalog._pools_ctrl.list(limit=0) # Messages of each pool for pool in next(cursor): driver = self._pool_catalog.get_driver(pool['name']) driver.close() def is_alive(self): cursor = self._pool_catalog._pools_ctrl.list(limit=0) pools = next(cursor) return all(self._pool_catalog.get_driver(pool['name']).is_alive() for pool in pools) def _health(self): KPI = {} # Leverage the is_alive to indicate if the backend storage is # reachable or not KPI['catalog_reachable'] = self.is_alive() cursor = self._pool_catalog._pools_ctrl.list(limit=0) # Messages of each pool for pool in next(cursor): driver = self._pool_catalog.get_driver(pool['name']) KPI[pool['name']] = driver._health() return KPI def gc(self): cursor = self._pool_catalog._pools_ctrl.list(limit=0) for pool in next(cursor): driver = self._pool_catalog.get_driver(pool['name']) driver.gc() @decorators.lazy_property(write=False) def queue_controller(self): controller = QueueController(self._pool_catalog) if self.conf.profiler.enabled: return profiler.trace_cls("pooling_queue_controller")(controller) else: return controller @decorators.lazy_property(write=False) def message_controller(self): controller = MessageController(self._pool_catalog) if self.conf.profiler.enabled: return profiler.trace_cls("pooling_message_controller")(controller) else: return controller @decorators.lazy_property(write=False) def claim_controller(self): controller = ClaimController(self._pool_catalog) if self.conf.profiler.enabled: return profiler.trace_cls("pooling_claim_controller")(controller) else: return controller @decorators.lazy_property(write=False) def subscription_controller(self): controller = SubscriptionController(self._pool_catalog) if self.conf.profiler.enabled: return (profiler.trace_cls("pooling_subscription_controller") (controller)) else: return controller @decorators.lazy_property(write=False) def topic_controller(self): controller = TopicController(self._pool_catalog) if self.conf.profiler.enabled: return profiler.trace_cls("pooling_topic_controller")(controller) else: return controller class QueueController(storage.Queue): """Routes operations to get the appropriate queue controller. 
:param pool_catalog: a catalog of available pools :type pool_catalog: queues.pooling.base.Catalog """ def __init__(self, pool_catalog): super(QueueController, self).__init__(None) self._pool_catalog = pool_catalog self._mgt_queue_ctrl = self._pool_catalog.control.queue_controller self._get_controller = self._pool_catalog.get_queue_controller def _list(self, project=None, kfilter={}, marker=None, limit=storage.DEFAULT_QUEUES_PER_PAGE, detailed=False, name=None): def all_pages(): yield next(self._mgt_queue_ctrl.list( project=project, kfilter=kfilter, marker=marker, limit=limit, detailed=detailed, name=name)) # make a heap compared with 'name' ls = heapq.merge(*[ utils.keyify('name', page) for page in all_pages() ]) marker_name = {} # limit the iterator and strip out the comparison wrapper def it(): for queue_cmp in itertools.islice(ls, limit): marker_name['next'] = queue_cmp.obj['name'] yield queue_cmp.obj yield it() yield marker_name and marker_name['next'] def _get(self, name, project=None): try: return self.get_metadata(name, project) except errors.QueueDoesNotExist: return {} def _create(self, name, metadata=None, project=None): flavor = None if isinstance(metadata, dict): flavor = metadata.get('_flavor') self._pool_catalog.register(name, project=project, flavor=flavor) # NOTE(cpp-cabrera): This should always succeed since we just # registered the project/queue. There is a race condition, # however. If between the time we register a queue and go to # look it up, the queue is deleted, then this assertion will # fail. pool = self._pool_catalog.lookup(name, project) if not pool: raise RuntimeError('Failed to register queue') return self._mgt_queue_ctrl.create(name, metadata=metadata, project=project) def _delete(self, name, project=None): mqHandler = self._get_controller(name, project) if mqHandler: # NOTE(cpp-cabrera): delete from the catalogue first. If # zaqar crashes in the middle of these two operations, # it is desirable that the entry be missing from the # catalogue and present in storage, rather than the # reverse. The former case leads to all operations # behaving as expected: 404s across the board, and a # functionally equivalent 204 on a create queue. The # latter case is more difficult to reason about, and may # yield 500s in some operations. self._pool_catalog.deregister(name, project) mqHandler.delete(name, project) return self._mgt_queue_ctrl.delete(name, project) def _exists(self, name, project=None): return self._mgt_queue_ctrl.exists(name, project=project) def get_metadata(self, name, project=None): return self._mgt_queue_ctrl.get_metadata(name, project=project) def set_metadata(self, name, metadata, project=None): # NOTE(gengchc2): If flavor metadata is modified in queue, # The queue needs to be re-registered to pools, otherwise # the queue flavor parameter is not consistent with the pool. flavor = None if isinstance(metadata, dict): flavor = metadata.get('_flavor') self._pool_catalog.register(name, project=project, flavor=flavor) return self._mgt_queue_ctrl.set_metadata(name, metadata=metadata, project=project) def _stats(self, name, project=None): mqHandler = self._get_controller(name, project) if mqHandler: return mqHandler.stats(name, project=project) raise errors.QueueDoesNotExist(name, project) def _calculate_resource_count(self, project=None): return self._mgt_queue_ctrl.calculate_resource_count(project=project) class MessageController(storage.Message): """Routes operations to a message controller in the appropriate pool. 
:param pool_catalog: a catalog of available pools :type pool_catalog: queues.pooling.base.Catalog """ def __init__(self, pool_catalog): super(MessageController, self).__init__(None) self._pool_catalog = pool_catalog self._get_controller = self._pool_catalog.get_message_controller def post(self, queue, messages, client_uuid, project=None): control = self._get_controller(queue, project) if control: return control.post(queue, project=project, messages=messages, client_uuid=client_uuid) raise errors.QueueDoesNotExist(queue, project) def delete(self, queue, message_id, project=None, claim=None): control = self._get_controller(queue, project) if control: return control.delete(queue, project=project, message_id=message_id, claim=claim) return None def bulk_delete(self, queue, message_ids, project=None, claim_ids=None): control = self._get_controller(queue, project) if control: return control.bulk_delete(queue, project=project, message_ids=message_ids, claim_ids=claim_ids) return None def pop(self, queue, limit, project=None): control = self._get_controller(queue, project) if control: return control.pop(queue, project=project, limit=limit) return None def bulk_get(self, queue, message_ids, project=None): control = self._get_controller(queue, project) if control: return control.bulk_get(queue, project=project, message_ids=message_ids) return [] def list(self, queue, project=None, marker=None, limit=storage.DEFAULT_MESSAGES_PER_PAGE, echo=False, client_uuid=None, include_claimed=False, include_delayed=False): control = self._get_controller(queue, project) if control: return control.list(queue, project=project, marker=marker, limit=limit, echo=echo, client_uuid=client_uuid, include_claimed=include_claimed, include_delayed=include_delayed) return iter([[]]) def get(self, queue, message_id, project=None): control = self._get_controller(queue, project) if control: return control.get(queue, message_id=message_id, project=project) raise errors.QueueDoesNotExist(queue, project) def first(self, queue, project=None, sort=1): control = self._get_controller(queue, project) if control: return control.first(queue, project=project, sort=sort) raise errors.QueueDoesNotExist(queue, project) class ClaimController(storage.Claim): """Routes operations to a claim controller in the appropriate pool. 
:param pool_catalog: a catalog of available pools :type pool_catalog: queues.pooling.base.Catalog """ def __init__(self, pool_catalog): super(ClaimController, self).__init__(None) self._pool_catalog = pool_catalog self._get_controller = self._pool_catalog.get_claim_controller def create(self, queue, metadata, project=None, limit=storage.DEFAULT_MESSAGES_PER_CLAIM): control = self._get_controller(queue, project) if control: return control.create(queue, metadata=metadata, project=project, limit=limit) return [None, []] def get(self, queue, claim_id, project=None): control = self._get_controller(queue, project) if control: return control.get(queue, claim_id=claim_id, project=project) raise errors.ClaimDoesNotExist(claim_id, queue, project) def update(self, queue, claim_id, metadata, project=None): control = self._get_controller(queue, project) if control: return control.update(queue, claim_id=claim_id, project=project, metadata=metadata) raise errors.ClaimDoesNotExist(claim_id, queue, project) def delete(self, queue, claim_id, project=None): control = self._get_controller(queue, project) if control: return control.delete(queue, claim_id=claim_id, project=project) return None class SubscriptionController(storage.Subscription): """Controller to facilitate processing for subscription operations.""" _resource_name = 'subscription' def __init__(self, pool_catalog): super(SubscriptionController, self).__init__(pool_catalog) self._pool_catalog = pool_catalog self._get_controller = self._pool_catalog.get_subscription_controller def list(self, queue, project=None, marker=None, limit=storage.DEFAULT_SUBSCRIPTIONS_PER_PAGE): control = self._get_controller(queue, project) if control: return control.list(queue, project=project, marker=marker, limit=limit) def get(self, queue, subscription_id, project=None): control = self._get_controller(queue, project) if control: return control.get(queue, subscription_id, project=project) def create(self, queue, subscriber, ttl, options, project=None): control = self._get_controller(queue, project) if control: return control.create(queue, subscriber, ttl, options, project=project) def update(self, queue, subscription_id, project=None, **kwargs): control = self._get_controller(queue, project) if control: return control.update(queue, subscription_id, project=project, **kwargs) def delete(self, queue, subscription_id, project=None): control = self._get_controller(queue, project) if control: return control.delete(queue, subscription_id, project=project) def exists(self, queue, subscription_id, project=None): control = self._get_controller(queue, project) if control: return control.exists(queue, subscription_id, project=project) def confirm(self, queue, subscription_id, project=None, confirmed=None): control = self._get_controller(queue, project) if control: return control.confirm(queue, subscription_id, project=project, confirmed=confirmed) def get_with_subscriber(self, queue, subscriber, project=None): control = self._get_controller(queue, project) if control: return control.get_with_subscriber(queue, subscriber, project) class Catalog(object): """Represents the mapping between queues and pool drivers.""" def __init__(self, conf, cache, control): self._drivers = {} self._conf = conf self._cache = cache self.control = control self._conf.register_opts(pooling_catalog.ALL_OPTS, group=pooling_catalog.GROUP_NAME) self._catalog_conf = self._conf[pooling_catalog.GROUP_NAME] self._pools_ctrl = control.pools_controller self._flavor_ctrl = control.flavors_controller 
self._catalogue_ctrl = control.catalogue_controller # FIXME(cpp-cabrera): https://bugs.launchpad.net/zaqar/+bug/1252791 def _init_driver(self, pool_id, pool_conf=None): """Given a pool name, returns a storage driver. :param pool_id: The name of a pool. :type pool_id: str :returns: a storage driver :rtype: zaqar.storage.base.DataDriverBase """ if pool_id is not None: pool = self._pools_ctrl.get(pool_id, detailed=True) else: pool = pool_conf conf = utils.dynamic_conf(pool['uri'], pool['options'], conf=self._conf) storage = utils.load_storage_driver(conf, self._cache, control_driver=self.control) return pipeline.DataDriver(conf, storage, self.control) @decorators.caches(_pool_cache_key, _POOL_CACHE_TTL) def _pool_id(self, queue, project=None): """Get the ID for the pool assigned to the given queue. :param queue: name of the queue :param project: project to which the queue belongs :returns: pool id :raises QueueNotMapped: if queue is not mapped """ return self._catalogue_ctrl.get(project, queue)['pool'] def register(self, queue, project=None, flavor=None): """Register a new queue in the pool catalog. This method should be called whenever a new queue is being created, and will create an entry in the pool catalog for the given queue. After using this method to register the queue in the catalog, the caller should call `lookup()` to get a reference to a storage driver which will allow interacting with the queue's assigned backend pool. :param queue: Name of the new queue to assign to a pool :type queue: str :param project: Project to which the queue belongs, or None for the "global" or "generic" project. :type project: str :param flavor: Flavor for the queue (OPTIONAL) :type flavor: str :raises NoPoolFound: if not found """ # NOTE(gengchc): if exist, get queue's pool.flavor: # if queue's pool.flavor is different, first delete it and add it. # Otherwise, if the flavor in the meteredata of the queue is # modified, the catalog will be inconsistent. if self._catalogue_ctrl.exists(project, queue): catalogue = self._catalogue_ctrl.get(project, queue) oldpoolids = catalogue['pool'] oldpool = self._pools_ctrl.get(oldpoolids) oldflavor = oldpool['flavor'] msgtmpl = _('register queue to pool: old flavor: %(oldflavor)s ' ', new flavor: %(flavor)s') LOG.info(msgtmpl, {'oldflavor': oldflavor, 'flavor': flavor}) if oldpool['flavor'] != flavor: self._catalogue_ctrl.delete(project, queue) if not self._catalogue_ctrl.exists(project, queue): if flavor is not None: flavor = self._flavor_ctrl.get(flavor, project=project) pools = self._pools_ctrl.get_pools_by_flavor( flavor=flavor, detailed=True) pool = select.weighted(pools) pool = pool and pool['name'] or None msgtmpl = _('register queue to pool: new flavor:%(flavor)s') LOG.info(msgtmpl, {'flavor': flavor.get('name', None)}) else: # NOTE(flaper87): Get pools assigned to the default # group `None`. We should consider adding a `default_group` # option in the future. pools = self._pools_ctrl.get_pools_by_flavor(detailed=True) pool = select.weighted(pools) pool = pool and pool['name'] or None if not pool: # NOTE(flaper87): We used to raise NoPoolFound in this # case but we've decided to support automatic pool # creation. Note that we're now returning and the queue # is not being registered in the catalogue. 
This is done # on purpose since no pool exists and the "dummy" pool # doesn't exist in the storage if self.lookup(queue, project) is not None: return raise errors.NoPoolFound() msgtmpl = _('register queue to pool: new flavor: None') LOG.info(msgtmpl) msgtmpl = _('register queue: project:%(project)s' ' queue:%(queue)s pool:%(pool)s') LOG.info(msgtmpl, {'project': project, 'queue': queue, 'pool': pool}) self._catalogue_ctrl.insert(project, queue, pool) @_pool_id.purges def deregister(self, queue, project=None): """Removes a queue from the pool catalog. Call this method after successfully deleting it from a backend pool. :param queue: Name of the new queue to assign to a pool :type queue: str :param project: Project to which the queue belongs, or None for the "global" or "generic" project. :type project: str """ self._catalogue_ctrl.delete(project, queue) def get_queue_controller(self, queue, project=None): """Lookup the queue controller for the given queue and project. :param queue: Name of the queue for which to find a pool :param project: Project to which the queue belongs, or None to specify the "global" or "generic" project. :returns: The queue controller associated with the data driver for the pool containing (queue, project) or None if this doesn't exist. :rtype: Maybe QueueController """ target = self.lookup(queue, project) return target and target.queue_controller def get_message_controller(self, queue, project=None): """Lookup the message controller for the given queue and project. :param queue: Name of the queue for which to find a pool :param project: Project to which the queue belongs, or None to specify the "global" or "generic" project. :returns: The message controller associated with the data driver for the pool containing (queue, project) or None if this doesn't exist. :rtype: Maybe MessageController """ target = self.lookup(queue, project) return target and target.message_controller def get_claim_controller(self, queue, project=None): """Lookup the claim controller for the given queue and project. :param queue: Name of the queue for which to find a pool :param project: Project to which the queue belongs, or None to specify the "global" or "generic" project. :returns: The claim controller associated with the data driver for the pool containing (queue, project) or None if this doesn't exist. :rtype: Maybe ClaimController """ target = self.lookup(queue, project) return target and target.claim_controller def get_subscription_controller(self, queue, project=None): """Lookup the subscription controller for the given queue and project. :param queue: Name of the queue for which to find a pool :param project: Project to which the queue belongs, or None to specify the "global" or "generic" project. :returns: The subscription controller associated with the data driver for the pool containing (queue, project) or None if this doesn't exist. :rtype: Maybe SubscriptionController """ target = self.lookup(queue, project) return target and target.subscription_controller def get_topic_controller(self, topic, project=None): """Lookup the topic controller for the given queue and project. :param topic: Name of the topic for which to find a pool :param project: Project to which the topic belongs, or None to specify the "global" or "generic" project. :returns: The topic controller associated with the data driver for the pool containing (queue, project) or None if this doesn't exist. 
:rtype: Maybe TopicController """ target = self.lookup(topic, project) return target and target.topic_controller def get_default_pool(self, use_listing=True): if use_listing: cursor = self._pools_ctrl.list(limit=0) pools_list = list(next(cursor)) if pools_list: return self.get_driver(pools_list[0]['name']) if self._catalog_conf.enable_virtual_pool: conf_section = ('drivers:message_store:%s' % self._conf.drivers.message_store) try: # NOTE(flaper87): Try to load the driver to check # whether it can be used as the default store for # the default pool. utils.load_storage_driver(self._conf, self._cache, control_driver=self.control) except cerrors.InvalidDriver: # NOTE(kgriffs): Return `None`, rather than letting the # exception bubble up, so that the higher layer doesn't # have to duplicate the try..except..log code all over # the place. return None if conf_section not in self._conf: # NOTE(flaper87): If there's no config section for this storage # skip the pool registration entirely since we won't know how # to connect to it. return None # NOTE(flaper87): This assumes the storage driver type is the # same as the management. pool_conf = {'uri': self._conf[conf_section].uri, 'options': {}} # NOTE(flaper87): This will be using the config # storage configuration as the default one if no # default storage has been registered in the pool # store. return self.get_driver(None, pool_conf) def lookup(self, queue, project=None): """Lookup a pool driver for the given queue and project. :param queue: Name of the queue for which to find a pool :param project: Project to which the queue belongs, or None to specify the "global" or "generic" project. :returns: A storage driver instance for the appropriate pool. If the driver does not exist yet, it is created and cached. If the queue is not mapped, returns None. :rtype: Maybe DataDriver """ try: pool_id = self._pool_id(queue, project) except errors.QueueNotMapped as ex: LOG.debug(ex) return self.get_default_pool(use_listing=False) return self.get_driver(pool_id) def get_driver(self, pool_id, pool_conf=None): """Get storage driver, preferably cached, from a pool name. :param pool_id: The name of a pool. :type pool_id: str :returns: a storage driver :rtype: zaqar.storage.base.DataDriver """ try: return self._drivers[pool_id] except KeyError: # NOTE(cpp-cabrera): cache storage driver connection self._drivers[pool_id] = self._init_driver(pool_id, pool_conf) return self._drivers[pool_id] class TopicController(storage.Topic): """Routes operations to get the appropriate topic controller. 
:param pool_catalog: a catalog of available pools :type pool_catalog: queues.pooling.base.Catalog """ def __init__(self, pool_catalog): super(TopicController, self).__init__(None) self._pool_catalog = pool_catalog self._mgt_topic_ctrl = self._pool_catalog.control.topic_controller self._get_controller = self._pool_catalog.get_topic_controller def _list(self, project=None, kfilter={}, marker=None, limit=storage.DEFAULT_TOPICS_PER_PAGE, detailed=False, name=None): def all_pages(): yield next(self._mgt_topic_ctrl.list( project=project, kfilter=kfilter, marker=marker, limit=limit, detailed=detailed, name=name)) # make a heap compared with 'name' ls = heapq.merge(*[ utils.keyify('name', page) for page in all_pages() ]) marker_name = {} # limit the iterator and strip out the comparison wrapper def it(): for topic_cmp in itertools.islice(ls, limit): marker_name['next'] = topic_cmp.obj['name'] yield topic_cmp.obj yield it() yield marker_name and marker_name['next'] def _get(self, name, project=None): try: return self.get_metadata(name, project) except errors.TopicDoesNotExist: return {} def _create(self, name, metadata=None, project=None): flavor = None if isinstance(metadata, dict): flavor = metadata.get('_flavor') self._pool_catalog.register(name, project=project, flavor=flavor) # NOTE(cpp-cabrera): This should always succeed since we just # registered the project/topic. There is a race condition, # however. If between the time we register a topic and go to # look it up, the topic is deleted, then this assertion will # fail. pool = self._pool_catalog.lookup(name, project) if not pool: raise RuntimeError('Failed to register topic') return self._mgt_topic_ctrl.create(name, metadata=metadata, project=project) def _delete(self, name, project=None): mtHandler = self._get_controller(name, project) if mtHandler: # NOTE(cpp-cabrera): delete from the catalogue first. If # zaqar crashes in the middle of these two operations, # it is desirable that the entry be missing from the # catalogue and present in storage, rather than the # reverse. The former case leads to all operations # behaving as expected: 404s across the board, and a # functionally equivalent 204 on a create queue. The # latter case is more difficult to reason about, and may # yield 500s in some operations. self._pool_catalog.deregister(name, project) mtHandler.delete(name, project) return self._mgt_topic_ctrl.delete(name, project) def _exists(self, name, project=None): return self._mgt_topic_ctrl.exists(name, project=project) def get_metadata(self, name, project=None): return self._mgt_topic_ctrl.get_metadata(name, project=project) def set_metadata(self, name, metadata, project=None): # NOTE(gengchc2): If flavor metadata is modified in topic, # The topic needs to be re-registered to pools, otherwise # the topic flavor parameter is not consistent with the pool. 
flavor = None if isinstance(metadata, dict): flavor = metadata.get('_flavor') self._pool_catalog.register(name, project=project, flavor=flavor) return self._mgt_topic_ctrl.set_metadata(name, metadata=metadata, project=project) def _stats(self, name, project=None): mtHandler = self._get_controller(name, project) if mtHandler: return mtHandler.stats(name, project=project) raise errors.TopicDoesNotExist(name, project) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5730135 zaqar-20.1.0.dev29/zaqar/storage/redis/0000775000175100017510000000000015033040026016631 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/redis/__init__.py0000664000175100017510000000370615033040005020745 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. r""" Zaqar backed by Redis. Redis? ------ Redis is sometimes called a "data structure store" because it makes common data structures like hashes, lists, and sets available in shared, in-memory storage. Zaqar chose redis because it has strong consistency and its Lua scripting allows for semi-complex transactions to be built atop the primitives it provides. Supported Features ------------------ - FIFO - Claims - High Throughput[1]_ - At-least-once Delivery .. [1] This depends on the backing Redis store performance. For more information, see `Redis' benchmarks `_. Redis can be used both a storage driver and management driver. For the management driver, you need to enable the redis storage options in redis.conf. Redis persistent storage supports two ways: RDB and AOF. The following is RDB way: The configuration is as follows: save E.g save 900 1 save 300 10 save 60 10000 NOTE: save time, the above means that a changed key interval 900s for persistent storage; 10 changed keys 300s for storage; 10000 changed keys 60s for storage. Unsupported Features -------------------- - Durability[2]_ .. [2] As an in-memory store, Redis doesn't support the durability guarantees the MongoDB or SQLAlchemy backends do. """ from zaqar.storage.redis import driver # Hoist classes into package namespace ControlDriver = driver.ControlDriver DataDriver = driver.DataDriver ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/redis/catalogue.py0000664000175100017510000002130115033040005021141 0ustar00mylesmyles# Copyright (c) 2017 ZTE Corporation.. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
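Since the Redis driver notes above recommend enabling persistence (RDB or AOF) when Redis doubles as the management store, a deployment can sanity-check the server's `save` policy from Python. A hypothetical check using redis-py (host and port are placeholders):

    import redis

    client = redis.StrictRedis(host='localhost', port=6379)
    policy = client.config_get('save')
    # e.g. {'save': '900 1 300 10 60 10000'}; an empty string means
    # RDB snapshotting is disabled on this server.
    if not policy.get('save'):
        print('warning: RDB persistence appears to be disabled')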
"""Redis storage controller for the queues catalogue. Serves to construct an association between a project + queue -> pool. :: { 'p_q': project_queue :: str, 's': pool_identifier :: str } """ from oslo_log import log as logging import redis from zaqar.i18n import _ from zaqar.storage import base from zaqar.storage import errors from zaqar.storage.redis import utils LOG = logging.getLogger(__name__) CATALOGUE_SUFFIX = 'catalogue' COUNTING_BATCH_SIZE = 100 class CatalogueController(base.CatalogueBase): """Implements Catalogue resource operations using Redis. * Project Index (Redis sorted set): Set of all queue_ids for the given project, ordered by name. Key: .catalogue +--------+-----------------------------+ | Id | Value | +========+=============================+ | name | . | +--------+-----------------------------+ * Queue and pool Information (Redis hash): Key: ..catalogue +----------------------+---------+ | Name | Field | +======================+=========+ | Project | p | +----------------------+---------+ | Queue | p_q | +----------------------+---------+ | Pool | p_p | +----------------------+---------+ """ def __init__(self, *args, **kwargs): super(CatalogueController, self).__init__(*args, **kwargs) self._client = self.driver.connection @utils.raises_conn_error @utils.retries_on_connection_error def _insert(self, project, queue, pool): queue_key = utils.scope_queue_name(queue, project) catalogue_project_key = utils.scope_pool_catalogue(project, CATALOGUE_SUFFIX) catalogue_queue_key = utils.scope_pool_catalogue(queue_key, CATALOGUE_SUFFIX) # Check if the queue already exists. if self._exists(queue, project): return False catalogue = { 'p': project, 'p_q': queue, 'p_p': pool } # Pipeline ensures atomic inserts. with self._client.pipeline() as pipe: pipe.zadd(catalogue_project_key, {queue_key: 1}) pipe.hmset(catalogue_queue_key, catalogue) try: pipe.execute() except redis.exceptions.ResponseError: msgtmpl = _('CatalogueController:insert %(prj)s:' '%(queue)s %(pool)s failed') LOG.exception(msgtmpl, {'prj': project, 'queue': queue, 'pool': pool}) return False msgtmpl = _('CatalogueController:insert %(prj)s:%(queue)s' ':%(pool)s, success') LOG.info(msgtmpl, {'prj': project, 'queue': queue, 'pool': pool}) return True @utils.raises_conn_error @utils.retries_on_connection_error def list(self, project): catalogue_project_key = utils.scope_pool_catalogue(project, CATALOGUE_SUFFIX) ctlgs = [] offset = 0 while True: queues = self._client.zrange(catalogue_project_key, offset, offset + COUNTING_BATCH_SIZE - 1) if not queues: break offset += len(queues) for queue in queues: catalogue_queue_key =\ utils.scope_pool_catalogue(queue, CATALOGUE_SUFFIX) ctlg = self._client.hgetall(catalogue_queue_key) ctlgs.append(ctlg) return (_normalize(v) for v in ctlgs) @utils.raises_conn_error @utils.retries_on_connection_error def get(self, project, queue): queue_key = utils.scope_queue_name(queue, project) catalogue_queue_key = \ utils.scope_pool_catalogue(queue_key, CATALOGUE_SUFFIX) ctlg = self._client.hgetall(catalogue_queue_key) if ctlg is None or len(ctlg) == 0: raise errors.QueueNotMapped(queue, project) return _normalize(ctlg) @utils.raises_conn_error @utils.retries_on_connection_error def _exists(self, project, queue): queue_key = utils.scope_queue_name(queue, project) catalogue_queue_key = \ utils.scope_pool_catalogue(queue_key, CATALOGUE_SUFFIX) return self._client.exists(catalogue_queue_key) @utils.raises_conn_error @utils.retries_on_connection_error def exists(self, project, queue): return 
self._exists(project, queue) def insert(self, project, queue, pool): self._insert(project, queue, pool) @utils.raises_conn_error @utils.retries_on_connection_error def delete(self, project, queue): # (gengchc): Check if the queue already exists. if not self._exists(project, queue): return True queue_key = utils.scope_queue_name(queue, project) catalogue_project_key = utils.scope_pool_catalogue(project, CATALOGUE_SUFFIX) catalogue_queue_key = utils.scope_pool_catalogue(queue_key, CATALOGUE_SUFFIX) # (gengchc) Pipeline ensures atomic inserts. with self._client.pipeline() as pipe: pipe.zrem(catalogue_project_key, queue_key) pipe.delete(catalogue_queue_key) try: pipe.execute() except redis.exceptions.ResponseError: msgtmpl = _('CatalogueController:delete %(prj)s' ':%(queue)s failed') LOG.info(msgtmpl, {'prj': project, 'queue': queue}) return False msgtmpl = _('CatalogueController:delete %(prj)s:%(queue)s success') LOG.info(msgtmpl, {'prj': project, 'queue': queue}) @utils.raises_conn_error @utils.retries_on_connection_error def _update(self, project, queue, pool): # Check if the queue already exists. if not self._exists(project, queue): raise errors.QueueNotMapped(queue, project) queue_key = utils.scope_queue_name(queue, project) catalogue_queue_key = utils.scope_pool_catalogue(queue_key, CATALOGUE_SUFFIX) with self._client.pipeline() as pipe: pipe.hset(catalogue_queue_key, "pl", pool) try: pipe.execute() except redis.exceptions.ResponseError: msgtmpl = _('CatalogueController:_update %(prj)s' ':%(queue)s:%(pool)s failed') LOG.exception(msgtmpl, {'prj': project, 'queue': queue, 'pool': pool}) return False msgtmpl = _('CatalogueController:_update %(prj)s:%(queue)s' ':%(pool)s') LOG.info(msgtmpl, {'prj': project, 'queue': queue, 'pool': pool}) @utils.raises_conn_error @utils.retries_on_connection_error def update(self, project, queue, pool=None): if pool is None: return False self._update(project, queue, pool) @utils.raises_conn_error @utils.retries_on_connection_error def drop_all(self): allcatalogueobj_key = self._client.keys(pattern='*catalog') if len(allcatalogueobj_key) == 0: return with self._client.pipeline() as pipe: for key in allcatalogueobj_key: pipe.delete(key) try: pipe.execute() except redis.exceptions.ResponseError: return False def _normalize(entry): return { 'queue': str(entry['p_q']), 'project': str(entry['p']), 'pool': str(entry['p_p']) } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/redis/claims.py0000664000175100017510000004441015033040005020453 0ustar00mylesmyles# Copyright (c) 2014 Prashanth Raghu. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
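# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): the claim controller
# defined below tracks active claims in a per-queue sorted set whose scores
# are the claims' expiration timestamps. That choice makes both the "does
# this claim exist?" check and garbage collection cheap, as sketched here
# with a made-up key and claim id.

import time

import redis


def _sketch_claim_bookkeeping():
    client = redis.Redis()
    claims_set_key = 'example-project.example-queue.claims'  # hypothetical
    now = int(time.time())

    # Registering a claim: score it by its expiration time.
    client.zadd(claims_set_key, {'claim-uuid-1': now + 60})

    # Existence check: ZSCORE returns None for a missing member.
    is_active = client.zscore(claims_set_key, 'claim-uuid-1') is not None

    # GC pass: drop every claim whose expiration is in the past, which is
    # what ClaimController._gc() below does with zremrangebyscore.
    client.zremrangebyscore(claims_set_key, 0, now)
    return is_active
# ---------------------------------------------------------------------------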
import functools

import msgpack
from oslo_utils import timeutils
from oslo_utils import uuidutils

from zaqar.common import decorators
from zaqar import storage
from zaqar.storage import errors
from zaqar.storage.redis import messages
from zaqar.storage.redis import scripting
from zaqar.storage.redis import utils

QUEUE_CLAIMS_SUFFIX = 'claims'
CLAIM_MESSAGES_SUFFIX = 'messages'
# The rank counter is an atomic index to rank messages
# in a FIFO manner.
MESSAGE_RANK_COUNTER_SUFFIX = 'rank_counter'

RETRY_CLAIM_TIMEOUT = 10

# NOTE(kgriffs): Number of claims to read at a time when counting
# the total number of claimed messages for a queue.
#
# TODO(kgriffs): Tune this parameter and/or make it configurable. It
# takes ~0.8 ms to retrieve 100 items from a sorted set on a 2.7 GHz
# Intel Core i7 (not including network latency).
COUNTING_BATCH_SIZE = 100


class ClaimController(storage.Claim, scripting.Mixin):
    """Implements claim resource operations using Redis.

    Redis Data Structures:

    1. Claims list (Redis set) contains claim IDs

        Key: <project_id>.<queue_name>.claims

        +-------------+---------+
        |  Name       |  Field  |
        +=============+=========+
        |  claim_ids  |  m      |
        +-------------+---------+

    2. Claimed Messages (Redis set) contains the list
       of message ids stored per claim

        Key: <claim_id>.messages

    3. Claim info (Redis hash):

        Key: <claim_id>

        +----------------+---------+
        |  Name          |  Field  |
        +================+=========+
        |  ttl           |  t      |
        +----------------+---------+
        |  id            |  id     |
        +----------------+---------+
        |  expires       |  e      |
        +----------------+---------+
        |  num_messages  |  n      |
        +----------------+---------+
    """

    script_names = ['claim_messages']

    def __init__(self, *args, **kwargs):
        super(ClaimController, self).__init__(*args, **kwargs)
        self._client = self.driver.connection
        self._packer = msgpack.Packer(use_bin_type=True).pack
        self._unpacker = functools.partial(msgpack.unpackb)

    @decorators.lazy_property(write=False)
    def _queue_ctrl(self):
        return self.driver.queue_controller

    def _get_claim_info(self, claim_id, fields, transform=int):
        """Get one or more fields from the claim Info."""

        values = self._client.hmget(claim_id, fields)
        if values == [None]:
            return values
        else:
            return [transform(v) for v in values] if transform else values

    def _claim_messages(self, msgset_key, now, limit,
                        claim_id, claim_expires, msg_ttl, msg_expires):
        # NOTE(kgriffs): A watch on a pipe could also be used, but that
        # is less efficient and less predictable, based on our experience
        # in having to do something similar in the MongoDB driver.
        func = self._scripts['claim_messages']

        args = [now, limit, claim_id, claim_expires, msg_ttl, msg_expires]
        return func(keys=[msgset_key], args=args)

    def _exists(self, queue, claim_id, project):
        client = self._client
        claims_set_key = utils.scope_claims_set(queue, project,
                                                QUEUE_CLAIMS_SUFFIX)

        # In some cases the queue may no longer exist, so check for it
        # first and return False if it is missing.
        # TODO(flwang): We should delete all related data after the queue
        # is deleted. See the blueprint for more detail:
        # https://blueprints.launchpad.net/zaqar/+spec/clear-resources-after-delete-queue
        if not self._queue_ctrl._exists(queue, project):
            return False

        # Return False if no such claim exists
        # TODO(prashanthr_): Discuss the feasibility of a bloom filter.
        if client.zscore(claims_set_key, claim_id) is None:
            return False

        expires = self._get_claim_info(claim_id, b'e')[0]
        now = timeutils.utcnow_ts()

        if expires <= now:
            # NOTE(kgriffs): Redis should automatically remove the
            # other records in the very near future. This one
            # has to be manually deleted, however.
client.zrem(claims_set_key, claim_id) return False return True def _get_claimed_message_keys(self, claim_msgs_key): return self._client.lrange(claim_msgs_key, 0, -1) def _count_messages(self, queue, project): """Count and return the total number of claimed messages.""" # NOTE(kgriffs): Iterate through all claims, adding up the # number of messages per claim. This is obviously slower # than keeping a side counter, but is also less error-prone. # Plus, it avoids having to do a lot of extra work during # garbage collection passes. Also, considering that most # workloads won't require a large number of claims, most of # the time we can do this in a single pass, so it is still # pretty fast. claims_set_key = utils.scope_claims_set(queue, project, QUEUE_CLAIMS_SUFFIX) num_claimed = 0 offset = 0 while True: claim_ids = self._client.zrange(claims_set_key, offset, offset + COUNTING_BATCH_SIZE - 1) if not claim_ids: break offset += len(claim_ids) with self._client.pipeline() as pipe: for cid in claim_ids: pipe.hmget(cid, 'n') claim_infos = pipe.execute() for info in claim_infos: # NOTE(kgriffs): In case the claim was deleted out # from under us, sanity-check that we got a non-None # info list. if info: num_claimed += int(info[0]) return num_claimed def _del_message(self, queue, project, claim_id, message_id, pipe): """Called by MessageController when messages are being deleted. This method removes the message from claim data structures. """ claim_msgs_key = utils.scope_claim_messages(claim_id, CLAIM_MESSAGES_SUFFIX) # NOTE(kgriffs): In practice, scanning will be quite fast, # since the usual pattern is to delete messages from oldest # to newest, and the list is sorted in that order. Also, # the length of the list will usually be ~10 messages. pipe.lrem(claim_msgs_key, 1, message_id) # NOTE(kgriffs): Decrement the message counter used for stats pipe.hincrby(claim_id, 'n', -1) @utils.raises_conn_error @utils.retries_on_connection_error def _gc(self, queue, project): """Garbage-collect expired claim data. Not all claim data can be automatically expired. This method cleans up the remainder. 
:returns: Number of claims removed """ claims_set_key = utils.scope_claims_set(queue, project, QUEUE_CLAIMS_SUFFIX) now = timeutils.utcnow_ts() num_removed = self._client.zremrangebyscore(claims_set_key, 0, now) return num_removed @utils.raises_conn_error @utils.retries_on_connection_error def get(self, queue, claim_id, project=None): if not self._exists(queue, claim_id, project): raise errors.ClaimDoesNotExist(claim_id, queue, project) claim_msgs_key = utils.scope_claim_messages(claim_id, CLAIM_MESSAGES_SUFFIX) # basic_messages msg_keys = self._get_claimed_message_keys(claim_msgs_key) claimed_msgs = messages.Message.from_redis_bulk(msg_keys, self._client) now = timeutils.utcnow_ts() basic_messages = [msg.to_basic(now) for msg in claimed_msgs if msg] # claim_meta now = timeutils.utcnow_ts() expires, ttl = self._get_claim_info(claim_id, [b'e', b't']) update_time = expires - ttl age = now - update_time claim_meta = { 'age': age, 'ttl': ttl, 'id': claim_id, } return claim_meta, basic_messages @utils.raises_conn_error @utils.retries_on_connection_error def create(self, queue, metadata, project=None, limit=storage.DEFAULT_MESSAGES_PER_CLAIM): queue_ctrl = self.driver.queue_controller msg_ctrl = self.driver.message_controller claim_ttl = metadata['ttl'] grace = metadata['grace'] now = timeutils.utcnow_ts() msg_ttl = claim_ttl + grace claim_expires = now + claim_ttl msg_expires = claim_expires + grace # Get the maxClaimCount and deadLetterQueue from current queue's meta queue_meta = queue_ctrl.get(queue, project=project) claim_id = uuidutils.generate_uuid() claimed_msgs = [] # NOTE(kgriffs): Claim some messages msgset_key = utils.msgset_key(queue, project) claimed_ids = self._claim_messages(msgset_key, now, limit, claim_id, claim_expires, msg_ttl, msg_expires) if claimed_ids: claimed_msgs = messages.Message.from_redis_bulk(claimed_ids, self._client) claimed_msgs = [msg.to_basic(now) for msg in claimed_msgs] # NOTE(kgriffs): Perist claim records with self._client.pipeline() as pipe: claim_msgs_key = utils.scope_claim_messages( claim_id, CLAIM_MESSAGES_SUFFIX) for mid in claimed_ids: pipe.rpush(claim_msgs_key, mid) pipe.expire(claim_msgs_key, claim_ttl) claim_info = { 'id': claim_id, 't': claim_ttl, 'e': claim_expires, 'n': len(claimed_ids), } pipe.hmset(claim_id, claim_info) pipe.expire(claim_id, claim_ttl) # NOTE(kgriffs): Add the claim ID to a set so that # existence checks can be performed quickly. This # is also used as a watch key in order to guard # against race conditions. # # A sorted set is used to facilitate cleaning # up the IDs of expired claims. claims_set_key = utils.scope_claims_set(queue, project, QUEUE_CLAIMS_SUFFIX) pipe.zadd(claims_set_key, {claim_id: claim_expires}) pipe.execute() if ('_max_claim_count' in queue_meta and '_dead_letter_queue' in queue_meta): claimed_msgs_removed = [] for msg in claimed_msgs: if msg: claimed_count = msg['claim_count'] if claimed_count < queue_meta['_max_claim_count']: # 1. Save the new max claim count for message claim_count = claimed_count + 1 dic = {"c.c": claim_count} pipe.hmset(msg['id'], dic) pipe.execute() else: # 2. Check if the message's claim count has # exceeded the max claim count defined in the # queue, if so, move the message to the dead # letter queue and modify it's ttl. # NOTE(gengchc): We're moving message by # moving the message id from queue to dead # letter queue directly.That means, the queue # and dead letter queue must be created on # the same pool. 
ddl = utils.scope_queue_name( queue_meta['_dead_letter_queue'], project) ddl_ttl = queue_meta.get( "_dead_letter_queue_messages_ttl") dic = {"t": msg['ttl']} if ddl_ttl: dic = {"t": ddl_ttl} pipe.hmset(msg['id'], dic) queueproject = [s for s in ddl.split('.')] msgs_key_ddl = utils.msgset_key( queueproject[1], queueproject[0]) counter_key_ddl = utils.scope_queue_index( queueproject[1], queueproject[0], MESSAGE_RANK_COUNTER_SUFFIX) msgs_key = utils.msgset_key( queue, project=project) pipe.zrem(msgs_key, msg['id']) message_ids = [] message_ids.append(msg['id']) msg_ctrl._index_messages(msgs_key_ddl, counter_key_ddl, message_ids) pipe.execute() # Add dead letter message to # claimed_msgs_removed, finally remove # them from claimed_msgs. claimed_msgs_removed.append(msg) # Remove dead letter messages from claimed_msgs. for msg_remove in claimed_msgs_removed: claimed_msgs.remove(msg_remove) if len(claimed_msgs) == 0: return None, iter([]) return claim_id, claimed_msgs @utils.raises_conn_error @utils.retries_on_connection_error def update(self, queue, claim_id, metadata, project=None): if not self._exists(queue, claim_id, project): raise errors.ClaimDoesNotExist(claim_id, queue, project) now = timeutils.utcnow_ts() claim_ttl = metadata['ttl'] claim_expires = now + claim_ttl grace = metadata['grace'] msg_ttl = claim_ttl + grace msg_expires = claim_expires + grace claim_msgs_key = utils.scope_claim_messages(claim_id, CLAIM_MESSAGES_SUFFIX) msg_keys = self._get_claimed_message_keys(claim_msgs_key) claimed_msgs = messages.MessageEnvelope.from_redis_bulk(msg_keys, self._client) claim_info = { 't': claim_ttl, 'e': claim_expires, } with self._client.pipeline() as pipe: for msg in claimed_msgs: if msg: msg.claim_id = claim_id msg.claim_expires = claim_expires if _msg_would_expire(msg, claim_expires): msg.ttl = msg_ttl msg.expires = msg_expires # TODO(kgriffs): Rather than writing back the # entire message, only set the fields that # have changed. # # When this change is made, don't forget to # also call pipe.expire with the new TTL value. msg.to_redis(pipe) # Update the claim id and claim expiration info # for all the messages. pipe.hmset(claim_id, claim_info) pipe.expire(claim_id, claim_ttl) pipe.expire(claim_msgs_key, claim_ttl) claims_set_key = utils.scope_claims_set(queue, project, QUEUE_CLAIMS_SUFFIX) pipe.zadd(claims_set_key, {claim_id: claim_expires}) pipe.execute() @utils.raises_conn_error @utils.retries_on_connection_error def delete(self, queue, claim_id, project=None): # NOTE(prashanthr_): Return silently when the claim # does not exist if not self._exists(queue, claim_id, project): return now = timeutils.utcnow_ts() claim_msgs_key = utils.scope_claim_messages(claim_id, CLAIM_MESSAGES_SUFFIX) msg_keys = self._get_claimed_message_keys(claim_msgs_key) claimed_msgs = messages.MessageEnvelope.from_redis_bulk(msg_keys, self._client) # Update the claim id and claim expiration info # for all the messages. claims_set_key = utils.scope_claims_set(queue, project, QUEUE_CLAIMS_SUFFIX) with self._client.pipeline() as pipe: pipe.zrem(claims_set_key, claim_id) pipe.delete(claim_id) pipe.delete(claim_msgs_key) for msg in claimed_msgs: if msg: msg.claim_id = None msg.claim_expires = now # TODO(kgriffs): Rather than writing back the # entire message, only set the fields that # have changed. 
msg.to_redis(pipe) pipe.execute() def _msg_would_expire(message, now): return message.expires <= now ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/redis/controllers.py0000664000175100017510000000225615033040005021553 0ustar00mylesmyles# Copyright (c) 2014 Prashanth Raghu # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from zaqar.storage.redis import catalogue from zaqar.storage.redis import claims from zaqar.storage.redis import flavors from zaqar.storage.redis import messages from zaqar.storage.redis import pools from zaqar.storage.redis import queues from zaqar.storage.redis import subscriptions CatalogueController = catalogue.CatalogueController ClaimController = claims.ClaimController FlavorsController = flavors.FlavorsController MessageController = messages.MessageController QueueController = queues.QueueController PoolsController = pools.PoolsController SubscriptionController = subscriptions.SubscriptionController ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/redis/driver.py0000664000175100017510000002762515033040005020507 0ustar00mylesmyles# Copyright (c) 2014 Prashanth Raghu. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
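# ---------------------------------------------------------------------------
# Editor's note (illustrative examples, not part of the original module):
# the ConnectionURI class defined below accepts 'redis://' URIs in three
# shapes. The hosts, passwords, and master name here are made up:
#
#   TCP:           redis://localhost:6379?dbid=0&socket_timeout=0.1
#   TCP w/ auth:   redis://default:secret@localhost:6379
#   Unix socket:   redis:/var/run/redis/redis.sock?dbid=1
#   Sentinel:      redis://host1:26379,host2:26379?master=mymaster
#
# For instance, parsing a sentinel URI yields the host list and master name:
#
#     uri = ConnectionURI('redis://host1:26379,host2:26379?master=mymaster')
#     assert uri.strategy == STRATEGY_SENTINEL
#     assert uri.sentinels == [('host1', 26379), ('host2', 26379)]
#     assert uri.master == 'mymaster'
# ---------------------------------------------------------------------------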
from oslo_log import log as logging from oslo_utils import netutils from osprofiler import profiler import redis import redis.sentinel import urllib from zaqar.common import decorators from zaqar.common import errors from zaqar.conf import drivers_management_store_redis from zaqar.conf import drivers_message_store_redis from zaqar.i18n import _ from zaqar import storage from zaqar.storage.redis import controllers REDIS_DEFAULT_PORT = 6379 SENTINEL_DEFAULT_PORT = 26379 DEFAULT_SOCKET_TIMEOUT = 0.1 DEFAULT_DBID = 0 STRATEGY_TCP = 1 STRATEGY_UNIX = 2 STRATEGY_SENTINEL = 3 LOG = logging.getLogger(__name__) class ConnectionURI(object): def __init__(self, uri): # TODO(prashanthr_): Add SSL support try: parsed_url = urllib.parse.urlparse(uri) except SyntaxError: raise errors.ConfigurationError(_('Malformed Redis URI')) if parsed_url.scheme != 'redis': raise errors.ConfigurationError(_('Invalid scheme in Redis URI')) path = parsed_url.path query = parsed_url.query # NOTE(tkajinam): Replace '' by None self.password = parsed_url.password or None self.username = parsed_url.username or None netloc = parsed_url.netloc if '@' in netloc: cred, sep, netloc = netloc.partition('@') if self.username and not self.password: # NOTE(tkajinam): This is kept for backword compatibility but # should be removed after 2025.1 LOG.warning('Credential in redis uri does not contain \':\'. ' 'Make sure that \':\' is added before password.') self.password = self.username self.username = None query_params = dict(urllib.parse.parse_qsl(query)) # Generic self.strategy = None self.socket_timeout = float(query_params.get('socket_timeout', DEFAULT_SOCKET_TIMEOUT)) self.dbid = int(query_params.get('dbid', DEFAULT_DBID)) # TCP self.port = None self.hostname = None # UNIX socket self.unix_socket_path = None # Sentinel self.master = None self.sentinels = [] self.sentinel_username = query_params.get('sentinel_username') self.sentinel_password = query_params.get('sentinel_password') if 'master' in query_params: # NOTE(prashanthr_): Configure redis driver in sentinel mode self.strategy = STRATEGY_SENTINEL self.master = query_params['master'] # NOTE(kgriffs): Have to parse list of sentinel hosts ourselves # since urllib doesn't support it. for each_host in netloc.split(','): try: name, port = netutils.parse_host_port( each_host, SENTINEL_DEFAULT_PORT) except ValueError: raise errors.ConfigurationError( 'invalid redis server format %s' % each_host) self.sentinels.append((name, port)) if not self.sentinels: msg = _('The Redis configuration URI does not define any ' 'sentinel hosts') raise errors.ConfigurationError(msg) elif netloc: if ',' in netloc: # NOTE(kgriffs): They probably were specifying # a list of sentinel hostnames, but forgot to # add 'master' to the query string. msg = _('The Redis URI specifies multiple sentinel hosts, ' 'but is missing the "master" query string ' 'parameter. 
Please set "master" to the name of ' 'the Redis master server as specified in the ' 'sentinel configuration file.') raise errors.ConfigurationError(msg) self.strategy = STRATEGY_TCP try: self.port = parsed_url.port or REDIS_DEFAULT_PORT except ValueError: msg = _('The Redis configuration URI contains an ' 'invalid port') raise errors.ConfigurationError(msg) if not parsed_url.hostname: msg = _('Missing host name in Redis URI') raise errors.ConfigurationError(msg) self.hostname = parsed_url.hostname else: self.strategy = STRATEGY_UNIX if not path: msg = _('Missing path in Redis URI') raise errors.ConfigurationError(msg) self.unix_socket_path = path assert self.strategy in (STRATEGY_TCP, STRATEGY_UNIX, STRATEGY_SENTINEL) class DataDriver(storage.DataDriverBase): # NOTE(flaper87): The driver doesn't guarantee # durability for Redis. BASE_CAPABILITIES = (storage.Capabilities.FIFO, storage.Capabilities.CLAIMS, storage.Capabilities.AOD, storage.Capabilities.HIGH_THROUGHPUT) _DRIVER_OPTIONS = [(drivers_management_store_redis.GROUP_NAME, drivers_management_store_redis.ALL_OPTS), (drivers_message_store_redis.GROUP_NAME, drivers_message_store_redis.ALL_OPTS)] def __init__(self, conf, cache, control_driver): super(DataDriver, self).__init__(conf, cache, control_driver) self.redis_conf = self.conf[drivers_message_store_redis.GROUP_NAME] server_version = self.connection.info()['redis_version'] if tuple(map(int, server_version.split('.'))) < (2, 6): msg = _('The Redis driver requires redis-server>=2.6, ' '%s found') % server_version raise RuntimeError(msg) # FIXME(flaper87): Make this dynamic self._capabilities = self.BASE_CAPABILITIES @property def capabilities(self): return self._capabilities def is_alive(self): try: return self.connection.ping() except redis.exceptions.ConnectionError: return False def close(self): self.connection.close() def _health(self): KPI = {} KPI['storage_reachable'] = self.is_alive() KPI['operation_status'] = self._get_operation_status() # TODO(kgriffs): Add metrics re message volume return KPI def gc(self): # TODO(kgriffs): Check time since last run, and if # it hasn't been very long, skip. This allows for # running the GC script on multiple boxes for HA, # without having them all attempting to GC at the # same moment. 
self.message_controller.gc() @decorators.lazy_property(write=False) def connection(self): """Redis client connection instance.""" return _get_redis_client(self) @decorators.lazy_property(write=False) def message_controller(self): controller = controllers.MessageController(self) if (self.conf.profiler.enabled and self.conf.profiler.trace_message_store): return profiler.trace_cls("redis_message_controller")(controller) else: return controller @decorators.lazy_property(write=False) def claim_controller(self): controller = controllers.ClaimController(self) if (self.conf.profiler.enabled and self.conf.profiler.trace_message_store): return profiler.trace_cls("redis_claim_controller")(controller) else: return controller @decorators.lazy_property(write=False) def subscription_controller(self): controller = controllers.SubscriptionController(self) if (self.conf.profiler.enabled and self.conf.profiler.trace_message_store): return profiler.trace_cls("redis_subscription_" "controller")(controller) else: return controller class ControlDriver(storage.ControlDriverBase): def __init__(self, conf, cache): super(ControlDriver, self).__init__(conf, cache) self.conf.register_opts( drivers_management_store_redis.ALL_OPTS, group=drivers_management_store_redis.GROUP_NAME) self.redis_conf = self.conf[drivers_management_store_redis.GROUP_NAME] def close(self): self.connection.close() @decorators.lazy_property(write=False) def connection(self): """Redis client connection instance.""" return _get_redis_client(self) @decorators.lazy_property(write=False) def queue_controller(self): controller = controllers.QueueController(self) if (self.conf.profiler.enabled and (self.conf.profiler.trace_message_store or self.conf.profiler.trace_management_store)): return profiler.trace_cls("redis_queue_controller")(controller) else: return controller @property def pools_controller(self): controller = controllers.PoolsController(self) if (self.conf.profiler.enabled and self.conf.profiler.trace_management_store): return profiler.trace_cls("redis_pools_controller")(controller) else: return controller @property def catalogue_controller(self): controller = controllers.CatalogueController(self) if (self.conf.profiler.enabled and self.conf.profiler.trace_management_store): return profiler.trace_cls("redis_catalogue_" "controller")(controller) else: return controller @property def flavors_controller(self): controller = controllers.FlavorsController(self) if (self.conf.profiler.enabled and self.conf.profiler.trace_management_store): return profiler.trace_cls("redis_flavors_controller")(controller) else: return controller @decorators.lazy_property(write=False) def topic_controller(self): pass def _get_redis_client(driver): conf = driver.redis_conf connection_uri = ConnectionURI(conf.uri) if connection_uri.strategy == STRATEGY_SENTINEL: sentinel = redis.sentinel.Sentinel( connection_uri.sentinels, db=connection_uri.dbid, username=connection_uri.username, password=connection_uri.password, sentinel_kwargs={ 'socket_timeout': connection_uri.socket_timeout, 'username': connection_uri.sentinel_username, 'password': connection_uri.sentinel_password }, socket_timeout=connection_uri.socket_timeout) return sentinel.master_for(connection_uri.master) elif connection_uri.strategy == STRATEGY_TCP: return redis.Redis( host=connection_uri.hostname, port=connection_uri.port, db=connection_uri.dbid, username=connection_uri.username, password=connection_uri.password, socket_timeout=connection_uri.socket_timeout) else: return redis.Redis( 
        unix_socket_path=connection_uri.unix_socket_path,
            db=connection_uri.dbid,
            username=connection_uri.username,
            password=connection_uri.password,
            socket_timeout=connection_uri.socket_timeout)
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/redis/flavors.py0000664000175100017510000001445315033040005020663 0ustar00mylesmyles
# Copyright (c) 2017 ZTE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import functools

import msgpack
import redis

from zaqar.storage import base
from zaqar.storage import errors
from zaqar.storage.redis import utils


class FlavorsController(base.FlavorsBase):
    """Implements flavor resource operations using Redis.

    Redis Data Structures:

    1. All flavor_ids (Redis sorted set):

        Set of all flavor_ids, ordered by name. Used to delete all
        records in the flavors table.

        Key: flavors

        +--------+-----------------------------+
        |  Id    |  Value                      |
        +========+=============================+
        |  name  |  <flavor_name>              |
        +--------+-----------------------------+

    2. Project Index (Redis sorted set):

        Set of all flavors for the given project, ordered by name.

        Key: <project_id>.flavors

        +--------+-----------------------------+
        |  Id    |  Value                      |
        +========+=============================+
        |  name  |  <flavor_name>              |
        +--------+-----------------------------+

    3. Flavor Information (Redis hash):

        Key: <flavor_name>.flavors

        +----------------------+---------+
        |  Name                |  Field  |
        +======================+=========+
        |  flavor              |  f      |
        +----------------------+---------+
        |  project             |  p      |
        +----------------------+---------+
        |  capabilities        |  c      |
        +----------------------+---------+
    """

    def __init__(self, *args, **kwargs):
        super(FlavorsController, self).__init__(*args, **kwargs)
        self._client = self.driver.connection
        self._packer = msgpack.Packer(use_bin_type=True).pack
        self._unpacker = functools.partial(msgpack.unpackb)

    @utils.raises_conn_error
    def list(self, project=None, marker=None, limit=10, detailed=False):
        client = self._client
        subset_key = utils.flavor_project_subset_key(project)
        marker_key = utils.flavor_name_hash_key(marker)
        if marker_key:
            rank = client.zrank(subset_key, marker_key)
        else:
            rank = None
        start = rank + 1 if rank is not None else 0

        cursor = (f for f in client.zrange(subset_key, start,
                                           start + limit - 1))
        marker_next = {}

        def normalizer(flavor):
            marker_next['next'] = flavor['f']
            return self._normalize(flavor, detailed=detailed)

        yield utils.FlavorListCursor(self._client, cursor, normalizer)
        yield marker_next and marker_next['next']

    @utils.raises_conn_error
    def get(self, name, project=None, detailed=False):
        hash_key = utils.flavor_name_hash_key(name)
        flavors = self._client.hgetall(hash_key)

        if flavors is None or len(flavors) == 0:
            raise errors.FlavorDoesNotExist(name)

        return self._normalize(flavors, detailed)

    @utils.raises_conn_error
    def create(self, name, project=None, capabilities=None):
        capabilities = {} if capabilities is None else capabilities
        subset_key = utils.flavor_project_subset_key(project)
        set_key = utils.flavor_set_key()
        hash_key = utils.flavor_name_hash_key(name)

        flavors =
self._client.hgetall(hash_key) if len(flavors) == 0: flavors = { 'f': name, 'p': project, 'c': self._packer(capabilities or {}), } # Pipeline ensures atomic inserts. with self._client.pipeline() as pipe: pipe.zadd(set_key, {hash_key: 1}) pipe.zadd(subset_key, {hash_key: 1}) pipe.hmset(hash_key, flavors) pipe.execute() else: with self._client.pipeline() as pipe: pipe.hset(hash_key, "c", self._packer(capabilities)) pipe.hset(hash_key, "p", project) pipe.execute() @utils.raises_conn_error def exists(self, name, project=None): set_key = utils.flavor_set_key() hash_key = utils.flavor_name_hash_key(name) return self._client.zrank(set_key, hash_key) is not None @utils.raises_conn_error def update(self, name, project=None, capabilities=None): hash_key = utils.flavor_name_hash_key(name) with self._client.pipeline() as pipe: pipe.hset(hash_key, "c", self._packer(capabilities)) pipe.hset(hash_key, "p", project) try: pipe.execute() except redis.exceptions.ResponseError: raise errors.FlavorDoesNotExist(name) @utils.raises_conn_error def delete(self, name, project=None): subset_key = utils.flavor_project_subset_key(project) set_key = utils.flavor_set_key() hash_key = utils.flavor_name_hash_key(name) if self._client.zrank(subset_key, hash_key) is not None: with self._client.pipeline() as pipe: pipe.zrem(set_key, hash_key) pipe.zrem(subset_key, hash_key) pipe.delete(hash_key) pipe.execute() @utils.raises_conn_error def drop_all(self): allflavor_key = self._client.keys(pattern='*flavors') if len(allflavor_key) == 0: return with self._client.pipeline() as pipe: for key in allflavor_key: pipe.delete(key) try: pipe.execute() except redis.exceptions.ResponseError: return False def _normalize(self, flavor, detailed=False): ret = { 'name': flavor['f'], } if detailed: ret['capabilities'] = self._unpacker(flavor['c']) return ret ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/redis/messages.py0000664000175100017510000005741015033040005021016 0ustar00mylesmyles# Copyright (c) 2014 Prashanth Raghu. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import functools import uuid from oslo_utils import encodeutils from oslo_utils import timeutils import redis from zaqar.common import decorators from zaqar import storage from zaqar.storage import errors from zaqar.storage.redis import models from zaqar.storage.redis import scripting from zaqar.storage.redis import utils from zaqar.storage import utils as s_utils Message = models.Message MessageEnvelope = models.MessageEnvelope MSGSET_INDEX_KEY = 'msgset_index' # The rank counter is an atomic index to rank messages # in a FIFO manner. MESSAGE_RANK_COUNTER_SUFFIX = 'rank_counter' # NOTE(kgriffs): This value, in seconds, should be at least less than the # minimum allowed TTL for messages (60 seconds). RETRY_POST_TIMEOUT = 10 # TODO(kgriffs): Tune this and/or make it configurable. Don't want # it to be so large that it blocks other operations for more than # 1-2 milliseconds. 
GC_BATCH_SIZE = 100


class MessageController(storage.Message, scripting.Mixin):
    """Implements message resource operations using Redis.

    Messages are scoped by project + queue.

    Redis Data Structures:

    1. Message IDs list (Redis sorted set)

        Each queue in the system has a set of message ids currently
        in the queue. The list is sorted based on a ranking which is
        incremented atomically using the counter
        (MESSAGE_RANK_COUNTER_SUFFIX) also stored in the database for
        every queue.

        Key: <project_id>.<queue_name>.messages

    2. Index of message ID lists (Redis sorted set)

        This is a sorted set that facilitates discovery of all
        the message ID lists. This is necessary when performing
        garbage collection on the IDs contained within these lists.

        Key: msgset_index

    3. Messages (Redis hash):

        Scoped by the UUID of the message, the redis data structure
        has the following information.

        +---------------------+---------+
        |  Name               |  Field  |
        +=====================+=========+
        |  id                 |  id     |
        +---------------------+---------+
        |  ttl                |  t      |
        +---------------------+---------+
        |  expires            |  e      |
        +---------------------+---------+
        |  body               |  b      |
        +---------------------+---------+
        |  claim              |  c      |
        +---------------------+---------+
        |  claim expiry time  |  c.e    |
        +---------------------+---------+
        |  claim count        |  c.c    |
        +---------------------+---------+
        |  client uuid        |  u      |
        +---------------------+---------+
        |  created time       |  cr     |
        +---------------------+---------+
        |  delay expiry time  |  d      |
        +---------------------+---------+
        |  body checksum      |  cs     |
        +---------------------+---------+

    4. Messages rank counter (Redis hash):

        Key: <project_id>.<queue_name>.rank_counter
    """

    script_names = ['index_messages']

    def __init__(self, *args, **kwargs):
        super(MessageController, self).__init__(*args, **kwargs)
        self._client = self.driver.connection

    @decorators.lazy_property(write=False)
    def _queue_ctrl(self):
        return self.driver.queue_controller

    def _index_messages(self, msgset_key, counter_key, message_ids):
        # NOTE(kgriffs): A watch on a pipe could also be used to ensure
        # messages are inserted in order, but that would be less efficient.
        func = self._scripts['index_messages']

        arguments = [len(message_ids)] + message_ids
        func(keys=[msgset_key, counter_key], args=arguments)

    def _count(self, queue, project):
        """Return total number of messages in a queue.

        Note: Some expired messages may be included in the count if
        they haven't been GC'd yet. This is done for performance.
        """
        return self._client.zcard(utils.msgset_key(queue, project))

    def _create_msgset(self, queue, project, pipe):
        pipe.zadd(MSGSET_INDEX_KEY, {utils.msgset_key(queue, project): 1})

    def _delete_msgset(self, queue, project, pipe):
        pipe.zrem(MSGSET_INDEX_KEY, utils.msgset_key(queue, project))

    @utils.raises_conn_error
    @utils.retries_on_connection_error
    def _delete_queue_messages(self, queue, project, pipe):
        """Method to remove all the messages belonging to a queue.

        Will be referenced from the QueueController.
        The pipe to execute deletion will be passed from the
        QueueController executing the operation.
        """
        client = self._client
        msgset_key = utils.msgset_key(queue, project)
        message_ids = client.zrange(msgset_key, 0, -1)

        pipe.delete(msgset_key)
        for msg_id in message_ids:
            pipe.delete(msg_id)

    # TODO(prashanthr_): Look for better ways to solve the issue.
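    # -----------------------------------------------------------------------
    # Editor's note (illustrative sketch, not part of the original class):
    # conceptually, the 'index_messages' Lua script invoked above performs
    # the following steps atomically on the server -- reserve a contiguous
    # block of ranks from the per-queue counter, then ZADD each message id
    # with its rank as the score, preserving FIFO order:
    #
    #     def _index_messages_py(client, msgset_key, counter_key, ids):
    #         # Non-atomic Python equivalent, for illustration only.
    #         last_rank = client.incrby(counter_key, len(ids))
    #         first_rank = last_rank - len(ids) + 1
    #         client.zadd(msgset_key,
    #                     {mid: first_rank + i for i, mid in enumerate(ids)})
    #
    # Doing this client-side would leave a window between INCRBY and ZADD
    # where another producer could interleave; the Lua script closes it.
    # -----------------------------------------------------------------------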
def _find_first_unclaimed(self, queue, project, limit): """Find the first unclaimed message in the queue.""" msgset_key = utils.msgset_key(queue, project) now = timeutils.utcnow_ts() # TODO(kgriffs): Generalize this paging pattern (DRY) offset = 0 while True: msg_keys = self._client.zrange(msgset_key, offset, offset + limit - 1) if not msg_keys: return None offset += len(msg_keys) messages = [MessageEnvelope.from_redis(msg_key, self._client) for msg_key in msg_keys] for msg in messages: if msg and not utils.msg_claimed_filter(msg, now): return msg.id def _exists(self, message_id): """Check if message exists in the Queue.""" return self._client.exists(message_id) def _get_first_message_id(self, queue, project, sort): """Fetch head/tail of the Queue. Helper function to get the first message in the queue sort > 0 get from the left else from the right. """ msgset_key = utils.msgset_key(queue, project) zrange = self._client.zrange if sort == 1 else self._client.zrevrange message_ids = zrange(msgset_key, 0, 0) return message_ids[0] if message_ids else None def _get_claim(self, message_id): """Gets minimal claim doc for a message. :returns: {'id': cid, 'expires': ts} IFF the message is claimed, and that claim has not expired. """ claim = self._client.hmget(message_id, 'c', 'c.e') if claim == [None, None]: # NOTE(kgriffs): message_id was not found return None info = { # NOTE(kgriffs): A "None" claim is serialized as an empty str 'id': encodeutils.safe_decode(claim[0]) or None, 'expires': int(claim[1]), } # Is the message claimed? now = timeutils.utcnow_ts() if info['id'] and (now < info['expires']): return info # Not claimed return None def _list(self, queue, project=None, marker=None, limit=storage.DEFAULT_MESSAGES_PER_PAGE, echo=False, client_uuid=None, include_claimed=False, include_delayed=False, to_basic=True): if not self._queue_ctrl.exists(queue, project): raise errors.QueueDoesNotExist(queue, project) msgset_key = utils.msgset_key(queue, project) client = self._client if not marker and not include_claimed: # NOTE(kgriffs): Skip claimed messages at the head # of the queue; otherwise we would just filter them all # out and likely end up with an empty list to return. marker = self._find_first_unclaimed(queue, project, limit) if marker: start = client.zrank(msgset_key, marker) or 0 else: start = 0 else: rank = client.zrank(msgset_key, marker) start = rank + 1 if rank else 0 message_ids = client.zrange(msgset_key, start, start + (limit - 1)) messages = Message.from_redis_bulk(message_ids, client) # NOTE(prashanthr_): Build a list of filters for checking # the following: # # 1. Message is expired # 2. Message is claimed # 3. Message should not be echoed # now = timeutils.utcnow_ts() filters = [functools.partial(utils.msg_expired_filter, now=now)] if not include_claimed: filters.append(functools.partial(utils.msg_claimed_filter, now=now)) if not include_delayed: filters.append(functools.partial(utils.msg_delayed_filter, now=now)) if not echo: filters.append(functools.partial(utils.msg_echo_filter, client_uuid=client_uuid)) marker = {} yield _filter_messages(messages, filters, to_basic, marker) yield marker['next'] @utils.raises_conn_error @utils.retries_on_connection_error def gc(self): """Garbage-collect expired message data. Not all message data can be automatically expired. This method cleans up the remainder. 
:returns: Number of messages removed """ claim_ctrl = self.driver.claim_controller client = self._client num_removed = 0 offset_msgsets = 0 while True: # NOTE(kgriffs): Iterate across all message sets; there will # be one set of message IDs per queue. msgset_keys = client.zrange(MSGSET_INDEX_KEY, offset_msgsets, offset_msgsets + GC_BATCH_SIZE - 1) if not msgset_keys: break offset_msgsets += len(msgset_keys) for msgset_key in msgset_keys: msgset_key = encodeutils.safe_decode(msgset_key) # NOTE(kgriffs): Drive the claim controller GC from # here, because we already know the queue and project # scope. queue, project = utils.descope_message_ids_set(msgset_key) claim_ctrl._gc(queue, project) offset_mids = 0 while True: # NOTE(kgriffs): Look up each message in the message set, # see if it has expired, and if so, remove it from msgset. mids = client.zrange(msgset_key, offset_mids, offset_mids + GC_BATCH_SIZE - 1) if not mids: break offset_mids += len(mids) # NOTE(kgriffs): If redis expired the message, it will # not exist, so all we have to do is remove mid from # the msgset collection. with client.pipeline() as pipe: for mid in mids: pipe.exists(mid) mid_exists_flags = pipe.execute() with client.pipeline() as pipe: for mid, exists in zip(mids, mid_exists_flags): if not exists: pipe.zrem(msgset_key, mid) num_removed += 1 pipe.execute() return num_removed @utils.raises_conn_error @utils.retries_on_connection_error def list(self, queue, project=None, marker=None, limit=storage.DEFAULT_MESSAGES_PER_PAGE, echo=False, client_uuid=None, include_claimed=False, include_delayed=False): return self._list(queue, project, marker, limit, echo, client_uuid, include_claimed, include_delayed) @utils.raises_conn_error @utils.retries_on_connection_error def first(self, queue, project=None, sort=1): if sort not in (1, -1): raise ValueError('sort must be either 1 (ascending) ' 'or -1 (descending)') message_id = self._get_first_message_id(queue, project, sort) if not message_id: raise errors.QueueIsEmpty(queue, project) message = Message.from_redis(message_id, self._client) if message is None: raise errors.QueueIsEmpty(queue, project) now = timeutils.utcnow_ts() return message.to_basic(now, include_created=True) @utils.raises_conn_error @utils.retries_on_connection_error def get(self, queue, message_id, project=None): if not self._queue_ctrl.exists(queue, project): raise errors.QueueDoesNotExist(queue, project) message = Message.from_redis(message_id, self._client) now = timeutils.utcnow_ts() if message and not utils.msg_expired_filter(message, now): return message.to_basic(now) else: raise errors.MessageDoesNotExist(message_id, queue, project) @utils.raises_conn_error @utils.retries_on_connection_error def bulk_get(self, queue, message_ids, project=None): if not self._queue_ctrl.exists(queue, project): return iter([]) # NOTE(prashanthr_): Pipelining is used here purely # for performance. 
with self._client.pipeline() as pipe: for mid in message_ids: pipe.hgetall(mid) messages = pipe.execute() # NOTE(kgriffs): Skip messages that may have been deleted now = timeutils.utcnow_ts() return (Message.from_hmap(msg).to_basic(now) for msg in messages if msg) @utils.raises_conn_error @utils.retries_on_connection_error def post(self, queue, messages, client_uuid, project=None): msgset_key = utils.msgset_key(queue, project) counter_key = utils.scope_queue_index(queue, project, MESSAGE_RANK_COUNTER_SUFFIX) message_ids = [] now = timeutils.utcnow_ts() with self._client.pipeline() as pipe: for msg in messages: prepared_msg = Message( ttl=msg['ttl'], created=now, client_uuid=client_uuid, claim_id=None, claim_expires=now, claim_count=0, delay_expires=now + msg.get('delay', 0), body=msg.get('body', {}), checksum=s_utils.get_checksum(msg.get('body', None)) if self.driver.conf.enable_checksum else None ) prepared_msg.to_redis(pipe) message_ids.append(prepared_msg.id) pipe.execute() # NOTE(kgriffs): If this call fails, we will return # an error to the client and the messages will be # orphaned, but Redis will remove them when they # expire, so we will just pretend they don't exist # in that case. self._index_messages(msgset_key, counter_key, message_ids) return message_ids @utils.raises_conn_error @utils.retries_on_connection_error def delete(self, queue, message_id, project=None, claim=None): claim_ctrl = self.driver.claim_controller if not self._queue_ctrl.exists(queue, project): return # NOTE(kgriffs): The message does not exist, so # it is essentially "already" deleted. if not self._exists(message_id): return # TODO(kgriffs): Create decorator for validating claim and message # IDs, since those are not checked at the transport layer. This # decorator should be applied to all relevant methods. 
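        # -------------------------------------------------------------------
        # Editor's note (illustrative sketch, not part of the original
        # method): the decorator envisioned by the TODO above could look
        # roughly like this, validating a UUID-shaped 'claim' argument
        # before the controller method runs (the names are hypothetical):
        #
        #     def validates_claim_id(func):
        #         @functools.wraps(func)
        #         def wrapper(self, queue, message_id, project=None,
        #                     claim=None):
        #             if claim is not None:
        #                 try:
        #                     uuid.UUID(claim)
        #                 except ValueError:
        #                     raise errors.ClaimDoesNotExist(
        #                         claim, queue, project)
        #             return func(self, queue, message_id,
        #                         project=project, claim=claim)
        #         return wrapper
        # -------------------------------------------------------------------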
if claim is not None: try: uuid.UUID(claim) except ValueError: raise errors.ClaimDoesNotExist(claim, queue, project) msg_claim = self._get_claim(message_id) is_claimed = (msg_claim is not None) # Authorize the request based on having the correct claim ID if claim is None: if is_claimed: raise errors.MessageIsClaimed(message_id) elif not is_claimed: raise errors.MessageNotClaimed(message_id) elif msg_claim['id'] != claim: if not claim_ctrl._exists(queue, claim, project): raise errors.ClaimDoesNotExist(claim, queue, project) raise errors.MessageNotClaimedBy(message_id, claim) msgset_key = utils.msgset_key(queue, project) with self._client.pipeline() as pipe: pipe.delete(message_id) pipe.zrem(msgset_key, message_id) if is_claimed: claim_ctrl._del_message(queue, project, msg_claim['id'], message_id, pipe) pipe.execute() @utils.raises_conn_error @utils.retries_on_connection_error def bulk_delete(self, queue, message_ids, project=None, claim_ids=None): claim_ctrl = self.driver.claim_controller if not self._queue_ctrl.exists(queue, project): return msgset_key = utils.msgset_key(queue, project) with self._client.pipeline() as pipe: for mid in message_ids: if not self._exists(mid): continue pipe.delete(mid) pipe.zrem(msgset_key, mid) msg_claim = self._get_claim(mid) if claim_ids and msg_claim is None: raise errors.MessageNotClaimed(mid) if msg_claim is not None: if claim_ids and (msg_claim['id'] not in claim_ids): raise errors.ClaimDoesNotMatch(msg_claim['id'], queue, project) claim_ctrl._del_message(queue, project, msg_claim['id'], mid, pipe) pipe.execute() @utils.raises_conn_error @utils.retries_on_connection_error def pop(self, queue, limit, project=None): # Pop is implemented as a chain of the following operations: # 1. Create a claim. # 2. Delete the messages claimed. # 3. Delete the claim. claim_ctrl = self.driver.claim_controller claim_id, messages = claim_ctrl.create( queue, dict(ttl=1, grace=0), project, limit=limit) message_ids = [message['id'] for message in messages] self.bulk_delete(queue, message_ids, project) # NOTE(prashanthr_): Creating a claim controller reference # causes a recursive reference. Hence, using the reference # from the driver. claim_ctrl.delete(queue, claim_id, project) return messages def _filter_messages(messages, filters, to_basic, marker): """Create a filtering iterator over a list of messages. The function accepts a list of filters to be filtered before the message can be included as a part of the reply. """ now = timeutils.utcnow_ts() for msg in messages: # NOTE(kgriffs): Message may have been deleted, so # check each value to ensure we got a message back if msg is None: continue # NOTE(kgriffs): Check to see if any of the filters # indiciate that this message should be skipped. 
for should_skip in filters: if should_skip(msg): break else: marker['next'] = msg.id if to_basic: yield msg.to_basic(now) else: yield msg QUEUES_SET_STORE_NAME = 'queues_set' class MessageQueueHandler(object): def __init__(self, driver, control_driver): self.driver = driver self._client = self.driver.connection self._queue_ctrl = self.driver.queue_controller self._message_ctrl = self.driver.message_controller self._claim_ctrl = self.driver.claim_controller @utils.raises_conn_error def create(self, name, metadata=None, project=None): with self._client.pipeline() as pipe: self._message_ctrl._create_msgset(name, project, pipe) try: pipe.execute() except redis.exceptions.ResponseError: return False @utils.raises_conn_error @utils.retries_on_connection_error def delete(self, name, project=None): with self._client.pipeline() as pipe: self._message_ctrl._delete_msgset(name, project, pipe) self._message_ctrl._delete_queue_messages(name, project, pipe) pipe.execute() @utils.raises_conn_error @utils.retries_on_connection_error def stats(self, name, project=None): if not self._queue_ctrl.exists(name, project=project): raise errors.QueueDoesNotExist(name, project) total = self._message_ctrl._count(name, project) if total: claimed = self._claim_ctrl._count_messages(name, project) else: claimed = 0 message_stats = { 'claimed': claimed, 'free': total - claimed, 'total': total, } if total: try: newest = self._message_ctrl.first(name, project, -1) oldest = self._message_ctrl.first(name, project, 1) except errors.QueueIsEmpty: pass else: message_stats['newest'] = newest message_stats['oldest'] = oldest return {'messages': message_stats} class MessageTopicHandler(object): def __init__(self, driver, control_driver): self.driver = driver self._client = self.driver.connection self._topic_ctrl = self.driver.topic_controller self._message_ctrl = self.driver.message_controller @utils.raises_conn_error def create(self, name, metadata=None, project=None): with self._client.pipeline() as pipe: self._message_ctrl._create_msgset(name, project, pipe) try: pipe.execute() except redis.exceptions.ResponseError: return False @utils.raises_conn_error @utils.retries_on_connection_error def delete(self, name, project=None): with self._client.pipeline() as pipe: self._message_ctrl._delete_msgset(name, project, pipe) self._message_ctrl._delete_queue_messages(name, project, pipe) pipe.execute() @utils.raises_conn_error @utils.retries_on_connection_error def stats(self, name, project=None): if not self._topic_ctrl.exists(name, project=project): raise errors.TopicDoesNotExist(name, project) total = self._message_ctrl._count(name, project) message_stats = { 'total': total } if total: try: newest = self._message_ctrl.first(name, project, -1) oldest = self._message_ctrl.first(name, project, 1) except errors.QueueIsEmpty: pass else: message_stats['newest'] = newest message_stats['oldest'] = oldest return {'messages': message_stats} ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/redis/models.py0000664000175100017510000002376615033040005020501 0ustar00mylesmyles# Copyright (c) 2014 Prashanth Raghu. # Copyright (c) 2015 Catalyst IT Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import functools import uuid import msgpack from oslo_utils import encodeutils from oslo_utils import uuidutils MSGENV_FIELD_KEYS = (b'id', b't', b'cr', b'e', b'u', b'c', b'c.e', b'c.c', b'd', b'cs') SUBENV_FIELD_KEYS = (b'id', b's', b'u', b't', b'e', b'o', b'p', b'c') # TODO(kgriffs): Make similar classes for claims and queues class MessageEnvelope(object): """Encapsulates the message envelope (metadata only, no body). :param id: Message ID in the form of a hexadecimal UUID. If not given, one will be automatically generated. :param ttl: Message TTL in seconds :param created: Message creation time as a UNIX timestamp :param client_uuid: UUID of the client that posted the message :param claim_id: If claimed, the UUID of the claim. Set to None for messages that have never been claimed. :param claim_expires: Claim expiration as a UNIX timestamp """ __slots__ = [ 'id', 'ttl', 'created', 'expires', 'client_uuid', 'claim_id', 'claim_expires', 'claim_count', 'delay_expires', 'checksum', ] def __init__(self, **kwargs): self.id = _validate_uuid4(kwargs.get('id', uuidutils.generate_uuid())) self.ttl = kwargs['ttl'] self.created = kwargs['created'] self.expires = kwargs.get('expires', self.created + self.ttl) self.client_uuid = _validate_uuid4(str(kwargs['client_uuid'])) self.claim_id = kwargs.get('claim_id') if self.claim_id: _validate_uuid4(self.claim_id) self.claim_expires = kwargs['claim_expires'] self.claim_count = kwargs.get('claim_count', 0) self.delay_expires = kwargs.get('delay_expires', 0) self.checksum = kwargs.get('checksum') @staticmethod def from_hmap(hmap): kwargs = _hmap_to_msgenv_kwargs(hmap) return MessageEnvelope(**kwargs) @staticmethod def from_redis(mid, client): values = client.hmget(mid, MSGENV_FIELD_KEYS) # NOTE(kgriffs): If the key does not exist, redis-py returns # an array of None values. if values[0] is None: return None return _hmap_kv_to_msgenv(MSGENV_FIELD_KEYS, values) @staticmethod def from_redis_bulk(message_ids, client): with client.pipeline() as pipe: for mid in message_ids: pipe.hmget(mid, MSGENV_FIELD_KEYS) results = pipe.execute() message_envs = [] for value_list in results: if value_list is None: env = None else: env = _hmap_kv_to_msgenv(MSGENV_FIELD_KEYS, value_list) message_envs.append(env) return message_envs def to_redis(self, pipe): hmap = _msgenv_to_hmap(self) pipe.hmset(self.id, hmap) pipe.expire(self.id, self.ttl) class SubscriptionEnvelope(object): """Encapsulates the subscription envelope.""" __slots__ = [ 'id', 'source', 'subscriber', 'ttl', 'expires', 'options', 'project', 'confirmed', ] def __init__(self, **kwargs): self.id = kwargs.get('id', uuidutils.generate_uuid()) self.source = kwargs['source'] self.subscriber = kwargs['subscriber'] self.ttl = kwargs['ttl'] self.expires = kwargs.get('expires', float('inf')) self.options = kwargs['options'] self.confirmed = kwargs.get('confirmed', 1) @staticmethod def from_redis(sid, client): values = client.hmget(sid, SUBENV_FIELD_KEYS) # NOTE(kgriffs): If the key does not exist, redis-py returns # an array of None values. 
if values[0] is None: return None return _hmap_kv_to_subenv(SUBENV_FIELD_KEYS, values) def to_redis(self, pipe): hmap = _subenv_to_hmap(self) pipe.hmset(self.id, hmap) pipe.expire(self.id, self.ttl) def to_basic(self, now): created = self.expires - self.ttl is_confirmed = bool(self.confirmed) basic_msg = { 'id': self.id, 'source': encodeutils.safe_decode(self.source), 'subscriber': encodeutils.safe_decode(self.subscriber), 'ttl': self.ttl, 'age': now - created, 'options': self.options, 'confirmed': is_confirmed, } return basic_msg # NOTE(kgriffs): This could have implemented MessageEnvelope functionality # by adding an "include_body" param to all the methods, but then you end # up with tons of if statements that make the code rather ugly. class Message(MessageEnvelope): """Represents an entire message, including envelope and body. :param id: Message ID in the form of a hexadecimal UUID. If not given, one will be automatically generated. :param ttl: Message TTL in seconds :param created: Message creation time as a UNIX timestamp :param client_uuid: UUID of the client that posted the message :param claim_id: If claimed, the UUID of the claim. Set to None for messages that have never been claimed. :param claim_expires: Claim expiration as a UNIX timestamp :param body: Message payload. Must be serializable to mspack. """ __slots__ = MessageEnvelope.__slots__ + ['body'] def __init__(self, **kwargs): super(Message, self).__init__(**kwargs) self.body = kwargs['body'] @staticmethod def from_hmap(hmap): kwargs = _hmap_to_msgenv_kwargs(hmap) kwargs['body'] = _unpack(hmap[b'b']) return Message(**kwargs) @staticmethod def from_redis(mid, client): hmap = client.hgetall(mid) return Message.from_hmap(hmap) if hmap else None @staticmethod def from_redis_bulk(message_ids, client): with client.pipeline() as pipe: for mid in message_ids: pipe.hgetall(mid) results = pipe.execute() messages = [Message.from_hmap(hmap) if hmap else None for hmap in results] return messages def to_redis(self, pipe, include_body=True): if not include_body: super(Message, self).to_redis(pipe) hmap = _msgenv_to_hmap(self) hmap['b'] = _pack(self.body) pipe.hmset(self.id, hmap) pipe.expire(self.id, self.ttl) def to_basic(self, now, include_created=False): basic_msg = { 'id': self.id, 'age': now - self.created, 'ttl': self.ttl, 'body': self.body, 'claim_id': self.claim_id, 'claim_count': self.claim_count, } if include_created: created_iso = datetime.datetime.fromtimestamp( self.created, tz=datetime.timezone.utc).replace( tzinfo=None).strftime('%Y-%m-%dT%H:%M:%SZ') basic_msg['created'] = created_iso if self.checksum: basic_msg['checksum'] = self.checksum return basic_msg # ========================================================================== # Helpers # ========================================================================== _pack = msgpack.Packer(use_bin_type=True).pack _unpack = functools.partial(msgpack.unpackb) def _hmap_kv_to_msgenv(keys, values): hmap = dict(zip(keys, values)) kwargs = _hmap_to_msgenv_kwargs(hmap) return MessageEnvelope(**kwargs) def _hmap_to_msgenv_kwargs(hmap): claim_id = hmap[b'c'] if claim_id: claim_id = encodeutils.safe_decode(claim_id) else: claim_id = None # NOTE(kgriffs): Under Py3K, redis-py converts all strings # into binary. Woohoo! 
    res = {
        'id': encodeutils.safe_decode(hmap[b'id']),
        'ttl': int(hmap[b't']),
        'created': int(hmap[b'cr']),
        'expires': int(hmap[b'e']),
        'client_uuid': encodeutils.safe_decode(hmap[b'u']),
        'claim_id': claim_id,
        'claim_expires': int(hmap[b'c.e']),
        'claim_count': int(hmap[b'c.c']),
        'delay_expires': int(hmap.get(b'd', 0))
    }
    checksum = hmap.get(b'cs')
    if checksum:
        res['checksum'] = encodeutils.safe_decode(hmap[b'cs'])
    return res


def _msgenv_to_hmap(msg):
    res = {
        'id': msg.id,
        't': msg.ttl,
        'cr': msg.created,
        'e': msg.expires,
        'u': msg.client_uuid,
        'c': msg.claim_id or '',
        'c.e': msg.claim_expires,
        'c.c': msg.claim_count,
        'd': msg.delay_expires
    }
    if msg.checksum:
        res['cs'] = msg.checksum
    return res


def _hmap_kv_to_subenv(keys, values):
    hmap = dict(zip(keys, values))
    kwargs = _hmap_to_subenv_kwargs(hmap)
    return SubscriptionEnvelope(**kwargs)


def _hmap_to_subenv_kwargs(hmap):
    # NOTE(kgriffs): Under Py3K, redis-py converts all strings
    # into binary. Woohoo!
    return {
        'id': encodeutils.safe_decode(hmap[b'id']),
        'source': encodeutils.safe_decode(hmap[b's']),
        'subscriber': encodeutils.safe_decode(hmap[b'u']),
        'ttl': int(hmap[b't']),
        'expires': int(hmap[b'e']),
        'options': _unpack(hmap[b'o']),
        'confirmed': int(hmap[b'c'])
    }


def _subenv_to_hmap(msg):
    return {
        'id': msg.id,
        's': msg.source,
        'u': msg.subscriber,
        't': msg.ttl,
        'e': msg.expires,
        'o': msg.options
    }


def _validate_uuid4(_uuid):
    uuid.UUID(str(_uuid), version=4)
    return _uuid
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/redis/pools.py0000664000175100017510000002252515033040005020342 0ustar00mylesmyles# Copyright (c) 2017 ZTE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""pools: an implementation of the pool management storage
controller for redis.

Schema:

'n': name :: str
'u': uri :: str
'w': weight :: int
'o': options :: dict
"""

import functools

import msgpack
from oslo_log import log as logging
import redis

from zaqar.common import utils as common_utils
from zaqar.storage import base
from zaqar.storage import errors
from zaqar.storage.redis import utils

LOG = logging.getLogger(__name__)


class PoolsController(base.PoolsBase):
    """Implements Pools resource operations using Redis.

    * All pools (Redis sorted set):

        Set of all pool_ids, ordered by name. Used to delete all
        records from the pools table.

        Key: pools

        +--------+-----------------------------+
        |  Id    |  Value                      |
        +========+=============================+
        |  name  |  <pool_name>.pools          |
        +--------+-----------------------------+

    * Flavor Index (Redis sorted set):

        Set of all pool_ids for the given flavor, ordered by name.
        Key: <flavor>.pools

        +--------+-----------------------------+
        |  Id    |  Value                      |
        +========+=============================+
        |  name  |  <pool_name>.pools          |
        +--------+-----------------------------+

    * Pools Information (Redis hash):

        Key: <pool_name>.pools

        +----------------------+---------+
        |  Name                |  Field  |
        +======================+=========+
        |  pool                |  pl     |
        +----------------------+---------+
        |  uri                 |  u      |
        +----------------------+---------+
        |  weight              |  w      |
        +----------------------+---------+
        |  options             |  o      |
        +----------------------+---------+
        |  flavor              |  f      |
        +----------------------+---------+
    """

    def __init__(self, *args, **kwargs):
        super(PoolsController, self).__init__(*args, **kwargs)
        self._client = self.driver.connection
        self.flavor_ctl = self.driver.flavors_controller
        self._packer = msgpack.Packer(use_bin_type=True).pack
        self._unpacker = functools.partial(msgpack.unpackb)

    @utils.raises_conn_error
    @utils.retries_on_connection_error
    def _list(self, marker=None, limit=10, detailed=False):
        client = self._client
        set_key = utils.pools_set_key()
        marker_key = utils.pools_name_hash_key(marker)
        if marker_key:
            rank = client.zrank(set_key, marker_key)
        else:
            rank = None
        start = rank + 1 if rank is not None else 0

        cursor = (f for f in client.zrange(set_key, start,
                                           start + limit - 1))
        marker_next = {}

        def normalizer(pools):
            marker_next['next'] = pools['pl']
            return self._normalize(pools, detailed=detailed)

        yield utils.PoolsListCursor(self._client, cursor, normalizer)
        yield marker_next and marker_next['next']

    @utils.raises_conn_error
    @utils.retries_on_connection_error
    def _get(self, name, detailed=False):
        pool_key = utils.pools_name_hash_key(name)
        pool = self._client.hgetall(pool_key)
        if pool is None or len(pool) == 0:
            raise errors.PoolDoesNotExist(name)
        return self._normalize(pool, detailed)

    @utils.raises_conn_error
    @utils.retries_on_connection_error
    def _get_pools_by_flavor(self, flavor=None, detailed=False):
        cursor = None
        if flavor is None or flavor.get('name') is None:
            set_key = utils.pools_set_key()
            cursor = (pl for pl in self._client.zrange(set_key, 0, -1))
        elif flavor.get('name') is not None:
            subset_key = utils.pools_subset_key(flavor['name'])
            cursor = (pl for pl in self._client.zrange(subset_key, 0, -1))
        if cursor is None:
            return []
        normalizer = functools.partial(self._normalize, detailed=detailed)
        return utils.PoolsListCursor(self._client, cursor, normalizer)

    @utils.raises_conn_error
    @utils.retries_on_connection_error
    def _create(self, name, weight, uri, group=None, flavor=None,
                options=None):
        if group is not None:
            raise errors.PoolRedisNotSupportGroup

        flavor = flavor if flavor is not None else None
        options = {} if options is None else options
        pool_key = utils.pools_name_hash_key(name)
        subset_key = utils.pools_subset_key(flavor)
        set_key = utils.pools_set_key()
        if self._exists(name):
            self._update(name, weight=weight, uri=uri,
                         flavor=flavor, options=options)
            return

        pool = {
            'pl': name,
            'u': uri,
            'w': weight,
            'o': self._packer(options),
            'f': flavor
        }
        # Pipeline ensures atomic inserts.
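        # NOTE: clarifying comment (not in the original source): redis-py
        # pipelines default to transaction=True, so the ZADD/HMSET calls
        # below are wrapped in MULTI/EXEC and a concurrent reader can
        # never observe the pool hash without its index entry, or vice
        # versa.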
        with self._client.pipeline() as pipe:
            pipe.zadd(set_key, {pool_key: 1})
            if flavor is not None:
                pipe.zadd(subset_key, {pool_key: 1})
            pipe.hmset(pool_key, pool)
            pipe.execute()

    @utils.raises_conn_error
    @utils.retries_on_connection_error
    def _exists(self, name):
        pool_key = utils.pools_name_hash_key(name)
        return self._client.exists(pool_key)

    @utils.raises_conn_error
    @utils.retries_on_connection_error
    def _update(self, name, **kwargs):
        names = ('uri', 'weight', 'flavor', 'options')
        fields = common_utils.fields(kwargs, names,
                                     pred=lambda x: x is not None,
                                     key_transform=lambda x: x[0])
        assert fields, ('`weight`, `uri`, `flavor`, '
                        'or `options` not found in kwargs')

        if 'o' in fields:
            new_options = fields.get('o', None)
            fields['o'] = self._packer(new_options)

        pool_key = utils.pools_name_hash_key(name)
        # (gengchc2): Pipeline ensures atomic inserts.
        with self._client.pipeline() as pipe:
            # (gengchc2): If the flavor is changed, we need to move the
            # pool key to the new flavor's pools subset.
            if 'f' in fields:
                flavor_old = self._get(name).get('flavor')
                flavor_new = fields['f']
                if flavor_old != flavor_new:
                    if flavor_new is not None:
                        new_subset_key = utils.pools_subset_key(flavor_new)
                        pipe.zadd(new_subset_key, {pool_key: 1})
                    # (gengchc2): remove the pool from the
                    # flavor_old.pools subset
                    if flavor_old is not None:
                        old_subset_key = utils.pools_subset_key(flavor_old)
                        pipe.zrem(old_subset_key, pool_key)
            pipe.hmset(pool_key, fields)
            pipe.execute()

    @utils.raises_conn_error
    @utils.retries_on_connection_error
    def _delete(self, name):
        try:
            pool = self.get(name)
            flavor = pool.get("flavor", None)
            # NOTE(gengchc2): If this is the only pool in the
            # flavor and it's being used by a flavor, don't allow
            # it to be deleted.
            if flavor is not None:
                flavor1 = {}
                flavor1['name'] = flavor
                pools_in_flavor = list(self.get_pools_by_flavor(
                    flavor=flavor1))
                if self.flavor_ctl.exists(flavor)\
                        and len(pools_in_flavor) == 1:
                    raise errors.PoolInUseByFlavor(name, flavor)
            pool_key = utils.pools_name_hash_key(name)
            subset_key = utils.pools_subset_key(flavor)
            set_key = utils.pools_set_key()
            with self._client.pipeline() as pipe:
                if flavor is not None:
                    pipe.zrem(subset_key, pool_key)
                pipe.zrem(set_key, pool_key)
                pipe.delete(pool_key)
                pipe.execute()
        except errors.PoolDoesNotExist:
            pass

    @utils.raises_conn_error
    @utils.retries_on_connection_error
    def _drop_all(self):
        poolsobj_key = self._client.keys(pattern='*pools')
        if len(poolsobj_key) == 0:
            return
        with self._client.pipeline() as pipe:
            for key in poolsobj_key:
                pipe.delete(key)
            try:
                pipe.execute()
            except redis.exceptions.ResponseError:
                return False

    def _normalize(self, pool, detailed=False):
        ret = {
            'name': pool['pl'],
            'uri': pool['u'],
            'weight': int(pool['w']),
            'flavor': pool['f']
        }
        if detailed:
            ret['options'] = self._unpacker(pool['o'])

        return ret
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/redis/queues.py0000664000175100017510000001512115033040005020507 0ustar00mylesmyles# Copyright (c) 2014 Prashanth Raghu.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools

import msgpack
from oslo_utils import timeutils
import redis

from zaqar.common import decorators
from zaqar import storage
from zaqar.storage import errors
from zaqar.storage.redis import utils

QUEUES_SET_STORE_NAME = 'queues_set'
MESSAGE_IDS_SUFFIX = 'messages'


class QueueController(storage.Queue):
    """Implements queue resource operations using Redis.

    Queues are scoped by project, which is prefixed to the
    queue name.

    Redis Data Structures:

    1. Queue Index (Redis sorted set):

        Set of all queues for the given project, ordered by name.

        Key: <project_id>.queues_set

        +--------+-----------------------------+
        |  Id    |  Value                      |
        +========+=============================+
        |  name  |  <project_id>.<queue_name>  |
        +--------+-----------------------------+

    2. Queue Information (Redis hash):

        Key: <project_id>.<queue_name>

        +----------------------+---------+
        |  Name                |  Field  |
        +======================+=========+
        |  metadata            |  m      |
        +----------------------+---------+
        |  creation timestamp  |  t      |
        +----------------------+---------+
    """

    def __init__(self, *args, **kwargs):
        super(QueueController, self).__init__(*args, **kwargs)
        self._client = self.driver.connection
        self._packer = msgpack.Packer(use_bin_type=True).pack
        self._unpacker = functools.partial(msgpack.unpackb)

    @decorators.lazy_property(write=False)
    def _claim_ctrl(self):
        return self.driver.claim_controller

    @decorators.lazy_property(write=False)
    def _subscription_ctrl(self):
        return self.driver.subscription_controller

    def _get_queue_info(self, queue_key, fields, transform=str):
        """Get one or more fields from Queue Info."""

        values = self._client.hmget(queue_key, fields)
        return [transform(v) for v in values] if transform else values

    @utils.raises_conn_error
    @utils.retries_on_connection_error
    def _list(self, project=None, kfilter={}, marker=None,
              limit=storage.DEFAULT_QUEUES_PER_PAGE, detailed=False,
              name=None):
        client = self._client
        qset_key = utils.scope_queue_name(QUEUES_SET_STORE_NAME, project)
        marker = utils.scope_queue_name(marker, project)
        if marker:
            rank = client.zrank(qset_key, marker)
        else:
            rank = None
        start = rank + 1 if rank is not None else 0

        cursor = (q for q in client.zrange(qset_key, start,
                                           start + limit - 1))
        marker_next = {}

        def denormalizer(info, name):
            queue = {'name': utils.descope_queue_name(name)}
            marker_next['next'] = queue['name']
            if detailed:
                queue['metadata'] = self._unpacker(info[1])

            return queue

        yield utils.QueueListCursor(self._client, cursor, denormalizer)
        yield marker_next and marker_next['next']

    def _get(self, name, project=None):
        """Obtain the metadata from the queue."""
        try:
            return self.get_metadata(name, project)
        except errors.QueueDoesNotExist:
            return {}

    @utils.raises_conn_error
    def _create(self, name, metadata=None, project=None):
        # TODO(prashanthr_): Implement as a lua script.
        queue_key = utils.scope_queue_name(name, project)
        qset_key = utils.scope_queue_name(QUEUES_SET_STORE_NAME, project)

        # Check if the queue already exists.
        if self._exists(name, project):
            return False

        queue = {
            'c': 0,
            'cl': 0,
            'm': self._packer(metadata or {}),
            't': timeutils.utcnow_ts()
        }

        # Pipeline ensures atomic inserts.
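        # NOTE: clarifying comment (not in the original source): every
        # member is added with the same score (1), so ZRANGE orders
        # equal-scored members lexicographically, which is what yields
        # the name-ordered listing described in the class docstring.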
with self._client.pipeline() as pipe: pipe.zadd(qset_key, {queue_key: 1}).hmset(queue_key, queue) try: pipe.execute() except redis.exceptions.ResponseError: return False return True @utils.raises_conn_error @utils.retries_on_connection_error def _exists(self, name, project=None): # TODO(prashanthr_): Cache this lookup queue_key = utils.scope_queue_name(name, project) qset_key = utils.scope_queue_name(QUEUES_SET_STORE_NAME, project) return self._client.zrank(qset_key, queue_key) is not None @utils.raises_conn_error @utils.retries_on_connection_error def set_metadata(self, name, metadata, project=None): if not self.exists(name, project): raise errors.QueueDoesNotExist(name, project) key = utils.scope_queue_name(name, project) fields = {'m': self._packer(metadata)} self._client.hmset(key, fields) @utils.raises_conn_error @utils.retries_on_connection_error def get_metadata(self, name, project=None): if not self.exists(name, project): raise errors.QueueDoesNotExist(name, project) queue_key = utils.scope_queue_name(name, project) metadata = self._get_queue_info(queue_key, b'm', None)[0] return self._unpacker(metadata) @utils.raises_conn_error @utils.retries_on_connection_error def _delete(self, name, project=None): queue_key = utils.scope_queue_name(name, project) qset_key = utils.scope_queue_name(QUEUES_SET_STORE_NAME, project) # NOTE(prashanthr_): Pipelining is used to mitigate race conditions with self._client.pipeline() as pipe: pipe.zrem(qset_key, queue_key) pipe.delete(queue_key) pipe.execute() @utils.raises_conn_error @utils.retries_on_connection_error def _stats(self, name, project=None): pass @utils.raises_conn_error @utils.retries_on_connection_error def _calculate_resource_count(self, project=None): client = self._client qset_key = utils.scope_queue_name(QUEUES_SET_STORE_NAME, project) return client.zlexcount(qset_key, '-', '+') ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/redis/scripting.py0000664000175100017510000000223515033040005021204 0ustar00mylesmyles# Copyright (c) 2014 Rackspace Hosting, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
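# NOTE: illustrative usage (not part of the original module; the
# controller below is only an example): a storage controller mixes this
# class in and declares the Lua files it needs, e.g.
#
#     class MessageController(storage.Message, scripting.Mixin):
#         script_names = ['claim_messages', 'index_messages']
#
# after which self._scripts['index_messages'](keys=[...], args=[...])
# invokes the registered script through redis-py's callable Script
# object.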
import os

from zaqar.common import decorators


class Mixin(object):
    script_names = []

    @decorators.lazy_property(write=False)
    def _scripts(self):
        scripts = {}

        for name in self.script_names:
            script = _read_script(name)
            scripts[name] = self._client.register_script(script)

        return scripts


def _read_script(script_name):
    folder = os.path.abspath(os.path.dirname(__file__))
    filename = os.path.join(folder, 'scripts', script_name + '.lua')

    with open(filename, 'r') as script_file:
        return script_file.read()
././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5730135 zaqar-20.1.0.dev29/zaqar/storage/redis/scripts/0000775000175100017510000000000015033040026020320 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/redis/scripts/claim_messages.lua0000664000175100017510000000657315033040005024007 0ustar00mylesmyles--[[

Copyright (c) 2014 Rackspace Hosting, Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied.
See the License for the specific language governing permissions and
limitations under the License.

--]]

-- Read params
local msgset_key = KEYS[1]

local now = tonumber(ARGV[1])
local limit = tonumber(ARGV[2])
local claim_id = ARGV[3]
local claim_expires = tonumber(ARGV[4])
local msg_ttl = tonumber(ARGV[5])
local msg_expires = tonumber(ARGV[6])

-- Scan for up to 'limit' unclaimed messages
local BATCH_SIZE = 100

local start = 0
local claimed_msgs = {}
local msg_ids_to_cleanup = {}

local found_unclaimed = false

while (#claimed_msgs < limit) do
    local stop = (start + BATCH_SIZE - 1)
    local msg_ids = redis.call('ZRANGE', msgset_key, start, stop)

    if (#msg_ids == 0) then
        break
    end

    start = start + BATCH_SIZE

    -- TODO(kgriffs): Try moving claimed IDs to a different set
    -- to avoid scanning through already-claimed messages.
    for i, mid in ipairs(msg_ids) do
        -- NOTE(kgriffs): Since execution of this script cannot
        -- happen in parallel, once we find the first unclaimed
        -- message, the remaining messages will always be
        -- unclaimed as well.
        if not found_unclaimed then
            local msg = redis.call('HMGET', mid, 'c', 'c.e', 'd')
            if msg[1] == false and msg[2] == false then
                -- NOTE(Eva-i): It means the message expired and does
                -- not actually exist anymore; we must later garbage
                -- collect its ID from the set and move on.
                msg_ids_to_cleanup[#msg_ids_to_cleanup + 1] = mid
            elseif (msg[1] == '' or tonumber(msg[2]) <= now) and
                    tonumber(msg[3]) <= now then
                -- NOTE(cdyangzhenyu): If the message's delay time has
                -- not expired, the message cannot be claimed.
                found_unclaimed = true
            end
        end

        if found_unclaimed then
            -- Found an unclaimed message, so claim it.
            local msg_expires_prev = redis.call('HGET', mid, 'e')

            if msg_expires_prev ~= false then
                -- NOTE(Eva-i): Condition above means the message is not
                -- expired and we really can claim it.
                redis.call('HMSET', mid,
                           'c', claim_id,
                           'c.e', claim_expires)

                -- Will the message expire early?
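                -- NOTE: illustrative timeline (not in the original
                -- script): if the message would expire at t=100 but the
                -- new claim lasts until t=160, the branch below extends
                -- the message's TTL and expiration so that it cannot
                -- vanish while still claimed.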
if tonumber(msg_expires_prev) < claim_expires then redis.call('HMSET', mid, 't', msg_ttl, 'e', msg_expires) end claimed_msgs[#claimed_msgs + 1] = mid if (#claimed_msgs == limit) then break end end end end end if (#msg_ids_to_cleanup ~= 0) then -- Garbage collect expired message IDs stored in msgset_key. redis.call('ZREM', msgset_key, unpack(msg_ids_to_cleanup)) end return claimed_msgs ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/redis/scripts/index_messages.lua0000664000175100017510000000212015033040005024011 0ustar00mylesmyles--[[ Copyright (c) 2014 Rackspace Hosting, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --]] -- Read params local msgset_key = KEYS[1] local counter_key = KEYS[2] local num_message_ids = tonumber(ARGV[1]) -- Get next rank value local rank_counter = tonumber(redis.call('GET', counter_key) or 1) -- Add ranked message IDs local zadd_args = {'ZADD', msgset_key} for i = 0, (num_message_ids - 1) do zadd_args[#zadd_args+1] = rank_counter + i zadd_args[#zadd_args+1] = ARGV[2 + i] end redis.call(unpack(zadd_args)) -- Set next rank value return redis.call('SET', counter_key, rank_counter + num_message_ids) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/redis/subscriptions.py0000664000175100017510000002607515033040005022121 0ustar00mylesmyles# Copyright (c) 2015 Catalyst IT Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. import functools import msgpack from oslo_utils import encodeutils from oslo_utils import timeutils from oslo_utils import uuidutils import redis from zaqar.common import utils as common_utils from zaqar.storage import base from zaqar.storage import errors from zaqar.storage.redis import models from zaqar.storage.redis import utils SubscriptionEnvelope = models.SubscriptionEnvelope SUBSET_INDEX_KEY = 'subset_index' SUBSCRIPTION_IDS_SUFFIX = 'subscriptions' class SubscriptionController(base.Subscription): """Implements subscription resource operations using Redis. Subscriptions are unique by project + queue + subscriber. 
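    For example, a second subscription that reuses an existing subscriber
    URI for the same project and queue is rejected as a duplicate (see
    _is_duplicated_subscriber below).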
    Schema:

    'id': id :: str
    's': source :: str
    'u': subscriber :: str
    't': ttl :: int
    'e': expires :: int
    'o': options :: dict
    'p': project :: str
    'c': confirmed :: int
    """

    def __init__(self, *args, **kwargs):
        super(SubscriptionController, self).__init__(*args, **kwargs)
        self._client = self.driver.connection
        self._packer = msgpack.Packer(use_bin_type=True).pack
        self._unpacker = functools.partial(msgpack.unpackb)

    @utils.raises_conn_error
    @utils.retries_on_connection_error
    def list(self, queue, project=None, marker=None, limit=10):
        client = self._client
        subset_key = utils.scope_subscription_ids_set(queue,
                                                      project,
                                                      SUBSCRIPTION_IDS_SUFFIX)
        if marker:
            rank = client.zrank(subset_key, marker)
        else:
            rank = None
        start = rank + 1 if rank is not None else 0

        cursor = (q for q in client.zrange(subset_key, start,
                                           start + limit - 1))
        marker_next = {}

        def denormalizer(record, sid):
            now = timeutils.utcnow_ts()
            ttl = int(record[2])
            expires = int(record[3])
            created = expires - ttl
            is_confirmed = 1
            if len(record) == 6:
                is_confirmed = int(record[5])
            ret = {
                'id': sid,
                'source': encodeutils.safe_decode(record[0]),
                'subscriber': encodeutils.safe_decode(record[1]),
                'ttl': ttl,
                'age': now - created,
                'options': self._unpacker(record[4]),
                'confirmed': bool(is_confirmed),
            }
            marker_next['next'] = sid

            return ret

        yield utils.SubscriptionListCursor(self._client, cursor,
                                           denormalizer)
        yield marker_next and marker_next['next']

    @utils.raises_conn_error
    @utils.retries_on_connection_error
    def get(self, queue, subscription_id, project=None):
        subscription = None
        if self.exists(queue, subscription_id, project):
            subscription = SubscriptionEnvelope.from_redis(subscription_id,
                                                           self._client)
        if subscription:
            now = timeutils.utcnow_ts()
            return subscription.to_basic(now)
        else:
            raise errors.SubscriptionDoesNotExist(subscription_id)

    @utils.raises_conn_error
    @utils.retries_on_connection_error
    def create(self, queue, subscriber, ttl, options, project=None):
        subscription_id = uuidutils.generate_uuid()
        subset_key = utils.scope_subscription_ids_set(queue,
                                                      project,
                                                      SUBSCRIPTION_IDS_SUFFIX)

        source = queue
        now = timeutils.utcnow_ts()
        expires = now + ttl
        confirmed = 0

        subscription = {'id': subscription_id,
                        's': source,
                        'u': subscriber,
                        't': ttl,
                        'e': expires,
                        'o': self._packer(options),
                        'p': project,
                        'c': confirmed}

        try:
            # Pipeline ensures atomic inserts.
            with self._client.pipeline() as pipe:
                if not self._is_duplicated_subscriber(subscriber,
                                                      queue,
                                                      project):
                    pipe.zadd(subset_key, {subscription_id: 1}).hmset(
                        subscription_id, subscription)
                    pipe.expire(subscription_id, ttl)
                    pipe.execute()
                else:
                    return None
            return subscription_id
        except redis.exceptions.ResponseError:
            return None

    def _is_duplicated_subscriber(self, subscriber, queue, project):
        """Check whether the subscriber already exists.

        Given the limitations of Redis' expire(), it's hard to
        auto-expire the subscriber from the set and the subscription id
        from the sorted set, so this method does an ugly duplication
        check when adding a new subscription, sparing us a separate set
        keyed by subscriber. As a side effect, this method also removes
        unreachable subscription ids from the sorted set.
        """
        subset_key = utils.scope_subscription_ids_set(queue,
                                                      project,
                                                      SUBSCRIPTION_IDS_SUFFIX)
        try:
            sub_ids = (q for q in self._client.zrange(subset_key, 0, -1))
            for s_id in sub_ids:
                subscription = self._client.hmget(s_id,
                                                  ['s', 'u', 't', 'o', 'c'])
                if subscription == [None, None, None, None, None]:
                    # NOTE(flwang): Under this check, that means the
                    # subscription has expired. So redis can't get the
                    # subscription but the id is still there.
So let's # delete the id for clean up. self._client.zrem(subset_key, s_id) if encodeutils.safe_decode(subscription[1]) == subscriber: return True return False except redis.exceptions.ResponseError: return True @utils.raises_conn_error @utils.retries_on_connection_error def exists(self, queue, subscription_id, project=None): subset_key = utils.scope_subscription_ids_set(queue, project, SUBSCRIPTION_IDS_SUFFIX) return self._client.zrank(subset_key, subscription_id) is not None @utils.raises_conn_error @utils.retries_on_connection_error def update(self, queue, subscription_id, project=None, **kwargs): names = ('subscriber', 'ttl', 'options') key_transform = lambda x: 'u' if x == 'subscriber' else x[0] fields = common_utils.fields(kwargs, names, pred=lambda x: x is not None, key_transform=key_transform) assert fields, ('`subscriber`, `ttl`, ' 'or `options` not found in kwargs') # Let's get our subscription by ID. If it does not exist, # SubscriptionDoesNotExist error will be raised internally. subscription_to_update = self.get(queue, subscription_id, project=project) new_subscriber = fields.get('u') # Let's do some checks to prevent subscription duplication. if new_subscriber: # Check if 'new_subscriber' is really new for our subscription. if subscription_to_update['subscriber'] != new_subscriber: # It's new. We should raise error if this subscriber already # exists for the queue and project. if self._is_duplicated_subscriber(new_subscriber, queue, project): raise errors.SubscriptionAlreadyExists() # NOTE(Eva-i): if there are new options, we need to pack them before # sending to the database. new_options = fields.get('o') if new_options is not None: fields['o'] = self._packer(new_options) new_ttl = fields.get('t') if new_ttl is not None: now = timeutils.utcnow_ts() expires = now + new_ttl fields['e'] = expires # Pipeline ensures atomic inserts. with self._client.pipeline() as pipe: pipe.hmset(subscription_id, fields) if new_ttl is not None: pipe.expire(subscription_id, new_ttl) pipe.execute() @utils.raises_conn_error @utils.retries_on_connection_error def delete(self, queue, subscription_id, project=None): subset_key = utils.scope_subscription_ids_set(queue, project, SUBSCRIPTION_IDS_SUFFIX) if self._client.zrank(subset_key, subscription_id) is not None: # NOTE(prashanthr_): Pipelining is used to mitigate race conditions with self._client.pipeline() as pipe: pipe.zrem(subset_key, subscription_id) pipe.delete(subscription_id) pipe.execute() @utils.raises_conn_error @utils.retries_on_connection_error def get_with_subscriber(self, queue, subscriber, project=None): subset_key = utils.scope_subscription_ids_set(queue, project, SUBSCRIPTION_IDS_SUFFIX) sub_ids = (q for q in self._client.zrange(subset_key, 0, -1)) for s_id in sub_ids: subscription = self._client.hmget(s_id, ['s', 'u', 't', 'o', 'c']) if encodeutils.safe_decode(subscription[1]) == subscriber: subscription = SubscriptionEnvelope.from_redis(s_id, self._client) now = timeutils.utcnow_ts() return subscription.to_basic(now) @utils.raises_conn_error @utils.retries_on_connection_error def confirm(self, queue, subscription_id, project=None, confirmed=True): # Let's get our subscription by ID. If it does not exist, # SubscriptionDoesNotExist error will be raised internally. 
        self.get(queue, subscription_id, project=project)

        confirmed = 1 if confirmed else 0
        fields = {'c': confirmed}

        with self._client.pipeline() as pipe:
            pipe.hmset(subscription_id, fields)
            pipe.execute()
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/redis/utils.py0000664000175100017510000003045615033040005020350 0ustar00mylesmyles# Copyright (c) 2014 Prashanth Raghu.
# Copyright (c) 2015 Catalyst IT Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import functools
import sys
import time

from oslo_log import log as logging
from oslo_utils import encodeutils
import redis

from zaqar.storage import errors

LOG = logging.getLogger(__name__)

MESSAGE_IDS_SUFFIX = 'messages'
SUBSCRIPTION_IDS_SUFFIX = 'subscriptions'
FLAVORS_IDS_SUFFIX = 'flavors'
POOLS_IDS_SUFFIX = 'pools'


def descope_queue_name(scoped_name):
    """Descope Queue name with '.'.

    Returns the queue name from the scoped name
    which is of the form project-id.queue-name
    """

    return scoped_name.split('.')[1]


def normalize_none_str(string_or_none):
    """Returns '' IFF given value is None, passthrough otherwise.

    This function normalizes None to the empty string to facilitate
    string concatenation when a variable could be None.
    """

    # TODO(prashanthr_) : Try to reuse this utility. Violates DRY
    return '' if string_or_none is None else string_or_none


def scope_queue_name(queue=None, project=None):
    """Returns a scoped name for a queue based on project and queue.

    If only the project name is specified, a scope signifying
    "all queues" for that project is returned. If neither queue
    nor project are specified, a scope for "all global queues"
    is returned, which is to be interpreted as excluding queues
    scoped by project.

    :returns: '{project}.{queue}' if project and queue are given,
        '{project}.' if ONLY project is given, '.{queue}' if ONLY
        queue is given, and '.' if neither are given.
    """

    # TODO(prashanthr_) : Try to reuse this utility. Violates DRY
    return normalize_none_str(project) + '.' + normalize_none_str(queue)


# NOTE(prashanthr_): Alias the scope_queue_name function
# to be used in the pools and claims controllers, as similar
# functionality is required to scope redis ids.
scope_pool_catalogue = scope_claim_messages = scope_queue_name


def scope_message_ids_set(queue=None, project=None, message_suffix=''):
    """Scope messages set with '.'

    Returns a scoped name for the list of messages in the form
    project-id.queue-name.suffix
    """

    return (normalize_none_str(project) + '.' +
            normalize_none_str(queue) + '.' +
            message_suffix)


def descope_message_ids_set(msgset_key):
    """Descope messages set with '.'

    :returns: (queue, project)
    """

    tokens = msgset_key.split('.')

    return tokens[1] or None, tokens[0] or None


def scope_subscription_ids_set(queue=None, project=None,
                               subscription_suffix=''):
    """Scope subscriptions set with '.'

    Returns a scoped name for the list of subscriptions in the form
    project-id.queue-name.suffix
    """

    return (normalize_none_str(project) + '.' +
            normalize_none_str(queue) + '.' +
            subscription_suffix)


def descope_subscription_ids_set(subset_key):
    """Descope subscriptions set with '.'

    :returns: (queue, project)
    """

    tokens = subset_key.split('.')

    return (tokens[1] or None, tokens[0] or None)


# NOTE(prashanthr_): Aliasing the scope_message_ids_set function
# to be used in the pools and claims controllers, as similar
# functionality is required to scope redis ids.
scope_queue_catalogue = scope_claims_set = scope_message_ids_set

scope_queue_index = scope_message_ids_set


def msgset_key(queue, project=None):
    return scope_message_ids_set(queue, project, MESSAGE_IDS_SUFFIX)


def subset_key(queue, project=None):
    return scope_subscription_ids_set(queue, project,
                                      SUBSCRIPTION_IDS_SUFFIX)


def raises_conn_error(func):
    """Handles the Redis ConnectionError.

    This decorator catches Redis's ConnectionError and raises
    Zaqar's ConnectionError instead.
    """

    # Note(prashanthr_) : Try to reuse this utility. Violates DRY
    # Can pass exception type into the decorator and create a
    # storage level utility.

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except redis.exceptions.ConnectionError:
            LOG.exception('Connection failure:')
            raise errors.ConnectionError()

    return wrapper


def retries_on_connection_error(func):
    """Causes the wrapped function to be re-called on ConnectionError.

    This decorator catches Redis ConnectionError and retries
    the function call.

    .. Note::
       Assumes that the decorated function has defined
       self.driver.redis_conf so that `max_reconnect_attempts` and
       `reconnect_sleep` can be taken into account.

    .. Warning:: The decorated function must be idempotent.
    """

    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        # TODO(prashanthr_) : Try to reuse this utility. Violates DRY
        # Can pass config parameters into the decorator and create a
        # storage level utility.

        max_attempts = self.driver.redis_conf.max_reconnect_attempts
        sleep_sec = self.driver.redis_conf.reconnect_sleep

        for attempt in range(max_attempts):
            try:
                return func(self, *args, **kwargs)

            except redis.exceptions.ConnectionError:
                # NOTE(kgriffs): redis-py will retry once itself,
                # but if the command cannot be sent the second time after
                # disconnecting and reconnecting, the error is raised
                # and we will catch it here.
                #
                # NOTE(kgriffs): When using a sentinel, if a master fails
                # the initial retry will gracefully fail over to the
                # new master if the sentinel failover delay is low enough;
                # if the delay is too long, then redis-py will get a
                # MasterNotFoundError (a subclass of ConnectionError) on
                # its retry, which will then just get raised and caught
                # here, in which case we will keep retrying until the
                # sentinel completes the failover and stops raising
                # MasterNotFoundError.
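                # NOTE: illustrative numbers (not from the source): with
                # reconnect_sleep = 1, the backoff below sleeps 1s, 2s,
                # 4s, ... between successive attempts, since the delay
                # is sleep_sec * (2 ** attempt).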
ex = sys.exc_info()[1] LOG.warning('Caught ConnectionError, retrying the ' 'call to %s', func.__name__) time.sleep(sleep_sec * (2 ** attempt)) else: LOG.error('Caught ConnectionError, maximum attempts ' 'to %s exceeded.', func.__name__) raise ex return wrapper def msg_claimed_filter(message, now): """Return True IFF the message is currently claimed.""" return message.claim_id and (now < message.claim_expires) def msg_delayed_filter(message, now): """Return True IFF the message is currently delayed.""" return now < message.delay_expires def msg_echo_filter(message, client_uuid): """Return True IFF the specified client posted the message.""" return message.client_uuid == str(client_uuid) def msg_expired_filter(message, now): """Return True IFF the message has expired.""" return message.expires <= now class QueueListCursor(object): def __init__(self, client, queues, denormalizer): self.queue_iter = queues self.denormalizer = denormalizer self.client = client def __iter__(self): return self @raises_conn_error def next(self): curr = next(self.queue_iter) queue = self.client.hmget(curr, ['c', 'm']) return self.denormalizer(queue, encodeutils.safe_decode(curr)) def __next__(self): return self.next() class SubscriptionListCursor(object): def __init__(self, client, subscriptions, denormalizer): self.subscription_iter = subscriptions self.denormalizer = denormalizer self.client = client def __iter__(self): return self @raises_conn_error def next(self): curr = next(self.subscription_iter) subscription = self.client.hmget(curr, ['s', 'u', 't', 'e', 'o', 'c']) # NOTE(flwang): The expired subscription will be removed # automatically, but the key can't be deleted automatically as well. # Though we clean up those expired ids when create new subscription, # we still need to filter them out before a new subscription creation. if not subscription[0]: return self.next() return self.denormalizer(subscription, encodeutils.safe_decode(curr)) def __next__(self): return self.next() def scope_flavors_ids_set(flavors_suffix=''): """Scope flavors set with '.' Returns a scoped name for the list of flavors in the form suffix """ return flavors_suffix def scope_project_flavors_ids_set(project=None, flavors_suffix=''): """Scope flavors set with '.' Returns a scoped name for the list of flavors in the form project-id_suffix """ return (normalize_none_str(project) + '.' + flavors_suffix) def scope_name_flavors_ids_set(name=None, flavors_suffix=''): """Scope flavors set with '.' Returns a scoped name for the list of flavors in the form flavors_name_suffix """ return (normalize_none_str(name) + '.' + flavors_suffix) def flavor_set_key(): return scope_flavors_ids_set(FLAVORS_IDS_SUFFIX) def flavor_project_subset_key(project=None): return scope_project_flavors_ids_set(project, FLAVORS_IDS_SUFFIX) def flavor_name_hash_key(name=None): return scope_name_flavors_ids_set(name, FLAVORS_IDS_SUFFIX) class FlavorListCursor(object): def __init__(self, client, flavors, denormalizer): self.flavor_iter = flavors self.denormalizer = denormalizer self.client = client def __iter__(self): return self @raises_conn_error def next(self): curr = next(self.flavor_iter) flavor = self.client.hmget(curr, ['f', 'p', 'c']) flavor_dict = {} flavor_dict['f'] = flavor[0] flavor_dict['p'] = flavor[1] flavor_dict['c'] = flavor[2] return self.denormalizer(flavor_dict) def __next__(self): return self.next() def scope_pools_ids_set(pools_suffix=''): """Scope pools set with '.' 
Returns a scoped name for the list of pools in the form suffix """ return pools_suffix def scope_flavor_pools_ids_set(flavor=None, pools_suffix=''): """Scope pools set with '.' Returns a scoped name for the list of pools in the form project-id_suffix """ return (normalize_none_str(flavor) + '.' + pools_suffix) def scope_name_pools_ids_set(name=None, pools_suffix=''): """Scope pools set with '.' Returns a scoped name for the list of pools in the form pools_name_suffix """ return (normalize_none_str(name) + '.' + pools_suffix) def pools_set_key(): return scope_pools_ids_set(POOLS_IDS_SUFFIX) def pools_subset_key(flavor=None): return scope_flavor_pools_ids_set(flavor, POOLS_IDS_SUFFIX) def pools_name_hash_key(name=None): return scope_name_pools_ids_set(name, POOLS_IDS_SUFFIX) class PoolsListCursor(object): def __init__(self, client, pools, denormalizer): self.pools_iter = pools self.denormalizer = denormalizer self.client = client def __iter__(self): return self @raises_conn_error def next(self): curr = next(self.pools_iter) pools = self.client.hmget(curr, ['pl', 'u', 'w', 'f', 'o']) pool_dict = {} pool_dict['pl'] = pools[0] pool_dict['u'] = pools[1] pool_dict['w'] = pools[2] pool_dict['f'] = pools[3] pool_dict['o'] = pools[4] return self.denormalizer(pool_dict) def __next__(self): return self.next() ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5740135 zaqar-20.1.0.dev29/zaqar/storage/sqlalchemy/0000775000175100017510000000000015033040026017665 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/sqlalchemy/__init__.py0000664000175100017510000000131115033040005021767 0ustar00mylesmyles# Copyright (c) 2014 Rackspace Hosting Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from zaqar.storage.sqlalchemy import driver # Hoist classes into package namespace ControlDriver = driver.ControlDriver ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/sqlalchemy/catalogue.py0000664000175100017510000000610715033040005022204 0ustar00mylesmyles# Copyright (c) 2014 Rackspace Hosting, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Sql storage controller for the queues catalogue. 
Serves to construct an association between a project + queue -> pool name: string -> Pools.name project: string queue: string """ import oslo_db.exception import sqlalchemy as sa from zaqar.storage import base from zaqar.storage import errors from zaqar.storage.sqlalchemy import tables def _match(project, queue): clauses = [ tables.Catalogue.c.project == project, tables.Catalogue.c.queue == queue ] return sa.sql.and_(*clauses) class CatalogueController(base.CatalogueBase): def list(self, project): stmt = sa.sql.select(tables.Catalogue).where( tables.Catalogue.c.project == project ) cursor = self.driver.fetch_all(stmt) return (_normalize(v) for v in cursor) def get(self, project, queue): stmt = sa.sql.select(tables.Catalogue).where( _match(project, queue) ) entry = self.driver.fetch_one(stmt) if entry is None: raise errors.QueueNotMapped(queue, project) return _normalize(entry) def exists(self, project, queue): try: return self.get(project, queue) is not None except errors.QueueNotMapped: return False def insert(self, project, queue, pool): try: stmt = sa.sql.insert(tables.Catalogue).values( project=project, queue=queue, pool=pool ) self.driver.run(stmt) except oslo_db.exception.DBReferenceError: self._update(project, queue, pool) except oslo_db.exception.DBDuplicateError: self._update(project, queue, pool) def delete(self, project, queue): stmt = sa.sql.delete(tables.Catalogue).where( _match(project, queue) ) self.driver.run(stmt) def _update(self, project, queue, pool): stmt = sa.sql.update(tables.Catalogue).where( _match(project, queue) ).values(pool=pool) self.driver.run(stmt) def update(self, project, queue, pool=None): if pool is None: return if not self.exists(project, queue): raise errors.QueueNotMapped(queue, project) self._update(project, queue, pool) def drop_all(self): stmt = sa.sql.expression.delete(tables.Catalogue) self.driver.run(stmt) def _normalize(entry): name, project, queue = entry return { 'queue': queue, 'project': project, 'pool': name } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/sqlalchemy/controllers.py0000664000175100017510000000173215033040005022605 0ustar00mylesmyles# Copyright (c) 2014 Red Hat, Inc. # Copyright (c) 2014 Rackspace Hosting Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. from zaqar.storage.sqlalchemy import catalogue from zaqar.storage.sqlalchemy import flavors from zaqar.storage.sqlalchemy import pools from zaqar.storage.sqlalchemy import queues QueueController = queues.QueueController CatalogueController = catalogue.CatalogueController PoolsController = pools.PoolsController FlavorsController = flavors.FlavorsController ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/sqlalchemy/driver.py0000664000175100017510000001062515033040005021533 0ustar00mylesmyles# Copyright (c) 2013 Red Hat, Inc. 
# Copyright 2014 Catalyst IT Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. from oslo_db.sqlalchemy import engines from osprofiler import profiler from osprofiler import sqlalchemy as sa_tracer import sqlalchemy as sa from zaqar.common import decorators from zaqar.conf import drivers_management_store_sqlalchemy from zaqar import storage from zaqar.storage.sqlalchemy import controllers class ControlDriver(storage.ControlDriverBase): def __init__(self, conf, cache): super(ControlDriver, self).__init__(conf, cache) self.conf.register_opts( drivers_management_store_sqlalchemy.ALL_OPTS, group=drivers_management_store_sqlalchemy.GROUP_NAME) self.sqlalchemy_conf = self.conf[ drivers_management_store_sqlalchemy.GROUP_NAME] def _mysql_on_connect(self, conn, record): # NOTE(flaper87): This is necessary in order # to ensure that all date operations in mysql # happen in UTC, `now()` for example. conn.query('SET time_zone = "+0:00"') @decorators.lazy_property(write=False) def engine(self): uri = self.sqlalchemy_conf.uri engine = engines.create_engine(uri, sqlite_fk=True) if (uri.startswith('mysql://') or uri.startswith('mysql+pymysql://')): sa.event.listen(engine, 'connect', self._mysql_on_connect) if (self.conf.profiler.enabled and self.conf.profiler.trace_message_store): sa_tracer.add_tracing(sa, engine, "db") return engine # TODO(cpp-cabrera): expose connect/close as a context manager # that acquires the connection to the DB for the desired scope and # closes it once the operations are completed # TODO(wangxiyuan): we should migrate to oslo.db asap. 
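    # NOTE: illustrative usage (not part of the original module;
    # 'example-project' is a made-up value, and 'tables' refers to
    # zaqar.storage.sqlalchemy.tables): the controllers drive the
    # helpers below with SQLAlchemy Core statements, e.g.
    #
    #     stmt = sa.sql.select(tables.Queues).where(
    #         tables.Queues.c.project == 'example-project')
    #     rows = self.driver.fetch_all(stmt)
    #
    # run() commits explicitly because SQLAlchemy 2.x connections no
    # longer autocommit.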
def run(self, *args, **kwargs): with self.engine.connect() as conn: result = conn.execute(*args, **kwargs) conn.commit() return result def fetch_all(self, *args, **kwargs): with self.engine.connect() as conn: return conn.execute(*args, **kwargs).fetchall() def fetch_one(self, *args, **kwargs): with self.engine.connect() as conn: return conn.execute(*args, **kwargs).fetchone() def close(self): pass @property def pools_controller(self): controller = controllers.PoolsController(self) if (self.conf.profiler.enabled and self.conf.profiler.trace_management_store): return profiler.trace_cls("sqlalchemy_pools_" "controller")(controller) else: return controller @property def queue_controller(self): controller = controllers.QueueController(self) if (self.conf.profiler.enabled and (self.conf.profiler.trace_message_store or self.conf.profiler.trace_management_store)): return profiler.trace_cls("sqlalchemy_queue_" "controller")(controller) else: return controller @property def catalogue_controller(self): controller = controllers.CatalogueController(self) if (self.conf.profiler.enabled and self.conf.profiler.trace_management_store): return profiler.trace_cls("sqlalchemy_catalogue_" "controller")(controller) else: return controller @property def flavors_controller(self): controller = controllers.FlavorsController(self) if (self.conf.profiler.enabled and self.conf.profiler.trace_management_store): return profiler.trace_cls("sqlalchemy_flavors_" "controller")(controller) else: return controller @property def topic_controller(self): pass ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/sqlalchemy/flavors.py0000664000175100017510000001110215033040005021703 0ustar00mylesmyles# Copyright (c) 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. """flavors: an implementation of the flavor management storage controller for sqlalchemy. 
""" import oslo_db.exception import sqlalchemy as sa from zaqar.storage import base from zaqar.storage import errors from zaqar.storage.sqlalchemy import tables from zaqar.storage.sqlalchemy import utils class FlavorsController(base.FlavorsBase): def __init__(self, *args, **kwargs): super(FlavorsController, self).__init__(*args, **kwargs) self._pools_ctrl = self.driver.pools_controller @utils.raises_conn_error def list(self, project=None, marker=None, limit=10, detailed=False): marker = marker or '' # TODO(cpp-cabrera): optimization - limit the columns returned # when detailed=False by specifying them in the select() # clause stmt = sa.sql.select(tables.Flavors).where( sa.and_(tables.Flavors.c.name > marker, tables.Flavors.c.project == project) ) if limit > 0: stmt = stmt.limit(limit) cursor = self.driver.fetch_all(stmt) marker_name = {} def it(): for cur in cursor: marker_name['next'] = cur[0] yield _normalize(cur, detailed=detailed) yield it() yield marker_name and marker_name['next'] @utils.raises_conn_error def get(self, name, project=None, detailed=False): stmt = sa.sql.select(tables.Flavors).where( sa.and_(tables.Flavors.c.name == name, tables.Flavors.c.project == project) ) flavor = self.driver.fetch_one(stmt) if flavor is None: raise errors.FlavorDoesNotExist(name) return _normalize(flavor, detailed) @utils.raises_conn_error def create(self, name, project=None, capabilities=None): cap = None if capabilities is None else utils.json_encode(capabilities) try: stmt = sa.sql.expression.insert(tables.Flavors).values( name=name, project=project, capabilities=cap ) self.driver.run(stmt) except oslo_db.exception.DBDuplicateEntry: # TODO(flaper87): merge update/create into a single # method with introduction of upsert self.update(name, project=project, capabilities=capabilities) @utils.raises_conn_error def exists(self, name, project=None): stmt = sa.sql.select(tables.Flavors.c.name).where( sa.and_(tables.Flavors.c.name == name, tables.Flavors.c.project == project) ).limit(1) return self.driver.fetch_one(stmt) is not None @utils.raises_conn_error def update(self, name, project=None, capabilities=None): fields = {} if capabilities is not None: fields['capabilities'] = capabilities assert fields, '`capabilities` not found in kwargs' if 'capabilities' in fields: fields['capabilities'] = utils.json_encode(fields['capabilities']) stmt = sa.sql.update(tables.Flavors).where( sa.and_(tables.Flavors.c.name == name, tables.Flavors.c.project == project)).values(**fields) res = self.driver.run(stmt) if res.rowcount == 0: raise errors.FlavorDoesNotExist(name) @utils.raises_conn_error def delete(self, name, project=None): stmt = sa.sql.expression.delete(tables.Flavors).where( sa.and_(tables.Flavors.c.name == name, tables.Flavors.c.project == project) ) self.driver.run(stmt) @utils.raises_conn_error def drop_all(self): stmt = sa.sql.expression.delete(tables.Flavors) self.driver.run(stmt) def _normalize(flavor, detailed=False): ret = { 'name': flavor[0], } if detailed: capabilities = flavor[2] ret['capabilities'] = (utils.json_decode(capabilities) if capabilities else {}) return ret ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5740135 zaqar-20.1.0.dev29/zaqar/storage/sqlalchemy/migration/0000775000175100017510000000000015033040026021656 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/sqlalchemy/migration/__init__.py0000664000175100017510000000000015033040005023752 
0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/sqlalchemy/migration/alembic.ini0000664000175100017510000000171715033040005023756 0ustar00mylesmyles# A generic, single database configuration.

[alembic]
# path to migration scripts
script_location = zaqar/storage/sqlalchemy/migration/alembic_migrations

# template used to generate migration files
# file_template = %%(rev)s_%%(slug)s

# max length of characters to apply to the
# "slug" field
#truncate_slug_length = 40

# set to 'true' to run the environment during
# the 'revision' command, regardless of autogenerate
# revision_environment = false

sqlalchemy.url =

# Logging configuration
[loggers]
keys = root,sqlalchemy,alembic

[handlers]
keys = console

[formatters]
keys = generic

[logger_root]
level = WARN
handlers = console
qualname =

[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine

[logger_alembic]
level = INFO
handlers =
qualname = alembic

[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic

[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S
././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5740135 zaqar-20.1.0.dev29/zaqar/storage/sqlalchemy/migration/alembic_migrations/0000775000175100017510000000000015033040026025506 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/sqlalchemy/migration/alembic_migrations/README.md0000664000175100017510000000467515033040005026776 0ustar00mylesmyles
The migrations in `alembic_migrations/versions` contain the changes needed
to migrate between Zaqar database revisions. A migration occurs by executing
a script that details the changes needed to upgrade the database. The
migration scripts are ordered so that multiple scripts can run sequentially.
The scripts are executed by Zaqar's migration wrapper, which uses the Alembic
library to manage the migration. Zaqar supports migration from Liberty or
later.

You can upgrade to the latest database version via:

```
$ zaqar-sql-db-manage --config-file /path/to/zaqar.conf upgrade head
```

To check the current database version:

```
$ zaqar-sql-db-manage --config-file /path/to/zaqar.conf current
```

To create a script to run the migration offline:

```
$ zaqar-sql-db-manage --config-file /path/to/zaqar.conf upgrade head --sql
```

To run the offline migration between specific migration versions:

```
$ zaqar-sql-db-manage --config-file /path/to/zaqar.conf upgrade <start version>:<end version> --sql
```

Upgrade the database incrementally:

```
$ zaqar-sql-db-manage --config-file /path/to/zaqar.conf upgrade --delta <# of revs>
```

Create new revision:

```
$ zaqar-sql-db-manage --config-file /path/to/zaqar.conf revision -m "description of revision" --autogenerate
```

Create a blank file:

```
$ zaqar-sql-db-manage --config-file /path/to/zaqar.conf revision -m "description of revision"
```

This command does not perform any migrations; it only sets the database
revision, which may be any existing revision. Use this command carefully.
```
$ zaqar-sql-db-manage --config-file /path/to/zaqar.conf stamp <revision>
```

To verify whether the timeline has branched, you can run this command:

```
$ zaqar-sql-db-manage --config-file /path/to/zaqar.conf check_migration
```

If the migration path does branch, you can find the branch point via:

```
$ zaqar-sql-db-manage --config-file /path/to/zaqar.conf history
```
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/sqlalchemy/migration/alembic_migrations/env.py0000664000175100017510000000522515033040005026651 0ustar00mylesmyles# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Based on Neutron's migration/cli.py

from logging import config as c

from alembic import context
from oslo_utils import importutils
from sqlalchemy import create_engine
from sqlalchemy import pool

from zaqar.storage.sqlalchemy import tables

importutils.try_import('zaqar.storage.sqlalchemy.tables')

# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
zaqar_config = config.zaqar_config

# Interpret the config file for Python logging.
# This line sets up loggers basically.
c.fileConfig(config.config_file_name)

# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = tables.metadata

# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.


def run_migrations_offline():
    """Run migrations in 'offline' mode.

    This configures the context with just a URL and not an Engine,
    though an Engine is acceptable here as well. By skipping the Engine
    creation we don't even need a DBAPI to be available.

    Calls to context.execute() here emit the given string to the
    script output.
    """
    context.configure(
        url=zaqar_config['drivers:management_store:sqlalchemy'].uri)

    with context.begin_transaction():
        context.run_migrations()


def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine and associate a
    connection with the context.
    """
    engine = create_engine(
        zaqar_config['drivers:management_store:sqlalchemy'].uri,
        poolclass=pool.NullPool)

    connection = engine.connect()
    context.configure(
        connection=connection,
        target_metadata=target_metadata)

    try:
        with context.begin_transaction():
            context.run_migrations()
    finally:
        connection.close()


if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/sqlalchemy/migration/alembic_migrations/script.py.mako0000664000175100017510000000167015033040005030313 0ustar00mylesmyles# Copyright ${create_date.year} OpenStack Foundation.
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """${message} Revision ID: ${up_revision} Revises: ${down_revision} Create Date: ${create_date} """ # revision identifiers, used by Alembic. revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} from alembic import op import sqlalchemy as sa ${imports if imports else ""} def upgrade(): ${upgrades if upgrades else "pass"} ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5750134 zaqar-20.1.0.dev29/zaqar/storage/sqlalchemy/migration/alembic_migrations/versions/0000775000175100017510000000000015033040026027356 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/sqlalchemy/migration/alembic_migrations/versions/001_liberty.py0000664000175100017510000000524515033040005031765 0ustar00mylesmyles# Copyright 2016 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Liberty release Revision ID: 001 Revises: None Create Date: 2015-09-13 20:46:25.783444 """ # revision identifiers, used by Alembic. 
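# A brief orientation note: Alembic chains migration scripts through the two
# attributes below. Each script names itself via ``revision`` and its parent
# via ``down_revision``; ``down_revision = None`` marks this Liberty script
# as the root of the history, so ``zaqar-sql-db-manage upgrade head`` on an
# empty database replays it first and then walks every descendant in order.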
revision = '001' down_revision = None from alembic import op import sqlalchemy as sa MYSQL_ENGINE = 'InnoDB' MYSQL_CHARSET = 'utf8' def upgrade(): op.create_table('Queues', sa.Column('id', sa.INTEGER, primary_key=True), sa.Column('project', sa.String(64)), sa.Column('name', sa.String(64)), sa.Column('metadata', sa.LargeBinary), sa.UniqueConstraint('project', 'name')) op.create_table('PoolGroup', sa.Column('name', sa.String(64), primary_key=True)) op.create_table('Pools', sa.Column('name', sa.String(64), primary_key=True), sa.Column('group', sa.String(64), sa.ForeignKey('PoolGroup.name', ondelete='CASCADE'), nullable=True), sa.Column('uri', sa.String(255), unique=True, nullable=False), sa.Column('weight', sa.INTEGER, nullable=False), sa.Column('options', sa.Text())) op.create_table('Flavors', sa.Column('name', sa.String(64), primary_key=True), sa.Column('project', sa.String(64)), sa.Column('pool_group', sa.String(64), sa.ForeignKey('PoolGroup.name', ondelete='CASCADE'), nullable=False), sa.Column('capabilities', sa.Text())) op.create_table('Catalogue', sa.Column('pool', sa.String(64), sa.ForeignKey('Pools.name', ondelete='CASCADE')), sa.Column('project', sa.String(64)), sa.Column('queue', sa.String(64), nullable=False), sa.UniqueConstraint('project', 'queue')) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/sqlalchemy/migration/alembic_migrations/versions/002_placeholder.py0000664000175100017510000000142115033040005032566 0ustar00mylesmyles# Copyright 2016 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """placeholder Revision ID: 002 Revises: 001 Create Date: 2014-04-01 21:04:47.941098 """ # revision identifiers, used by Alembic. revision = '002' down_revision = '001' def upgrade(): pass ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/sqlalchemy/migration/alembic_migrations/versions/003_placeholder.py0000664000175100017510000000142115033040005032567 0ustar00mylesmyles# Copyright 2014 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """placeholder Revision ID: 003 Revises: 002 Create Date: 2014-04-01 21:05:00.270366 """ # revision identifiers, used by Alembic. 
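# Placeholder revisions such as 002-005 define upgrade() as a bare ``pass``.
# Presumably, following the convention used by other OpenStack projects, they
# reserve revision numbers so a migration can later be backported to a stable
# branch without renumbering the rest of the chain; applying one is a no-op.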
revision = '003'
down_revision = '002'


def upgrade():
    pass
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/sqlalchemy/migration/alembic_migrations/versions/004_placeholder.py0000664000175100017510000000142115033040005032570 0ustar00mylesmyles# Copyright 2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""placeholder

Revision ID: 004
Revises: 003
Create Date: 2014-04-01 21:04:57.627883

"""

# revision identifiers, used by Alembic.
revision = '004'
down_revision = '003'


def upgrade():
    pass
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/sqlalchemy/migration/alembic_migrations/versions/005_placeholder.py0000664000175100017510000000142115033040005032571 0ustar00mylesmyles# Copyright 2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""placeholder

Revision ID: 005
Revises: 004
Create Date: 2014-04-01 21:04:54.928605

"""

# revision identifiers, used by Alembic.
revision = '005'
down_revision = '004'


def upgrade():
    pass
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/sqlalchemy/migration/alembic_migrations/versions/006_queens.py0000664000175100017510000000226015033040005031612 0ustar00mylesmyles# Copyright 2017 ZTE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Queens release

Revision ID: 006
Revises: 005
Create Date: 2017-11-09 11:45:45.928605

"""

# revision identifiers, used by Alembic.
revision = '006'
down_revision = '005'

from alembic import op
import sqlalchemy as sa

MYSQL_ENGINE = 'InnoDB'
MYSQL_CHARSET = 'utf8'


def upgrade():
    # NOTE(gengchc2): Add a new flavor column to Pools nodes
    op.add_column('Pools', sa.Column('flavor', sa.String(64), nullable=True))
    # NOTE(gengchc2): Change pool_group to default null in Flavors table
    op.execute('alter table Flavors change column pool_group '
               'pool_group varchar(64) default null')
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/sqlalchemy/migration/alembic_migrations/versions/007_stein.py0000664000175100017510000000305315033040005031436 0ustar00mylesmyles# Copyright 2017 ZTE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Stein release

Revision ID: 007
Revises: 006
Create Date: 2019-01-09 11:45:45.928605

"""

# revision identifiers, used by Alembic.
revision = '007'
down_revision = '006'

from alembic import op
import sqlalchemy as sa

MYSQL_ENGINE = 'InnoDB'
MYSQL_CHARSET = 'utf8'


def upgrade():
    op.drop_constraint(constraint_name='Pools_ibfk_1',
                       table_name='Pools',
                       type_='foreignkey')
    op.drop_constraint(constraint_name='Flavors_ibfk_1',
                       table_name='Flavors',
                       type_='foreignkey')
    op.drop_column('Pools', 'group')
    op.drop_column('Flavors', 'pool_group')
    op.execute('drop table PoolGroup ')


def downgrade():
    op.add_column('Pools', sa.Column('group', sa.String(64), nullable=True))
    op.add_column('Flavors',
                  sa.Column('pool_group', sa.String(64), nullable=True))
    op.create_table('PoolGroup',
                    sa.Column('name', sa.String(64), primary_key=True))
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/sqlalchemy/migration/cli.py0000664000175100017510000000744215033040005023003 0ustar00mylesmyles# Copyright (c) 2016 Catalyst IT Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
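# Orientation sketch (hedged): this module implements the
# ``zaqar-sql-db-manage`` commands documented in the README above. Every
# subcommand registered in add_command_parsers() is ultimately dispatched
# through do_alembic_command(), which looks up and invokes the function of
# the same name in alembic.command, e.g.:
#
#     $ zaqar-sql-db-manage --config-file /path/to/zaqar.conf upgrade head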
import os

from alembic import command as alembic_cmd
from alembic import config as alembic_cfg
from alembic import util as alembic_u
from oslo_config import cfg

CONF = cfg.CONF


def do_alembic_command(config, cmd, *args, **kwargs):
    try:
        getattr(alembic_cmd, cmd)(config, *args, **kwargs)
    except alembic_u.CommandError as e:
        alembic_u.err(str(e))


def do_check_migration(config, _cmd):
    do_alembic_command(config, 'branches')


def do_upgrade_downgrade(config, cmd):
    if not CONF.command.revision and not CONF.command.delta:
        raise SystemExit('You must provide a revision or relative delta')

    revision = CONF.command.revision
    if CONF.command.delta:
        sign = '+' if CONF.command.name == 'upgrade' else '-'
        revision = sign + str(CONF.command.delta)

    do_alembic_command(config, cmd, revision, sql=CONF.command.sql)


def do_stamp(config, cmd):
    do_alembic_command(config, cmd, CONF.command.revision,
                       sql=CONF.command.sql)


def do_revision(config, cmd):
    do_alembic_command(config, cmd,
                       message=CONF.command.message,
                       autogenerate=CONF.command.autogenerate,
                       sql=CONF.command.sql)


def add_command_parsers(subparsers):
    for name in ['current', 'history', 'branches']:
        parser = subparsers.add_parser(name)
        parser.set_defaults(func=do_alembic_command)

    parser = subparsers.add_parser('check_migration')
    parser.set_defaults(func=do_check_migration)

    for name in ['upgrade', 'downgrade']:
        parser = subparsers.add_parser(name)
        parser.add_argument('--delta', type=int)
        parser.add_argument('--sql', action='store_true')
        parser.add_argument('revision', nargs='?')
        parser.set_defaults(func=do_upgrade_downgrade)

    parser = subparsers.add_parser('stamp')
    parser.add_argument('--sql', action='store_true')
    parser.add_argument('revision')
    parser.set_defaults(func=do_stamp)

    parser = subparsers.add_parser('revision')
    parser.add_argument('-m', '--message')
    parser.add_argument('--autogenerate', action='store_true')
    parser.add_argument('--sql', action='store_true')
    parser.set_defaults(func=do_revision)


command_opt = cfg.SubCommandOpt('command',
                                title='Command',
                                help='Available commands',
                                handler=add_command_parsers)
CONF.register_cli_opt(command_opt)

sqlalchemy_opts = [cfg.StrOpt('uri',
                              help='The SQLAlchemy connection string to'
                                   ' use to connect to the database.',
                              secret=True)]
CONF.register_opts(sqlalchemy_opts,
                   group='drivers:management_store:sqlalchemy')


def main():
    config = alembic_cfg.Config(
        os.path.join(os.path.dirname(__file__), 'alembic.ini')
    )
    config.set_main_option('script_location',
                           'zaqar.storage.sqlalchemy.'
                           'migration:alembic_migrations')
    # attach the zaqar conf to the Alembic conf
    config.zaqar_config = CONF

    CONF(project='zaqar')
    CONF.command.func(config, CONF.command.name)
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/sqlalchemy/pools.py0000664000175100017510000001323315033040005021372 0ustar00mylesmyles# Copyright (c) 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.

"""pools: an implementation of the pool management storage
controller for sqlalchemy.
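In outline (a hedged summary of the controller below): each pool is one row
in the ``Pools`` table -- ``name`` (primary key), ``uri``, ``weight``,
``options`` and ``flavor`` -- and the controller layers listing, lookup,
create (effectively an upsert), update, delete and a flavor-scoped query on
top of that table.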
""" import functools import oslo_db.exception import sqlalchemy as sa from zaqar.common import utils as common_utils from zaqar.storage import base from zaqar.storage import errors from zaqar.storage.sqlalchemy import tables from zaqar.storage.sqlalchemy import utils class PoolsController(base.PoolsBase): @utils.raises_conn_error def _list(self, marker=None, limit=10, detailed=False): marker = marker or '' # TODO(cpp-cabrera): optimization - limit the columns returned # when detailed=False by specifying them in the select() # clause stmt = sa.sql.select(tables.Pools.c.name, tables.Pools.c.uri, tables.Pools.c.weight, tables.Pools.c.options, tables.Pools.c.flavor).where( tables.Pools.c.name > marker ) if limit > 0: stmt = stmt.limit(limit) cursor = self.driver.fetch_all(stmt) marker_name = {} def it(): for cur in cursor: marker_name['next'] = cur[0] yield _normalize(cur, detailed=detailed) yield it() yield marker_name and marker_name['next'] @utils.raises_conn_error def _get_pools_by_flavor(self, flavor=None, detailed=False): flavor_name = flavor.get("name", None) if flavor is not None\ else None if flavor_name is not None: stmt = sa.sql.select(tables.Pools.c.name, tables.Pools.c.uri, tables.Pools.c.weight, tables.Pools.c.options, tables.Pools.c.flavor).where( tables.Pools.c.flavor == flavor_name ) else: stmt = sa.sql.select(tables.Pools.c.name, tables.Pools.c.uri, tables.Pools.c.weight, tables.Pools.c.options, tables.Pools.c.flavor) cursor = self.driver.fetch_all(stmt) normalizer = functools.partial(_normalize, detailed=detailed) get_result = (normalizer(v) for v in cursor) return get_result @utils.raises_conn_error def _get(self, name, detailed=False): stmt = sa.sql.select(tables.Pools.c.name, tables.Pools.c.uri, tables.Pools.c.weight, tables.Pools.c.options, tables.Pools.c.flavor).where( tables.Pools.c.name == name ) pool = self.driver.fetch_one(stmt) if pool is None: raise errors.PoolDoesNotExist(name) return _normalize(pool, detailed) # TODO(cpp-cabrera): rename to upsert @utils.raises_conn_error def _create(self, name, weight, uri, flavor=None, options=None): opts = None if options is None else utils.json_encode(options) try: stmt = sa.sql.insert(tables.Pools).values( name=name, weight=weight, uri=uri, flavor=flavor, options=opts ) self.driver.run(stmt) except oslo_db.exception.DBDuplicateEntry: # TODO(cpp-cabrera): merge update/create into a single # method with introduction of upsert self._update(name, weight=weight, uri=uri, flavor=flavor, options=options) @utils.raises_conn_error def _exists(self, name): stmt = sa.sql.select(tables.Pools.c.name).where( tables.Pools.c.name == name).limit(1) return self.driver.fetch_one(stmt) is not None @utils.raises_conn_error def _update(self, name, **kwargs): # NOTE(cpp-cabrera): by pruning None-valued kwargs, we avoid # overwriting the existing options field with None, since that # one can be null. 
names = ('uri', 'weight', 'flavor', 'options') fields = common_utils.fields(kwargs, names, pred=lambda x: x is not None) assert fields, ('`weight`, `uri`, `flavor`, ' 'or `options` not found in kwargs') if 'options' in fields: fields['options'] = utils.json_encode(fields['options']) stmt = sa.sql.update(tables.Pools).where( tables.Pools.c.name == name).values(**fields) res = self.driver.run(stmt) if res.rowcount == 0: raise errors.PoolDoesNotExist(name) @utils.raises_conn_error def _delete(self, name): stmt = sa.sql.expression.delete(tables.Pools).where( tables.Pools.c.name == name ) self.driver.run(stmt) @utils.raises_conn_error def _drop_all(self): stmt = sa.sql.expression.delete(tables.Pools) self.driver.run(stmt) def _normalize(pool, detailed=False): ret = { 'name': pool[0], 'uri': pool[1], 'weight': pool[2], 'flavor': pool[4], } if detailed: opts = pool[3] ret['options'] = utils.json_decode(opts) if opts else {} return ret ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/sqlalchemy/queues.py0000664000175100017510000001055715033040005021553 0ustar00mylesmyles# Copyright (c) 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. import oslo_db.exception import sqlalchemy as sa from zaqar import storage from zaqar.storage import errors from zaqar.storage.sqlalchemy import tables from zaqar.storage.sqlalchemy import utils class QueueController(storage.Queue): def _list(self, project, kfilter={}, marker=None, limit=storage.DEFAULT_QUEUES_PER_PAGE, detailed=False, name=None): if project is None: project = '' fields = tables.Queues.c.name if detailed: fields = tables.Queues.c["name", "metadata"] if marker: sel = sa.sql.select(fields).where(sa.and_( tables.Queues.c.project == project, tables.Queues.c.name > marker)) else: sel = sa.sql.select(fields).where( tables.Queues.c.project == project) sel = sel.order_by(sa.asc(tables.Queues.c.name)).limit(limit) records = self.driver.fetch_all(sel) marker_name = {} def it(): for rec in records: marker_name['next'] = rec[0] yield ({'name': rec[0]} if not detailed else {'name': rec[0], 'metadata': utils.json_decode(rec[1])}) yield it() yield marker_name and marker_name['next'] def get_metadata(self, name, project): if project is None: project = '' sel = sa.sql.select(tables.Queues.c.metadata).where(sa.and_( tables.Queues.c.project == project, tables.Queues.c.name == name)) queue = self.driver.fetch_one(sel) if queue is None: raise errors.QueueDoesNotExist(name, project) return utils.json_decode(queue[0]) def _get(self, name, project=None): try: return self.get_metadata(name, project) except errors.QueueDoesNotExist: return {} def _create(self, name, metadata=None, project=None): if project is None: project = '' try: smeta = utils.json_encode(metadata or {}) ins = tables.Queues.insert().values(project=project, name=name, metadata=smeta) res = self.driver.run(ins) except oslo_db.exception.DBDuplicateEntry: return False return res.rowcount == 1 def _exists(self, name, project): if 
project is None: project = '' sel = sa.sql.select(tables.Queues.c.id).where(sa.and_( tables.Queues.c.project == project, tables.Queues.c.name == name)) res = self.driver.fetch_one(sel) return res is not None def set_metadata(self, name, metadata, project): if project is None: project = '' update = (tables.Queues.update(). where(sa.and_( tables.Queues.c.project == project, tables.Queues.c.name == name)). values(metadata=utils.json_encode(metadata))) res = self.driver.run(update) try: if res.rowcount != 1: raise errors.QueueDoesNotExist(name, project) finally: res.close() def _delete(self, name, project): if project is None: project = '' dlt = tables.Queues.delete().where(sa.and_( tables.Queues.c.project == project, tables.Queues.c.name == name)) self.driver.run(dlt) def _stats(self, name, project): pass def _calculate_resource_count(self, project=None): if project is None: project = '' sel = sa.sql.select(sa.sql.func.count('*')).where( tables.Queues.c.project == project) res = self.driver.fetch_one(sel) return res is not None ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/sqlalchemy/tables.py0000664000175100017510000000420615033040005021510 0ustar00mylesmyles# Copyright (c) 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. import sqlalchemy as sa metadata = sa.MetaData() Queues = sa.Table('Queues', metadata, sa.Column('id', sa.INTEGER, primary_key=True), sa.Column('project', sa.String(64)), sa.Column('name', sa.String(64)), sa.Column('metadata', sa.LargeBinary), sa.UniqueConstraint('project', 'name'), ) Pools = sa.Table('Pools', metadata, sa.Column('name', sa.String(64), primary_key=True), sa.Column('uri', sa.String(255), unique=True, nullable=False), sa.Column('weight', sa.INTEGER, nullable=False), sa.Column('options', sa.Text()), sa.Column('flavor', sa.String(64), nullable=True)) # NOTE(gengchc2): Modify pool_group define: turn NOT NULL into DEFAULT NULL: # [alter table Flavors change column pool_group pool_group varchar(64) # default null;] Flavors = sa.Table('Flavors', metadata, sa.Column('name', sa.String(64), primary_key=True), sa.Column('project', sa.String(64)), sa.Column('capabilities', sa.Text())) Catalogue = sa.Table('Catalogue', metadata, sa.Column('pool', sa.String(64), sa.ForeignKey('Pools.name', ondelete='CASCADE')), sa.Column('project', sa.String(64)), sa.Column('queue', sa.String(64), nullable=False), sa.UniqueConstraint('project', 'queue')) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/sqlalchemy/utils.py0000664000175100017510000000722515033040005021402 0ustar00mylesmyles# Copyright (c) 2014 Red Hat, Inc. # Copyright (c) 2014 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. 
You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. import functools from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import encodeutils import sqlalchemy as sa from sqlalchemy import exc from sqlalchemy.sql import func as sfunc from zaqar.storage import errors from zaqar.storage.sqlalchemy import tables LOG = logging.getLogger(__name__) UNIX_EPOCH_AS_JULIAN_SEC = 2440587.5 * 86400.0 def raises_conn_error(func): """Handles sqlalchemy DisconnectionError When sqlalchemy detects a disconnect from the database server, it retries a number of times. After failing that number of times, it will convert the internal DisconnectionError into an InvalidRequestError. This decorator handles that error. """ @functools.wraps(func) def wrapper(*args, **kwargs): try: return func(*args, **kwargs) except exc.InvalidRequestError: LOG.exception('Connection error:') raise errors.ConnectionError() return wrapper class NoResult(Exception): pass def get_qid(driver, queue, project): sel = sa.sql.select([tables.Queues.c.id], sa.and_( tables.Queues.c.project == project, tables.Queues.c.name == queue)) try: return driver.get(sel)[0] except NoResult: raise errors.QueueDoesNotExist(queue, project) def get_age(created): return sfunc.now() - created # The utilities below make the database IDs opaque to the users # of Zaqar API. The only purpose is to advise the users NOT to # make assumptions on the implementation of and/or relationship # between the message IDs, the markers, and claim IDs. # # The magic numbers are arbitrarily picked; the numbers themselves # come with no special functionalities. def msgid_encode(id): # NOTE(jeffrey4l): When using mysql-python, the id is converted to # long type, which will lead to a L letter in the last. 
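# Hedged round-trip illustration, computed from the constants used below:
#     msgid_encode(123)        -> '5c693a28'
#     msgid_decode('5c693a28') -> 123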
return hex(int(id) ^ 0x5c693a53)[2:] def msgid_decode(id): try: return int(id, 16) ^ 0x5c693a53 except ValueError: return None def marker_encode(id): # NOTE(AAzza): cannot use oct(id) here, because on Python 3 it returns # string with prefix '0o', whereas on Python 2 prefix is just '0' return '{0:o}'.format(id ^ 0x3c96a355) def marker_decode(id): try: return int(id, 8) ^ 0x3c96a355 except ValueError: return None def cid_encode(id): return hex(id ^ 0x63c9a59c)[2:] def cid_decode(id): try: return int(id, 16) ^ 0x63c9a59c except ValueError: return None def julian_to_unix(julian_sec): """Converts Julian timestamp, in seconds, to a UNIX timestamp.""" return int(round(julian_sec - UNIX_EPOCH_AS_JULIAN_SEC)) def stat_message(message): """Creates a stat document based on a message.""" return { 'id': message['id'], 'age': message['age'], 'created': message['created'], } def json_encode(obj): return encodeutils.safe_encode(jsonutils.dumps(obj), 'utf-8') def json_decode(binary): return jsonutils.loads(binary, 'utf-8') ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5750134 zaqar-20.1.0.dev29/zaqar/storage/swift/0000775000175100017510000000000015033040026016657 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/swift/__init__.py0000664000175100017510000000000015033040005020753 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/swift/claims.py0000664000175100017510000002323715033040005020505 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import hashlib import math from oslo_serialization import jsonutils from oslo_utils import timeutils from oslo_utils import uuidutils import swiftclient from zaqar.common import decorators from zaqar import storage from zaqar.storage import errors from zaqar.storage.swift import utils class ClaimController(storage.Claim): """Implements claims resource operations with swift backend Claims are scoped by project + queue. 
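A hedged sketch of the claim -> Swift mapping, in the spirit of the tables
kept in messages.py and subscriptions.py:

+----------------+---------------------------------------+
| Attribute      | Storage location                      |
+----------------+---------------------------------------+
| Claim UUID     | Object name                           |
+----------------+---------------------------------------+
| Queue Name     | Container name prefix                 |
+----------------+---------------------------------------+
| Project name   | Container name prefix                 |
+----------------+---------------------------------------+
| Claimed msgs   | Object content (JSON list of ids)     |
+----------------+---------------------------------------+
| Claim TTL      | Object Delete-After header            |
+----------------+---------------------------------------+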
""" def __init__(self, *args, **kwargs): super(ClaimController, self).__init__(*args, **kwargs) self._client = self.driver.connection @decorators.lazy_property(write=False) def _queue_ctrl(self): return self.driver.queue_controller def _exists(self, queue, claim_id, project=None): try: return self._client.head_object( utils._claim_container(queue, project), claim_id) except swiftclient.ClientException as exc: if exc.http_status == 404: raise errors.ClaimDoesNotExist(claim_id, queue, project) raise def _get(self, queue, claim_id, project=None): try: container = utils._claim_container(queue, project) headers, claim = self._client.get_object(container, claim_id) except swiftclient.ClientException as exc: if exc.http_status != 404: raise return now = timeutils.utcnow_ts(True) return { 'id': claim_id, 'age': now - float(headers['x-timestamp']), 'ttl': int(headers['x-delete-at']) - math.floor(now), } def get(self, queue, claim_id, project=None): message_ctrl = self.driver.message_controller now = timeutils.utcnow_ts(True) self._exists(queue, claim_id, project) container = utils._claim_container(queue, project) headers, claim_obj = self._client.get_object(container, claim_id) def g(): for msg_id in jsonutils.loads(claim_obj): try: headers, msg = message_ctrl._find_message(queue, msg_id, project) except errors.MessageDoesNotExist: continue else: yield utils._message_to_json(msg_id, msg, headers, now) claim_meta = { 'id': claim_id, 'age': now - float(headers['x-timestamp']), 'ttl': int(headers['x-delete-at']) - math.floor(now), } return claim_meta, g() def create(self, queue, metadata, project=None, limit=storage.DEFAULT_MESSAGES_PER_CLAIM): message_ctrl = self.driver.message_controller queue_ctrl = self.driver.queue_controller try: queue_meta = queue_ctrl.get_metadata(queue, project=project) except errors.QueueDoesNotExist: return None, iter([]) ttl = metadata['ttl'] grace = metadata['grace'] msg_ts = ttl + grace claim_id = uuidutils.generate_uuid() dlq = True if ('_max_claim_count' in queue_meta and '_dead_letter_queue' in queue_meta) else False include_delayed = False if queue_meta.get('_default_message_delay', 0) else True messages, marker = message_ctrl._list(queue, project, limit=limit, include_claimed=False, include_delayed=include_delayed) claimed = [] for msg in messages: claim_count = msg.get('claim_count', 0) md5 = hashlib.md5() md5.update( jsonutils.dump_as_bytes( {'body': msg['body'], 'claim_id': None, 'ttl': msg['ttl'], 'claim_count': claim_count})) md5 = md5.hexdigest() msg_ttl = max(msg['ttl'], msg_ts) move_to_dlq = False if dlq: if claim_count < queue_meta['_max_claim_count']: # Check if the message's claim count has exceeded the # max claim count defined in the queue, if not , # Save the new max claim count for message claim_count = claim_count + 1 else: # if the message's claim count has exceeded the # max claim count defined in the queue, move the # message to the dead letter queue. # NOTE: We're moving message by changing the # project info directly. That means, the queue and dead # letter queue must be created on the same pool. 
dlq_ttl = queue_meta.get("_dead_letter_queue_messages_ttl") move_to_dlq = True if dlq_ttl: msg_ttl = dlq_ttl content = jsonutils.dumps( {'body': msg['body'], 'claim_id': claim_id, 'ttl': msg_ttl, 'claim_count': claim_count}) if move_to_dlq: dead_letter_queue = queue_meta.get("_dead_letter_queue") utils._put_or_create_container( self._client, utils._message_container(dead_letter_queue, project), msg['id'], content, content_type='application/json', headers={'x-object-meta-clientid': msg['client_uuid'], 'if-match': md5, 'x-object-meta-claimid': claim_id, 'x-delete-after': msg_ttl}) message_ctrl._delete(queue, msg['id'], project) else: try: self._client.put_object( utils._message_container(queue, project), msg['id'], content, content_type='application/json', headers={'x-object-meta-clientid': msg['client_uuid'], 'if-match': md5, 'x-object-meta-claimid': claim_id, 'x-delete-after': msg_ttl}) except swiftclient.ClientException as exc: if exc.http_status == 412: continue raise else: msg['claim_id'] = claim_id msg['ttl'] = msg_ttl msg['claim_count'] = claim_count claimed.append(msg) utils._put_or_create_container( self._client, utils._claim_container(queue, project), claim_id, jsonutils.dumps([msg['id'] for msg in claimed]), content_type='application/json', headers={'x-delete-after': ttl} ) return claim_id, claimed def update(self, queue, claim_id, metadata, project=None): if not self._queue_ctrl.exists(queue, project): raise errors.QueueDoesNotExist(queue, project) container = utils._claim_container(queue, project) try: headers, obj = self._client.get_object(container, claim_id) except swiftclient.ClientException as exc: if exc.http_status == 404: raise errors.ClaimDoesNotExist(claim_id, queue, project) raise self._client.put_object(container, claim_id, obj, content_type='application/json', headers={'x-delete-after': metadata['ttl']}) def delete(self, queue, claim_id, project=None): message_ctrl = self.driver.message_controller try: header, obj = self._client.get_object( utils._claim_container(queue, project), claim_id) for msg_id in jsonutils.loads(obj): try: headers, msg = message_ctrl._find_message(queue, msg_id, project) except errors.MessageDoesNotExist: continue md5 = hashlib.md5() md5.update(msg) md5 = md5.hexdigest() msg = jsonutils.loads(msg) content = jsonutils.dumps( {'body': msg['body'], 'claim_id': None, 'ttl': msg['ttl']}) client_id = headers['x-object-meta-clientid'] self._client.put_object( utils._message_container(queue, project), msg_id, content, content_type='application/json', headers={'x-object-meta-clientid': client_id, 'if-match': md5, 'x-delete-at': headers['x-delete-at']}) self._client.delete_object( utils._claim_container(queue, project), claim_id) except swiftclient.ClientException as exc: if exc.http_status != 404: raise ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/swift/controllers.py0000664000175100017510000000147015033040005021576 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
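# Convenience aliases: the swift data driver (driver.py) imports its
# message, claim and subscription controllers from this single module.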
from zaqar.storage.swift import claims from zaqar.storage.swift import messages from zaqar.storage.swift import subscriptions MessageController = messages.MessageController ClaimController = claims.ClaimController SubscriptionController = subscriptions.SubscriptionController ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/swift/driver.py0000664000175100017510000001141015033040005020516 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from osprofiler import profiler import urllib from keystoneauth1.identity import generic from keystoneauth1 import session as keystone_session from oslo_log import log as oslo_logging import swiftclient from zaqar.common import decorators from zaqar.conf import drivers_message_store_swift from zaqar import storage from zaqar.storage.swift import controllers LOG = oslo_logging.getLogger(__name__) class DataDriver(storage.DataDriverBase): _DRIVER_OPTIONS = [(drivers_message_store_swift.GROUP_NAME, drivers_message_store_swift.ALL_OPTS)] def __init__(self, conf, cache, control_driver): super(DataDriver, self).__init__(conf, cache, control_driver) self.swift_conf = self.conf[drivers_message_store_swift.GROUP_NAME] if not self.conf.debug: # Reduce swiftclient logging, in particular to remove 404s logging.getLogger("swiftclient").setLevel(logging.WARNING) @property def capabilities(self): return ( storage.Capabilities.AOD, storage.Capabilities.DURABILITY, ) @decorators.lazy_property(write=False) def connection(self): return _ClientWrapper(self.swift_conf) def is_alive(self): try: self.connection.get_capabilities() return True except Exception: LOG.exception('Aliveness check failed:') return False @decorators.lazy_property(write=False) def message_controller(self): controller = controllers.MessageController(self) if (self.conf.profiler.enabled and self.conf.profiler.trace_message_store): return profiler.trace_cls("swift_message_controller")(controller) else: return controller @decorators.lazy_property(write=False) def subscription_controller(self): controller = controllers.SubscriptionController(self) if (self.conf.profiler.enabled and self.conf.profiler.trace_message_store): return profiler.trace_cls("swift_subscription_" "controller")(controller) else: return controller @decorators.lazy_property(write=False) def claim_controller(self): controller = controllers.ClaimController(self) if (self.conf.profiler.enabled and self.conf.profiler.trace_message_store): return profiler.trace_cls("swift_claim_controller")(controller) else: return controller def _health(self): raise NotImplementedError("No health checks") def close(self): pass class _ClientWrapper(object): """Wrapper around swiftclient.Connection. This wraps swiftclient.Connection to give the same API, but provide a thread-safe alternative with a different object for every method call. It maintains performance by managing authentication itself, and passing the token afterwards. 
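A hedged usage sketch: every attribute access on the wrapper builds a fresh
swiftclient.Connection bound to the shared keystone session, so

    client = _ClientWrapper(swift_conf)
    client.put_object(container, name, contents)

issues the PUT on its own short-lived connection object while reusing the
cached token.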
""" def __init__(self, conf): self.conf = conf self.endpoint = None self.parsed_url = urllib.parse.urlparse(conf.uri) self.session = None def _init_auth(self): auth = generic.Password( username=self.parsed_url.username, password=self.parsed_url.password, project_name=self.parsed_url.path[1:], user_domain_id=self.conf.user_domain_id, user_domain_name=self.conf.user_domain_name, project_domain_id=self.conf.project_domain_id, project_domain_name=self.conf.project_domain_name, auth_url=self.conf.auth_url) self.session = keystone_session.Session(auth=auth) self.endpoint = self.session.get_endpoint( service_type='object-store', interface=self.conf.interface, region_name=self.conf.region_name ) def __getattr__(self, attr): if self.session is None: self._init_auth() os_options = { 'object_storage_url': self.endpoint } client = swiftclient.Connection(session=self.session, insecure=self.conf.insecure, os_options=os_options) return getattr(client, attr) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/swift/messages.py0000664000175100017510000004437515033040005021052 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import datetime import functools import uuid from oslo_serialization import jsonutils from oslo_utils import timeutils import swiftclient from zaqar.common import decorators from zaqar import storage from zaqar.storage import errors from zaqar.storage.swift import utils from zaqar.storage import utils as s_utils class MessageController(storage.Message): """Implements message resource operations with swift backend Messages are scoped by project + queue. 
message -> Swift mapping: +--------------+-----------------------------------------+ | Attribute | Storage location | +--------------+-----------------------------------------+ | Msg UUID | Object name | +--------------+-----------------------------------------+ | Queue Name | Container name prefix | +--------------+-----------------------------------------+ | Project name | Container name prefix | +--------------+-----------------------------------------+ | Created time | Object Creation Time | +--------------+-----------------------------------------+ | Msg Body | Object content 'body' | +--------------+-----------------------------------------+ | Client ID | Object header 'ClientID' | +--------------+-----------------------------------------+ | Claim ID | Object content 'claim_id' | +--------------+-----------------------------------------+ | Delay Expires| Object content 'delay_expires' | +--------------+-----------------------------------------+ | Expires | Object Delete-After header | +--------------------------------------------------------+ | Checksum | Object content 'body' checksum | +--------------------------------------------------------+ """ def __init__(self, *args, **kwargs): super(MessageController, self).__init__(*args, **kwargs) self._client = self.driver.connection @decorators.lazy_property(write=False) def _queue_ctrl(self): return self.driver.queue_controller def _delete_queue_messages(self, queue, project, pipe): """Method to remove all the messages belonging to a queue. Will be referenced from the QueueController. The pipe to execute deletion will be passed from the QueueController executing the operation. """ container = utils._message_container(queue, project) remaining = True key = '' while remaining: headers, objects = self._client.get_container(container, limit=1000, marker=key) if not objects: return remaining = len(objects) == 1000 key = objects[-1]['name'] for o in objects: try: self._client.delete_object(container, o['name']) except swiftclient.ClientException as exc: if exc.http_status == 404: continue raise def _list(self, queue, project=None, marker=None, limit=storage.DEFAULT_MESSAGES_PER_PAGE, echo=False, client_uuid=None, include_claimed=False, include_delayed=False, sort=1): """List messages in the queue, oldest first(ish) Time ordering and message inclusion in lists are soft, there is no global order and times are based on the UTC time of the zaqar-api server that the message was created from. Here be consistency dragons. 
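(A hedged operational note: the code below deliberately over-fetches -- it
lists twice the requested limit because expired objects may still appear in
container listings -- and then drops echoed, claimed and delayed messages
client-side before yielding at most ``limit`` results.)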
""" if not self._queue_ctrl.exists(queue, project): raise errors.QueueDoesNotExist(queue, project) client = self._client container = utils._message_container(queue, project) query_string = None if sort == -1: query_string = 'reverse=on' try: _, objects = client.get_container( container, marker=marker, # list 2x the objects because some listing items may have # expired limit=limit * 2, query_string=query_string) except swiftclient.ClientException as exc: if exc.http_status == 404: raise errors.QueueDoesNotExist(queue, project) raise def is_claimed(msg, headers): if include_claimed or msg['claim_id'] is None: return False claim_obj = self.driver.claim_controller._get( queue, msg['claim_id'], project) return claim_obj is not None and claim_obj['ttl'] > 0 def is_delayed(msg, headers): if include_delayed: return False now = timeutils.utcnow_ts() return msg.get('delay_expires', 0) > now def is_echo(msg, headers): if echo: return False return headers['x-object-meta-clientid'] == str(client_uuid) filters = [ is_echo, is_claimed, is_delayed, ] marker = {} get_object = functools.partial(client.get_object, container) list_objects = functools.partial(client.get_container, container, limit=limit * 2, query_string=query_string) yield utils._filter_messages(objects, filters, marker, get_object, list_objects, limit=limit) yield marker and marker['next'] def list(self, queue, project=None, marker=None, limit=storage.DEFAULT_MESSAGES_PER_PAGE, echo=False, client_uuid=None, include_claimed=False, include_delayed=False,): return self._list(queue, project, marker, limit, echo, client_uuid, include_claimed, include_delayed) def first(self, queue, project=None, sort=1): if sort not in (1, -1): raise ValueError('sort must be either 1 (ascending) ' 'or -1 (descending)') cursor = self._list(queue, project, limit=1, sort=sort) try: message = next(next(cursor)) except StopIteration: raise errors.QueueIsEmpty(queue, project) return message def get(self, queue, message_id, project=None): return self._get(queue, message_id, project) def _get(self, queue, message_id, project=None, check_queue=True): if check_queue and not self._queue_ctrl.exists(queue, project): raise errors.QueueDoesNotExist(queue, project) now = timeutils.utcnow_ts(True) headers, msg = self._find_message(queue, message_id, project) return utils._message_to_json(message_id, msg, headers, now) def _find_message(self, queue, message_id, project): try: return self._client.get_object( utils._message_container(queue, project), message_id) except swiftclient.ClientException as exc: if exc.http_status == 404: raise errors.MessageDoesNotExist(message_id, queue, project) else: raise def bulk_delete(self, queue, message_ids, project=None, claim_ids=None): for message_id in message_ids: try: if claim_ids: msg = self._get(queue, message_id, project) if not msg['claim_id']: raise errors.MessageNotClaimed(message_id) if msg['claim_id'] not in claim_ids: raise errors.ClaimDoesNotMatch(msg['claim_id'], queue, project) self._delete(queue, message_id, project) except errors.MessageDoesNotExist: pass def bulk_get(self, queue, message_ids, project=None): if not self._queue_ctrl.exists(queue, project): return for id in message_ids: try: yield self._get(queue, id, project, check_queue=False) except errors.MessageDoesNotExist: pass def post(self, queue, messages, client_uuid, project=None): # TODO(flwang): It would be nice if we can create a middleware in Swift # to accept a json list so that Zaqar can create objects in bulk. 
return [self._create_msg(queue, m, client_uuid, project) for m in messages] def _create_msg(self, queue, msg, client_uuid, project): slug = str(uuid.uuid1()) now = timeutils.utcnow_ts() message = {'body': msg.get('body', {}), 'claim_id': None, 'ttl': msg['ttl'], 'claim_count': 0, 'delay_expires': now + msg.get('delay', 0)} if self.driver.conf.enable_checksum: message['checksum'] = s_utils.get_checksum(msg.get('body', None)) contents = jsonutils.dumps(message) utils._put_or_create_container( self._client, utils._message_container(queue, project), slug, contents=contents, content_type='application/json', headers={ 'x-object-meta-clientid': str(client_uuid), 'x-delete-after': msg['ttl']}) return slug def delete(self, queue, message_id, project=None, claim=None): claim_ctrl = self.driver.claim_controller try: msg = self._get(queue, message_id, project) except (errors.QueueDoesNotExist, errors.MessageDoesNotExist): return if claim is None: if msg['claim_id']: claim_obj = claim_ctrl._get(queue, msg['claim_id'], project) if claim_obj is not None and claim_obj['ttl'] > 0: raise errors.MessageIsClaimed(message_id) else: # Check if the claim does exist claim_ctrl._exists(queue, claim, project) if not msg['claim_id']: raise errors.MessageNotClaimed(message_id) elif msg['claim_id'] != claim: raise errors.MessageNotClaimedBy(message_id, claim) self._delete(queue, message_id, project) def _delete(self, queue, message_id, project=None): try: self._client.delete_object( utils._message_container(queue, project), message_id) except swiftclient.ClientException as exc: if exc.http_status != 404: raise def pop(self, queue, limit, project=None): # Pop is implemented as a chain of the following operations: # 1. Create a claim. # 2. Delete the messages claimed. # 3. Delete the claim. 
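# (Step 3 is implicit: the claim is created with ttl=1, so its object's
# x-delete-after header lets Swift expire the claim on its own.)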
claim_ctrl = self.driver.claim_controller claim_id, messages = claim_ctrl.create(queue, dict(ttl=1, grace=0), project, limit=limit) message_ids = [message['id'] for message in messages] self.bulk_delete(queue, message_ids, project) return messages class MessageQueueHandler(object): def __init__(self, driver, control_driver): self.driver = driver self._client = self.driver.connection self._queue_ctrl = self.driver.queue_controller self._message_ctrl = self.driver.message_controller self._claim_ctrl = self.driver.claim_controller def create(self, name, metadata=None, project=None): self._client.put_container(utils._message_container(name, project)) def delete(self, name, project=None): for container in [utils._message_container(name, project), utils._claim_container(name, project)]: try: headers, objects = self._client.get_container(container) except swiftclient.ClientException as exc: if exc.http_status != 404: raise else: for obj in objects: try: self._client.delete_object(container, obj['name']) except swiftclient.ClientException as exc: if exc.http_status != 404: raise try: self._client.delete_container(container) except swiftclient.ClientException as exc: if exc.http_status not in (404, 409): raise def stats(self, name, project=None): if not self._queue_ctrl.exists(name, project=project): raise errors.QueueDoesNotExist(name, project) total = 0 claimed = 0 container = utils._message_container(name, project) try: _, objects = self._client.get_container(container) except swiftclient.ClientException as exc: if exc.http_status == 404: raise errors.QueueIsEmpty(name, project) newest = None oldest = None now = timeutils.utcnow_ts(True) for obj in objects: try: headers = self._client.head_object(container, obj['name']) except swiftclient.ClientException as exc: if exc.http_status != 404: raise else: created = float(headers['x-timestamp']) created_iso = datetime.datetime.fromtimestamp( created, tz=datetime.timezone.utc).replace( tzinfo=None).strftime('%Y-%m-%dT%H:%M:%SZ') newest = { 'id': obj['name'], 'age': now - created, 'created': created_iso} if oldest is None: oldest = copy.deepcopy(newest) total += 1 if headers.get('x-object-meta-claimid'): claimed += 1 msg_stats = { 'claimed': claimed, 'free': total - claimed, 'total': total, } if newest is not None: msg_stats['newest'] = newest msg_stats['oldest'] = oldest return {'messages': msg_stats} def exists(self, queue, project=None): try: self._client.head_container(utils._message_container(queue, project)) except swiftclient.ClientException as exc: if exc.http_status == 404: return False raise else: return True class MessageTopicHandler(object): def __init__(self, driver, control_driver): self.driver = driver self._client = self.driver.connection self._topic_ctrl = self.driver.topic_controller self._message_ctrl = self.driver.message_controller def create(self, name, metadata=None, project=None): self._client.put_container(utils._message_container(name, project)) def delete(self, name, project=None): for container in [utils._message_container(name, project)]: try: headers, objects = self._client.get_container(container) except swiftclient.ClientException as exc: if exc.http_status != 404: raise else: for obj in objects: try: self._client.delete_object(container, obj['name']) except swiftclient.ClientException as exc: if exc.http_status != 404: raise try: self._client.delete_container(container) except swiftclient.ClientException as exc: if exc.http_status not in (404, 409): raise def stats(self, name, project=None): if not 
self._topic_ctrl.exists(name, project=project): raise errors.TopicDoesNotExist(name, project) total = 0 container = utils._message_container(name, project) try: _, objects = self._client.get_container(container) except swiftclient.ClientException as exc: if exc.http_status == 404: raise errors.QueueIsEmpty(name, project) newest = None oldest = None now = timeutils.utcnow_ts(True) for obj in objects: try: headers = self._client.head_object(container, obj['name']) except swiftclient.ClientException as exc: if exc.http_status != 404: raise else: created = float(headers['x-timestamp']) created_iso = datetime.datetime.fromtimestamp( created, tz=datetime.timezone.utc).replace( tzinfo=None).strftime('%Y-%m-%dT%H:%M:%SZ') newest = { 'id': obj['name'], 'age': now - created, 'created': created_iso} if oldest is None: oldest = copy.deepcopy(newest) total += 1 msg_stats = { 'total': total, } if newest is not None: msg_stats['newest'] = newest msg_stats['oldest'] = oldest return {'messages': msg_stats} def exists(self, topic, project=None): try: self._client.head_container(utils._message_container(topic, project)) except swiftclient.ClientException as exc: if exc.http_status == 404: return False raise else: return True ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/swift/subscriptions.py0000664000175100017510000001550315033040005022141 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. import functools from oslo_serialization import jsonutils from oslo_utils import uuidutils import swiftclient from urllib.parse import quote_plus from zaqar import storage from zaqar.storage import errors from zaqar.storage.swift import utils class SubscriptionController(storage.Subscription): """Implements subscription resource operations with swift backend. Subscriptions are scoped by queue and project. 
subscription -> Swift mapping: +----------------+---------------------------------------+ | Attribute | Storage location | +----------------+---------------------------------------+ | Sub UUID | Object name | +----------------+---------------------------------------+ | Queue Name | Container name prefix | +----------------+---------------------------------------+ | Project name | Container name prefix | +----------------+---------------------------------------+ | Created time | Object Creation Time | +----------------+---------------------------------------+ | Sub options | Object content | +----------------+---------------------------------------+ """ def __init__(self, *args, **kwargs): super(SubscriptionController, self).__init__(*args, **kwargs) self._client = self.driver.connection def list(self, queue, project=None, marker=None, limit=storage.DEFAULT_SUBSCRIPTIONS_PER_PAGE): container = utils._subscription_container(queue, project) try: _, objects = self._client.get_container(container, limit=limit, marker=marker) except swiftclient.ClientException as exc: if exc.http_status == 404: objects = [] else: raise marker_next = {} yield utils.SubscriptionListCursor( objects, marker_next, functools.partial(self._client.get_object, container)) yield marker_next and marker_next['next'] def get(self, queue, subscription_id, project=None): container = utils._subscription_container(queue, project) try: headers, data = self._client.get_object(container, subscription_id) except swiftclient.ClientException as exc: if exc.http_status == 404: raise errors.SubscriptionDoesNotExist(subscription_id) raise return utils._subscription_to_json(data, headers) def create(self, queue, subscriber, ttl, options, project=None): sub_container = utils._subscriber_container(queue, project) slug = uuidutils.generate_uuid() try: utils._put_or_create_container( self._client, sub_container, quote_plus(subscriber), contents=slug, headers={'x-delete-after': ttl, 'if-none-match': '*'}) except swiftclient.ClientException as exc: if exc.http_status == 412: return raise container = utils._subscription_container(queue, project) data = {'id': slug, 'source': queue, 'subscriber': subscriber, 'options': options, 'ttl': ttl, 'confirmed': False} utils._put_or_create_container( self._client, container, slug, contents=jsonutils.dumps(data), content_type='application/json', headers={'x-delete-after': ttl}) return slug def update(self, queue, subscription_id, project=None, **kwargs): container = utils._subscription_container(queue, project) data = self.get(queue, subscription_id, project) data.pop('age') ttl = data['ttl'] if 'subscriber' in kwargs: sub_container = utils._subscriber_container(queue, project) try: self._client.put_object( sub_container, quote_plus(kwargs['subscriber']), contents=subscription_id, headers={'x-delete-after': ttl, 'if-none-match': '*'}) except swiftclient.ClientException as exc: if exc.http_status == 412: raise errors.SubscriptionAlreadyExists() raise self._client.delete_object(sub_container, quote_plus(data['subscriber'])) data.update(kwargs) self._client.put_object(container, subscription_id, contents=jsonutils.dumps(data), content_type='application/json', headers={'x-delete-after': ttl}) def exists(self, queue, subscription_id, project=None): container = utils._subscription_container(queue, project) return self._client.head_object(container, subscription_id) def delete(self, queue, subscription_id, project=None): try: data = self.get(queue, subscription_id, project) except errors.SubscriptionDoesNotExist: 
return sub_container = utils._subscriber_container(queue, project) try: self._client.delete_object(sub_container, quote_plus(data['subscriber'])) except swiftclient.ClientException as exc: if exc.http_status != 404: raise container = utils._subscription_container(queue, project) try: self._client.delete_object(container, subscription_id) except swiftclient.ClientException as exc: if exc.http_status != 404: raise def get_with_subscriber(self, queue, subscriber, project=None): sub_container = utils._subscriber_container(queue, project) headers, obj = self._client.get_object(sub_container, quote_plus(subscriber)) return self.get(queue, obj, project) def confirm(self, queue, subscription_id, project=None, confirmed=True): self.update(queue, subscription_id, project, confirmed=confirmed) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/swift/utils.py0000664000175100017510000001206115033040005020366 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_serialization import jsonutils from oslo_utils import timeutils import swiftclient def _message_container(queue, project=None): return "zaqar_message:%s:%s" % (queue, project) def _claim_container(queue=None, project=None): return "zaqar_claim:%s:%s" % (queue, project) def _subscription_container(queue, project=None): return "zaqar_subscription:%s:%s" % (queue, project) def _subscriber_container(queue, project=None): return "zaqar_subscriber:%s:%s" % (queue, project) def _put_or_create_container(client, *args, **kwargs): """PUT a swift object to a container that may not exist Takes the exact arguments of swiftclient.put_object but will autocreate a container that doesn't exist """ try: client.put_object(*args, **kwargs) except swiftclient.ClientException as e: if e.http_status == 404: # Because of lazy creation, the container may be used by different # clients and cause cache problem. Retrying object creation a few # times should fix this. for i in range(5): client.put_container(args[0]) try: client.put_object(*args, **kwargs) except swiftclient.ClientException as ex: if ex.http_status != 404: raise else: break else: # If we got there, we ignored the 5th exception, so the # exception context will be set. raise else: raise def _message_to_json(message_id, msg, headers, now): msg = jsonutils.loads(msg) return { 'id': message_id, 'age': now - float(headers['x-timestamp']), 'ttl': msg['ttl'], 'body': msg['body'], 'claim_id': msg['claim_id'], 'claim_count': msg.get('claim_count', 0) } def _subscription_to_json(sub, headers): sub = jsonutils.loads(sub) now = timeutils.utcnow_ts(True) return {'id': sub['id'], 'age': now - float(headers['x-timestamp']), 'source': sub['source'], 'subscriber': sub['subscriber'], 'ttl': sub['ttl'], 'options': sub['options'], 'confirmed': sub['confirmed']} def _filter_messages(messages, filters, marker, get_object, list_objects, limit): """Create a filtering iterator over a list of messages. 
The function accepts a list of filters to be filtered before the message can be included as a part of the reply. """ now = timeutils.utcnow_ts(True) for msg in messages: if msg is None: continue marker['next'] = msg['name'] try: headers, obj = get_object(msg['name']) except swiftclient.ClientException as exc: if exc.http_status == 404: continue raise obj = jsonutils.loads(obj) for should_skip in filters: if should_skip(obj, headers): break else: limit -= 1 yield { 'id': marker['next'], 'ttl': obj['ttl'], 'client_uuid': headers['x-object-meta-clientid'], 'body': obj['body'], 'age': now - float(headers['x-timestamp']), 'claim_id': obj['claim_id'], 'claim_count': obj.get('claim_count', 0), } if limit <= 0: break if limit > 0 and marker: # We haven't reached the limit, let's try to get some more messages _, objects = list_objects(marker=marker['next']) if not objects: return for msg in _filter_messages(objects, filters, marker, get_object, list_objects, limit): yield msg class SubscriptionListCursor(object): def __init__(self, objects, marker_next, get_object): self.objects = iter(objects) self.marker_next = marker_next self.get_object = get_object def __iter__(self): return self def next(self): while True: curr = next(self.objects) self.marker_next['next'] = curr['name'] try: headers, sub = self.get_object(curr['name']) except swiftclient.ClientException as exc: if exc.http_status == 404: continue raise return _subscription_to_json(sub, headers) def __next__(self): return self.next() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/storage/utils.py0000664000175100017510000002035615033040005017240 0ustar00mylesmyles# Copyright (c) 2013 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. import copy import hashlib from oslo_config import cfg from oslo_log import log from oslo_serialization import jsonutils from osprofiler import profiler import stevedore from urllib import parse as urllib_parse from zaqar.common import errors from zaqar.common import utils from zaqar.storage import configuration LOG = log.getLogger(__name__) def dynamic_conf(uri, options, conf=None): """Given metadata, yields a dynamic configuration. :param uri: pool location :type uri: str :param options: additional pool metadata :type options: dict :param conf: Optional conf object to copy :type conf: `oslo_config.cfg.ConfigOpts` :returns: Configuration object suitable for constructing storage drivers :rtype: oslo_config.cfg.ConfigOpts """ storage_type = urllib_parse.urlparse(uri).scheme # NOTE(cpp-cabrera): parse storage-specific opts: # 'drivers:storage:{type}' options['uri'] = uri storage_opts = utils.dict_to_conf(options) storage_group = 'drivers:message_store:%s' % storage_type # NOTE(cpp-cabrera): register those options! 
if conf is None: conf = cfg.ConfigOpts() else: conf_wrap = configuration.Configuration(conf) conf = copy.copy(conf_wrap) if storage_group not in conf: conf.register_opts(storage_opts, group=storage_group) if 'drivers' not in conf: # NOTE(cpp-cabrera): parse general opts: 'drivers' driver_opts = utils.dict_to_conf({'message_store': storage_type}) conf.register_opts(driver_opts, group=u'drivers') conf.set_override('message_store', storage_type, 'drivers') for opt in options: if opt in conf[storage_group]: conf.set_override(opt, options[opt], group=storage_group) return conf def load_storage_impl(uri, control_mode=False, default_store=None): """Loads a storage driver implementation and returns it. :param uri: The connection uri to parse and load a driver for. :param control_mode: (Default False). Determines which driver type to load; if False, the data driver is loaded. If True, the control driver is loaded. :param default_store: The default store to load if no scheme is parsed. """ mode = 'control' if control_mode else 'data' driver_type = 'zaqar.{0}.storage'.format(mode) # NOTE(wanghao): In Python 3.9, urlparse returns 'localhost' as the scheme # (instead of '' as in Python 3.8) when the uri string is 'localhost:xxxxx', # so that case needs to be handled here. storage_type = urllib_parse.urlparse(uri).scheme if storage_type == '' or storage_type == 'localhost': storage_type = default_store try: mgr = stevedore.DriverManager(namespace=driver_type, name=storage_type, invoke_on_load=False) return mgr.driver except Exception as exc: LOG.exception('Error loading storage driver') raise errors.InvalidDriver(exc) def load_storage_driver(conf, cache, storage_type=None, control_mode=False, control_driver=None): """Loads a storage driver and returns it. The driver's initializer will be passed conf and cache as its positional args. :param conf: Configuration instance to use for loading the driver. Must include a 'drivers' group. :param cache: Cache instance that the driver can (optionally) use to reduce latency for some operations. :param storage_type: The storage_type to load. If None, then the `drivers` option will be used. :param control_mode: (Default False). Determines which driver type to load; if False, the data driver is loaded. If True, the control driver is loaded. :param control_driver: (Default None). The control driver instance to pass to the storage driver. Needed to access the queue controller, mainly. """ if control_mode: mode = 'control' storage_type = storage_type or conf['drivers'].management_store else: mode = 'data' storage_type = storage_type or conf['drivers'].message_store driver_type = 'zaqar.{0}.storage'.format(mode) _invoke_args = (conf, cache) if control_driver is not None: _invoke_args = (conf, cache, control_driver) try: mgr = stevedore.DriverManager(namespace=driver_type, name=storage_type, invoke_on_load=True, invoke_args=_invoke_args) if conf.profiler.enabled: if ((mode == "control" and conf.profiler.trace_management_store) or (mode == "data" and conf.profiler.trace_message_store)): trace_name = '{0}_{1}_driver'.format(storage_type, mode) return profiler.trace_cls(trace_name, trace_private=True)(mgr.driver) else: return mgr.driver except Exception as exc: LOG.exception('Failed to load "%s" driver for "%s"', driver_type, storage_type) raise errors.InvalidDriver(exc) def keyify(key, iterable): """Make an iterator from an iterable of dicts, whose items are compared by a key.
:param key: A key that exists in every dict in the iterable :param iterable: The input iterable object """ class Keyed(object): def __init__(self, obj): self.obj = obj def __eq__(self, other): return self.obj[key] == other.obj[key] def __ne__(self, other): return self.obj[key] != other.obj[key] def __lt__(self, other): return self.obj[key] < other.obj[key] def __le__(self, other): return self.obj[key] <= other.obj[key] def __gt__(self, other): return self.obj[key] > other.obj[key] def __ge__(self, other): return self.obj[key] >= other.obj[key] for item in iterable: yield Keyed(item) def can_connect(uri, conf=None): """Given a URI, verifies whether it's possible to connect to it. :param uri: connection string to a storage endpoint :type uri: str :returns: True if it is possible to connect, else False :rtype: bool """ # NOTE(cabrera): create a mock configuration containing only # the URI field. This should be sufficient to initialize a # storage driver. conf = dynamic_conf(uri, {}, conf=conf) storage_type = urllib_parse.urlparse(uri).scheme try: ctrl = load_storage_driver(conf, None, storage_type=conf.drivers.management_store, control_mode=True) driver = load_storage_driver(conf, None, storage_type=storage_type, control_driver=ctrl) return driver.is_alive() except Exception as exc: LOG.debug('Can\'t connect to: %s \n%s', uri, exc) return False def get_checksum(body, algorithm='MD5'): """Computes the message body checksum using the given algorithm. :param body: The message body. :type body: str :param algorithm: The algorithm type, default is MD5. :type algorithm: str :returns: The message body checksum. :rtype: str """ checksum = '%s:' % algorithm if body is None: return '' else: checksum_body = jsonutils.dump_as_bytes(body) # TODO(yangzhenyu): We may support other algorithms in future # versions, including SHA1, SHA256, SHA512, and so on. if algorithm == 'MD5': md5 = hashlib.md5() md5.update(checksum_body) checksum += md5.hexdigest() return checksum ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5760136 zaqar-20.1.0.dev29/zaqar/tests/0000775000175100017510000000000015033040026015221 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/__init__.py0000664000175100017510000000170315033040005017330 0ustar00mylesmyles# Copyright (c) 2013 Rackspace Hosting, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License.
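# For reference, a short usage sketch for keyify() from
# zaqar/storage/utils.py above: wrapping each dict lets heapq.merge
# combine several streams that are each sorted by the same key. The
# sample data is illustrative only and assumes zaqar is importable.
import heapq

from zaqar.storage import utils as storage_utils

evens = [{'k': 0}, {'k': 2}, {'k': 4}]
odds = [{'k': 1}, {'k': 3}]
merged = heapq.merge(storage_utils.keyify('k', evens),
                     storage_utils.keyify('k', odds))
assert [item.obj for item in merged] == [
    {'k': 0}, {'k': 1}, {'k': 2}, {'k': 3}, {'k': 4}]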
"""Zaqar Unit-ish Tests""" from zaqar.tests import base from zaqar.tests import helpers SKIP_SLOW_TESTS = helpers.SKIP_SLOW_TESTS RUN_SLOW_TESTS = not SKIP_SLOW_TESTS expect = helpers.expect is_slow = helpers.is_slow requires_mongodb = helpers.requires_mongodb requires_redis = helpers.requires_redis requires_swift = helpers.requires_swift TestBase = base.TestBase ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/base.py0000664000175100017510000001024215033040005016501 0ustar00mylesmyles# Copyright (c) 2013 Rackspace Hosting, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import fixtures from oslo_config import cfg from oslo_log import log from osprofiler import opts import testtools from zaqar.conf import default from zaqar.conf import drivers from zaqar.conf import notification from zaqar.conf import profiler from zaqar.conf import signed_url from zaqar.tests import helpers class TestBase(testtools.TestCase): """Child class of testtools.TestCase for testing Zaqar. Inherit from this and write your test methods. If the child class defines a prepare(self) method, this method will be called before executing each test method. """ config_file = None def setUp(self): super(TestBase, self).setUp() self.useFixture(fixtures.FakeLogger('zaqar')) if os.environ.get('OS_STDOUT_CAPTURE') is not None: stdout = self.useFixture(fixtures.StringStream('stdout')).stream self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout)) if os.environ.get('OS_STDERR_CAPTURE') is not None: stderr = self.useFixture(fixtures.StringStream('stderr')).stream self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr)) if self.config_file: self.config_file = helpers.override_mongo_conf( self.config_file, self) self.conf = self.load_conf(self.config_file) else: self.conf = cfg.ConfigOpts() self.conf(args=[], project='zaqar') self.conf.register_opts(default.ALL_OPTS) self.conf.register_opts(drivers.ALL_OPTS, group=drivers.GROUP_NAME) self.conf.register_opts(notification.ALL_OPTS, group=notification.GROUP_NAME) self.conf.register_opts(signed_url.ALL_OPTS, group=signed_url.GROUP_NAME) opts.set_defaults(self.conf) self.conf.register_opts(profiler.ALL_OPTS, group=profiler.GROUP_NAME) self.redis_url = os.environ.get('ZAQAR_TEST_REDIS_URL', 'redis://127.0.0.1:6379') self.mongodb_url = os.environ.get('ZAQAR_TEST_MONGODB_URL', 'mongodb://127.0.0.1:27017') @classmethod def conf_path(cls, filename): """Returns the full path to the specified Zaqar conf file. :param filename: Name of the conf file to find (e.g., 'wsgi_memory.conf') """ if os.path.exists(filename): return filename return os.path.join(os.environ["ZAQAR_TESTS_CONFIGS_DIR"], filename) @classmethod def load_conf(cls, filename): """Loads `filename` configuration file. :param filename: Name of the conf file to find (e.g., 'wsgi_memory.conf') :returns: Project's config object. 
""" conf = cfg.ConfigOpts() log.register_options(conf) conf(args=[], default_config_files=[cls.conf_path(filename)]) return conf def config(self, group=None, **kw): """Override some configuration values. The keyword arguments are the names of configuration options to override and their values. If a group argument is supplied, the overrides are applied to the specified configuration option group. All overrides are automatically cleared at the end of the current test by the tearDown() method. """ for k, v in kw.items(): self.conf.set_override(k, v, group) def _my_dir(self): return os.path.abspath(os.path.dirname(__file__)) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5770135 zaqar-20.1.0.dev29/zaqar/tests/etc/0000775000175100017510000000000015033040026015774 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/etc/drivers_storage_invalid.conf0000664000175100017510000000027415033040005023553 0ustar00mylesmyles[DEFAULT] debug = False verbose = False admin_mode = False enable_deprecated_api_versions = 1,1.1 [drivers] transport = wsgi message_store = invalid [drivers:transport:wsgi] port = 8888 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/etc/drivers_transport_invalid.conf0000664000175100017510000000025715033040005024144 0ustar00mylesmyles[DEFAULT] debug = False verbose = False enable_deprecated_api_versions = 1,1.1 [drivers] transport = invalid message_store = sqlalchemy [drivers:transport:wsgi] port = 8888 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/etc/functional-tests.conf0000664000175100017510000000033315033040005022141 0ustar00mylesmyles[DEFAULT] # run_tests = True unreliable = True enable_deprecated_api_versions = 1,1.1 [zaqar] # url = http://0.0.0.0:8888 # config = functional-zaqar.conf [headers] # useragent = FunctionalTests # project_id = 123456 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/etc/functional-zaqar.conf0000664000175100017510000000262315033040005022121 0ustar00mylesmyles[DEFAULT] # Show more verbose log output (sets INFO log level output) verbose = True # Show debugging output in logs (sets DEBUG log level output) debug = True enable_deprecated_api_versions = 1,1.1 # Log to this file! ; log_file = /var/log/zaqar/server.log ;auth_strategy = # ================= Syslog Options ============================ # Send logs to syslog (/dev/log) instead of to file specified # by `log_file` ;use_syslog = False # Facility to use. If unset defaults to LOG_USER. ;syslog_log_facility = LOG_LOCAL0 unreliable = True enable_deprecated_api_versions = 1, 1.1 [drivers] # Transport driver module (e.g., wsgi) transport = wsgi # Storage driver module (e.g., mongodb, sqlalchemy) message_store = mongodb [drivers:transport:wsgi] bind = 127.0.0.1 port = 8888 [limits:transport] # The maximum number of queue records per page when listing queues ;max_queues_per_page = 20 # Maximum number of messages per page when listing messages. ;max_messages_per_page = 20 # Maximum number of messages that can be claimed or popped at a time. 
;max_messages_per_claim_or_pop = 20 # Expiration limits; the minimal values are all 60 (seconds) ;max_message_ttl = 1209600 ;max_claim_ttl = 43200 ;max_claim_grace = 43200 # Maximum size in bytes allowed for queue metadata and bulk/single # message post bodies (including whitespace and envelope fields). ;max_queue_metadata = 65536 ;max_messages_post_size = 262144 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/etc/keystone_auth.conf0000664000175100017510000000033015033040005021516 0ustar00mylesmyles[DEFAULT] auth_strategy = keystone debug = False verbose = False enable_deprecated_api_versions = 1,1.1 [drivers] transport = wsgi message_store = mongodb [drivers:transport:wsgi] bind = 0.0.0.0:8888 workers = 20 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/etc/policy.yaml0000664000175100017510000000025215033040005020153 0ustar00mylesmyles# WARNING: Below rules are either deprecated rules # or extra rules in policy file, it is strongly # recommended to switch to new rules. "default": "rule:admin_or_owner" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/etc/websocket_mongodb.conf0000664000175100017510000000060615033040005022335 0ustar00mylesmyles[DEFAULT] unreliable = True enable_deprecated_api_versions = 1,1.1 [drivers] # Transport driver to use (string value) transport = websocket # Storage driver to use (string value) message_store = mongodb [drivers:management_store:mongodb] # Mongodb Connection URI uri = mongodb://127.0.0.1:27017 [drivers:message_store:mongodb] # Mongodb Connection URI uri = mongodb://127.0.0.1:27017././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/etc/websocket_mongodb_keystone_auth.conf0000664000175100017510000000061615033040005025300 0ustar00mylesmyles[DEFAULT] auth_strategy = keystone enable_deprecated_api_versions = 1,1.1 [drivers] # Transport driver to use (string value) transport = websocket # Storage driver to use (string value) message_store = mongodb [drivers:management_store:mongodb] # Mongodb Connection URI uri = mongodb://127.0.0.1:27017 [drivers:message_store:mongodb] # Mongodb Connection URI uri = mongodb://127.0.0.1:27017 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/etc/websocket_mongodb_subscriptions.conf0000664000175100017510000000070015033040005025317 0ustar00mylesmyles[DEFAULT] unreliable = True enable_deprecated_api_versions = 1,1.1 [drivers] # Transport driver to use (string value) transport = websocket # Storage driver to use (string value) message_store = mongodb [drivers:management_store:mongodb] # Mongodb Connection URI uri = mongodb://127.0.0.1:27017 [drivers:message_store:mongodb] # Mongodb Connection URI uri = mongodb://127.0.0.1:27017 [storage] message_pipeline = zaqar.notification.notifier././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/etc/wsgi_faulty.conf0000664000175100017510000000030215033040005021170 0ustar00mylesmyles[DEFAULT] debug = False verbose = False enable_deprecated_api_versions = 1,1.1 [drivers] transport = wsgi message_store = faulty management_store = faulty [drivers:transport:wsgi] port = 8888 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 
mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/etc/wsgi_fifo_mongodb.conf0000664000175100017510000000070415033040005022322 0ustar00mylesmyles[DEFAULT] debug = False verbose = False unreliable = True enable_deprecated_api_versions = 1,1.1 [drivers] transport = wsgi message_store = mongodb [drivers:transport:wsgi] port = 8888 [drivers:message_store:mongodb] uri = mongodb.fifo://127.0.0.1:27017 database = message_zaqar_test_fifo max_reconnect_attempts = 3 reconnect_sleep = 0.001 # NOTE(kgriffs): Reduce from the default of 1000 to reduce the # duration of related tests max_attempts = 5 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/etc/wsgi_mongodb.conf0000664000175100017510000000073115033040005021317 0ustar00mylesmyles[DEFAULT] debug = False verbose = False unreliable = True enable_deprecated_api_versions = 1,1.1 [drivers] transport = wsgi message_store = mongodb [drivers:transport:wsgi] port = 8888 [drivers:message_store:mongodb] uri = mongodb://127.0.0.1:27017 database = message_zaqar_test max_reconnect_attempts = 3 reconnect_sleep = 0.001 # NOTE(kgriffs): Reduce from the default of 1000 to reduce the # duration of related tests max_attempts = 5 [signed_url] secret_key = test././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/etc/wsgi_mongodb_default_limits.conf0000664000175100017510000000014415033040005024402 0ustar00mylesmyles[DEFAULT] enable_deprecated_api_versions = 1,1.1 [drivers] transport = wsgi message_store = mongodb././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/etc/wsgi_mongodb_pooled.conf0000664000175100017510000000057515033040005022667 0ustar00mylesmyles[DEFAULT] pooling = True admin_mode = True unreliable = True enable_deprecated_api_versions = 1,1.1 [drivers] transport = wsgi message_store = mongodb [drivers:message_store:mongodb] uri = mongodb://127.0.0.1:27017 database = zaqar_test_pooled [drivers:management_store:mongodb] uri = mongodb://127.0.0.1:27017 database = zaqar_test [pooling:catalog] enable_virtual_pool = True././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/etc/wsgi_mongodb_pooled_disable_virtual_pool.conf0000664000175100017510000000057615033040005027152 0ustar00mylesmyles[DEFAULT] pooling = True admin_mode = True unreliable = True enable_deprecated_api_versions = 1,1.1 [drivers] transport = wsgi message_store = mongodb [drivers:message_store:mongodb] uri = mongodb://127.0.0.1:27017 database = zaqar_test_pooled [drivers:management_store:mongodb] uri = mongodb://127.0.0.1:27017 database = zaqar_test [pooling:catalog] enable_virtual_pool = False././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/etc/wsgi_mongodb_validation.conf0000664000175100017510000000034415033040005023531 0ustar00mylesmyles[DEFAULT] enable_deprecated_api_versions = 1,1.1 [drivers] transport = wsgi message_store = mongodb # Test support for deprecated options [limits:transport] metadata_size_uplimit = 64 [transport] max_messages_post_size = 256 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/etc/wsgi_redis.conf0000664000175100017510000000070615033040005021002 0ustar00mylesmyles[DEFAULT] debug = False verbose = False enable_deprecated_api_versions = 
1,1.1 [drivers] transport = wsgi message_store = redis [drivers:transport:wsgi] port = 8888 [drivers:message_store:redis] uri = redis://127.0.0.1:6379 # NOTE(kgriffs): Reduce from the default of 10 to reduce the # duration of related tests max_reconnect_attempts = 3 # NOTE(kgriffs): Reduce from the default of 1 to reduce the # duration of related tests reconnect_sleep = 0.1././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/etc/wsgi_redis_pooled.conf0000664000175100017510000000057015033040005022343 0ustar00mylesmyles[DEFAULT] pooling = True enable_deprecated_api_versions = 1,1.1 [drivers] transport = wsgi message_store = redis [drivers:message_store:redis] uri = redis://127.0.0.1:6379 max_reconnect_attempts = 3 reconnect_sleep = 1 [drivers:management_store:redis] uri = redis://127.0.0.1:6379 max_reconnect_attempts = 3 reconnect_sleep = 1 [pooling:catalog] enable_virtual_pool = True././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/etc/wsgi_sqlalchemy.conf0000664000175100017510000000036615033040005022040 0ustar00mylesmyles[DEFAULT] debug = False verbose = False admin_mode = False enable_deprecated_api_versions = 1,1.1 [drivers] transport = wsgi message_store = mongodb management_store = sqlalchemy [drivers:transport:wsgi] bind = 0.0.0.0 port = 8888 workers = 20 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/etc/wsgi_sqlalchemy_pooled.conf0000664000175100017510000000042315033040005023374 0ustar00mylesmyles[DEFAULT] pooling = True admin_mode = True enable_deprecated_api_versions = 1,1.1 [drivers] transport = wsgi message_store = mongodb management_store = sqlalchemy [drivers:transport:wsgi] bind = 0.0.0.0 port = 8888 workers = 20 [pooling:catalog] enable_virtual_pool = True././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/etc/wsgi_swift.conf0000664000175100017510000000030515033040005021023 0ustar00mylesmyles[DEFAULT] debug = False verbose = False enable_deprecated_api_versions = 1,1.1 [drivers] transport = wsgi management_store = sqlalchemy message_store = swift [drivers:transport:wsgi] port = 8888 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/faulty_storage.py0000664000175100017510000001120115033040005020613 0ustar00mylesmyles# Copyright (c) 2013 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
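# For reference, a minimal sketch of how test configuration files like
# wsgi_redis.conf above resolve through oslo.config; the option, group,
# and file names mirror that file, and running this assumes the file is
# present in the working directory.
from oslo_config import cfg

conf = cfg.ConfigOpts()
conf.register_opts([cfg.StrOpt('uri')],
                   group='drivers:message_store:redis')
conf(args=[], default_config_files=['wsgi_redis.conf'])
assert conf['drivers:message_store:redis'].uri == 'redis://127.0.0.1:6379'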
from oslo_config import cfg from zaqar import storage _CONFIG_GROUP = 'drivers:message_store:faulty' class DataDriver(storage.DataDriverBase): _DRIVER_OPTIONS = [(_CONFIG_GROUP, [cfg.StrOpt('uri', default='faulty://')])] def __init__(self, conf, cache, control_driver): super(DataDriver, self).__init__(conf, cache, control_driver) def close(self): pass @property def default_options(self): return {} @property def capabilities(self): raise NotImplementedError() def is_alive(self): raise NotImplementedError() def _health(self): raise NotImplementedError() @property def queue_controller(self): return self.control_driver.queue_controller @property def message_controller(self): return MessageController(self) @property def claim_controller(self): return None @property def subscription_controller(self): return None @property def topic_controller(self): return self.control_driver.topic_controller class ControlDriver(storage.ControlDriverBase): def __init__(self, conf, cache): super(ControlDriver, self).__init__(conf, cache) def close(self): pass @property def queue_controller(self): return QueueController(self) @property def catalogue_controller(self): return None @property def pools_controller(self): return None @property def flavors_controller(self): return None @property def topic_controller(self): return TopicController(self) class QueueController(storage.Queue): def __init__(self, driver): pass def _list(self, project=None): raise NotImplementedError() def _get(self, name, project=None): raise NotImplementedError() def get_metadata(self, name, project=None): raise NotImplementedError() def _create(self, name, metadata=None, project=None): raise NotImplementedError() def _exists(self, name, project=None): raise NotImplementedError() def set_metadata(self, name, metadata, project=None): raise NotImplementedError() def _delete(self, name, project=None): raise NotImplementedError() def _stats(self, name, project=None): raise NotImplementedError() def _calculate_resource_count(self, project=None): raise NotImplementedError() class MessageController(storage.Message): def __init__(self, driver): pass def first(self, queue_name, project=None, sort=1): raise NotImplementedError() def get(self, queue, message_id, project=None): raise NotImplementedError() def bulk_get(self, queue, message_ids, project=None): raise NotImplementedError() def list(self, queue, project=None, marker=None, limit=None, echo=False, client_uuid=None): raise NotImplementedError() def post(self, queue, messages, project=None): raise NotImplementedError() def pop(self, queue, pop_limit, project=None): raise NotImplementedError() def delete(self, queue, message_id, project=None, claim=None): raise NotImplementedError() def bulk_delete(self, queue, message_ids, project=None, claim_ids=None): raise NotImplementedError() class TopicController(storage.Topic): def __init__(self, driver): pass def _list(self, project=None): raise NotImplementedError() def _get(self, name, project=None): raise NotImplementedError() def get_metadata(self, name, project=None): raise NotImplementedError() def _create(self, name, metadata=None, project=None): raise NotImplementedError() def _exists(self, name, project=None): raise NotImplementedError() def set_metadata(self, name, metadata, project=None): raise NotImplementedError() def _delete(self, name, project=None): raise NotImplementedError() def _stats(self, name, project=None): raise NotImplementedError() ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5770135 
zaqar-20.1.0.dev29/zaqar/tests/functional/0000775000175100017510000000000015033040026017363 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/functional/__init__.py0000664000175100017510000000000015033040005021457 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/functional/base.py0000664000175100017510000003375715033040005020660 0ustar00mylesmyles# Copyright (c) 2013 Rackspace, Inc. # Copyright (c) 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import multiprocessing import os import jsonschema from oslo_utils import timeutils from zaqar.api.v1_1 import response as response_v1_1 from zaqar.api.v2 import response as response_v2 from zaqar import bootstrap from zaqar.storage import mongodb from zaqar.storage.redis import driver as redis from zaqar import tests as testing from zaqar.tests.functional import config from zaqar.tests.functional import helpers from zaqar.tests.functional import http from zaqar.tests import helpers as base_helpers from zaqar.transport import base as transport_base # TODO(flaper87): This is necessary to register # wsgi configs and won't be permanent. It'll be # refactored as part of the work for this blueprint from zaqar.transport import validation from zaqar.transport import wsgi # noqa # TODO(kgriffs): Run functional tests in a devstack gate job and # set this using an environment variable or something. # # TODO(kgriffs): Find a more general way to do this; we seem to be # using this environ flag pattern over and over again. _TEST_INTEGRATION = os.environ.get('ZAQAR_TEST_INTEGRATION') is not None class FunctionalTestBase(testing.TestBase): server = None server_class = None config_file = None class_bootstrap = None # NOTE(Eva-i): ttl_gc_interval is the known maximum time interval between # automatic resource TTL expirations. Depends on message store back end. class_ttl_gc_interval = None wipe_dbs_projects = set([]) def setUp(self): super(FunctionalTestBase, self).setUp() # NOTE(flaper87): Config can't be a class # attribute because it may be necessary to # modify it at runtime, which would affect # other running instances.
self.cfg = config.load_config() if not self.cfg.run_tests: self.skipTest("Functional tests disabled") config_file = self.config_file or self.cfg.zaqar.config config_file = base_helpers.override_mongo_conf(config_file, self) self.mconf = self.load_conf(config_file) validator = validation.Validator(self.mconf) self.limits = validator._limits_conf self.resource_defaults = transport_base.ResourceDefaults(self.mconf) # Always register options self.__class__.class_bootstrap = bootstrap.Bootstrap(self.mconf) self.class_bootstrap.transport datadriver = self.class_bootstrap.storage._storage if isinstance(datadriver, redis.DataDriver): self.__class__.class_ttl_gc_interval = 1 if isinstance(datadriver, mongodb.DataDriver): # NOTE(kgriffs): MongoDB's TTL scavenger only runs once a minute self.__class__.class_ttl_gc_interval = 60 if _TEST_INTEGRATION: if not (self.server and self.server.is_alive()): self.server = self.server_class() self.server.start(self.mconf) self.addCleanup(self.server.process.terminate) self.client = http.Client() else: if self.server_class == ZaqarAdminServer: self.mconf.pooling = True self.mconf.admin_mode = True self.addCleanup(self.class_bootstrap.storage.close) self.addCleanup(self.class_bootstrap.control.close) self.client = http.WSGIClient(self.class_bootstrap.transport.app) self.headers = helpers.create_zaqar_headers(self.cfg) self.headers_response_with_body = {'location', 'content-type'} self.client.set_headers(self.headers) # Store information required for cleaning databases after # execution of the test class self.wipe_dbs_projects.add(self.headers["X-Project-ID"]) def tearDown(self): super(FunctionalTestBase, self).tearDown() # The project might have changed during test case execution. # Let's add it again to the set. self.wipe_dbs_projects.add(self.headers["X-Project-ID"]) @staticmethod def _if_mongo_datadriver_drop_dbs(driver): """Drops MongoDB datadriver's databases. :param driver: instance of zaqar.storage.mongodb.driver.DataDriver """ if not isinstance(driver, mongodb.DataDriver): return for db in driver.message_databases: driver.connection.drop_database(db) subscription_db = driver.subscriptions_database driver.connection.drop_database(subscription_db) @staticmethod def _if_mongo_controldriver_drop_dbs(driver): """Drops all MongoDB controldriver's databases. :param driver: instance of zaqar.storage.mongodb.driver.ControlDriver """ if not isinstance(driver, mongodb.ControlDriver): return driver.connection.drop_database(driver.database) driver.connection.drop_database(driver.queues_database) @classmethod def _pooling_drop_dbs_by_project(cls, xproject): """Finds all pool drivers by project, drops all their databases. Assumes that pooling is enabled.
:param xproject: project name to use for pool drivers search """ datadriver = cls.class_bootstrap.storage._storage controldriver = cls.class_bootstrap.control # Let's get a list of all queues for the project queue_generator = controldriver.queue_controller.list(project=xproject) queues = list(next(queue_generator)) # Let's extract all queue names from the list of queues queue_names = [q['name'] for q in queues] # Finally, let's use the queue names to look up each pool's datadriver catalog = datadriver._pool_catalog for queue_name in queue_names: pool_pipe_driver = catalog.lookup(queue_name, project=xproject) pool_datadriver = pool_pipe_driver._storage if pool_datadriver is not None: # Let's delete the queue, so the next invocation of # pooling_catalog.lookup() will not recreate the pool driver controldriver.queue_controller.delete(queue_name) # Let's drop the pool's databases cls._if_mongo_datadriver_drop_dbs(pool_datadriver) @classmethod def tearDownClass(cls): """Cleans up after test class execution. Drops all databases left. Closes connections to databases. """ # Bootstrap can be None if all test cases were skipped, so nothing to # clean if cls.class_bootstrap is None: return datadriver = cls.class_bootstrap.storage._storage controldriver = cls.class_bootstrap.control if cls.class_bootstrap.conf.pooling: # Pooling detected, let's drop pooling-specific databases for p in cls.wipe_dbs_projects: # This will find all pool databases by project and drop them cls._pooling_drop_dbs_by_project(p) controldriver.pools_controller.drop_all() controldriver.flavors_controller.drop_all() else: # No pooling detected, let's just drop datadriver's databases cls._if_mongo_datadriver_drop_dbs(datadriver) cls.class_bootstrap.storage.close() # Let's drop controldriver's databases controldriver.catalogue_controller.drop_all() cls._if_mongo_controldriver_drop_dbs(controldriver) controldriver.close() def assertIsSubset(self, required_values, actual_values): """Checks if a set of values is a subset of another. :param required_values: the values that must all be present (the subset). :param actual_values: the values actually observed (the superset). """ form = 'Missing Header(s) - {0}' self.assertTrue(required_values.issubset(actual_values), msg=form.format((required_values - actual_values))) def assertMessageCount(self, actualCount, expectedCount): """Checks if number of messages returned <= limit :param expectedCount: limit value passed in the url (OR) default(10). :param actualCount: number of messages returned in the API response. """ msg = ('More Messages returned than allowed: expected count = {0}' ', actual count = {1}'.format(expectedCount, actualCount)) self.assertLessEqual(actualCount, expectedCount, msg) def assertQueueStats(self, result_json, claimed): """Checks the Queue Stats results :param result_json: json response returned for Queue Stats. :param claimed: expected number of claimed messages. """ total = self.limits.max_messages_per_claim_or_pop free = total - claimed self.assertEqual(claimed, result_json['messages']['claimed']) self.assertEqual(free, result_json['messages']['free']) self.assertEqual(total, result_json['messages']['total']) if 'oldest' in result_json['messages']: oldest_message = result_json['messages']['oldest'] self.verify_message_stats(oldest_message) newest_message = result_json['messages']['newest'] self.verify_message_stats(newest_message) def assertSchema(self, response, expectedSchemaName): """Compares the json response with the expected schema :param response: response json returned by the API.
:type response: dict :param expectedSchemaName: name of the expected schema for the response. :type expectedSchemaName: string """ try: expectedSchema = self.response.get_schema(expectedSchemaName) jsonschema.validate(response, expectedSchema) except jsonschema.ValidationError as message: assert False, message def verify_message_stats(self, message): """Verifies the oldest & newest message stats :param message: oldest (or) newest message returned by queue_name/stats. """ expected_keys = ['age', 'created', 'href'] response_keys = message.keys() response_keys = sorted(response_keys) self.assertEqual(expected_keys, response_keys) # Verify that age has valid values age = message['age'] msg = 'Invalid Age {0}'.format(age) self.assertLessEqual(0, age, msg) self.assertLessEqual(age, self.limits.max_message_ttl, msg) # Verify that GET on href returns 200 path = message['href'] result = self.client.get(path) self.assertEqual(200, result.status_code) # Verify that the created time falls within a recent window # NOTE(malini): The messages are created during the test. created_time = message['created'] created_time = timeutils.normalize_time( timeutils.parse_isotime(created_time)) now = timeutils.utcnow() delta = timeutils.delta_seconds(before=created_time, after=now) # NOTE(malini): The 'int()' below is a workaround for the small time # difference between julianday & UTC. # (needed to pass this test on sqlite driver) delta = int(delta) msg = ('Invalid Time Delta {0}, Created time {1}, Now {2}' .format(delta, created_time, now)) self.assertLessEqual(0, delta, msg) self.assertLessEqual(delta, 6000, msg) class Server(object, metaclass=abc.ABCMeta): name = "zaqar-functional-test-server" def __init__(self): self.process = None @abc.abstractmethod def get_target(self, conf): """Prepares the target object This method is meant to initialize server's bootstrap and return a callable to run the server. :param conf: The config instance for the bootstrap class :returns: A callable object """ def is_alive(self): """Returns True IFF the server is running.""" if self.process is None: return False return self.process.is_alive() def start(self, conf): """Starts the server process. :param conf: The config instance to use for the new process :returns: A `multiprocessing.Process` instance """ # TODO(flaper87): Re-use running instances. target = self.get_target(conf) if not callable(target): raise RuntimeError("Target not callable") self.process = multiprocessing.Process(target=target, name=self.name) self.process.daemon = True self.process.start() # NOTE(flaper87): Give it a second # to boot. self.process.join(1) return self.process def stop(self): """Terminates a process This method kills a process by calling `terminate`. Note that children of this process won't be terminated but become orphaned.
""" self.process.terminate() class ZaqarServer(Server): name = "zaqar-wsgiref-test-server" def get_target(self, conf): server = bootstrap.Bootstrap(conf) return server.run class ZaqarAdminServer(Server): name = "zaqar-admin-wsgiref-test-server" def get_target(self, conf): conf.admin_mode = True server = bootstrap.Bootstrap(conf) return server.run class V1_1FunctionalTestBase(FunctionalTestBase): def setUp(self): super(V1_1FunctionalTestBase, self).setUp() self.response = response_v1_1.ResponseSchema(self.limits) class V2FunctionalTestBase(FunctionalTestBase): def setUp(self): super(V2FunctionalTestBase, self).setUp() self.response = response_v2.ResponseSchema(self.limits) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/functional/config.py0000664000175100017510000000256315033040005021205 0ustar00mylesmyles# Copyright (c) 2013 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os from oslo_config import cfg from oslo_log import log _DEFAULT = ( cfg.BoolOpt("run_tests", default=True), ) _ZAQAR_OPTIONS = ( cfg.StrOpt("url", default="http://127.0.0.1:8888"), cfg.StrOpt("config", default="functional-zaqar.conf"), ) _HEADERS_OPTIONS = ( cfg.StrOpt("user_agent", default="FunctionalTests"), cfg.StrOpt("project_id", default="123456"), ) def load_config(): conf = cfg.ConfigOpts() conf.register_opts(_DEFAULT) conf.register_opts(_ZAQAR_OPTIONS, group="zaqar") conf.register_opts(_HEADERS_OPTIONS, group="headers") log.register_options(conf) conf_path = os.path.join(os.environ["ZAQAR_TESTS_CONFIGS_DIR"], "functional-tests.conf") conf(args=[], default_config_files=[conf_path]) return conf ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/functional/helpers.py0000664000175100017510000000670515033040005021404 0ustar00mylesmyles# Copyright (c) 2013 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import random import string import uuid def create_zaqar_headers(conf): """Returns headers to be used for all Zaqar requests.""" headers = { "User-Agent": conf.headers.user_agent, "Accept": "application/json", "X-Project-ID": conf.headers.project_id, "Client-ID": str(uuid.uuid1()), } return headers def generate_dict(dict_length): """Returns dictionary of specified length. Key:Value is random data. 
:param dict_length: length of the dictionary """ return dict([(generate_random_string(), generate_random_string()) for _ in range(dict_length)]) def generate_random_string(length=10): """Returns an ASCII string of specified length.""" chars = string.ascii_letters + string.digits return ''.join(random.choice(chars) for i in range(length)) def single_message_body(messagesize=2, default_ttl=False, ttl=None): """Returns message body for one message. :param messagesize: Size of the message body to generate (default 2) :param default_ttl: Set to True to not set an explicit TTL value in the message request, in which case the server will use a default value (default False). Note that default TTL is only supported in v1.1 of the API. :param ttl: Number of seconds to provide as the TTL for each message. If not specified, a random value is chosen in the range: (60 <= TTL <= 1209600). If `default_ttl` is True, the `ttl` param is ignored. """ message_body = {} message_body['body'] = generate_dict(messagesize) if not default_ttl: if ttl is not None: message_body['ttl'] = ttl else: message_body['ttl'] = random.randint(60, 1209600) return message_body def create_message_body(messagecount, **kwargs): """Returns request body for message-posting tests. :param messagecount: Number of messages to create :param **kwargs: Same as for `single_message_body` """ return [single_message_body(**kwargs) for i in range(messagecount)] def create_message_body_v1_1(messagecount, **kwargs): """Returns request body for message-posting tests. :param messagecount: Number of messages to create :param **kwargs: Same as for `single_message_body` """ return { "messages": [single_message_body(**kwargs) for i in range(messagecount)] } def create_pool_body(**kwargs): pool_body = { 'weight': kwargs['weight'], 'uri': kwargs['uri'], 'options': { 'max_retry_sleep': 1, 'partitions': 8 } } return pool_body def create_subscription_body(subscriber='http://fake:8080', ttl=600, options_key='funny', options_value='no'): options = {options_key: options_value} return {'subscriber': subscriber, 'options': options, 'ttl': ttl} ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/functional/http.py0000664000175100017510000001436315033040005020720 0ustar00mylesmyles# Copyright (c) 2013 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
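# For reference, a short usage sketch (assuming zaqar is importable) for
# the request-body helpers defined in helpers.py above; message bodies
# are randomly generated on each call.
from zaqar.tests.functional import helpers as func_helpers

body = func_helpers.create_message_body_v1_1(2, ttl=300)
assert list(body) == ['messages']
assert all(msg['ttl'] == 300 for msg in body['messages'])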
import functools from falcon import testing as ftest from oslo_serialization import jsonutils import requests from urllib import parse as urllib_parse def _build_url(method): @functools.wraps(method) def wrapper(self, url='', **kwargs): if not url.startswith("http"): if not self.base_url: raise RuntimeError("Base url not set") url = self.base_url + url or '' return method(self, url, **kwargs) return wrapper class Client(object): def __init__(self): # NOTE(kgriffs): used by @_build_url self.base_url = None self.session = requests.session() def set_base_url(self, base_url): self.base_url = base_url def set_headers(self, headers): self.session.headers.update(headers) @_build_url def get(self, url=None, **kwargs): """Does http GET.""" return self.session.get(url, **kwargs) @_build_url def head(self, url=None, **kwargs): """Does http HEAD.""" return self.session.head(url, **kwargs) @_build_url def post(self, url=None, **kwargs): """Does http POST.""" if "data" in kwargs: kwargs['data'] = jsonutils.dumps(kwargs["data"]) return self.session.post(url, **kwargs) @_build_url def put(self, url=None, **kwargs): """Does http PUT.""" if "data" in kwargs: kwargs['data'] = jsonutils.dumps(kwargs["data"]) return self.session.put(url, **kwargs) @_build_url def delete(self, url=None, **kwargs): """Does http DELETE.""" return self.session.delete(url, **kwargs) @_build_url def patch(self, url=None, **kwargs): """Does http PATCH.""" if "data" in kwargs: kwargs['data'] = jsonutils.dumps(kwargs["data"]) return self.session.patch(url, **kwargs) class ResponseMock(object): """Mocks part of the Requests library's Response object.""" def __init__(self, srmock, wsgi_result): self.status_code = int(srmock.status.partition(' ')[0]) self._body = wsgi_result[0] if wsgi_result else '' self.headers = srmock.headers_dict def json(self): return jsonutils.loads(self._body) class WSGIClient(object): """Same interface as Client, but speaks directly to a WSGI callable.""" def __init__(self, app): # NOTE(kgriffs): used by @_build_url self.base_url = None self.app = app self.headers = {} @staticmethod def _sanitize_headers(headers): # NOTE(kgriffs): Workaround for a little create_environ bug return dict([(key, '' if value is None else value) for key, value in headers.items()]) def _simulate_request(self, url, method='GET', data=None, headers=None, params=None): """Simulate a request. Simulates a WSGI request to the API for testing. :param url: Request path for the desired resource :param method: (Default 'GET') The HTTP method to send :param data: (Default None) A dict that will be serialized to JSON and submitted as the body of the request. May also be a pre-serialized string. :param headers: (Default None) A dict containing extra HTTP headers to send. :param params: (Default None) A dict of parameters to use in the query string for the request. 
:returns: a requests response instance """ if headers is None: headers = self.headers headers = self._sanitize_headers(headers) if data is None: body = '' elif isinstance(data, str): body = data else: body = jsonutils.dumps(data, ensure_ascii=False) parsed_url = urllib_parse.urlparse(url) query = parsed_url.query if params is not None: extra = '&'.join([key + '=' + str(value) for key, value in params.items()]) query += '&' + extra environ = ftest.create_environ(method=method, path=parsed_url.path, query_string=query, headers=headers, body=body) srmock = ftest.StartResponseMock() wsgi_result = self.app(environ, srmock) return ResponseMock(srmock, wsgi_result) def set_base_url(self, base_url): self.base_url = base_url def set_headers(self, headers): self.headers.update(headers) @_build_url def get(self, url=None, **kwargs): """Simulate a GET request.""" kwargs['method'] = 'GET' return self._simulate_request(url=url, **kwargs) @_build_url def head(self, url=None, **kwargs): """Simulate a HEAD request.""" kwargs['method'] = 'HEAD' return self._simulate_request(url=url, **kwargs) @_build_url def post(self, url=None, **kwargs): """Simulate a POST request.""" kwargs['method'] = 'POST' return self._simulate_request(url=url, **kwargs) @_build_url def put(self, url=None, **kwargs): """Simulate a PUT request.""" kwargs['method'] = 'PUT' return self._simulate_request(url=url, **kwargs) @_build_url def delete(self, url=None, **kwargs): """Simulate a DELETE request.""" kwargs['method'] = 'DELETE' return self._simulate_request(url=url, **kwargs) @_build_url def patch(self, url=None, **kwargs): """Simulate a PATCH request.""" kwargs['method'] = 'PATCH' return self._simulate_request(url=url, **kwargs) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5770135 zaqar-20.1.0.dev29/zaqar/tests/functional/websocket/0000775000175100017510000000000015033040026021351 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/functional/websocket/__init__.py0000664000175100017510000000000015033040005023445 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/functional/websocket/test_queues.py0000664000175100017510000000537515033040005024300 0ustar00mylesmyles# Copyright (c) 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
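# For reference, a minimal sketch of driving the WSGIClient defined in
# http.py above with a toy WSGI callable instead of a live server; the
# `app` below is hypothetical and returns a canned JSON body.
from zaqar.tests.functional import http as func_http

def app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'application/json')])
    return [b'{"queues": []}']

client = func_http.WSGIClient(app)
# A full URL skips the base_url requirement; only the path is used.
resp = client.get('http://test/v2/queues')
assert resp.status_code == 200
assert resp.json() == {'queues': []}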
from oslo_serialization import jsonutils from oslo_utils import uuidutils from testtools import testcase import websocket from zaqar.tests.functional import base class TestQueues(base.V1_1FunctionalTestBase): config_file = 'websocket_mongodb.conf' server_class = base.ZaqarServer def setUp(self): if not base._TEST_INTEGRATION: raise testcase.TestSkipped('Only run in integration mode') super(TestQueues, self).setUp() self.project_id = uuidutils.generate_uuid() self.headers = {'Client-ID': uuidutils.generate_uuid(), 'X-Project-ID': self.project_id} self.client = websocket.create_connection('ws://localhost:9000/') self.addCleanup(self.client.close) def test_list_empty(self): self.client.send( jsonutils.dumps({'action': 'queue_list', 'headers': self.headers})) response = jsonutils.loads(self.client.recv()) self.assertEqual( {'body': {'queues': []}, 'headers': {'status': 200}, 'request': {'action': 'queue_list', 'body': {}, 'api': 'v2', 'headers': self.headers}}, response) def test_list(self): self.client.send( jsonutils.dumps({'action': 'queue_create', 'body': {'queue_name': 'my_queue'}, 'headers': self.headers})) response = jsonutils.loads(self.client.recv()) self.assertEqual( {'body': 'Queue my_queue created.', 'headers': {'status': 201}, 'request': {'action': 'queue_create', 'body': {'queue_name': 'my_queue'}, 'api': 'v2', 'headers': self.headers}}, response) self.client.send( jsonutils.dumps({'action': 'queue_list', 'headers': self.headers})) response = jsonutils.loads(self.client.recv()) self.assertEqual( {'body': {'queues': [{'name': 'my_queue'}]}, 'headers': {'status': 200}, 'request': {'action': 'queue_list', 'body': {}, 'api': 'v2', 'headers': self.headers}}, response) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5770135 zaqar-20.1.0.dev29/zaqar/tests/functional/wsgi/0000775000175100017510000000000015033040026020334 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/functional/wsgi/__init__.py0000664000175100017510000000000015033040005022430 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/functional/wsgi/test_versions.py0000664000175100017510000000226415033040005023616 0ustar00mylesmyles# Copyright (c) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
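# NOTE(editor): Illustrative sketch (not shipped code): the home document
# exposes a 'versions' list, which the tests below assert on. With the
# functional http client defined earlier in this package, fetching it looks
# roughly like this (the endpoint URL is an assumption):
#
#     client = http.Client()
#     client.set_base_url('http://localhost:8888/')
#     assert 'versions' in client.get('').json()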
from zaqar.tests.functional import base class TestVersions(base.FunctionalTestBase): """Tests for Versions Resource.""" server_class = base.ZaqarServer def setUp(self): super(TestVersions, self).setUp() self.base_url = "{url}/".format(url=self.cfg.zaqar.url) self.client.set_base_url(self.base_url) def test_get_versions_without_headers(self): result = self.client.get('', headers={}) self.assertIn("versions", result.json()) def test_get_versions_with_headers(self): result = self.client.get('') self.assertIn("versions", result.json()) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5780134 zaqar-20.1.0.dev29/zaqar/tests/functional/wsgi/v1_1/0000775000175100017510000000000015033040026021102 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/functional/wsgi/v1_1/__init__.py0000664000175100017510000000000015033040005023176 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/functional/wsgi/v1_1/test_claims.py0000664000175100017510000002150115033040005023757 0ustar00mylesmyles# Copyright (c) 2014 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
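# NOTE(editor): Illustrative summary, drawn from the tests below, of the
# claim lifecycle these cases exercise, expressed as raw HTTP:
#
#     POST   /v1.1/queues/<q>/claims   {"ttl": 300, "grace": 100}  -> 201
#     GET    <Location from POST>                                  -> 200
#     PATCH  <claim URL>               {"ttl": 300, "grace": 60}   -> 204
#     DELETE <claim URL>                                           -> 204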
import uuid import ddt from zaqar.tests.functional import base from zaqar.tests.functional import helpers @ddt.ddt class TestClaims(base.V1_1FunctionalTestBase): """Tests for Claims.""" server_class = base.ZaqarServer def setUp(self): super(TestClaims, self).setUp() self.headers = helpers.create_zaqar_headers(self.cfg) self.client.headers = self.headers self.queue = uuid.uuid1() self.queue_url = ("{url}/{version}/queues/{queue}".format( url=self.cfg.zaqar.url, version="v1.1", queue=self.queue)) self.client.put(self.queue_url) self.claim_url = self.queue_url + '/claims' self.client.set_base_url(self.claim_url) # Post Messages url = self.queue_url + '/messages' doc = helpers.create_message_body_v1_1( messagecount=self.limits.max_messages_per_page) for i in range(10): self.client.post(url, data=doc) @ddt.data({}, {'limit': 2}) def test_claim_messages(self, params): """Claim messages.""" message_count = params.get('limit', self.limits.max_messages_per_claim_or_pop) doc = {"ttl": 300, "grace": 100} result = self.client.post(params=params, data=doc) self.assertEqual(201, result.status_code) self.assertSchema(result.json(), 'claim_create') actual_message_count = len(result.json()['messages']) self.assertMessageCount(actual_message_count, message_count) response_headers = set(result.headers.keys()) self.assertIsSubset(self.headers_response_with_body, response_headers) test_claim_messages.tags = ['smoke', 'positive'] def test_query_claim(self): """Query Claim.""" params = {'limit': 1} doc = {"ttl": 300, "grace": 100} result = self.client.post(params=params, data=doc) location = result.headers['Location'] url = self.cfg.zaqar.url + location result = self.client.get(url) self.assertEqual(200, result.status_code) test_query_claim.tags = ['smoke', 'positive'] @ddt.data({}, {"grace": 100}) def test_claim_default_ttl(self, doc): """Create claim with default TTL and grace values.""" params = {'limit': 1} result = self.client.post(params=params, data=doc) self.assertEqual(201, result.status_code) location = result.headers['Location'] url = self.cfg.zaqar.url + location result = self.client.get(url) self.assertEqual(200, result.status_code) default_ttl = result.json()['ttl'] self.assertEqual(self.resource_defaults.claim_ttl, default_ttl) test_claim_default_ttl.tags = ['smoke', 'positive'] def test_claim_more_than_allowed(self): """Claim more than max allowed per request. Zaqar allows a maximum of 20 messages per claim by default. 
""" params = {"limit": self.limits.max_messages_per_claim_or_pop + 1} doc = {"ttl": 300, "grace": 100} result = self.client.post(params=params, data=doc) self.assertEqual(400, result.status_code) test_claim_more_than_allowed.tags = ['negative'] def test_claim_patch(self): """Update Claim.""" # Test Setup - Post Claim doc = {"ttl": 300, "grace": 400} result = self.client.post(data=doc) self.assertEqual(201, result.status_code) # Patch Claim claim_location = result.headers['Location'] url = self.cfg.zaqar.url + claim_location doc_updated = {"ttl": 300, 'grace': 60} result = self.client.patch(url, data=doc_updated) self.assertEqual(204, result.status_code) # verify that the claim TTL is updated result = self.client.get(url) new_ttl = result.json()['ttl'] self.assertEqual(doc_updated['ttl'], new_ttl) test_claim_patch.tags = ['smoke', 'positive'] def test_delete_claimed_message(self): """Delete message belonging to a Claim.""" # Test Setup - Post claim doc = {"ttl": 60, "grace": 60} result = self.client.post(data=doc) self.assertEqual(201, result.status_code) # Delete Claimed Messages for rst in result.json()['messages']: href = rst['href'] url = self.cfg.zaqar.url + href result = self.client.delete(url) self.assertEqual(204, result.status_code) test_delete_claimed_message.tags = ['smoke', 'positive'] def test_claim_release(self): """Release Claim.""" doc = {"ttl": 300, "grace": 100} result = self.client.post(data=doc) self.assertEqual(201, result.status_code) # Extract claim location and construct the claim URL. location = result.headers['Location'] url = self.cfg.zaqar.url + location # Release Claim. result = self.client.delete(url) self.assertEqual(204, result.status_code) test_claim_release.tags = ['smoke', 'positive'] @ddt.data(10000000000000000000, -100, 1, 59, 43201, -10000000000000000000) def test_claim_invalid_ttl(self, ttl): """Post Claim with invalid TTL. The request JSON body will have a TTL value outside the allowed range.Allowed ttl values is 60 <= ttl <= 43200. """ doc = {"ttl": ttl, "grace": 100} result = self.client.post(data=doc) self.assertEqual(400, result.status_code) test_claim_invalid_ttl.tags = ['negative'] @ddt.data(10000000000000000000, -100, 1, 59, 43201, -10000000000000000000) def test_claim_invalid_grace(self, grace): """Post Claim with invalid grace. The request JSON body will have a grace value outside the allowed range.Allowed grace values is 60 <= grace <= 43200. """ doc = {"ttl": 100, "grace": grace} result = self.client.post(data=doc) self.assertEqual(400, result.status_code) test_claim_invalid_grace.tags = ['negative'] @ddt.data(0, -100, 30, 10000000000000000000) def test_claim_invalid_limit(self, grace): """Post Claim with invalid limit. The request url will have a limit outside the allowed range. Allowed limit values are 0 < limit <= 20(default max). """ doc = {"ttl": 100, "grace": grace} result = self.client.post(data=doc) self.assertEqual(400, result.status_code) test_claim_invalid_limit.tags = ['negative'] @ddt.data(10000000000000000000, -100, 1, 59, 43201, -10000000000000000000) def test_patch_claim_invalid_ttl(self, ttl): """Patch Claim with invalid TTL. The request JSON body will have a TTL value outside the allowed range.Allowed ttl values is 60 <= ttl <= 43200. """ doc = {"ttl": 100, "grace": 100} result = self.client.post(data=doc) self.assertEqual(201, result.status_code) # Extract claim location and construct the claim URL. location = result.headers['Location'] url = self.cfg.zaqar.url + location # Patch Claim. 
doc = {"ttl": ttl} result = self.client.patch(url, data=doc) self.assertEqual(400, result.status_code) test_patch_claim_invalid_ttl.tags = ['negative'] def test_query_non_existing_claim(self): """Query Non Existing Claim.""" path = '/non-existing-claim' result = self.client.get(path) self.assertEqual(404, result.status_code) test_query_non_existing_claim.tags = ['negative'] def test_patch_non_existing_claim(self): """Patch Non Existing Claim.""" path = '/non-existing-claim' doc = {"ttl": 400} result = self.client.patch(path, data=doc) self.assertEqual(404, result.status_code) test_patch_non_existing_claim.tags = ['negative'] def test_delete_non_existing_claim(self): """Patch Non Existing Claim.""" path = '/non-existing-claim' result = self.client.delete(path) self.assertEqual(204, result.status_code) test_delete_non_existing_claim.tags = ['negative'] def tearDown(self): """Delete Queue after Claim Test.""" super(TestClaims, self).tearDown() self.client.delete(self.queue_url) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/functional/wsgi/v1_1/test_health.py0000664000175100017510000000572615033040005023767 0ustar00mylesmyles# Copyright (c) 2014 Catalyst IT Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from zaqar.tests.functional import base from zaqar.tests.functional import helpers class TestHealth(base.V1_1FunctionalTestBase): server_class = base.ZaqarAdminServer config_file = 'wsgi_mongodb_pooled.conf' def setUp(self): super(TestHealth, self).setUp() self.base_url = ("{url}/{version}".format( url=self.cfg.zaqar.url, version="v1.1" )) self.cfg.zaqar.version = "v1.1" self.headers = helpers.create_zaqar_headers(self.cfg) self.client.headers = self.headers self.client.set_base_url(self.base_url) def test_health_with_pool(self): # FIXME(flwang): Please use mongodb after the sqlalchemy is disabled # as pool node and the mongodb is working on gate successfully. 
doc = helpers.create_pool_body( weight=10, uri=self.mconf['drivers:management_store:mongodb'].uri, options=dict(database='zaqar_test_pooled_1') ) pool_name = "pool_1" result = self.client.put('/pools/' + pool_name, data=doc) self.assertEqual(201, result.status_code) queue_name = 'fake_queue' result = self.client.put('/queues/' + queue_name) self.assertEqual(201, result.status_code) sample_messages = {'messages': [ {'body': 239, 'ttl': 999}, {'body': {'key': 'value'}, 'ttl': 888} ]} result = self.client.post('/queues/%s/messages' % queue_name, data=sample_messages) self.assertEqual(201, result.status_code) claim_metadata = {'ttl': 100, 'grace': 300} result = self.client.post('/queues/%s/claims' % queue_name, data=claim_metadata) self.assertEqual(201, result.status_code) response = self.client.get('/health') self.assertEqual(200, response.status_code) health = response.json() self.assertTrue(health['catalog_reachable']) self.assertTrue(health[pool_name]['storage_reachable']) op_status = health[pool_name]['operation_status'] for op in op_status.keys(): self.assertTrue(op_status[op]['succeeded']) message_volume = health[pool_name]['message_volume'] self.assertEqual(2, message_volume['claimed']) self.assertEqual(0, message_volume['free']) self.assertEqual(2, message_volume['total']) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/functional/wsgi/v1_1/test_messages.py0000664000175100017510000004343115033040005024324 0ustar00mylesmyles# Copyright (c) 2014 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import uuid import ddt from oslo_serialization import jsonutils from zaqar.common import consts from zaqar.tests.functional import base from zaqar.tests.functional import helpers @ddt.ddt class TestMessages(base.V1_1FunctionalTestBase): """Message Tests Specific to V1.1.""" server_class = base.ZaqarServer def setUp(self): super(TestMessages, self).setUp() self.queue = uuid.uuid1() # Generate a random queue ID self.queue_url = ("{url}/{version}/queues/{queue}".format( url=self.cfg.zaqar.url, version="v1.1", queue=self.queue)) self.headers = helpers.create_zaqar_headers(self.cfg) self.client.headers = self.headers self.client.put(self.queue_url) # Create the queue self.message_url = self.queue_url + '/messages' self.client.set_base_url(self.message_url) def tearDown(self): self.client.delete(self.queue_url) # Remove the queue super(TestMessages, self).tearDown() def _post_large_bulk_insert(self, offset): """Insert just under than max allowed messages.""" message1 = {"body": '', "ttl": 300} message2 = {"body": '', "ttl": 120} doc = {'messages': [message1, message2]} overhead = len(jsonutils.dumps(doc)) half_size = (self.limits.max_messages_post_size - overhead) // 2 message1['body'] = helpers.generate_random_string(half_size) message2['body'] = helpers.generate_random_string(half_size + offset) return self.client.post(data=doc) def test_message_single_insert(self): """Insert Single Message into the Queue. 
This test also verifies that claimed messages are retuned (or not) depending on the include_claimed flag. """ doc = helpers.create_message_body_v1_1(messagecount=1) result = self.client.post(data=doc) self.assertEqual(201, result.status_code) response_headers = set(result.headers.keys()) self.assertIsSubset(self.headers_response_with_body, response_headers) # GET on posted message href = result.json()['resources'][0] url = self.cfg.zaqar.url + href result = self.client.get(url) self.assertEqual(200, result.status_code) # Compare message metadata result_body = result.json()['body'] posted_metadata = doc['messages'][0]['body'] self.assertEqual(posted_metadata, result_body) # Post a claim & verify the include_claimed flag. url = self.queue_url + '/claims' doc = {"ttl": 300, "grace": 100} result = self.client.post(url, data=doc) self.assertEqual(201, result.status_code) params = {'include_claimed': True, 'echo': True} result = self.client.get(params=params) self.assertEqual(200, result.status_code) response_message_body = result.json()["messages"][0]["body"] self.assertEqual(posted_metadata, response_message_body) # By default, include_claimed = false result = self.client.get(self.message_url) self.assertEqual(200, result.status_code) test_message_single_insert.tags = ['smoke', 'positive'] def test_message_bulk_insert(self): """Bulk Insert Messages into the Queue.""" message_count = self.limits.max_messages_per_page doc = helpers.create_message_body_v1_1(messagecount=message_count) result = self.client.post(data=doc) self.assertEqual(201, result.status_code) # GET on posted messages location = result.headers['location'] url = self.cfg.zaqar.url + location result = self.client.get(url) self.assertEqual(200, result.status_code) # Verify that the response json schema matches the expected schema self.assertSchema(result.json(), consts.MESSAGE_GET_MANY) self.skipTest('Bug #1273335 - Get set of messages returns wrong hrefs ' '(happens randomly)') # Compare message metadata result_body = [msg['body'] for msg in result.json()['messages']] result_body.sort() posted_metadata = [msg['body'] for msg in doc['messages']] posted_metadata.sort() self.assertEqual(posted_metadata, result_body) test_message_bulk_insert.tags = ['smoke', 'positive'] def test_message_default_ttl(self): """Insert Single Message into the Queue using the default TTL.""" doc = helpers.create_message_body_v1_1(messagecount=1, default_ttl=True) result = self.client.post(data=doc) self.assertEqual(201, result.status_code) # GET on posted message href = result.json()['resources'][0] url = self.cfg.zaqar.url + href result = self.client.get(url) self.assertEqual(200, result.status_code) # Compare message metadata default_ttl = result.json()['ttl'] self.assertEqual(self.resource_defaults.message_ttl, default_ttl) test_message_default_ttl.tags = ['smoke', 'positive'] @ddt.data({}, {'limit': 5}) def test_get_message(self, params): """Get Messages.""" # Note(abettadapur): This will now return 200s and []. # Needs to be addressed when feature patch goes in self.skipTest("Not supported") expected_msg_count = params.get('limit', self.limits.max_messages_per_page) # Test Setup doc = helpers.create_message_body_v1_1( messagecount=self.limits.max_messages_per_page) result = self.client.post(data=doc) self.assertEqual(201, result.status_code) url = '' params['echo'] = True # Follow the hrefs & perform GET, till the end of messages i.e. 
http # 204 while result.status_code in [201, 200]: result = self.client.get(url, params=params) self.assertIn(result.status_code, [200, 204]) if result.status_code == 200: actual_msg_count = len(result.json()['messages']) self.assertMessageCount(actual_msg_count, expected_msg_count) href = result.json()['links'][0]['href'] url = self.cfg.zaqar.url + href self.assertEqual(204, result.status_code) test_get_message.tags = ['smoke', 'positive'] def test_message_delete(self): """Delete Message.""" # Test Setup doc = helpers.create_message_body_v1_1(messagecount=1) result = self.client.post(data=doc) self.assertEqual(201, result.status_code) # Delete posted message href = result.json()['resources'][0] url = self.cfg.zaqar.url + href result = self.client.delete(url) self.assertEqual(204, result.status_code) result = self.client.get(url) self.assertEqual(404, result.status_code) test_message_delete.tags = ['smoke', 'positive'] def test_message_bulk_delete(self): """Bulk Delete Messages.""" doc = helpers.create_message_body_v1_1(messagecount=10) result = self.client.post(data=doc) self.assertEqual(201, result.status_code) # Delete posted messages location = result.headers['Location'] url = self.cfg.zaqar.url + location result = self.client.delete(url) self.assertEqual(204, result.status_code) result = self.client.get(url) self.assertEqual(404, result.status_code) test_message_bulk_delete.tags = ['smoke', 'positive'] def test_message_delete_nonexisting(self): """Delete non-existing Messages.""" result = self.client.delete('/non-existing') self.assertEqual(204, result.status_code) test_message_delete_nonexisting.tags = ['negative'] def test_message_partial_delete(self): """Delete Messages will be partially successful.""" doc = helpers.create_message_body_v1_1(messagecount=3) result = self.client.post(data=doc) self.assertEqual(201, result.status_code) # Delete posted message location = result.headers['Location'] url = self.cfg.zaqar.url + location url += ',nonexisting' result = self.client.delete(url) self.assertEqual(204, result.status_code) test_message_partial_delete.tags = ['negative'] @ddt.data(5, 1) def test_messages_pop(self, limit=5): """Pop messages from a queue.""" doc = helpers.create_message_body_v1_1(messagecount=limit) result = self.client.post(data=doc) self.assertEqual(201, result.status_code) # Pop messages url = self.message_url + '?pop=' + str(limit) result = self.client.delete(url) self.assertEqual(200, result.status_code) params = {'echo': True} result = self.client.get(self.message_url, params=params) self.assertEqual(200, result.status_code) messages = result.json()['messages'] self.assertEqual([], messages) test_messages_pop.tags = ['smoke', 'positive'] @ddt.data(10000000, 0, -1) def test_messages_pop_invalid(self, limit): """Pop messages from a queue.""" doc = helpers.create_message_body_v1_1( messagecount=self.limits.max_messages_per_page) result = self.client.post(data=doc) self.assertEqual(201, result.status_code) # Pop messages url = self.message_url + '?pop=' + str(limit) result = self.client.delete(url) self.assertEqual(400, result.status_code) params = {'echo': True} result = self.client.get(self.message_url, params=params) self.assertEqual(200, result.status_code) messages = result.json()['messages'] self.assertNotEqual(messages, []) test_messages_pop_invalid.tags = ['smoke', 'negative'] def test_messages_delete_pop_and_id(self): """Delete messages with pop & id params in the request.""" doc = helpers.create_message_body_v1_1( messagecount=1) result = 
self.client.post(data=doc) self.assertEqual(201, result.status_code) location = result.headers['Location'] # Pop messages url = self.cfg.zaqar.url + location + '&pop=1' result = self.client.delete(url) self.assertEqual(400, result.status_code) params = {'echo': True} result = self.client.get(self.message_url, params=params) self.assertEqual(200, result.status_code) messages = result.json()['messages'] self.assertNotEqual(messages, []) test_messages_delete_pop_and_id.tags = ['smoke', 'negative'] def test_messages_pop_empty_queue(self): """Pop messages from an empty queue.""" url = self.message_url + '?pop=2' result = self.client.delete(url) self.assertEqual(200, result.status_code) messages = result.json()['messages'] self.assertEqual([], messages) test_messages_pop_empty_queue.tags = ['smoke', 'positive'] def test_messages_pop_one(self): """Pop single messages from a queue.""" doc = helpers.create_message_body_v1_1( messagecount=self.limits.max_messages_per_page) result = self.client.post(data=doc) self.assertEqual(201, result.status_code) # Pop Single Message url = self.message_url + '?pop=1' result = self.client.delete(url) self.assertEqual(200, result.status_code) # Get messages from the queue & verify message count params = {'echo': True, 'limit': self.limits.max_messages_per_page} result = self.client.get(self.message_url, params=params) self.assertEqual(200, result.status_code) expected_msg_count = self.limits.max_messages_per_page - 1 actual_msg_count = len(result.json()['messages']) self.assertEqual(expected_msg_count, actual_msg_count) test_messages_pop_one.tags = ['smoke', 'positive'] def test_message_partial_get(self): """Get Messages will be partially successful.""" doc = helpers.create_message_body_v1_1(messagecount=3) result = self.client.post(data=doc) self.assertEqual(201, result.status_code) # Get posted message and a nonexisting message location = result.headers['Location'] url = self.cfg.zaqar.url + location url += ',nonexisting' result = self.client.get(url) self.assertEqual(200, result.status_code) test_message_partial_get.tags = ['negative'] @ddt.data(-10, -1, 0) def test_message_bulk_insert_large_bodies(self, offset): """Insert just under than max allowed messages.""" result = self._post_large_bulk_insert(offset) self.assertEqual(201, result.status_code) test_message_bulk_insert_large_bodies.tags = ['positive'] @ddt.data(1, 10) def test_message_bulk_insert_large_bodies_(self, offset): """Insert just under than max allowed messages.""" result = self._post_large_bulk_insert(offset) self.assertEqual(400, result.status_code) test_message_bulk_insert_large_bodies_.tags = ['negative'] def test_message_bulk_insert_oversized(self): """Insert more than max allowed size.""" doc = '[{{"body": "{0}", "ttl": 300}}, {{"body": "{1}", "ttl": 120}}]' overhead = len(doc.format('', '')) half_size = (self.limits.max_messages_post_size - overhead) // 2 doc = doc.format(helpers.generate_random_string(half_size), helpers.generate_random_string(half_size + 1)) result = self.client.post(data=doc) self.assertEqual(400, result.status_code) test_message_bulk_insert_oversized.tags = ['negative'] @ddt.data(10000000000000000000, -100, 0, 30, -10000000000000000000) def test_message_get_invalid_limit(self, limit): """Get Messages with invalid value for limit. Allowed values for limit are 0 < limit <= 20(configurable). 
""" params = {'limit': limit} result = self.client.get(params=params) self.assertEqual(400, result.status_code) test_message_get_invalid_limit.tags = ['negative'] def test_message_bulk_delete_negative(self): """Delete more messages than allowed in a single request. By default, max messages that can be deleted in a single request is 20. """ url = (self.message_url + '?ids=' + ','.join(str(i) for i in range(self.limits.max_messages_per_page + 1))) result = self.client.delete(url) self.assertEqual(400, result.status_code) test_message_bulk_delete_negative.tags = ['negative'] def test_message_bulk_get_negative(self): """GET more messages by id than allowed in a single request. By default, max messages that can be fetched in a single request is 20. """ url = (self.message_url + '?ids=' + ','.join(str(i) for i in range(self.limits.max_messages_per_page + 1))) result = self.client.get(url) self.assertEqual(400, result.status_code) test_message_bulk_get_negative.tags = ['negative'] def test_get_messages_malformed_marker(self): """Get messages with non-existing marker.""" url = self.message_url + '?marker=invalid' result = self.client.get(url, headers=self.headers) self.assertEqual(200, result.status_code) self.assertSchema(result.json(), 'message_list') test_get_messages_malformed_marker.tags = ['negative'] @ddt.data(None, '1234', 'aa2-bb3', '103e09c6-31b7-11e3-86bc-b8ca3ad0f5d81', '103e09c6-31b7-11e3-86bc-b8ca3ad0f5d') def test_get_messages_invalid_client_id(self, client_id): """Get messages with invalid client id.""" url = self.message_url header = helpers.create_zaqar_headers(self.cfg) header['Client-ID'] = client_id result = self.client.get(url, headers=header) self.assertEqual(400, result.status_code) test_get_messages_invalid_client_id.tags = ['negative'] def test_query_non_existing_message(self): """Get Non Existing Message.""" path = '/non-existing-message' result = self.client.get(path) self.assertEqual(404, result.status_code) test_query_non_existing_message.tags = ['negative'] def test_query_non_existing_message_set(self): """Get Set of Non Existing Messages.""" path = '?ids=not_there1,not_there2' result = self.client.get(path) self.assertEqual(404, result.status_code) test_query_non_existing_message_set.tags = ['negative'] def test_delete_non_existing_message(self): """Delete Non Existing Message.""" path = '/non-existing-message' result = self.client.delete(path) self.assertEqual(204, result.status_code) test_delete_non_existing_message.tags = ['negative'] def test_message_bad_header_single_insert(self): """Insert Single Message into the Queue. This should fail because of the lack of a Client-ID header """ self.skipTest("Not supported") del self.client.headers["Client-ID"] doc = helpers.create_message_body_v1_1(messagecount=1) result = self.client.post(data=doc) self.assertEqual(400, result.status_code) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/functional/wsgi/v1_1/test_pools.py0000664000175100017510000001624315033040005023652 0ustar00mylesmyles# Copyright (c) 2014 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import ddt from zaqar import tests as testing from zaqar.tests.functional import base from zaqar.tests.functional import helpers @ddt.ddt class TestPools(base.V1_1FunctionalTestBase): server_class = base.ZaqarAdminServer config_file = 'wsgi_mongodb_pooled.conf' @testing.requires_mongodb def setUp(self): super(TestPools, self).setUp() self.pool_url = ("{url}/{version}/pools".format( url=self.cfg.zaqar.url, version="v1.1" )) self.cfg.zaqar.version = "v1.1" self.headers = helpers.create_zaqar_headers(self.cfg) self.client.headers = self.headers self.client.set_base_url(self.pool_url) @ddt.data( { 'name': "newpool", 'weight': 10 } ) def test_insert_pool(self, params): """Test the registering of one pool.""" doc = helpers.create_pool_body( weight=params.get('weight', 10), uri=self.mongodb_url ) pool_name = params.get('name', "newpool") self.addCleanup(self.client.delete, url='/'+pool_name) result = self.client.put('/'+pool_name, data=doc) self.assertEqual(201, result.status_code) # Test existence result = self.client.get('/'+pool_name) self.assertEqual(200, result.status_code) @ddt.data( { 'name': "newpool", 'weight': 10 } ) def test_pool_details(self, params): """Get the details of a pool. Assert the respective schema.""" doc = helpers.create_pool_body( weight=params.get('weight', 10), uri=self.mongodb_url ) pool_name = params.get('name', "newpool") self.addCleanup(self.client.delete, url='/'+pool_name) result = self.client.put('/'+pool_name, data=doc) self.assertEqual(201, result.status_code) # Test existence result = self.client.get('/'+pool_name+'?detailed=true') self.assertEqual(200, result.status_code) self.assertSchema(result.json(), 'pool_get_detail') @ddt.data( { 'name': "newpool", 'weight': 10, } ) def test_delete_pool(self, params): """Create a pool, then delete it. Make sure operation is successful. """ # Create the pool doc = helpers.create_pool_body( weight=params.get('weight', 10), uri=self.mongodb_url ) pool_name = params.get('name', "newpool") result = self.client.put('/'+pool_name, data=doc) self.assertEqual(201, result.status_code) # Make sure it exists result = self.client.get('/'+pool_name) self.assertEqual(200, result.status_code) # Delete it result = self.client.delete('/'+pool_name) self.assertEqual(204, result.status_code) @ddt.data( { 'name': "newpool", 'weight': 10, } ) def test_list_pools(self, params): """Add a pool. Get the list of all the pools. Assert respective schema """ doc = helpers.create_pool_body( weight=params.get('weight', 10), uri=self.mongodb_url ) pool_name = params.get('name', "newpool") self.addCleanup(self.client.delete, url='/'+pool_name) result = self.client.put('/'+pool_name, data=doc) self.assertEqual(201, result.status_code) result = self.client.get() self.assertEqual(200, result.status_code) self.assertSchema(result.json(), 'pool_list') @ddt.data( { 'name': "newpool", 'weight': 10, } ) def test_patch_pool(self, params): """Create a pool. Issue a patch command, make sure command was successful. Check details to be sure. 
""" doc = helpers.create_pool_body( weight=params.get('weight', 10), uri=self.mongodb_url ) pool_name = params.get('name', "newpool") self.addCleanup(self.client.delete, url='/'+pool_name) result = self.client.put('/'+pool_name, data=doc) self.assertEqual(201, result.status_code) # Update that pool patchdoc = helpers.create_pool_body( weight=5, uri=self.mongodb_url ) result = self.client.patch('/'+pool_name, data=patchdoc) self.assertEqual(200, result.status_code) # Get the pool, check update# result = self.client.get('/'+pool_name) self.assertEqual(200, result.status_code) self.assertEqual(5, result.json()["weight"]) @ddt.data( { 'name': "newpool", 'weight': 10, } ) def test_patch_pool_bad_data(self, params): """Issue a patch command without a body. Assert 400.""" # create a pool doc = helpers.create_pool_body( weight=params.get('weight', 10), uri=self.mongodb_url ) pool_name = params.get('name', "newpool") self.addCleanup(self.client.delete, url='/'+pool_name) result = self.client.put('/'+pool_name, data=doc) self.assertEqual(201, result.status_code) # Update pool with bad post data. Ensure 400 result = self.client.patch('/'+pool_name) self.assertEqual(400, result.status_code) @ddt.data( { 'name': "newpool", 'weight': 10, } ) def test_patch_pool_non_exist(self, params): """Issue patch command to pool that doesn't exist. Assert 404.""" doc = helpers.create_pool_body( weight=5, uri=self.mongodb_url ) result = self.client.patch('/nonexistpool', data=doc) self.assertEqual(404, result.status_code) @ddt.data( {'name': '\u6c49\u5b57\u6f22\u5b57'}, {'name': 'i'*65}, {'weight': -1} ) def test_insert_pool_bad_data(self, params): """Create pools with invalid names and weights. Assert 400.""" self.skip("FIXME: https://bugs.launchpad.net/zaqar/+bug/1373486") doc = helpers.create_pool_body( weight=params.get('weight', 10), uri=self.mongodb_url ) pool_name = params.get('name', "newpool") self.addCleanup(self.client.delete, url='/'+pool_name) result = self.client.put('/'+pool_name, data=doc) self.assertEqual(400, result.status_code) def test_delete_pool_non_exist(self): """Delete a pool that doesn't exist. Assert 404.""" result = self.client.delete('/nonexistpool') self.assertEqual(204, result.status_code) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/functional/wsgi/v1_1/test_queues.py0000664000175100017510000002672015033040005024026 0ustar00mylesmyles# Copyright (c) 2014 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import uuid import ddt from zaqar.tests.functional import base from zaqar.tests.functional import helpers class NamedBinaryStr(bytes): """Wrapper for bytes to facilitate overriding __name__.""" class NamedUnicodeStr(str): """Unicode string look-alike to facilitate overriding __name__.""" def __init__(self, value): self.value = value def __str__(self): return self.value def encode(self, enc): return self.value.encode(enc) def __format__(self, formatstr): """Workaround for ddt bug. 
DDT will always call __format__ even when __name__ exists, which blows up for Unicode strings under Py2. """ return '' class NamedDict(dict): """Wrapper for dict to facilitate overriding __name__.""" def annotated(test_name, test_input): if isinstance(test_input, dict): annotated_input = NamedDict(test_input) elif isinstance(test_input, str): annotated_input = NamedUnicodeStr(test_input) else: annotated_input = NamedBinaryStr(test_input) setattr(annotated_input, '__name__', test_name) return annotated_input @ddt.ddt class TestInsertQueue(base.V1_1FunctionalTestBase): """Tests for Insert queue.""" server_class = base.ZaqarServer def setUp(self): super(TestInsertQueue, self).setUp() self.base_url = '{0}/{1}'.format(self.cfg.zaqar.url, "v1.1") self.header = helpers.create_zaqar_headers(self.cfg) self.headers_response_empty = {'location'} self.client.set_base_url(self.base_url) self.client.headers = self.header @ddt.data('qtestqueue', 'TESTqueue', 'hyphen-name', '_undersore', annotated('test_insert_queue_long_name', 'i' * 64)) def test_insert_queue(self, queue_name): """Create Queue.""" self.url = self.base_url + '/queues/' + queue_name self.addCleanup(self.client.delete, self.url) result = self.client.put(self.url) self.assertEqual(201, result.status_code) response_headers = set(result.headers.keys()) self.assertIsSubset(self.headers_response_empty, response_headers) test_insert_queue.tags = ['positive', 'smoke'] @ddt.data(annotated('test_insert_queue_non_ascii_name', '\u6c49\u5b57\u6f22\u5b57'), '@$@^qw', annotated('test_insert_queue_invalid_name_length', 'i' * 65)) def test_insert_queue_invalid_name(self, queue_name): """Create Queue.""" self.url = self.base_url + '/queues/' + queue_name self.addCleanup(self.client.delete, self.url) result = self.client.put(self.url) self.assertEqual(400, result.status_code) test_insert_queue_invalid_name.tags = ['negative'] def test_insert_queue_header_plaintext(self): """Insert Queue with 'Accept': 'plain/text'.""" path = '/queues/plaintextheader' self.addCleanup(self.client.delete, path) header = {"Accept": 'plain/text'} result = self.client.put(path, headers=header) self.assertEqual(406, result.status_code) test_insert_queue_header_plaintext.tags = ['negative'] def test_insert_queue_header_asterisk(self): """Insert Queue with 'Accept': '*/*'.""" path = '/queues/asteriskinheader' headers = {'Accept': '*/*', 'Client-ID': str(uuid.uuid4()), 'X-Project-ID': '518b51ea133c4facadae42c328d6b77b'} self.addCleanup(self.client.delete, url=path, headers=headers) result = self.client.put(path, headers=headers) self.assertEqual(201, result.status_code) test_insert_queue_header_asterisk.tags = ['positive'] def test_insert_queue_with_metadata(self): """Insert queue with a non-empty request body.""" self.url = self.base_url + '/queues/hasmetadata' doc = {"queue": "Has Metadata"} self.addCleanup(self.client.delete, self.url) result = self.client.put(self.url, data=doc) self.assertEqual(201, result.status_code) self.url = self.base_url + '/queues/hasmetadata' result = self.client.get(self.url) self.assertEqual(200, result.status_code) self.assertEqual({"queue": "Has Metadata"}, result.json()) test_insert_queue_with_metadata.tags = ['negative'] def tearDown(self): super(TestInsertQueue, self).tearDown() @ddt.ddt class TestQueueMisc(base.V1_1FunctionalTestBase): server_class = base.ZaqarServer def setUp(self): super(TestQueueMisc, self).setUp() self.base_url = self.cfg.zaqar.url self.client.set_base_url(self.base_url) self.queue_url = self.base_url + ('/{0}/queues/{1}' 
.format("v1.1", uuid.uuid1())) def test_list_queues(self): """List Queues.""" self.client.put(self.queue_url) self.addCleanup(self.client.delete, self.queue_url) result = self.client.get('/{0}/queues' .format("v1.1")) self.assertEqual(200, result.status_code) self.assertSchema(result.json(), 'queue_list') test_list_queues.tags = ['smoke', 'positive'] def test_list_queues_detailed(self): """List Queues with detailed = True.""" self.client.put(self.queue_url) self.addCleanup(self.client.delete, self.queue_url) params = {'detailed': True} result = self.client.get('/{0}/queues' .format("v1.1"), params=params) self.assertEqual(200, result.status_code) self.assertSchema(result.json(), 'queue_list') response_keys = result.json()['queues'][0].keys() self.assertIn('metadata', response_keys) test_list_queues_detailed.tags = ['smoke', 'positive'] @ddt.data(0, -1, 1001) def test_list_queue_invalid_limit(self, limit): """List Queues with a limit value that is not allowed.""" params = {'limit': limit} result = self.client.get('/{0}/queues' .format("v1.1"), params=params) self.assertEqual(400, result.status_code) test_list_queue_invalid_limit.tags = ['negative'] def test_check_queue_exists(self): """Checks if queue exists.""" self.client.put(self.queue_url) self.addCleanup(self.client.delete, self.queue_url) result = self.client.head(self.queue_url) self.assertEqual(405, result.status_code) test_check_queue_exists.tags = ['negative'] def test_get_queue_malformed_marker(self): """List queues with invalid marker.""" path = '/{0}/queues?marker=zzz'.format("v1.1") result = self.client.get(path) self.assertEqual(200, result.status_code) test_get_queue_malformed_marker.tags = ['negative'] def test_get_stats_empty_queue(self): """Get queue stats on an empty queue.""" result = self.client.put(self.queue_url) self.addCleanup(self.client.delete, self.queue_url) self.assertEqual(201, result.status_code) stats_url = self.queue_url + '/stats' # Get stats on an empty queue result = self.client.get(stats_url) self.assertEqual(200, result.status_code) expected_response = {'messages': {'claimed': 0, 'total': 0, 'free': 0}} self.assertEqual(expected_response, result.json()) test_get_stats_empty_queue.tags = ['positive'] @ddt.data(0, 1) def test_get_queue_stats_claimed(self, claimed): """Get stats on a queue.""" result = self.client.put(self.queue_url) self.addCleanup(self.client.delete, self.queue_url) self.assertEqual(201, result.status_code) # Post Messages to the test queue doc = helpers.create_message_body_v1_1( messagecount=self.limits.max_messages_per_claim_or_pop) message_url = self.queue_url + '/messages' result = self.client.post(message_url, data=doc) self.assertEqual(201, result.status_code) if claimed > 0: claim_url = self.queue_url + '/claims?limit=' + str(claimed) doc = {'ttl': 300, 'grace': 300} result = self.client.post(claim_url, data=doc) self.assertEqual(201, result.status_code) # Get stats on the queue. 
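# NOTE(editor): Stats come back as
# {'messages': {'claimed': N, 'free': N, 'total': N}} (see the empty-queue
# assertion above); assertQueueStats is presumably validating that split
# against the number of messages claimed here.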
stats_url = self.queue_url + '/stats' result = self.client.get(stats_url) self.assertEqual(200, result.status_code) self.assertQueueStats(result.json(), claimed) test_get_queue_stats_claimed.tags = ['positive'] def test_ping_queue(self): pass def tearDown(self): super(TestQueueMisc, self).tearDown() class TestQueueNonExisting(base.V1_1FunctionalTestBase): """Test Actions on non existing queue.""" server_class = base.ZaqarServer def setUp(self): super(TestQueueNonExisting, self).setUp() if self.cfg.version != "v1": self.skipTest("Not Supported") self.base_url = '{0}/{1}'.format(self.cfg.zaqar.url, "v1.1") self.queue_url = (self.base_url + '/queues/0a5b1b85-4263-11e3-b034-28cfe91478b9') self.client.set_base_url(self.queue_url) self.header = helpers.create_zaqar_headers(self.cfg) self.headers_response_empty = {'location'} self.header = helpers.create_zaqar_headers(self.cfg) def test_get_stats(self): """Get stats on non existing Queue.""" result = self.client.get('/stats') self.assertEqual(200, result.status_code) self.assertEqual([], result.json()) def test_get_metadata(self): """Get metadata on non existing Queue.""" result = self.client.get('/') self.assertEqual(200, result.status_code) self.assertEqual([], result.json()) def test_get_messages(self): """Get messages on non existing Queue.""" result = self.client.get('/messages') self.assertEqual(200, result.status_code) self.assertEqual([], result.json()) def test_post_messages(self): """Post messages to a non existing Queue.""" doc = [{"ttl": 200, "body": {"Home": ""}}] result = self.client.post('/messages', data=doc) self.assertEqual(201, result.status_code) # check existence of queue result = self.client.get() self.assertEqual(200, result.status_code) self.assertNotEqual([], result.json()) def test_claim_messages(self): """Claim messages from a non existing Queue.""" doc = {"ttl": 200, "grace": 300} result = self.client.post('/claims', data=doc) self.assertEqual(200, result.status_code) self.assertEqual([], result.json()) def test_delete_queue(self): """Delete non existing Queue.""" result = self.client.delete() self.assertEqual(204, result.status_code) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5780134 zaqar-20.1.0.dev29/zaqar/tests/functional/wsgi/v2/0000775000175100017510000000000015033040026020663 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/functional/wsgi/v2/__init__.py0000664000175100017510000000000015033040005022757 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/functional/wsgi/v2/test_subscriptions.py0000664000175100017510000001102115033040005025173 0ustar00mylesmyles# Copyright (c) 2013 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
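# NOTE(editor): Illustrative sketch (assumption): a subscription document
# pairs a subscriber URI with a TTL, e.g.
# {'subscriber': 'http://example.test', 'ttl': 60}, and the server replies
# with a 'subscription_id'. The expiry tests below allow the garbage
# collector up to class_ttl_gc_interval extra seconds after the TTL lapses.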
import time import uuid import ddt from zaqar.tests.functional import base from zaqar.tests.functional import helpers as func_helpers from zaqar.tests import helpers @ddt.ddt class TestSubscriptions(base.V2FunctionalTestBase): """Tests for Subscriptions.""" server_class = base.ZaqarServer def setUp(self): super(TestSubscriptions, self).setUp() self.queue_name = uuid.uuid1() self.queue_url = ("{url}/{version}/queues/{queue}".format( url=self.cfg.zaqar.url, version="v2", queue=self.queue_name)) self.client.put(self.queue_url) self.subscriptions_url = self.queue_url + '/subscriptions' self.client.set_base_url(self.subscriptions_url) def tearDown(self): # Delete test queue subscriptions after each test case. result = self.client.get(self.subscriptions_url) subscriptions = result.json()['subscriptions'] for sub in subscriptions: sub_url = self.subscriptions_url + '/' + sub['id'] self.client.delete(sub_url) # Delete test queue. self.client.delete(self.queue_url) super(TestSubscriptions, self).tearDown() @helpers.is_slow(condition=lambda self: self.class_ttl_gc_interval > 1) def test_expired_subscription(self): # Default TTL value is 600. doc = func_helpers.create_subscription_body() result = self.client.post(data=doc) self.assertEqual(201, result.status_code) longlive_id = result.json()['subscription_id'] # This is a minimum TTL allowed by server. ttl_for_shortlive = 60 doc = func_helpers.create_subscription_body( subscriber='http://expire.me', ttl=ttl_for_shortlive) result = self.client.post(data=doc) self.assertEqual(201, result.status_code) shortlive_id = result.json()['subscription_id'] shortlive_url = self.subscriptions_url + '/' + shortlive_id # Let's wait for subscription to expire. for i in range(self.class_ttl_gc_interval + ttl_for_shortlive): time.sleep(1) result = self.client.get(shortlive_url) if result.status_code == 404: break else: self.fail("Didn't remove the subscription in time.") # Make sure the expired subscription is not returned when listing. result = self.client.get(self.subscriptions_url) self.assertEqual(200, result.status_code) subscriptions = result.json()['subscriptions'] self.assertEqual(1, len(subscriptions)) self.assertEqual(longlive_id, subscriptions[0]['id']) @helpers.is_slow(condition=lambda self: self.class_ttl_gc_interval > 1) def test_update_ttl(self): # Default TTL value is 600. doc = func_helpers.create_subscription_body() result = self.client.post(data=doc) self.assertEqual(201, result.status_code) subscription_id = result.json()['subscription_id'] subscription_url = self.subscriptions_url + '/' + subscription_id # This is a minimum TTL allowed by server. updated_ttl = 60 update_fields = { 'ttl': updated_ttl } result = self.client.patch(subscription_url, data=update_fields) self.assertEqual(204, result.status_code) # Let's wait for updated subscription to expire. for i in range(self.class_ttl_gc_interval + updated_ttl): time.sleep(1) result = self.client.get(subscription_url) if result.status_code == 404: break else: self.fail("Didn't remove the subscription in time.") # Make sure the expired subscription is not returned when listing. result = self.client.get(self.subscriptions_url) self.assertEqual(200, result.status_code) subscriptions = result.json()['subscriptions'] self.assertEqual(0, len(subscriptions)) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/helpers.py0000664000175100017510000002275615033040005017246 0ustar00mylesmyles# Copyright (c) 2013 Rackspace Hosting, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import configparser import contextlib import functools import os import tempfile import uuid import testtools RUN_ALL_TESTS = os.environ.get('ZAQAR_TEST_EVERYTHING') def _test_variable_set(variable): return os.environ.get(variable, RUN_ALL_TESTS) is None SKIP_SLOW_TESTS = _test_variable_set('ZAQAR_TEST_SLOW') SKIP_MONGODB_TESTS = _test_variable_set('ZAQAR_TEST_MONGODB') SKIP_REDIS_TESTS = _test_variable_set('ZAQAR_TEST_REDIS') SKIP_SWIFT_TESTS = _test_variable_set('ZAQAR_TEST_SWIFT') @contextlib.contextmanager def expect(*exc_type): """A context manager to validate raised exceptions. Can be used as an alternative to testtools.ExpectedException. Notable differences: 1. This context manager accepts child classes of the given type, testing that an "except" statement referencing the given type would indeed catch it when raised by the statement(s) defined inside the context. 2. When the expected exception (or a child thereof) is not raised, this context manager *always* raises an AssertionError, both when a different exception is raised, and when no exception is raised at all. :param *exc_type: Exception type(s) expected to be raised during execution of the "with" context. """ assert len(exc_type) > 0 try: yield except exc_type: pass else: raise AssertionError( 'Not raised: %s' % ', '.join(e.__name__ for e in exc_type)) @contextlib.contextmanager def partitions(controller, count): """Context manager to create several partitions for testing. The partitions are automatically deleted when the context manager goes out of scope. :param controller: :param count: int - number of partitions to create :returns: [(str, int, [str])] - names, weights, hosts """ spec = [(str(uuid.uuid1()), i, [str(i)]) for i in range(count)] for n, w, h in spec: controller.create(n, w, h) yield spec for n, _, _ in spec: controller.delete(n) @contextlib.contextmanager def partition(controller, name, weight, hosts): """Context manager to create a single partition for testing. The partition is automatically deleted when the context manager goes out of scope. :param controller: storage handler :param name: str - partition name :param weight: int - partition weight :param hosts: [str] - hosts associated with this partition :returns: (str, int, [str]) - name, weight, host used in construction """ controller.create(name, weight, hosts) yield (name, weight, hosts) controller.delete(name) @contextlib.contextmanager def entry(controller, project, queue, partition, host, metadata={}): """Context manager to create a catalogue entry for testing. The entry is automatically deleted when the context manager goes out of scope. 
:param controller: storage handler :param project: str - namespace for queue :param queue: str - name of queue :param partition: str - associated partition :param host: str - representative host :param metadata: dict - metadata representation for this entry :returns: (str, str, str, str, dict) - (project, queue, part, host, meta) """ controller.insert(project, queue, partition, host, metadata) yield (project, queue, partition, host, metadata) controller.delete(project, queue) @contextlib.contextmanager def entries(controller, count): """Context manager to create several catalogue entries for testing. The entries are automatically deleted when the context manager goes out of scope. :param controller: storage handler :param count: int - number of entries to create :returns: [(str, str, str, str)] - [(project, queue, partition, host)] """ spec = [('_', str(uuid.uuid1()), str(i), str(i)) for i in range(count)] for p, q, n, h in spec: controller.insert(p, q, n, h) yield spec for p, q, _, _ in spec: controller.delete(p, q) @contextlib.contextmanager def pool_entry(controller, project, queue, pool): """Context manager to create a catalogue entry for testing. The entry is automatically deleted when the context manager goes out of scope. :param controller: storage handler :type controller: queues.storage.base:CatalogueBase :param project: namespace for queue :type project: str :param queue: name of queue :type queue: str :param pool: an identifier for the pool :type pool: str :returns: (project, queue, pool) :rtype: (str, str, str) """ controller.insert(project, queue, pool) yield (project, queue, pool) controller.delete(project, queue) @contextlib.contextmanager def pool_entries(controller, pool_ctrl, count): """Context manager to create several catalogue entries for testing. The entries are automatically deleted when the context manager goes out of scope. :param controller: storage handler :type controller: queues.storage.base:CatalogueBase :param count: number of entries to create :type count: int :returns: [(project, queue, pool)] :rtype: [(str, str, str)] """ spec = [('_', str(uuid.uuid1()), str(i)) for i in range(count)] for p, q, s in spec: pool_ctrl.create(s, 100, s) controller.insert(p, q, s) yield spec for p, q, s in spec: controller.delete(p, q) pool_ctrl.delete(s) def requires_mongodb(test_case): """Decorator to flag a test case as being dependent on MongoDB. MongoDB-specific tests will be skipped unless the ZAQAR_TEST_MONGODB environment variable is set. If the variable is set, the tests will assume that mongod is running and listening on localhost. """ reason = ('Skipping tests that require MongoDB. Ensure ' 'mongod is running on localhost and then set ' 'ZAQAR_TEST_MONGODB in order to enable tests ' 'that are specific to this storage backend. ') return testtools.skipIf(SKIP_MONGODB_TESTS, reason)(test_case) def requires_redis(test_case): """Decorator to flag a test case as being dependent on Redis. Redis-specific tests will be skipped unless the ZAQAR_TEST_REDIS environment variable is set. If the variable is set, the tests will assume that redis is running and listening on localhost. """ reason = ('Skipping tests that require Redis. Ensure ' 'Redis is running on localhost and then set ' 'ZAQAR_TEST_REDIS in order to enable tests ' 'that are specific to this storage backend. ') return testtools.skipIf(SKIP_REDIS_TESTS, reason)(test_case) def requires_swift(test_case): """Decorator to flag a test case as being dependent on Swift. 
def requires_mongodb(test_case):
    """Decorator to flag a test case as being dependent on MongoDB.

    MongoDB-specific tests will be skipped unless the ZAQAR_TEST_MONGODB
    environment variable is set. If the variable is set, the tests will
    assume that mongod is running and listening on localhost.
    """
    reason = ('Skipping tests that require MongoDB. Ensure '
              'mongod is running on localhost and then set '
              'ZAQAR_TEST_MONGODB in order to enable tests '
              'that are specific to this storage backend.')

    return testtools.skipIf(SKIP_MONGODB_TESTS, reason)(test_case)


def requires_redis(test_case):
    """Decorator to flag a test case as being dependent on Redis.

    Redis-specific tests will be skipped unless the ZAQAR_TEST_REDIS
    environment variable is set. If the variable is set, the tests will
    assume that redis is running and listening on localhost.
    """
    reason = ('Skipping tests that require Redis. Ensure '
              'Redis is running on localhost and then set '
              'ZAQAR_TEST_REDIS in order to enable tests '
              'that are specific to this storage backend.')

    return testtools.skipIf(SKIP_REDIS_TESTS, reason)(test_case)


def requires_swift(test_case):
    """Decorator to flag a test case as being dependent on Swift.

    Swift-specific tests will be skipped unless the ZAQAR_TEST_SWIFT
    environment variable is set. If the variable is set, the tests will
    assume that Swift is accessible and configured properly.
    """
    reason = ('Skipping tests that require Swift. Ensure Swift is running '
              'and then set ZAQAR_TEST_SWIFT in order to enable tests '
              'that are specific to this storage backend.')

    return testtools.skipIf(SKIP_SWIFT_TESTS, reason)(test_case)


def is_slow(condition=lambda self: True):
    """Decorator to flag slow tests.

    Slow tests will be skipped unless ZAQAR_TEST_SLOW is set, and
    condition(self) returns True.

    :param condition: Function that returns True IFF the test will be slow;
        useful for child classes which may modify the behavior of a test
        such that it may or may not be slow.
    """

    def decorator(test_method):

        @functools.wraps(test_method)
        def wrapper(self):
            if SKIP_SLOW_TESTS and condition(self):
                msg = ('Skipping slow test. Set ZAQAR_TEST_SLOW '
                       'to enable slow tests.')
                self.skipTest(msg)

            test_method(self)

        return wrapper

    return decorator


def override_mongo_conf(conf_file, test):
    test_mongo_url = os.environ.get('ZAQAR_TEST_MONGODB_URL')
    if test_mongo_url:
        parser = configparser.ConfigParser()
        parser.read(test.conf_path(conf_file))
        sections = ['drivers:management_store:mongodb',
                    'drivers:message_store:mongodb']
        for section in sections:
            if not parser.has_section(section):
                parser.add_section(section)
            parser.set(section, 'uri', test_mongo_url)
        if not parser.has_section('oslo_policy'):
            parser.add_section('oslo_policy')
        parser.set('oslo_policy', 'policy_file',
                   test.conf_path('policy.yaml'))
        fd, path = tempfile.mkstemp()
        conf_fd = os.fdopen(fd, 'w')
        try:
            parser.write(conf_fd)
        finally:
            conf_fd.close()
        test.addCleanup(os.remove, path)
        return path
    else:
        return conf_file
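# Illustrative sketch (not part of the original module): how the skip
# decorators above combine on a testtools test case. The class is shown
# commented out so it is never collected by a test runner.
#
#     class RedisSmokeTest(testtools.TestCase):
#
#         @requires_redis
#         def test_ping(self):
#             ...  # runs only when ZAQAR_TEST_REDIS is set
#
#         @requires_redis
#         @is_slow()
#         def test_full_scan(self):
#             ...  # additionally requires ZAQAR_TEST_SLOW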
././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5780134 zaqar-20.1.0.dev29/zaqar/tests/unit/0000775000175100017510000000000015033040026016200 5ustar00mylesmyles
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/__init__.py0000664000175100017510000000000015033040005020274 0ustar00mylesmyles
././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5780134 zaqar-20.1.0.dev29/zaqar/tests/unit/cmd/0000775000175100017510000000000015033040026016743 5ustar00mylesmyles
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/cmd/__init__.py0000664000175100017510000000000015033040005021037 0ustar00mylesmyles
././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5780134 zaqar-20.1.0.dev29/zaqar/tests/unit/common/0000775000175100017510000000000015033040026017470 5ustar00mylesmyles
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/common/__init__.py0000664000175100017510000000000015033040005021564 0ustar00mylesmyles
././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5790133 zaqar-20.1.0.dev29/zaqar/tests/unit/common/storage/0000775000175100017510000000000015033040026021134 5ustar00mylesmyles
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/common/storage/__init__.py0000664000175100017510000000000015033040005023230 0ustar00mylesmyles
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/common/storage/test_select.py0000664000175100017510000000514715033040005024030 0ustar00mylesmyles
# Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import testtools

from zaqar.common.storage import select


class TestSelect(testtools.TestCase):

    def test_weighted_returns_none_if_no_objs(self):
        self.assertIsNone(select.weighted([]))

    def test_weighted_returns_none_if_objs_have_zero_weight(self):
        objs = [{'weight': 0, 'name': str(i)} for i in range(2)]
        self.assertIsNone(select.weighted(objs))

    def test_weighted_ignores_zero_weight_objs(self):
        objs = [{'weight': 0, 'name': str(i)} for i in range(2)]
        expect = {'weight': 1, 'name': 'theone'}
        objs.append(expect)
        self.assertEqual(expect, select.weighted(objs))

    def test_weighted_returns_an_object_it_was_given(self):
        objs = [{'weight': 10, 'name': str(i)} for i in range(10)]
        ret = select.weighted(objs)
        self.assertIn(ret, objs)

    def test_weighted_returns_none_if_selector_oob(self):
        objs = [{'weight': 10, 'name': str(i)} for i in range(10)]
        sum_weights = sum([o['weight'] for o in objs])
        capped_gen = lambda x, y: sum_weights
        self.assertIsNone(select.weighted(objs, generator=capped_gen))

    def test_weighted_returns_first_if_selector_is_zero(self):
        objs = [{'weight': 10, 'name': str(i)} for i in range(10)]
        zero_gen = lambda x, y: 0
        self.assertEqual(objs[0], select.weighted(objs, generator=zero_gen))

    def test_weighted_returns_last_if_selector_is_sum_minus_one(self):
        objs = [{'weight': 10, 'name': str(i)} for i in range(10)]
        sum_weights = sum([o['weight'] for o in objs])
        capped_gen = lambda x, y: sum_weights - 1
        self.assertEqual(objs[-1],
                         select.weighted(objs, generator=capped_gen))

    def test_weighted_boundaries(self):
        objs = [{'weight': 1, 'name': str(i)} for i in range(3)]
        for i in range(len(objs)):
            fixed_gen = lambda x, y: i
            self.assertEqual(objs[i],
                             select.weighted(objs, generator=fixed_gen))
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/common/storage/test_utils.py0000664000175100017510000000446615033040005023714 0ustar00mylesmyles
# Copyright (c) 2014 Rackspace Hosting, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
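# NOTE: illustrative sketch, not part of the original suite. The tests
# below exercise zaqar.storage.utils.can_connect(), which probes a backend
# URI and reports reachability as a bool instead of raising, e.g.:
#
#     from zaqar.storage import utils
#
#     if not utils.can_connect('redis://localhost:6379', conf=conf):
#         raise RuntimeError('message store is unreachable')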
import ddt from zaqar.conf import default from zaqar.storage import utils from zaqar import tests as testing @ddt.ddt class TestUtils(testing.TestBase): def setUp(self): super(TestUtils, self).setUp() self.conf.register_opts(default.ALL_OPTS) @testing.requires_mongodb def test_can_connect_succeeds_if_good_uri_mongo(self): self.config(unreliable=True) self.assertTrue(utils.can_connect(self.mongodb_url, conf=self.conf)) @testing.requires_redis def test_can_connect_succeeds_if_good_uri_redis(self): self.assertTrue(utils.can_connect('redis://localhost', conf=self.conf)) self.assertTrue(utils.can_connect('redis://localhost:6379', conf=self.conf)) def test_can_connect_fails_if_bad_uri_missing_schema(self): self.assertFalse(utils.can_connect('localhost:27017', conf=self.conf)) @testing.requires_mongodb def test_can_connect_fails_if_bad_uri_mongodb(self): self.config(unreliable=True) uri = 'mongodb://localhost:8080?connectTimeoutMS=100' self.assertFalse(utils.can_connect(uri, conf=self.conf)) uri = 'mongodb://example.com:27017?connectTimeoutMS=100' self.assertFalse(utils.can_connect(uri, conf=self.conf)) @testing.requires_redis def test_can_connect_fails_if_bad_uri_redis(self): self.assertFalse(utils.can_connect('redis://localhost:8080', conf=self.conf)) self.assertFalse(utils.can_connect('redis://example.com:6379', conf=self.conf)) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/common/test_api.py0000664000175100017510000000340615033040005021652 0ustar00mylesmyles# Copyright (c) 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from zaqar.common.api import api from zaqar.common import errors from zaqar.tests import base class FakeApi(api.Api): schema = { 'test_operation': { 'ref': 'test/{name}', 'method': 'GET', 'properties': { 'name': {'type': 'string'}, 'address': {'type': 'string'} }, 'additionalProperties': False, 'required': ['name'] } } class TestApi(base.TestBase): def setUp(self): super(TestApi, self).setUp() self.api = FakeApi() def test_valid_params(self): self.assertTrue(self.api.validate('test_operation', {'name': 'Sauron'})) def test_invalid_params(self): self.assertFalse(self.api.validate('test_operation', {'name': 'Sauron', 'lastname': 'From Mordor'})) def test_missing_params(self): self.assertFalse(self.api.validate('test_operation', {})) def test_invalid_operation(self): self.assertRaises(errors.InvalidAction, self.api.validate, 'super_secret_op', {}) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/common/test_decorators.py0000664000175100017510000001304015033040005023241 0ustar00mylesmyles# Copyright (c) 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import msgpack from oslo_cache import core from oslo_config import cfg from zaqar.common import cache as oslo_cache from zaqar.common import decorators from zaqar.conf import default from zaqar.tests import base class TestDecorators(base.TestBase): def setUp(self): super(TestDecorators, self).setUp() self.conf.register_opts(default.ALL_OPTS) def test_memoized_getattr(self): class TestClass(object): @decorators.memoized_getattr def __getattr__(self, name): return name instance = TestClass() result = instance.testing self.assertEqual('testing', result) self.assertIn('testing', instance.__dict__) def test_cached(self): conf = cfg.ConfigOpts() oslo_cache.register_config(conf) conf.cache.backend = 'dogpile.cache.memory' conf.cache.enabled = True cache = oslo_cache.get_cache(conf) sample_project = { 'name': 'Cats Abound', 'bits': b'\x80\x81\x82\x83\x84', b'key': 'Value. \x80', } def create_key(user, project=None): return user + ':' + str(project) class TestClass(object): def __init__(self, cache): self._cache = cache self.project_gets = 0 self.project_dels = 0 @decorators.caches(create_key, 60) def get_project(self, user, project=None): self.project_gets += 1 return sample_project @get_project.purges def del_project(self, user, project=None): self.project_dels += 1 instance = TestClass(cache) args = ('23', 'cats') project = instance.get_project(*args) self.assertEqual(sample_project, project) self.assertEqual(1, instance.project_gets) # Should be in the cache now. project = msgpack.unpackb(cache.get(create_key(*args))) self.assertEqual(sample_project, project) # Should read from the cache this time (counter will not # be incremented). project = instance.get_project(*args) self.assertEqual(sample_project, project) self.assertEqual(1, instance.project_gets) # Use kwargs this time instance.del_project('23', project='cats') self.assertEqual(1, instance.project_dels) # Should be a cache miss since we purged (above) project = instance.get_project(*args) self.assertEqual(2, instance.project_gets) def test_cached_with_cond(self): conf = cfg.ConfigOpts() oslo_cache.register_config(conf) conf.cache.backend = 'dogpile.cache.memory' conf.cache.enabled = True cache = oslo_cache.get_cache(conf) class TestClass(object): def __init__(self, cache): self._cache = cache self.user_gets = 0 @decorators.caches(lambda x: x, 60, lambda v: v != 'kgriffs') def get_user(self, name): self.user_gets += 1 return name instance = TestClass(cache) name = 'malini' user = instance.get_user(name) self.assertEqual(name, user) self.assertEqual(1, instance.user_gets) # Should be in the cache now. user = msgpack.unpackb(cache.get(name)) self.assertEqual(name, user) # Should read from the cache this time (counter will not # be incremented). 
user = instance.get_user(name) self.assertEqual(name, user) self.assertEqual(1, instance.user_gets) # Won't go into the cache because of cond name = 'kgriffs' for i in range(3): user = instance.get_user(name) self.assertEqual(cache.get(name), core.NO_VALUE) self.assertEqual(name, user) self.assertEqual(2 + i, instance.user_gets) def test_api_version_manager(self): self.config(enable_deprecated_api_versions=[]) # 1. Test accessing current API version VERSION = { 'id': '1', 'status': 'CURRENT', 'updated': 'Just yesterday' } @decorators.api_version_manager(VERSION) def public_endpoint_1(driver, conf): return True self.assertTrue(public_endpoint_1(None, self.conf)) # 2. Test accessing deprecated API version VERSION = { 'id': '1', 'status': 'DEPRECATED', 'updated': 'A long time ago' } @decorators.api_version_manager(VERSION) def public_endpoint_2(driver, conf): self.fail('Deprecated API enabled') public_endpoint_2(None, self.conf) # 3. Test enabling deprecated API version self.config(enable_deprecated_api_versions=['1']) @decorators.api_version_manager(VERSION) def public_endpoint_3(driver, conf): return True self.assertTrue(public_endpoint_3(None, self.conf)) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/common/test_pipeline.py0000664000175100017510000000526715033040005022715 0ustar00mylesmyles# Copyright (c) 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from zaqar.common import pipeline from zaqar.tests import base class FirstClass(object): def with_args(self, name): return name def with_kwargs(self, lastname='yo'): return lastname def with_args_kwargs(self, name, lastname='yo'): return '{0} {1}'.format(name, lastname) def no_args(self): return True def does_nothing(self): return None def calls_the_latest(self): return None class SecondClass(object): def does_nothing(self): return None def calls_the_latest(self): return True def _raise_rterror(self): raise RuntimeError("It shouldn't get here!") # NOTE(flaper87): This methods will be used to test # that the pipeline stops at the first class returning # something. 
    with_args = with_kwargs = no_args = _raise_rterror


class TestPipeLine(base.TestBase):
    def setUp(self):
        super(TestPipeLine, self).setUp()
        self.pipeline = pipeline.Pipeline([FirstClass(), SecondClass()])

    def test_attribute_error(self):
        consumer = self.pipeline.does_not_exist
        self.assertRaises(AttributeError, consumer)

    def test_with_args(self):
        name = 'James'
        self.assertEqual(name, self.pipeline.with_args(name))

    def test_with_kwargs(self):
        lastname = 'Bond'
        self.assertEqual(lastname, self.pipeline.with_kwargs(lastname))
        self.assertEqual(lastname,
                         self.pipeline.with_kwargs(lastname=lastname))

    def test_with_args_kwargs(self):
        fullname = 'James Bond'
        name, lastname = fullname.split()
        result = self.pipeline.with_args_kwargs(name, lastname=lastname)
        self.assertEqual(fullname, result)

    def test_does_nothing(self):
        self.assertIsNone(self.pipeline.does_nothing())

    def test_calls_the_latest(self):
        self.assertTrue(self.pipeline.calls_the_latest())

    def test_pipeline_context_manager(self):
        ctxt = self.pipeline.consumer_for('does_nothing')

        with ctxt as consumer:
            self.assertIsNone(consumer())
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/common/test_request.py0000664000175100017510000000210315033040005022562 0ustar00mylesmyles
# Copyright (c) 2013 Rackspace, Inc.
# Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from zaqar.common.api import request
from zaqar.common import consts
from zaqar.tests import base


class TestRequest(base.TestBase):

    def test_request(self):
        action = consts.MESSAGE_POST
        data = 'body'
        env = {'foo': 'bar'}
        req = request.Request(action=action, body=data, env=env)
        self.assertEqual({'foo': 'bar'}, req._env)
        self.assertEqual('body', req._body)
        self.assertEqual(consts.MESSAGE_POST, req._action)
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/common/test_urls.py0000664000175100017510000001061615033040005022067 0ustar00mylesmyles
# Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
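# NOTE: illustrative sketch, not part of the original suite. The expected
# signatures these tests compute are an HMAC-SHA256 hex digest over the
# comma-joined paths and methods plus the project and expiry, separated by
# a literal two-character '\n' sequence (note the raw strings used below):
#
#     import hashlib
#     import hmac
#
#     body = r'\n'.join([','.join(paths), ','.join(methods),
#                        str(project), expires_str]).encode('latin-1')
#     signature = hmac.new(key, body, hashlib.sha256).hexdigest()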
import datetime import hashlib import hmac from oslo_utils import timeutils from zaqar.common import urls from zaqar.tests import base class TestURLs(base.TestBase): def test_create_signed_url(self): timeutils.set_time_override() self.addCleanup(timeutils.clear_time_override) key = 'test'.encode('latin-1') methods = ['POST'] project = 'my-project' paths = ['/v2/queues/shared/messages'] expires = timeutils.utcnow() + datetime.timedelta(days=1) expires_str = expires.strftime(urls._DATE_FORMAT) hmac_body = (r'%(paths)s\n%(methods)s\n' r'%(project)s\n%(expires)s' % {'paths': ','.join(paths), 'methods': ','.join(methods), 'project': project, 'expires': expires_str}).encode("latin-1") expected = hmac.new(key, hmac_body, hashlib.sha256).hexdigest() actual = urls.create_signed_url(key, paths, methods=['POST'], project=project) self.assertEqual(expected, actual['signature']) def test_create_signed_url_multiple_paths(self): timeutils.set_time_override() self.addCleanup(timeutils.clear_time_override) key = 'test'.encode("latin-1") methods = ['POST'] project = 'my-project' paths = ['/v2/queues/shared/messages', '/v2/queues/shared/subscriptions'] expires = timeutils.utcnow() + datetime.timedelta(days=1) expires_str = expires.strftime(urls._DATE_FORMAT) hmac_body = (r'%(paths)s\n%(methods)s\n' r'%(project)s\n%(expires)s' % {'paths': ','.join(paths), 'methods': ','.join(methods), 'project': project, 'expires': expires_str}).encode("latin-1") expected = hmac.new(key, hmac_body, hashlib.sha256).hexdigest() actual = urls.create_signed_url(key, paths, methods=['POST'], project=project) self.assertEqual(expected, actual['signature']) def test_create_signed_url_utc(self): """Test that the method converts the TZ to UTC.""" date_str = '2100-05-31T19:00:17+02' date_str_utc = '2100-05-31T17:00:17' key = 'test'.encode("latin-1") project = None methods = ['GET'] paths = ['/v2/queues/shared/messages'] parsed = timeutils.parse_isotime(date_str_utc) expires = timeutils.normalize_time(parsed) expires_str = expires.strftime(urls._DATE_FORMAT) hmac_body = ('%(paths)s\\n%(methods)s\\n' '%(project)s\\n%(expires)s' % {'paths': ','.join(paths), 'methods': ','.join(methods), 'project': project, 'expires': expires_str}).encode("latin-1") expected = hmac.new(key, hmac_body, hashlib.sha256).hexdigest() actual = urls.create_signed_url(key, paths, expires=date_str) self.assertEqual(expected, actual['signature']) def test_create_signed_urls_validation(self): self.assertRaises(ValueError, urls.create_signed_url, None, ['/test']) self.assertRaises(ValueError, urls.create_signed_url, 'test', None) self.assertRaises(ValueError, urls.create_signed_url, 'test', ['/test'], methods='not list') self.assertRaises(ValueError, urls.create_signed_url, 'test', []) self.assertRaises(ValueError, urls.create_signed_url, 'test', '/test') self.assertRaises(ValueError, urls.create_signed_url, 'test', ['/test'], expires='wrong date format') self.assertRaises(ValueError, urls.create_signed_url, 'test', ['/test'], expires='3600') ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5790133 zaqar-20.1.0.dev29/zaqar/tests/unit/hacking/0000775000175100017510000000000015033040026017604 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/hacking/__init__.py0000664000175100017510000000000015033040005021700 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 
zaqar-20.1.0.dev29/zaqar/tests/unit/hacking/test_hacking.py0000664000175100017510000000221015033040005022613 0ustar00mylesmyles
# Copyright (c) 2017 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from zaqar.hacking import checks
from zaqar.tests import base


class HackingTestCase(base.TestBase):
    def test_no_log_translations(self):
        for log in checks._all_log_levels:
            for hint in checks._all_hints:
                bad = 'LOG.%s(%s("Bad"))' % (log, hint)
                self.assertEqual(
                    1, len(list(checks.no_translate_logs(bad))))
                # Catch abuses when used with a variable and not a literal
                bad = 'LOG.%s(%s(msg))' % (log, hint)
                self.assertEqual(
                    1, len(list(checks.no_translate_logs(bad))))
././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5790133 zaqar-20.1.0.dev29/zaqar/tests/unit/notification/0000775000175100017510000000000015033040026020666 5ustar00mylesmyles
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/notification/__init__.py0000664000175100017510000000000015033040005022762 0ustar00mylesmyles
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/notification/test_notifier.py0000664000175100017510000005321615033040005024122 0ustar00mylesmyles
# Copyright (c) 2014 Catalyst IT Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
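# NOTE: illustrative, not part of the original suite. For a webhook
# subscriber, the notifier POSTs each message as JSON with two keys added
# by the driver ('queue_name' and 'Message_Type'), matching the
# self.notifications fixtures constructed below:
#
#     {"ttl": 300,
#      "body": {"event": "BackupStarted",
#               "backup_id": "c378813c-3f0b-11e2-ad92"},
#      "queue_name": "fake_queue",
#      "Message_Type": "Notification"}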
from unittest import mock import uuid import ddt from oslo_serialization import jsonutils from oslo_utils import encodeutils from zaqar.common import urls from zaqar.notification import notifier from zaqar.notification.tasks import webhook from zaqar import tests as testing @ddt.ddt class NotifierTest(testing.TestBase): def setUp(self): super(NotifierTest, self).setUp() self.client_id = uuid.uuid4() self.project = uuid.uuid4() self.messages = [{"ttl": 300, "body": {"event": "BackupStarted", "backup_id": "c378813c-3f0b-11e2-ad92"} }, {"body": {"event": "BackupProgress", "current_bytes": "0", "total_bytes": "99614720"} } ] # NOTE(Eva-i): NotifiedDriver adds "queue_name" key to each # message (dictionary), so final notifications look like this self.notifications = [{"ttl": 300, "body": {"event": "BackupStarted", "backup_id": "c378813c-3f0b-11e2-ad92"}, "queue_name": "fake_queue", "Message_Type": "Notification" }, {"body": {"event": "BackupProgress", "current_bytes": "0", "total_bytes": "99614720"}, "queue_name": "fake_queue", "Message_Type": "Notification" } ] self.api_version = 'v2' def test_webhook(self): subscription = [{'subscriber': 'http://trigger_me', 'source': 'fake_queue', 'options': {}}, {'subscriber': 'http://call_me', 'source': 'fake_queue', 'options': {}}, {'subscriber': 'http://ping_me', 'source': 'fake_queue', 'options': {}}] ctlr = mock.MagicMock() ctlr.list = mock.Mock(return_value=iter([subscription, {}])) queue_ctlr = mock.MagicMock() queue_ctlr.get = mock.Mock(return_value={}) driver = notifier.NotifierDriver(subscription_controller=ctlr, queue_controller=queue_ctlr) headers = {'Content-Type': 'application/json'} with mock.patch('requests.post') as mock_post: mock_post.return_value = None driver.post('fake_queue', self.messages, self.client_id, self.project) driver.executor.shutdown() # Let's deserialize "data" from JSON string to dict in each mock # call, so we can do dict comparisons. JSON string comparisons # often fail, because dict keys can be serialized in different # order inside the string. for call in mock_post.call_args_list: call[1]['data'] = jsonutils.loads(call[1]['data']) # These are not real calls. In real calls each "data" argument is # serialized by json.dumps. But we made a substitution before, # so it will work. 
mock_post.assert_has_calls([ mock.call(subscription[0]['subscriber'], data=self.notifications[0], headers=headers), mock.call(subscription[1]['subscriber'], data=self.notifications[0], headers=headers), mock.call(subscription[2]['subscriber'], data=self.notifications[0], headers=headers), mock.call(subscription[0]['subscriber'], data=self.notifications[1], headers=headers), mock.call(subscription[1]['subscriber'], data=self.notifications[1], headers=headers), mock.call(subscription[2]['subscriber'], data=self.notifications[1], headers=headers), ], any_order=True) self.assertEqual(6, len(mock_post.mock_calls)) def test_webhook_post_data(self): post_data = {'foo': 'bar', 'egg': '$zaqar_message$'} subscription = [{'subscriber': 'http://trigger_me', 'source': 'fake_queue', 'options': {'post_data': jsonutils.dumps(post_data)}}] ctlr = mock.MagicMock() ctlr.list = mock.Mock(return_value=iter([subscription, {}])) queue_ctlr = mock.MagicMock() queue_ctlr.get = mock.Mock(return_value={}) driver = notifier.NotifierDriver(subscription_controller=ctlr, queue_controller=queue_ctlr) headers = {'Content-Type': 'application/json'} with mock.patch('requests.post') as mock_post: mock_post.return_value = None driver.post('fake_queue', self.messages, self.client_id, self.project) driver.executor.shutdown() # Let's deserialize "data" from JSON string to dict in each mock # call, so we can do dict comparisons. JSON string comparisons # often fail, because dict keys can be serialized in different # order inside the string. for call in mock_post.call_args_list: call[1]['data'] = jsonutils.loads(call[1]['data']) # These are not real calls. In real calls each "data" argument is # serialized by json.dumps. But we made a substitution before, # so it will work. mock_post.assert_has_calls([ mock.call(subscription[0]['subscriber'], data={'foo': 'bar', 'egg': self.notifications[0]}, headers=headers), mock.call(subscription[0]['subscriber'], data={'foo': 'bar', 'egg': self.notifications[1]}, headers=headers), ], any_order=True) self.assertEqual(2, len(mock_post.mock_calls)) def test_marker(self): subscription1 = [{'subscriber': 'http://trigger_me1', 'source': 'fake_queue', 'options': {}}] subscription2 = [{'subscriber': 'http://trigger_me2', 'source': 'fake_queue', 'options': {}}] ctlr = mock.MagicMock() def mock_list(queue, project, marker): if not marker: return iter([subscription1, 'marker_id']) else: return iter([subscription2, {}]) ctlr.list = mock_list queue_ctlr = mock.MagicMock() queue_ctlr.get = mock.Mock(return_value={}) driver = notifier.NotifierDriver(subscription_controller=ctlr, queue_controller=queue_ctlr) headers = {'Content-Type': 'application/json'} with mock.patch('requests.post') as mock_post: mock_post.return_value = None driver.post('fake_queue', self.messages, self.client_id, self.project) driver.executor.shutdown() # Let's deserialize "data" from JSON string to dict in each mock # call, so we can do dict comparisons. JSON string comparisons # often fail, because dict keys can be serialized in different # order inside the string. for call in mock_post.call_args_list: call[1]['data'] = jsonutils.loads(call[1]['data']) # These are not real calls. In real calls each "data" argument is # serialized by json.dumps. But we made a substitution before, # so it will work. 
mock_post.assert_has_calls([ mock.call(subscription1[0]['subscriber'], data=self.notifications[0], headers=headers), mock.call(subscription2[0]['subscriber'], data=self.notifications[0], headers=headers), ], any_order=True) self.assertEqual(4, len(mock_post.mock_calls)) @mock.patch('subprocess.Popen') def test_mailto(self, mock_popen): subscription = [{'subscriber': 'mailto:aaa@example.com', 'source': 'fake_queue', 'options': {'subject': 'Hello', 'from': 'zaqar@example.com'}}, {'subscriber': 'mailto:bbb@example.com', 'source': 'fake_queue', 'options': {'subject': 'Hello', 'from': 'zaqar@example.com'}}] ctlr = mock.MagicMock() ctlr.list = mock.Mock(return_value=iter([subscription, {}])) queue_ctlr = mock.MagicMock() queue_ctlr.get = mock.Mock(return_value={}) driver = notifier.NotifierDriver(subscription_controller=ctlr, queue_controller=queue_ctlr) ctlr.driver.conf.notification.smtp_mode = 'third_part' called = set() msg = ('Content-Type: text/plain; charset="us-ascii"\n' 'MIME-Version: 1.0\nContent-Transfer-Encoding: 7bit\nto:' ' %(to)s\nfrom: %(from)s\nsubject: %(subject)s\n\n%(body)s') mail1 = msg % {'to': subscription[0]['subscriber'][7:], 'from': 'zaqar@example.com', 'subject': 'Hello', 'body': jsonutils.dumps(self.notifications[0])} mail2 = msg % {'to': subscription[0]['subscriber'][7:], 'from': 'zaqar@example.com', 'subject': 'Hello', 'body': jsonutils.dumps(self.notifications[1])} mail3 = msg % {'to': subscription[1]['subscriber'][7:], 'from': 'zaqar@example.com', 'subject': 'Hello', 'body': jsonutils.dumps(self.notifications[0])} mail4 = msg % {'to': subscription[1]['subscriber'][7:], 'from': 'zaqar@example.com', 'subject': 'Hello', 'body': jsonutils.dumps(self.notifications[1])} def _communicate(msg, timeout=None): called.add(msg) return ('', '') mock_process = mock.Mock() attrs = {'communicate': _communicate, 'returncode': 0} mock_process.configure_mock(**attrs) mock_popen.return_value = mock_process driver.post('fake_queue', self.messages, self.client_id, self.project) driver.executor.shutdown() self.assertEqual(4, len(called)) # Let's deserialize "body" from JSON string to dict and then serialize # it back to JSON, but sorted, allowing us make comparisons. 
mails = {mail1, mail2, mail3, mail4} mail_options = [] mail_bodies = [] for mail in mails: options, body = mail.split('\n\n') mail_options.append(options) mail_bodies.append(jsonutils.dumps(jsonutils.loads(body), sort_keys=True)) called_options = [] called_bodies = [] for call in called: options, body = encodeutils.safe_decode(call).split('\n\n') called_options.append(options) called_bodies.append(jsonutils.dumps(jsonutils.loads(body), sort_keys=True)) self.assertEqual(sorted(mail_options), sorted(called_options)) self.assertEqual(sorted(mail_bodies), sorted(called_bodies)) def test_post_no_subscriber(self): ctlr = mock.MagicMock() ctlr.list = mock.Mock(return_value=iter([[], {}])) queue_ctlr = mock.MagicMock() queue_ctlr.get = mock.Mock(return_value={}) driver = notifier.NotifierDriver(subscription_controller=ctlr, queue_controller=queue_ctlr) with mock.patch('requests.post') as mock_post: driver.post('fake_queue', self.messages, self.client_id, self.project) driver.executor.shutdown() self.assertEqual(0, mock_post.call_count) def test_proper_notification_data(self): subscription = [{'subscriber': 'http://trigger_me', 'source': 'fake_queue', 'options': {}}] ctlr = mock.MagicMock() ctlr.list = mock.Mock(return_value=iter([subscription, {}])) queue_ctlr = mock.MagicMock() queue_ctlr.get = mock.Mock(return_value={}) driver = notifier.NotifierDriver(subscription_controller=ctlr, queue_controller=queue_ctlr) with mock.patch('requests.post') as mock_post: mock_post.return_value = None driver.post('fake_queue', self.messages, self.client_id, self.project) driver.executor.shutdown() self.assertEqual(2, mock_post.call_count) self.assertEqual(self.notifications[1], jsonutils.loads(mock_post.call_args[1]['data'])) @mock.patch('requests.post') def test_send_confirm_notification(self, mock_request): self.conf.notification.require_confirmation = True subscription = {'id': '5760c9fb3990b42e8b7c20bd', 'subscriber': 'http://trigger_me', 'source': 'fake_queue', 'options': {}} ctlr = mock.MagicMock() ctlr.list = mock.Mock(return_value=subscription) driver = notifier.NotifierDriver(subscription_controller=ctlr, require_confirmation=True) self.conf.signed_url.secret_key = 'test_key' driver.send_confirm_notification('test_queue', subscription, self.conf, str(self.project), api_version=self.api_version) driver.executor.shutdown() self.assertEqual(1, mock_request.call_count) expect_args = ['SubscribeBody', 'queue_name', 'URL-Methods', 'X-Project-ID', 'URL-Signature', 'URL-Paths', 'Message', 'URL-Expires', 'Message_Type', 'WSGISubscribeURL', 'WebSocketSubscribeURL' 'UnsubscribeBody'] actual_args = jsonutils.loads(mock_request.call_args[1]['data']).keys() self.assertEqual(expect_args.sort(), list(actual_args).sort()) @mock.patch('requests.post') def test_send_confirm_notification_without_signed_url(self, mock_request): subscription = [{'subscriber': 'http://trigger_me', 'source': 'fake_queue', 'options': {}}] ctlr = mock.MagicMock() ctlr.list = mock.Mock(return_value=iter([subscription, {}])) driver = notifier.NotifierDriver(subscription_controller=ctlr) driver.send_confirm_notification('test_queue', subscription, self.conf, str(self.project), self.api_version) driver.executor.shutdown() self.assertEqual(0, mock_request.call_count) @mock.patch.object(urls, 'create_signed_url') def test_require_confirmation_false(self, mock_create_signed_url): subscription = [{'subscriber': 'http://trigger_me', 'source': 'fake_queue', 'options': {}}] ctlr = mock.MagicMock() driver = 
notifier.NotifierDriver(subscription_controller=ctlr, require_confirmation=False) driver.send_confirm_notification('test_queue', subscription, self.conf, str(self.project), self.api_version) self.assertFalse(mock_create_signed_url.called) def _make_confirm_string(self, conf, message, queue_name): confirmation_url = conf.notification.external_confirmation_url param_string_signature = '?Signature=' + message.get('signature') param_string_methods = '&Methods=' + message.get('methods')[0] param_string_paths = '&Paths=' + message.get('paths')[0] param_string_project = '&Project=' + message.get('project') param_string_expires = '&Expires=' + message.get('expires') param_string_confirm_url = '&Url=' + message.get('WSGISubscribeURL', '') param_string_queue = '&Queue=' + queue_name confirm_url_string = (confirmation_url + param_string_signature + param_string_methods + param_string_paths + param_string_project + param_string_expires + param_string_confirm_url + param_string_queue) return confirm_url_string @mock.patch('zaqar.common.urls.create_signed_url') @mock.patch('subprocess.Popen') def _send_confirm_notification_with_email(self, mock_popen, mock_signed_url, is_unsubscribed=False): subscription = {'id': '5760c9fb3990b42e8b7c20bd', 'subscriber': 'mailto:aaa@example.com', 'source': 'test_queue', 'options': {'subject': 'Hello', 'from': 'zaqar@example.com'} } driver = notifier.NotifierDriver(require_confirmation=True) self.conf.signed_url.secret_key = 'test_key' self.conf.notification.external_confirmation_url = 'http://127.0.0.1' self.conf.notification.require_confirmation = True message = {'methods': ['PUT'], 'paths': ['/v2/queues/test_queue/subscriptions/' '5760c9fb3990b42e8b7c20bd/confirm'], 'project': str(self.project), 'expires': '2016-12-20T02:01:23', 'signature': 'e268676368c235dbe16e0e9ac40f2829a92c948288df' '36e1cbabd9de73f698df', } confirm_url = self._make_confirm_string(self.conf, message, 'test_queue') msg = ('Content-Type: text/plain; charset="us-ascii"\n' 'MIME-Version: 1.0\nContent-Transfer-Encoding: 7bit\nto:' ' %(to)s\nfrom: %(from)s\nsubject: %(subject)s\n\n%(body)s') if is_unsubscribed: e = self.conf.notification.unsubscribe_confirmation_email_template body = e['body'] topic = e['topic'] sender = e['sender'] else: e = self.conf.notification.subscription_confirmation_email_template body = e['body'] topic = e['topic'] sender = e['sender'] body = body.format(subscription['source'], str(self.project), confirm_url) mail1 = msg % {'to': subscription['subscriber'][7:], 'from': sender, 'subject': topic, 'body': body} called = set() def _communicate(msg, timeout=None): called.add(msg) return ('', '') mock_process = mock.Mock() attrs = {'communicate': _communicate, 'returncode': 0} mock_process.configure_mock(**attrs) mock_popen.return_value = mock_process mock_signed_url.return_value = message driver.send_confirm_notification('test_queue', subscription, self.conf, str(self.project), api_version=self.api_version, is_unsubscribed=is_unsubscribed) driver.executor.shutdown() self.assertEqual(1, mock_popen.call_count) options, body = mail1.split('\n\n') expec_options = [options] expect_body = [body] called_options = [] called_bodies = [] for call in called: options, body = encodeutils.safe_decode(call).split('\n\n') called_options.append(options) called_bodies.append(body) self.assertEqual(expec_options, called_options) self.assertEqual(expect_body, called_bodies) @ddt.data(False, True) def test_send_confirm_notification_with_email(self, is_unsub): 
self._send_confirm_notification_with_email(is_unsubscribed=is_unsub) def test_webhook_backoff_function(self): expect = [10, 12, 14, 18, 22, 27, 33, 40, 49, 60] sec = webhook._Exponential_function(10, 60, 5) self.assertEqual(expect, sec) expect = [20, 22, 25, 29, 33, 37, 42, 48, 54, 62, 70, 80] sec = webhook._Geometric_function(20, 80, 5) self.assertEqual(expect, sec) expect = [30, 30, 32, 34, 37, 41, 46, 51, 57, 64, 72, 80, 90, 100] sec = webhook._Arithmetic_function(30, 100, 5) self.assertEqual(expect, sec) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5790133 zaqar-20.1.0.dev29/zaqar/tests/unit/storage/0000775000175100017510000000000015033040026017644 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/storage/__init__.py0000664000175100017510000000000015033040005021740 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/storage/base.py0000664000175100017510000024136415033040005021137 0ustar00mylesmyles# Copyright (c) 2013 Red Hat, Inc. # Copyright (c) 2014 Catalyst IT Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import abc import datetime import hashlib import math import os import random import time from unittest import mock import uuid import ddt from oslo_serialization import jsonutils from oslo_utils import timeutils import testtools from testtools import matchers from zaqar.common import cache as oslo_cache from zaqar import storage from zaqar.storage import errors from zaqar.storage import pipeline from zaqar import tests as testing from zaqar.tests import helpers class ControllerBaseTest(testing.TestBase): project = 'project' driver_class = None controller_class = None controller_base_class = None def setUp(self): super(ControllerBaseTest, self).setUp() if not self.driver_class: self.skipTest('No driver class specified') if not issubclass(self.controller_class, self.controller_base_class): self.skipTest('{0} is not an instance of {1}. ' 'Tests not supported'.format( self.controller_class, self.controller_base_class)) oslo_cache.register_config(self.conf) cache = oslo_cache.get_cache(self.conf) pooling = 'pooling' in self.conf and self.conf.pooling if pooling and not self.control_driver_class: self.skipTest("Pooling is enabled, " "but control driver class is not specified") self.control = self.control_driver_class(self.conf, cache) if not pooling: args = [self.conf, cache] if issubclass(self.driver_class, storage.DataDriverBase): args.append(self.control) self.driver = self.driver_class(*args) else: testredis = os.environ.get('ZAQAR_TEST_REDIS', 0) if testredis: uri = self.redis_url for i in range(4): db_name = "?dbid = " + str(i) # NOTE(dynarro): we need to create a unique uri. 
new_uri = "%s/%s" % (uri, db_name) self.control.pools_controller.create(str(i), 100, new_uri) else: uri = self.mongodb_url for i in range(4): db_name = "zaqar_test_pools_" + str(i) # NOTE(dynarro): we need to create a unique uri. new_uri = "%s/%s" % (uri, db_name) options = {'database': db_name} self.control.pools_controller.create(str(i), 100, new_uri, options=options) self.driver = self.driver_class(self.conf, cache, self.control) self.addCleanup(self.control.pools_controller.drop_all) self.addCleanup(self.control.catalogue_controller.drop_all) self._prepare_conf() self.addCleanup(self._purge_databases) if not pooling: self.controller = self.controller_class(self.driver) else: self.controller = self.controller_class(self.driver._pool_catalog) self.pipeline = pipeline.DataDriver(self.conf, self.driver, self.control) def _prepare_conf(self): """Prepare the conf before running tests Classes overriding this method, must use the `self.conf` instance and alter its state. """ def _purge_databases(self): """Override to clean databases.""" @ddt.ddt class QueueControllerTest(ControllerBaseTest): """Queue Controller base tests.""" controller_base_class = storage.Queue def setUp(self): super(QueueControllerTest, self).setUp() self.queue_controller = self.pipeline.queue_controller @ddt.data(None, ControllerBaseTest.project) def test_list(self, project): # NOTE(kgriffs): Ensure we mix global and scoped queues # in order to verify that queue records are excluded that # are not at the same level. project_alt = self.project if project is None else None num = 15 for queue in range(num): queue = str(queue) self.controller.create(queue, project=project) self.controller.create(queue, project=project_alt) self.addCleanup(self.controller.delete, queue, project=project) self.addCleanup(self.controller.delete, queue, project=project_alt) interaction = self.controller.list(project=project, detailed=True) queues = list(next(interaction)) self.assertTrue(all(map(lambda queue: 'name' in queue and 'metadata' in queue, queues))) self.assertEqual(10, len(queues)) interaction = self.controller.list(project=project, marker=next(interaction)) queues = list(next(interaction)) self.assertTrue(all(map(lambda queue: 'name' in queue and 'metadata' not in queue, queues))) self.assertEqual(5, len(queues)) def test_queue_lifecycle(self): # Test queue creation created = self.controller.create('test', metadata=dict(meta='test_meta'), project=self.project) self.assertTrue(created) # Test queue existence self.assertTrue(self.controller.exists('test', project=self.project)) # Test queue retrieval interaction = self.controller.list(project=self.project) queue = list(next(interaction))[0] self.assertEqual('test', queue['name']) # Test queue metadata retrieval metadata = self.controller.get('test', project=self.project) self.assertEqual('test_meta', metadata['meta']) # Touching an existing queue does not affect metadata created = self.controller.create('test', project=self.project) self.assertFalse(created) metadata = self.controller.get('test', project=self.project) self.assertEqual('test_meta', metadata['meta']) # Test queue deletion self.controller.delete('test', project=self.project) # Test queue existence self.assertFalse(self.controller.exists('test', project=self.project)) class MessageControllerTest(ControllerBaseTest): """Message Controller base tests. NOTE(flaper87): Implementations of this class should override the tearDown method in order to clean up storage's state. 
""" queue_name = 'test_queue' controller_base_class = storage.Message # Specifies how often expired messages are purged, in sec. gc_interval = 0 def setUp(self): super(MessageControllerTest, self).setUp() # Lets create a queue self.queue_controller = self.pipeline.queue_controller self.claim_controller = self.pipeline.claim_controller self.queue_controller.create(self.queue_name, project=self.project) def tearDown(self): self.queue_controller.delete(self.queue_name, project=self.project) super(MessageControllerTest, self).tearDown() def test_stats_for_empty_queue(self): self.addCleanup(self.queue_controller.delete, 'test', project=self.project) created = self.queue_controller.create('test', project=self.project) self.assertTrue(created) stats = self.queue_controller.stats('test', project=self.project) message_stats = stats['messages'] self.assertEqual(0, message_stats['free']) self.assertEqual(0, message_stats['claimed']) self.assertEqual(0, message_stats['total']) self.assertNotIn('newest', message_stats) self.assertNotIn('oldest', message_stats) def test_queue_count_on_bulk_delete(self): self.addCleanup(self.queue_controller.delete, 'test-queue', project=self.project) queue_name = 'test-queue' client_uuid = uuid.uuid4() created = self.queue_controller.create(queue_name, project=self.project) self.assertTrue(created) # Create 10 messages. msg_keys = _insert_fixtures(self.controller, queue_name, project=self.project, client_uuid=client_uuid, num=10) stats = self.queue_controller.stats(queue_name, self.project)['messages'] self.assertEqual(10, stats['total']) # Delete 5 messages self.controller.bulk_delete(queue_name, msg_keys[0:5], self.project) stats = self.queue_controller.stats(queue_name, self.project)['messages'] self.assertEqual(5, stats['total']) def test_queue_count_on_bulk_delete_with_invalid_id(self): self.addCleanup(self.queue_controller.delete, 'test-queue', project=self.project) queue_name = 'test-queue' client_uuid = uuid.uuid4() created = self.queue_controller.create(queue_name, project=self.project) self.assertTrue(created) # Create 10 messages. msg_keys = _insert_fixtures(self.controller, queue_name, project=self.project, client_uuid=client_uuid, num=10) stats = self.queue_controller.stats(queue_name, self.project)['messages'] self.assertEqual(10, stats['total']) # Delete 5 messages self.controller.bulk_delete(queue_name, msg_keys[0:5] + ['invalid'], self.project) stats = self.queue_controller.stats(queue_name, self.project)['messages'] self.assertEqual(5, stats['total']) def test_queue_count_on_delete(self): self.addCleanup(self.queue_controller.delete, 'test-queue', project=self.project) queue_name = 'test-queue' client_uuid = uuid.uuid4() created = self.queue_controller.create(queue_name, project=self.project) self.assertTrue(created) # Create 10 messages. 
msg_keys = _insert_fixtures(self.controller, queue_name, project=self.project, client_uuid=client_uuid, num=10) stats = self.queue_controller.stats(queue_name, self.project)['messages'] self.assertEqual(10, stats['total']) # Delete 1 message self.controller.delete(queue_name, msg_keys[0], self.project) stats = self.queue_controller.stats(queue_name, self.project)['messages'] self.assertEqual(9, stats['total']) def test_queue_stats(self): # Test queue creation self.addCleanup(self.queue_controller.delete, 'test', project=self.project) created = self.queue_controller.create('test', metadata=dict(meta='test_meta'), project=self.project) client_uuid = uuid.uuid4() # Test queue statistic _insert_fixtures(self.controller, 'test', project=self.project, client_uuid=client_uuid, num=6) # NOTE(kgriffs): We can't get around doing this, because # we don't know how the storage drive may be calculating # message timestamps (and may not be monkey-patchable). time.sleep(1.2) _insert_fixtures(self.controller, 'test', project=self.project, client_uuid=client_uuid, num=6) stats = self.queue_controller.stats('test', project=self.project) message_stats = stats['messages'] self.assertEqual(12, message_stats['free']) self.assertEqual(0, message_stats['claimed']) self.assertEqual(12, message_stats['total']) oldest = message_stats['oldest'] newest = message_stats['newest'] self.assertNotEqual(oldest, newest) age = oldest['age'] self.assertThat(age, matchers.GreaterThan(0)) # NOTE(kgriffs): Ensure is different enough # for the next comparison to work. soon = timeutils.utcnow() + datetime.timedelta(seconds=60) for message_stat in (oldest, newest): created_iso = message_stat['created'] created = timeutils.parse_isotime(created_iso) self.assertThat(timeutils.normalize_time(created), matchers.LessThan(soon)) self.assertIn('id', message_stat) self.assertThat(oldest['created'], matchers.LessThan(newest['created'])) def test_queue_count_on_claim_delete(self): self.addCleanup(self.queue_controller.delete, 'test-queue', project=self.project) queue_name = 'test-queue' client_uuid = uuid.uuid4() created = self.queue_controller.create(queue_name, project=self.project) self.assertTrue(created) # Create 15 messages. msg_keys = _insert_fixtures(self.controller, queue_name, project=self.project, client_uuid=client_uuid, num=15) stats = self.queue_controller.stats(queue_name, self.project)['messages'] self.assertEqual(15, stats['total']) metadata = {'ttl': 120, 'grace': 60} # Claim 10 messages claim_id, _ = self.claim_controller.create(queue_name, metadata, self.project) stats = self.queue_controller.stats(queue_name, self.project)['messages'] self.assertEqual(10, stats['claimed']) # Delete one message and ensure stats are updated even # thought the claim itself has not been deleted. 
self.controller.delete(queue_name, msg_keys[0], self.project, claim_id) stats = self.queue_controller.stats(queue_name, self.project)['messages'] self.assertEqual(14, stats['total']) self.assertEqual(9, stats['claimed']) self.assertEqual(5, stats['free']) # Same thing but use bulk_delete interface self.controller.bulk_delete(queue_name, msg_keys[1:3], self.project) stats = self.queue_controller.stats(queue_name, self.project)['messages'] self.assertEqual(12, stats['total']) self.assertEqual(7, stats['claimed']) self.assertEqual(5, stats['free']) # Delete the claim self.claim_controller.delete(queue_name, claim_id, self.project) stats = self.queue_controller.stats(queue_name, self.project)['messages'] self.assertEqual(0, stats['claimed']) def test_message_lifecycle(self): queue_name = self.queue_name message = { 'ttl': 60, 'body': { 'event': 'BackupStarted', 'backupId': 'c378813c-3f0b-11e2-ad92-7823d2b0f3ce' } } # Test Message Creation created = list(self.controller.post(queue_name, [message], project=self.project, client_uuid=uuid.uuid4())) self.assertEqual(1, len(created)) message_id = created[0] # Test Message Get message_out = self.controller.get(queue_name, message_id, project=self.project) self.assertEqual({'id', 'body', 'ttl', 'age', 'claim_count', 'claim_id'}, set(message_out)) self.assertEqual(message_id, message_out['id']) self.assertEqual(message['body'], message_out['body']) self.assertEqual(message['ttl'], message_out['ttl']) # Test Message Deletion self.controller.delete(queue_name, message_id, project=self.project) # Test does not exist with testing.expect(errors.DoesNotExist): self.controller.get(queue_name, message_id, project=self.project) def test_message_body_checksum(self): self.conf.enable_checksum = True queue_name = self.queue_name message = { 'ttl': 60, 'body': { 'event': 'BackupStarted', 'backupId': 'c378813c-3f0b-11e2-ad92-7823d2b0f3ce' } } # Test Message Creation created = list(self.controller.post(queue_name, [message], project=self.project, client_uuid=uuid.uuid4())) self.assertEqual(1, len(created)) message_id = created[0] # Test Message Get message_out = self.controller.get(queue_name, message_id, project=self.project) self.assertEqual({'id', 'body', 'ttl', 'age', 'claim_count', 'claim_id', 'checksum'}, set(message_out)) algorithm, checksum = message_out['checksum'].split(':') expected_checksum = '' if algorithm == 'MD5': md5 = hashlib.md5() md5.update(jsonutils.dump_as_bytes(message['body'])) expected_checksum = md5.hexdigest() self.assertEqual(expected_checksum, checksum) def test_get_multi(self): client_uuid = uuid.uuid4() _insert_fixtures(self.controller, self.queue_name, project=self.project, client_uuid=client_uuid, num=15) def load_messages(expected, *args, **kwargs): interaction = self.controller.list(*args, **kwargs) msgs = list(next(interaction)) self.assertEqual(expected, len(msgs)) return interaction # Test all messages, echo False and uuid load_messages(0, self.queue_name, project=self.project, client_uuid=client_uuid) # Test all messages and limit load_messages(15, self.queue_name, project=self.project, limit=20, echo=True) # Test default limit load_messages(storage.DEFAULT_MESSAGES_PER_PAGE, self.queue_name, project=self.project, echo=True) # Test all messages, echo True, and uuid interaction = load_messages(10, self.queue_name, echo=True, project=self.project, client_uuid=client_uuid) # Test all messages, echo True, uuid and marker load_messages(5, self.queue_name, echo=True, project=self.project, marker=next(interaction), 
client_uuid=client_uuid) def test_multi_ids(self): messages_in = [{'ttl': 120, 'body': 0}, {'ttl': 240, 'body': 1}] ids = self.controller.post(self.queue_name, messages_in, project=self.project, client_uuid=uuid.uuid4()) messages_out = self.controller.bulk_get(self.queue_name, ids, project=self.project) for idx, message in enumerate(messages_out): self.assertEqual({'id', 'body', 'ttl', 'age', 'claim_count', 'claim_id'}, set(message)) self.assertEqual(idx, message['body']) self.controller.bulk_delete(self.queue_name, ids, project=self.project) with testing.expect(StopIteration): result = self.controller.bulk_get(self.queue_name, ids, project=self.project) next(result) def test_claim_effects(self): client_uuid = uuid.uuid4() _insert_fixtures(self.controller, self.queue_name, project=self.project, client_uuid=client_uuid, num=12) def list_messages(include_claimed=None): kwargs = { 'project': self.project, 'client_uuid': client_uuid, 'echo': True, } # Properly test default value if include_claimed is not None: kwargs['include_claimed'] = include_claimed interaction = self.controller.list(self.queue_name, **kwargs) messages = next(interaction) return [msg['id'] for msg in messages] messages_before = list_messages(True) meta = {'ttl': 70, 'grace': 60} another_cid, _ = self.claim_controller.create(self.queue_name, meta, project=self.project) messages_after = list_messages(True) self.assertEqual(messages_before, messages_after) messages_excluding_claimed = list_messages() self.assertNotEqual(messages_before, messages_excluding_claimed) self.assertEqual(2, len(messages_excluding_claimed)) cid, msgs = self.claim_controller.create(self.queue_name, meta, project=self.project) [msg1, msg2] = msgs # A wrong claim does not ensure the message deletion with testing.expect(errors.NotPermitted): self.controller.delete(self.queue_name, msg1['id'], project=self.project, claim=another_cid) # Make sure a message can be deleted with a claim self.controller.delete(self.queue_name, msg1['id'], project=self.project, claim=cid) with testing.expect(errors.DoesNotExist): self.controller.get(self.queue_name, msg1['id'], project=self.project) # Make sure such a deletion is idempotent self.controller.delete(self.queue_name, msg1['id'], project=self.project, claim=cid) # A non-existing claim does not ensure the message deletion self.claim_controller.delete(self.queue_name, cid, project=self.project) # NOTE(kgriffs) Message is no longer claimed, but try # to delete it with the claim anyway. It should raise # an error, because the client needs a hint that # perhaps the claim expired before it got around to # trying to delete the message, which means another # worker could be processing this message now. with testing.expect(errors.NotPermitted, errors.ClaimDoesNotExist): self.controller.delete(self.queue_name, msg2['id'], project=self.project, claim=cid) @testing.is_slow(condition=lambda self: self.gc_interval > 1) def test_expired_messages(self): messages = [{'body': 3.14, 'ttl': 1}, {'body': 0.618, 'ttl': 600}] client_uuid = uuid.uuid4() [msgid_expired, msgid] = self.controller.post(self.queue_name, messages, project=self.project, client_uuid=client_uuid) # NOTE(kgriffs): Allow for automatic GC of claims, messages for i in range(self.gc_interval): time.sleep(1) # NOTE(kgriffs): Some drivers require a manual GC to be # triggered to clean up claims and messages. 
            self.driver.gc()

            try:
                self.controller.get(self.queue_name, msgid_expired,
                                    project=self.project)
            except errors.DoesNotExist:
                break
        else:
            self.fail("Didn't remove the expired message")

        # Make sure expired messages are not returned when listing
        interaction = self.controller.list(self.queue_name,
                                           project=self.project)
        messages = list(next(interaction))
        self.assertEqual(1, len(messages))
        self.assertEqual(msgid, messages[0]['id'])

        stats = self.queue_controller.stats(self.queue_name,
                                            project=self.project)
        self.assertEqual(1, stats['messages']['free'])

        # Make sure expired messages are not returned when popping
        messages = self.controller.pop(self.queue_name,
                                       limit=10,
                                       project=self.project)
        self.assertEqual(1, len(messages))
        self.assertEqual(msgid, messages[0]['id'])

    def test_bad_id(self):
        # NOTE(cpp-cabrera): A malformed ID should result in an empty
        # query. Raising an exception for validating IDs makes the
        # implementation more verbose instead of taking advantage of
        # the Maybe/Optional protocol, particularly when dealing with
        # bulk operations.
        bad_message_id = 'xyz'
        self.controller.delete(self.queue_name,
                               bad_message_id,
                               project=self.project)

        with testing.expect(errors.MessageDoesNotExist):
            self.controller.get(self.queue_name,
                                bad_message_id,
                                project=self.project)

    def test_bad_claim_id(self):
        [msgid] = self.controller.post(self.queue_name,
                                       [{'body': {}, 'ttl': 10}],
                                       project=self.project,
                                       client_uuid=uuid.uuid4())

        # NOTE(kgriffs): If the client has a typo or
        # something, they will need a hint that the
        # request was invalid.
        #
        # On the other hand, if they are actually
        # probing for a vulnerability, telling them
        # the claim they requested doesn't exist should
        # be harmless.
        with testing.expect(storage.errors.ClaimDoesNotExist):
            bad_claim_id = '; DROP TABLE queues'
            self.controller.delete(self.queue_name,
                                   msgid,
                                   project=self.project,
                                   claim=bad_claim_id)

    def test_bad_marker(self):
        bad_marker = 'xyz'
        interaction = self.controller.list(self.queue_name,
                                           project=self.project,
                                           client_uuid=uuid.uuid4(),
                                           marker=bad_marker)
        messages = list(next(interaction))

        self.assertEqual([], messages)

    def test_sort_for_first(self):
        client_uuid = uuid.uuid4()

        [msgid_first] = self.controller.post(self.queue_name,
                                             [{'body': {}, 'ttl': 120}],
                                             project=self.project,
                                             client_uuid=client_uuid)

        _insert_fixtures(self.controller, self.queue_name,
                         project=self.project, client_uuid=client_uuid,
                         num=10)

        [msgid_last] = self.controller.post(self.queue_name,
                                            [{'body': {}, 'ttl': 120}],
                                            project=self.project,
                                            client_uuid=client_uuid)

        msg_asc = self.controller.first(self.queue_name,
                                        self.project, 1)
        self.assertEqual(msgid_first, msg_asc['id'])

        msg_desc = self.controller.first(self.queue_name,
                                         self.project, -1)
        self.assertEqual(msgid_last, msg_desc['id'])

    def test_get_first_with_empty_queue_exception(self):
        self.assertRaises(errors.QueueIsEmpty,
                          self.controller.first,
                          self.queue_name, project=self.project)

    def test_get_first_with_invalid_sort_option(self):
        self.assertRaises(ValueError,
                          self.controller.first,
                          self.queue_name, sort=0,
                          project=self.project)

    def test_pop_message(self):
        self.queue_controller.create(self.queue_name, project=self.project)
        messages = [
            {
                'ttl': 60,
                'body': {
                    'event': 'BackupStarted',
                    'backupId': 'c378813c-3f0b-11e2-ad92-7823d2b0f3ce',
                },
            },
            {
                'ttl': 60,
                'body': {
                    'event': 'BackupStarted',
                    'backupId': 'd378813c-3f0b-11e2-ad92-7823d2b0f3ce',
                },
            },
            {
                'ttl': 60,
                'body': {
                    'event': 'BackupStarted',
                    'backupId': 'e378813c-3f0b-11e2-ad92-7823d2b0f3ce',
                },
            },
        ]

        client_uuid = uuid.uuid1()
        self.controller.post(self.queue_name, messages, client_uuid,
                             project=self.project)

        # Test Message Pop
        popped_messages = self.controller.pop(self.queue_name,
                                              limit=1,
                                              project=self.project)

        self.assertEqual(1, len(popped_messages))

    @testtools.skip("Skip until bug 1739332 is fixed")
    def test_message_period(self):
        self.queue_controller.create(self.queue_name, project=self.project)
        messages = [
            {
                'ttl': 60,
                'body': {
                    'event.data': 'BackupStarted',
                    'backupId': 'c378813c-3f0b-11e2-ad92-7823d2b0f3ce',
                },
            },
        ]
        client_uuid = uuid.uuid1()
        self.controller.post(self.queue_name, messages, client_uuid,
                             project=self.project)

        stored_messages = self.controller.list(self.queue_name,
                                               project=self.project)

        self.assertCountEqual(['event.data', 'backupId'],
                              list(next(stored_messages))[0]['body'].keys())

    def test_delete_message_from_nonexistent_queue(self):
        queue_name = 'fake_name'
        message_id = 'fake_id'
        res = self.controller.delete(queue_name, message_id,
                                     project=self.project)
        self.assertIsNone(res)

    def test_delete_messages_with_ids_from_nonexistent_queue(self):
        queue_name = 'fake_name'
        message_ids = ['fake_id1', 'fake_id2']
        res = self.controller.bulk_delete(queue_name, message_ids,
                                          project=self.project)
        self.assertIsNone(res)

    def test_get_messages_with_ids_from_nonexistent_queue(self):
        queue_name = 'fake_name'
        message_ids = ['fake_id1', 'fake_id2']
        res = self.controller.bulk_get(queue_name, message_ids,
                                       project=self.project)
        self.assertIsInstance(res, abc.Iterable)
        self.assertEqual([], list(res))


class ClaimControllerTest(ControllerBaseTest):
    """Claim Controller base tests.

    NOTE(flaper87): Implementations of this class should
    override the tearDown method in order
    to clean up storage's state.
    """
    queue_name = 'test_queue'
    controller_base_class = storage.Claim

    def setUp(self):
        super(ClaimControllerTest, self).setUp()

        # Let's create a queue
        self.queue_controller = self.pipeline.queue_controller
        self.message_controller = self.pipeline.message_controller
        self.queue_controller.create(self.queue_name, project=self.project)

    def tearDown(self):
        self.queue_controller.delete(self.queue_name, project=self.project)
        super(ClaimControllerTest, self).tearDown()

    def test_claim_lifecycle(self):
        _insert_fixtures(self.message_controller, self.queue_name,
                         project=self.project, client_uuid=uuid.uuid4(),
                         num=20)

        meta = {'ttl': 70, 'grace': 30}

        # Make sure create works
        claim_id, messages = self.controller.create(self.queue_name, meta,
                                                    project=self.project,
                                                    limit=15)

        messages = list(messages)
        self.assertEqual(15, len(messages))

        # Ensure Queue stats
        countof = self.queue_controller.stats(self.queue_name,
                                              project=self.project)
        self.assertEqual(15, countof['messages']['claimed'])
        self.assertEqual(5, countof['messages']['free'])
        self.assertEqual(20, countof['messages']['total'])

        # Make sure get works
        claim, messages2 = self.controller.get(self.queue_name, claim_id,
                                               project=self.project)

        messages2 = list(messages2)
        self.assertEqual(15, len(messages2))

        for msg1, msg2 in zip(messages, messages2):
            self.assertEqual(msg1['body'], msg2['body'])
            self.assertEqual(msg1['claim_id'], msg2['claim_id'])
            self.assertEqual(msg1['id'], msg2['id'])
            self.assertEqual(msg1['ttl'], msg2['ttl'])

        self.assertEqual(70, claim['ttl'])
        self.assertEqual(claim_id, claim['id'])

        new_meta = {'ttl': 100, 'grace': 60}
        self.controller.update(self.queue_name, claim_id,
                               new_meta, project=self.project)

        # Make sure update works
        claim, messages2 = self.controller.get(self.queue_name, claim_id,
                                               project=self.project)

        messages2 = list(messages2)
        self.assertEqual(15, len(messages2))

        # TODO(zyuan): Add some tests to ensure the ttl is
        #
extended/not-extended. for msg1, msg2 in zip(messages, messages2): self.assertEqual(msg1['body'], msg2['body']) self.assertEqual(new_meta['ttl'], claim['ttl']) self.assertEqual(claim_id, claim['id']) # Make sure delete works self.controller.delete(self.queue_name, claim_id, project=self.project) self.assertRaises(errors.ClaimDoesNotExist, self.controller.get, self.queue_name, claim_id, project=self.project) def test_claim_create_default_limit_multi(self): num_claims = 5 num_messages = storage.DEFAULT_MESSAGES_PER_CLAIM * num_claims # NOTE(kgriffs): + 1 on num_messages to check for off-by-one error _insert_fixtures(self.message_controller, self.queue_name, project=self.project, client_uuid=uuid.uuid4(), num=num_messages + 1) meta = {'ttl': 70, 'grace': 30} total_claimed = 0 for _ in range(num_claims): claim_id, messages = self.controller.create( self.queue_name, meta, project=self.project) messages = list(messages) num_claimed = len(messages) self.assertEqual(storage.DEFAULT_MESSAGES_PER_CLAIM, num_claimed) total_claimed += num_claimed self.assertEqual(num_messages, total_claimed) def test_extend_lifetime(self): _insert_fixtures(self.message_controller, self.queue_name, project=self.project, client_uuid=uuid.uuid4(), num=20, ttl=120) meta = {'ttl': 777, 'grace': 0} claim_id, messages = self.controller.create(self.queue_name, meta, project=self.project) for message in messages: self.assertEqual(777, message['ttl']) def test_extend_lifetime_with_grace_1(self): _insert_fixtures(self.message_controller, self.queue_name, project=self.project, client_uuid=uuid.uuid4(), num=20, ttl=120) meta = {'ttl': 777, 'grace': 23} claim_id, messages = self.controller.create(self.queue_name, meta, project=self.project) for message in messages: self.assertEqual(800, message['ttl']) def test_extend_lifetime_with_grace_2(self): _insert_fixtures(self.message_controller, self.queue_name, project=self.project, client_uuid=uuid.uuid4(), num=20, ttl=120) meta = {'ttl': 121, 'grace': 22} claim_id, messages = self.controller.create(self.queue_name, meta, project=self.project) for message in messages: self.assertEqual(143, message['ttl']) def test_do_not_extend_lifetime(self): _insert_fixtures(self.message_controller, self.queue_name, project=self.project, client_uuid=uuid.uuid4(), num=20, ttl=120) # Choose a ttl that is less than the message's current TTL meta = {'ttl': 60, 'grace': 30} claim_id, messages = self.controller.create(self.queue_name, meta, project=self.project) for message in messages: self.assertEqual(120, message['ttl']) def test_expired_claim(self): meta = {'ttl': 1, 'grace': 60} claim_id, messages = self.controller.create(self.queue_name, meta, project=self.project) time.sleep(1) with testing.expect(errors.DoesNotExist): self.controller.get(self.queue_name, claim_id, project=self.project) with testing.expect(errors.DoesNotExist): self.controller.update(self.queue_name, claim_id, meta, project=self.project) def test_delete_message_expired_claim(self): meta = {'ttl': 2, 'grace': 2} new_messages = [{'ttl': 60, 'body': {}}, {'ttl': 60, 'body': {}}, {'ttl': 60, 'body': {}}] self.message_controller.post(self.queue_name, new_messages, client_uuid=str(uuid.uuid1()), project=self.project) claim_id, messages = self.controller.create(self.queue_name, meta, project=self.project) now = timeutils.utcnow_ts() timeutils_utcnow = 'oslo_utils.timeutils.utcnow_ts' with mock.patch(timeutils_utcnow) as mock_utcnow: mock_utcnow.return_value = now + 2 messages = [msg['id'] for msg in messages] 
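            # With utcnow patched 2 seconds ahead, the claim created above
            # (ttl=2) has expired, so the deletes below exercise removing
            # messages whose claim has already lapsed.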
            self.message_controller.delete(self.queue_name,
                                           messages.pop(),
                                           project=self.project)
            self.message_controller.bulk_delete(self.queue_name,
                                                messages,
                                                project=self.project)

    def test_illformed_id(self):
        # Any ill-formed ID should be regarded as a non-existing one.
        self.controller.delete(self.queue_name,
                               'illformed',
                               project=self.project)

        with testing.expect(errors.DoesNotExist):
            self.controller.get(self.queue_name,
                                'illformed',
                                project=self.project)

        with testing.expect(errors.DoesNotExist):
            self.controller.update(self.queue_name,
                                   'illformed',
                                   {'ttl': 40},
                                   project=self.project)

    def test_dead_letter_queue(self):
        DLQ_name = "DLQ"
        meta = {'ttl': 3, 'grace': 3}
        self.queue_controller.create("DLQ", project=self.project)
        # Set dead letter queue metadata
        metadata = {"_max_claim_count": 2,
                    "_dead_letter_queue": DLQ_name,
                    "_dead_letter_queue_messages_ttl": 9999}
        self.queue_controller.set_metadata(self.queue_name,
                                           metadata,
                                           project=self.project)

        new_messages = [{'ttl': 3600, 'body': {"key": "value"}}]
        self.message_controller.post(self.queue_name, new_messages,
                                     client_uuid=str(uuid.uuid1()),
                                     project=self.project)

        claim_id, messages = self.controller.create(self.queue_name, meta,
                                                    project=self.project)
        self.assertIsNotNone(claim_id)
        self.assertEqual(1, len(list(messages)))

        time.sleep(5)

        claim_id, messages = self.controller.create(self.queue_name, meta,
                                                    project=self.project)
        self.assertIsNotNone(claim_id)
        messages = list(messages)
        self.assertEqual(1, len(messages))

        time.sleep(5)

        claim_id, messages = self.controller.create(self.queue_name, meta,
                                                    project=self.project)
        self.assertIsNone(claim_id)
        self.assertEqual(0, len(list(messages)))

        DLQ_messages = self.message_controller.list(DLQ_name,
                                                    project=self.project,
                                                    include_claimed=True)
        expected_msg = list(next(DLQ_messages))[0]
        self.assertEqual(9999, expected_msg["ttl"])
        self.assertEqual({"key": "value"}, expected_msg["body"])

    def test_delay_queue(self):
        meta = {'ttl': 2, 'grace': 0}
        # Set default message delay for queue.
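        # Messages posted to this queue without an explicit 'delay' field
        # inherit '_default_message_delay' and remain unclaimable until the
        # delay elapses, as the scenarios below demonstrate.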
        metadata = {'_default_message_delay': 3}
        self.queue_controller.set_metadata(self.queue_name,
                                           metadata,
                                           project=self.project)

        # Scenario 1: Normal message sent to delayed queue
        new_messages = [{'ttl': 3600, 'body': {'key': 'value'}}]
        queue_meta = self.queue_controller.get_metadata(self.queue_name,
                                                        project=self.project)
        delay = queue_meta.get('_default_message_delay', 0)
        for msg in new_messages:
            if delay and 'delay' not in msg:
                msg['delay'] = delay
        ids = self.message_controller.post(self.queue_name, new_messages,
                                           client_uuid=str(uuid.uuid1()),
                                           project=self.project)

        interaction = self.message_controller.list(self.queue_name,
                                                   project=self.project,
                                                   include_delayed=True)
        delay_messages = list(next(interaction))
        self.assertEqual(1, len(list(delay_messages)))

        claim_id, messages = self.controller.create(self.queue_name, meta,
                                                    project=self.project)
        self.assertIsNone(claim_id)
        self.assertEqual(0, len(list(messages)))

        time.sleep(4)
        claim_id, messages = self.controller.create(self.queue_name, meta,
                                                    project=self.project)
        self.assertIsNotNone(claim_id)
        self.assertEqual(1, len(list(messages)))

        time.sleep(2)
        self.message_controller.delete(self.queue_name, ids[0],
                                       project=self.project)

        # Scenario 2: Delay message sent to delayed queue
        new_messages = [{'ttl': 3600, 'delay': 1, 'body': {'key': 'value'}}]
        ids = self.message_controller.post(self.queue_name, new_messages,
                                           client_uuid=str(uuid.uuid1()),
                                           project=self.project)

        interaction = self.message_controller.list(self.queue_name,
                                                   project=self.project,
                                                   include_delayed=True)
        delay_messages = list(next(interaction))
        self.assertEqual(1, len(list(delay_messages)))

        claim_id, messages = self.controller.create(self.queue_name, meta,
                                                    project=self.project)
        self.assertIsNone(claim_id)
        self.assertEqual(0, len(list(messages)))

        time.sleep(2)
        claim_id, messages = self.controller.create(self.queue_name, meta,
                                                    project=self.project)
        self.assertIsNotNone(claim_id)
        self.assertEqual(1, len(list(messages)))

        time.sleep(2)
        self.message_controller.delete(self.queue_name, ids[0],
                                       project=self.project)

        # Scenario 3: Message sent to normal queue
        new_messages = [{'ttl': 3600, 'body': {'key': 'value'}}]
        self.queue_controller.set_metadata(self.queue_name,
                                           {},
                                           project=self.project)
        ids = self.message_controller.post(self.queue_name, new_messages,
                                           client_uuid=str(uuid.uuid1()),
                                           project=self.project)

        interaction = self.message_controller.list(self.queue_name,
                                                   project=self.project,
                                                   include_delayed=True)
        delay_messages = list(next(interaction))
        self.assertEqual(1, len(list(delay_messages)))

        claim_id, messages = self.controller.create(self.queue_name, meta,
                                                    project=self.project)
        self.assertIsNotNone(claim_id)
        self.assertEqual(1, len(list(messages)))

        time.sleep(2)
        self.message_controller.delete(self.queue_name, ids[0],
                                       project=self.project)


@ddt.ddt
class SubscriptionControllerTest(ControllerBaseTest):
    """Subscriptions Controller base tests.

    """
    queue_name = 'test_queue'
    controller_base_class = storage.Subscription

    def setUp(self):
        super(SubscriptionControllerTest, self).setUp()
        self.subscription_controller = self.driver.subscription_controller
        self.queue_controller = self.driver.queue_controller

        self.source = self.queue_name
        self.subscriber = 'http://trigger.me'
        self.ttl = 600
        self.options = {'uri': 'http://fake.com'}

    def tearDown(self):
        self.queue_controller.delete(self.queue_name, project=self.project)
        super(SubscriptionControllerTest, self).tearDown()

    # NOTE(Eva-i): this method helps to test cases when the queue is
    # pre-created and when it's not.
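    # Zaqar queues can be created lazily on first use, so each scenario is
    # exercised both with and without an explicit queue create().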
def _precreate_queue(self, precreate_queue): if precreate_queue: # Let's create a queue as the source of subscription self.queue_controller.create(self.queue_name, project=self.project) @ddt.data(True, False) def test_list(self, precreate_queue): self._precreate_queue(precreate_queue) for s in range(15): subscriber = 'http://fake_{0}'.format(s) s_id = self.subscription_controller.create( self.source, subscriber, self.ttl, self.options, project=self.project) self.addCleanup(self.subscription_controller.delete, self.source, s_id, self.project) added_age = 1 time.sleep(added_age) interaction = self.subscription_controller.list(self.source, project=self.project) subscriptions = list(next(interaction)) self.assertTrue(all(map(lambda s: 'source' in s and 'subscriber' in s, subscriptions))) self.assertEqual(10, len(subscriptions)) self.assertLessEqual(added_age, math.ceil(subscriptions[2]['age'])) interaction = (self.subscription_controller.list(self.source, project=self.project, marker=next(interaction))) subscriptions = list(next(interaction)) self.assertTrue(all(map(lambda s: 'source' in s and 'subscriber' in s, subscriptions))) self.assertEqual(5, len(subscriptions)) def test_small_list(self): subscriber = 'http://fake' s_id = self.subscription_controller.create( self.source, subscriber, self.ttl, self.options, project=self.project) self.addCleanup(self.subscription_controller.delete, self.source, s_id, self.project) interaction = self.subscription_controller.list(self.source, project=self.project) subscriptions = list(next(interaction)) marker = next(interaction) self.assertEqual(1, len(subscriptions)) interaction = (self.subscription_controller.list(self.source, project=self.project, marker=marker)) subscriptions = list(next(interaction)) self.assertEqual([], subscriptions) @ddt.data(True, False) def test_get_raises_if_subscription_does_not_exist(self, precreate_queue): self._precreate_queue(precreate_queue) self.assertRaises(errors.SubscriptionDoesNotExist, self.subscription_controller.get, self.queue_name, 'notexists', project=self.project) @ddt.data(True, False) def test_lifecycle(self, precreate_queue): self._precreate_queue(precreate_queue) s_id = self.subscription_controller.create(self.source, self.subscriber, self.ttl, self.options, project=self.project) added_age = 2 time.sleep(added_age) subscription = self.subscription_controller.get(self.queue_name, s_id, self.project) self.assertEqual(self.source, subscription['source']) self.assertEqual(self.subscriber, subscription['subscriber']) self.assertEqual(self.ttl, subscription['ttl']) self.assertEqual(self.options, subscription['options']) self.assertLessEqual(added_age, math.ceil(subscription['age'])) exist = self.subscription_controller.exists(self.queue_name, s_id, self.project) self.assertTrue(exist) self.subscription_controller.update(self.queue_name, s_id, project=self.project, subscriber='http://a.com', options={'funny': 'no'} ) updated = self.subscription_controller.get(self.queue_name, s_id, self.project) self.assertEqual('http://a.com', updated['subscriber']) self.assertEqual({'funny': 'no'}, updated['options']) self.subscription_controller.delete(self.queue_name, s_id, project=self.project) self.assertRaises(errors.SubscriptionDoesNotExist, self.subscription_controller.get, self.queue_name, s_id) @ddt.data(True, False) def test_create_existed(self, precreate_queue): self._precreate_queue(precreate_queue) s_id = self.subscription_controller.create( self.source, self.subscriber, self.ttl, self.options, project=self.project) 
self.addCleanup(self.subscription_controller.delete, self.source, s_id, self.project) self.assertIsNotNone(s_id) s_id = self.subscription_controller.create(self.source, self.subscriber, self.ttl, self.options, project=self.project) self.assertIsNone(s_id) def test_create_existed_and_get_correct_id(self): queue_name1 = 'test_queue1' queue_name2 = 'test_queue2' subscriber = 'http://fake' self.queue_controller.create(queue_name1, project=self.project) self.queue_controller.create(queue_name2, project=self.project) self.subscription_controller.create(queue_name1, subscriber, self.ttl, self.options, project=self.project) s_id_2 = self.subscription_controller.create(queue_name2, subscriber, self.ttl, self.options, project=self.project) s_id_3 = self.subscription_controller.create(queue_name2, subscriber, self.ttl, self.options, project=self.project) self.assertIsNone(s_id_3) s_id = self.subscription_controller. \ get_with_subscriber(queue_name2, subscriber, project=self.project)['id'] self.assertEqual(str(s_id_2), s_id) def test_get_update_delete_on_non_existing_queue(self): self._precreate_queue(precreate_queue=True) s_id = self.subscription_controller.create( self.source, self.subscriber, self.ttl, self.options, project=self.project) self.addCleanup(self.subscription_controller.delete, self.source, s_id, self.project) self.assertIsNotNone(s_id) non_existing_queue = "fake_name" # get self.assertRaises(errors.SubscriptionDoesNotExist, self.subscription_controller.get, non_existing_queue, s_id, project=self.project) # update body = { "subscriber": self.subscriber, "ttl": self.ttl, "options": self.options } self.assertRaises(errors.SubscriptionDoesNotExist, self.subscription_controller.update, non_existing_queue, s_id, project=self.project, **body) # delete self.subscription_controller.delete(non_existing_queue, s_id, project=self.project) s_id = self.subscription_controller.get(self.queue_name, s_id, project=self.project) self.assertIsNotNone(s_id) def test_nonexist_source(self): try: s_id = self.subscription_controller.create('fake_queue_name', self.subscriber, self.ttl, self.options, self.project) except Exception: self.fail("Subscription controller should not raise an exception " "in case of non-existing queue.") self.addCleanup(self.subscription_controller.delete, 'fake_queue_name', s_id, self.project) @ddt.data(True, False) def test_update_raises_if_try_to_update_to_existing_subscription( self, precreate_queue): self._precreate_queue(precreate_queue) # create two subscriptions: fake_0 and fake_1 ids = [] for s in range(2): subscriber = 'http://fake_{0}'.format(s) s_id = self.subscription_controller.create( self.source, subscriber, self.ttl, self.options, project=self.project) self.addCleanup(self.subscription_controller.delete, self.source, s_id, self.project) ids.append(s_id) # update fake_0 to fake_2, success update_fields = { 'subscriber': 'http://fake_2' } self.subscription_controller.update(self.queue_name, ids[0], project=self.project, **update_fields) # update fake_1 to fake_2, raise error self.assertRaises(errors.SubscriptionAlreadyExists, self.subscription_controller.update, self.queue_name, ids[1], project=self.project, **update_fields) @ddt.data(True, False) def test_update_raises_if_subscription_does_not_exist(self, precreate_queue): self._precreate_queue(precreate_queue) update_fields = { 'subscriber': 'http://fake' } self.assertRaises(errors.SubscriptionDoesNotExist, self.subscription_controller.update, self.queue_name, 'notexists', project=self.project, **update_fields) def 
test_confirm(self): s_id = self.subscription_controller.create(self.source, self.subscriber, self.ttl, self.options, project=self.project) self.addCleanup(self.subscription_controller.delete, self.source, s_id, self.project) subscription = self.subscription_controller.get(self.source, s_id, project=self.project) self.assertFalse(subscription['confirmed']) self.subscription_controller.confirm(self.source, s_id, project=self.project, confirmed=True) subscription = self.subscription_controller.get(self.source, s_id, project=self.project) self.assertTrue(subscription['confirmed']) self.subscription_controller.confirm(self.source, s_id, project=self.project, confirmed=False) subscription = self.subscription_controller.get(self.source, s_id, project=self.project) self.assertFalse(subscription['confirmed']) def test_confirm_with_nonexist_subscription(self): s_id = 'fake-id' self.assertRaises(errors.SubscriptionDoesNotExist, self.subscription_controller.confirm, self.source, s_id, project=self.project, confirmed=True ) class PoolsControllerTest(ControllerBaseTest): """Pools Controller base tests. NOTE(flaper87): Implementations of this class should override the tearDown method in order to clean up storage's state. """ controller_base_class = storage.PoolsBase def setUp(self): super(PoolsControllerTest, self).setUp() self.pools_controller = self.driver.pools_controller # Let's create one pool self.pool = str(uuid.uuid1()) self.pool1 = str(uuid.uuid1()) self.flavor = str(uuid.uuid1()) self.uri = str(uuid.uuid1()) self.pools_controller.create(self.pool1, 100, self.uri, flavor=self.flavor, options={}) self.flavors_controller = self.driver.flavors_controller def tearDown(self): self.pools_controller.drop_all() super(PoolsControllerTest, self).tearDown() def test_create_succeeds(self): self.pools_controller.create(str(uuid.uuid1()), 100, 'localhost:13124', options={}) def test_create_replaces_on_duplicate_insert(self): name = str(uuid.uuid1()) self.pools_controller.create(name, 100, 'localhost:76553', options={}) self.pools_controller.create(name, 111, 'localhost:758353', options={}) entry = self.pools_controller.get(name) self._pool_expects(entry, xname=name, xweight=111, xlocation='localhost:758353') def _pool_expects(self, pool, xname, xweight, xlocation): self.assertIn('name', pool) self.assertEqual(xname, pool['name']) self.assertIn('weight', pool) self.assertEqual(xweight, pool['weight']) self.assertIn('uri', pool) self.assertEqual(xlocation, pool['uri']) def test_get_returns_expected_content(self): res = self.pools_controller.get(self.pool1) self._pool_expects(res, self.pool1, 100, self.uri) self.assertNotIn('options', res) def test_detailed_get_returns_expected_content(self): res = self.pools_controller.get(self.pool1, detailed=True) self.assertIn('options', res) self.assertEqual({}, res['options']) def test_get_raises_if_not_found(self): self.assertRaises(errors.PoolDoesNotExist, self.pools_controller.get, 'notexists') def test_exists(self): self.assertTrue(self.pools_controller.exists(self.pool1)) self.assertFalse(self.pools_controller.exists('notexists')) def test_update_raises_assertion_error_on_bad_fields(self): self.assertRaises(AssertionError, self.pools_controller.update, self.pool) def test_update_works(self): # NOTE(flaper87): This may fail for redis. Create # a dummy store for tests. 
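        # (Presumably because the redis driver is stricter about what it
        # accepts as a pool URI than the other backends.)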
self.uri3 = str(uuid.uuid1()) self.pools_controller.update(self.pool1, weight=101, uri=self.uri3, options={'a': 1}) res = self.pools_controller.get(self.pool1, detailed=True) self._pool_expects(res, self.pool1, 101, self.uri3) self.assertEqual({'a': 1}, res['options']) def test_delete_works(self): # self.pools_controller.delete(self.pool) # (gengchc): Remove the flavor from pool, then testcase cleanup pool self.pools_controller.update(self.pool1, flavor="") self.pools_controller.delete(self.pool1) self.assertFalse(self.pools_controller.exists(self.pool1)) def test_delete_nonexistent_is_silent(self): self.pools_controller.delete('nonexisting') def test_drop_all_leads_to_empty_listing(self): self.pools_controller.drop_all() cursor = self.pools_controller.list() pools = next(cursor) self.assertRaises(StopIteration, next, pools) def test_listing_simple(self): # (gengchc): Remove the flavor from pool, then testcase cleanup pool self.pools_controller.update(self.pool1, flavor="") self.pools_controller._drop_all() pools = [] marker = '' for i in range(15): n = str(uuid.uuid4()) w = random.randint(1, 100) pools.append({'n': n, 'w': w, 'u': str(i)}) # Keep the max name as marker if n > marker: marker = n self.pools_controller.create(n, w, str(i), options={}) # Get the target pool def _pool(name): pool = [p for p in pools if p['n'] == name] self.assertEqual(1, len(pool)) pool = pool[0] n = pool['n'] w = pool['w'] u = pool['u'] return n, w, u def get_res(**kwargs): cursor = self.pools_controller.list(**kwargs) res = list(next(cursor)) marker = next(cursor) # TODO(jeffrey4l): marker should exist self.assertTrue(marker) return res res = get_res() self.assertEqual(10, len(res)) for entry in res: n, w, u = _pool(entry['name']) self._pool_expects(entry, n, w, u) self.assertNotIn('options', entry) res = get_res(limit=5) self.assertEqual(5, len(res)) res = get_res(limit=0) self.assertEqual(15, len(res)) next_name = marker + 'n' self.pools_controller.create(next_name, 123, '123', options={}) res = get_res(marker=marker) self._pool_expects(res[0], next_name, 123, '123') self.pools_controller.delete(next_name) res = get_res(detailed=True) self.assertEqual(10, len(res)) for entry in res: n, w, u = _pool(entry['name']) self._pool_expects(entry, n, w, u) self.assertIn('options', entry) self.assertEqual({}, entry['options']) class CatalogueControllerTest(ControllerBaseTest): controller_base_class = storage.CatalogueBase def setUp(self): super(CatalogueControllerTest, self).setUp() self.controller = self.driver.catalogue_controller self.pool_ctrl = self.driver.pools_controller self.queue = str(uuid.uuid4()) self.project = str(uuid.uuid4()) self.pool = str(uuid.uuid1()) self.flavor = str(uuid.uuid1()) self.uri = str(uuid.uuid1()) self.uri1 = str(uuid.uuid1()) self.pool_ctrl.create(self.pool, 100, self.uri, flavor=self.flavor, options={}) self.addCleanup(self.pool_ctrl.delete, self.pool) self.pool1 = str(uuid.uuid1()) self.pool_ctrl.create(self.pool1, 100, self.uri1, flavor=self.flavor, options={}) self.addCleanup(self.pool_ctrl.delete, self.pool1) def tearDown(self): self.pool_ctrl.update(self.pool, flavor="") self.pool_ctrl.update(self.pool1, flavor="") self.pool_ctrl.drop_all() self.controller.drop_all() super(CatalogueControllerTest, self).tearDown() def _check_structure(self, entry): self.assertIn('queue', entry) self.assertIn('project', entry) self.assertIn('pool', entry) self.assertIsInstance(entry['queue'], str) self.assertIsInstance(entry['project'], str) self.assertIsInstance(entry['pool'], str) def 
_check_value(self, entry, xqueue, xproject, xpool): self.assertEqual(xqueue, entry['queue']) self.assertEqual(xproject, entry['project']) self.assertEqual(xpool, entry['pool']) def test_catalogue_entry_life_cycle(self): queue = self.queue project = self.project # check listing is initially empty for p in self.controller.list(project): self.fail('There should be no entries at this time') # create a listing, check its length with helpers.pool_entries(self.controller, self.pool_ctrl, 10) as expect: project = expect[0][0] xs = list(self.controller.list(project)) self.assertEqual(10, len(xs)) # create, check existence, delete with helpers.pool_entry(self.controller, project, queue, self.pool): self.assertTrue(self.controller.exists(project, queue)) # verify it no longer exists self.assertFalse(self.controller.exists(project, queue)) # verify it isn't listable self.assertEqual(0, len(list(self.controller.list(project)))) def test_list(self): with helpers.pool_entries(self.controller, self.pool_ctrl, 10) as expect: values = zip(self.controller.list('_'), expect) for e, x in values: p, q, s = x self._check_structure(e) self._check_value(e, xqueue=q, xproject=p, xpool=s) def test_update(self): p2 = 'b' # NOTE(gengchc2): Remove [group=self.pool_group] in # it can be tested for redis as management. self.pool_ctrl.create(p2, 100, '127.0.0.1', options={}) self.addCleanup(self.pool_ctrl.delete, p2) with helpers.pool_entry(self.controller, self.project, self.queue, self.pool) as expect: p, q, s = expect self.controller.update(p, q, pool=p2) entry = self.controller.get(p, q) self._check_value(entry, xqueue=q, xproject=p, xpool=p2) def test_update_raises_when_entry_does_not_exist(self): e = self.assertRaises(errors.QueueNotMapped, self.controller.update, 'p', 'q', 'a') self.assertIn('queue q for project p', str(e)) def test_get(self): with helpers.pool_entry(self.controller, self.project, self.queue, self.pool) as expect: p, q, s = expect e = self.controller.get(p, q) self._check_value(e, xqueue=q, xproject=p, xpool=s) def test_get_raises_if_does_not_exist(self): with helpers.pool_entry(self.controller, self.project, self.queue, 'a') as expect: p, q, _ = expect self.assertRaises(errors.QueueNotMapped, self.controller.get, p, 'non_existing') self.assertRaises(errors.QueueNotMapped, self.controller.get, 'non_existing', q) self.assertRaises(errors.QueueNotMapped, self.controller.get, 'non_existing', 'non_existing') def test_exists(self): with helpers.pool_entry(self.controller, self.project, self.queue, self.pool) as expect: p, q, _ = expect self.assertTrue(self.controller.exists(p, q)) self.assertFalse(self.controller.exists('nada', 'not_here')) def test_insert(self): q1 = str(uuid.uuid1()) q2 = str(uuid.uuid1()) self.controller.insert(self.project, q1, 'a') self.controller.insert(self.project, q2, 'a') # NOTE(gengchc2): Unittest for new flavor configure scenario. class FlavorsControllerTest1(ControllerBaseTest): """Flavors Controller base tests. NOTE(flaper87): Implementations of this class should override the tearDown method in order to clean up storage's state. 
""" controller_base_class = storage.FlavorsBase def setUp(self): super(FlavorsControllerTest1, self).setUp() self.pools_controller = self.driver.pools_controller self.flavors_controller = self.driver.flavors_controller # Let's create one pool self.pool = str(uuid.uuid1()) self.flavor = 'durable' self.uri = str(uuid.uuid1()) self.pools_controller.create(self.pool, 100, self.uri, flavor=self.flavor, options={}) self.addCleanup(self.pools_controller.delete, self.pool) def tearDown(self): self.pools_controller.update(self.pool, flavor="") self.pools_controller.drop_all() self.flavors_controller.drop_all() super(FlavorsControllerTest1, self).tearDown() def test_create_succeeds(self): self.flavors_controller.create(self.flavor, project=self.project, capabilities={}) def _flavors_expects(self, flavor, xname, xproject): self.assertIn('name', flavor) self.assertEqual(xname, flavor['name']) self.assertNotIn('project', flavor) def test_create_replaces_on_duplicate_insert(self): name = str(uuid.uuid1()) self.flavors_controller.create(name, project=self.project, capabilities={}) entry = self.flavors_controller.get(name, self.project) self._flavors_expects(entry, name, self.project) new_capabilities = {'fifo': False} self.flavors_controller.create(name, project=self.project, capabilities=new_capabilities) entry = self.flavors_controller.get(name, project=self.project, detailed=True) self._flavors_expects(entry, name, self.project) self.assertEqual(new_capabilities, entry['capabilities']) def test_get_returns_expected_content(self): name = 'durable' capabilities = {'fifo': True} self.flavors_controller.create(name, project=self.project, capabilities=capabilities) res = self.flavors_controller.get(name, project=self.project) self._flavors_expects(res, name, self.project) self.assertNotIn('capabilities', res) def test_detailed_get_returns_expected_content(self): name = 'durable' capabilities = {'fifo': True} self.flavors_controller.create(name, project=self.project, capabilities=capabilities) res = self.flavors_controller.get(name, project=self.project, detailed=True) self._flavors_expects(res, name, self.project) self.assertIn('capabilities', res) self.assertEqual(capabilities, res['capabilities']) def test_get_raises_if_not_found(self): self.assertRaises(errors.FlavorDoesNotExist, self.flavors_controller.get, 'notexists') def test_exists(self): self.flavors_controller.create('exists', project=self.project, capabilities={}) self.assertTrue(self.flavors_controller.exists('exists', project=self.project)) self.assertFalse(self.flavors_controller.exists('notexists', project=self.project)) def test_update_raises_assertion_error_on_bad_fields(self): self.assertRaises(AssertionError, self.pools_controller.update, self.flavor) def test_update_works(self): name = 'yummy' self.flavors_controller.create(name, project=self.project, capabilities={}) res = self.flavors_controller.get(name, project=self.project, detailed=True) p = 'olympic' flavor = name self.uri2 = str(uuid.uuid1()) self.pools_controller.create(p, 100, self.uri2, flavor=flavor, options={}) self.addCleanup(self.pools_controller.delete, p) new_capabilities = {'fifo': False} self.flavors_controller.update(name, project=self.project, capabilities={'fifo': False}) res = self.flavors_controller.get(name, project=self.project, detailed=True) self._flavors_expects(res, name, self.project) self.assertEqual(new_capabilities, res['capabilities']) self.pools_controller.update(p, flavor="") def test_delete_works(self): name = 'puke' 
self.flavors_controller.create(name, project=self.project, capabilities={}) self.flavors_controller.delete(name, project=self.project) self.assertFalse(self.flavors_controller.exists(name)) def test_delete_nonexistent_is_silent(self): self.flavors_controller.delete('nonexisting') def test_drop_all_leads_to_empty_listing(self): self.flavors_controller.drop_all() cursor = self.flavors_controller.list() flavors = next(cursor) self.assertRaises(StopIteration, next, flavors) self.assertFalse(next(cursor)) def test_listing_simple(self): name_gen = lambda i: chr(ord('A') + i) for i in range(15): pool = str(i) flavor = name_gen(i) uri = 'localhost:2701' + pool self.pools_controller.create(pool, 100, uri, flavor=flavor, options={}) self.addCleanup(self.pools_controller.delete, pool) self.flavors_controller.create(flavor, project=self.project, capabilities={}) def get_res(**kwargs): cursor = self.flavors_controller.list(project=self.project, **kwargs) res = list(next(cursor)) marker = next(cursor) self.assertTrue(marker) return res res = get_res() self.assertEqual(10, len(res)) for i, entry in enumerate(res): self._flavors_expects(entry, name_gen(i), self.project) self.assertNotIn('capabilities', entry) res = get_res(limit=5) self.assertEqual(5, len(res)) res = get_res(marker=name_gen(3)) self._flavors_expects(res[0], name_gen(4), self.project) res = get_res(detailed=True) self.assertEqual(10, len(res)) for i, entry in enumerate(res): self._flavors_expects(entry, name_gen(i), self.project) self.assertIn('capabilities', entry) self.assertEqual({}, entry['capabilities']) # (gengchc): Remove the flavor from pool, then testcase cleanup pools for i in range(15): pool = str(i) self.pools_controller.update(pool, flavor="") def _insert_fixtures(controller, queue_name, project=None, client_uuid=None, num=4, ttl=120): def messages(): for n in range(num): yield { 'ttl': ttl, 'body': { 'event': 'Event number {0}'.format(n) }} return controller.post(queue_name, messages(), project=project, client_uuid=client_uuid) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5800135 zaqar-20.1.0.dev29/zaqar/tests/unit/storage/sqlalchemy_migration/0000775000175100017510000000000015033040026024057 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/storage/sqlalchemy_migration/__init__.py0000664000175100017510000000000015033040005026153 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/storage/sqlalchemy_migration/test_db_manage_cli.py0000664000175100017510000000620315033040005030212 0ustar00mylesmyles# Copyright 2012 New Dream Network, LLC (DreamHost) # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
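# These scenario-driven tests patch cli.do_alembic_command and verify that
# each command-line invocation (stamp, current, history, revision, upgrade,
# and so on) dispatches to alembic with the expected arguments.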
import sys from unittest import mock import testscenarios import testtools from zaqar.storage.sqlalchemy.migration import cli class TestCli(testtools.TestCase): func_name = '' exp_args = () exp_kwargs = {} scenarios = [ ('stamp', dict(argv=['prog', 'stamp', 'foo'], func_name='stamp', exp_args=('foo',), exp_kwargs={'sql': False})), ('stamp-sql', dict(argv=['prog', 'stamp', 'foo', '--sql'], func_name='stamp', exp_args=('foo',), exp_kwargs={'sql': True})), ('current', dict(argv=['prog', 'current'], func_name='current', exp_args=[], exp_kwargs=dict())), ('history', dict(argv=['prog', 'history'], func_name='history', exp_args=[], exp_kwargs=dict())), ('check_migration', dict(argv=['prog', 'check_migration'], func_name='branches', exp_args=[], exp_kwargs=dict())), ('sync_revision_autogenerate', dict(argv=['prog', 'revision', '--autogenerate', '-m', 'message'], func_name='revision', exp_args=(), exp_kwargs={ 'message': 'message', 'sql': False, 'autogenerate': True})), ('sync_revision_sql', dict(argv=['prog', 'revision', '--sql', '-m', 'message'], func_name='revision', exp_args=(), exp_kwargs={ 'message': 'message', 'sql': True, 'autogenerate': False})), ('upgrade-sql', dict(argv=['prog', 'upgrade', '--sql', 'head'], func_name='upgrade', exp_args=('head',), exp_kwargs={'sql': True})), ('upgrade-delta', dict(argv=['prog', 'upgrade', '--delta', '3'], func_name='upgrade', exp_args=('+3',), exp_kwargs={'sql': False})) ] def setUp(self): super(TestCli, self).setUp() do_alembic_cmd_p = mock.patch.object(cli, 'do_alembic_command') self.addCleanup(do_alembic_cmd_p.stop) self.do_alembic_cmd = do_alembic_cmd_p.start() self.addCleanup(cli.CONF.reset) def test_cli(self): with mock.patch.object(sys, 'argv', self.argv): cli.main() self.do_alembic_cmd.assert_has_calls( [mock.call( mock.ANY, self.func_name, *self.exp_args, **self.exp_kwargs)] ) def load_tests(loader, in_tests, pattern): return testscenarios.load_tests_apply_scenarios(loader, in_tests, pattern) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/storage/sqlalchemy_migration/test_migrations.py0000664000175100017510000001265715033040005027654 0ustar00mylesmyles# Copyright 2014 OpenStack Foundation # Copyright 2014 Mirantis Inc # Copyright 2016 Catalyst IT Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for database migrations. For the opportunistic testing you need to set up a db named 'openstack_citest' with user 'openstack_citest' and password 'openstack_citest' on localhost. The test will then use that db and u/p combo to run the tests. 
For postgres on Ubuntu this can be done with the following commands:

sudo -u postgres psql
postgres=# create user openstack_citest with createdb login password
'openstack_citest';
postgres=# create database openstack_citest with owner openstack_citest;

"""

from oslo_db.sqlalchemy import test_fixtures
from oslo_db.sqlalchemy import utils as db_utils

from zaqar.tests.unit.storage.sqlalchemy_migration import \
    test_migrations_base as base


class ZaqarMigrationsCheckers(object):

    def assertColumnExists(self, engine, table, column):
        t = db_utils.get_table(engine, table)
        self.assertIn(column, t.c)

    def assertColumnsExist(self, engine, table, columns):
        for column in columns:
            self.assertColumnExists(engine, table, column)

    def assertColumnType(self, engine, table, column, column_type):
        t = db_utils.get_table(engine, table)
        column_ref_type = str(t.c[column].type)
        self.assertEqual(column_ref_type, column_type)

    def assertColumnCount(self, engine, table, columns):
        t = db_utils.get_table(engine, table)
        self.assertEqual(len(columns), len(t.columns))

    def assertColumnNotExists(self, engine, table, column):
        t = db_utils.get_table(engine, table)
        self.assertNotIn(column, t.c)

    def assertIndexExists(self, engine, table, index):
        t = db_utils.get_table(engine, table)
        index_names = [idx.name for idx in t.indexes]
        self.assertIn(index, index_names)

    def assertIndexMembers(self, engine, table, index, members):
        self.assertIndexExists(engine, table, index)

        t = db_utils.get_table(engine, table)
        index_columns = None
        for idx in t.indexes:
            if idx.name == index:
                index_columns = idx.columns.keys()
                break

        self.assertEqual(sorted(members), sorted(index_columns))

    # NOTE(wanghao) Just skip this test until oslo.db/+/747762 is merged
    # def test_walk_versions(self):
    #     self.walk_versions(self.engine)

    def _pre_upgrade_001(self, engine):
        # Anything returned from this method will be
        # passed to corresponding _check_xxx method as 'data'.
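        # For example, a _pre_upgrade_002() could insert sample rows and
        # return them so that _check_002() can verify they survived the
        # migration.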
        pass

    def _check_001(self, engine, data):
        queues_columns = [
            'id',
            'name',
            'project',
            'metadata'
        ]
        self.assertColumnsExist(
            engine, 'Queues', queues_columns)
        self.assertColumnCount(
            engine, 'Queues', queues_columns)

        poolgroup_columns = [
            'name',
        ]
        self.assertColumnsExist(
            engine, 'PoolGroup', poolgroup_columns)
        self.assertColumnCount(
            engine, 'PoolGroup', poolgroup_columns)

        pools_columns = [
            'name',
            'group',
            'uri',
            'weight',
            'options',
        ]
        self.assertColumnsExist(
            engine, 'Pools', pools_columns)
        self.assertColumnCount(
            engine, 'Pools', pools_columns)

        flavors_columns = [
            'name',
            'project',
            'pool_group',
            'capabilities',
        ]
        self.assertColumnsExist(
            engine, 'Flavors', flavors_columns)
        self.assertColumnCount(
            engine, 'Flavors', flavors_columns)

        catalogue_columns = [
            'pool',
            'project',
            'queue',
        ]
        self.assertColumnsExist(
            engine, 'Catalogue', catalogue_columns)
        self.assertColumnCount(
            engine, 'Catalogue', catalogue_columns)

        self._data_001(engine, data)

    def _data_001(self, engine, data):
        project = 'myproject'
        t = db_utils.get_table(engine, 'Queues')

        engine.execute(t.insert(), id='123', name='name',
                       project='myproject', metadata={})

        new_project = engine.execute(t.select()).fetchone().project
        self.assertEqual(project, new_project)

        engine.execute(t.delete())

    def _check_002(self, engine, data):
        # currently, 002 is just a placeholder
        pass

    def _check_003(self, engine, data):
        # currently, 003 is just a placeholder
        pass

    def _check_004(self, engine, data):
        # currently, 004 is just a placeholder
        pass

    def _check_005(self, engine, data):
        # currently, 005 is just a placeholder
        pass


class TestMigrationsMySQL(ZaqarMigrationsCheckers,
                          base.BaseWalkMigrationTestCase,
                          test_fixtures.MySQLOpportunisticFixture):
    pass
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0
zaqar-20.1.0.dev29/zaqar/tests/unit/storage/sqlalchemy_migration/test_migrations_base.py0000664000175100017510000001563015033040005030640 0ustar00mylesmyles# Copyright 2010-2011 OpenStack Foundation
# Copyright 2012-2013 IBM Corp.
# Copyright 2016 Catalyst IT Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
#
# Ripped off from Nova's test_migrations.py
# The only difference between Nova and this code is usage of alembic instead
# of sqlalchemy migrations.
#
# There is ongoing work to extract similar code to oslo incubator. Once it is
# extracted we'll be able to remove this file and use oslo.
import io
import os

import alembic
from alembic import command
from alembic import config as alembic_config
from alembic import migration
from alembic import script as alembic_script
from oslo_config import cfg
from oslo_db.sqlalchemy import test_migrations as t_m
from oslo_log import log as logging

import zaqar.storage.sqlalchemy.migration
from zaqar.storage.sqlalchemy import tables

LOG = logging.getLogger(__name__)

CONF = cfg.CONF

sqlalchemy_opts = [cfg.StrOpt('uri',
                              help='The SQLAlchemy connection string to'
                                   ' use to connect to the database.',
                              secret=True)]

CONF.register_opts(sqlalchemy_opts,
                   group='drivers:management_store:sqlalchemy')


class BaseWalkMigrationTestCase(object):

    ALEMBIC_CONFIG = alembic_config.Config(
        os.path.join(
            os.path.dirname(zaqar.storage.sqlalchemy.migration.__file__),
            'alembic.ini')
    )

    ALEMBIC_CONFIG.zaqar_config = CONF

    def _configure(self, engine):
        """For each type of repository we need to perform some
        configuration steps. For migrate_repo we should put our database
        under version control. For alembic we should configure the
        database settings. For this goal we use oslo_config and
        openstack.common.db.sqlalchemy.session with its database
        functionality (reset default settings and session cleanup).
        """
        CONF.set_override(
            'uri', engine.url.render_as_string(hide_password=False),
            group='drivers:management_store:sqlalchemy')

    def _alembic_command(self, alembic_command, engine, *args, **kwargs):
        """Most alembic commands write their results to stdout, so
        redirect stdout into a buffer in order to capture and return
        the output.
        """
        self.ALEMBIC_CONFIG.stdout = buf = io.StringIO()
        CONF.set_override(
            'uri', engine.url.render_as_string(hide_password=False),
            group='drivers:management_store:sqlalchemy')
        getattr(command, alembic_command)(*args, **kwargs)
        res = buf.getvalue().strip()
        LOG.debug('Alembic command %(command)s returns: %(result)s',
                  {'command': alembic_command, 'result': res})
        return res

    def _get_versions(self):
        """Return an ordered list of migration versions.

        Since alembic generates revision identifiers randomly (whereas
        sqlalchemy-migrate uses ordered, auto-incrementing names), we have
        to walk the revision history to build the list of versions to
        upgrade through for successful testing of migrations in up mode.
        """
        env = alembic_script.ScriptDirectory.from_config(self.ALEMBIC_CONFIG)
        versions = []
        for rev in env.walk_revisions():
            versions.append(rev.revision)

        versions.reverse()
        return versions

    def walk_versions(self, engine=None):
        # Determine latest version script from the repo, then
        # upgrade from 1 through to the latest, with no data
        # in the databases. This just checks that the schema itself
        # upgrades successfully.

        self._configure(engine)
        versions = self._get_versions()
        for ver in versions:
            self._migrate_up(engine, ver, with_data=True)

    def _get_version_from_db(self, engine):
        """Return the latest version from the db for each type of repo."""
        conn = engine.connect()
        try:
            context = migration.MigrationContext.configure(conn)
            version = context.get_current_revision() or '-1'
        finally:
            conn.close()
        return version

    def _migrate(self, engine, version, cmd):
        """Base method for manipulating the migrate repo.

        It will upgrade or downgrade the actual database.
        """
        self._alembic_command(cmd, engine, self.ALEMBIC_CONFIG, version)

    def _migrate_up(self, engine, version, with_data=False):
        """Migrate up to a new version of the db.

        We allow for data insertion and post checks at every migration
        version with special _pre_upgrade_### and _check_### functions
        in the main test.
""" # NOTE(sdague): try block is here because it's impossible to debug # where a failed data migration happens otherwise check_version = version try: if with_data: data = None pre_upgrade = getattr( self, "_pre_upgrade_%s" % check_version, None) if pre_upgrade: data = pre_upgrade(engine) self._migrate(engine, version, 'upgrade') self.assertEqual(version, self._get_version_from_db(engine)) if with_data: check = getattr(self, "_check_%s" % check_version, None) if check: check(engine, data) except Exception: LOG.error("Failed to migrate to version {version} on engine " "{engine}", {'version': version, 'engine': engine}) raise class TestModelsMigrationsSync(t_m.ModelsMigrationsSync): """Class for comparison of DB migration scripts and models. Allows to check if the DB schema obtained by applying of migration scripts is equal to the one produced from models definitions. """ mg_path = os.path.dirname(zaqar.storage.sqlalchemy.migration.__file__) ALEMBIC_CONFIG = alembic_config.Config( os.path.join(mg_path, 'alembic.ini') ) ALEMBIC_CONFIG.zaqar_config = CONF def get_engine(self): return self.engine def db_sync(self, engine): CONF.set_override( 'uri', engine.url.render_as_string(hide_password=False), group='drivers:management_store:sqlalchemy') script_location = os.path.join(self.mg_path, 'alembic_migrations') self.ALEMBIC_CONFIG.set_main_option('script_location', script_location) alembic.command.upgrade(self.ALEMBIC_CONFIG, 'head') def get_metadata(self): return tables.metadata ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/storage/test_impl_mongodb.py0000664000175100017510000005662115033040005023732 0ustar00mylesmyles# Copyright (c) 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import collections import datetime import time from unittest import mock import uuid from oslo_utils import timeutils from pymongo import cursor import pymongo.errors from testtools import matchers from zaqar.common import cache as oslo_cache from zaqar.conf import default from zaqar.conf import drivers_management_store_mongodb from zaqar.conf import drivers_message_store_mongodb from zaqar import storage from zaqar.storage import errors from zaqar.storage import mongodb from zaqar.storage.mongodb import controllers from zaqar.storage.mongodb import utils from zaqar.storage import pooling from zaqar import tests as testing from zaqar.tests.unit.storage import base class MongodbSetupMixin(object): def _purge_databases(self): if isinstance(self.driver, mongodb.DataDriver): databases = (self.driver.message_databases + [self.control.queues_database, self.driver.subscriptions_database]) else: databases = [self.driver.queues_database] for db in databases: self.driver.connection.drop_database(db) def _prepare_conf(self): if drivers_message_store_mongodb.GROUP_NAME in self.conf: self.config(drivers_message_store_mongodb.GROUP_NAME, database=uuid.uuid4().hex) if drivers_management_store_mongodb.GROUP_NAME in self.conf: self.config(drivers_management_store_mongodb.GROUP_NAME, database=uuid.uuid4().hex) class MongodbUtilsTest(MongodbSetupMixin, testing.TestBase): config_file = 'wsgi_mongodb.conf' def setUp(self): super(MongodbUtilsTest, self).setUp() self.conf.register_opts(drivers_message_store_mongodb.ALL_OPTS, group=drivers_message_store_mongodb.GROUP_NAME) self.mongodb_conf = self.conf[drivers_message_store_mongodb.GROUP_NAME] MockDriver = collections.namedtuple('MockDriver', 'mongodb_conf') self.driver = MockDriver(self.mongodb_conf) self.control_driver = MockDriver(self.mongodb_conf) def test_scope_queue_name(self): self.assertEqual('/my-q', utils.scope_queue_name('my-q')) self.assertEqual('/my-q', utils.scope_queue_name('my-q', None)) self.assertEqual('123/my-q', utils.scope_queue_name('my-q', '123')) self.assertEqual('/', utils.scope_queue_name(None)) self.assertEqual('123/', utils.scope_queue_name(None, '123')) def test_descope_queue_name(self): self.assertIsNone(utils.descope_queue_name('/')) self.assertEqual('some-pig', utils.descope_queue_name('/some-pig')) self.assertEqual('some-pig', utils.descope_queue_name('radiant/some-pig')) def test_calculate_backoff(self): sec = utils.calculate_backoff(0, 10, 2, 0) self.assertEqual(0, sec) sec = utils.calculate_backoff(9, 10, 2, 0) self.assertEqual(1.8, sec) sec = utils.calculate_backoff(4, 10, 2, 0) self.assertEqual(0.8, sec) sec = utils.calculate_backoff(4, 10, 2, 1) if sec != 0.8: self.assertThat(sec, matchers.GreaterThan(0.8)) self.assertThat(sec, matchers.LessThan(1.8)) self.assertRaises(ValueError, utils.calculate_backoff, 0, 10, -2, -1) self.assertRaises(ValueError, utils.calculate_backoff, 0, 10, -2, 0) self.assertRaises(ValueError, utils.calculate_backoff, 0, 10, 2, -1) self.assertRaises(ValueError, utils.calculate_backoff, -2, 10, 2, 0) self.assertRaises(ValueError, utils.calculate_backoff, -1, 10, 2, 0) self.assertRaises(ValueError, utils.calculate_backoff, 10, 10, 2, 0) self.assertRaises(ValueError, utils.calculate_backoff, 11, 10, 2, 0) def test_retries_on_autoreconnect(self): num_calls = [0] @utils.retries_on_autoreconnect def _raises_autoreconnect(self): num_calls[0] += 1 raise pymongo.errors.AutoReconnect() self.assertRaises(pymongo.errors.AutoReconnect, _raises_autoreconnect, self) 
self.assertEqual([self.mongodb_conf.max_reconnect_attempts], num_calls) def test_retries_on_autoreconnect_neg(self): num_calls = [0] @utils.retries_on_autoreconnect def _raises_autoreconnect(self): num_calls[0] += 1 # NOTE(kgriffs): Don't exceed until the last attempt if num_calls[0] < self.mongodb_conf.max_reconnect_attempts: raise pymongo.errors.AutoReconnect() # NOTE(kgriffs): Test that this does *not* raise AutoReconnect _raises_autoreconnect(self) self.assertEqual([self.mongodb_conf.max_reconnect_attempts], num_calls) @testing.requires_mongodb class MongodbDriverTest(MongodbSetupMixin, testing.TestBase): config_file = 'wsgi_mongodb.conf' def setUp(self): super(MongodbDriverTest, self).setUp() self.conf.register_opts(default.ALL_OPTS) self.config(unreliable=False) oslo_cache.register_config(self.conf) def test_db_instance(self): self.config(unreliable=True) cache = oslo_cache.get_cache(self.conf) control = mongodb.ControlDriver(self.conf, cache) data = mongodb.DataDriver(self.conf, cache, control) for db in data.message_databases: self.assertThat(db.name, matchers.StartsWith( data.mongodb_conf.database)) def test_version_match(self): self.config(unreliable=True) cache = oslo_cache.get_cache(self.conf) with mock.patch('pymongo.MongoClient.server_info') as info: info.return_value = {'version': '2.1'} self.assertRaises(RuntimeError, mongodb.DataDriver, self.conf, cache, mongodb.ControlDriver(self.conf, cache)) info.return_value = {'version': '2.11'} try: mongodb.DataDriver(self.conf, cache, mongodb.ControlDriver(self.conf, cache)) except RuntimeError: self.fail('version match failed') def test_replicaset_or_mongos_needed(self): cache = oslo_cache.get_cache(self.conf) with mock.patch('pymongo.MongoClient.nodes') as nodes: nodes.__get__ = mock.Mock(return_value=[]) with mock.patch('pymongo.MongoClient.is_mongos') as is_mongos: is_mongos.__get__ = mock.Mock(return_value=False) self.assertRaises(RuntimeError, mongodb.DataDriver, self.conf, cache, mongodb.ControlDriver(self.conf, cache)) def test_using_replset(self): cache = oslo_cache.get_cache(self.conf) with mock.patch('pymongo.MongoClient.nodes') as nodes: nodes.__get__ = mock.Mock(return_value=['node1', 'node2']) with mock.patch('pymongo.MongoClient.write_concern') as wc: write_concern = pymongo.WriteConcern(w=2) wc.__get__ = mock.Mock(return_value=write_concern) mongodb.DataDriver(self.conf, cache, mongodb.ControlDriver(self.conf, cache)) def test_using_mongos(self): cache = oslo_cache.get_cache(self.conf) with mock.patch('pymongo.MongoClient.is_mongos') as is_mongos: is_mongos.__get__ = mock.Mock(return_value=True) with mock.patch('pymongo.MongoClient.write_concern') as wc: write_concern = pymongo.WriteConcern(w=2) wc.__get__ = mock.Mock(return_value=write_concern) mongodb.DataDriver(self.conf, cache, mongodb.ControlDriver(self.conf, cache)) def test_write_concern_check_works(self): cache = oslo_cache.get_cache(self.conf) with mock.patch('pymongo.MongoClient.is_mongos') as is_mongos: is_mongos.__get__ = mock.Mock(return_value=True) with mock.patch('pymongo.MongoClient.write_concern') as wc: write_concern = pymongo.WriteConcern(w=1) wc.__get__ = mock.Mock(return_value=write_concern) self.assertRaises(RuntimeError, mongodb.DataDriver, self.conf, cache, mongodb.ControlDriver(self.conf, cache)) write_concern = pymongo.WriteConcern(w=2) wc.__get__ = mock.Mock(return_value=write_concern) mongodb.DataDriver(self.conf, cache, mongodb.ControlDriver(self.conf, cache)) def test_write_concern_is_set(self): cache = oslo_cache.get_cache(self.conf) 
with mock.patch('pymongo.MongoClient.is_mongos') as is_mongos: is_mongos.__get__ = mock.Mock(return_value=True) self.config(unreliable=True) driver = mongodb.DataDriver(self.conf, cache, mongodb.ControlDriver (self.conf, cache)) driver.server_version = (2, 6) for db in driver.message_databases: wc = db.write_concern self.assertEqual('majority', wc.document['w']) self.assertFalse(wc.document['j']) @testing.requires_mongodb class MongodbQueueTests(MongodbSetupMixin, base.QueueControllerTest): driver_class = mongodb.ControlDriver config_file = 'wsgi_mongodb.conf' controller_class = controllers.QueueController control_driver_class = mongodb.ControlDriver def test_indexes(self): collection = self.controller._collection indexes = collection.index_information() self.assertIn('p_q_1', indexes) def test_raises_connection_error(self): with mock.patch.object(cursor.Cursor, '__next__', spec=True) as method: error = pymongo.errors.ConnectionFailure() method.side_effect = error queues = next(self.controller.list()) self.assertRaises(storage.errors.ConnectionError, queues.next) @testing.requires_mongodb class MongodbMessageTests(MongodbSetupMixin, base.MessageControllerTest): driver_class = mongodb.DataDriver config_file = 'wsgi_mongodb.conf' controller_class = controllers.MessageController control_driver_class = mongodb.ControlDriver # NOTE(kgriffs): MongoDB's TTL scavenger only runs once a minute gc_interval = 60 def test_indexes(self): for collection in self.controller._collections: indexes = collection.index_information() self.assertIn('active', indexes) self.assertIn('claimed', indexes) self.assertIn('queue_marker', indexes) self.assertIn('counting', indexes) def test_message_counter(self): queue_name = self.queue_name iterations = 10 m = mock.MagicMock(controllers.QueueController) self.controller._queue_ctrl = m del self.controller._queue_ctrl._get_counter del self.controller._queue_ctrl._inc_counter seed_marker1 = self.controller._get_counter(queue_name, self.project) self.assertEqual(0, seed_marker1, 'First marker is 0') uuid = '97b64000-2526-11e3-b088-d85c1300734c' for i in range(iterations): self.controller.post(queue_name, [{'ttl': 60}], uuid, project=self.project) marker1 = self.controller._get_counter(queue_name, self.project) marker2 = self.controller._get_counter(queue_name, self.project) marker3 = self.controller._get_counter(queue_name, self.project) self.assertEqual(marker1, marker2) self.assertEqual(marker2, marker3) self.assertEqual(i + 1, marker1) new_value = self.controller._inc_counter(queue_name, self.project) self.assertIsNotNone(new_value) value_before = self.controller._get_counter(queue_name, project=self.project) new_value = self.controller._inc_counter(queue_name, project=self.project) self.assertIsNotNone(new_value) value_after = self.controller._get_counter(queue_name, project=self.project) self.assertEqual(value_before + 1, value_after) value_before = value_after new_value = self.controller._inc_counter(queue_name, project=self.project, amount=7) value_after = self.controller._get_counter(queue_name, project=self.project) self.assertEqual(value_before + 7, value_after) self.assertEqual(new_value, value_after) reference_value = value_after unchanged = self.controller._inc_counter(queue_name, project=self.project, window=10) self.assertIsNone(unchanged) timeutils.set_time_override() timeutils.advance_time_delta(datetime.timedelta(seconds=10)) changed = self.controller._inc_counter(queue_name, project=self.project, window=5) self.assertEqual(reference_value + 1, changed) 
timeutils.clear_time_override() @testing.requires_mongodb class MongodbFIFOMessageTests(MongodbSetupMixin, base.MessageControllerTest): driver_class = mongodb.FIFODataDriver config_file = 'wsgi_fifo_mongodb.conf' controller_class = controllers.FIFOMessageController control_driver_class = mongodb.ControlDriver # NOTE(kgriffs): MongoDB's TTL scavenger only runs once a minute gc_interval = 60 def test_race_condition_on_post(self): queue_name = self.queue_name expected_messages = [ { 'ttl': 60, 'body': { 'event': 'BackupStarted', 'backupId': 'c378813c-3f0b-11e2-ad92-7823d2b0f3ce', }, }, { 'ttl': 60, 'body': { 'event': 'BackupStarted', 'backupId': 'd378813c-3f0b-11e2-ad92-7823d2b0f3ce', }, }, { 'ttl': 60, 'body': { 'event': 'BackupStarted', 'backupId': 'e378813c-3f0b-11e2-ad92-7823d2b0f3ce', }, }, ] uuid = '97b64000-2526-11e3-b088-d85c1300734c' # NOTE(kgriffs): Patch _inc_counter so it is a noop, so that # the second time we post, we will get a collision. This simulates # what happens when we have parallel requests and the "winning" # requests hasn't gotten around to calling _inc_counter before the # "losing" request attempts to insert it's batch of messages. with mock.patch.object(mongodb.messages.MessageController, '_inc_counter', autospec=True) as ic: ic.return_value = 2 messages = expected_messages[:1] created = list(self.controller.post(queue_name, messages, uuid, project=self.project)) self.assertEqual(1, len(created)) # Force infinite retries ic.return_value = None with testing.expect(errors.MessageConflict): self.controller.post(queue_name, messages, uuid, project=self.project) created = list(self.controller.post(queue_name, expected_messages[1:], uuid, project=self.project)) self.assertEqual(2, len(created)) expected_ids = [m['body']['backupId'] for m in expected_messages] interaction = self.controller.list(queue_name, client_uuid=uuid, echo=True, project=self.project) actual_messages = list(next(interaction)) self.assertEqual(len(expected_messages), len(actual_messages)) actual_ids = [m['body']['backupId'] for m in actual_messages] self.assertEqual(expected_ids, actual_ids) @testing.requires_mongodb class MongodbClaimTests(MongodbSetupMixin, base.ClaimControllerTest): driver_class = mongodb.DataDriver config_file = 'wsgi_mongodb.conf' controller_class = controllers.ClaimController control_driver_class = mongodb.ControlDriver def test_claim_doesnt_exist(self): """Verifies that operations fail on expired/missing claims. Methods should raise an exception when the claim doesn't exists and/or has expired. """ epoch = '000000000000000000000000' self.assertRaises(storage.errors.ClaimDoesNotExist, self.controller.get, self.queue_name, epoch, project=self.project) claim_id, messages = self.controller.create(self.queue_name, {'ttl': 1, 'grace': 0}, project=self.project) # Lets let it expire time.sleep(1) self.assertRaises(storage.errors.ClaimDoesNotExist, self.controller.update, self.queue_name, claim_id, {'ttl': 1, 'grace': 0}, project=self.project) self.assertRaises(storage.errors.ClaimDoesNotExist, self.controller.update, self.queue_name, claim_id, {'ttl': 1, 'grace': 0}, project=self.project) @testing.requires_mongodb class MongodbSubscriptionTests(MongodbSetupMixin, base.SubscriptionControllerTest): driver_class = mongodb.DataDriver config_file = 'wsgi_mongodb.conf' controller_class = controllers.SubscriptionController control_driver_class = mongodb.ControlDriver # # TODO(kgriffs): Do these need database purges as well as those above? 
# @testing.requires_mongodb class MongodbPoolsTests(base.PoolsControllerTest): config_file = 'wsgi_mongodb.conf' driver_class = mongodb.ControlDriver controller_class = controllers.PoolsController control_driver_class = mongodb.ControlDriver def setUp(self): super(MongodbPoolsTests, self).setUp() self.uri2 = str(uuid.uuid1()) self.flavor2 = str(uuid.uuid1()) self.pools_controller.create(self.pool, 100, self.uri2, flavor=self.flavor2, options={}) def tearDown(self): # self.pool_ctrl.update(self.pool, flavor="") self.pools_controller.drop_all() super(MongodbPoolsTests, self).tearDown() # NOTE(gengchc2): Unittest for new flavor configure scenario. def test_delete_pool_used_by_flavor1(self): self.flavors_controller.create(self.flavor, project=self.project, capabilities={}) self.pools_controller.update(self.pool1, flavor=self.flavor) with testing.expect(errors.PoolInUseByFlavor): self.pools_controller.delete(self.pool1) # NOTE(gengchc2): Unittest for new flavor configure scenario. def test_mismatching_capabilities_fifo1(self): with testing.expect(errors.PoolCapabilitiesMismatch): self.pools_controller.create(str(uuid.uuid1()), 100, 'mongodb.fifo://localhost', flavor=self.flavor, options={}) def test_mismatching_capabilities1(self): # NOTE(gengchc2): This test is used for testing mismatchming # capabilities in pool with flavor with testing.expect(errors.PoolCapabilitiesMismatch): self.pools_controller.create(str(uuid.uuid1()), 100, 'redis://localhost', flavor=self.flavor, options={}) # NOTE(gengchc2): Unittest for new flavor configure scenario. def test_duplicate_uri1(self): with testing.expect(errors.PoolAlreadyExists): # The url 'localhost' is used in setUp(). So reusing the uri # 'localhost' here will raise PoolAlreadyExists. self.pools_controller.create(str(uuid.uuid1()), 100, self.uri, flavor=str(uuid.uuid1()), options={}) @testing.requires_mongodb class MongodbCatalogueTests(base.CatalogueControllerTest): driver_class = mongodb.ControlDriver controller_class = controllers.CatalogueController control_driver_class = mongodb.ControlDriver config_file = 'wsgi_mongodb.conf' def setUp(self): super(MongodbCatalogueTests, self).setUp() self.addCleanup(self.controller.drop_all) @testing.requires_mongodb class PooledMessageTests(base.MessageControllerTest): config_file = 'wsgi_mongodb_pooled.conf' controller_class = pooling.MessageController driver_class = pooling.DataDriver control_driver_class = mongodb.ControlDriver controller_base_class = storage.Message # NOTE(kgriffs): MongoDB's TTL scavenger only runs once a minute gc_interval = 60 @testing.requires_mongodb class PooledClaimsTests(base.ClaimControllerTest): config_file = 'wsgi_mongodb_pooled.conf' controller_class = pooling.ClaimController driver_class = pooling.DataDriver control_driver_class = mongodb.ControlDriver controller_base_class = storage.Claim def test_delete_message_expired_claim(self): # NOTE(flaper87): The pool tests uses sqlalchemy # as one of the pools, which causes this test to fail. # Several reasons to do this: # The sqla driver is deprecated # It's not optimized # mocking utcnow mocks the driver too, which # requires to put sleeps in the test self.skip("Fix sqlalchemy driver") # NOTE(gengchc2): Unittest for new flavor configure scenario. 
@testing.requires_mongodb class MongodbFlavorsTest1(base.FlavorsControllerTest1): driver_class = mongodb.ControlDriver controller_class = controllers.FlavorsController control_driver_class = mongodb.ControlDriver config_file = 'wsgi_mongodb.conf' def setUp(self): super(MongodbFlavorsTest1, self).setUp() self.addCleanup(self.controller.drop_all) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/storage/test_impl_redis.py0000664000175100017510000006744215033040005023416 0ustar00mylesmyles# Copyright (c) 2014 Prashanth Raghu. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import collections import time from unittest import mock import uuid from oslo_utils import timeutils from oslo_utils import uuidutils import redis from zaqar.common import cache as oslo_cache from zaqar.common import errors from zaqar.conf import drivers_message_store_redis from zaqar import storage from zaqar.storage import pooling from zaqar.storage.redis import controllers from zaqar.storage.redis import driver from zaqar.storage.redis import messages from zaqar.storage.redis import utils from zaqar import tests as testing from zaqar.tests.unit.storage import base def _create_sample_message(now=None, claimed=False, body=None): if now is None: now = timeutils.utcnow_ts() if claimed: claim_id = uuid.uuid4() claim_expires = now + 300 else: claim_id = None claim_expires = now if body is None: body = {} return messages.Message( ttl=60, created=now, client_uuid=uuid.uuid4(), claim_id=claim_id, claim_expires=claim_expires, body=body ) class RedisUtilsTest(testing.TestBase): config_file = 'wsgi_redis.conf' def setUp(self): super(RedisUtilsTest, self).setUp() self.conf.register_opts(drivers_message_store_redis.ALL_OPTS, group=drivers_message_store_redis.GROUP_NAME) self.redis_conf = self.conf[drivers_message_store_redis.GROUP_NAME] MockDriver = collections.namedtuple('MockDriver', 'redis_conf') self.driver = MockDriver(self.redis_conf) def test_scope_queue_name(self): self.assertEqual('.my-q', utils.scope_queue_name('my-q')) self.assertEqual('.my-q', utils.scope_queue_name('my-q', None)) self.assertEqual('123.my-q', utils.scope_queue_name('my-q', '123')) self.assertEqual('123.my-q_1', utils.scope_queue_name('my-q_1', '123')) self.assertEqual('.', utils.scope_queue_name()) self.assertEqual('123.', utils.scope_queue_name(None, '123')) def test_scope_messages_set(self): self.assertEqual('.my-q.', utils.scope_message_ids_set('my-q')) self.assertEqual('p.my-q.', utils.scope_message_ids_set('my-q', 'p')) self.assertEqual('p.my-q.s', utils.scope_message_ids_set('my-q', 'p', 's')) self.assertEqual('..', utils.scope_message_ids_set(None)) self.assertEqual('123..', utils.scope_message_ids_set(None, '123')) self.assertEqual('..s', utils.scope_message_ids_set(None, None, 's')) def test_descope_messages_set(self): key = utils.scope_message_ids_set('my-q') self.assertEqual(('my-q', None), utils.descope_message_ids_set(key)) key = utils.scope_message_ids_set('my-q', '123') 
self.assertEqual(('my-q', '123'), utils.descope_message_ids_set(key)) key = utils.scope_message_ids_set(None, '123') self.assertEqual((None, '123'), utils.descope_message_ids_set(key)) key = utils.scope_message_ids_set() self.assertEqual((None, None), utils.descope_message_ids_set(key)) def test_normalize_none_str(self): self.assertEqual('my-q', utils.normalize_none_str('my-q')) self.assertEqual('', utils.normalize_none_str(None)) def test_msg_claimed_filter(self): now = timeutils.utcnow_ts() unclaimed_msg = _create_sample_message() self.assertFalse(utils.msg_claimed_filter(unclaimed_msg, now)) claimed_msg = _create_sample_message(claimed=True) self.assertTrue(utils.msg_claimed_filter(claimed_msg, now)) # NOTE(kgriffs): Has a claim ID, but the claim is expired claimed_msg.claim_expires = now - 60 self.assertFalse(utils.msg_claimed_filter(claimed_msg, now)) def test_descope_queue_name(self): self.assertEqual('q', utils.descope_queue_name('p.q')) self.assertEqual('q', utils.descope_queue_name('.q')) self.assertEqual('', utils.descope_queue_name('.')) def test_msg_echo_filter(self): msg = _create_sample_message() self.assertTrue(utils.msg_echo_filter(msg, msg.client_uuid)) alt_uuid = uuidutils.generate_uuid() self.assertFalse(utils.msg_echo_filter(msg, alt_uuid)) def test_basic_message(self): now = timeutils.utcnow_ts() body = { 'msg': 'Hello Earthlings!', 'unicode': 'ab\u00e7', 'bytes': b'ab\xc3\xa7', b'ab\xc3\xa7': 'one, two, three', 'ab\u00e7': 'one, two, three', } msg = _create_sample_message(now=now, body=body) basic_msg = msg.to_basic(now + 5) self.assertEqual(msg.id, basic_msg['id']) self.assertEqual(5, basic_msg['age']) self.assertEqual(body, basic_msg['body']) self.assertEqual(msg.ttl, basic_msg['ttl']) def test_retries_on_connection_error(self): num_calls = [0] @utils.retries_on_connection_error def _raises_connection_error(self): num_calls[0] += 1 raise redis.exceptions.ConnectionError self.assertRaises(redis.exceptions.ConnectionError, _raises_connection_error, self) self.assertEqual([self.redis_conf.max_reconnect_attempts], num_calls) @testing.requires_redis class RedisDriverTest(testing.TestBase): config_file = 'wsgi_redis.conf' def test_db_instance(self): oslo_cache.register_config(self.conf) cache = oslo_cache.get_cache(self.conf) redis_driver = driver.DataDriver(self.conf, cache, driver.ControlDriver (self.conf, cache)) self.assertIsInstance(redis_driver.connection, redis.Redis) def test_version_match(self): oslo_cache.register_config(self.conf) cache = oslo_cache.get_cache(self.conf) with mock.patch('redis.Redis.info') as info: info.return_value = {'redis_version': '2.4.6'} self.assertRaises(RuntimeError, driver.DataDriver, self.conf, cache, driver.ControlDriver(self.conf, cache)) info.return_value = {'redis_version': '2.11'} try: driver.DataDriver(self.conf, cache, driver.ControlDriver(self.conf, cache)) except RuntimeError: self.fail('version match failed') def test_connection_url_invalid(self): self.assertRaises(errors.ConfigurationError, driver.ConnectionURI, 'red://example.com') self.assertRaises(errors.ConfigurationError, driver.ConnectionURI, 'redis://') self.assertRaises(errors.ConfigurationError, driver.ConnectionURI, 'redis://example.com:not_an_integer') self.assertRaises(errors.ConfigurationError, driver.ConnectionURI, 'redis://s1:not_an_integer,s2?master=obi-wan') self.assertRaises(errors.ConfigurationError, driver.ConnectionURI, 'redis://s1,s2') self.assertRaises(errors.ConfigurationError, driver.ConnectionURI, 'redis:') 
self.assertRaises(errors.ConfigurationError, driver.ConnectionURI, 'redis:') def test_connection_url_tcp(self): uri = driver.ConnectionURI('redis://example.com') self.assertEqual(driver.STRATEGY_TCP, uri.strategy) self.assertEqual(6379, uri.port) self.assertEqual(0.1, uri.socket_timeout) self.assertEqual(0, uri.dbid) self.assertIsNone(uri.username) self.assertIsNone(uri.password) uri = driver.ConnectionURI('redis://example.com:7777') self.assertEqual(driver.STRATEGY_TCP, uri.strategy) self.assertEqual(7777, uri.port) self.assertEqual(0.1, uri.socket_timeout) self.assertEqual(0, uri.dbid) self.assertIsNone(uri.username) self.assertIsNone(uri.password) uri = driver.ConnectionURI( 'redis://example.com:7777?socket_timeout=1') self.assertEqual(driver.STRATEGY_TCP, uri.strategy) self.assertEqual(7777, uri.port) self.assertEqual(1.0, uri.socket_timeout) self.assertEqual(0, uri.dbid) self.assertIsNone(uri.username) self.assertIsNone(uri.password) uri = driver.ConnectionURI( 'redis://:test123@example.com:7777?socket_timeout=1&dbid=5') self.assertEqual(driver.STRATEGY_TCP, uri.strategy) self.assertEqual(7777, uri.port) self.assertEqual(1.0, uri.socket_timeout) self.assertEqual(5, uri.dbid) self.assertIsNone(uri.username) self.assertEqual('test123', uri.password) # NOTE(tkajinam): Test fallback for backword compatibility uri = driver.ConnectionURI('redis://test123@example.com') self.assertEqual(driver.STRATEGY_TCP, uri.strategy) self.assertEqual(6379, uri.port) self.assertEqual(0.1, uri.socket_timeout) self.assertEqual(0, uri.dbid) self.assertIsNone(uri.username) self.assertEqual('test123', uri.password) uri = driver.ConnectionURI( 'redis://default:test123@example.com') self.assertEqual(driver.STRATEGY_TCP, uri.strategy) self.assertEqual(6379, uri.port) self.assertEqual(0.1, uri.socket_timeout) self.assertEqual(0, uri.dbid) self.assertEqual('default', uri.username) self.assertEqual('test123', uri.password) def test_connection_uri_unix_socket(self): uri = driver.ConnectionURI('redis:///tmp/redis.sock') self.assertEqual(driver.STRATEGY_UNIX, uri.strategy) self.assertEqual('/tmp/redis.sock', uri.unix_socket_path) self.assertEqual(0.1, uri.socket_timeout) self.assertEqual(0, uri.dbid) self.assertIsNone(uri.username) self.assertIsNone(uri.password) uri = driver.ConnectionURI( 'redis:///tmp/redis.sock?socket_timeout=1.5') self.assertEqual(driver.STRATEGY_UNIX, uri.strategy) self.assertEqual('/tmp/redis.sock', uri.unix_socket_path) self.assertEqual(1.5, uri.socket_timeout) self.assertEqual(0, uri.dbid) self.assertIsNone(uri.username) self.assertIsNone(uri.password) uri = driver.ConnectionURI( 'redis://:test123@/tmp/redis.sock?' 
'socket_timeout=1.5&dbid=5') self.assertEqual(driver.STRATEGY_UNIX, uri.strategy) self.assertEqual('/tmp/redis.sock', uri.unix_socket_path) self.assertEqual(1.5, uri.socket_timeout) self.assertEqual(5, uri.dbid) self.assertIsNone(uri.username) self.assertEqual('test123', uri.password) # NOTE(tkajinam): Test fallback for backword compatibility uri = driver.ConnectionURI( 'redis://test123@/tmp/redis.sock') self.assertEqual(driver.STRATEGY_UNIX, uri.strategy) self.assertEqual('/tmp/redis.sock', uri.unix_socket_path) self.assertEqual(0.1, uri.socket_timeout) self.assertEqual(0, uri.dbid) self.assertIsNone(uri.username) self.assertEqual('test123', uri.password) uri = driver.ConnectionURI( 'redis://default:test123@/tmp/redis.sock') self.assertEqual(driver.STRATEGY_UNIX, uri.strategy) self.assertEqual('/tmp/redis.sock', uri.unix_socket_path) self.assertEqual(0.1, uri.socket_timeout) self.assertEqual(0, uri.dbid) self.assertEqual('default', uri.username) self.assertEqual('test123', uri.password) def test_connection_uri_sentinel(self): uri = driver.ConnectionURI('redis://s1?master=dumbledore') self.assertEqual(driver.STRATEGY_SENTINEL, uri.strategy) self.assertEqual([('s1', 26379)], uri.sentinels) self.assertEqual('dumbledore', uri.master) self.assertEqual(0.1, uri.socket_timeout) self.assertEqual(0, uri.dbid) self.assertIsNone(uri.username) self.assertIsNone(uri.password) self.assertIsNone(uri.sentinel_username) self.assertIsNone(uri.sentinel_password) uri = driver.ConnectionURI('redis://s1,s2?master=dumbledore') self.assertEqual(driver.STRATEGY_SENTINEL, uri.strategy) self.assertEqual([('s1', 26379), ('s2', 26379)], uri.sentinels) self.assertEqual('dumbledore', uri.master) self.assertEqual(0.1, uri.socket_timeout) self.assertEqual(0, uri.dbid) self.assertIsNone(uri.username) self.assertIsNone(uri.password) self.assertIsNone(uri.sentinel_username) self.assertIsNone(uri.sentinel_password) uri = driver.ConnectionURI('redis://s1:26389,s1?master=dumbledore') self.assertEqual(driver.STRATEGY_SENTINEL, uri.strategy) self.assertEqual([('s1', 26389), ('s1', 26379)], uri.sentinels) self.assertEqual('dumbledore', uri.master) self.assertEqual(0.1, uri.socket_timeout) self.assertEqual(0, uri.dbid) self.assertIsNone(uri.username) self.assertIsNone(uri.password) self.assertIsNone(uri.sentinel_username) self.assertIsNone(uri.sentinel_password) uri = driver.ConnectionURI( 'redis://[::1]:26389,[::2]?master=dumbledore') self.assertEqual(driver.STRATEGY_SENTINEL, uri.strategy) self.assertEqual([('::1', 26389), ('::2', 26379)], uri.sentinels) self.assertEqual('dumbledore', uri.master) self.assertEqual(0.1, uri.socket_timeout) self.assertEqual(0, uri.dbid) self.assertIsNone(uri.username) self.assertIsNone(uri.password) self.assertIsNone(uri.sentinel_username) self.assertIsNone(uri.sentinel_password) uri = driver.ConnectionURI( 'redis://s1?master=dumbledore&socket_timeout=0.5') self.assertEqual(driver.STRATEGY_SENTINEL, uri.strategy) self.assertEqual([('s1', 26379)], uri.sentinels) self.assertEqual('dumbledore', uri.master) self.assertEqual(0.5, uri.socket_timeout) self.assertEqual(0, uri.dbid) self.assertIsNone(uri.username) self.assertIsNone(uri.password) self.assertIsNone(uri.sentinel_username) self.assertIsNone(uri.sentinel_password) uri = driver.ConnectionURI( 'redis://:test123@s1?master=dumbledore&socket_timeout=0.5&dbid=5') self.assertEqual(driver.STRATEGY_SENTINEL, uri.strategy) self.assertEqual([('s1', 26379)], uri.sentinels) self.assertEqual('dumbledore', uri.master) self.assertEqual(0.5, uri.socket_timeout) 
self.assertEqual(5, uri.dbid) self.assertIsNone(uri.username) self.assertEqual('test123', uri.password) self.assertIsNone(uri.sentinel_username) self.assertIsNone(uri.sentinel_password) # NOTE(tkajinam): Test fallback for backword compatibility uri = driver.ConnectionURI( 'redis://test123@s1?master=dumbledore') self.assertEqual(driver.STRATEGY_SENTINEL, uri.strategy) self.assertEqual([('s1', 26379)], uri.sentinels) self.assertEqual('dumbledore', uri.master) self.assertEqual(0.1, uri.socket_timeout) self.assertEqual(0, uri.dbid) self.assertIsNone(uri.username) self.assertEqual('test123', uri.password) self.assertIsNone(uri.sentinel_username) self.assertIsNone(uri.sentinel_password) uri = driver.ConnectionURI( 'redis://default:test123@s1?master=dumbledore') self.assertEqual(driver.STRATEGY_SENTINEL, uri.strategy) self.assertEqual([('s1', 26379)], uri.sentinels) self.assertEqual('dumbledore', uri.master) self.assertEqual(0.1, uri.socket_timeout) self.assertEqual(0, uri.dbid) self.assertEqual('default', uri.username) self.assertEqual('test123', uri.password) self.assertIsNone(uri.sentinel_username) self.assertIsNone(uri.sentinel_password) uri = driver.ConnectionURI( 'redis://default:test123@s1?master=dumbledore' '&sentinel_username=sentinel&sentinel_password=test456') self.assertEqual(driver.STRATEGY_SENTINEL, uri.strategy) self.assertEqual([('s1', 26379)], uri.sentinels) self.assertEqual('dumbledore', uri.master) self.assertEqual(0.1, uri.socket_timeout) self.assertEqual(0, uri.dbid) self.assertEqual('default', uri.username) self.assertEqual('test123', uri.password) self.assertEqual('sentinel', uri.sentinel_username) self.assertEqual('test456', uri.sentinel_password) @testing.requires_redis class RedisQueuesTest(base.QueueControllerTest): driver_class = driver.DataDriver config_file = 'wsgi_redis.conf' controller_class = controllers.QueueController control_driver_class = driver.ControlDriver def setUp(self): super(RedisQueuesTest, self).setUp() self.connection = self.driver.connection self.msg_controller = self.driver.message_controller def tearDown(self): super(RedisQueuesTest, self).tearDown() self.connection.flushdb() @testing.requires_redis class RedisMessagesTest(base.MessageControllerTest): driver_class = driver.DataDriver config_file = 'wsgi_redis.conf' controller_class = controllers.MessageController control_driver_class = driver.ControlDriver gc_interval = 1 def setUp(self): super(RedisMessagesTest, self).setUp() self.connection = self.driver.connection def tearDown(self): super(RedisMessagesTest, self).tearDown() self.connection.flushdb() def test_count(self): queue_name = 'get-count' self.queue_controller.create(queue_name) msgs = [{ 'ttl': 300, 'body': 'di mo fy' } for i in range(0, 10)] client_id = uuid.uuid4() # Creating 10 messages self.controller.post(queue_name, msgs, client_id) num_msg = self.controller._count(queue_name, None) self.assertEqual(10, num_msg) def test_empty_queue_exception(self): queue_name = 'empty-queue-test' self.queue_controller.create(queue_name) self.assertRaises(storage.errors.QueueIsEmpty, self.controller.first, queue_name) def test_gc(self): self.queue_controller.create(self.queue_name) self.controller.post(self.queue_name, [{'ttl': 0, 'body': {}}], client_uuid=uuidutils.generate_uuid()) num_removed = self.controller.gc() self.assertEqual(1, num_removed) for _ in range(100): self.controller.post(self.queue_name, [{'ttl': 0, 'body': {}}], client_uuid=uuidutils.generate_uuid()) num_removed = self.controller.gc() self.assertEqual(100, num_removed) def 
test_invalid_uuid(self): queue_name = 'invalid-uuid-test' msgs = [{ 'ttl': 300, 'body': 'di mo fy' }] client_id = "invalid_uuid" self.assertRaises(ValueError, self.controller.post, queue_name, msgs, client_id) @testing.requires_redis class RedisClaimsTest(base.ClaimControllerTest): driver_class = driver.DataDriver config_file = 'wsgi_redis.conf' controller_class = controllers.ClaimController control_driver_class = driver.ControlDriver def setUp(self): super(RedisClaimsTest, self).setUp() self.connection = self.driver.connection def tearDown(self): super(RedisClaimsTest, self).tearDown() self.connection.flushdb() def test_claim_doesnt_exist(self): queue_name = 'no-such-claim' epoch = '000000000000000000000000' self.queue_controller.create(queue_name) self.assertRaises(storage.errors.ClaimDoesNotExist, self.controller.get, queue_name, epoch, project=None) claim_id, messages = self.controller.create(queue_name, {'ttl': 1, 'grace': 0}, project=None) # Lets let it expire time.sleep(1) self.assertRaises(storage.errors.ClaimDoesNotExist, self.controller.update, queue_name, claim_id, {}, project=None) # create a claim and then delete the queue claim_id, messages = self.controller.create(queue_name, {'ttl': 100, 'grace': 0}, project=None) self.queue_controller.delete(queue_name) self.assertRaises(storage.errors.ClaimDoesNotExist, self.controller.get, queue_name, claim_id, project=None) self.assertRaises(storage.errors.ClaimDoesNotExist, self.controller.update, queue_name, claim_id, {}, project=None) def test_get_claim_after_expires(self): queue_name = 'no-such-claim' self.queue_controller.create(queue_name, project='fake_project') new_messages = [{'ttl': 60, 'body': {}}, {'ttl': 60, 'body': {}}, {'ttl': 60, 'body': {}}] self.message_controller.post(queue_name, new_messages, client_uuid=str(uuid.uuid4()), project='fake_project') claim_id, messages = self.controller.create(queue_name, {'ttl': 1, 'grace': 0}, project='fake_project') # Lets let it expire time.sleep(2) self.assertRaises(storage.errors.ClaimDoesNotExist, self.controller.get, queue_name, claim_id, project='fake_project') def test_gc(self): self.queue_controller.create(self.queue_name) for _ in range(100): self.message_controller.post(self.queue_name, [{'ttl': 300, 'body': 'yo gabba'}], client_uuid=uuidutils.generate_uuid()) now = timeutils.utcnow_ts() timeutils_utcnow = 'oslo_utils.timeutils.utcnow_ts' # Test a single claim with mock.patch(timeutils_utcnow) as mock_utcnow: mock_utcnow.return_value = now - 1 self.controller.create(self.queue_name, {'ttl': 1, 'grace': 60}) num_removed = self.controller._gc(self.queue_name, None) self.assertEqual(1, num_removed) # Test multiple claims with mock.patch(timeutils_utcnow) as mock_utcnow: mock_utcnow.return_value = now - 1 for _ in range(5): self.controller.create(self.queue_name, {'ttl': 1, 'grace': 60}) # NOTE(kgriffs): These ones should not be cleaned up self.controller.create(self.queue_name, {'ttl': 60, 'grace': 60}) self.controller.create(self.queue_name, {'ttl': 60, 'grace': 60}) num_removed = self.controller._gc(self.queue_name, None) self.assertEqual(5, num_removed) @testing.requires_redis class RedisSubscriptionTests(base.SubscriptionControllerTest): driver_class = driver.DataDriver config_file = 'wsgi_redis.conf' controller_class = controllers.SubscriptionController control_driver_class = driver.ControlDriver @testing.requires_redis class RedisPoolsTests(base.PoolsControllerTest): config_file = 'wsgi_redis.conf' driver_class = driver.ControlDriver controller_class = 
controllers.PoolsController control_driver_class = driver.ControlDriver def setUp(self): super(RedisPoolsTests, self).setUp() self.pools_controller = self.driver.pools_controller # Let's create one pool self.pool = str(uuid.uuid1()) self.pools_controller.create(self.pool, 100, 'localhost', options={}) self.pool1 = str(uuid.uuid1()) self.flavor = str(uuid.uuid1()) self.flavors_controller.create(self.flavor, project=self.project, capabilities={}) self.pools_controller.create(self.pool1, 100, 'localhost1', flavor=self.flavor, options={}) self.flavors_controller = self.driver.flavors_controller def tearDown(self): self.pools_controller.drop_all() super(RedisPoolsTests, self).tearDown() def test_delete_pool_used_by_flavor(self): with testing.expect(storage.errors.PoolInUseByFlavor): self.pools_controller.delete(self.pool1) def test_mismatching_capabilities_fifo(self): # NOTE(gengchc2): The fifo function is not implemented # in redis, we skip it. self.skip("The fifo function is not implemented") def test_mismatching_capabilities1(self): # NOTE(gengchc2): This test is used for testing mismatchming # capabilities in pool with flavor with testing.expect(storage.errors.PoolCapabilitiesMismatch): self.pools_controller.create(str(uuid.uuid1()), 100, 'mongodb://localhost', flavor=self.flavor, options={}) @testing.requires_redis class RedisCatalogueTests(base.CatalogueControllerTest): driver_class = driver.ControlDriver controller_class = controllers.CatalogueController control_driver_class = driver.ControlDriver config_file = 'wsgi_redis.conf' def setUp(self): super(RedisCatalogueTests, self).setUp() self.addCleanup(self.controller.drop_all) @testing.requires_redis class PooledMessageTests(base.MessageControllerTest): config_file = 'wsgi_redis_pooled.conf' controller_class = pooling.MessageController driver_class = pooling.DataDriver control_driver_class = driver.ControlDriver controller_base_class = storage.Message # NOTE(kgriffs): Redis's TTL scavenger only runs once a minute gc_interval = 60 @testing.requires_redis class PooledClaimsTests(base.ClaimControllerTest): config_file = 'wsgi_redis_pooled.conf' controller_class = pooling.ClaimController driver_class = pooling.DataDriver control_driver_class = driver.ControlDriver controller_base_class = storage.Claim def setUp(self): super(PooledClaimsTests, self).setUp() self.connection = self.controller._pool_catalog.lookup( self.queue_name, self.project)._storage.\ claim_controller.driver.connection def tearDown(self): super(PooledClaimsTests, self).tearDown() self.connection.flushdb() # NOTE(gengchc2): Unittest for new flavor configure scenario. @testing.requires_redis class RedisFlavorsTest1(base.FlavorsControllerTest1): driver_class = driver.ControlDriver controller_class = controllers.FlavorsController control_driver_class = driver.ControlDriver config_file = 'wsgi_redis.conf' def setUp(self): super(RedisFlavorsTest1, self).setUp() self.addCleanup(self.controller.drop_all) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/storage/test_impl_sqlalchemy.py0000664000175100017510000000626315033040005024444 0ustar00mylesmyles# Copyright (c) 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. 
# You may obtain a copy
# of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.

import uuid

from zaqar import storage
from zaqar.storage import sqlalchemy
from zaqar.storage.sqlalchemy import controllers
from zaqar.storage.sqlalchemy import tables
from zaqar.storage.sqlalchemy import utils
from zaqar import tests as testing
from zaqar.tests.unit.storage import base


class DBCreateMixin(object):

    def _prepare_conf(self):
        tables.metadata.create_all(self.driver.engine)


class SqlalchemyQueueTests(DBCreateMixin, base.QueueControllerTest):
    driver_class = sqlalchemy.ControlDriver
    config_file = 'wsgi_sqlalchemy.conf'
    controller_class = controllers.QueueController
    control_driver_class = sqlalchemy.ControlDriver


class SqlalchemyPoolsTest(DBCreateMixin, base.PoolsControllerTest):
    config_file = 'wsgi_sqlalchemy.conf'
    driver_class = sqlalchemy.ControlDriver
    controller_class = controllers.PoolsController
    control_driver_class = sqlalchemy.ControlDriver

    def setUp(self):
        super(SqlalchemyPoolsTest, self).setUp()
        # self.pools_controller.create(self.pool, 100, 'localhost',
        #                              group=self.pool_group, options={})

    def test_mismatching_capabilities1(self):
        # NOTE(gengchc2): This test is used for testing mismatching
        # capabilities in a pool with a flavor
        with testing.expect(storage.errors.PoolCapabilitiesMismatch):
            self.pools_controller.create(str(uuid.uuid1()),
                                         100, 'redis://localhost',
                                         flavor=self.flavor,
                                         options={})


class SqlalchemyCatalogueTest(DBCreateMixin, base.CatalogueControllerTest):
    config_file = 'wsgi_sqlalchemy.conf'
    driver_class = sqlalchemy.ControlDriver
    controller_class = controllers.CatalogueController
    control_driver_class = sqlalchemy.ControlDriver


# NOTE(gengchc2): Unittest for new flavor configure scenario.
class SqlalchemyFlavorsTest1(DBCreateMixin, base.FlavorsControllerTest1):
    config_file = 'wsgi_sqlalchemy.conf'
    driver_class = sqlalchemy.ControlDriver
    controller_class = controllers.FlavorsController
    control_driver_class = sqlalchemy.ControlDriver


class MsgidTests(testing.TestBase):

    def test_encode(self):
        ids = [3, 1, 0]
        msgids = ['5c693a50', '5c693a52', '5c693a53']
        for msgid, id in zip(msgids, ids):
            self.assertEqual(msgid, utils.msgid_encode(id))

    def test_decode(self):
        msgids = ['5c693a50', '5c693a52', '5c693a53', '']
        ids = [3, 1, 0, None]
        for msgid, id in zip(msgids, ids):
            self.assertEqual(id, utils.msgid_decode(msgid))

zaqar-20.1.0.dev29/zaqar/tests/unit/storage/test_impl_swift.py

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
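# NOTE: illustrative sketch, not part of the original test suite. The
# MsgidTests vectors above (3 -> '5c693a50', 1 -> '5c693a52',
# 0 -> '5c693a53', '' -> None) are consistent with XOR-ing the integer id
# against a constant mask and rendering it as bare hex. The snippet below
# reproduces exactly those vectors; the mask value and the error handling
# are assumptions for illustration, not necessarily the shipped code.

_MASK = 0x5c693a53  # assumed: any id XORed with this yields the hex msgid


def example_msgid_encode(id):
    # hex() prefixes '0x'; strip it to match the bare msgid strings above.
    return hex(id ^ _MASK)[2:]


def example_msgid_decode(msgid):
    try:
        return int(msgid, 16) ^ _MASK
    except ValueError:
        # e.g. the empty string in the test vectors decodes to None
        return None


assert example_msgid_encode(3) == '5c693a50'
assert example_msgid_decode('5c693a52') == 1
assert example_msgid_decode('') is None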
from zaqar.common import cache as oslo_cache
from zaqar.storage import mongodb
from zaqar.storage.swift import controllers
from zaqar.storage.swift import driver
from zaqar import tests as testing
from zaqar.tests.unit.storage import base


@testing.requires_swift
class SwiftMessagesTest(base.MessageControllerTest):
    driver_class = driver.DataDriver
    config_file = 'wsgi_swift.conf'
    controller_class = controllers.MessageController
    control_driver_class = mongodb.ControlDriver
    gc_interval = 1


@testing.requires_swift
class SwiftClaimsTest(base.ClaimControllerTest):
    driver_class = driver.DataDriver
    config_file = 'wsgi_swift.conf'
    controller_class = controllers.ClaimController
    control_driver_class = mongodb.ControlDriver


@testing.requires_swift
class SwiftSubscriptionsTest(base.SubscriptionControllerTest):
    driver_class = driver.DataDriver
    config_file = 'wsgi_swift.conf'
    controller_class = controllers.SubscriptionController
    control_driver_class = mongodb.ControlDriver


@testing.requires_swift
class SwiftDriverTest(testing.TestBase):
    config_file = 'wsgi_swift.conf'

    def test_is_alive(self):
        oslo_cache.register_config(self.conf)
        cache = oslo_cache.get_cache(self.conf)
        swift_driver = driver.DataDriver(self.conf, cache,
                                         mongodb.ControlDriver
                                         (self.conf, cache))
        self.assertTrue(swift_driver.is_alive())

zaqar-20.1.0.dev29/zaqar/tests/unit/storage/test_pool_catalog_new.py

# Copyright (c) 2017 ZTE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.

from unittest import mock
import uuid

from zaqar.common import cache as oslo_cache
from zaqar.storage import errors
from zaqar.storage import mongodb
from zaqar.storage import pooling
from zaqar.storage import utils
from zaqar import tests as testing

# TODO(cpp-cabrera): it would be wonderful to refactor this unit test
# so that it could use multiple control storage backends once those
# have pools/catalogue implementations.
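# NOTE: illustrative sketch, not part of the original test suite. The tests
# below exercise zaqar.storage.pooling's Catalog through three verbs:
# register a queue/project pair onto a pool, look the pair up to obtain a
# pool-backed storage wrapper, and deregister it. Condensed, using the same
# calls the tests make (queue and project names here are placeholders):
#
#     catalog.register('my-queue', project='my-project')
#     storage = catalog.lookup('my-queue', 'my-project')
#     # storage._storage is the concrete DataDriver for the mapped pool
#     catalog.deregister('my-queue', 'my-project')
#     assert catalog.lookup('my-queue', 'my-project') is None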
@testing.requires_mongodb class PoolCatalogTest(testing.TestBase): config_file = 'wsgi_mongodb_pooled_disable_virtual_pool.conf' def setUp(self): super(PoolCatalogTest, self).setUp() oslo_cache.register_config(self.conf) cache = oslo_cache.get_cache(self.conf) control = utils.load_storage_driver(self.conf, cache, control_mode=True) self.pools_ctrl = control.pools_controller self.flavors_ctrl = control.flavors_controller self.catalogue_ctrl = control.catalogue_controller # NOTE(cpp-cabrera): populate catalogue self.pool = str(uuid.uuid1()) self.pool2 = str(uuid.uuid1()) self.queue = str(uuid.uuid1()) self.flavor = str(uuid.uuid1()) self.project = str(uuid.uuid1()) # FIXME(therve) This is horrible, we need to manage duplication in a # nicer way if 'localhost' in self.mongodb_url: other_url = self.mongodb_url.replace('localhost', '127.0.0.1') elif '127.0.0.1' in self.mongodb_url: other_url = self.mongodb_url.replace('127.0.0.1', 'localhost') else: self.skipTest("Can't build a dummy mongo URL.") self.pools_ctrl.create(self.pool, 100, self.mongodb_url) self.pools_ctrl.create(self.pool2, 100, other_url) self.catalogue_ctrl.insert(self.project, self.queue, self.pool) self.catalog = pooling.Catalog(self.conf, cache, control) self.flavors_ctrl.create(self.flavor, project=self.project) self.pools_ctrl.update(self.pool2, flavor=self.flavor) def tearDown(self): self.catalogue_ctrl.drop_all() self.pools_ctrl.drop_all() super(PoolCatalogTest, self).tearDown() def test_lookup_loads_correct_driver(self): storage = self.catalog.lookup(self.queue, self.project) self.assertIsInstance(storage._storage, mongodb.DataDriver) def test_lookup_returns_default_or_none_if_queue_not_mapped(self): # Return default self.assertIsNone(self.catalog.lookup('not', 'mapped')) self.config(message_store='faulty', group='drivers') self.config(enable_virtual_pool=True, group='pooling:catalog') self.assertIsNotNone(self.catalog.lookup('not', 'mapped')) def test_lookup_returns_none_if_entry_deregistered(self): self.catalog.deregister(self.queue, self.project) self.assertIsNone(self.catalog.lookup(self.queue, self.project)) def test_register_leads_to_successful_lookup(self): self.catalog.register('not_yet', 'mapped') storage = self.catalog.lookup('not_yet', 'mapped') self.assertIsInstance(storage._storage, mongodb.DataDriver) def test_register_with_flavor(self): queue = 'test' self.catalog.register(queue, project=self.project, flavor=self.flavor) storage = self.catalog.lookup(queue, self.project) self.assertIsInstance(storage._storage, mongodb.DataDriver) def test_register_with_fake_flavor(self): self.assertRaises(errors.FlavorDoesNotExist, self.catalog.register, 'test', project=self.project, flavor='fake') def test_queues_list_on_multi_pools(self): def fake_list(project=None, kfilter={}, marker=None, limit=10, detailed=False, name=None): yield iter([{'name': 'fake_queue'}]) list_str = 'zaqar.storage.mongodb.queues.QueueController.list' with mock.patch(list_str) as queues_list: queues_list.side_effect = fake_list queue_controller = pooling.QueueController(self.catalog) result = queue_controller.list(project=self.project) queue_list = list(next(result)) self.assertEqual(1, len(queue_list)) def test_queue_create_with_empty_json_body(self): queue_controller = pooling.QueueController(self.catalog) with mock.patch('zaqar.storage.pooling.Catalog.register') as register: queue_controller.create(self.queue, metadata={}, project=self.project) register.assert_called_with(self.queue, project=self.project, flavor=None) 
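# NOTE: illustrative sketch, not part of the original test suite.
# test_queues_list_on_multi_pools above stubs a paginated controller method
# with a side_effect that returns a fresh generator per call, so the code
# under test can iterate pages exactly as it would against a real backend.
# The same pattern in isolation (the patch target mirrors the one the test
# uses; the surrounding code is assumed):

from unittest import mock


def _fake_list(project=None, kfilter={}, marker=None, limit=10,
               detailed=False, name=None):
    # Each call yields one "page": an iterator over queue records.
    yield iter([{'name': 'fake_queue'}])


with mock.patch('zaqar.storage.mongodb.queues.QueueController.list',
                side_effect=_fake_list):
    # Code under test would consume a page with:
    #     queue_list = list(next(controller.list(project='p')))
    pass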
zaqar-20.1.0.dev29/zaqar/tests/unit/storage/test_utils.py

# Copyright (c) 2017 Catalyst IT Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.

from unittest import mock

from zaqar.storage import utils
from zaqar import tests as testing


class StorageUtilsTest(testing.TestBase):
    config_file = 'wsgi_swift.conf'

    def test_can_connect(self):
        swift_uri = "swift://zaqar:password@/service"
        is_alive_path = 'zaqar.storage.swift.driver.DataDriver.is_alive'
        with mock.patch(is_alive_path) as is_alive:
            is_alive.return_value = True
            self.assertTrue(utils.can_connect(swift_uri, self.conf))

zaqar-20.1.0.dev29/zaqar/tests/unit/test_bootstrap.py

# Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
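# NOTE: illustrative sketch, not part of the original test suite. The
# bootstrap tests below assert that *property access* raises, which is why
# the lookups are wrapped in a lambda: assertRaises needs a callable, and a
# bare `bootstrap.storage` would evaluate (and raise) while the argument
# list was being built, before assertRaises ever ran. Stand-alone
# demonstration with invented names:

import unittest


class _LazyBoot(object):
    """Stand-in with a property that raises on access."""

    @property
    def storage(self):
        raise ValueError('bad driver config')


class _PropertyRaisesDemo(unittest.TestCase):

    def test_property_raises(self):
        obj = _LazyBoot()
        # Wrong: self.assertRaises(ValueError, obj.storage) -- the property
        # would be evaluated (and raise) outside assertRaises.
        self.assertRaises(ValueError, lambda: obj.storage)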
from zaqar import bootstrap
from zaqar.common import errors
from zaqar.storage import pooling
from zaqar.tests import base
from zaqar.tests import helpers
from zaqar.transport import websocket
from zaqar.transport import wsgi


class TestBootstrap(base.TestBase):

    def _bootstrap(self, conf_file):
        conf_file = helpers.override_mongo_conf(conf_file, self)
        self.conf = self.load_conf(conf_file)
        return bootstrap.Bootstrap(self.conf)

    def test_storage_invalid(self):
        bootstrap = self._bootstrap('drivers_storage_invalid.conf')
        self.assertRaises(errors.InvalidDriver,
                          lambda: bootstrap.storage)

    def test_storage_mongodb_pooled(self):
        """Makes sure we can load the pool driver."""
        bootstrap = self._bootstrap('wsgi_mongodb_pooled.conf')
        self.assertIsInstance(bootstrap.storage._storage,
                              pooling.DataDriver)

    def test_transport_invalid(self):
        bootstrap = self._bootstrap('drivers_transport_invalid.conf')
        self.assertRaises(errors.InvalidDriver,
                          lambda: bootstrap.transport)

    def test_transport_wsgi(self):
        bootstrap = self._bootstrap('wsgi_mongodb.conf')
        self.assertIsInstance(bootstrap.transport, wsgi.Driver)

    def test_transport_websocket(self):
        bootstrap = self._bootstrap('websocket_mongodb.conf')
        self.assertIsInstance(bootstrap.transport, websocket.Driver)

zaqar-20.1.0.dev29/zaqar/tests/unit/transport/
zaqar-20.1.0.dev29/zaqar/tests/unit/transport/__init__.py
zaqar-20.1.0.dev29/zaqar/tests/unit/transport/test_acl.py

# Copyright (c) 2015 Catalyst IT Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
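# NOTE: illustrative sketch, not part of the original test suite. The ACL
# tests below flip a single policy rule between "" (always allowed) and "!"
# (never allowed) and check the enforce decorator's behavior. The underlying
# oslo.policy calls, reduced to a stand-alone snippet; loading rules straight
# from JSON mirrors what the tests do, while the bare ConfigOpts instance is
# an assumption for illustration:

from oslo_config import cfg
from oslo_policy import policy

enforcer = policy.Enforcer(cfg.ConfigOpts())

# "" is a rule that matches everything, so enforcement succeeds.
enforcer.set_rules(policy.Rules.load_json('{"queues:get_all": ""}'),
                   use_conf=False)
assert enforcer.enforce('queues:get_all', {}, {})

# "!" is a rule that matches nothing; enforce() returns False here
# (or raises, if called with do_raise=True).
enforcer.set_rules(policy.Rules.load_json('{"queues:get_all": "!"}'),
                   use_conf=False)
assert not enforcer.enforce('queues:get_all', {}, {})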
from collections import namedtuple from oslo_policy import policy from zaqar import context from zaqar.tests import base from zaqar.transport import acl from zaqar.transport.wsgi import errors class TestAcl(base.TestBase): def setUp(self): super(TestAcl, self).setUp() ctx = context.RequestContext() request_class = namedtuple("Request", ("env",)) self.request = request_class({"zaqar.context": ctx}) def _set_policy(self, json): acl.setup_policy(self.conf) rules = policy.Rules.load_json(json) acl.ENFORCER.set_rules(rules, use_conf=False) def test_policy_allow(self): @acl.enforce("queues:get_all") def test(ign, request): pass json = '{"queues:get_all": ""}' self._set_policy(json) test(None, self.request) def test_policy_deny(self): @acl.enforce("queues:get_all") def test(ign, request): pass json = '{"queues:get_all": "!"}' self._set_policy(json) self.assertRaises(errors.HTTPForbidden, test, None, self.request) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5800135 zaqar-20.1.0.dev29/zaqar/tests/unit/transport/websocket/0000775000175100017510000000000015033040026022222 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/transport/websocket/__init__.py0000664000175100017510000000000015033040005024316 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/transport/websocket/base.py0000664000175100017510000000530215033040005023503 0ustar00mylesmyles# Copyright (c) 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. from oslo_serialization import jsonutils from zaqar import bootstrap from zaqar.conf import default from zaqar.conf import drivers_transport_websocket from zaqar.conf import transport from zaqar import tests as testing class TestBase(testing.TestBase): config_file = None def setUp(self): super(TestBase, self).setUp() if not self.config_file: self.skipTest("No config specified") self.conf.register_opts(default.ALL_OPTS) self.conf.register_opts(transport.ALL_OPTS, group=transport.GROUP_NAME) self.transport_cfg = self.conf[transport.GROUP_NAME] self.conf.register_opts(drivers_transport_websocket.ALL_OPTS, group=drivers_transport_websocket.GROUP_NAME) self.ws_cfg = self.conf[drivers_transport_websocket.GROUP_NAME] self.conf.unreliable = True self.conf.admin_mode = True self.boot = bootstrap.Bootstrap(self.conf) self.addCleanup(self.boot.storage.close) self.addCleanup(self.boot.control.close) self.transport = self.boot.transport self.api = self.boot.api def tearDown(self): if self.conf.pooling: self.boot.control.pools_controller.drop_all() self.boot.control.catalogue_controller.drop_all() super(TestBase, self).tearDown() class TestBaseFaulty(TestBase): """This test ensures we aren't letting any exceptions go unhandled.""" class V1_1Base(TestBase): """Base class for V1.1 API Tests. 
Should contain methods specific to V1.1 of the API """ def _empty_message_list(self, body): self.assertEqual([], jsonutils.loads(body[0])['messages']) class V1_1BaseFaulty(TestBaseFaulty): """Base class for V1.1 API Faulty Tests. Should contain methods specific to V1.1 exception testing """ pass class V2Base(V1_1Base): """Base class for V2 API Tests. Should contain methods specific to V2 of the API """ class V2BaseFaulty(V1_1BaseFaulty): """Base class for V2 API Faulty Tests. Should contain methods specific to V2 exception testing """ ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/transport/websocket/test_protocol.py0000664000175100017510000001020615033040005025470 0ustar00mylesmyles# Copyright 2016 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from oslo_serialization import jsonutils from oslo_utils import uuidutils import zaqar from zaqar.tests.unit.transport.websocket import base from zaqar.tests.unit.transport.websocket import utils as test_utils @ddt.ddt class TestMessagingProtocol(base.TestBase): config_file = "websocket_mongodb.conf" def setUp(self): super(TestMessagingProtocol, self).setUp() self.protocol = self.transport.factory() self.project_id = 'protocol-test' self.headers = { 'Client-ID': uuidutils.generate_uuid(), 'X-Project-ID': self.project_id } def test_on_message_with_invalid_input(self): payload = '\ufeff' send_mock = mock.Mock() self.protocol.sendMessage = send_mock self.protocol.onMessage(payload, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(400, resp['headers']['status']) payload = "123" self.protocol.onMessage(payload, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(400, resp['headers']['status']) def test_on_message_with_invalid_input_binary(self): dumps, loads, create_req = test_utils.get_pack_tools(binary=True) send_mock = mock.Mock() self.protocol.sendMessage = send_mock # Test error response, when the request can't be deserialized. req = "123" self.protocol.onMessage(req, True) resp = loads(send_mock.call_args[0][0]) self.assertEqual(400, resp['headers']['status']) self.assertIn('Can\'t decode binary', resp['body']['error']) # Test error response, when request body is not a dictionary. req = dumps("Apparently, I'm not a dictionary") self.protocol.onMessage(req, True) resp = loads(send_mock.call_args[0][0]) self.assertEqual(400, resp['headers']['status']) self.assertIn('Unexpected body type. Expected dict', resp['body']['error']) # Test error response, when validation fails. 
action = 'queue_glorify' body = {} req = create_req(action, body, self.headers) self.protocol.onMessage(req, True) resp = loads(send_mock.call_args[0][0]) self.assertEqual(400, resp['headers']['status']) self.assertEqual('queue_glorify is not a valid action', resp['body']['error']) @ddt.data(True, False) def test_on_message_with_input_in_different_format(self, in_binary): dumps, loads, create_req = test_utils.get_pack_tools(binary=in_binary) action = 'queue_get' body = {'queue_name': 'beautiful-non-existing-queue'} req = create_req(action, body, self.headers) send_mock = mock.Mock() self.protocol.sendMessage = send_mock self.protocol.onMessage(req, in_binary) arg = send_mock.call_args[0][0] resp = loads(arg) self.assertEqual(200, resp['headers']['status']) @mock.patch.object(zaqar.transport.websocket.factory, 'ProtocolFactory') def test_ipv6_escaped(self, mock_pf): delattr(self.transport, '_lazy_factory') self.transport.factory() self.assertEqual('ws://127.0.0.1:9000', mock_pf.mock_calls[0][1][0]) mock_pf.reset_mock() with mock.patch.object(self.transport._ws_conf, 'bind', "1::4"): delattr(self.transport, '_lazy_factory') self.transport.factory() self.assertEqual('ws://[1::4]:9000', mock_pf.mock_calls[0][1][0]) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/transport/websocket/utils.py0000664000175100017510000000331415033040005023732 0ustar00mylesmyles# Copyright (c) 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. import functools import msgpack from oslo_serialization import jsonutils def create_request(action, body, headers): return jsonutils.dumps({"action": action, "body": body, "headers": headers}) def create_binary_request(action, body, headers): return msgpack.packb({"action": action, "body": body, "headers": headers}) def get_pack_tools(binary=None): """Get serialization tools for testing websocket transport. :param bool binary: type of serialization tools. True: binary (MessagePack) tools. False: text (JSON) tools. 
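    Illustrative usage (``headers`` here stands for any dict carrying the
    usual ``Client-ID`` / ``X-Project-ID`` keys)::

        dumps, loads, create_req = get_pack_tools(binary=False)
        req = create_req('queue_get', {'queue_name': 'q'}, headers)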
:returns: set of serialization tools needed for testing websocket transport: (dumps, loads, create_request_function) :rtype: tuple """ if binary is None: raise Exception("binary param is unspecified") if binary: dumps = msgpack.Packer(use_bin_type=False).pack loads = functools.partial(msgpack.unpackb) create_request_function = create_binary_request else: dumps = jsonutils.dumps loads = jsonutils.loads create_request_function = create_request return dumps, loads, create_request_function ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5800135 zaqar-20.1.0.dev29/zaqar/tests/unit/transport/websocket/v2/0000775000175100017510000000000015033040026022551 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/transport/websocket/v2/__init__.py0000664000175100017510000000000015033040005024645 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/transport/websocket/v2/test_auth.py0000664000175100017510000002427615033040005025133 0ustar00mylesmyles# Copyright (c) 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from unittest import mock import ddt from keystonemiddleware import auth_token from oslo_serialization import jsonutils from oslo_utils import uuidutils from zaqar.common import consts from zaqar.common import urls from zaqar.tests.unit.transport.websocket import base from zaqar.tests.unit.transport.websocket import utils as test_utils @ddt.ddt class AuthTest(base.V2Base): config_file = "websocket_mongodb_keystone_auth.conf" def setUp(self): super(AuthTest, self).setUp() self.protocol = self.transport.factory() self.protocol.factory._secret_key = 'secret' self.default_message_ttl = 3600 self.project_id = '7e55e1a7e' self.headers = { 'Client-ID': uuidutils.generate_uuid(), 'X-Project-ID': self.project_id } auth_mock = mock.patch.object(auth_token.AuthProtocol, '__call__') self.addCleanup(auth_mock.stop) self.auth = auth_mock.start() self.env = {'keystone.token_info': { 'token': {'expires_at': '2035-08-05T15:16:33.603700+00:00'}}} def test_post(self): headers = self.headers.copy() headers['X-Auth-Token'] = 'mytoken1' req = jsonutils.dumps({'action': 'authenticate', 'headers': headers}) msg_mock = mock.patch.object(self.protocol, 'sendMessage') self.addCleanup(msg_mock.stop) msg_mock = msg_mock.start() self.protocol.onMessage(req, False) # Didn't send the response yet self.assertEqual(0, msg_mock.call_count) self.assertEqual(1, self.auth.call_count) responses = [] self.protocol._auth_start(self.env, lambda x, y: responses.append(x)) self.assertEqual(1, len(responses)) self.assertEqual('200 OK', responses[0]) # Check that the env is available to future requests req = jsonutils.dumps({'action': consts.MESSAGE_LIST, 'body': {'queue_name': 'myqueue'}, 'headers': self.headers}) process_request = mock.patch.object(self.protocol._handler, 'process_request').start() 
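        # Stub the handler so no real storage call is made; the canned 200
        # response below lets the test focus on whether the authenticated
        # env is propagated to subsequent requests.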
process_request.return_value = self.protocol._handler.create_response( 200, {}) self.protocol.onMessage(req, False) self.assertEqual(1, process_request.call_count) self.assertEqual(self.env, process_request.call_args[0][0]._env) def test_post_between_auth(self): headers = self.headers.copy() headers['X-Auth-Token'] = 'mytoken1' req = jsonutils.dumps({'action': 'authenticate', 'headers': headers}) msg_mock = mock.patch.object(self.protocol, 'sendMessage') self.addCleanup(msg_mock.stop) msg_mock = msg_mock.start() self.protocol.onMessage(req, False) req = test_utils.create_request(consts.QUEUE_LIST, {}, self.headers) self.protocol.onMessage(req, False) self.assertEqual(1, msg_mock.call_count) resp = jsonutils.loads(msg_mock.call_args[0][0]) self.assertEqual(403, resp['headers']['status']) def test_failed_auth(self): msg_mock = mock.patch.object(self.protocol, 'sendMessage') self.addCleanup(msg_mock.stop) msg_mock = msg_mock.start() self.protocol._auth_in_binary = False self.protocol._auth_response('401 error', 'Failed') self.assertEqual(1, msg_mock.call_count) resp = jsonutils.loads(msg_mock.call_args[0][0]) self.assertEqual(401, resp['headers']['status']) self.assertEqual('authenticate', resp['request']['action']) def test_reauth(self): headers = self.headers.copy() headers['X-Auth-Token'] = 'mytoken1' req = jsonutils.dumps({'action': 'authenticate', 'headers': headers}) msg_mock = mock.patch.object(self.protocol, 'sendMessage') self.addCleanup(msg_mock.stop) msg_mock = msg_mock.start() self.protocol.onMessage(req, False) self.assertEqual(1, self.auth.call_count) responses = [] self.protocol._auth_start(self.env, lambda x, y: responses.append(x)) self.assertEqual(1, len(responses)) handle = self.protocol._deauth_handle self.assertIsNotNone(handle) headers = self.headers.copy() headers['X-Auth-Token'] = 'mytoken2' req = jsonutils.dumps({'action': 'authenticate', 'headers': headers}) self.protocol.onMessage(req, False) self.protocol._auth_start(self.env, lambda x, y: responses.append(x)) self.assertNotEqual(handle, self.protocol._deauth_handle) self.assertEqual(2, len(responses)) self.assertIn('cancelled', repr(handle)) self.assertNotIn('cancelled', repr(self.protocol._deauth_handle)) def test_reauth_after_auth_failure(self): headers = self.headers.copy() headers['X-Auth-Token'] = 'wrong_token' req = jsonutils.dumps({'action': 'authenticate', 'headers': headers}) msg_mock = mock.patch.object(self.protocol, 'sendMessage') self.addCleanup(msg_mock.stop) msg_mock = msg_mock.start() # After an authentication failure, the _auth_app will be None and the # request will raise a 401 error. self.protocol.onMessage(req, False) self.protocol._auth_response('401 error', 'Failed') resp = jsonutils.loads(msg_mock.call_args[0][0]) self.assertEqual(401, resp['headers']['status']) self.assertEqual('authenticate', resp['request']['action']) self.assertIsNone(self.protocol._auth_app) # Try to authenticate again; "onMessage" should not return 403 because # the _auth_app was cleared after the auth failure. 
headers['X-Auth-Token'] = 'mytoken' req = jsonutils.dumps({'action': 'authenticate', 'headers': headers}) self.protocol.onMessage(req, False) self.protocol._auth_response('200 OK', 'authenticate success') resp = jsonutils.loads(msg_mock.call_args[0][0]) self.assertEqual(200, resp['headers']['status']) @ddt.data(True, False) def test_auth_response_serialization_format(self, in_binary): dumps, loads, create_req = test_utils.get_pack_tools(binary=in_binary) headers = self.headers.copy() headers['X-Auth-Token'] = 'mytoken1' req = create_req("authenticate", {}, headers) msg_mock = mock.patch.object(self.protocol, 'sendMessage') self.addCleanup(msg_mock.stop) msg_mock = msg_mock.start() # Depending on onMessage method's second argument, auth response should # be in binary or text format. self.protocol.onMessage(req, in_binary) self.assertEqual(in_binary, self.protocol._auth_in_binary) self.protocol._auth_response('401 error', 'Failed') self.assertEqual(1, msg_mock.call_count) arg = msg_mock.call_args[0][0] resp = loads(arg) self.assertEqual(401, resp['headers']['status']) def test_signed_url(self): send_mock = mock.Mock() self.protocol.sendMessage = send_mock data = urls.create_signed_url( 'secret', ['/v2/queues/myqueue/messages'], project=self.project_id, methods=['GET']) headers = self.headers.copy() headers.update({ 'URL-Signature': data['signature'], 'URL-Expires': data['expires'], 'URL-Methods': ['GET'], 'URL-Paths': ['/v2/queues/myqueue/messages'] }) req = jsonutils.dumps({'action': consts.MESSAGE_LIST, 'body': {'queue_name': 'myqueue'}, 'headers': headers}) self.protocol.onMessage(req, False) self.assertEqual(1, send_mock.call_count) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(200, resp['headers']['status']) def test_signed_url_wrong_queue(self): send_mock = mock.Mock() self.protocol.sendMessage = send_mock data = urls.create_signed_url( 'secret', ['/v2/queues/myqueue/messages'], project=self.project_id, methods=['GET']) headers = self.headers.copy() headers.update({ 'URL-Signature': data['signature'], 'URL-Expires': data['expires'], 'URL-Methods': ['GET'], 'URL-Paths': ['/v2/queues/otherqueue/messages'] }) req = jsonutils.dumps({'action': consts.MESSAGE_LIST, 'body': {'queue_name': 'otherqueue'}, 'headers': headers}) self.protocol.onMessage(req, False) self.assertEqual(1, send_mock.call_count) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(403, resp['headers']['status']) def test_signed_url_wrong_method(self): send_mock = mock.Mock() self.protocol.sendMessage = send_mock data = urls.create_signed_url( 'secret', ['/v2/queues/myqueue/messages'], project=self.project_id, methods=['GET']) headers = self.headers.copy() headers.update({ 'URL-Signature': data['signature'], 'URL-Expires': data['expires'], 'URL-Methods': ['GET'], 'URL-Paths': ['/v2/queues/myqueue/messages'] }) req = jsonutils.dumps({'action': consts.MESSAGE_DELETE, 'body': {'queue_name': 'myqueue', 'message_id': '123'}, 'headers': headers}) self.protocol.onMessage(req, False) self.assertEqual(1, send_mock.call_count) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(403, resp['headers']['status']) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/transport/websocket/v2/test_claims.py0000664000175100017510000003777215033040005025447 0ustar00mylesmyles# Copyright (c) 2015 Red Hat, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. from unittest import mock import ddt from oslo_serialization import jsonutils from oslo_utils import timeutils from oslo_utils import uuidutils from zaqar.common import consts from zaqar.tests.unit.transport.websocket import base from zaqar.tests.unit.transport.websocket import utils as test_utils @ddt.ddt class ClaimsBaseTest(base.V1_1Base): config_file = "websocket_mongodb.conf" def setUp(self): super(ClaimsBaseTest, self).setUp() self.protocol = self.transport.factory() self.defaults = self.api.get_defaults() self.project_id = '7e55e1a7e' self.headers = { 'Client-ID': uuidutils.generate_uuid(), 'X-Project-ID': self.project_id } action = consts.QUEUE_CREATE body = {"queue_name": "skittle"} req = test_utils.create_request(action, body, self.headers) with mock.patch.object(self.protocol, 'sendMessage') as msg_mock: self.protocol.onMessage(req, False) resp = jsonutils.loads(msg_mock.call_args[0][0]) self.assertIn(resp['headers']['status'], [201, 204]) action = consts.MESSAGE_POST body = {"queue_name": "skittle", "messages": [ {'body': 239, 'ttl': 300}, {'body': {'key_1': 'value_1'}, 'ttl': 300}, {'body': [1, 3], 'ttl': 300}, {'body': 439, 'ttl': 300}, {'body': {'key_2': 'value_2'}, 'ttl': 300}, {'body': ['a', 'b'], 'ttl': 300}, {'body': 639, 'ttl': 300}, {'body': {'key_3': 'value_3'}, 'ttl': 300}, {'body': ["aa", "bb"], 'ttl': 300}] } send_mock = mock.Mock() self.protocol.sendMessage = send_mock req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(201, resp['headers']['status']) def tearDown(self): super(ClaimsBaseTest, self).tearDown() action = consts.QUEUE_DELETE body = {'queue_name': 'skittle'} send_mock = mock.Mock() self.protocol.sendMessage = send_mock req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(204, resp['headers']['status']) @ddt.data('[', '[]', '.', '"fail"') def test_bad_claim(self, doc): action = consts.CLAIM_CREATE body = doc send_mock = mock.Mock() self.protocol.sendMessage = send_mock req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(400, resp['headers']['status']) action = consts.CLAIM_UPDATE body = doc req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(400, resp['headers']['status']) def test_exceeded_claim(self): action = consts.CLAIM_CREATE body = {"queue_name": "skittle", "ttl": 100, "grace": 60, "limit": 21} send_mock = mock.Mock() self.protocol.sendMessage = send_mock req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(400, resp['headers']['status']) @ddt.data((-1, -1), (59, 60), (60, 
59), (60, 43201), (43201, 60)) def test_unacceptable_ttl_or_grace(self, ttl_grace): ttl, grace = ttl_grace action = consts.CLAIM_CREATE body = {"queue_name": "skittle", "ttl": ttl, "grace": grace} send_mock = mock.Mock() self.protocol.sendMessage = send_mock req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(400, resp['headers']['status']) @ddt.data(-1, 59, 43201) def test_unacceptable_new_ttl(self, ttl): claim = self._get_a_claim() action = consts.CLAIM_UPDATE body = {"queue_name": "skittle", "claim_id": claim['body']['claim_id'], "ttl": ttl} send_mock = mock.Mock() self.protocol.sendMessage = send_mock req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(400, resp['headers']['status']) def test_default_ttl_and_grace(self): action = consts.CLAIM_CREATE body = {"queue_name": "skittle"} send_mock = mock.Mock() self.protocol.sendMessage = send_mock req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(201, resp['headers']['status']) action = consts.CLAIM_GET body = {"queue_name": "skittle", "claim_id": resp['body']['claim_id']} req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(200, resp['headers']['status']) self.assertEqual(self.defaults.claim_ttl, resp['body']['ttl']) def test_lifecycle(self): # First, claim some messages action = consts.CLAIM_CREATE body = {"queue_name": "skittle", "ttl": 100, "grace": 60} send_mock = mock.Mock() self.protocol.sendMessage = send_mock req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(201, resp['headers']['status']) claimed_messages = resp['body']['messages'] claim_id = resp['body']['claim_id'] # No more messages to claim body = {"queue_name": "skittle", "ttl": 100, "grace": 60} req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(204, resp['headers']['status']) # Listing messages, by default, won't include claimed, will echo action = consts.MESSAGE_LIST body = {"queue_name": "skittle", "echo": True} req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(200, resp['headers']['status']) self.assertEqual([], resp['body']['messages']) # Listing messages, by default, won't include claimed, won't echo body = {"queue_name": "skittle", "echo": False} req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(200, resp['headers']['status']) self.assertEqual([], resp['body']['messages']) # List messages, include_claimed, but don't echo body = {"queue_name": "skittle", "include_claimed": True, "echo": False} req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(200, resp['headers']['status']) self.assertEqual(resp['body']['messages'], []) # List messages with a different client-id and echo=false. 
# Should return some messages body = {"queue_name": "skittle", "echo": False} headers = { 'Client-ID': uuidutils.generate_uuid(), 'X-Project-ID': self.project_id } req = test_utils.create_request(action, body, headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(200, resp['headers']['status']) # Include claimed messages this time, and echo body = {"queue_name": "skittle", "include_claimed": True, "echo": True} req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(200, resp['headers']['status']) self.assertEqual(len(claimed_messages), len(resp['body']['messages'])) message_id_1 = resp['body']['messages'][0]['id'] message_id_2 = resp['body']['messages'][1]['id'] # Try to delete the message without submitting a claim_id action = consts.MESSAGE_DELETE body = {"queue_name": "skittle", "message_id": message_id_1} req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(403, resp['headers']['status']) # Delete the message and its associated claim body = {"queue_name": "skittle", "message_id": message_id_1, "claim_id": claim_id} req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(204, resp['headers']['status']) # Try to get it from the wrong project headers = { 'Client-ID': uuidutils.generate_uuid(), 'X-Project-ID': 'someproject' } action = consts.MESSAGE_GET body = {"queue_name": "skittle", "message_id": message_id_2} req = test_utils.create_request(action, body, headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(404, resp['headers']['status']) # Get the message action = consts.MESSAGE_GET body = {"queue_name": "skittle", "message_id": message_id_2} req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(200, resp['headers']['status']) # Update the claim creation = timeutils.utcnow() action = consts.CLAIM_UPDATE body = {"queue_name": "skittle", "ttl": 60, "grace": 60, "claim_id": claim_id} req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(204, resp['headers']['status']) # Get the claimed messages (again) action = consts.CLAIM_GET body = {"queue_name": "skittle", "claim_id": claim_id} req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) query = timeutils.utcnow() resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(200, resp['headers']['status']) self.assertEqual(60, resp['body']['ttl']) message_id_3 = resp['body']['messages'][0]['id'] estimated_age = timeutils.delta_seconds(creation, query) # The claim's age should be 0 at this moment. But in some unexpected # cases, such as a slow test run, the age may be larger than 0. Just skip # asserting if so. 
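        # (When the age is still 0, the wall-clock estimate computed above
        # must be the strictly greater value, which is what is asserted.)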
if resp['body']['age'] == 0: self.assertGreater(estimated_age, resp['body']['age']) # Delete the claim action = consts.CLAIM_DELETE body = {"queue_name": "skittle", "claim_id": claim_id} req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(204, resp['headers']['status']) # Try to delete a message with an invalid claim ID action = consts.MESSAGE_DELETE body = {"queue_name": "skittle", "message_id": message_id_3, "claim_id": claim_id} req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(400, resp['headers']['status']) # Make sure it wasn't deleted! action = consts.MESSAGE_GET body = {"queue_name": "skittle", "message_id": message_id_2} req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(200, resp['headers']['status']) # Try to get a claim that doesn't exist action = consts.CLAIM_GET body = {"queue_name": "skittle", "claim_id": claim_id} req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(404, resp['headers']['status']) # Try to update a claim that doesn't exist action = consts.CLAIM_UPDATE body = {"queue_name": "skittle", "ttl": 60, "grace": 60, "claim_id": claim_id} req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(404, resp['headers']['status']) def test_post_claim_nonexistent_queue(self): action = consts.CLAIM_CREATE body = {"queue_name": "nonexistent", "ttl": 100, "grace": 60} send_mock = mock.Mock() self.protocol.sendMessage = send_mock req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(204, resp['headers']['status']) def test_get_claim_nonexistent_queue(self): action = consts.CLAIM_GET body = {"queue_name": "nonexistent", "claim_id": "aaabbbba"} send_mock = mock.Mock() self.protocol.sendMessage = send_mock req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(404, resp['headers']['status']) def _get_a_claim(self): action = consts.CLAIM_CREATE body = {"queue_name": "skittle", "ttl": 100, "grace": 60} send_mock = mock.Mock() self.protocol.sendMessage = send_mock req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(201, resp['headers']['status']) return resp ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/transport/websocket/v2/test_messages.py0000664000175100017510000005066215033040005025777 0ustar00mylesmyles# Copyright (c) 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime from unittest import mock import ddt from oslo_serialization import jsonutils from oslo_utils import timeutils from oslo_utils import uuidutils from testtools import matchers from zaqar.common import consts from zaqar.tests.unit.transport.websocket import base from zaqar.tests.unit.transport.websocket import utils as test_utils from zaqar.transport import validation @ddt.ddt class MessagesBaseTest(base.V2Base): config_file = "websocket_mongodb.conf" def setUp(self): super(MessagesBaseTest, self).setUp() self.protocol = self.transport.factory() self.default_message_ttl = 3600 self.project_id = '7e55e1a7e' self.headers = { 'Client-ID': uuidutils.generate_uuid(), 'X-Project-ID': self.project_id } body = {"queue_name": "kitkat"} req = test_utils.create_request(consts.QUEUE_CREATE, body, self.headers) with mock.patch.object(self.protocol, 'sendMessage') as msg_mock: self.protocol.onMessage(req, False) resp = jsonutils.loads(msg_mock.call_args[0][0]) self.assertIn(resp['headers']['status'], [201, 204]) def tearDown(self): super(MessagesBaseTest, self).tearDown() body = {"queue_name": "kitkat"} send_mock = mock.Mock() self.protocol.sendMessage = send_mock req = test_utils.create_request(consts.QUEUE_DELETE, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(204, resp['headers']['status']) def _test_post(self, sample_messages, in_binary=False): body = {"queue_name": "kitkat", "messages": sample_messages} send_mock = mock.Mock() self.protocol.sendMessage = send_mock dumps, loads, create_req = test_utils.get_pack_tools(binary=in_binary) req = create_req(consts.MESSAGE_POST, body, self.headers) self.protocol.onMessage(req, in_binary) arg = send_mock.call_args[0][0] resp = loads(arg) self.assertEqual(201, resp['headers']['status']) self.msg_ids = resp['body']['message_ids'] self.assertEqual(len(sample_messages), len(self.msg_ids)) lookup = dict([(m['ttl'], m['body']) for m in sample_messages]) # Test GET on the message resource directly # NOTE(cpp-cabrera): force the passing of time to age a message timeutils_utcnow = 'oslo_utils.timeutils.utcnow' now = timeutils.utcnow() + datetime.timedelta(seconds=10) with mock.patch(timeutils_utcnow) as mock_utcnow: mock_utcnow.return_value = now for msg_id in self.msg_ids: headers = self.headers.copy() headers['X-Project-ID'] = '777777' # Wrong project ID action = consts.MESSAGE_GET body = {"queue_name": "kitkat", "message_id": msg_id} req = create_req(action, body, headers) self.protocol.onMessage(req, in_binary) arg = send_mock.call_args[0][0] resp = loads(arg) self.assertEqual(404, resp['headers']['status']) # Correct project ID req = create_req(action, body, self.headers) self.protocol.onMessage(req, in_binary) arg = send_mock.call_args[0][0] resp = loads(arg) self.assertEqual(200, resp['headers']['status']) # Check message properties message = resp['body']['messages'] self.assertEqual(lookup[message['ttl']], message['body']) self.assertEqual(msg_id, message['id']) # no negative age # NOTE(cpp-cabrera): testtools lacks # GreaterThanEqual on py26 
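                # (GreaterThan(-1) is used as a stand-in for an age >= 0
                # check.)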
self.assertThat(message['age'], matchers.GreaterThan(-1)) # Test bulk GET action = consts.MESSAGE_GET_MANY body = {"queue_name": "kitkat", "message_ids": self.msg_ids} req = create_req(action, body, self.headers) self.protocol.onMessage(req, in_binary) arg = send_mock.call_args[0][0] resp = loads(arg) self.assertEqual(200, resp['headers']['status']) expected_ttls = set(m['ttl'] for m in sample_messages) actual_ttls = set(m['ttl'] for m in resp['body']['messages']) self.assertFalse(expected_ttls - actual_ttls) actual_ids = set(m['id'] for m in resp['body']['messages']) self.assertFalse(set(self.msg_ids) - actual_ids) def test_exceeded_payloads(self): # Get a valid message id resp = self._post_messages("kitkat") msg_id = resp['body']['message_ids'] # Bulk GET restriction get_msg_ids = msg_id * 21 action = consts.MESSAGE_GET_MANY body = {"queue_name": "kitkat", "message_ids": get_msg_ids} send_mock = mock.Mock() self.protocol.sendMessage = send_mock req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(400, resp['headers']['status']) # Listing restriction body['limit'] = 21 req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(400, resp['headers']['status']) # Bulk deletion restriction del_msg_ids = msg_id * 22 action = consts.MESSAGE_GET_MANY body = {"queue_name": "kitkat", "message_ids": del_msg_ids} req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(400, resp['headers']['status']) @ddt.data(True, False) def test_post_single(self, in_binary): sample_messages = [ {'body': {'key': 'value'}, 'ttl': 200}, ] self._test_post(sample_messages, in_binary=in_binary) @ddt.data(True, False) def test_post_multiple(self, in_binary): sample_messages = [ {'body': 239, 'ttl': 100}, {'body': {'key': 'value'}, 'ttl': 200}, {'body': [1, 3], 'ttl': 300}, ] self._test_post(sample_messages, in_binary=in_binary) def test_post_optional_ttl(self): messages = [{'body': 239}, {'body': {'key': 'value'}, 'ttl': 200}] action = consts.MESSAGE_POST body = {"queue_name": "kitkat", "messages": messages} req = test_utils.create_request(action, body, self.headers) send_mock = mock.Mock() self.protocol.sendMessage = send_mock self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(201, resp['headers']['status']) msg_id = resp['body']['message_ids'][0] action = consts.MESSAGE_GET body = {"queue_name": "kitkat", "message_id": msg_id} req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(200, resp['headers']['status']) self.assertEqual(self.default_message_ttl, resp['body']['messages']['ttl']) def test_post_to_non_ascii_queue(self): queue_name = 'non-ascii-n\u0153me' resp = self._post_messages(queue_name) self.assertEqual(400, resp['headers']['status']) def test_post_with_long_queue_name(self): # NOTE(kgriffs): This test verifies that routes with # embedded queue name params go through the validation # hook, regardless of the target resource. 
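        # A name of exactly QUEUE_NAME_MAX_LEN characters should be accepted,
        # while one character more should be rejected with a 400.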
queue_name = 'v' * validation.QUEUE_NAME_MAX_LEN resp = self._post_messages(queue_name) self.assertEqual(201, resp['headers']['status']) queue_name += 'v' resp = self._post_messages(queue_name) self.assertEqual(400, resp['headers']['status']) def test_post_to_missing_queue(self): queue_name = 'nonexistent' resp = self._post_messages(queue_name) self.assertEqual(201, resp['headers']['status']) def test_post_invalid_ttl(self): sample_messages = [ {'body': {'key': 'value'}, 'ttl': '200'}, ] action = consts.MESSAGE_POST body = {"queue_name": "kitkat", "messages": sample_messages} send_mock = mock.patch.object(self.protocol, 'sendMessage') self.addCleanup(send_mock.stop) send_mock = send_mock.start() req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(400, resp['headers']['status']) self.assertEqual( 'Bad request. The value of the "ttl" field must be a int.', resp['body']['exception']) def test_post_no_body(self): sample_messages = [ {'ttl': 200}, ] action = consts.MESSAGE_POST body = {"queue_name": "kitkat", "messages": sample_messages} send_mock = mock.patch.object(self.protocol, 'sendMessage') self.addCleanup(send_mock.stop) send_mock = send_mock.start() req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(400, resp['headers']['status']) self.assertEqual( 'Bad request. Missing "body" field.', resp['body']['exception']) def test_get_from_missing_queue(self): action = consts.MESSAGE_LIST body = {"queue_name": "anothernonexistent"} req = test_utils.create_request(action, body, self.headers) send_mock = mock.Mock() self.protocol.sendMessage = send_mock self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(200, resp['headers']['status']) self.assertEqual([], resp['body']['messages']) @ddt.data('', '0xdeadbeef', '550893e0-2b6e-11e3-835a-5cf9dd72369') def test_bad_client_id(self, text_id): action = consts.MESSAGE_POST body = { "queue_name": "kinder", "messages": [{"ttl": 60, "body": ""}] } headers = { 'Client-ID': text_id, 'X-Project-ID': self.project_id } send_mock = mock.Mock() self.protocol.sendMessage = send_mock req = test_utils.create_request(action, body, headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(400, resp['headers']['status']) action = consts.MESSAGE_GET body = { "queue_name": "kinder", "limit": 3, "echo": True } req = test_utils.create_request(action, body, headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(400, resp['headers']['status']) @ddt.data(None, '[', '[]', '{}', '.') def test_post_bad_message(self, document): action = consts.MESSAGE_POST body = { "queue_name": "kinder", "messages": document } send_mock = mock.Mock() self.protocol.sendMessage = send_mock req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(400, resp['headers']['status']) @ddt.data(-1, 59, 1209601) def test_unacceptable_ttl(self, ttl): action = consts.MESSAGE_POST body = {"queue_name": "kinder", "messages": [{"ttl": ttl, "body": ""}]} send_mock = mock.Mock() self.protocol.sendMessage = send_mock req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = 
jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(400, resp['headers']['status']) def test_exceeded_message_posting(self): # Total (raw request) size document = [{'body': "some body", 'ttl': 100}] * 8000 action = consts.MESSAGE_POST body = { "queue_name": "kinder", "messages": document } send_mock = mock.Mock() self.protocol.sendMessage = send_mock req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(400, resp['headers']['status']) @ddt.data('{"overflow": 9223372036854775808}', '{"underflow": -9223372036854775809}') def test_unsupported_json(self, document): action = consts.MESSAGE_POST body = { "queue_name": "fizz", "messages": document } send_mock = mock.Mock() self.protocol.sendMessage = send_mock req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(400, resp['headers']['status']) def test_delete(self): resp = self._post_messages("tofi") msg_id = resp['body']['message_ids'][0] action = consts.MESSAGE_GET body = {"queue_name": "tofi", "message_id": msg_id} send_mock = mock.Mock() self.protocol.sendMessage = send_mock req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(200, resp['headers']['status']) # Delete the message action = consts.MESSAGE_DELETE req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(204, resp['headers']['status']) # Get the now-deleted message action = consts.MESSAGE_GET req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(404, resp['headers']['status']) # Safe to delete non-existing ones action = consts.MESSAGE_DELETE req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(204, resp['headers']['status']) def test_bulk_delete(self): resp = self._post_messages("nerds", repeat=5) msg_ids = resp['body']['message_ids'] action = consts.MESSAGE_DELETE_MANY body = {"queue_name": "nerds", "message_ids": msg_ids} send_mock = mock.Mock() self.protocol.sendMessage = send_mock req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(204, resp['headers']['status']) action = consts.MESSAGE_GET req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(400, resp['headers']['status']) # Safe to delete non-existing ones action = consts.MESSAGE_DELETE_MANY req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(204, resp['headers']['status']) # Even after the queue is gone action = consts.QUEUE_DELETE body = {"queue_name": "nerds"} req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(204, resp['headers']['status']) action = consts.MESSAGE_DELETE_MANY body = {"queue_name": "nerds", "message_ids": msg_ids} req = 
test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(204, resp['headers']['status']) def test_pop_delete(self): self._post_messages("kitkat", repeat=5) action = consts.MESSAGE_DELETE_MANY body = {"queue_name": "kitkat", "pop": 2} send_mock = mock.Mock() self.protocol.sendMessage = send_mock req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(200, resp['headers']['status']) self.assertEqual(2, len(resp['body']['messages'])) self.assertEqual(239, resp['body']['messages'][0]['body']) self.assertEqual(239, resp['body']['messages'][1]['body']) def test_get_nonexistent_message_404s(self): action = consts.MESSAGE_GET body = {"queue_name": "notthere", "message_id": "a"} send_mock = mock.Mock() self.protocol.sendMessage = send_mock req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(404, resp['headers']['status']) def test_get_multiple_invalid_messages_404s(self): action = consts.MESSAGE_GET_MANY body = {"queue_name": "notnotthere", "message_ids": ["a", "b", "c"]} send_mock = mock.Mock() self.protocol.sendMessage = send_mock req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(200, resp['headers']['status']) def test_delete_multiple_invalid_messages_204s(self): action = consts.MESSAGE_DELETE body = {"queue_name": "yetanothernotthere", "message_ids": ["a", "b", "c"]} send_mock = mock.Mock() self.protocol.sendMessage = send_mock req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) resp = jsonutils.loads(send_mock.call_args[0][0]) self.assertEqual(400, resp['headers']['status']) def _post_messages(self, queue_name, repeat=1): messages = [{'body': 239, 'ttl': 300}] * repeat action = consts.MESSAGE_POST body = {"queue_name": queue_name, "messages": messages} send_mock = mock.Mock() self.protocol.sendMessage = send_mock req = test_utils.create_request(action, body, self.headers) self.protocol.onMessage(req, False) return jsonutils.loads(send_mock.call_args[0][0]) def test_invalid_request(self): send_mock = mock.Mock() self.protocol.sendMessage = send_mock self.protocol.onMessage('foo', False) self.assertEqual(1, send_mock.call_count) response = jsonutils.loads(send_mock.call_args[0][0]) self.assertIn('error', response['body']) self.assertEqual({'status': 400}, response['headers']) self.assertEqual( {'action': None, 'api': 'v2', 'body': {}, 'headers': {}}, response['request']) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/transport/websocket/v2/test_queue_lifecycle.py0000664000175100017510000005416415033040005027334 0ustar00mylesmyles# Copyright (c) 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations under # the License. from unittest import mock import ddt from oslo_serialization import jsonutils from oslo_utils import uuidutils from zaqar.common import consts from zaqar.storage import errors as storage_errors from zaqar import tests as testing from zaqar.tests.unit.transport.websocket import base from zaqar.tests.unit.transport.websocket import utils as test_utils @ddt.ddt class QueueLifecycleBaseTest(base.V2Base): config_file = "websocket_mongodb.conf" def setUp(self): super(QueueLifecycleBaseTest, self).setUp() self.protocol = self.transport.factory() def test_empty_project_id(self): action = consts.QUEUE_CREATE body = {"queue_name": "kitkat", "metadata": { "key": { "key2": "value", "key3": [1, 2, 3, 4, 5]} } } headers = {'Client-ID': uuidutils.generate_uuid()} req = test_utils.create_request(action, body, headers) def validator(resp, isBinary): resp = jsonutils.loads(resp) self.assertEqual(400, resp['headers']['status']) with mock.patch.object(self.protocol, 'sendMessage') as msg_mock: msg_mock.side_effect = validator self.protocol.onMessage(req, False) @ddt.data('480924', 'foo') def test_basics_thoroughly(self, project_id): # Stats are empty - queue not created yet action = consts.QUEUE_GET_STATS body = {"queue_name": "gummybears"} headers = { 'Client-ID': uuidutils.generate_uuid(), 'X-Project-ID': project_id } send_mock = mock.patch.object(self.protocol, 'sendMessage') self.addCleanup(send_mock.stop) sender = send_mock.start() req = test_utils.create_request(action, body, headers) def validator(resp, isBinary): resp = jsonutils.loads(resp) self.assertEqual(404, resp['headers']['status']) sender.side_effect = validator self.protocol.onMessage(req, False) # Create action = consts.QUEUE_CREATE body = {"queue_name": "gummybears", "metadata": { "key": { "key2": "value", "key3": [1, 2, 3, 4, 5]}, "messages": {"ttl": 600}, } } req = test_utils.create_request(action, body, headers) def validator(resp, isBinary): resp = jsonutils.loads(resp) self.assertEqual(201, resp['headers']['status']) sender.side_effect = validator self.protocol.onMessage(req, False) # Fetch metadata action = consts.QUEUE_GET body = {"queue_name": "gummybears"} meta = {"messages": {"ttl": 600}, "key": { "key2": "value", "key3": [1, 2, 3, 4, 5]} } req = test_utils.create_request(action, body, headers) def validator(resp, isBinary): resp = jsonutils.loads(resp) self.assertEqual(200, resp['headers']['status']) self.assertEqual(meta, resp['body']) sender.side_effect = validator self.protocol.onMessage(req, False) # Stats empty queue action = consts.QUEUE_GET_STATS body = {"queue_name": "gummybears"} req = test_utils.create_request(action, body, headers) def validator(resp, isBinary): resp = jsonutils.loads(resp) self.assertEqual(200, resp['headers']['status']) sender.side_effect = validator self.protocol.onMessage(req, False) # Delete action = consts.QUEUE_DELETE body = {"queue_name": "gummybears"} req = test_utils.create_request(action, body, headers) def validator(resp, isBinary): resp = jsonutils.loads(resp) self.assertEqual(204, resp['headers']['status']) sender.side_effect = validator self.protocol.onMessage(req, False) # Get non-existent stats action = consts.QUEUE_GET_STATS body = {"queue_name": "gummybears"} req = test_utils.create_request(action, body, headers) def validator(resp, isBinary): resp = jsonutils.loads(resp) self.assertEqual(404, resp['headers']['status']) sender.side_effect = validator self.protocol.onMessage(req, 
False) def test_name_restrictions(self): headers = { 'Client-ID': uuidutils.generate_uuid(), 'X-Project-ID': 'test-project' } action = consts.QUEUE_CREATE body = {"queue_name": 'marsbar', "metadata": { "key": { "key2": "value", "key3": [1, 2, 3, 4, 5]}, "messages": {"ttl": 600}, } } send_mock = mock.patch.object(self.protocol, 'sendMessage') self.addCleanup(send_mock.stop) sender = send_mock.start() req = test_utils.create_request(action, body, headers) def validator(resp, isBinary): resp = jsonutils.loads(resp) self.assertIn(resp['headers']['status'], [201, 204]) sender.side_effect = validator self.protocol.onMessage(req, False) body["queue_name"] = "m@rsb@r" req = test_utils.create_request(action, body, headers) def validator(resp, isBinary): resp = jsonutils.loads(resp) self.assertEqual(400, resp['headers']['status']) sender.side_effect = validator self.protocol.onMessage(req, False) body["queue_name"] = "marsbar" * 10 req = test_utils.create_request(action, body, headers) self.protocol.onMessage(req, False) def test_project_id_restriction(self): headers = { 'Client-ID': uuidutils.generate_uuid(), 'X-Project-ID': 'test-project' * 30 } action = consts.QUEUE_CREATE body = {"queue_name": 'poptart'} send_mock = mock.patch.object(self.protocol, 'sendMessage') self.addCleanup(send_mock.stop) sender = send_mock.start() req = test_utils.create_request(action, body, headers) def validator(resp, isBinary): resp = jsonutils.loads(resp) self.assertEqual(400, resp['headers']['status']) sender.side_effect = validator self.protocol.onMessage(req, False) headers['X-Project-ID'] = 'test-project' req = test_utils.create_request(action, body, headers) def validator(resp, isBinary): resp = jsonutils.loads(resp) self.assertIn(resp['headers']['status'], [201, 204]) sender.side_effect = validator self.protocol.onMessage(req, False) def test_non_ascii_name(self): test_params = (('/queues/non-ascii-n\u0153me', 'utf-8'), ('/queues/non-ascii-n\xc4me', 'iso8859-1')) headers = { 'Client-ID': uuidutils.generate_uuid(), 'X-Project-ID': 'test-project' * 30 } action = consts.QUEUE_CREATE body = {"queue_name": test_params[0]} send_mock = mock.patch.object(self.protocol, 'sendMessage') self.addCleanup(send_mock.stop) sender = send_mock.start() req = test_utils.create_request(action, body, headers) def validator(resp, isBinary): resp = jsonutils.loads(resp) self.assertEqual(400, resp['headers']['status']) sender.side_effect = validator self.protocol.onMessage(req, False) body = {"queue_name": test_params[1]} req = test_utils.create_request(action, body, headers) self.protocol.onMessage(req, False) def test_no_metadata(self): headers = { 'Client-ID': uuidutils.generate_uuid(), 'X-Project-ID': 'test-project' } action = consts.QUEUE_CREATE body = {"queue_name": "fizbat"} send_mock = mock.patch.object(self.protocol, 'sendMessage') self.addCleanup(send_mock.stop) sender = send_mock.start() req = test_utils.create_request(action, body, headers) def validator(resp, isBinary): resp = jsonutils.loads(resp) self.assertIn(resp['headers']['status'], [201, 204]) sender.side_effect = validator self.protocol.onMessage(req, False) def validator(resp, isBinary): resp = jsonutils.loads(resp) self.assertEqual(204, resp['headers']['status']) sender.side_effect = validator self.protocol.onMessage(req, False) @ddt.data('{', '[]', '.', ' ') def test_bad_metadata(self, meta): headers = { 'Client-ID': uuidutils.generate_uuid(), 'X-Project-ID': 'test-project' * 30 } action = consts.QUEUE_CREATE body = {"queue_name": "fizbat", "metadata": meta} 
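        # None of the ddt-supplied values above is a JSON object, so queue
        # creation is expected to fail validation with a 400.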
send_mock = mock.patch.object(self.protocol, 'sendMessage') self.addCleanup(send_mock.stop) sender = send_mock.start() req = test_utils.create_request(action, body, headers) def validator(resp, isBinary): resp = jsonutils.loads(resp) self.assertEqual(400, resp['headers']['status']) sender.side_effect = validator self.protocol.onMessage(req, False) def test_too_much_metadata(self): headers = { 'Client-ID': uuidutils.generate_uuid(), 'X-Project-ID': 'test-project' } action = consts.QUEUE_CREATE body = {"queue_name": "buttertoffee", "metadata": {"messages": {"ttl": 600}, "padding": "x"} } max_size = self.transport_cfg.max_queue_metadata body["metadata"]["padding"] = "x" * max_size send_mock = mock.patch.object(self.protocol, 'sendMessage') self.addCleanup(send_mock.stop) sender = send_mock.start() req = test_utils.create_request(action, body, headers) def validator(resp, isBinary): resp = jsonutils.loads(resp) self.assertEqual(400, resp['headers']['status']) sender.side_effect = validator self.protocol.onMessage(req, False) def test_way_too_much_metadata(self): headers = { 'Client-ID': uuidutils.generate_uuid(), 'X-Project-ID': 'test-project' } action = consts.QUEUE_CREATE body = {"queue_name": "peppermint", "metadata": {"messages": {"ttl": 600}, "padding": "x"} } max_size = self.transport_cfg.max_queue_metadata body["metadata"]["padding"] = "x" * max_size * 5 send_mock = mock.patch.object(self.protocol, 'sendMessage') self.addCleanup(send_mock.stop) sender = send_mock.start() req = test_utils.create_request(action, body, headers) def validator(resp, isBinary): resp = jsonutils.loads(resp) self.assertEqual(400, resp['headers']['status']) sender.side_effect = validator self.protocol.onMessage(req, False) def test_update_metadata(self): self.skip("Implement patch method") headers = { 'Client-ID': uuidutils.generate_uuid(), 'X-Project-ID': 'test-project' } action = consts.QUEUE_CREATE body = {"queue_name": "bonobon"} send_mock = mock.patch.object(self.protocol, 'sendMessage') self.addCleanup(send_mock.stop) sender = send_mock.start() # Create req = test_utils.create_request(action, body, headers) def validator(resp, isBinary): resp = jsonutils.loads(resp) self.assertEqual(201, resp['headers']['status']) sender.side_effect = validator self.protocol.onMessage(req, False) # Set meta meta1 = {"messages": {"ttl": 600}, "padding": "x"} body["metadata"] = meta1 req = test_utils.create_request(action, body, headers) def validator(resp, isBinary): resp = jsonutils.loads(resp) self.assertEqual(204, resp['headers']['status']) sender.side_effect = validator self.protocol.onMessage(req, False) # Get action = consts.QUEUE_GET body = {"queue_name": "bonobon"} req = test_utils.create_request(action, body, headers) def validator(resp, isBinary): resp = jsonutils.loads(resp) self.assertEqual(204, resp['headers']['status']) self.assertEqual(meta1, resp['body']) sender.side_effect = validator self.protocol.onMessage(req, False) # Update action = consts.QUEUE_CREATE meta2 = {"messages": {"ttl": 100}, "padding": "y"} body["metadata"] = meta2 req = test_utils.create_request(action, body, headers) def validator(resp, isBinary): resp = jsonutils.loads(resp) self.assertEqual(204, resp['headers']['status']) sender.side_effect = validator self.protocol.onMessage(req, False) # Get again action = consts.QUEUE_GET body = {"queue_name": "bonobon"} req = test_utils.create_request(action, body, headers) def validator(resp, isBinary): resp = jsonutils.loads(resp) self.assertEqual(200, resp['headers']['status']) 
self.assertEqual(meta2, resp['body']) sender.side_effect = validator self.protocol.onMessage(req, False) def test_list(self): arbitrary_number = 644079696574693 project_id = str(arbitrary_number) client_id = uuidutils.generate_uuid() headers = { 'X-Project-ID': project_id, 'Client-ID': client_id } send_mock = mock.patch.object(self.protocol, 'sendMessage') self.addCleanup(send_mock.stop) sender = send_mock.start() # NOTE(kgriffs): It's important that this one sort after the one # above. This is in order to prove that bug/1236605 is fixed, and # stays fixed! # NOTE(vkmc): In websockets as well! alt_project_id = str(arbitrary_number + 1) # List empty action = consts.QUEUE_LIST body = {} req = test_utils.create_request(action, body, headers) def validator(resp, isBinary): resp = jsonutils.loads(resp) self.assertEqual(200, resp['headers']['status']) self.assertEqual([], resp['body']['queues']) sender.side_effect = validator self.protocol.onMessage(req, False) # Payload exceeded body = {'limit': 21} req = test_utils.create_request(action, body, headers) def validator(resp, isBinary): resp = jsonutils.loads(resp) self.assertEqual(400, resp['headers']['status']) sender.side_effect = validator self.protocol.onMessage(req, False) # Create some def create_queue(project_id, queue_name, metadata): altheaders = {'Client-ID': client_id} if project_id is not None: altheaders['X-Project-ID'] = project_id action = consts.QUEUE_CREATE body['queue_name'] = queue_name body['metadata'] = metadata req = test_utils.create_request(action, body, altheaders) def validator(resp, isBinary): resp = jsonutils.loads(resp) self.assertEqual(201, resp['headers']['status']) sender.side_effect = validator self.protocol.onMessage(req, False) create_queue(project_id, 'q1', {"node": 31}) create_queue(project_id, 'q2', {"node": 32}) create_queue(project_id, 'q3', {"node": 33}) create_queue(alt_project_id, 'q3', {"alt": 1}) # List (limit) body = {'limit': 2} req = test_utils.create_request(action, body, headers) def validator(resp, isBinary): resp = jsonutils.loads(resp) self.assertEqual(2, len(resp['body']['queues'])) sender.side_effect = validator self.protocol.onMessage(req, False) # List (no metadata, get all) body = {'limit': 5} req = test_utils.create_request(action, body, headers) def validator(resp, isBinary): resp = jsonutils.loads(resp) self.assertEqual(200, resp['headers']['status']) # Ensure we didn't pick up the queue from the alt project. 
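            # (q1, q2, and q3 belong to this project; the alt project's 'q3'
            # must not leak into the listing.)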
self.assertEqual(3, len(resp['body']['queues'])) sender.side_effect = validator self.protocol.onMessage(req, False) # List with metadata body = {'detailed': True} req = test_utils.create_request(action, body, headers) def validator(resp, isBinary): resp = jsonutils.loads(resp) self.assertEqual(200, resp['headers']['status']) sender.side_effect = validator self.protocol.onMessage(req, False) action = consts.QUEUE_GET body = {"queue_name": "q1"} req = test_utils.create_request(action, body, headers) def validator(resp, isBinary): resp = jsonutils.loads(resp) self.assertEqual(200, resp['headers']['status']) self.assertEqual({"node": 31}, resp['body']) sender.side_effect = validator self.protocol.onMessage(req, False) # List tail action = consts.QUEUE_LIST body = {} req = test_utils.create_request(action, body, headers) def validator(resp, isBinary): resp = jsonutils.loads(resp) self.assertEqual(200, resp['headers']['status']) sender.side_effect = validator self.protocol.onMessage(req, False) # List manually-constructed tail body = {'marker': "zzz"} req = test_utils.create_request(action, body, headers) self.protocol.onMessage(req, False) def test_list_returns_503_on_nopoolfound_exception(self): headers = { 'Client-ID': uuidutils.generate_uuid(), 'X-Project-ID': 'test-project' } action = consts.QUEUE_LIST body = {} send_mock = mock.patch.object(self.protocol, 'sendMessage') self.addCleanup(send_mock.stop) sender = send_mock.start() req = test_utils.create_request(action, body, headers) def validator(resp, isBinary): resp = jsonutils.loads(resp) self.assertEqual(503, resp['headers']['status']) sender.side_effect = validator queue_controller = self.boot.storage.queue_controller with mock.patch.object(queue_controller, 'list') as mock_queue_list: def queue_generator(): raise storage_errors.NoPoolFound() # This generator tries to be like queue controller list generator # in some ways. 
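            # The nested call raises NoPoolFound only once the transport
            # starts consuming the generator, mimicking a listing that
            # fails lazily.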
            def fake_generator():
                yield queue_generator()
                yield {}
            mock_queue_list.return_value = fake_generator()
            self.protocol.onMessage(req, False)

    def _post_messages(self, queue_name, headers, repeat=1):
        messages = [{'body': 239, 'ttl': 300}] * repeat
        action = consts.MESSAGE_POST
        body = {"queue_name": queue_name,
                "messages": messages}

        send_mock = mock.Mock()
        self.protocol.sendMessage = send_mock

        req = test_utils.create_request(action, body, headers)
        self.protocol.onMessage(req, False)
        return jsonutils.loads(send_mock.call_args[0][0])

    def test_purge(self):
        arbitrary_number = 644079696574693
        project_id = str(arbitrary_number)
        client_id = uuidutils.generate_uuid()
        headers = {
            'X-Project-ID': project_id,
            'Client-ID': client_id
        }
        queue_name = 'myqueue'
        resp = self._post_messages(queue_name, headers, repeat=5)
        msg_ids = resp['body']['message_ids']

        send_mock = mock.Mock()
        self.protocol.sendMessage = send_mock

        for msg_id in msg_ids:
            action = consts.MESSAGE_GET
            body = {"queue_name": queue_name, "message_id": msg_id}
            req = test_utils.create_request(action, body, headers)
            self.protocol.onMessage(req, False)
            resp = jsonutils.loads(send_mock.call_args[0][0])
            self.assertEqual(200, resp['headers']['status'])

        action = consts.QUEUE_PURGE
        body = {"queue_name": queue_name, "resource_types": ["messages"]}
        req = test_utils.create_request(action, body, headers)
        self.protocol.onMessage(req, False)
        resp = jsonutils.loads(send_mock.call_args[0][0])
        self.assertEqual(204, resp['headers']['status'])

        for msg_id in msg_ids:
            action = consts.MESSAGE_GET
            body = {"queue_name": queue_name, "message_id": msg_id}
            req = test_utils.create_request(action, body, headers)
            self.protocol.onMessage(req, False)
            resp = jsonutils.loads(send_mock.call_args[0][0])
            self.assertEqual(404, resp['headers']['status'])


class TestQueueLifecycleMongoDB(QueueLifecycleBaseTest):

    config_file = 'websocket_mongodb.conf'

    @testing.requires_mongodb
    def setUp(self):
        super(TestQueueLifecycleMongoDB, self).setUp()

    def tearDown(self):
        storage = self.boot.storage._storage
        connection = storage.connection

        connection.drop_database(self.boot.control.queues_database)

        for db in storage.message_databases:
            connection.drop_database(db)

        super(TestQueueLifecycleMongoDB, self).tearDown()


zaqar-20.1.0.dev29/zaqar/tests/unit/transport/websocket/v2/test_subscriptions.py

# Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from unittest import mock

import msgpack
from oslo_serialization import jsonutils
from oslo_utils import uuidutils

from zaqar.common import auth
from zaqar.common import consts
from zaqar.storage import errors as storage_errors
from zaqar.tests.unit.transport.websocket import base
from zaqar.tests.unit.transport.websocket import utils as test_utils
from zaqar.transport.websocket import factory


class SubscriptionTest(base.V1_1Base):

    config_file = 'websocket_mongodb_subscriptions.conf'

    def setUp(self):
        super(SubscriptionTest, self).setUp()
        self.protocol = self.transport.factory()
        self.project_id = '7e55e1a7e'
        self.headers = {
            'Client-ID': uuidutils.generate_uuid(),
            'X-Project-ID': self.project_id
        }

        body = {'queue_name': 'kitkat'}
        req = test_utils.create_request(consts.QUEUE_CREATE,
                                        body, self.headers)

        def validator(resp, isBinary):
            resp = jsonutils.loads(resp)
            self.assertIn(resp['headers']['status'], [201, 204])

        with mock.patch.object(self.protocol, 'sendMessage') as msg_mock:
            msg_mock.side_effect = validator
            self.protocol.onMessage(req, False)

    def tearDown(self):
        super(SubscriptionTest, self).tearDown()
        body = {'queue_name': 'kitkat'}

        send_mock = mock.patch.object(self.protocol, 'sendMessage')
        self.addCleanup(send_mock.stop)
        sender = send_mock.start()

        req = test_utils.create_request(consts.QUEUE_DELETE,
                                        body, self.headers)

        def validator(resp, isBinary):
            resp = jsonutils.loads(resp)
            self.assertEqual(resp['headers']['status'], 204)

        sender.side_effect = validator
        self.protocol.onMessage(req, False)

    def test_subscription_create(self):
        action = consts.SUBSCRIPTION_CREATE
        body = {'queue_name': 'kitkat', 'ttl': 600}

        send_mock = mock.patch.object(self.protocol, 'sendMessage')
        self.addCleanup(send_mock.stop)
        sender = send_mock.start()

        subscription_factory = factory.NotificationFactory(None)
        subscription_factory.set_subscription_url('http://localhost:1234/')
        self.protocol._handler.set_subscription_factory(subscription_factory)

        req = test_utils.create_request(action, body, self.headers)
        self.protocol.onMessage(req, False)

        added_age = 1
        time.sleep(added_age)
        [subscriber] = list(
            next(
                self.boot.storage.subscription_controller.list(
                    'kitkat', self.project_id)))
        self.addCleanup(
            self.boot.storage.subscription_controller.delete, 'kitkat',
            subscriber['id'], project=self.project_id)
        self.assertEqual('kitkat', subscriber['source'])
        self.assertEqual(600, subscriber['ttl'])
        self.assertEqual('http://localhost:1234/%s' % self.protocol.proto_id,
                         subscriber['subscriber'])
        self.assertLessEqual(added_age, subscriber['age'])

        response = {
            'body': {'message': 'Subscription kitkat created.',
                     'subscription_id': subscriber['id']},
            'headers': {'status': 201},
            'request': {'action': consts.SUBSCRIPTION_CREATE,
                        'body': {'queue_name': 'kitkat', 'ttl': 600},
                        'api': 'v2', 'headers': self.headers}}

        self.assertEqual(1, sender.call_count)
        self.assertEqual(response, jsonutils.loads(sender.call_args[0][0]))

        # Trigger protocol close
        self.protocol.onClose(True, 100, None)
        subscribers = list(
            next(
                self.boot.storage.subscription_controller.list(
                    'kitkat', self.project_id)))
        self.assertEqual([], subscribers)

    @mock.patch.object(auth, 'create_trust_id')
    def test_subscription_create_trust(self, create_trust):
        create_trust.return_value = 'trust_id'
        action = consts.SUBSCRIPTION_CREATE
        body = {'queue_name': 'kitkat', 'ttl': 600,
                'subscriber': 'trust+http://example.com'}
        self.protocol._auth_env = {}
        self.protocol._auth_env['X-USER-ID'] = 'user-id'
        self.protocol._auth_env['X-ROLES'] = 'my-roles'

        send_mock = mock.patch.object(self.protocol, 'sendMessage')
        self.addCleanup(send_mock.stop)
        send_mock.start()

        req = test_utils.create_request(action, body, self.headers)
        self.protocol.onMessage(req, False)

        [subscriber] = list(
            next(
                self.boot.storage.subscription_controller.list(
                    'kitkat', self.project_id)))
        self.addCleanup(
            self.boot.storage.subscription_controller.delete, 'kitkat',
            subscriber['id'], project=self.project_id)
        self.assertEqual('trust+http://example.com',
                         subscriber['subscriber'])
        self.assertEqual({'trust_id': 'trust_id'}, subscriber['options'])
        self.assertEqual('user-id', create_trust.call_args[0][1])
        self.assertEqual(self.project_id, create_trust.call_args[0][2])
        self.assertEqual(['my-roles'], create_trust.call_args[0][3])

    def test_subscription_delete(self):
        sub = self.boot.storage.subscription_controller.create(
            'kitkat', '', 600, {}, project=self.project_id)
        self.addCleanup(
            self.boot.storage.subscription_controller.delete, 'kitkat',
            sub, project=self.project_id)
        action = consts.SUBSCRIPTION_DELETE
        body = {'queue_name': 'kitkat', 'subscription_id': str(sub)}

        send_mock = mock.patch.object(self.protocol, 'sendMessage')
        self.addCleanup(send_mock.stop)
        sender = send_mock.start()

        req = test_utils.create_request(action, body, self.headers)
        self.protocol.onMessage(req, False)

        data = list(
            next(
                self.boot.storage.subscription_controller.list(
                    'kitkat', self.project_id)))
        self.assertEqual([], data)

        response = {
            'body': 'Subscription %s removed.' % str(sub),
            'headers': {'status': 204},
            'request': {'action': consts.SUBSCRIPTION_DELETE,
                        'body': {'queue_name': 'kitkat',
                                 'subscription_id': str(sub)},
                        'api': 'v2', 'headers': self.headers}}
        self.assertEqual(1, sender.call_count)
        self.assertEqual(response, jsonutils.loads(sender.call_args[0][0]))

    def test_subscription_create_no_queue(self):
        action = consts.SUBSCRIPTION_CREATE
        body = {'queue_name': 'shuffle', 'ttl': 600}

        send_mock = mock.patch.object(self.protocol, 'sendMessage')
        self.addCleanup(send_mock.stop)
        sender = send_mock.start()

        subscription_factory = factory.NotificationFactory(None)
        subscription_factory.set_subscription_url('http://localhost:1234/')
        self.protocol._handler.set_subscription_factory(subscription_factory)

        req = test_utils.create_request(action, body, self.headers)
        self.protocol.onMessage(req, False)

        [subscriber] = list(
            next(
                self.boot.storage.subscription_controller.list(
                    'shuffle', self.project_id)))
        self.addCleanup(
            self.boot.storage.subscription_controller.delete, 'shuffle',
            subscriber['id'], project=self.project_id)

        response = {
            'body': {'message': 'Subscription shuffle created.',
                     'subscription_id': subscriber['id']},
            'headers': {'status': 201},
            'request': {'action': consts.SUBSCRIPTION_CREATE,
                        'body': {'queue_name': 'shuffle', 'ttl': 600},
                        'api': 'v2', 'headers': self.headers}}

        self.assertEqual(1, sender.call_count)
        self.assertEqual(response, jsonutils.loads(sender.call_args[0][0]))

    def test_subscription_get(self):
        sub = self.boot.storage.subscription_controller.create(
            'kitkat', '', 600, {}, project=self.project_id)
        self.addCleanup(
            self.boot.storage.subscription_controller.delete, 'kitkat',
            sub, project=self.project_id)
        action = consts.SUBSCRIPTION_GET
        body = {'queue_name': 'kitkat', 'subscription_id': str(sub)}

        send_mock = mock.patch.object(self.protocol, 'sendMessage')
        self.addCleanup(send_mock.stop)
        sender = send_mock.start()

        req = test_utils.create_request(action, body, self.headers)
        self.protocol.onMessage(req, False)

        expected_response_without_age = {
            'body': {'subscriber': '',
                     'source': 'kitkat',
                     'options': {},
                     'id': str(sub),
                     'ttl': 600,
                     'confirmed': False},
            'headers': {'status': 200},
            'request': {'action': consts.SUBSCRIPTION_GET,
                        'body': {'queue_name': 'kitkat',
                                 'subscription_id': str(sub)},
                        'api': 'v2', 'headers': self.headers}}
        self.assertEqual(1, sender.call_count)
        response = jsonutils.loads(sender.call_args[0][0])
        # Get and remove age from the actual response.
        actual_sub_age = response['body'].pop('age')
        self.assertLessEqual(0, actual_sub_age)
        self.assertEqual(expected_response_without_age, response)

    def test_subscription_list(self):
        sub = self.boot.storage.subscription_controller.create(
            'kitkat', '', 600, {}, project=self.project_id)
        self.addCleanup(
            self.boot.storage.subscription_controller.delete, 'kitkat',
            sub, project=self.project_id)
        action = consts.SUBSCRIPTION_LIST
        body = {'queue_name': 'kitkat'}

        send_mock = mock.patch.object(self.protocol, 'sendMessage')
        self.addCleanup(send_mock.stop)
        sender = send_mock.start()

        req = test_utils.create_request(action, body, self.headers)
        self.protocol.onMessage(req, False)

        expected_response_without_age = {
            'body': {
                'subscriptions': [{
                    'subscriber': '',
                    'source': 'kitkat',
                    'options': {},
                    'id': str(sub),
                    'ttl': 600,
                    'confirmed': False}]},
            'headers': {'status': 200},
            'request': {'action': consts.SUBSCRIPTION_LIST,
                        'body': {'queue_name': 'kitkat'},
                        'api': 'v2', 'headers': self.headers}}
        self.assertEqual(1, sender.call_count)
        response = jsonutils.loads(sender.call_args[0][0])
        # Get and remove age from the actual response.
        actual_sub_age = response['body']['subscriptions'][0].pop('age')
        self.assertLessEqual(0, actual_sub_age)
        self.assertEqual(expected_response_without_age, response)

    def test_subscription_sustainable_notifications_format(self):
        # NOTE(Eva-i): The websocket subscription's notifications must be
        # sent in the same format, binary or text, as the format of the
        # subscription creation request.
        # This test checks that notifications keep their encoding format,
        # even if the client suddenly starts sending requests in another
        # format.
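        # Concretely: the subscription below is created with a binary
        # (msgpack) request, so its notifications must stay msgpack-encoded
        # even after the client switches to text (JSON) requests.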
        # Create a subscription in binary format
        action = consts.SUBSCRIPTION_CREATE
        body = {'queue_name': 'kitkat', 'ttl': 600}

        send_mock = mock.patch.object(self.protocol, 'sendMessage')
        self.addCleanup(send_mock.stop)
        sender = send_mock.start()

        subscription_factory = factory.NotificationFactory(
            self.transport.factory)
        subscription_factory.set_subscription_url('http://localhost:1234/')
        self.protocol._handler.set_subscription_factory(subscription_factory)

        req = test_utils.create_binary_request(action, body, self.headers)
        self.protocol.onMessage(req, True)
        self.assertTrue(self.protocol.notify_in_binary)

        [subscriber] = list(
            next(
                self.boot.storage.subscription_controller.list(
                    'kitkat', self.project_id)))
        self.addCleanup(
            self.boot.storage.subscription_controller.delete, 'kitkat',
            subscriber['id'], project=self.project_id)

        # Send a message in text format
        webhook_notification_send_mock = mock.patch('requests.post')
        self.addCleanup(webhook_notification_send_mock.stop)
        webhook_notification_sender = webhook_notification_send_mock.start()

        action = consts.MESSAGE_POST
        body = {"queue_name": "kitkat",
                "messages": [{'body': {'status': 'disco queen'}, 'ttl': 60}]}
        req = test_utils.create_request(action, body, self.headers)
        self.protocol.onMessage(req, False)
        self.assertTrue(self.protocol.notify_in_binary)

        # Check that the server responded in text format to the message
        # creation request
        message_create_response = jsonutils.loads(
            sender.call_args_list[1][0][0])
        self.assertEqual(201, message_create_response['headers']['status'])

        # Fetch webhook notification that was intended to arrive to
        # notification protocol's listen address. Make subscription factory
        # send it as websocket notification to the client
        wh_notification = webhook_notification_sender.call_args[1]['data']
        subscription_factory.send_data(wh_notification,
                                       self.protocol.proto_id)

        # Check that the server sent the websocket notification in binary
        # format
        self.assertEqual(3, sender.call_count)
        ws_notification = msgpack.unpackb(sender.call_args_list[2][0][0])
        self.assertEqual({'body': {'status': 'disco queen'}, 'ttl': 60,
                          'queue_name': 'kitkat',
                          'Message_Type': 'Notification'}, ws_notification)

    def test_list_returns_503_on_nopoolfound_exception(self):
        sub = self.boot.storage.subscription_controller.create(
            'kitkat', '', 600, {}, project=self.project_id)
        self.addCleanup(
            self.boot.storage.subscription_controller.delete, 'kitkat',
            sub, project=self.project_id)
        action = consts.SUBSCRIPTION_LIST
        body = {'queue_name': 'kitkat'}

        send_mock = mock.patch.object(self.protocol, 'sendMessage')
        self.addCleanup(send_mock.stop)
        sender = send_mock.start()

        req = test_utils.create_request(action, body, self.headers)

        def validator(resp, isBinary):
            resp = jsonutils.loads(resp)
            self.assertEqual(503, resp['headers']['status'])

        sender.side_effect = validator

        subscription_controller = self.boot.storage.subscription_controller

        with mock.patch.object(subscription_controller, 'list') as \
                mock_subscription_list:

            def subscription_generator():
                raise storage_errors.NoPoolFound()

            # This generator tries to be like subscription controller list
            # generator in some ways.
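            # As in the queue test above, advancing it evaluates
            # subscription_generator(), raising NoPoolFound, which should
            # surface as the 503 the validator expects.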
            def fake_generator():
                yield subscription_generator()
                yield {}
            mock_subscription_list.return_value = fake_generator()
            self.protocol.onMessage(req, False)


zaqar-20.1.0.dev29/zaqar/tests/unit/transport/wsgi/__init__.py

# Copyright (c) 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# TODO(kgriffs): Consider consolidating all of these tests into a
# single module.
from zaqar.tests.unit.transport.wsgi import base

TestBase = base.TestBase
TestBaseFaulty = base.TestBaseFaulty
V1_1Base = base.V1_1Base


zaqar-20.1.0.dev29/zaqar/tests/unit/transport/wsgi/base.py

# Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import falcon
from falcon import testing as ftest
from oslo_serialization import jsonutils
from oslo_utils import uuidutils

from zaqar import bootstrap
from zaqar.conf import default
from zaqar.conf import drivers_transport_wsgi
from zaqar.conf import transport
from zaqar import tests as testing


class TestBase(testing.TestBase):

    config_file = None

    def setUp(self):
        super(TestBase, self).setUp()

        if not self.config_file:
            self.skipTest("No config specified")

        self.conf.register_opts(default.ALL_OPTS)
        self.conf.register_opts(transport.ALL_OPTS,
                                group=transport.GROUP_NAME)
        self.transport_cfg = self.conf[transport.GROUP_NAME]

        self.conf.register_opts(drivers_transport_wsgi.ALL_OPTS,
                                group=drivers_transport_wsgi.GROUP_NAME)
        self.wsgi_cfg = self.conf[drivers_transport_wsgi.GROUP_NAME]

        self.conf.unreliable = True
        self.conf.admin_mode = True
        self.boot = bootstrap.Bootstrap(self.conf)
        self.addCleanup(self.boot.storage.close)
        self.addCleanup(self.boot.control.close)

        self.app = self.boot.transport.app

        self.srmock = ftest.StartResponseMock()

        self.headers = {
            'Client-ID': uuidutils.generate_uuid(),
            'X-ROLES': 'admin',
            'X-USER-ID': 'a12d157c7d0d41999096639078fd11fc',
            'X-TENANT-ID': 'abb69142168841fcaa2785791b92467f',
        }

    def tearDown(self):
        if self.conf.pooling:
            self.boot.control.pools_controller.drop_all()
            self.boot.control.catalogue_controller.drop_all()

        super(TestBase, self).tearDown()

    def simulate_request(self, path, project_id=None, **kwargs):
        """Simulate a request.

        Simulates a WSGI request to the API for testing.

        :param path: Request path for the desired resource
        :param project_id: Project ID to use for the X-Project-ID header,
            or None to not set the header
        :param kwargs: Same as falcon.testing.create_environ()

        :returns: standard WSGI iterable response
        """

        # NOTE(flaper87): We create a copy regardless of whether the
        # headers were passed or not.
        # This will prevent modifying `self.headers` in cases where
        # simulate methods are called like:
        # self.simulate_put(path, headers=self.headers)
        headers = kwargs.get('headers', self.headers).copy()

        project_id = ('518b51ea133c4facadae42c328d6b77b'
                      if project_id is None else project_id)
        if kwargs.get('need_project_id', True):
            headers['X-Project-ID'] = headers.get('X-Project-ID', project_id)

        kwargs.pop('need_project_id', None)
        kwargs['headers'] = headers

        kwargs['host'] = 'openstack.example.com'
        kwargs['root_path'] = 'messaging'

        try:
            path.encode('latin1').decode('utf-8', 'replace')
        except UnicodeEncodeError:
            self.srmock.status = falcon.HTTP_400
            return

        return self.app(ftest.create_environ(path=path, **kwargs),
                        self.srmock)

    def simulate_get(self, *args, **kwargs):
        """Simulate a GET request."""
        kwargs['method'] = 'GET'
        return self.simulate_request(*args, **kwargs)

    def simulate_head(self, *args, **kwargs):
        """Simulate a HEAD request."""
        kwargs['method'] = 'HEAD'
        return self.simulate_request(*args, **kwargs)

    def simulate_put(self, *args, **kwargs):
        """Simulate a PUT request."""
        kwargs['method'] = 'PUT'
        return self.simulate_request(*args, **kwargs)

    def simulate_post(self, *args, **kwargs):
        """Simulate a POST request."""
        kwargs['method'] = 'POST'
        return self.simulate_request(*args, **kwargs)

    def simulate_delete(self, *args, **kwargs):
        """Simulate a DELETE request."""
        kwargs['method'] = 'DELETE'
        return self.simulate_request(*args, **kwargs)

    def simulate_patch(self, *args, **kwargs):
        """Simulate a PATCH request."""
        kwargs['method'] = 'PATCH'
        return self.simulate_request(*args, **kwargs)


class TestBaseFaulty(TestBase):
    """This test ensures we aren't letting any exceptions go unhandled."""


class V1_1Base(TestBase):
    """Base class for V1.1 API Tests.

    Should contain methods specific to V1.1 of the API
    """

    url_prefix = '/v1.1'

    def _empty_message_list(self, body):
        self.assertEqual([], jsonutils.loads(body[0])['messages'])


class V1_1BaseFaulty(TestBaseFaulty):
    """Base class for V1.1 API Faulty Tests.

    Should contain methods specific to V1.1 exception testing
    """

    url_prefix = '/v1.1'


class V2Base(V1_1Base):
    """Base class for V2 API Tests.

    Should contain methods specific to V2 of the API
    """

    url_prefix = '/v2'


class V2BaseFaulty(V1_1BaseFaulty):
    """Base class for V2 API Faulty Tests.

    Should contain methods specific to V2 exception testing
    """

    url_prefix = '/v2'


zaqar-20.1.0.dev29/zaqar/tests/unit/transport/wsgi/test_utils.py

# Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import io

import falcon
from oslo_serialization import jsonutils
import testtools

from zaqar.transport.wsgi import utils


class TestUtils(testtools.TestCase):

    def test_get_checked_field_missing(self):
        doc = {}

        self.assertRaises(falcon.HTTPBadRequest,
                          utils.get_checked_field, doc, 'openstack', int,
                          None)

        self.assertRaises(falcon.HTTPBadRequest,
                          utils.get_checked_field, doc, 42, int, None)

        doc = {'openstac': 10}

        self.assertRaises(falcon.HTTPBadRequest,
                          utils.get_checked_field, doc, 'openstack', int,
                          None)

        value = utils.get_checked_field(doc, 'missing', int, 0)
        self.assertEqual(0, value)

        value = utils.get_checked_field(doc, 'missing', dict, {})
        self.assertEqual({}, value)

    def test_get_checked_field_bad_type(self):
        doc = {'openstack': '10'}

        self.assertRaises(falcon.HTTPBadRequest,
                          utils.get_checked_field, doc, 'openstack', int,
                          None)

        doc = {'openstack': 10, 'openstack-mq': 'test'}

        self.assertRaises(falcon.HTTPBadRequest,
                          utils.get_checked_field, doc, 'openstack', str,
                          None)

        doc = {'openstack': '[1, 2]'}

        self.assertRaises(falcon.HTTPBadRequest,
                          utils.get_checked_field, doc, 'openstack', list,
                          None)

    def test_get_checked_field(self):
        doc = {'hello': 'world', 'the answer': 42, 'question': []}

        value = utils.get_checked_field(doc, 'hello', str, None)
        self.assertEqual('world', value)

        value = utils.get_checked_field(doc, 'the answer', int, None)
        self.assertEqual(42, value)

        value = utils.get_checked_field(doc, 'question', list, None)
        self.assertEqual([], value)

    def test_filter_missing(self):
        doc = {'body': {'event': 'start_backup'}}
        spec = (('tag', dict, None),)
        self.assertRaises(falcon.HTTPBadRequest,
                          utils.filter, doc, spec)

        spec = (('tag', str, 'db'),)
        filtered = utils.filter(doc, spec)
        self.assertEqual({'tag': 'db'}, filtered)

    def test_filter_bad_type(self):
        doc = {'ttl': '300', 'bogus': 'yogabbagabba'}
        spec = [('ttl', int, None)]
        self.assertRaises(falcon.HTTPBadRequest,
                          utils.filter, doc, spec)

    def test_filter(self):
        doc = {'body': {'event': 'start_backup'}}

        def spec():
            yield ('body', dict, None)

        filtered = utils.filter(doc, spec())
        self.assertEqual(doc, filtered)

        doc = {'ttl': 300, 'bogus': 'yogabbagabba'}
        spec = [('ttl', int, None)]
        filtered = utils.filter(doc, spec)
        self.assertEqual({'ttl': 300}, filtered)

        doc = {'body': {'event': 'start_backup'}, 'ttl': 300}
        spec = (('body', dict, None), ('ttl', int, None))
        filtered = utils.filter(doc, spec)
        self.assertEqual(doc, filtered)

    def test_no_spec(self):
        obj = {u'body': {'event': 'start_backup'}, 'ttl': 300}
        document = str(jsonutils.dumps(obj, ensure_ascii=False))
        doc_stream = io.StringIO(document)

        deserialized = utils.deserialize(doc_stream, len(document))
        filtered = utils.sanitize(deserialized, spec=None)
        self.assertEqual(obj, filtered)

        # NOTE(kgriffs): Ensure default value for *spec* is None
        filtered2 = utils.sanitize(deserialized)
        self.assertEqual(filtered, filtered2)

    def test_no_spec_array(self):
        things = [{u'body': {'event': 'start_backup'}, 'ttl': 300}]
        document = str(jsonutils.dumps(things, ensure_ascii=False))
        doc_stream = io.StringIO(document)

        deserialized = utils.deserialize(doc_stream, len(document))
        filtered = utils.sanitize(deserialized, doctype=utils.JSONArray,
                                  spec=None)
        self.assertEqual(things, filtered)

    def test_filter_star(self):
        doc = {'ttl': 300, 'body': {'event': 'start_backup'}}

        spec = [('body', '*', None), ('ttl', '*', None)]
        filtered = utils.filter(doc, spec)

        self.assertEqual(doc, filtered)

    def test_deserialize_and_sanitize_json_obj(self):
        obj = {u'body': {'event': 'start_backup'}, 'id': 'DEADBEEF'}
        document = str(jsonutils.dumps(obj, ensure_ascii=False))
        stream = io.StringIO(document)
        spec = [('body', dict, None), ('id', str, None)]

        # Positive test
        deserialized_object = utils.deserialize(stream, len(document))
        filtered_object = utils.sanitize(deserialized_object, spec)
        self.assertEqual(obj, filtered_object)

        # Negative test
        self.assertRaises(falcon.HTTPBadRequest, utils.sanitize,
                          deserialized_object, spec,
                          doctype=utils.JSONArray)

    def test_deserialize_and_sanitize_json_array(self):
        array = [{u'body': {u'x': 1}}, {u'body': {u'x': 2}}]
        document = str(jsonutils.dumps(array, ensure_ascii=False))
        stream = io.StringIO(document)
        spec = [('body', dict, None)]

        # Positive test
        deserialized_object = utils.deserialize(stream, len(document))
        filtered_object = utils.sanitize(deserialized_object, spec,
                                         doctype=utils.JSONArray)
        self.assertEqual(array, filtered_object)

        # Negative test
        self.assertRaises(falcon.HTTPBadRequest, utils.sanitize,
                          deserialized_object, spec,
                          doctype=utils.JSONObject)

    def test_bad_doctype(self):
        self.assertRaises(TypeError,
                          utils.sanitize, {}, None, doctype=int)

    def test_deserialize_bad_stream(self):
        stream = None
        length = None
        self.assertRaises(falcon.HTTPBadRequest,
                          utils.deserialize, stream, length)


zaqar-20.1.0.dev29/zaqar/tests/unit/transport/wsgi/test_version.py

# Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import falcon
from oslo_serialization import jsonutils

from zaqar.tests.unit.transport.wsgi import base

EXPECTED_VERSIONS = [
    {
        'id': '1.1',
        'status': 'DEPRECATED',
        'updated': '2016-7-29T02:22:47Z',
        'media-types': [
            {
                'base': 'application/json',
                'type': 'application/vnd.openstack.messaging-v1_1+json'
            }
        ],
        'links': [
            {
                'href': '/v1.1/',
                'rel': 'self'
            }
        ]
    },
    {
        'id': '2',
        'status': 'CURRENT',
        'updated': '2014-9-24T04:06:47Z',
        'media-types': [
            {
                'base': 'application/json',
                'type': 'application/vnd.openstack.messaging-v2+json'
            }
        ],
        'links': [
            {
                'href': '/v2/',
                'rel': 'self'
            }
        ]
    }
]


class TestVersion(base.TestBase):

    config_file = 'wsgi_mongodb.conf'

    def test_get(self):
        response = self.simulate_get('/')
        versions = jsonutils.loads(response[0])['versions']

        self.assertEqual(falcon.HTTP_300, self.srmock.status)
        self.assertEqual(2, len(versions))
        self.assertEqual(EXPECTED_VERSIONS, versions)


zaqar-20.1.0.dev29/zaqar/tests/unit/transport/wsgi/v1_1/__init__.py

zaqar-20.1.0.dev29/zaqar/tests/unit/transport/wsgi/v1_1/test_auth.py

# Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Test Auth."""

import falcon
from falcon import testing
from keystonemiddleware import auth_token
from oslo_utils import uuidutils

from zaqar.tests.unit.transport.wsgi import base


class TestAuth(base.V1_1Base):

    config_file = 'keystone_auth.conf'

    def setUp(self):
        super(TestAuth, self).setUp()
        self.headers = {'Client-ID': uuidutils.generate_uuid()}

    def test_auth_install(self):
        self.assertIsInstance(self.app._auth_app, auth_token.AuthProtocol)

    def test_non_authenticated(self):
        env = testing.create_environ(self.url_prefix + '/480924/queues/',
                                     method='GET',
                                     headers=self.headers)
        self.app(env, self.srmock)
        self.assertEqual(falcon.HTTP_401, self.srmock.status)


zaqar-20.1.0.dev29/zaqar/tests/unit/transport/wsgi/v1_1/test_claims.py

# Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import datetime
from unittest import mock

import ddt
import falcon
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
from testtools import matchers

from zaqar import tests as testing
from zaqar.tests.unit.transport.wsgi import base


@ddt.ddt
class TestClaimsMongoDB(base.V1_1Base):

    config_file = 'wsgi_mongodb.conf'

    @testing.requires_mongodb
    def setUp(self):
        super(TestClaimsMongoDB, self).setUp()

        self.default_claim_ttl = self.boot.transport._defaults.claim_ttl
        self.project_id = '737_abc8332832'
        self.headers = {
            'Client-ID': uuidutils.generate_uuid(),
            'X-Project-ID': self.project_id
        }
        self.queue_path = self.url_prefix + '/queues/fizbit'
        self.claims_path = self.queue_path + '/claims'
        self.messages_path = self.queue_path + '/messages'

        doc = jsonutils.dumps({"_ttl": 60})
        self.simulate_put(self.queue_path, body=doc, headers=self.headers)
        self.assertEqual(falcon.HTTP_201, self.srmock.status)

        doc = jsonutils.dumps({'messages': [{'body': 239, 'ttl': 300}] * 10})
        self.simulate_post(self.queue_path + '/messages', body=doc,
                           headers=self.headers)
        self.assertEqual(falcon.HTTP_201, self.srmock.status)

    def tearDown(self):
        storage = self.boot.storage._storage
        control = self.boot.control
        connection = storage.connection

        connection.drop_database(control.queues_database)

        for db in storage.message_databases:
            connection.drop_database(db)

        self.simulate_delete(self.queue_path, headers=self.headers)

        super(TestClaimsMongoDB, self).tearDown()

    @ddt.data('[', '[]', '.', '"fail"')
    def test_bad_claim(self, doc):
        self.simulate_post(self.claims_path, body=doc, headers=self.headers)
        self.assertEqual(falcon.HTTP_400, self.srmock.status)

        href = self._get_a_claim()

        self.simulate_patch(href, body=doc, headers=self.headers)
        self.assertEqual(falcon.HTTP_400, self.srmock.status)

    def test_exceeded_claim(self):
        self.simulate_post(self.claims_path,
                           body='{"ttl": 100, "grace": 60}',
                           query_string='limit=21', headers=self.headers)
        self.assertEqual(falcon.HTTP_400, self.srmock.status)

    @ddt.data((-1, -1), (59, 60), (60, 59), (60, 43201), (43201, 60))
    def test_unacceptable_ttl_or_grace(self, ttl_grace):
        ttl, grace = ttl_grace
        self.simulate_post(self.claims_path,
                           body=jsonutils.dumps({'ttl': ttl,
                                                 'grace': grace}),
                           headers=self.headers)
        self.assertEqual(falcon.HTTP_400, self.srmock.status)

    @ddt.data(-1, 59, 43201)
    def test_unacceptable_new_ttl(self, ttl):
        href = self._get_a_claim()

        self.simulate_patch(href,
                            body=jsonutils.dumps({'ttl': ttl}),
                            headers=self.headers)
        self.assertEqual(falcon.HTTP_400, self.srmock.status)

    def test_default_ttl_and_grace(self):
        self.simulate_post(self.claims_path,
                           body='{}', headers=self.headers)
        self.assertEqual(falcon.HTTP_201, self.srmock.status)

        body = self.simulate_get(self.srmock.headers_dict['location'],
                                 headers=self.headers)

        claim = jsonutils.loads(body[0])
        self.assertEqual(self.default_claim_ttl, claim['ttl'])

    def _get_a_claim(self):
        doc = '{"ttl": 100, "grace": 60}'
        self.simulate_post(self.claims_path, body=doc, headers=self.headers)
        return self.srmock.headers_dict['Location']

    def test_lifecycle(self):
        doc = '{"ttl": 100, "grace": 60}'

        # First, claim some messages
        body = self.simulate_post(self.claims_path, body=doc,
                                  headers=self.headers)
        self.assertEqual(falcon.HTTP_201, self.srmock.status)

        claimed = jsonutils.loads(body[0])['messages']
        claim_href = self.srmock.headers_dict['Location']
        message_href, params = claimed[0]['href'].split('?')

        # No more messages to claim
        self.simulate_post(self.claims_path, body=doc,
                           query_string='limit=3', headers=self.headers)
        self.assertEqual(falcon.HTTP_204, self.srmock.status)

        # Listing messages, by default, won't include claimed, will echo
        body = self.simulate_get(self.messages_path,
                                 headers=self.headers,
                                 query_string="echo=true")
        self.assertEqual(falcon.HTTP_200, self.srmock.status)
        self._empty_message_list(body)

        # Listing messages, by default, won't include claimed, won't echo
        body = self.simulate_get(self.messages_path,
                                 headers=self.headers,
                                 query_string="echo=false")
        self.assertEqual(falcon.HTTP_200, self.srmock.status)
        self._empty_message_list(body)

        # List messages, include_claimed, but don't echo
        body = self.simulate_get(self.messages_path,
                                 query_string='include_claimed=true'
                                              '&echo=false',
                                 headers=self.headers)
        self.assertEqual(falcon.HTTP_200, self.srmock.status)
        self._empty_message_list(body)

        # List messages with a different client-id and echo=false.
        # Should return some messages
        headers = self.headers.copy()
        headers["Client-ID"] = uuidutils.generate_uuid()
        body = self.simulate_get(self.messages_path,
                                 query_string='include_claimed=true'
                                              '&echo=false',
                                 headers=headers)
        self.assertEqual(falcon.HTTP_200, self.srmock.status)

        # Include claimed messages this time, and echo
        body = self.simulate_get(self.messages_path,
                                 query_string='include_claimed=true'
                                              '&echo=true',
                                 headers=self.headers)
        listed = jsonutils.loads(body[0])
        self.assertEqual(falcon.HTTP_200, self.srmock.status)
        self.assertEqual(len(claimed), len(listed['messages']))

        now = timeutils.utcnow() + datetime.timedelta(seconds=10)
        timeutils_utcnow = 'oslo_utils.timeutils.utcnow'
        with mock.patch(timeutils_utcnow) as mock_utcnow:
            mock_utcnow.return_value = now
            body = self.simulate_get(claim_href, headers=self.headers)

        claim = jsonutils.loads(body[0])

        self.assertEqual(falcon.HTTP_200, self.srmock.status)
        self.assertEqual(100, claim['ttl'])
        # NOTE(cpp-cabrera): verify that claim age is non-negative
        self.assertThat(claim['age'], matchers.GreaterThan(-1))

        # Try to delete the message without submitting a claim_id
        self.simulate_delete(message_href, headers=self.headers)
        self.assertEqual(falcon.HTTP_403, self.srmock.status)

        # Delete the message and its associated claim
        self.simulate_delete(message_href,
                             query_string=params, headers=self.headers)
        self.assertEqual(falcon.HTTP_204, self.srmock.status)

        # Try to get it from the wrong project
        headers = {
            'Client-ID': uuidutils.generate_uuid(),
            'X-Project-ID': 'bogusproject'
        }
        self.simulate_get(message_href, query_string=params, headers=headers)
        self.assertEqual(falcon.HTTP_404, self.srmock.status)

        # Get the message
        self.simulate_get(message_href, query_string=params,
                          headers=self.headers)
        self.assertEqual(falcon.HTTP_404, self.srmock.status)

        # Update the claim
        new_claim_ttl = '{"ttl": 60, "grace": 60}'
        creation = timeutils.utcnow()
        self.simulate_patch(claim_href, body=new_claim_ttl,
                            headers=self.headers)
        self.assertEqual(falcon.HTTP_204, self.srmock.status)

        # Get the claimed messages (again)
        body = self.simulate_get(claim_href, headers=self.headers)
        query = timeutils.utcnow()
        claim = jsonutils.loads(body[0])
        message_href, params = claim['messages'][0]['href'].split('?')

        self.assertEqual(60, claim['ttl'])
        estimated_age = timeutils.delta_seconds(creation, query)
        self.assertGreater(estimated_age, claim['age'])

        # Delete the claim
        self.simulate_delete(claim['href'], headers=self.headers)
        self.assertEqual(falcon.HTTP_204, self.srmock.status)

        # Try to delete a message with an invalid claim ID
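        # (the claim was deleted just above, so the claim_id carried in
        # `params` is now stale)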
        self.simulate_delete(message_href,
                             query_string=params, headers=self.headers)
        self.assertEqual(falcon.HTTP_400, self.srmock.status)

        # Make sure it wasn't deleted!
        self.simulate_get(message_href, query_string=params,
                          headers=self.headers)
        self.assertEqual(falcon.HTTP_200, self.srmock.status)

        # Try to get a claim that doesn't exist
        self.simulate_get(claim['href'], headers=self.headers)
        self.assertEqual(falcon.HTTP_404, self.srmock.status)

        # Try to update a claim that doesn't exist
        self.simulate_patch(claim['href'], body=doc,
                            headers=self.headers)
        self.assertEqual(falcon.HTTP_404, self.srmock.status)

    def test_post_claim_nonexistent_queue(self):
        path = self.url_prefix + '/queues/nonexistent/claims'
        self.simulate_post(path,
                           body='{"ttl": 100, "grace": 60}',
                           headers=self.headers)

        self.assertEqual(falcon.HTTP_204, self.srmock.status)

    def test_get_claim_nonexistent_queue(self):
        path = self.url_prefix + '/queues/nonexistent/claims/aaabbbba'
        self.simulate_get(path, headers=self.headers)

        self.assertEqual(falcon.HTTP_404, self.srmock.status)

    # NOTE(cpp-cabrera): regression test against bug #1203842
    def test_get_nonexistent_claim_404s(self):
        self.simulate_get(self.claims_path + '/a', headers=self.headers)
        self.assertEqual(falcon.HTTP_404, self.srmock.status)

    def test_delete_nonexistent_claim_204s(self):
        self.simulate_delete(self.claims_path + '/a', headers=self.headers)
        self.assertEqual(falcon.HTTP_204, self.srmock.status)

    def test_patch_nonexistent_claim_404s(self):
        patch_data = jsonutils.dumps({'ttl': 100})
        self.simulate_patch(self.claims_path + '/a', body=patch_data,
                            headers=self.headers)
        self.assertEqual(falcon.HTTP_404, self.srmock.status)


class TestClaimsFaultyDriver(base.V1_1BaseFaulty):

    config_file = 'wsgi_faulty.conf'

    def test_simple(self):
        self.project_id = '480924abc_'
        self.headers = {
            'Client-ID': uuidutils.generate_uuid(),
            'X-Project-ID': self.project_id
        }

        claims_path = self.url_prefix + '/queues/fizbit/claims'
        doc = '{"ttl": 100, "grace": 60}'

        self.simulate_post(claims_path, body=doc, headers=self.headers)
        self.assertEqual(falcon.HTTP_503, self.srmock.status)

        self.simulate_get(claims_path + '/nichts', headers=self.headers)
        self.assertEqual(falcon.HTTP_503, self.srmock.status)

        self.simulate_patch(claims_path + '/nichts', body=doc,
                            headers=self.headers)
        self.assertEqual(falcon.HTTP_503, self.srmock.status)

        self.simulate_delete(claims_path + '/foo', headers=self.headers)
        self.assertEqual(falcon.HTTP_503, self.srmock.status)


zaqar-20.1.0.dev29/zaqar/tests/unit/transport/wsgi/v1_1/test_default_limits.py

# Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib

import falcon
from oslo_serialization import jsonutils
from oslo_utils import uuidutils

from zaqar import storage
from zaqar.tests.unit.transport.wsgi import base


class TestDefaultLimits(base.V1_1Base):

    config_file = 'wsgi_mongodb_default_limits.conf'

    def setUp(self):
        super(TestDefaultLimits, self).setUp()

        self.headers = {
            'Client-ID': uuidutils.generate_uuid(),
            'X-Project-ID': '%s_' % uuidutils.generate_uuid()
        }
        self.queue_path = self.url_prefix + '/queues'
        self.q1_queue_path = self.queue_path + '/' + uuidutils.generate_uuid()
        self.messages_path = self.q1_queue_path + '/messages'
        self.claims_path = self.q1_queue_path + '/claims'

        self.simulate_put(self.q1_queue_path, headers=self.headers)
        self.assertEqual(falcon.HTTP_201, self.srmock.status)

    def tearDown(self):
        self.simulate_delete(self.queue_path, headers=self.headers)
        super(TestDefaultLimits, self).tearDown()

    def test_queue_listing(self):
        # 2 queues to list
        self.simulate_put(self.queue_path + '/q2', headers=self.headers)
        self.assertEqual(falcon.HTTP_201, self.srmock.status)

        with self._prepare_queues(storage.DEFAULT_QUEUES_PER_PAGE + 1):
            result = self.simulate_get(self.queue_path, headers=self.headers)
            self.assertEqual(falcon.HTTP_200, self.srmock.status)

            queues = jsonutils.loads(result[0])['queues']
            self.assertEqual(storage.DEFAULT_QUEUES_PER_PAGE, len(queues))

    def test_message_listing_different_id(self):
        self._prepare_messages(storage.DEFAULT_MESSAGES_PER_PAGE + 1)

        headers = self.headers.copy()
        headers['Client-ID'] = uuidutils.generate_uuid()
        result = self.simulate_get(self.messages_path,
                                   headers=headers,
                                   query_string='echo=false')

        self.assertEqual(falcon.HTTP_200, self.srmock.status)
        messages = jsonutils.loads(result[0])['messages']
        self.assertEqual(storage.DEFAULT_MESSAGES_PER_PAGE, len(messages))

    def test_message_listing_same_id(self):
        self._prepare_messages(storage.DEFAULT_MESSAGES_PER_PAGE + 1)
        result = self.simulate_get(self.messages_path,
                                   headers=self.headers,
                                   query_string='echo=false')
        self.assertEqual(falcon.HTTP_200, self.srmock.status)
        self._empty_message_list(result)

        self._prepare_messages(storage.DEFAULT_MESSAGES_PER_PAGE + 1)
        result = self.simulate_get(self.messages_path,
                                   headers=self.headers,
                                   query_string='echo=true')

        messages = jsonutils.loads(result[0])['messages']
        self.assertEqual(storage.DEFAULT_MESSAGES_PER_PAGE, len(messages))

    def test_claim_creation(self):
        self._prepare_messages(storage.DEFAULT_MESSAGES_PER_CLAIM + 1)

        result = self.simulate_post(self.claims_path,
                                    body='{"ttl": 60, "grace": 60}',
                                    headers=self.headers)

        self.assertEqual(falcon.HTTP_201, self.srmock.status)
        messages = jsonutils.loads(result[0])['messages']
        self.assertEqual(storage.DEFAULT_MESSAGES_PER_CLAIM, len(messages))

    @contextlib.contextmanager
    def _prepare_queues(self, count):
        queue_paths = [self.queue_path + '/multi-{0}'.format(i)
                       for i in range(count)]

        for path in queue_paths:
            self.simulate_put(path, headers=self.headers)
            self.assertEqual(falcon.HTTP_201, self.srmock.status)

        yield

        for path in queue_paths:
            self.simulate_delete(path, headers=self.headers)

    def _prepare_messages(self, count):
        doc = {'messages': [{'body': 239, 'ttl': 300}] * count}
        body = jsonutils.dumps(doc)
        self.simulate_post(self.messages_path, body=body,
                           headers=self.headers)
        self.assertEqual(falcon.HTTP_201, self.srmock.status)


zaqar-20.1.0.dev29/zaqar/tests/unit/transport/wsgi/v1_1/test_health.py
# Copyright 2014 Catalyst IT Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from unittest import mock

import ddt
import falcon
from oslo_serialization import jsonutils

from zaqar.storage import errors
import zaqar.storage.mongodb as mongo
from zaqar import tests as testing
from zaqar.tests.unit.transport.wsgi import base


@ddt.ddt
class TestHealthMongoDB(base.V1_1Base):

    config_file = 'wsgi_mongodb.conf'

    @testing.requires_mongodb
    def setUp(self):
        super(TestHealthMongoDB, self).setUp()

    def test_basic(self):
        path = self.url_prefix + '/health'
        body = self.simulate_get(path)
        health = jsonutils.loads(body[0])
        self.assertEqual(falcon.HTTP_200, self.srmock.status)
        self.assertTrue(health['storage_reachable'])
        self.assertIsNotNone(health['message_volume'])
        for op in health['operation_status']:
            self.assertTrue(health['operation_status'][op]['succeeded'])

    @mock.patch.object(mongo.driver.DataDriver, '_health')
    def test_message_volume(self, mock_driver_get):
        def _health():
            KPI = {}
            KPI['message_volume'] = {'free': 1, 'claimed': 2, 'total': 3}
            return KPI

        mock_driver_get.side_effect = _health

        path = self.url_prefix + '/health'
        body = self.simulate_get(path)
        health = jsonutils.loads(body[0])
        self.assertEqual(falcon.HTTP_200, self.srmock.status)
        message_volume = health['message_volume']
        self.assertEqual(1, message_volume['free'])
        self.assertEqual(2, message_volume['claimed'])
        self.assertEqual(3, message_volume['total'])

    @mock.patch.object(mongo.messages.MessageController, 'delete')
    def test_operation_status(self, mock_messages_delete):
        mock_messages_delete.side_effect = errors.NotPermitted()

        path = self.url_prefix + '/health'
        body = self.simulate_get(path)
        health = jsonutils.loads(body[0])
        self.assertEqual(falcon.HTTP_200, self.srmock.status)
        op_status = health['operation_status']
        for op in op_status.keys():
            if op == 'delete_messages':
                self.assertFalse(op_status[op]['succeeded'])
                self.assertIsNotNone(op_status[op]['ref'])
            else:
                self.assertTrue(op_status[op]['succeeded'])


class TestHealthFaultyDriver(base.V1_1BaseFaulty):

    config_file = 'wsgi_faulty.conf'

    def test_simple(self):
        path = self.url_prefix + '/health'
        self.simulate_get(path)
        self.assertEqual(falcon.HTTP_503, self.srmock.status)


zaqar-20.1.0.dev29/zaqar/tests/unit/transport/wsgi/v1_1/test_home.py

# Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import falcon
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from urllib import parse as urlparse

from zaqar.tests.unit.transport.wsgi import base


class TestHomeDocument(base.V1_1Base):

    config_file = 'wsgi_mongodb.conf'

    def test_json_response(self):
        self.headers = {
            'Client-ID': uuidutils.generate_uuid(),
            'X-Project-ID': '8383830383abc_'
        }
        body = self.simulate_get(self.url_prefix + '/', headers=self.headers)
        self.assertEqual(falcon.HTTP_200, self.srmock.status)

        content_type = self.srmock.headers_dict['Content-Type']
        self.assertEqual('application/json-home', content_type)

        try:
            jsonutils.loads(body[0])
        except ValueError:
            self.fail('Home document is not valid JSON')

    def test_href_template(self):
        self.headers = {
            'Client-ID': uuidutils.generate_uuid(),
            'X-Project-ID': '8383830383'
        }
        body = self.simulate_get(self.url_prefix + '/', headers=self.headers)
        self.assertEqual(falcon.HTTP_200, self.srmock.status)
        resp = jsonutils.loads(body[0])
        queue_href_template = resp['resources']['rel/queue']['href-template']
        path_1 = 'https://zaqar.example.com' + self.url_prefix
        path_2 = 'https://zaqar.example.com' + self.url_prefix + '/'

        # Verify all the href template start with the correct version prefix
        def get_href_or_template(resource):
            return resource.get('href-template', '') or resource['href']

        for resource in list(resp['resources']):
            self.assertTrue(
                get_href_or_template(resp['resources'][resource]).
                startswith(self.url_prefix))

        url = urlparse.urljoin(path_1, queue_href_template)
        expected = ('https://zaqar.example.com' + self.url_prefix +
                    '/queues/foo')
        self.assertEqual(expected, url.format(queue_name='foo'))

        url = urlparse.urljoin(path_2, queue_href_template)
        self.assertEqual(expected, url.format(queue_name='foo'))


zaqar-20.1.0.dev29/zaqar/tests/unit/transport/wsgi/v1_1/test_media_type.py

# Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid

import falcon
from falcon import testing
from oslo_serialization import jsonutils

from zaqar.tests.unit.transport.wsgi import base


class TestMediaType(base.V1_1Base):

    config_file = 'wsgi_mongodb.conf'

    def test_json_only_endpoints_with_wrong_accept_header(self):
        endpoints = (
            ('GET', self.url_prefix + '/queues'),
            ('GET', self.url_prefix + '/queues/nonexistent/stats'),
            ('POST', self.url_prefix + '/queues/nonexistent/messages'),
            ('GET', self.url_prefix + '/queues/nonexistent/messages/deadbeaf'),
            ('POST', self.url_prefix + '/queues/nonexistent/claims'),
            ('GET', self.url_prefix + '/queues/nonexistent/claims/0ad'),
            ('GET', self.url_prefix + '/health'),
        )

        for method, endpoint in endpoints:
            headers = {
                'Client-ID': str(uuid.uuid4()),
                'Accept': 'application/xml',
            }

            env = testing.create_environ(endpoint,
                                         method=method,
                                         headers=headers)

            self.app(env, self.srmock)
            self.assertEqual(falcon.HTTP_406, self.srmock.status)

    def test_request_with_body_and_urlencoded_contenttype_header_fails(self):
        # NOTE(Eva-i): this test case makes sure wsgi 'before' hook
        # "require_content_type_be_non_urlencoded" works to prevent
        # bug/1547100.
        eww_queue_path = self.url_prefix + '/queues/eww'
        eww_queue_messages_path = eww_queue_path + '/messages'
        sample_message = jsonutils.dumps({'messages': [{'body': {'eww!'},
                                                        'ttl': 200}]})
        bad_headers = {
            'Client-ID': str(uuid.uuid4()),
            'Content-Type': 'application/x-www-form-urlencoded',
        }

        # Create queue request with bad headers. Should still work, because
        # it has no body.
        self.simulate_put(eww_queue_path, headers=bad_headers)
        self.addCleanup(self.simulate_delete, eww_queue_path,
                        headers=self.headers)
        self.assertEqual(falcon.HTTP_201, self.srmock.status)

        # Post message request with good headers. Should work.
        self.simulate_post(eww_queue_messages_path, body=sample_message,
                           headers=self.headers)
        self.assertEqual(falcon.HTTP_201, self.srmock.status)

        # Post message request with bad headers. Should not work.
        self.simulate_post(eww_queue_messages_path, body=sample_message,
                           headers=bad_headers)
        self.assertEqual(falcon.HTTP_400, self.srmock.status)


zaqar-20.1.0.dev29/zaqar/tests/unit/transport/wsgi/v1_1/test_messages.py

# Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from unittest import mock
import uuid

import ddt
import falcon
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from testtools import matchers

from zaqar import tests as testing
from zaqar.tests.unit.transport.wsgi import base
from zaqar.transport import validation


@ddt.ddt
class TestMessagesMongoDB(base.V1_1Base):

    config_file = 'wsgi_mongodb.conf'

    @testing.requires_mongodb
    def setUp(self):
        super(TestMessagesMongoDB, self).setUp()

        self.default_message_ttl = self.boot.transport._defaults.message_ttl

        if self.conf.pooling:
            for i in range(4):
                uri = "%s/%s" % (self.mongodb_url, str(i))
                doc = {'weight': 100, 'uri': uri}
                self.simulate_put(self.url_prefix + '/pools/' + str(i),
                                  body=jsonutils.dumps(doc))
                self.assertEqual(falcon.HTTP_201, self.srmock.status)

        self.project_id = '7e55e1a7e'
        self.headers = {
            'Client-ID': str(uuid.uuid4()),
            'X-Project-ID': self.project_id
        }

        # TODO(kgriffs): Add support in self.simulate_* for a "base path"
        # so that we don't have to concatenate against self.url_prefix
        # all over the place.
        self.queue_path = self.url_prefix + '/queues/fizbit'
        self.messages_path = self.queue_path + '/messages'

        doc = '{"_ttl": 60}'
        self.simulate_put(self.queue_path, body=doc, headers=self.headers)

    def tearDown(self):
        self.simulate_delete(self.queue_path, headers=self.headers)
        if self.conf.pooling:
            for i in range(4):
                self.simulate_delete(self.url_prefix + '/pools/' + str(i),
                                     headers=self.headers)

        super(TestMessagesMongoDB, self).tearDown()

    def test_name_restrictions(self):
        sample_messages = [
            {'body': {'key': 'value'}, 'ttl': 200},
        ]
        messages_path = self.url_prefix + '/queues/%s/messages'
        sample_doc = jsonutils.dumps({'messages': sample_messages})

        self.simulate_post(messages_path % 'Nice-Boat_2',
                           body=sample_doc, headers=self.headers)
        self.assertEqual(falcon.HTTP_201, self.srmock.status)

        self.simulate_post(messages_path % 'Nice-Bo@t',
                           body=sample_doc, headers=self.headers)
        self.assertEqual(falcon.HTTP_400, self.srmock.status)

        self.simulate_post(messages_path % ('_niceboat' * 8),
                           body=sample_doc, headers=self.headers)
        self.assertEqual(falcon.HTTP_400, self.srmock.status)

    def _test_post(self, sample_messages):
        sample_doc = jsonutils.dumps({'messages': sample_messages})

        result = self.simulate_post(self.messages_path,
                                    body=sample_doc, headers=self.headers)
        self.assertEqual(falcon.HTTP_201, self.srmock.status)

        result_doc = jsonutils.loads(result[0])

        msg_ids = self._get_msg_ids(self.srmock.headers_dict)
        self.assertEqual(len(sample_messages), len(msg_ids))

        expected_resources = [str(self.messages_path + '/' + id)
                              for id in msg_ids]
        self.assertEqual(expected_resources, result_doc['resources'])

        # NOTE(kgriffs): As of v1.1, "partial" is no longer given
        # in the response document.
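        # The assertion below guards against a regression to the older
        # behavior.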
        self.assertNotIn('partial', result_doc)

        self.assertEqual(len(sample_messages), len(msg_ids))

        lookup = dict([(m['ttl'], m['body']) for m in sample_messages])

        # Test GET on the message resource directly
        # NOTE(cpp-cabrera): force the passing of time to age a message
        timeutils_utcnow = 'oslo_utils.timeutils.utcnow'
        now = timeutils.utcnow() + datetime.timedelta(seconds=10)
        with mock.patch(timeutils_utcnow) as mock_utcnow:
            mock_utcnow.return_value = now

            for msg_id in msg_ids:
                message_uri = self.messages_path + '/' + msg_id

                headers = self.headers.copy()
                headers['X-Project-ID'] = '777777'
                # Wrong project ID
                self.simulate_get(message_uri, headers=headers)
                self.assertEqual(falcon.HTTP_404, self.srmock.status)

                # Correct project ID
                result = self.simulate_get(message_uri, headers=self.headers)
                self.assertEqual(falcon.HTTP_200, self.srmock.status)

                # Check message properties
                message = jsonutils.loads(result[0])
                self.assertEqual(message_uri, message['href'])
                self.assertEqual(lookup[message['ttl']], message['body'])
                self.assertEqual(msg_id, message['id'])

                # no negative age
                # NOTE(cpp-cabrera): testtools lacks GreaterThanEqual on py26
                self.assertThat(message['age'], matchers.GreaterThan(-1))

        # Test bulk GET
        query_string = 'ids=' + ','.join(msg_ids)
        result = self.simulate_get(self.messages_path,
                                   query_string=query_string,
                                   headers=self.headers)

        self.assertEqual(falcon.HTTP_200, self.srmock.status)
        result_doc = jsonutils.loads(result[0])
        expected_ttls = set(m['ttl'] for m in sample_messages)
        actual_ttls = set(m['ttl'] for m in result_doc['messages'])
        self.assertFalse(expected_ttls - actual_ttls)

        actual_ids = set(m['id'] for m in result_doc['messages'])
        self.assertFalse(set(msg_ids) - actual_ids)

    def test_exceeded_payloads(self):
        # Get a valid message id
        self._post_messages(self.messages_path)
        msg_id = self._get_msg_id(self.srmock.headers_dict)

        # Bulk GET restriction
        query_string = 'ids=' + ','.join([msg_id] * 21)
        self.simulate_get(self.messages_path,
                          query_string=query_string, headers=self.headers)

        self.assertEqual(falcon.HTTP_400, self.srmock.status)

        # Listing restriction
        self.simulate_get(self.messages_path,
                          query_string='limit=21',
                          headers=self.headers)

        self.assertEqual(falcon.HTTP_400, self.srmock.status)

        # Bulk deletion restriction
        query_string = 'ids=' + ','.join([msg_id] * 22)
        self.simulate_delete(self.messages_path,
                             query_string=query_string,
                             headers=self.headers)

        self.assertEqual(falcon.HTTP_400, self.srmock.status)

    def test_post_single(self):
        sample_messages = [
            {'body': {'key': 'value'}, 'ttl': 200},
        ]

        self._test_post(sample_messages)

    def test_post_multiple(self):
        sample_messages = [
            {'body': 239, 'ttl': 100},
            {'body': {'key': 'value'}, 'ttl': 200},
            {'body': [1, 3], 'ttl': 300},
        ]

        self._test_post(sample_messages)

    def test_post_optional_ttl(self):
        sample_messages = {
            'messages': [
                {'body': 239},
                {'body': {'key': 'value'}, 'ttl': 200},
            ],
        }

        # Manually check default TTL is max from config
        sample_doc = jsonutils.dumps(sample_messages)
        result = self.simulate_post(self.messages_path,
                                    body=sample_doc, headers=self.headers)

        self.assertEqual(falcon.HTTP_201, self.srmock.status)
        result_doc = jsonutils.loads(result[0])

        href = result_doc['resources'][0]
        result = self.simulate_get(href, headers=self.headers)
        message = jsonutils.loads(result[0])

        self.assertEqual(self.default_message_ttl, message['ttl'])

    def test_post_to_non_ascii_queue(self):
        # NOTE(kgriffs): This test verifies that routes with
        # embedded queue name params go through the validation
        # hook, regardless of the target resource.
        path = self.url_prefix + '/queues/non-ascii-n\u0153me/messages'

        self._post_messages(path)
        self.assertEqual(falcon.HTTP_400, self.srmock.status)

    def test_post_with_long_queue_name(self):
        # NOTE(kgriffs): This test verifies that routes with
        # embedded queue name params go through the validation
        # hook, regardless of the target resource.
        queues_path = self.url_prefix + '/queues/'

        game_title = 'v' * validation.QUEUE_NAME_MAX_LEN
        self.addCleanup(
            self.simulate_delete, queues_path + game_title,
            headers=self.headers)
        self._post_messages(queues_path + game_title + '/messages')
        self.assertEqual(falcon.HTTP_201, self.srmock.status)

        game_title += 'v'
        self._post_messages(queues_path + game_title + '/messages')
        self.assertEqual(falcon.HTTP_400, self.srmock.status)

    def test_post_to_missing_queue(self):
        self.addCleanup(
            self.simulate_delete, self.url_prefix + '/queues/nonexistent',
            headers=self.headers)
        self._post_messages(self.url_prefix + '/queues/nonexistent/messages')
        self.assertEqual(falcon.HTTP_201, self.srmock.status)

    def test_get_from_missing_queue(self):
        body = self.simulate_get(self.url_prefix +
                                 '/queues/nonexistent/messages',
                                 headers=self.headers)
        self.assertEqual(falcon.HTTP_200, self.srmock.status)
        self._empty_message_list(body)

    @ddt.data('', '0xdeadbeef', '550893e0-2b6e-11e3-835a-5cf9dd72369')
    def test_bad_client_id(self, text_id):
        self.simulate_post(self.queue_path + '/messages',
                           body='{"ttl": 60, "body": ""}',
                           headers={'Client-ID': text_id})

        self.assertEqual(falcon.HTTP_400, self.srmock.status)

        self.simulate_get(self.queue_path + '/messages',
                          query_string='limit=3&echo=true',
                          headers={'Client-ID': text_id})

        self.assertEqual(falcon.HTTP_400, self.srmock.status)

    @ddt.data(None, '[', '[]', '{}', '.')
    def test_post_bad_message(self, document):
        self.simulate_post(self.queue_path + '/messages',
                           body=document,
                           headers=self.headers)

        self.assertEqual(falcon.HTTP_400, self.srmock.status)

    @ddt.data(-1, 59, 1209601)
    def test_unacceptable_ttl(self, ttl):
        doc = {'messages': [{'ttl': ttl, 'body': None}]}

        self.simulate_post(self.queue_path + '/messages',
                           body=jsonutils.dumps(doc),
                           headers=self.headers)

        self.assertEqual(falcon.HTTP_400, self.srmock.status)

    def test_exceeded_message_posting(self):
        # Total (raw request) size
        doc = {'messages': [{'body': "some body", 'ttl': 100}] * 20}
        body = jsonutils.dumps(doc, indent=4)

        max_len = self.transport_cfg.max_messages_post_size
        long_body = body + (' ' * (max_len - len(body) + 1))

        self.simulate_post(self.queue_path + '/messages',
                           body=long_body,
                           headers=self.headers)

        self.assertEqual(falcon.HTTP_400, self.srmock.status)

    @ddt.data('{"overflow": 9223372036854775808}',
              '{"underflow": -9223372036854775809}')
    def test_unsupported_json(self, document):
        self.simulate_post(self.queue_path + '/messages',
                           body=document,
                           headers=self.headers)

        self.assertEqual(falcon.HTTP_400, self.srmock.status)

    def test_delete(self):
        self._post_messages(self.messages_path)
        msg_id = self._get_msg_id(self.srmock.headers_dict)

        target = self.messages_path + '/' + msg_id

        self.simulate_get(target, headers=self.headers)
        self.assertEqual(falcon.HTTP_200, self.srmock.status)

        self.simulate_delete(target, headers=self.headers)
        self.assertEqual(falcon.HTTP_204, self.srmock.status)

        self.simulate_get(target, headers=self.headers)
        self.assertEqual(falcon.HTTP_404, self.srmock.status)

        # Safe to delete non-existing ones
        self.simulate_delete(target, headers=self.headers)
        self.assertEqual(falcon.HTTP_204, self.srmock.status)

    def test_bulk_delete(self):
        path = self.queue_path + '/messages'
        self._post_messages(path, repeat=5)
        [target, params] = self.srmock.headers_dict['location'].split('?')

        # Deleting the whole collection is denied
        self.simulate_delete(path, headers=self.headers)
        self.assertEqual(falcon.HTTP_400, self.srmock.status)

        self.simulate_delete(target, query_string=params,
                             headers=self.headers)
        self.assertEqual(falcon.HTTP_204, self.srmock.status)

        self.simulate_get(target, query_string=params,
                          headers=self.headers)
        self.assertEqual(falcon.HTTP_404, self.srmock.status)

        # Safe to delete non-existing ones
        self.simulate_delete(target, query_string=params,
                             headers=self.headers)
        self.assertEqual(falcon.HTTP_204, self.srmock.status)

        # Even after the queue is gone
        self.simulate_delete(self.queue_path, headers=self.headers)
        self.assertEqual(falcon.HTTP_204, self.srmock.status)

        self.simulate_delete(target, query_string=params,
                             headers=self.headers)
        self.assertEqual(falcon.HTTP_204, self.srmock.status)

    def test_list(self):
        path = self.queue_path + '/messages'
        self._post_messages(path, repeat=10)

        query_string = 'limit=3&echo=true'
        body = self.simulate_get(path,
                                 query_string=query_string,
                                 headers=self.headers)

        self.assertEqual(falcon.HTTP_200, self.srmock.status)

        cnt = 0
        while jsonutils.loads(body[0])['messages'] != []:
            contents = jsonutils.loads(body[0])
            [target, params] = contents['links'][0]['href'].split('?')

            for msg in contents['messages']:
                self.simulate_get(msg['href'], headers=self.headers)
                self.assertEqual(falcon.HTTP_200, self.srmock.status)

            body = self.simulate_get(target,
                                     query_string=params,
                                     headers=self.headers)
            cnt += 1

        self.assertEqual(4, cnt)
        self.assertEqual(falcon.HTTP_200, self.srmock.status)
        self._empty_message_list(body)

        # Stats
        body = self.simulate_get(self.queue_path + '/stats',
                                 headers=self.headers)
        self.assertEqual(falcon.HTTP_200, self.srmock.status)

        message_stats = jsonutils.loads(body[0])['messages']

        # NOTE(kgriffs): The other parts of the stats are tested
        # in tests.storage.base and so are not repeated here.
        expected_pattern = self.queue_path + '/messages/[^/]+$'
        for message_stat_name in ('oldest', 'newest'):
            self.assertThat(message_stats[message_stat_name]['href'],
                            matchers.MatchesRegex(expected_pattern))

        # NOTE(kgriffs): Try to get messages for a missing queue
        body = self.simulate_get(self.url_prefix +
                                 '/queues/nonexistent/messages',
                                 headers=self.headers)
        self.assertEqual(falcon.HTTP_200, self.srmock.status)
        self._empty_message_list(body)

    def test_list_with_bad_marker(self):
        path = self.queue_path + '/messages'
        self._post_messages(path, repeat=5)

        query_string = 'limit=3&echo=true&marker=sfhlsfdjh2048'
        body = self.simulate_get(path,
                                 query_string=query_string,
                                 headers=self.headers)

        self.assertEqual(falcon.HTTP_200, self.srmock.status)
        self._empty_message_list(body)

    def test_no_uuid(self):
        headers = {
            'Client-ID': "textid",
            'X-Project-ID': '7e7e7e'
        }
        path = self.queue_path + '/messages'

        self.simulate_post(path, body='[{"body": 0, "ttl": 100}]',
                           headers=headers)
        self.assertEqual(falcon.HTTP_400, self.srmock.status)

        self.simulate_get(path, headers=headers)
        self.assertEqual(falcon.HTTP_400, self.srmock.status)

    def test_get_claimed_contains_claim_id_in_href(self):
        path = self.queue_path
        res = self._post_messages(path + '/messages', repeat=5)
        for url in jsonutils.loads(res[0])['resources']:
            message = self.simulate_get(url)
            self.assertNotIn('claim_id', jsonutils.loads(message[0])['href'])

        self.simulate_post(path + '/claims',
                           body='{"ttl": 100, "grace": 100}',
                           headers=self.headers)
        self.assertEqual(falcon.HTTP_201, self.srmock.status)
        for url in jsonutils.loads(res[0])['resources']:
            message = self.simulate_get(url)
            self.assertIn('claim_id', jsonutils.loads(message[0])['href'])

    # NOTE(cpp-cabrera): regression test against bug #1210633
    def test_when_claim_deleted_then_messages_unclaimed(self):
        path = self.queue_path
        self._post_messages(path + '/messages', repeat=5)

        # post claim
        self.simulate_post(path + '/claims',
                           body='{"ttl": 100, "grace": 100}',
                           headers=self.headers)
        self.assertEqual(falcon.HTTP_201, self.srmock.status)
        location = self.srmock.headers_dict['location']

        # release claim
        self.simulate_delete(location, headers=self.headers)
        self.assertEqual(falcon.HTTP_204, self.srmock.status)

        # get unclaimed messages
        self.simulate_get(path + '/messages',
                          query_string='echo=true',
                          headers=self.headers)
        self.assertEqual(falcon.HTTP_200, self.srmock.status)

    # NOTE(cpp-cabrera): regression test against bug #1203842
    def test_get_nonexistent_message_404s(self):
        path = self.url_prefix + '/queues/notthere/messages/a'
        self.simulate_get(path, headers=self.headers)
        self.assertEqual(falcon.HTTP_404, self.srmock.status)

    def test_get_multiple_invalid_messages_404s(self):
        path = self.url_prefix + '/queues/notthere/messages'
        self.simulate_get(path, query_string='ids=a,b,c',
                          headers=self.headers)
        self.assertEqual(falcon.HTTP_404, self.srmock.status)

    def test_delete_multiple_invalid_messages_204s(self):
        path = self.url_prefix + '/queues/notthere/messages'
        self.simulate_delete(path, query_string='ids=a,b,c',
                             headers=self.headers)
        self.assertEqual(falcon.HTTP_204, self.srmock.status)

    def test_delete_message_with_invalid_claim_doesnt_delete_message(self):
        path = self.queue_path
        resp = self._post_messages(path + '/messages', 1)
        location = jsonutils.loads(resp[0])['resources'][0]

        self.simulate_delete(location, query_string='claim_id=invalid',
                             headers=self.headers)
        self.assertEqual(falcon.HTTP_400, self.srmock.status)

        self.simulate_get(location, headers=self.headers)
        self.assertEqual(falcon.HTTP_200, self.srmock.status)
    def test_no_duplicated_messages_path_in_href(self):
        """Test for bug 1240897."""
        path = self.queue_path + '/messages'
        self._post_messages(path, repeat=1)

        msg_id = self._get_msg_id(self.srmock.headers_dict)

        query_string = 'ids=%s' % msg_id
        body = self.simulate_get(path,
                                 query_string=query_string,
                                 headers=self.headers)
        messages = jsonutils.loads(body[0])

        self.assertNotIn(self.queue_path + '/messages/messages',
                         messages['messages'][0]['href'])

    def _post_messages(self, target, repeat=1):
        doc = {'messages': [{'body': 239, 'ttl': 300}] * repeat}

        body = jsonutils.dumps(doc)
        return self.simulate_post(target, body=body, headers=self.headers)

    def _get_msg_id(self, headers):
        return self._get_msg_ids(headers)[0]

    def _get_msg_ids(self, headers):
        return headers['location'].rsplit('=', 1)[-1].split(',')

    @ddt.data(1, 2, 10)
    def test_pop(self, message_count):
        self._post_messages(self.messages_path, repeat=message_count)
        msg_id = self._get_msg_id(self.srmock.headers_dict)
        target = self.messages_path + '/' + msg_id

        self.simulate_get(target, self.project_id)
        self.assertEqual(falcon.HTTP_200, self.srmock.status)

        query_string = 'pop=' + str(message_count)
        result = self.simulate_delete(self.messages_path, self.project_id,
                                      query_string=query_string)
        self.assertEqual(falcon.HTTP_200, self.srmock.status)

        result_doc = jsonutils.loads(result[0])

        self.assertEqual(message_count, len(result_doc['messages']))

        self.simulate_get(target, self.project_id)
        self.assertEqual(falcon.HTTP_404, self.srmock.status)

    @ddt.data('', 'pop=1000000', 'pop=10&ids=1', 'pop=-1')
    def test_pop_invalid(self, query_string):
        self.simulate_delete(self.messages_path, self.project_id,
                             query_string=query_string)
        self.assertEqual(falcon.HTTP_400, self.srmock.status)

    def test_pop_empty_queue(self):
        query_string = 'pop=1'
        result = self.simulate_delete(self.messages_path, self.project_id,
                                      query_string=query_string)
        self.assertEqual(falcon.HTTP_200, self.srmock.status)

        result_doc = jsonutils.loads(result[0])
        self.assertEqual([], result_doc['messages'])

    def test_pop_single_message(self):
        self._post_messages(self.messages_path, repeat=5)
        msg_id = self._get_msg_id(self.srmock.headers_dict)
        target = self.messages_path + '/' + msg_id

        self.simulate_get(target, self.project_id)
        self.assertEqual(falcon.HTTP_200, self.srmock.status)

        # Pop Single message from the queue
        query_string = 'pop=1'
        result = self.simulate_delete(self.messages_path, self.project_id,
                                      query_string=query_string)
        self.assertEqual(falcon.HTTP_200, self.srmock.status)

        # Get messages from the queue & verify message count
        query_string = 'echo=True'
        result = self.simulate_get(self.messages_path, self.project_id,
                                   query_string=query_string,
                                   headers=self.headers)
        result_doc = jsonutils.loads(result[0])
        actual_msg_count = len(result_doc['messages'])
        expected_msg_count = 4
        self.assertEqual(expected_msg_count, actual_msg_count)
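# Illustrative sketch (not part of the zaqar source tree): the pop tests
# above drive DELETE /messages?pop=N, which atomically claims and removes up
# to N messages in a single round trip. Assuming the 'requests' library and
# a local endpoint (both assumptions), an equivalent client helper might be:

import requests  # assumed dependency for this sketch only


def pop_messages(base_url, queue, count, project_id):
    """Atomically claim-and-delete up to `count` messages (v1.1 'pop')."""
    resp = requests.delete(
        '%s/v1.1/queues/%s/messages' % (base_url, queue),
        params={'pop': count},
        headers={'Client-ID': str(uuid.uuid4()),
                 'X-Project-ID': project_id})
    resp.raise_for_status()
    return resp.json()['messages']  # [] when the queue is already empty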

class TestMessagesMongoDBPooled(TestMessagesMongoDB):

    config_file = 'wsgi_mongodb_pooled.conf'

    # TODO(cpp-cabrera): remove this skipTest once pooled queue
    # listing is implemented
    def test_list(self):
        self.skipTest("Need to implement pooled queue listing.")


class TestMessagesFaultyDriver(base.V1_1BaseFaulty):

    config_file = 'wsgi_faulty.conf'

    def test_simple(self):
        project_id = 'xyz'
        path = self.url_prefix + '/queues/fizbit/messages'
        body = '{"messages": [{"body": 239, "ttl": 100}]}'
        headers = {
            'Client-ID': str(uuid.uuid4()),
            'X-Project-ID': project_id
        }

        self.simulate_post(path,
                           body=body,
                           headers=headers)
        self.assertEqual(falcon.HTTP_503, self.srmock.status)

        self.simulate_get(path,
                          headers=headers)
        self.assertEqual(falcon.HTTP_503, self.srmock.status)

        self.simulate_get(path + '/nonexistent', headers=headers)
        self.assertEqual(falcon.HTTP_503, self.srmock.status)

        self.simulate_delete(path + '/nada', headers=headers)
        self.assertEqual(falcon.HTTP_503, self.srmock.status)

# ===== zaqar-20.1.0.dev29/zaqar/tests/unit/transport/wsgi/v1_1/test_ping.py =====

# Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import falcon

from zaqar.tests.unit.transport.wsgi import base


class TestPing(base.V1_1Base):

    config_file = 'wsgi_mongodb.conf'

    def test_get(self):
        # TODO(kgriffs): Make use of setUp for setting the URL prefix
        # so we can just say something like:
        #
        #     response = self.simulate_get('/ping')
        response = self.simulate_get('/v1.1/ping')
        self.assertEqual(falcon.HTTP_204, self.srmock.status)
        self.assertEqual([], response)

    def test_head(self):
        response = self.simulate_head('/v1.1/ping')
        self.assertEqual(falcon.HTTP_204, self.srmock.status)
        self.assertEqual([], response)
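# Illustrative sketch (not part of the zaqar source tree): as TestPing shows,
# /v1.1/ping answers 204 with an empty body whenever the transport is up, so
# a liveness probe needs nothing more than this (base URL is an assumption):

import urllib.request


def zaqar_is_alive(base_url='http://localhost:8888'):
    """Return True if the Zaqar WSGI transport answers its ping endpoint."""
    try:
        with urllib.request.urlopen(base_url + '/v1.1/ping') as resp:
            return resp.getcode() == 204
    except OSError:
        return False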
# ===== zaqar-20.1.0.dev29/zaqar/tests/unit/transport/wsgi/v1_1/test_queue_lifecycle.py =====

# Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.

from unittest import mock

import ddt
import falcon
from oslo_serialization import jsonutils
from oslo_utils import uuidutils

from zaqar.storage import errors as storage_errors
from zaqar import tests as testing
from zaqar.tests.unit.transport.wsgi import base


@ddt.ddt
class TestQueueLifecycleMongoDB(base.V1_1Base):

    config_file = 'wsgi_mongodb.conf'

    @testing.requires_mongodb
    def setUp(self):
        super(TestQueueLifecycleMongoDB, self).setUp()

        self.queue_path = self.url_prefix + '/queues'
        self.gumshoe_queue_path = self.queue_path + '/gumshoe'
        self.fizbat_queue_path = self.queue_path + '/fizbat'

        self.headers = {
            'Client-ID': uuidutils.generate_uuid(),
            'X-Project-ID': '3387309841abc_'
        }

    def tearDown(self):
        storage = self.boot.storage._storage
        connection = storage.connection

        connection.drop_database(self.boot.control.queues_database)

        for db in storage.message_databases:
            connection.drop_database(db)

        super(TestQueueLifecycleMongoDB, self).tearDown()

    def test_empty_project_id(self):
        headers = {
            'Client-ID': uuidutils.generate_uuid(),
            'X-Project-ID': ''
        }

        self.simulate_put(self.gumshoe_queue_path, headers=headers)
        self.assertEqual(falcon.HTTP_400, self.srmock.status)

        self.simulate_delete(self.gumshoe_queue_path, headers=headers)
        self.assertEqual(falcon.HTTP_400, self.srmock.status)

    @ddt.data('480924', 'foo')
    def test_basics_thoroughly(self, project_id):
        headers = {
            'Client-ID': uuidutils.generate_uuid(),
            'X-Project-ID': project_id
        }
        gumshoe_queue_path_stats = self.gumshoe_queue_path + '/stats'

        # Stats are empty - queue not created yet
        self.simulate_get(gumshoe_queue_path_stats, headers=headers)
        self.assertEqual(falcon.HTTP_200, self.srmock.status)

        # Create
        doc = '{"messages": {"ttl": 600}}'
        self.simulate_put(self.gumshoe_queue_path,
                          headers=headers, body=doc)
        self.assertEqual(falcon.HTTP_201, self.srmock.status)

        location = self.srmock.headers_dict['Location']
        self.assertEqual(self.gumshoe_queue_path, location)

        # Fetch metadata
        result = self.simulate_get(self.gumshoe_queue_path,
                                   headers=headers)
        result_doc = jsonutils.loads(result[0])
        self.assertEqual(falcon.HTTP_200, self.srmock.status)
        self.assertEqual(jsonutils.loads(doc), result_doc)

        # Stats empty queue
        self.simulate_get(gumshoe_queue_path_stats, headers=headers)
        self.assertEqual(falcon.HTTP_200, self.srmock.status)

        # Delete
        self.simulate_delete(self.gumshoe_queue_path, headers=headers)
        self.assertEqual(falcon.HTTP_204, self.srmock.status)

        # Get non-existent stats
        self.simulate_get(gumshoe_queue_path_stats, headers=headers)
        self.assertEqual(falcon.HTTP_200, self.srmock.status)

    def test_name_restrictions(self):
        self.simulate_put(self.queue_path + '/Nice-Boat_2',
                          headers=self.headers)
        self.assertEqual(falcon.HTTP_201, self.srmock.status)

        self.simulate_put(self.queue_path + '/Nice-Bo@t',
                          headers=self.headers)
        self.assertEqual(falcon.HTTP_400, self.srmock.status)

        self.simulate_put(self.queue_path + '/_' + 'niceboat' * 8,
                          headers=self.headers)
        self.assertEqual(falcon.HTTP_400, self.srmock.status)

    def test_project_id_restriction(self):
        muvluv_queue_path = self.queue_path + '/Muv-Luv'

        self.simulate_put(muvluv_queue_path,
                          headers={'Client-ID': uuidutils.generate_uuid(),
                                   'X-Project-ID': 'JAM Project' * 24})
        self.assertEqual(falcon.HTTP_400, self.srmock.status)

        # no charset restrictions
        self.simulate_put(muvluv_queue_path,
                          headers={'Client-ID': uuidutils.generate_uuid(),
                                   'X-Project-ID': 'JAM Project'})
        self.assertEqual(falcon.HTTP_201, self.srmock.status)

    def test_non_ascii_name(self):
        test_params = (('/queues/non-ascii-n\u0153me', 'utf-8'),
                       ('/queues/non-ascii-n\xc4me', 'iso8859-1'))

        for uri, enc in test_params:
            uri = self.url_prefix + uri

            self.simulate_put(uri, headers=self.headers)
            self.assertEqual(falcon.HTTP_400, self.srmock.status)

            self.simulate_delete(uri, headers=self.headers)
            self.assertEqual(falcon.HTTP_400, self.srmock.status)

    def test_no_metadata(self):
        self.simulate_put(self.fizbat_queue_path,
                          headers=self.headers)
        self.assertEqual(falcon.HTTP_201, self.srmock.status)

        self.simulate_put(self.fizbat_queue_path, body='',
                          headers=self.headers)
        self.assertEqual(falcon.HTTP_204, self.srmock.status)

    @ddt.data('{', '[]', '.', ' ')
    def test_bad_metadata(self, document):
        self.simulate_put(self.fizbat_queue_path,
                          headers=self.headers,
                          body=document)
        self.assertEqual(falcon.HTTP_400, self.srmock.status)

    def test_too_much_metadata(self):
        self.simulate_put(self.fizbat_queue_path, headers=self.headers)
        self.assertEqual(falcon.HTTP_201, self.srmock.status)
        doc = '{{"messages": {{"ttl": 600}}, "padding": "{pad}"}}'

        max_size = self.transport_cfg.max_queue_metadata
        padding_len = max_size - (len(doc) - 10) + 1

        doc = doc.format(pad='x' * padding_len)

        self.simulate_put(self.fizbat_queue_path,
                          headers=self.headers,
                          body=doc)
        self.assertEqual(falcon.HTTP_400, self.srmock.status)

    def test_way_too_much_metadata(self):
        self.simulate_put(self.fizbat_queue_path, headers=self.headers)
        self.assertEqual(falcon.HTTP_201, self.srmock.status)
        doc = '{{"messages": {{"ttl": 600}}, "padding": "{pad}"}}'

        max_size = self.transport_cfg.max_queue_metadata
        padding_len = max_size * 100

        doc = doc.format(pad='x' * padding_len)

        self.simulate_put(self.fizbat_queue_path,
                          headers=self.headers, body=doc)
        self.assertEqual(falcon.HTTP_400, self.srmock.status)

    def test_custom_metadata(self):
        # Set
        doc = '{{"messages": {{"ttl": 600}}, "padding": "{pad}"}}'

        max_size = self.transport_cfg.max_queue_metadata
        padding_len = max_size - (len(doc) - 2)

        doc = doc.format(pad='x' * padding_len)
        self.simulate_put(self.fizbat_queue_path,
                          headers=self.headers,
                          body=doc)
        self.assertEqual(falcon.HTTP_201, self.srmock.status)

        # Get
        result = self.simulate_get(self.fizbat_queue_path,
                                   headers=self.headers)
        result_doc = jsonutils.loads(result[0])
        self.assertEqual(jsonutils.loads(doc), result_doc)
        self.assertEqual(falcon.HTTP_200, self.srmock.status)

    def test_update_metadata(self):
        self.skip("This should use patch instead")
        xyz_queue_path = self.url_prefix + '/queues/xyz'
        xyz_queue_path_metadata = xyz_queue_path

        # Create
        self.simulate_put(xyz_queue_path, headers=self.headers)
        self.assertEqual(falcon.HTTP_201, self.srmock.status)

        # Set meta
        doc1 = '{"messages": {"ttl": 600}}'
        self.simulate_put(xyz_queue_path_metadata,
                          headers=self.headers,
                          body=doc1)
        self.assertEqual(falcon.HTTP_204, self.srmock.status)

        # Update
        doc2 = '{"messages": {"ttl": 100}}'
        self.simulate_put(xyz_queue_path_metadata,
                          headers=self.headers,
                          body=doc2)
        self.assertEqual(falcon.HTTP_204, self.srmock.status)

        # Get
        result = self.simulate_get(xyz_queue_path_metadata,
                                   headers=self.headers)
        result_doc = jsonutils.loads(result[0])

        self.assertEqual(jsonutils.loads(doc2), result_doc)

    def test_list(self):
        arbitrary_number = 644079696574693
        project_id = str(arbitrary_number)
        client_id = uuidutils.generate_uuid()
        header = {
            'X-Project-ID': project_id,
            'Client-ID': client_id
        }

        # NOTE(kgriffs): It's important that this one sort after the one
        # above. This is in order to prove that bug/1236605 is fixed, and
        # stays fixed!
        alt_project_id = str(arbitrary_number + 1)

        # List empty
        result = self.simulate_get(self.queue_path, headers=header)
        self.assertEqual(falcon.HTTP_200, self.srmock.status)
        results = jsonutils.loads(result[0])
        self.assertEqual([], results['queues'])
        self.assertIn('links', results)
        self.assertEqual(0, len(results['links']))

        # Payload exceeded
        self.simulate_get(self.queue_path, headers=header,
                          query_string='limit=21')
        self.assertEqual(falcon.HTTP_400, self.srmock.status)

        # Create some
        def create_queue(name, project_id, body):
            altheader = {'Client-ID': client_id}
            if project_id is not None:
                altheader['X-Project-ID'] = project_id
            uri = self.queue_path + '/' + name
            self.simulate_put(uri, headers=altheader, body=body)

        create_queue('q1', project_id, '{"node": 31}')
        create_queue('q2', project_id, '{"node": 32}')
        create_queue('q3', project_id, '{"node": 33}')

        create_queue('q3', alt_project_id, '{"alt": 1}')

        # List (limit)
        result = self.simulate_get(self.queue_path, headers=header,
                                   query_string='limit=2')

        result_doc = jsonutils.loads(result[0])
        self.assertEqual(2, len(result_doc['queues']))

        # List (no metadata, get all)
        result = self.simulate_get(self.queue_path,
                                   headers=header, query_string='limit=5')

        result_doc = jsonutils.loads(result[0])
        [target, params] = result_doc['links'][0]['href'].split('?')

        self.assertEqual(falcon.HTTP_200, self.srmock.status)

        # Ensure we didn't pick up the queue from the alt project.
        queues = result_doc['queues']
        self.assertEqual(3, len(queues))

        # List with metadata
        result = self.simulate_get(self.queue_path, headers=header,
                                   query_string='detailed=true')

        self.assertEqual(falcon.HTTP_200, self.srmock.status)
        result_doc = jsonutils.loads(result[0])
        [target, params] = result_doc['links'][0]['href'].split('?')

        queue = result_doc['queues'][0]
        result = self.simulate_get(queue['href'], headers=header)
        result_doc = jsonutils.loads(result[0])
        self.assertEqual(queue['metadata'], result_doc)
        self.assertEqual({'node': 31}, result_doc)

        # List tail
        self.simulate_get(target, headers=header, query_string=params)
        self.assertEqual(falcon.HTTP_200, self.srmock.status)

        # List manually-constructed tail
        self.simulate_get(target, headers=header, query_string='marker=zzz')
        self.assertEqual(falcon.HTTP_200, self.srmock.status)

    def test_list_returns_503_on_nopoolfound_exception(self):
        arbitrary_number = 644079696574693
        project_id = str(arbitrary_number)
        client_id = uuidutils.generate_uuid()
        header = {
            'X-Project-ID': project_id,
            'Client-ID': client_id
        }

        queue_controller = self.boot.storage.queue_controller

        with mock.patch.object(queue_controller, 'list') as mock_queue_list:

            def queue_generator():
                raise storage_errors.NoPoolFound()

            # This generator tries to be like queue controller list generator
            # in some ways.
            def fake_generator():
                yield queue_generator()
                yield {}

            mock_queue_list.return_value = fake_generator()
            self.simulate_get(self.queue_path, headers=header)
            self.assertEqual(falcon.HTTP_503, self.srmock.status)


class TestQueueLifecycleFaultyDriver(base.V1_1BaseFaulty):

    config_file = 'wsgi_faulty.conf'

    def test_simple(self):
        self.headers = {
            'Client-ID': uuidutils.generate_uuid(),
            'X-Project-ID': '338730984abc_1'
        }

        gumshoe_queue_path = self.url_prefix + '/queues/gumshoe'
        doc = '{"messages": {"ttl": 600}}'
        self.simulate_put(gumshoe_queue_path,
                          headers=self.headers,
                          body=doc)
        self.assertEqual(falcon.HTTP_503, self.srmock.status)

        location = ('Location', gumshoe_queue_path)
        self.assertNotIn(location, self.srmock.headers)

        result = self.simulate_get(gumshoe_queue_path,
                                   headers=self.headers)
        result_doc = jsonutils.loads(result[0])
        self.assertEqual(falcon.HTTP_503, self.srmock.status)
        self.assertNotEqual(result_doc, jsonutils.loads(doc))

        self.simulate_get(gumshoe_queue_path + '/stats',
                          headers=self.headers)
        self.assertEqual(falcon.HTTP_503, self.srmock.status)

        self.simulate_get(self.url_prefix + '/queues',
                          headers=self.headers)
        self.assertEqual(falcon.HTTP_503, self.srmock.status)

        self.simulate_delete(gumshoe_queue_path, headers=self.headers)
        self.assertEqual(falcon.HTTP_503, self.srmock.status)
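# Illustrative sketch (not part of the zaqar source tree): test_list above
# pages through queue listings by splitting the 'next' href out of 'links'.
# The response shape, and a generic pager built on any get_json(path)
# callable, look roughly like this (sample values are made up):

SAMPLE_LISTING = {
    'queues': [{'name': 'q1', 'href': '/v1.1/queues/q1', 'metadata': {}},
               {'name': 'q2', 'href': '/v1.1/queues/q2', 'metadata': {}}],
    'links': [{'rel': 'next', 'href': '/v1.1/queues?marker=q2&limit=2'}],
}


def iter_queue_pages(get_json, path='/v1.1/queues?limit=2'):
    """Yield each page of queues until the server returns an empty page."""
    while True:
        page = get_json(path)
        if not page.get('queues'):
            return
        yield page['queues']
        path = page['links'][0]['href']  # follow the 'next' link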
# ===== zaqar-20.1.0.dev29/zaqar/tests/unit/transport/wsgi/v1_1/test_validation.py =====

# Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import falcon
from oslo_serialization import jsonutils
from oslo_utils import uuidutils

from zaqar.tests.unit.transport.wsgi import base


class TestValidation(base.V1_1Base):

    config_file = 'wsgi_mongodb_validation.conf'

    def setUp(self):
        super(TestValidation, self).setUp()

        self.project_id = '7e55e1a7e'

        self.queue_path = self.url_prefix + '/queues/noein'
        self.simulate_put(self.queue_path, self.project_id)

        self.headers = {
            'Client-ID': uuidutils.generate_uuid(),
        }

    def tearDown(self):
        self.simulate_delete(self.queue_path, self.project_id)
        super(TestValidation, self).tearDown()

    def test_metadata_deserialization(self):
        # Normal case
        self.simulate_put(self.queue_path,
                          self.project_id,
                          body='{"timespace": "Shangri-la"}')

        self.assertEqual(falcon.HTTP_204, self.srmock.status)

        # Too long
        max_queue_metadata = 64

        doc_tmpl = '{{"Dragon Torc":"{0}"}}'
        doc_tmpl_ws = '{{ "Dragon Torc" : "{0}" }}'  # with whitespace
        envelope_length = len(doc_tmpl.format(''))

        for tmpl in doc_tmpl, doc_tmpl_ws:
            gen = '0' * (max_queue_metadata - envelope_length + 1)
            doc = tmpl.format(gen)
            self.simulate_put(self.queue_path,
                              self.project_id,
                              body=doc)

            self.assertEqual(falcon.HTTP_400, self.srmock.status)

    def test_message_deserialization(self):
        # Normal case
        body = '{"messages": [{"body": "Dragon Knights", "ttl": 100}]}'
        self.simulate_post(self.queue_path + '/messages',
                           self.project_id, body=body,
                           headers=self.headers)

        self.assertEqual(falcon.HTTP_201, self.srmock.status)

        # Both messages' size are too long
        max_messages_post_size = 256

        obj = {'a': 0, 'b': ''}
        envelope_length = len(jsonutils.dumps(obj, separators=(',', ':')))
        obj['b'] = 'x' * (max_messages_post_size - envelope_length + 1)

        for long_body in ('a' * (max_messages_post_size - 2 + 1), obj):
            doc = jsonutils.dumps([{'body': long_body, 'ttl': 100}])
            self.simulate_post(self.queue_path + '/messages',
                               self.project_id,
                               body=doc,
                               headers=self.headers)

            self.assertEqual(falcon.HTTP_400, self.srmock.status)

    def test_request_without_client_id(self):
        # No Client-ID in headers; the request fails with a 400 error.
        empty_headers = {}
        self.simulate_put(self.queue_path,
                          self.project_id,
                          headers=empty_headers)
        self.assertEqual(falcon.HTTP_400, self.srmock.status)

    def test_queue_metadata_putting(self):
        # Test _default_message_ttl
        # TTL normal case
        queue_1 = self.url_prefix + '/queues/queue1'
        self.simulate_put(queue_1,
                          self.project_id,
                          body='{"_default_message_ttl": 60}')
        self.addCleanup(self.simulate_delete, queue_1, self.project_id,
                        headers=self.headers)
        self.assertEqual(falcon.HTTP_201, self.srmock.status)

        # TTL under min
        self.simulate_put(queue_1,
                          self.project_id,
                          body='{"_default_message_ttl": 59}')
        self.assertEqual(falcon.HTTP_400, self.srmock.status)

        # TTL over max
        self.simulate_put(queue_1,
                          self.project_id,
                          body='{"_default_message_ttl": 1209601}')
        self.assertEqual(falcon.HTTP_400, self.srmock.status)

        # Test _max_messages_post_size
        # Size normal case
        queue_2 = self.url_prefix + '/queues/queue2'
        self.simulate_put(queue_2,
                          self.project_id,
                          body='{"_max_messages_post_size": 255}')
        self.addCleanup(self.simulate_delete, queue_2, self.project_id,
                        headers=self.headers)
        self.assertEqual(falcon.HTTP_201, self.srmock.status)

        # Size over max
        self.simulate_put(queue_2,
                          self.project_id,
                          body='{"_max_messages_post_size": 257}')
        self.assertEqual(falcon.HTTP_400, self.srmock.status)

# ===== zaqar-20.1.0.dev29/zaqar/tests/unit/transport/wsgi/v2_0/__init__.py (empty) =====

# ===== zaqar-20.1.0.dev29/zaqar/tests/unit/transport/wsgi/v2_0/test_auth.py =====

# Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Test Auth."""

import falcon
from falcon import testing
from keystonemiddleware import auth_token
from oslo_utils import uuidutils

from zaqar.tests.unit.transport.wsgi import base


class TestAuth(base.V2Base):

    config_file = 'keystone_auth.conf'

    def setUp(self):
        super(TestAuth, self).setUp()
        self.headers = {'Client-ID': uuidutils.generate_uuid()}

    def test_auth_install(self):
        self.assertIsInstance(self.app._auth_app, auth_token.AuthProtocol)

    def test_non_authenticated(self):
        env = testing.create_environ(self.url_prefix + '/480924/queues/',
                                     method='GET',
                                     headers=self.headers)

        self.app(env, self.srmock)
        self.assertEqual(falcon.HTTP_401, self.srmock.status)
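# Illustrative sketch (not part of the zaqar source tree): the claim tests
# that follow revolve around two knobs -- 'ttl', how long the claim itself
# lives, and 'grace', extra lifetime granted to the claimed messages so they
# cannot expire mid-processing. test_unacceptable_ttl_or_grace below pins
# both values to the 60..43200 second range. A typical claim request body:

CLAIM_REQUEST = {
    'ttl': 300,   # seconds before the claim lapses and messages reappear
    'grace': 60,  # seconds added to each claimed message's own lifetime
}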
# ===== zaqar-20.1.0.dev29/zaqar/tests/unit/transport/wsgi/v2_0/test_claims.py =====

# Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import datetime
from unittest import mock

import ddt
import falcon
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
from testtools import matchers

from zaqar import tests as testing
from zaqar.tests.unit.transport.wsgi import base


@ddt.ddt
class TestClaimsMongoDB(base.V2Base):

    config_file = 'wsgi_mongodb.conf'

    @testing.requires_mongodb
    def setUp(self):
        super(TestClaimsMongoDB, self).setUp()

        self.default_claim_ttl = self.boot.transport._defaults.claim_ttl
        self.project_id = '737_abc8332832'
        self.headers = {
            'Client-ID': uuidutils.generate_uuid(),
            'X-Project-ID': self.project_id
        }
        self.queue_path = self.url_prefix + '/queues/fizbit'
        self.claims_path = self.queue_path + '/claims'
        self.messages_path = self.queue_path + '/messages'

        doc = jsonutils.dumps({"_ttl": 60})
        self.simulate_put(self.queue_path, body=doc, headers=self.headers)
        self.assertEqual(falcon.HTTP_201, self.srmock.status)

        doc = jsonutils.dumps({'messages': [{'body': 239, 'ttl': 300}] * 10})
        self.simulate_post(self.queue_path + '/messages', body=doc,
                           headers=self.headers)
        self.assertEqual(falcon.HTTP_201, self.srmock.status)

    def tearDown(self):
        storage = self.boot.storage._storage
        control = self.boot.control
        connection = storage.connection

        connection.drop_database(control.queues_database)

        for db in storage.message_databases:
            connection.drop_database(db)
        self.simulate_delete(self.queue_path, headers=self.headers)

        super(TestClaimsMongoDB, self).tearDown()

    @ddt.data('[', '[]', '.', '"fail"')
    def test_bad_claim(self, doc):
        self.simulate_post(self.claims_path, body=doc, headers=self.headers)
        self.assertEqual(falcon.HTTP_400, self.srmock.status)

        href = self._get_a_claim()

        self.simulate_patch(href, body=doc, headers=self.headers)
        self.assertEqual(falcon.HTTP_400, self.srmock.status)

    def test_exceeded_claim(self):
        self.simulate_post(self.claims_path,
                           body='{"ttl": 100, "grace": 60}',
                           query_string='limit=21', headers=self.headers)

        self.assertEqual(falcon.HTTP_400, self.srmock.status)

    @ddt.data((-1, -1), (59, 60), (60, 59), (60, 43201), (43201, 60))
    def test_unacceptable_ttl_or_grace(self, ttl_grace):
        ttl, grace = ttl_grace
        self.simulate_post(self.claims_path,
                           body=jsonutils.dumps({'ttl': ttl,
                                                 'grace': grace}),
                           headers=self.headers)

        self.assertEqual(falcon.HTTP_400, self.srmock.status)

    @ddt.data(-1, 59, 43201)
    def test_unacceptable_new_ttl(self, ttl):
        href = self._get_a_claim()

        self.simulate_patch(href,
                            body=jsonutils.dumps({'ttl': ttl}),
                            headers=self.headers)

        self.assertEqual(falcon.HTTP_400, self.srmock.status)

    def test_default_ttl_and_grace(self):
        self.simulate_post(self.claims_path,
                           body='{}', headers=self.headers)

        self.assertEqual(falcon.HTTP_201, self.srmock.status)

        body = self.simulate_get(self.srmock.headers_dict['location'],
                                 headers=self.headers)

        claim = jsonutils.loads(body[0])
        self.assertEqual(self.default_claim_ttl, claim['ttl'])
    def _get_a_claim(self):
        doc = '{"ttl": 100, "grace": 60}'
        self.simulate_post(self.claims_path, body=doc,
                           headers=self.headers)
        return self.srmock.headers_dict['Location']

    def test_lifecycle(self):
        doc = '{"ttl": 100, "grace": 60}'

        # First, claim some messages
        body = self.simulate_post(self.claims_path, body=doc,
                                  headers=self.headers)
        self.assertEqual(falcon.HTTP_201, self.srmock.status)

        claimed = jsonutils.loads(body[0])['messages']
        claim_href = self.srmock.headers_dict['Location']
        message_href, params = claimed[0]['href'].split('?')

        # No more messages to claim
        self.simulate_post(self.claims_path, body=doc,
                           query_string='limit=3', headers=self.headers)
        self.assertEqual(falcon.HTTP_204, self.srmock.status)

        # Listing messages, by default, won't include claimed, will echo
        body = self.simulate_get(self.messages_path,
                                 headers=self.headers,
                                 query_string="echo=true")
        self.assertEqual(falcon.HTTP_200, self.srmock.status)
        self._empty_message_list(body)

        # Listing messages, by default, won't include claimed, won't echo
        body = self.simulate_get(self.messages_path,
                                 headers=self.headers,
                                 query_string="echo=false")
        self.assertEqual(falcon.HTTP_200, self.srmock.status)
        self._empty_message_list(body)

        # List messages, include_claimed, but don't echo
        body = self.simulate_get(self.messages_path,
                                 query_string='include_claimed=true'
                                              '&echo=false',
                                 headers=self.headers)

        self.assertEqual(falcon.HTTP_200, self.srmock.status)
        self._empty_message_list(body)

        # List messages with a different client-id and echo=false.
        # Should return some messages
        headers = self.headers.copy()
        headers["Client-ID"] = uuidutils.generate_uuid()
        body = self.simulate_get(self.messages_path,
                                 query_string='include_claimed=true'
                                              '&echo=false',
                                 headers=headers)

        self.assertEqual(falcon.HTTP_200, self.srmock.status)

        # Include claimed messages this time, and echo
        body = self.simulate_get(self.messages_path,
                                 query_string='include_claimed=true'
                                              '&echo=true',
                                 headers=self.headers)
        listed = jsonutils.loads(body[0])
        self.assertEqual(falcon.HTTP_200, self.srmock.status)
        self.assertEqual(len(claimed), len(listed['messages']))

        now = timeutils.utcnow() + datetime.timedelta(seconds=10)
        timeutils_utcnow = 'oslo_utils.timeutils.utcnow'
        with mock.patch(timeutils_utcnow) as mock_utcnow:
            mock_utcnow.return_value = now
            body = self.simulate_get(claim_href, headers=self.headers)

        claim = jsonutils.loads(body[0])

        self.assertEqual(falcon.HTTP_200, self.srmock.status)
        self.assertEqual(100, claim['ttl'])
        # NOTE(cpp-cabrera): verify that claim age is non-negative
        self.assertThat(claim['age'], matchers.GreaterThan(-1))

        # Try to delete the message without submitting a claim_id
        self.simulate_delete(message_href, headers=self.headers)
        self.assertEqual(falcon.HTTP_403, self.srmock.status)

        # Delete the message and its associated claim
        self.simulate_delete(message_href,
                             query_string=params, headers=self.headers)
        self.assertEqual(falcon.HTTP_204, self.srmock.status)

        # Try to get it from the wrong project
        headers = {
            'Client-ID': uuidutils.generate_uuid(),
            'X-Project-ID': 'bogusproject'
        }
        self.simulate_get(message_href, query_string=params, headers=headers)
        self.assertEqual(falcon.HTTP_404, self.srmock.status)

        # Get the message
        self.simulate_get(message_href, query_string=params,
                          headers=self.headers)
        self.assertEqual(falcon.HTTP_404, self.srmock.status)

        # Update the claim
        new_claim_ttl = '{"ttl": 60, "grace": 60}'
        creation = timeutils.utcnow()
        self.simulate_patch(claim_href, body=new_claim_ttl,
                            headers=self.headers)
        self.assertEqual(falcon.HTTP_204, self.srmock.status)

        # Get the claimed messages (again)
        body = self.simulate_get(claim_href, headers=self.headers)
        query = timeutils.utcnow()
        claim = jsonutils.loads(body[0])
        message_href, params = claim['messages'][0]['href'].split('?')

        self.assertEqual(60, claim['ttl'])
        estimated_age = timeutils.delta_seconds(creation, query)
        self.assertGreater(estimated_age, claim['age'])

        # Delete the claim
        self.simulate_delete(claim['href'], headers=self.headers)
        self.assertEqual(falcon.HTTP_204, self.srmock.status)

        # Try to delete a message with an invalid claim ID
        self.simulate_delete(message_href,
                             query_string=params, headers=self.headers)
        self.assertEqual(falcon.HTTP_400, self.srmock.status)

        # Make sure it wasn't deleted!
        self.simulate_get(message_href, query_string=params,
                          headers=self.headers)
        self.assertEqual(falcon.HTTP_200, self.srmock.status)

        # Try to get a claim that doesn't exist
        self.simulate_get(claim['href'], headers=self.headers)
        self.assertEqual(falcon.HTTP_404, self.srmock.status)

        # Try to update a claim that doesn't exist
        self.simulate_patch(claim['href'], body=doc,
                            headers=self.headers)
        self.assertEqual(falcon.HTTP_404, self.srmock.status)

    def test_post_claim_nonexistent_queue(self):
        path = self.url_prefix + '/queues/nonexistent/claims'
        self.simulate_post(path,
                           body='{"ttl": 100, "grace": 60}',
                           headers=self.headers)

        self.assertEqual(falcon.HTTP_204, self.srmock.status)

    def test_get_claim_nonexistent_queue(self):
        path = self.url_prefix + '/queues/nonexistent/claims/aaabbbba'
        self.simulate_get(path, headers=self.headers)

        self.assertEqual(falcon.HTTP_404, self.srmock.status)

    # NOTE(cpp-cabrera): regression test against bug #1203842
    def test_get_nonexistent_claim_404s(self):
        self.simulate_get(self.claims_path + '/a', headers=self.headers)
        self.assertEqual(falcon.HTTP_404, self.srmock.status)

    def test_delete_nonexistent_claim_204s(self):
        self.simulate_delete(self.claims_path + '/a',
                             headers=self.headers)
        self.assertEqual(falcon.HTTP_204, self.srmock.status)

    def test_patch_nonexistent_claim_404s(self):
        patch_data = jsonutils.dumps({'ttl': 100})
        self.simulate_patch(self.claims_path + '/a', body=patch_data,
                            headers=self.headers)
        self.assertEqual(falcon.HTTP_404, self.srmock.status)


class TestClaimsFaultyDriver(base.V2BaseFaulty):

    config_file = 'wsgi_faulty.conf'

    def test_simple(self):
        self.project_id = '480924abc_'
        self.headers = {
            'Client-ID': uuidutils.generate_uuid(),
            'X-Project-ID': self.project_id
        }

        claims_path = self.url_prefix + '/queues/fizbit/claims'
        doc = '{"ttl": 100, "grace": 60}'

        self.simulate_post(claims_path, body=doc, headers=self.headers)
        self.assertEqual(falcon.HTTP_503, self.srmock.status)

        self.simulate_get(claims_path + '/nichts', headers=self.headers)
        self.assertEqual(falcon.HTTP_503, self.srmock.status)

        self.simulate_patch(claims_path + '/nichts', body=doc,
                            headers=self.headers)
        self.assertEqual(falcon.HTTP_503, self.srmock.status)

        self.simulate_delete(claims_path + '/foo', headers=self.headers)
        self.assertEqual(falcon.HTTP_503, self.srmock.status)
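# Illustrative sketch (not part of the zaqar source tree): test_lifecycle
# above encodes the whole consumer protocol -- POST a claim, DELETE each
# message through the href that already carries its claim_id, then DELETE
# the claim to release anything left over. With a hypothetical 'client'
# object wrapping those HTTP calls, one worker cycle reduces to:

def consume_once(client, handle):
    """Run one claim/process/acknowledge cycle against a Zaqar queue."""
    claim = client.post_claim(ttl=300, grace=60)  # None when queue is empty
    if claim is None:
        return 0
    for message in claim['messages']:
        handle(message['body'])
        client.delete(message['href'])  # href includes ?claim_id=...
    client.delete(claim['href'])        # release the (now empty) claim
    return len(claim['messages'])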
# ===== zaqar-20.1.0.dev29/zaqar/tests/unit/transport/wsgi/v2_0/test_default_limits.py =====

# Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import contextlib

import falcon
from oslo_serialization import jsonutils
from oslo_utils import uuidutils

from zaqar import storage
from zaqar.tests.unit.transport.wsgi import base


class TestDefaultLimits(base.V2Base):

    config_file = 'wsgi_mongodb_default_limits.conf'

    def setUp(self):
        super(TestDefaultLimits, self).setUp()

        self.headers = {
            'Client-ID': uuidutils.generate_uuid(),
            'X-Project-ID': '%s_' % uuidutils.generate_uuid()
        }
        self.queue_path = self.url_prefix + '/queues'
        self.q1_queue_path = self.queue_path + '/' + uuidutils.generate_uuid()
        self.messages_path = self.q1_queue_path + '/messages'
        self.claims_path = self.q1_queue_path + '/claims'

        self.simulate_put(self.q1_queue_path, headers=self.headers)
        self.assertEqual(falcon.HTTP_201, self.srmock.status)

    def tearDown(self):
        self.simulate_delete(self.queue_path, headers=self.headers)
        super(TestDefaultLimits, self).tearDown()

    def test_queue_listing(self):
        # 2 queues to list
        self.simulate_put(self.queue_path + '/q2', headers=self.headers)
        self.assertEqual(falcon.HTTP_201, self.srmock.status)

        with self._prepare_queues(storage.DEFAULT_QUEUES_PER_PAGE + 1):
            result = self.simulate_get(self.queue_path, headers=self.headers)
            self.assertEqual(falcon.HTTP_200, self.srmock.status)

            queues = jsonutils.loads(result[0])['queues']
            self.assertEqual(storage.DEFAULT_QUEUES_PER_PAGE, len(queues))

    def test_message_listing_different_id(self):
        self._prepare_messages(storage.DEFAULT_MESSAGES_PER_PAGE + 1)

        headers = self.headers.copy()
        headers['Client-ID'] = uuidutils.generate_uuid()
        result = self.simulate_get(self.messages_path,
                                   headers=headers,
                                   query_string='echo=false')

        self.assertEqual(falcon.HTTP_200, self.srmock.status)

        messages = jsonutils.loads(result[0])['messages']
        self.assertEqual(storage.DEFAULT_MESSAGES_PER_PAGE, len(messages))

    def test_message_listing_same_id(self):
        self._prepare_messages(storage.DEFAULT_MESSAGES_PER_PAGE + 1)
        result = self.simulate_get(self.messages_path,
                                   headers=self.headers,
                                   query_string='echo=false')

        self.assertEqual(falcon.HTTP_200, self.srmock.status)
        self._empty_message_list(result)

        self._prepare_messages(storage.DEFAULT_MESSAGES_PER_PAGE + 1)
        result = self.simulate_get(self.messages_path,
                                   headers=self.headers,
                                   query_string='echo=true')

        messages = jsonutils.loads(result[0])['messages']
        self.assertEqual(storage.DEFAULT_MESSAGES_PER_PAGE, len(messages))

    def test_claim_creation(self):
        self._prepare_messages(storage.DEFAULT_MESSAGES_PER_CLAIM + 1)

        result = self.simulate_post(self.claims_path,
                                    body='{"ttl": 60, "grace": 60}',
                                    headers=self.headers)

        self.assertEqual(falcon.HTTP_201, self.srmock.status)

        messages = jsonutils.loads(result[0])['messages']
        self.assertEqual(storage.DEFAULT_MESSAGES_PER_CLAIM, len(messages))

    @contextlib.contextmanager
    def _prepare_queues(self, count):
        queue_paths = [self.queue_path + '/multi-{0}'.format(i)
                       for i in range(count)]

        for path in queue_paths:
            self.simulate_put(path, headers=self.headers)
            self.assertEqual(falcon.HTTP_201, self.srmock.status)

        yield

        for path in queue_paths:
            self.simulate_delete(path, headers=self.headers)

    def _prepare_messages(self, count):
        doc = {'messages': [{'body': 239,
                             'ttl': 300}] * count}
        body = jsonutils.dumps(doc)
        self.simulate_post(self.messages_path, body=body,
                           headers=self.headers)
        self.assertEqual(falcon.HTTP_201, self.srmock.status)
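# Illustrative sketch (not part of the zaqar source tree): the caps that the
# tests above assert come straight from zaqar.storage, and they apply
# whenever a client omits 'limit' -- post N+1 items and an unqualified
# listing or claim hands back only N, plus a paging link:

from zaqar import storage

DEFAULTS = {
    'queues_per_page': storage.DEFAULT_QUEUES_PER_PAGE,
    'messages_per_page': storage.DEFAULT_MESSAGES_PER_PAGE,
    'messages_per_claim': storage.DEFAULT_MESSAGES_PER_CLAIM,
}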
# ===== zaqar-20.1.0.dev29/zaqar/tests/unit/transport/wsgi/v2_0/test_flavors_new.py =====

# Copyright (c) 2017 ZTE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.

import contextlib
import uuid

import ddt
import falcon
from oslo_serialization import jsonutils

from zaqar import tests as testing
from zaqar.tests.unit.transport.wsgi import base


@contextlib.contextmanager
def flavor(test, name, pool_list):
    """A context manager for constructing a flavor for use in testing.

    Deletes the flavor after exiting the context.

    :param test: Must expose simulate_* methods
    :param name: Name for this flavor
    :type name: str
    :param pool_list: Pools assigned to this flavor
    :type pool_list: [str]
    :returns: (name, pool_list)
    :rtype: (str, [str])
    """
    doc = {'pool_list': pool_list}
    path = test.url_prefix + '/flavors/' + name

    test.simulate_put(path, body=jsonutils.dumps(doc))

    try:
        yield name, pool_list

    finally:
        test.simulate_delete(path)


@contextlib.contextmanager
def flavors(test, count):
    """A context manager for constructing flavors for use in testing.

    Deletes the flavors after exiting the context.

    :param test: Must expose simulate_* methods
    :param count: Number of flavors (and backing pools) to create
    :type count: int
    :returns: paths to the created flavors
    :rtype: [str]
    """
    pool_path_all = []
    flavor_path_all = []
    for i in range(count):
        poolname = 'pool' + str(i)
        pool_doc = {'weight': 100,
                    'uri': test.mongodb_url + '/test' + str(i)}
        pool_path = test.url_prefix + '/pools/' + poolname
        test.simulate_put(pool_path, body=jsonutils.dumps(pool_doc))
        flavorname = str(i)
        flavor_path = test.url_prefix + "/flavors/" + flavorname
        flavor_doc = {'pool_list': [poolname]}
        test.simulate_put(flavor_path, body=jsonutils.dumps(flavor_doc))
        pool_path_all.append(pool_path)
        flavor_path_all.append(flavor_path)

    try:
        yield flavor_path_all
    finally:
        for path in flavor_path_all:
            test.simulate_delete(path)
        for path in pool_path_all:
            test.simulate_delete(path)


@ddt.ddt
class TestFlavorsMongoDB(base.V2Base):

    config_file = 'wsgi_mongodb_pooled.conf'

    @testing.requires_mongodb
    def setUp(self):
        super(TestFlavorsMongoDB, self).setUp()
        self.queue = 'test-queue'
        self.queue_path = self.url_prefix + '/queues/' + self.queue

        self.pool = 'mypool'
        self.pool_path = self.url_prefix + '/pools/' + self.pool
        self.pool_doc = {'weight': 100,
                         'uri': self.mongodb_url + '/test'}
        self.simulate_put(self.pool_path, body=jsonutils.dumps(self.pool_doc))

        self.flavor = 'test-flavor'
        self.doc = {'capabilities': {}}
        self.doc['pool_list'] = [self.pool]
        self.flavor_path = self.url_prefix + '/flavors/' + self.flavor
        self.simulate_put(self.flavor_path, body=jsonutils.dumps(self.doc))
        self.assertEqual(falcon.HTTP_201, self.srmock.status)

    def tearDown(self):
        self.simulate_delete(self.queue_path)
        self.simulate_delete(self.flavor_path)
        self.assertEqual(falcon.HTTP_204, self.srmock.status)
        self.simulate_delete(self.pool_path)

        super(TestFlavorsMongoDB, self).tearDown()

    def test_put_flavor_works(self):
        name = str(uuid.uuid1())
        with flavor(self, name, self.doc['pool_list']):
            self.assertEqual(falcon.HTTP_201, self.srmock.status)

    def test_put_raises_if_missing_fields(self):
        path = self.url_prefix + '/flavors/' + str(uuid.uuid1())
        self.simulate_put(path, body=jsonutils.dumps({}))
        self.assertEqual(falcon.HTTP_400, self.srmock.status)

        self.simulate_put(path,
                          body=jsonutils.dumps({'capabilities': {}}))
        self.assertEqual(falcon.HTTP_400, self.srmock.status)

    @ddt.data(1, 2**32+1, [])
    def test_put_raises_if_invalid_pool(self, pool_list):
        path = self.url_prefix + '/flavors/' + str(uuid.uuid1())
        self.simulate_put(path,
                          body=jsonutils.dumps({'pool_list': pool_list}))
        self.assertEqual(falcon.HTTP_400, self.srmock.status)

    def test_put_auto_get_capabilities(self):
        path = self.url_prefix + '/flavors/' + str(uuid.uuid1())
        doc = {'pool_list': self.doc['pool_list']}
        self.simulate_put(path, body=jsonutils.dumps(doc))
        self.assertEqual(falcon.HTTP_201, self.srmock.status)
        # NOTE(gengchc2): Delete it, otherwise a garbage flavor is left
        # behind.
        self.simulate_delete(path)

    def test_put_existing_overwrites(self):
        # NOTE(cabrera): setUp creates default flavor
        expect = self.doc
        self.simulate_put(self.flavor_path,
                          body=jsonutils.dumps(expect))
        self.assertEqual(falcon.HTTP_201, self.srmock.status)

        result = self.simulate_get(self.flavor_path)
        self.assertEqual(falcon.HTTP_200, self.srmock.status)
        doc = jsonutils.loads(result[0])
        self.assertEqual(expect['pool_list'], doc['pool_list'])

    def test_create_flavor_no_pool_list(self):
        self.simulate_delete(self.flavor_path)
        self.assertEqual(falcon.HTTP_204, self.srmock.status)
        self.simulate_delete(self.pool_path)
        self.assertEqual(falcon.HTTP_204, self.srmock.status)
        resp = self.simulate_put(self.flavor_path,
                                 body=jsonutils.dumps(self.doc))
        self.assertEqual(falcon.HTTP_400, self.srmock.status)
        self.assertEqual(
            {'description': 'Flavor test-flavor could not be created, '
                            'error:Pool mypool does not exist',
             'title': 'Unable to create'},
            jsonutils.loads(resp[0]))

    def test_delete_works(self):
        self.simulate_delete(self.flavor_path)
        self.assertEqual(falcon.HTTP_204, self.srmock.status)

        self.simulate_get(self.flavor_path)
        self.assertEqual(falcon.HTTP_404, self.srmock.status)

    def test_get_nonexisting_raises_404(self):
        self.simulate_get(self.url_prefix + '/flavors/nonexisting')
        self.assertEqual(falcon.HTTP_404, self.srmock.status)

    def _flavor_expect(self, flavor, xhref, xpool_list=None):
        self.assertIn('href', flavor)
        self.assertIn('name', flavor)
        self.assertEqual(xhref, flavor['href'])
        if xpool_list is not None:
            self.assertIn('pool_list', flavor)
            self.assertEqual(xpool_list, flavor['pool_list'])

    def test_get_works(self):
        result = self.simulate_get(self.flavor_path)
        self.assertEqual(falcon.HTTP_200, self.srmock.status)
        flavor = jsonutils.loads(result[0])
        self._flavor_expect(flavor, self.flavor_path, self.doc['pool_list'])

        store_caps = ['FIFO', 'CLAIMS', 'DURABILITY',
                      'AOD', 'HIGH_THROUGHPUT']
        self.assertEqual(store_caps, flavor['capabilities'])

    def test_patch_raises_if_missing_fields(self):
        self.simulate_patch(self.flavor_path,
                            body=jsonutils.dumps({'location': 1}))
        self.assertEqual(falcon.HTTP_400, self.srmock.status)

    def _patch_test(self, doc):
        result = self.simulate_patch(self.flavor_path,
                                     body=jsonutils.dumps(doc))
        self.assertEqual(falcon.HTTP_200, self.srmock.status)

        updated_flavor = jsonutils.loads(result[0])
        self._flavor_expect(updated_flavor, self.flavor_path)
        capabilities = ['FIFO', 'CLAIMS', 'DURABILITY',
                        'AOD', 'HIGH_THROUGHPUT']
        self.assertEqual(capabilities, updated_flavor['capabilities'])

        result = self.simulate_get(self.flavor_path)
        self.assertEqual(falcon.HTTP_200, self.srmock.status)

        flavor = jsonutils.loads(result[0])
        self._flavor_expect(flavor, self.flavor_path)
        self.assertEqual(capabilities, flavor['capabilities'])

    def test_patch_works(self):
        doc = {'pool_list': self.doc['pool_list'], 'capabilities': []}
        self._patch_test(doc)

    def test_patch_works_with_extra_fields(self):
        doc = {'pool_list': self.doc['pool_list'], 'capabilities': [],
               'location': 100, 'partition': 'taco'}
        self._patch_test(doc)

    @ddt.data(-1, 2**32+1, [])
    def test_patch_raises_400_on_invalid_pool_list(self, pool_list):
        self.simulate_patch(self.flavor_path,
                            body=jsonutils.dumps({'pool_list': pool_list}))
        self.assertEqual(falcon.HTTP_400, self.srmock.status)

    @ddt.data(-1, 'wee', [])
    def test_patch_raises_400_on_invalid_capabilities(self, capabilities):
        doc = {'capabilities': capabilities}
        self.simulate_patch(self.flavor_path, body=jsonutils.dumps(doc))
        self.assertEqual(falcon.HTTP_400, self.srmock.status)
test_patch_raises_404_if_flavor_not_found(self): self.simulate_patch(self.url_prefix + '/flavors/notexists', body=jsonutils.dumps({'pool_list': ['test']})) self.assertEqual(falcon.HTTP_404, self.srmock.status) def test_empty_listing(self): self.simulate_delete(self.flavor_path) result = self.simulate_get(self.url_prefix + '/flavors') results = jsonutils.loads(result[0]) self.assertEqual(falcon.HTTP_200, self.srmock.status) self.assertEqual(0, len(results['flavors'])) self.assertIn('links', results) def _listing_test(self, count=10, limit=10, marker=None, detailed=False): # NOTE(cpp-cabrera): delete initial flavor - it will interfere # with listing tests self.simulate_delete(self.flavor_path) query = 'limit={0}&detailed={1}'.format(limit, detailed) if marker: query += '&marker={0}'.format(marker) with flavors(self, count): result = self.simulate_get(self.url_prefix + '/flavors', query_string=query) self.assertEqual(falcon.HTTP_200, self.srmock.status) results = jsonutils.loads(result[0]) self.assertIsInstance(results, dict) self.assertIn('flavors', results) self.assertIn('links', results) flavors_list = results['flavors'] link = results['links'][0] self.assertEqual('next', link['rel']) href = falcon.uri.parse_query_string(link['href'].split('?')[1]) self.assertIn('marker', href) self.assertEqual(str(limit), href['limit']) self.assertEqual(str(detailed).lower(), href['detailed']) next_query_string = ('marker={marker}&limit={limit}' '&detailed={detailed}').format(**href) next_result = self.simulate_get(link['href'].split('?')[0], query_string=next_query_string) next_flavors = jsonutils.loads(next_result[0]) next_flavors_list = next_flavors['flavors'] self.assertEqual(falcon.HTTP_200, self.srmock.status) self.assertIn('links', next_flavors) if limit < count: self.assertEqual(min(limit, count-limit), len(next_flavors_list)) else: self.assertEqual(0, len(next_flavors_list)) self.assertEqual(min(limit, count), len(flavors_list)) for i, s in enumerate(flavors_list + next_flavors_list): capabilities = ['FIFO', 'CLAIMS', 'DURABILITY', 'AOD', 'HIGH_THROUGHPUT'] if detailed: self.assertIn('capabilities', s) self.assertEqual(s['capabilities'], capabilities) else: self.assertNotIn('capabilities', s) def test_listing_works(self): self._listing_test() def test_detailed_listing_works(self): self._listing_test(detailed=True) @ddt.data(1, 5, 10, 15) def test_listing_works_with_limit(self, limit): self._listing_test(count=15, limit=limit) def test_listing_marker_is_respected(self): self.simulate_delete(self.flavor_path) with flavors(self, 10) as expected: result = self.simulate_get(self.url_prefix + '/flavors', query_string='marker=3') self.assertEqual(falcon.HTTP_200, self.srmock.status) flavor_list = jsonutils.loads(result[0])['flavors'] self.assertEqual(6, len(flavor_list)) path = expected[4] self._flavor_expect(flavor_list[0], path) def test_listing_error_with_invalid_limit(self): self.simulate_delete(self.flavor_path) query = 'limit={0}&detailed={1}'.format(0, True) with flavors(self, 10): self.simulate_get(self.url_prefix + '/flavors', query_string=query) self.assertEqual(falcon.HTTP_400, self.srmock.status) def test_queue_create_works(self): metadata = {'_flavor': self.flavor} self.simulate_put(self.queue_path, body=jsonutils.dumps(metadata)) self.assertEqual(falcon.HTTP_201, self.srmock.status) def test_queue_create_no_flavor(self): metadata = {'_flavor': self.flavor} self.simulate_delete(self.flavor_path) self.assertEqual(falcon.HTTP_204, self.srmock.status) self.simulate_put(self.queue_path, 
body=jsonutils.dumps(metadata)) self.assertEqual(falcon.HTTP_400, self.srmock.status) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/transport/wsgi/v2_0/test_health.py0000664000175100017510000000604515033040005024633 0ustar00mylesmyles# Copyright 2014 Catalyst IT Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from unittest import mock import ddt import falcon from oslo_serialization import jsonutils from zaqar.storage import errors import zaqar.storage.mongodb as mongo from zaqar import tests as testing from zaqar.tests.unit.transport.wsgi import base @ddt.ddt class TestHealthMongoDB(base.V2Base): config_file = 'wsgi_mongodb.conf' @testing.requires_mongodb def setUp(self): super(TestHealthMongoDB, self).setUp() def test_basic(self): path = self.url_prefix + '/health' body = self.simulate_get(path) health = jsonutils.loads(body[0]) self.assertEqual(falcon.HTTP_200, self.srmock.status) self.assertTrue(health['storage_reachable']) self.assertIsNotNone(health['message_volume']) for op in health['operation_status']: self.assertTrue(health['operation_status'][op]['succeeded']) @mock.patch.object(mongo.driver.DataDriver, '_health') def test_message_volume(self, mock_driver_get): def _health(): KPI = {} KPI['message_volume'] = {'free': 1, 'claimed': 2, 'total': 3} return KPI mock_driver_get.side_effect = _health path = self.url_prefix + '/health' body = self.simulate_get(path) health = jsonutils.loads(body[0]) self.assertEqual(falcon.HTTP_200, self.srmock.status) message_volume = health['message_volume'] self.assertEqual(1, message_volume['free']) self.assertEqual(2, message_volume['claimed']) self.assertEqual(3, message_volume['total']) @mock.patch.object(mongo.messages.MessageController, 'delete') def test_operation_status(self, mock_messages_delete): mock_messages_delete.side_effect = errors.NotPermitted() path = self.url_prefix + '/health' body = self.simulate_get(path) health = jsonutils.loads(body[0]) self.assertEqual(falcon.HTTP_200, self.srmock.status) op_status = health['operation_status'] for op in op_status.keys(): if op == 'delete_messages': self.assertFalse(op_status[op]['succeeded']) self.assertIsNotNone(op_status[op]['ref']) else: self.assertTrue(op_status[op]['succeeded']) class TestHealthFaultyDriver(base.V2BaseFaulty): config_file = 'wsgi_faulty.conf' def test_simple(self): path = self.url_prefix + '/health' self.simulate_get(path) self.assertEqual(falcon.HTTP_503, self.srmock.status) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/transport/wsgi/v2_0/test_home.py0000664000175100017510000000521715033040005024316 0ustar00mylesmyles# Copyright (c) 2013 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. 
You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. import falcon from oslo_serialization import jsonutils from oslo_utils import uuidutils from urllib import parse as urlparse from zaqar.tests.unit.transport.wsgi import base class TestHomeDocument(base.V2Base): config_file = 'wsgi_mongodb.conf' def test_json_response(self): self.headers = { 'Client-ID': uuidutils.generate_uuid(), 'X-Project-ID': '8383830383abc_' } body = self.simulate_get(self.url_prefix + '/', headers=self.headers) self.assertEqual(falcon.HTTP_200, self.srmock.status) content_type = self.srmock.headers_dict['Content-Type'] self.assertEqual('application/json-home', content_type) try: jsonutils.loads(body[0]) except ValueError: self.fail('Home document is not valid JSON') def test_href_template(self): self.headers = { 'Client-ID': uuidutils.generate_uuid(), 'X-Project-ID': '8383830383' } body = self.simulate_get(self.url_prefix + '/', headers=self.headers) self.assertEqual(falcon.HTTP_200, self.srmock.status) resp = jsonutils.loads(body[0]) queue_href_template = resp['resources']['rel/queue']['href-template'] path_1 = 'https://zaqar.example.com' + self.url_prefix path_2 = 'https://zaqar.example.com' + self.url_prefix + '/' # Verify all the href templates start with the correct version prefix def get_href_or_template(resource): return resource.get('href-template', '') or resource['href'] for resource in list(resp['resources']): self.assertTrue( get_href_or_template(resp['resources'][resource]). startswith(self.url_prefix)) url = urlparse.urljoin(path_1, queue_href_template) expected = ('https://zaqar.example.com' + self.url_prefix + '/queues/foo') self.assertEqual(expected, url.format(queue_name='foo')) url = urlparse.urljoin(path_2, queue_href_template) self.assertEqual(expected, url.format(queue_name='foo')) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/transport/wsgi/v2_0/test_media_type.py0000664000175100017510000000640215033040005025503 0ustar00mylesmyles# Copyright (c) 2013 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License.
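# The cases below exercise content negotiation for the v2 WSGI transport:
# JSON-only endpoints are expected to answer 406 when the Accept header
# asks for a non-JSON media type, and requests that carry a body with a
# form-urlencoded Content-Type are rejected outright (see the note on
# bug/1547100 below).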
import falcon from falcon import testing from oslo_serialization import jsonutils from oslo_utils import uuidutils from zaqar.tests.unit.transport.wsgi import base class TestMediaType(base.V2Base): config_file = 'wsgi_mongodb.conf' def test_json_only_endpoints_with_wrong_accept_header(self): endpoints = ( ('GET', self.url_prefix + '/queues'), ('GET', self.url_prefix + '/queues/nonexistent/stats'), ('POST', self.url_prefix + '/queues/nonexistent/messages'), ('GET', self.url_prefix + '/queues/nonexistent/messages/deadbeaf'), ('POST', self.url_prefix + '/queues/nonexistent/claims'), ('GET', self.url_prefix + '/queues/nonexistent/claims/0ad'), ('GET', self.url_prefix + '/health'), ) for method, endpoint in endpoints: headers = { 'Client-ID': uuidutils.generate_uuid(), 'Accept': 'application/xml', } env = testing.create_environ(endpoint, method=method, headers=headers) self.app(env, self.srmock) self.assertEqual(falcon.HTTP_406, self.srmock.status) def test_request_with_body_and_urlencoded_contenttype_header_fails(self): # NOTE(Eva-i): this test case makes sure wsgi 'before' hook # "require_content_type_be_non_urlencoded" works to prevent # bug/1547100. eww_queue_path = self.url_prefix + '/queues/eww' eww_queue_messages_path = eww_queue_path + '/messages' sample_message = jsonutils.dumps({'messages': [{'body': {'eww!'}, 'ttl': 200}]}) bad_headers = { 'Client-ID': uuidutils.generate_uuid(), 'Content-Type': 'application/x-www-form-urlencoded', } # Create queue request with bad headers. Should still work, because it # has no body. self.simulate_put(eww_queue_path, headers=bad_headers) self.addCleanup(self.simulate_delete, eww_queue_path, headers=self.headers) self.assertEqual(falcon.HTTP_201, self.srmock.status) # Post message request with good headers. Should work. self.simulate_post(eww_queue_messages_path, body=sample_message, headers=self.headers) self.assertEqual(falcon.HTTP_201, self.srmock.status) # Post message request with bad headers. Should not work. self.simulate_post(eww_queue_messages_path, body=sample_message, headers=bad_headers) self.assertEqual(falcon.HTTP_400, self.srmock.status) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/transport/wsgi/v2_0/test_messages.py0000664000175100017510000010067015033040005025174 0ustar00mylesmyles# Copyright (c) 2013 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
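# The message tests below post documents shaped like the following (a
# sketch based on the payloads used throughout this module):
#
#     {'messages': [{'body': <any JSON value>, 'ttl': <seconds>}, ...]}
#
# and then drive listing, bulk get/delete, claim interaction, pop
# semantics, and the encrypted-queue variants of the same flows.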
import datetime from unittest import mock import ddt import falcon from oslo_serialization import jsonutils from oslo_utils import timeutils from oslo_utils import uuidutils from testtools import matchers from zaqar import tests as testing from zaqar.tests.unit.transport.wsgi import base from zaqar.transport import validation @ddt.ddt class TestMessagesMongoDB(base.V2Base): config_file = 'wsgi_mongodb.conf' @testing.requires_mongodb def setUp(self): super(TestMessagesMongoDB, self).setUp() self.default_message_ttl = self.boot.transport._defaults.message_ttl if self.conf.pooling: uri = self.mongodb_url for i in range(4): db_name = "zaqar_test_pools_" + str(i) # NOTE(dynarro): we need to create a unique uri. new_uri = "%s/%s" % (uri, db_name) options = {'database': db_name} doc = {'weight': 100, 'uri': new_uri, 'options': options} self.simulate_put(self.url_prefix + '/pools/' + str(i), body=jsonutils.dumps(doc)) self.assertEqual(falcon.HTTP_201, self.srmock.status) self.project_id = '7e55e1a7e' self.headers.update({ 'Client-ID': uuidutils.generate_uuid(), 'X-Project-ID': self.project_id }) # TODO(kgriffs): Add support in self.simulate_* for a "base path" # so that we don't have to concatenate against self.url_prefix # all over the place. self.queue_path = self.url_prefix + '/queues/fizbit' self.encrypted_queue_path = self.url_prefix + '/queues/secretbit' self.messages_path = self.queue_path + '/messages' self.encrypted_messages_path = self.encrypted_queue_path + '/messages' doc = '{"_ttl": 60}' self.simulate_put(self.queue_path, body=doc, headers=self.headers) doc = '{"_ttl": 60, "_enable_encrypt_messages": true}' self.simulate_put(self.encrypted_queue_path, body=doc, headers=self.headers) def tearDown(self): self.simulate_delete(self.queue_path, headers=self.headers) self.simulate_delete(self.encrypted_queue_path, headers=self.headers) if self.conf.pooling: for i in range(4): self.simulate_delete(self.url_prefix + '/pools/' + str(i), headers=self.headers) super(TestMessagesMongoDB, self).tearDown() def test_name_restrictions(self): sample_messages = [ {'body': {'key': 'value'}, 'ttl': 200}, ] messages_path = self.url_prefix + '/queues/%s/messages' sample_doc = jsonutils.dumps({'messages': sample_messages}) self.simulate_post(messages_path % 'Nice-Boat_2', body=sample_doc, headers=self.headers) self.assertEqual(falcon.HTTP_201, self.srmock.status) self.simulate_post(messages_path % 'Nice-Bo@t', body=sample_doc, headers=self.headers) self.assertEqual(falcon.HTTP_400, self.srmock.status) self.simulate_post(messages_path % ('_niceboat' * 8), body=sample_doc, headers=self.headers) self.assertEqual(falcon.HTTP_400, self.srmock.status) def _test_post(self, sample_messages, is_encrypted=False): sample_doc = jsonutils.dumps({'messages': sample_messages}) messages_path = None if is_encrypted: messages_path = self.encrypted_messages_path else: messages_path = self.messages_path result = self.simulate_post(messages_path, body=sample_doc, headers=self.headers) self.assertEqual(falcon.HTTP_201, self.srmock.status) result_doc = jsonutils.loads(result[0]) msg_ids = self._get_msg_ids(self.srmock.headers_dict) self.assertEqual(len(sample_messages), len(msg_ids)) expected_resources = [str(self.messages_path + '/' + id) for id in msg_ids] self.assertEqual(expected_resources, result_doc['resources']) # NOTE(kgriffs): As of v1.1, "partial" is no longer given # in the response document. 
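# The assertion below simply guards against a regression to that older
# response format.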
self.assertNotIn('partial', result_doc) self.assertEqual(len(sample_messages), len(msg_ids)) lookup = dict([(m['ttl'], m['body']) for m in sample_messages]) # Test GET on the message resource directly # NOTE(cpp-cabrera): force the passing of time to age a message timeutils_utcnow = 'oslo_utils.timeutils.utcnow' now = timeutils.utcnow() + datetime.timedelta(seconds=10) with mock.patch(timeutils_utcnow) as mock_utcnow: mock_utcnow.return_value = now for msg_id in msg_ids: message_uri = messages_path + '/' + msg_id headers = self.headers.copy() headers['X-Project-ID'] = '777777' # Wrong project ID self.simulate_get(message_uri, headers=headers) self.assertEqual(falcon.HTTP_404, self.srmock.status) # Correct project ID result = self.simulate_get(message_uri, headers=self.headers) self.assertEqual(falcon.HTTP_200, self.srmock.status) # Check message properties message = jsonutils.loads(result[0]) self.assertEqual(message_uri, message['href']) self.assertEqual(lookup[message['ttl']], message['body']) self.assertEqual(msg_id, message['id']) # no negative age # NOTE(cpp-cabrera): testtools lacks GreaterThanEqual on py26 self.assertThat(message['age'], matchers.GreaterThan(-1)) # Test bulk GET query_string = 'ids=' + ','.join(msg_ids) result = self.simulate_get(messages_path, query_string=query_string, headers=self.headers) self.assertEqual(falcon.HTTP_200, self.srmock.status) result_doc = jsonutils.loads(result[0]) expected_ttls = set(m['ttl'] for m in sample_messages) actual_ttls = set(m['ttl'] for m in result_doc['messages']) self.assertFalse(expected_ttls - actual_ttls) actual_ids = set(m['id'] for m in result_doc['messages']) self.assertFalse(set(msg_ids) - actual_ids) def test_exceeded_payloads(self): # Get a valid message id self._post_messages(self.messages_path) msg_id = self._get_msg_id(self.srmock.headers_dict) # Bulk GET restriction query_string = 'ids=' + ','.join([msg_id] * 21) self.simulate_get(self.messages_path, query_string=query_string, headers=self.headers) self.assertEqual(falcon.HTTP_400, self.srmock.status) # Listing restriction self.simulate_get(self.messages_path, query_string='limit=21', headers=self.headers) self.assertEqual(falcon.HTTP_400, self.srmock.status) # Bulk deletion restriction query_string = 'ids=' + ','.join([msg_id] * 22) self.simulate_delete(self.messages_path, query_string=query_string, headers=self.headers) self.assertEqual(falcon.HTTP_400, self.srmock.status) def test_post_single(self): sample_messages = [ {'body': {'key': 'value'}, 'ttl': 200}, ] self._test_post(sample_messages) def test_post_multiple(self): sample_messages = [ {'body': 239, 'ttl': 100}, {'body': {'key': 'value'}, 'ttl': 200}, {'body': [1, 3], 'ttl': 300}, ] self._test_post(sample_messages) def test_post_single_encrypted(self): sample_messages = [ {'body': {'key': 'value'}, 'ttl': 200}, ] self._test_post(sample_messages) def test_post_multiple_encrypted(self): sample_messages = [ {'body': 239, 'ttl': 100}, {'body': {'key': 'value'}, 'ttl': 200}, {'body': [1, 3], 'ttl': 300}, ] self._test_post(sample_messages) def test_post_optional_ttl(self): sample_messages = { 'messages': [ {'body': 239}, {'body': {'key': 'value'}, 'ttl': 200}, ], } # Manually check default TTL is max from config sample_doc = jsonutils.dumps(sample_messages) result = self.simulate_post(self.messages_path, body=sample_doc, headers=self.headers) self.assertEqual(falcon.HTTP_201, self.srmock.status) result_doc = jsonutils.loads(result[0]) href = result_doc['resources'][0] result = self.simulate_get(href, 
headers=self.headers) message = jsonutils.loads(result[0]) self.assertEqual(self.default_message_ttl, message['ttl']) def test_post_to_non_ascii_queue(self): # NOTE(kgriffs): This test verifies that routes with # embedded queue name params go through the validation # hook, regardless of the target resource. path = self.url_prefix + '/queues/non-ascii-n\u0153me/messages' self._post_messages(path) self.assertEqual(falcon.HTTP_400, self.srmock.status) def test_post_with_long_queue_name(self): # NOTE(kgriffs): This test verifies that routes with # embedded queue name params go through the validation # hook, regardless of the target resource. queues_path = self.url_prefix + '/queues/' game_title = 'v' * validation.QUEUE_NAME_MAX_LEN self.addCleanup( self.simulate_delete, queues_path + game_title, headers=self.headers) self._post_messages(queues_path + game_title + '/messages') self.assertEqual(falcon.HTTP_201, self.srmock.status) game_title += 'v' self._post_messages(queues_path + game_title + '/messages') self.assertEqual(falcon.HTTP_400, self.srmock.status) def test_post_to_missing_queue(self): self.addCleanup( self.simulate_delete, self.url_prefix + '/queues/nonexistent', headers=self.headers) self._post_messages(self.url_prefix + '/queues/nonexistent/messages') self.assertEqual(falcon.HTTP_201, self.srmock.status) def test_post_using_queue_default_message_ttl(self): queue_path = self.url_prefix + '/queues/test_queue1' messages_path = queue_path + '/messages' doc = '{"_default_message_ttl": 999}' self.simulate_put(queue_path, body=doc, headers=self.headers) self.addCleanup(self.simulate_delete, queue_path, headers=self.headers) sample_messages = { 'messages': [ {'body': {'key': 'value'}}, ], } sample_doc = jsonutils.dumps(sample_messages) result = self.simulate_post(messages_path, body=sample_doc, headers=self.headers) result_doc = jsonutils.loads(result[0]) href = result_doc['resources'][0] result = self.simulate_get(href, headers=self.headers) message = jsonutils.loads(result[0]) self.assertEqual(999, message['ttl']) def test_post_using_queue_max_messages_post_size(self): queue_path = self.url_prefix + '/queues/test_queue2' messages_path = queue_path + '/messages' doc = '{"_max_messages_post_size": 1023}' self.simulate_put(queue_path, body=doc, headers=self.headers) self.addCleanup(self.simulate_delete, queue_path, headers=self.headers) sample_messages = { 'messages': [ {'body': {'key': 'a' * 1204}}, ], } sample_doc = jsonutils.dumps(sample_messages) self.simulate_post(messages_path, body=sample_doc, headers=self.headers) self.assertEqual(falcon.HTTP_400, self.srmock.status) def test_post_using_queue_max_messages_post_size_with_encrypted(self): queue_path = self.url_prefix + '/queues/test_queue2' messages_path = queue_path + '/messages' doc = ('{"_max_messages_post_size": 1023, ' '"_enable_encrypt_messages": true}') self.simulate_put(queue_path, body=doc, headers=self.headers) self.addCleanup(self.simulate_delete, queue_path, headers=self.headers) sample_messages = { 'messages': [ {'body': {'key': 'a' * 1204}}, ], } sample_doc = jsonutils.dumps(sample_messages) self.simulate_post(messages_path, body=sample_doc, headers=self.headers) self.assertEqual(falcon.HTTP_400, self.srmock.status) def test_get_from_missing_queue(self): body = self.simulate_get(self.url_prefix + '/queues/nonexistent/messages', headers=self.headers) self.assertEqual(falcon.HTTP_200, self.srmock.status) self._empty_message_list(body) @ddt.data('', '0xdeadbeef', '550893e0-2b6e-11e3-835a-5cf9dd72369') def 
test_bad_client_id(self, text_id): self.simulate_post(self.queue_path + '/messages', body='{"ttl": 60, "body": ""}', headers={'Client-ID': text_id}) self.assertEqual(falcon.HTTP_400, self.srmock.status) self.simulate_get(self.queue_path + '/messages', query_string='limit=3&echo=true', headers={'Client-ID': text_id}) self.assertEqual(falcon.HTTP_400, self.srmock.status) @ddt.data(None, '[', '[]', '{}', '.', '123') def test_post_bad_message(self, document): self.simulate_post(self.queue_path + '/messages', body=document, headers=self.headers) self.assertEqual(falcon.HTTP_400, self.srmock.status) @ddt.data(-1, 59, 1209601) def test_unacceptable_ttl(self, ttl): doc = {'messages': [{'ttl': ttl, 'body': None}]} self.simulate_post(self.queue_path + '/messages', body=jsonutils.dumps(doc), headers=self.headers) self.assertEqual(falcon.HTTP_400, self.srmock.status) def test_exceeded_message_posting(self): # Total (raw request) size doc = {'messages': [{'body': "some body", 'ttl': 100}] * 20} body = jsonutils.dumps(doc, indent=4) max_len = self.transport_cfg.max_messages_post_size long_body = body + (' ' * (max_len - len(body) + 1)) self.simulate_post(self.queue_path + '/messages', body=long_body, headers=self.headers) self.assertEqual(falcon.HTTP_400, self.srmock.status) @ddt.data('{"overflow": 9223372036854775808}', '{"underflow": -9223372036854775809}') def test_unsupported_json(self, document): self.simulate_post(self.queue_path + '/messages', body=document, headers=self.headers) self.assertEqual(falcon.HTTP_400, self.srmock.status) def test_delete(self): self._post_messages(self.messages_path) msg_id = self._get_msg_id(self.srmock.headers_dict) target = self.messages_path + '/' + msg_id self.simulate_get(target, headers=self.headers) self.assertEqual(falcon.HTTP_200, self.srmock.status) self.simulate_delete(target, headers=self.headers) self.assertEqual(falcon.HTTP_204, self.srmock.status) self.simulate_get(target, headers=self.headers) self.assertEqual(falcon.HTTP_404, self.srmock.status) # Safe to delete non-existing ones self.simulate_delete(target, headers=self.headers) self.assertEqual(falcon.HTTP_204, self.srmock.status) def test_delete_with_encrypted(self): self._post_messages(self.encrypted_messages_path) msg_id = self._get_msg_id(self.srmock.headers_dict) target = self.encrypted_messages_path + '/' + msg_id self.simulate_get(target, headers=self.headers) self.assertEqual(falcon.HTTP_200, self.srmock.status) self.simulate_delete(target, headers=self.headers) self.assertEqual(falcon.HTTP_204, self.srmock.status) self.simulate_get(target, headers=self.headers) self.assertEqual(falcon.HTTP_404, self.srmock.status) # Safe to delete non-existing ones self.simulate_delete(target, headers=self.headers) self.assertEqual(falcon.HTTP_204, self.srmock.status) def test_bulk_delete(self): path = self.queue_path + '/messages' self._post_messages(path, repeat=5) [target, params] = self.srmock.headers_dict['location'].split('?') # Deleting the whole collection is denied self.simulate_delete(path, headers=self.headers) self.assertEqual(falcon.HTTP_400, self.srmock.status) self.simulate_delete(target, query_string=params, headers=self.headers) self.assertEqual(falcon.HTTP_204, self.srmock.status) self.simulate_get(target, query_string=params, headers=self.headers) self.assertEqual(falcon.HTTP_404, self.srmock.status) # Safe to delete non-existing ones self.simulate_delete(target, query_string=params, headers=self.headers) self.assertEqual(falcon.HTTP_204, self.srmock.status) # Even after the queue is gone 
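# deleting the same message IDs in bulk is still expected to answer 204
# rather than an error: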
self.simulate_delete(self.queue_path, headers=self.headers) self.assertEqual(falcon.HTTP_204, self.srmock.status) self.simulate_delete(target, query_string=params, headers=self.headers) self.assertEqual(falcon.HTTP_204, self.srmock.status) def test_bulk_delete_with_encrypted(self): path = self.encrypted_queue_path + '/messages' self._post_messages(path, repeat=5) [target, params] = self.srmock.headers_dict['location'].split('?') # Deleting the whole collection is denied self.simulate_delete(path, headers=self.headers) self.assertEqual(falcon.HTTP_400, self.srmock.status) self.simulate_delete(target, query_string=params, headers=self.headers) self.assertEqual(falcon.HTTP_204, self.srmock.status) self.simulate_get(target, query_string=params, headers=self.headers) self.assertEqual(falcon.HTTP_404, self.srmock.status) # Safe to delete non-existing ones self.simulate_delete(target, query_string=params, headers=self.headers) self.assertEqual(falcon.HTTP_204, self.srmock.status) # Even after the queue is gone self.simulate_delete(self.queue_path, headers=self.headers) self.assertEqual(falcon.HTTP_204, self.srmock.status) self.simulate_delete(target, query_string=params, headers=self.headers) self.assertEqual(falcon.HTTP_204, self.srmock.status) def test_bulk_delete_with_claim_ids(self): self.conf.set_override('message_delete_with_claim_id', True, 'transport') path = self.queue_path self._post_messages(path + '/messages', repeat=5) [target, params] = self.srmock.headers_dict['location'].split('?') body = self.simulate_post(path + '/claims', body='{"ttl": 100, "grace": 100}', headers=self.headers) self.assertEqual(falcon.HTTP_201, self.srmock.status) claimed = jsonutils.loads(body[0])['messages'] claim_ids = '&claim_ids=' for claim in claimed: claim_ids += claim['href'].split('claim_id=')[1] + ',' params = params + claim_ids self.simulate_delete(target, query_string=params, headers=self.headers) self.assertEqual(falcon.HTTP_204, self.srmock.status) def test_bulk_delete_without_claim_ids(self): self.conf.set_override('message_delete_with_claim_id', True, 'transport') path = self.queue_path self._post_messages(path + '/messages', repeat=5) [target, params] = self.srmock.headers_dict['location'].split('?') self.simulate_delete(target, query_string=params, headers=self.headers) self.assertEqual(falcon.HTTP_400, self.srmock.status) def test_list(self): path = self.queue_path + '/messages' self._post_messages(path, repeat=10) query_string = 'limit=3&echo=true' body = self.simulate_get(path, query_string=query_string, headers=self.headers) self.assertEqual(falcon.HTTP_200, self.srmock.status) cnt = 0 while jsonutils.loads(body[0])['messages'] != []: contents = jsonutils.loads(body[0]) [target, params] = contents['links'][0]['href'].split('?') for msg in contents['messages']: self.simulate_get(msg['href'], headers=self.headers) self.assertEqual(falcon.HTTP_200, self.srmock.status) body = self.simulate_get(target, query_string=params, headers=self.headers) cnt += 1 self.assertEqual(4, cnt) self.assertEqual(falcon.HTTP_200, self.srmock.status) self._empty_message_list(body) # Stats body = self.simulate_get(self.queue_path + '/stats', headers=self.headers) self.assertEqual(falcon.HTTP_200, self.srmock.status) message_stats = jsonutils.loads(body[0])['messages'] # NOTE(kgriffs): The other parts of the stats are tested # in tests.storage.base and so are not repeated here.
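# Here we only sanity-check that the oldest/newest hrefs each point at
# a single message resource under this queue.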
expected_pattern = self.queue_path + '/messages/[^/]+$' for message_stat_name in ('oldest', 'newest'): self.assertThat(message_stats[message_stat_name]['href'], matchers.MatchesRegex(expected_pattern)) # NOTE(kgriffs): Try to get messages for a missing queue body = self.simulate_get(self.url_prefix + '/queues/nonexistent/messages', headers=self.headers) self.assertEqual(falcon.HTTP_200, self.srmock.status) self._empty_message_list(body) def test_list_with_encrypted(self): path = self.encrypted_queue_path + '/messages' self._post_messages(path, repeat=10) query_string = 'limit=3&echo=true' body = self.simulate_get(path, query_string=query_string, headers=self.headers) self.assertEqual(falcon.HTTP_200, self.srmock.status) cnt = 0 while jsonutils.loads(body[0])['messages'] != []: contents = jsonutils.loads(body[0]) [target, params] = contents['links'][0]['href'].split('?') for msg in contents['messages']: self.simulate_get(msg['href'], headers=self.headers) self.assertEqual(falcon.HTTP_200, self.srmock.status) body = self.simulate_get(target, query_string=params, headers=self.headers) cnt += 1 self.assertEqual(4, cnt) self.assertEqual(falcon.HTTP_200, self.srmock.status) self._empty_message_list(body) def test_list_with_bad_marker(self): path = self.queue_path + '/messages' self._post_messages(path, repeat=5) query_string = 'limit=3&echo=true&marker=sfhlsfdjh2048' body = self.simulate_get(path, query_string=query_string, headers=self.headers) self.assertEqual(falcon.HTTP_200, self.srmock.status) self._empty_message_list(body) def test_no_uuid(self): headers = { 'Client-ID': "textid", 'X-Project-ID': '7e7e7e' } path = self.queue_path + '/messages' self.simulate_post(path, body='[{"body": 0, "ttl": 100}]', headers=headers) self.assertEqual(falcon.HTTP_400, self.srmock.status) self.simulate_get(path, headers=headers) self.assertEqual(falcon.HTTP_400, self.srmock.status) def test_get_claimed_contains_claim_id_in_href(self): path = self.queue_path res = self._post_messages(path + '/messages', repeat=5) for url in jsonutils.loads(res[0])['resources']: message = self.simulate_get(url) self.assertNotIn('claim_id', jsonutils.loads(message[0])['href']) self.simulate_post(path + '/claims', body='{"ttl": 100, "grace": 100}', headers=self.headers) self.assertEqual(falcon.HTTP_201, self.srmock.status) for url in jsonutils.loads(res[0])['resources']: message = self.simulate_get(url) self.assertIn('claim_id', jsonutils.loads(message[0])['href']) # NOTE(cpp-cabrera): regression test against bug #1210633 def test_when_claim_deleted_then_messages_unclaimed(self): path = self.queue_path self._post_messages(path + '/messages', repeat=5) # post claim self.simulate_post(path + '/claims', body='{"ttl": 100, "grace": 100}', headers=self.headers) self.assertEqual(falcon.HTTP_201, self.srmock.status) location = self.srmock.headers_dict['location'] # release claim self.simulate_delete(location, headers=self.headers) self.assertEqual(falcon.HTTP_204, self.srmock.status) # get unclaimed messages self.simulate_get(path + '/messages', query_string='echo=true', headers=self.headers) self.assertEqual(falcon.HTTP_200, self.srmock.status) # NOTE(cpp-cabrera): regression test against bug #1203842 def test_get_nonexistent_message_404s(self): path = self.url_prefix + '/queues/notthere/messages/a' self.simulate_get(path, headers=self.headers) self.assertEqual(falcon.HTTP_404, self.srmock.status) def test_get_multiple_invalid_messages_404s(self): path = self.url_prefix + '/queues/notthere/messages' self.simulate_get(path,
query_string='ids=a,b,c', headers=self.headers) self.assertEqual(falcon.HTTP_404, self.srmock.status) def test_delete_multiple_invalid_messages_204s(self): path = self.url_prefix + '/queues/notthere/messages' self.simulate_delete(path, query_string='ids=a,b,c', headers=self.headers) self.assertEqual(falcon.HTTP_204, self.srmock.status) def test_delete_message_with_invalid_claim_doesnt_delete_message(self): path = self.queue_path resp = self._post_messages(path + '/messages', 1) location = jsonutils.loads(resp[0])['resources'][0] self.simulate_delete(location, query_string='claim_id=invalid', headers=self.headers) self.assertEqual(falcon.HTTP_400, self.srmock.status) self.simulate_get(location, headers=self.headers) self.assertEqual(falcon.HTTP_200, self.srmock.status) def test_no_duplicated_messages_path_in_href(self): """Test for bug 1240897.""" path = self.queue_path + '/messages' self._post_messages(path, repeat=1) msg_id = self._get_msg_id(self.srmock.headers_dict) query_string = 'ids=%s' % msg_id body = self.simulate_get(path, query_string=query_string, headers=self.headers) messages = jsonutils.loads(body[0]) self.assertNotIn(self.queue_path + '/messages/messages', messages['messages'][0]['href']) def _post_messages(self, target, repeat=1): doc = {'messages': [{'body': 239, 'ttl': 300}] * repeat} body = jsonutils.dumps(doc) return self.simulate_post(target, body=body, headers=self.headers) def _get_msg_id(self, headers): return self._get_msg_ids(headers)[0] def _get_msg_ids(self, headers): return headers['location'].rsplit('=', 1)[-1].split(',') @ddt.data(1, 2, 10) def test_pop(self, message_count): self._post_messages(self.messages_path, repeat=message_count) msg_id = self._get_msg_id(self.srmock.headers_dict) target = self.messages_path + '/' + msg_id self.simulate_get(target, self.project_id) self.assertEqual(falcon.HTTP_200, self.srmock.status) query_string = 'pop=' + str(message_count) result = self.simulate_delete(self.messages_path, self.project_id, query_string=query_string) self.assertEqual(falcon.HTTP_200, self.srmock.status) result_doc = jsonutils.loads(result[0]) self.assertEqual(message_count, len(result_doc['messages'])) self.simulate_get(target, self.project_id) self.assertEqual(falcon.HTTP_404, self.srmock.status) @ddt.data('', 'pop=1000000', 'pop=10&ids=1', 'pop=-1') def test_pop_invalid(self, query_string): self.simulate_delete(self.messages_path, self.project_id, query_string=query_string) self.assertEqual(falcon.HTTP_400, self.srmock.status) def test_pop_empty_queue(self): query_string = 'pop=1' result = self.simulate_delete(self.messages_path, self.project_id, query_string=query_string) self.assertEqual(falcon.HTTP_200, self.srmock.status) result_doc = jsonutils.loads(result[0]) self.assertEqual([], result_doc['messages']) def test_pop_single_message(self): self._post_messages(self.messages_path, repeat=5) msg_id = self._get_msg_id(self.srmock.headers_dict) target = self.messages_path + '/' + msg_id self.simulate_get(target, self.project_id) self.assertEqual(falcon.HTTP_200, self.srmock.status) # Pop Single message from the queue query_string = 'pop=1' result = self.simulate_delete(self.messages_path, self.project_id, query_string=query_string) self.assertEqual(falcon.HTTP_200, self.srmock.status) # Get messages from the queue & verify message count query_string = 'echo=True' result = self.simulate_get(self.messages_path, self.project_id, query_string=query_string, headers=self.headers) result_doc = jsonutils.loads(result[0]) actual_msg_count = 
len(result_doc['messages']) expected_msg_count = 4 self.assertEqual(expected_msg_count, actual_msg_count) class TestMessagesMongoDBPooled(TestMessagesMongoDB): config_file = 'wsgi_mongodb_pooled.conf' # TODO(cpp-cabrera): remove this skipTest once pooled queue # listing is implemented def test_list(self): self.skipTest("Need to implement pooled queue listing.") class TestMessagesFaultyDriver(base.V2BaseFaulty): config_file = 'wsgi_faulty.conf' def test_simple(self): project_id = 'xyz' path = self.url_prefix + '/queues/fizbit/messages' body = '{"messages": [{"body": 239, "ttl": 100}]}' headers = { 'Client-ID': uuidutils.generate_uuid(), 'X-Project-ID': project_id } self.simulate_post(path, body=body, headers=headers) self.assertEqual(falcon.HTTP_500, self.srmock.status) self.simulate_get(path, headers=headers) self.assertEqual(falcon.HTTP_503, self.srmock.status) self.simulate_get(path + '/nonexistent', headers=headers) self.assertEqual(falcon.HTTP_503, self.srmock.status) self.simulate_delete(path + '/nada', headers=headers) self.assertEqual(falcon.HTTP_503, self.srmock.status) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/transport/wsgi/v2_0/test_ping.py0000664000175100017510000000235015033040005024316 0ustar00mylesmyles# Copyright (c) 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import falcon from zaqar.tests.unit.transport.wsgi import base class TestPing(base.V2Base): config_file = 'wsgi_mongodb.conf' def test_get(self): # TODO(kgriffs): Make use of setUp for setting the URL prefix # so we can just say something like: # # response = self.simulate_get('/ping') # response = self.simulate_get('/v2/ping') self.assertEqual(falcon.HTTP_204, self.srmock.status) self.assertEqual([], response) def test_head(self): response = self.simulate_head('/v2/ping') self.assertEqual(falcon.HTTP_204, self.srmock.status) self.assertEqual([], response) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/transport/wsgi/v2_0/test_pools_new.py0000664000175100017510000003656315033040005025403 0ustar00mylesmyles# Copyright (c) 2017 ZTE Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. 
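# The pool tests below lean on two context-manager fixtures, pool() and
# pools(), defined next. A typical interaction, sketched from the way
# they are used in this module:
#
#     with pool(self, 'my-pool', 100, self.mongodb_url, flavor='my-flavor'):
#         ...  # the pool exists here and is deleted again on exit
#
# Note that a pool still referenced by a flavor cannot be deleted, which
# is why several tests below first PATCH the pool with {'flavor': ''}.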
import contextlib import ddt import falcon from oslo_serialization import jsonutils from oslo_utils import uuidutils from zaqar import tests as testing from zaqar.tests.unit.transport.wsgi import base @contextlib.contextmanager def pool(test, name, weight, uri, flavor=None, options={}): """A context manager for constructing a pool for use in testing. Deletes the pool after exiting the context. :param test: Must expose simulate_* methods :param name: Name for this pool :type name: str :type weight: int :type uri: str :type flavor: str :type options: dict :returns: (name, weight, uri, flavor, options) :rtype: see above """ uri = "%s/%s" % (uri, uuidutils.generate_uuid()) doc = {'weight': weight, 'uri': uri, 'flavor': flavor, 'options': options} path = test.url_prefix + '/pools/' + name test.simulate_put(path, body=jsonutils.dumps(doc)) try: yield name, weight, uri, flavor, options finally: test.simulate_delete(path) @contextlib.contextmanager def pools(test, count, uri, flavor): """A context manager for constructing pools for use in testing. Deletes the pools after exiting the context. :param test: Must expose simulate_* methods :param count: Number of pools to create :type count: int :returns: a list of (path, weight, options) tuples :rtype: [(str, int, dict)] """ mongo_url = uri base = test.url_prefix + '/pools/' args = [(base + str(i), i, {str(i): i}) for i in range(count)] for path, weight, option in args: uri = "%s/%s" % (mongo_url, uuidutils.generate_uuid()) doc = {'weight': weight, 'uri': uri, 'flavor': flavor, 'options': option} test.simulate_put(path, body=jsonutils.dumps(doc)) try: yield args finally: for path, _, _ in args: # (gengchc): Remove flavor from the pool, # so we can delete the pool. test.simulate_patch(path, body=jsonutils.dumps({'flavor': ''})) test.simulate_delete(path) @ddt.ddt class TestPoolsMongoDB(base.V2Base): config_file = 'wsgi_mongodb_pooled.conf' @testing.requires_mongodb def setUp(self): super(TestPoolsMongoDB, self).setUp() self.doc = {'weight': 100, 'flavor': 'my-flavor', 'uri': self.mongodb_url} self.pool = self.url_prefix + '/pools/' + uuidutils.generate_uuid() self.simulate_put(self.pool, body=jsonutils.dumps(self.doc)) self.assertEqual(falcon.HTTP_201, self.srmock.status) def tearDown(self): super(TestPoolsMongoDB, self).tearDown() self.simulate_delete(self.pool) self.assertEqual(falcon.HTTP_204, self.srmock.status) def test_put_pool_works(self): name = uuidutils.generate_uuid() weight, uri = self.doc['weight'], self.doc['uri'] with pool(self, name, weight, uri, flavor='my-flavor'): self.assertEqual(falcon.HTTP_201, self.srmock.status) def test_put_raises_if_missing_fields(self): path = self.url_prefix + '/pools/' + uuidutils.generate_uuid() self.simulate_put(path, body=jsonutils.dumps({'weight': 100})) self.assertEqual(falcon.HTTP_400, self.srmock.status) self.simulate_put(path, body=jsonutils.dumps( {'uri': self.mongodb_url})) self.assertEqual(falcon.HTTP_400, self.srmock.status) @ddt.data(-1, 2**32+1, 'big') def test_put_raises_if_invalid_weight(self, weight): path = self.url_prefix + '/pools/' + uuidutils.generate_uuid() doc = {'weight': weight, 'uri': 'a'} self.simulate_put(path, body=jsonutils.dumps(doc)) self.assertEqual(falcon.HTTP_400, self.srmock.status) @ddt.data(-1, 2**32+1, [], 'localhost:27017') def test_put_raises_if_invalid_uri(self, uri): path = self.url_prefix + '/pools/' + uuidutils.generate_uuid() self.simulate_put(path, body=jsonutils.dumps({'weight': 1, 'uri': uri})) self.assertEqual(falcon.HTTP_400, self.srmock.status) @ddt.data(-1, 'wee', []) def
test_put_raises_if_invalid_options(self, options): path = self.url_prefix + '/pools/' + uuidutils.generate_uuid() doc = {'weight': 1, 'uri': 'a', 'options': options} self.simulate_put(path, body=jsonutils.dumps(doc)) self.assertEqual(falcon.HTTP_400, self.srmock.status) def test_put_same_database_uri(self): # NOTE(cabrera): setUp creates default pool expect = self.doc path = self.url_prefix + '/pools/' + uuidutils.generate_uuid() self.simulate_put(path, body=jsonutils.dumps(expect)) self.assertEqual(falcon.HTTP_409, self.srmock.status) def test_put_existing_overwrites(self): # NOTE(cabrera): setUp creates default pool expect = self.doc self.simulate_put(self.pool, body=jsonutils.dumps(expect)) self.assertEqual(falcon.HTTP_201, self.srmock.status) result = self.simulate_get(self.pool) self.assertEqual(falcon.HTTP_200, self.srmock.status) doc = jsonutils.loads(result[0]) self.assertEqual(expect['weight'], doc['weight']) self.assertEqual(expect['uri'], doc['uri']) def test_put_capabilities_mismatch_pool(self): mongodb_doc = self.doc self.simulate_put(self.pool, body=jsonutils.dumps(mongodb_doc)) self.assertEqual(falcon.HTTP_201, self.srmock.status) redis_doc = {'weight': 100, 'flavor': 'my-flavor', 'uri': 'redis://127.0.0.1:6379'} self.simulate_put(self.pool, body=jsonutils.dumps(redis_doc)) self.assertEqual(falcon.HTTP_400, self.srmock.status) def test_delete_works(self): # (gengchc): Remove flavor from the pool, so we can delete the pool. self.simulate_patch(self.pool, body=jsonutils.dumps({'flavor': ''})) self.simulate_delete(self.pool) self.assertEqual(falcon.HTTP_204, self.srmock.status) self.simulate_get(self.pool) self.assertEqual(falcon.HTTP_404, self.srmock.status) def test_get_nonexisting_raises_404(self): self.simulate_get(self.url_prefix + '/pools/nonexisting') self.assertEqual(falcon.HTTP_404, self.srmock.status) def _pool_expect(self, pool, xhref, xweight, xuri): self.assertIn('href', pool) self.assertIn('name', pool) self.assertEqual(xhref, pool['href']) self.assertIn('weight', pool) self.assertEqual(xweight, pool['weight']) self.assertIn('uri', pool) # NOTE(dynarro): we are using startswith because we append UUIDs to # pool URIs, to avoid duplications self.assertTrue(pool['uri'].startswith(xuri)) def test_get_works(self): result = self.simulate_get(self.pool) self.assertEqual(falcon.HTTP_200, self.srmock.status) pool = jsonutils.loads(result[0]) self._pool_expect(pool, self.pool, self.doc['weight'], self.doc['uri']) def test_detailed_get_works(self): result = self.simulate_get(self.pool, query_string='detailed=True') self.assertEqual(falcon.HTTP_200, self.srmock.status) pool = jsonutils.loads(result[0]) self._pool_expect(pool, self.pool, self.doc['weight'], self.doc['uri']) self.assertIn('options', pool) self.assertEqual({}, pool['options']) def test_patch_raises_if_missing_fields(self): self.simulate_patch(self.pool, body=jsonutils.dumps({'location': 1})) self.assertEqual(falcon.HTTP_400, self.srmock.status) def _patch_test(self, doc): result = self.simulate_patch(self.pool, body=jsonutils.dumps(doc)) self.assertEqual(falcon.HTTP_200, self.srmock.status) updated_pool = jsonutils.loads(result[0]) self._pool_expect(updated_pool, self.pool, doc['weight'], doc['uri']) result = self.simulate_get(self.pool, query_string='detailed=True') self.assertEqual(falcon.HTTP_200, self.srmock.status) pool = jsonutils.loads(result[0]) self._pool_expect(pool, self.pool, doc['weight'], doc['uri']) self.assertEqual(doc['options'], pool['options']) def test_patch_works(self): doc = {'weight': 101,
'uri': self.mongodb_url, 'options': {'a': 1}} self._patch_test(doc) def test_patch_works_with_extra_fields(self): doc = {'weight': 101, 'uri': self.mongodb_url, 'options': {'a': 1}, 'location': 100, 'partition': 'taco'} self._patch_test(doc) @ddt.data(-1, 2**32+1, 'big') def test_patch_raises_400_on_invalid_weight(self, weight): self.simulate_patch(self.pool, body=jsonutils.dumps({'weight': weight})) self.assertEqual(falcon.HTTP_400, self.srmock.status) @ddt.data(-1, 2**32+1, [], 'localhost:27017') def test_patch_raises_400_on_invalid_uri(self, uri): self.simulate_patch(self.pool, body=jsonutils.dumps({'uri': uri})) self.assertEqual(falcon.HTTP_400, self.srmock.status) @ddt.data(-1, 'wee', []) def test_patch_raises_400_on_invalid_options(self, options): self.simulate_patch(self.pool, body=jsonutils.dumps({'options': options})) self.assertEqual(falcon.HTTP_400, self.srmock.status) def test_patch_raises_404_if_pool_not_found(self): self.simulate_patch(self.url_prefix + '/pools/notexists', body=jsonutils.dumps({'weight': 1})) self.assertEqual(falcon.HTTP_404, self.srmock.status) def test_empty_listing(self): # (gengchc): Remove flavor from the pool, so we can delete the pool. self.simulate_patch(self.pool, body=jsonutils.dumps({'flavor': ''})) self.simulate_delete(self.pool) result = self.simulate_get(self.url_prefix + '/pools') results = jsonutils.loads(result[0]) self.assertEqual(falcon.HTTP_200, self.srmock.status) self.assertEqual(0, len(results['pools'])) self.assertIn('links', results) def _listing_test(self, count=10, limit=10, marker=None, detailed=False): # NOTE(cpp-cabrera): delete initial pool - it will interfere # with listing tests # (gengchc): Remove flavor from the pool, so we can delete the pool. self.simulate_patch(self.pool, body=jsonutils.dumps({'flavor': ''})) self.simulate_delete(self.pool) query = 'limit={0}&detailed={1}'.format(limit, detailed) if marker: query += '&marker={0}'.format(marker) with pools(self, count, self.doc['uri'], 'my-flavor') as expected: result = self.simulate_get(self.url_prefix + '/pools', query_string=query) self.assertEqual(falcon.HTTP_200, self.srmock.status) results = jsonutils.loads(result[0]) self.assertIsInstance(results, dict) self.assertIn('pools', results) self.assertIn('links', results) pool_list = results['pools'] link = results['links'][0] self.assertEqual('next', link['rel']) href = falcon.uri.parse_query_string(link['href'].split('?')[1]) self.assertIn('marker', href) self.assertEqual(str(limit), href['limit']) self.assertEqual(str(detailed).lower(), href['detailed']) next_query_string = ('marker={marker}&limit={limit}' '&detailed={detailed}').format(**href) next_result = self.simulate_get(link['href'].split('?')[0], query_string=next_query_string) self.assertEqual(falcon.HTTP_200, self.srmock.status) next_pool = jsonutils.loads(next_result[0]) next_pool_list = next_pool['pools'] self.assertIn('links', next_pool) if limit < count: self.assertEqual(min(limit, count-limit), len(next_pool_list)) else: # NOTE(jeffrey4l): when limit >= count, there will be no # pools in the 2nd page. self.assertEqual(0, len(next_pool_list)) self.assertEqual(min(limit, count), len(pool_list)) for s in pool_list + next_pool_list: # NOTE(flwang): It can't be assumed that both sqlalchemy and # mongodb return query results in the same order, e.g. the # order in which they were inserted. Actually, sqlalchemy can't # guarantee that. So we're leveraging the relationship between # the pool weight and the index of the pools fixture to get the # right pool to verify.
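# Each fixture pool was created with weight == its index, so the weight
# doubles as a stable key into `expected` regardless of listing order.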
expect = expected[s['weight']] path, weight, options = expect[:3] self._pool_expect(s, path, weight, self.doc['uri']) if detailed: self.assertIn('options', s) self.assertEqual(s['options'], expect[-1]) else: self.assertNotIn('options', s) def test_listing_works(self): self._listing_test() def test_detailed_listing_works(self): self._listing_test(detailed=True) @ddt.data(1, 5, 10, 15) def test_listing_works_with_limit(self, limit): self._listing_test(count=15, limit=limit) def test_listing_marker_is_respected(self): # (gengchc): Remove flavor from the pool, so we can delete the pool. self.simulate_patch(self.pool, body=jsonutils.dumps({'flavor': ''})) self.simulate_delete(self.pool) with pools(self, 10, self.doc['uri'], 'my-flavor') as expected: result = self.simulate_get(self.url_prefix + '/pools', query_string='marker=3') self.assertEqual(falcon.HTTP_200, self.srmock.status) pool_list = jsonutils.loads(result[0])['pools'] self.assertEqual(6, len(pool_list)) path, weight = expected[4][:2] self._pool_expect(pool_list[0], path, weight, self.doc['uri']) def test_listing_error_with_invalid_limit(self): self.simulate_delete(self.pool) query = 'limit={0}&detailed={1}'.format(0, True) with pools(self, 10, self.doc['uri'], 'my-flavor'): self.simulate_get(self.url_prefix + '/pools', query_string=query) self.assertEqual(falcon.HTTP_400, self.srmock.status) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/transport/wsgi/v2_0/test_purge.py0000664000175100017510000001125215033040005024504 0ustar00mylesmyles# Copyright 2016 Catalyst IT Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
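# The purge tests below create a queue holding both messages and a
# subscription, POST to <queue-path>/purge with a 'resource_types' list,
# and then verify that exactly the named resource types were wiped.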
import falcon from oslo_serialization import jsonutils from oslo_utils import uuidutils from zaqar.tests.unit.transport.wsgi import base class TestPurge(base.V2Base): config_file = 'wsgi_mongodb.conf' def setUp(self): super(TestPurge, self).setUp() self.headers = { 'Client-ID': uuidutils.generate_uuid() } self.queue_path = self.url_prefix + '/queues/myqueue' self.messages_path = self.queue_path + '/messages' self.subscription_path = (self.queue_path + '/subscriptions') self.messages = {'messages': [{'body': 'A', 'ttl': 300}, {'body': 'B', 'ttl': 400}, {'body': 'C', 'ttl': 500}]} self.subscriptions = {"subscriber": "http://ping.me", "ttl": 3600, "options": {"key": "value"}} def tearDown(self): self.simulate_delete(self.queue_path, headers=self.headers) super(TestPurge, self).tearDown() def _get_msg_id(self, headers): return self._get_msg_ids(headers)[0] def _get_msg_ids(self, headers): return headers['location'].rsplit('=', 1)[-1].split(',') def test_purge_particular_resource(self): # Post messages messages_body = jsonutils.dumps(self.messages) self.simulate_post(self.messages_path, body=messages_body, headers=self.headers) msg_ids = self._get_msg_ids(self.srmock.headers_dict) for msg_id in msg_ids: target = self.messages_path + '/' + msg_id self.simulate_get(target, headers=self.headers) self.assertEqual(falcon.HTTP_200, self.srmock.status) # Post subscriptions sub_resp = self.simulate_post(self.subscription_path, body=jsonutils.dumps(self.subscriptions), headers=self.headers) # Purge queue purge_body = jsonutils.dumps({'resource_types': ['messages']}) self.simulate_post(self.queue_path+"/purge", body=purge_body) for msg_id in msg_ids: target = self.messages_path + '/' + msg_id self.simulate_get(target, headers=self.headers) self.assertEqual(falcon.HTTP_404, self.srmock.status) # Check subscriptions are still there resp_list = self.simulate_get(self.subscription_path, headers=self.headers) resp_list_doc = jsonutils.loads(resp_list[0]) sid = resp_list_doc['subscriptions'][0]['id'] sub_resp_doc = jsonutils.loads(sub_resp[0]) self.assertEqual(sub_resp_doc['subscription_id'], sid) def test_purge_by_default(self): # Post messages messages_body = jsonutils.dumps(self.messages) self.simulate_post(self.messages_path, body=messages_body, headers=self.headers) msg_ids = self._get_msg_ids(self.srmock.headers_dict) for msg_id in msg_ids: target = self.messages_path + '/' + msg_id self.simulate_get(target, headers=self.headers) self.assertEqual(falcon.HTTP_200, self.srmock.status) # Post subscriptions sub_resp = self.simulate_post(self.subscription_path, body=jsonutils.dumps(self.subscriptions), headers=self.headers) # Purge queue purge_body = jsonutils.dumps({'resource_types': ['messages', 'subscriptions']}) self.simulate_post(self.queue_path+"/purge", body=purge_body) for msg_id in msg_ids: target = self.messages_path + '/' + msg_id self.simulate_get(target, headers=self.headers) self.assertEqual(falcon.HTTP_404, self.srmock.status) # Check subscriptions are purged as well sub_id = jsonutils.loads(sub_resp[0])['subscription_id'] self.simulate_get(self.subscription_path + "/" + sub_id, headers=self.headers) self.assertEqual(falcon.HTTP_404, self.srmock.status) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/transport/wsgi/v2_0/test_queue_lifecycle.py0000664000175100017510000006232615033040005026535 0ustar00mylesmyles# Copyright (c) 2013 Rackspace, Inc.
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. from unittest import mock import ddt import falcon from oslo_serialization import jsonutils from oslo_utils import uuidutils from zaqar.storage import errors as storage_errors from zaqar import tests as testing from zaqar.tests.unit.transport.wsgi import base @ddt.ddt class TestQueueLifecycleMongoDB(base.V2Base): config_file = 'wsgi_mongodb.conf' @testing.requires_mongodb def setUp(self): super(TestQueueLifecycleMongoDB, self).setUp() self.queue_path = self.url_prefix + '/queues' self.gumshoe_queue_path = self.queue_path + '/gumshoe' self.fizbat_queue_path = self.queue_path + '/fizbat' self.headers = { 'Client-ID': uuidutils.generate_uuid(), 'X-Project-ID': '3387309841abc_' } def tearDown(self): control = self.boot.control storage = self.boot.storage._storage connection = storage.connection connection.drop_database(control.queues_database) for db in storage.message_databases: connection.drop_database(db) super(TestQueueLifecycleMongoDB, self).tearDown() def test_without_project_id(self): headers = { 'Client-ID': uuidutils.generate_uuid(), } self.simulate_put(self.gumshoe_queue_path, headers=headers, need_project_id=False) self.assertEqual(falcon.HTTP_400, self.srmock.status) self.simulate_delete(self.gumshoe_queue_path, headers=headers, need_project_id=False) self.assertEqual(falcon.HTTP_400, self.srmock.status) def test_empty_project_id(self): headers = { 'Client-ID': uuidutils.generate_uuid(), 'X-Project-ID': '' } self.simulate_put(self.gumshoe_queue_path, headers=headers) self.assertEqual(falcon.HTTP_400, self.srmock.status) self.simulate_delete(self.gumshoe_queue_path, headers=headers) self.assertEqual(falcon.HTTP_400, self.srmock.status) @ddt.data('480924', 'foo') def test_basics_thoroughly(self, project_id): headers = { 'Client-ID': uuidutils.generate_uuid(), 'X-Project-ID': project_id } gumshoe_queue_path_stats = self.gumshoe_queue_path + '/stats' # Stats are empty - queue not created yet self.simulate_get(gumshoe_queue_path_stats, headers=headers) self.assertEqual(falcon.HTTP_200, self.srmock.status) # Create doc = '{"messages": {"ttl": 600}}' self.simulate_put(self.gumshoe_queue_path, headers=headers, body=doc) self.assertEqual(falcon.HTTP_201, self.srmock.status) location = self.srmock.headers_dict['Location'] self.assertEqual(location, self.gumshoe_queue_path) # Fetch metadata result = self.simulate_get(self.gumshoe_queue_path, headers=headers) result_doc = jsonutils.loads(result[0]) self.assertEqual(falcon.HTTP_200, self.srmock.status) ref_doc = jsonutils.loads(doc) ref_doc['_default_message_ttl'] = 3600 ref_doc['_max_messages_post_size'] = 262144 ref_doc['_default_message_delay'] = 0 ref_doc['_dead_letter_queue'] = None ref_doc['_dead_letter_queue_messages_ttl'] = None ref_doc['_max_claim_count'] = None ref_doc['_enable_encrypt_messages'] = False self.assertEqual(ref_doc, result_doc) # Stats empty queue self.simulate_get(gumshoe_queue_path_stats, headers=headers) self.assertEqual(falcon.HTTP_200, self.srmock.status) # Delete 
self.simulate_delete(self.gumshoe_queue_path, headers=headers) self.assertEqual(falcon.HTTP_204, self.srmock.status) # Get non-existent stats self.simulate_get(gumshoe_queue_path_stats, headers=headers) self.assertEqual(falcon.HTTP_200, self.srmock.status) @ddt.data('1234567890', '11111111111111111111111111111111111') def test_basics_thoroughly_with_different_client_id(self, client_id): self.conf.set_override('client_id_uuid_safe', 'off', 'transport') headers = { 'Client-ID': client_id, 'X-Project-ID': '480924' } gumshoe_queue_path_stats = self.gumshoe_queue_path + '/stats' # Stats are empty - queue not created yet self.simulate_get(gumshoe_queue_path_stats, headers=headers) self.assertEqual(falcon.HTTP_200, self.srmock.status) # Create doc = '{"messages": {"ttl": 600}}' self.simulate_put(self.gumshoe_queue_path, headers=headers, body=doc) self.assertEqual(falcon.HTTP_201, self.srmock.status) location = self.srmock.headers_dict['Location'] self.assertEqual(location, self.gumshoe_queue_path) # Fetch metadata result = self.simulate_get(self.gumshoe_queue_path, headers=headers) result_doc = jsonutils.loads(result[0]) self.assertEqual(falcon.HTTP_200, self.srmock.status) ref_doc = jsonutils.loads(doc) ref_doc['_default_message_ttl'] = 3600 ref_doc['_max_messages_post_size'] = 262144 ref_doc['_default_message_delay'] = 0 ref_doc['_dead_letter_queue'] = None ref_doc['_dead_letter_queue_messages_ttl'] = None ref_doc['_max_claim_count'] = None ref_doc['_enable_encrypt_messages'] = False self.assertEqual(ref_doc, result_doc) # Stats empty queue self.simulate_get(gumshoe_queue_path_stats, headers=headers) self.assertEqual(falcon.HTTP_200, self.srmock.status) # Delete self.simulate_delete(self.gumshoe_queue_path, headers=headers) self.assertEqual(falcon.HTTP_204, self.srmock.status) # Get non-existent stats self.simulate_get(gumshoe_queue_path_stats, headers=headers) self.assertEqual(falcon.HTTP_200, self.srmock.status) def test_name_restrictions(self): self.simulate_put(self.queue_path + '/Nice-Boat_2', headers=self.headers) self.assertEqual(falcon.HTTP_201, self.srmock.status) self.simulate_put(self.queue_path + '/Nice-Bo@t', headers=self.headers) self.assertEqual(falcon.HTTP_400, self.srmock.status) self.simulate_put(self.queue_path + '/_' + 'niceboat' * 8, headers=self.headers) self.assertEqual(falcon.HTTP_400, self.srmock.status) self.simulate_put(self.queue_path + '/Service.test_queue', headers=self.headers) self.assertEqual(falcon.HTTP_201, self.srmock.status) def test_project_id_restriction(self): muvluv_queue_path = self.queue_path + '/Muv-Luv' self.simulate_put(muvluv_queue_path, headers={'Client-ID': uuidutils.generate_uuid(), 'X-Project-ID': 'JAM Project' * 24}) self.assertEqual(falcon.HTTP_400, self.srmock.status) # no charset restrictions self.simulate_put(muvluv_queue_path, headers={'Client-ID': uuidutils.generate_uuid(), 'X-Project-ID': 'JAM Project'}) self.assertEqual(falcon.HTTP_201, self.srmock.status) def test_non_ascii_name(self): test_params = (('/queues/non-ascii-n\u0153me', 'utf-8'), ('/queues/non-ascii-n\xc4me', 'iso8859-1')) for uri, enc in test_params: uri = self.url_prefix + uri self.simulate_put(uri, headers=self.headers) self.assertEqual(falcon.HTTP_400, self.srmock.status) self.simulate_delete(uri, headers=self.headers) self.assertEqual(falcon.HTTP_400, self.srmock.status) def test_no_metadata(self): self.simulate_put(self.fizbat_queue_path, headers=self.headers) self.assertEqual(falcon.HTTP_201, self.srmock.status) self.simulate_put(self.fizbat_queue_path, body='', 
headers=self.headers) self.assertEqual(falcon.HTTP_204, self.srmock.status) result = self.simulate_get(self.fizbat_queue_path, headers=self.headers) result_doc = jsonutils.loads(result[0]) self.assertEqual(256 * 1024, result_doc.get('_max_messages_post_size')) self.assertEqual(3600, result_doc.get('_default_message_ttl')) self.assertEqual(0, result_doc.get('_default_message_delay')) @ddt.data('{', '[]', '.', ' ') def test_bad_metadata(self, document): self.simulate_put(self.fizbat_queue_path, headers=self.headers, body=document) self.assertEqual(falcon.HTTP_400, self.srmock.status) def test_too_much_metadata(self): self.simulate_put(self.fizbat_queue_path, headers=self.headers) self.assertEqual(falcon.HTTP_201, self.srmock.status) doc = '{{"messages": {{"ttl": 600}}, "padding": "{pad}"}}' max_size = self.transport_cfg.max_queue_metadata padding_len = max_size - (len(doc) - 10) + 1 doc = doc.format(pad='x' * padding_len) self.simulate_put(self.fizbat_queue_path, headers=self.headers, body=doc) self.assertEqual(falcon.HTTP_400, self.srmock.status) def test_way_too_much_metadata(self): self.simulate_put(self.fizbat_queue_path, headers=self.headers) self.assertEqual(falcon.HTTP_201, self.srmock.status) doc = '{{"messages": {{"ttl": 600}}, "padding": "{pad}"}}' max_size = self.transport_cfg.max_queue_metadata padding_len = max_size * 100 doc = doc.format(pad='x' * padding_len) self.simulate_put(self.fizbat_queue_path, headers=self.headers, body=doc) self.assertEqual(falcon.HTTP_400, self.srmock.status) def test_custom_metadata(self): # Set doc = '{{"messages": {{"ttl": 600}}, "padding": "{pad}"}}' max_size = self.transport_cfg.max_queue_metadata padding_len = max_size - (len(doc) - 2) doc = doc.format(pad='x' * padding_len) self.simulate_put(self.fizbat_queue_path, headers=self.headers, body=doc) self.assertEqual(falcon.HTTP_201, self.srmock.status) # Get result = self.simulate_get(self.fizbat_queue_path, headers=self.headers) result_doc = jsonutils.loads(result[0]) ref_doc = jsonutils.loads(doc) ref_doc['_default_message_ttl'] = 3600 ref_doc['_max_messages_post_size'] = 262144 ref_doc['_default_message_delay'] = 0 ref_doc['_dead_letter_queue'] = None ref_doc['_dead_letter_queue_messages_ttl'] = None ref_doc['_max_claim_count'] = None ref_doc['_enable_encrypt_messages'] = False self.assertEqual(ref_doc, result_doc) self.assertEqual(falcon.HTTP_200, self.srmock.status) def test_update_metadata(self): xyz_queue_path = self.url_prefix + '/queues/xyz' xyz_queue_path_metadata = xyz_queue_path headers = { 'Client-ID': uuidutils.generate_uuid(), 'X-Project-ID': uuidutils.generate_uuid() } # Create self.simulate_put(xyz_queue_path, headers=headers) self.assertEqual(falcon.HTTP_201, self.srmock.status) headers.update({'Content-Type': "application/openstack-messaging-v2.0-json-patch"}) # add metadata doc1 = ('[{"op":"add", "path": "/metadata/key1", "value": 1},' '{"op":"add", "path": "/metadata/key2", "value": 1}]') self.simulate_patch(xyz_queue_path_metadata, headers=headers, body=doc1) self.assertEqual(falcon.HTTP_200, self.srmock.status) # remove reserved metadata, zaqar will do nothing and return 200, # because doc3 = '[{"op":"remove", "path": "/metadata/_default_message_ttl"}]' self.simulate_patch(xyz_queue_path_metadata, headers=headers, body=doc3) self.assertEqual(falcon.HTTP_200, self.srmock.status) # replace metadata doc2 = '[{"op":"replace", "path": "/metadata/key1", "value": 2}]' self.simulate_patch(xyz_queue_path_metadata, headers=headers, body=doc2) self.assertEqual(falcon.HTTP_200, 
self.srmock.status) # replace reserved metadata, zaqar will store the reserved metadata doc2 = ('[{"op":"replace", "path": "/metadata/_default_message_ttl",' '"value": 300}]') self.simulate_patch(xyz_queue_path_metadata, headers=headers, body=doc2) self.assertEqual(falcon.HTTP_200, self.srmock.status) # Get result = self.simulate_get(xyz_queue_path_metadata, headers=headers) result_doc = jsonutils.loads(result[0]) self.assertEqual({'key1': 2, 'key2': 1, '_default_message_ttl': 300, '_max_messages_post_size': 262144, '_default_message_delay': 0, '_dead_letter_queue': None, '_dead_letter_queue_messages_ttl': None, '_max_claim_count': None, '_enable_encrypt_messages': False}, result_doc) # remove metadata doc3 = '[{"op":"remove", "path": "/metadata/key1"}]' self.simulate_patch(xyz_queue_path_metadata, headers=headers, body=doc3) self.assertEqual(falcon.HTTP_200, self.srmock.status) # remove reserved metadata doc3 = '[{"op":"remove", "path": "/metadata/_default_message_ttl"}]' self.simulate_patch(xyz_queue_path_metadata, headers=headers, body=doc3) self.assertEqual(falcon.HTTP_200, self.srmock.status) # Get result = self.simulate_get(xyz_queue_path_metadata, headers=headers) result_doc = jsonutils.loads(result[0]) self.assertEqual({'key2': 1, '_default_message_ttl': 3600, '_max_messages_post_size': 262144, '_default_message_delay': 0, '_dead_letter_queue': None, '_dead_letter_queue_messages_ttl': None, '_max_claim_count': None, '_enable_encrypt_messages': False}, result_doc) # replace non-existent metadata doc4 = '[{"op":"replace", "path": "/metadata/key3", "value":2}]' self.simulate_patch(xyz_queue_path_metadata, headers=headers, body=doc4) self.assertEqual(falcon.HTTP_409, self.srmock.status) # remove non-existent metadata doc5 = '[{"op":"remove", "path": "/metadata/key3"}]' self.simulate_patch(xyz_queue_path_metadata, headers=headers, body=doc5) self.assertEqual(falcon.HTTP_409, self.srmock.status) self.simulate_delete(xyz_queue_path, headers=headers) # add metadata to non-existent queue doc1 = ('[{"op":"add", "path": "/metadata/key1", "value": 1},' '{"op":"add", "path": "/metadata/key2", "value": 1}]') self.simulate_patch(xyz_queue_path_metadata, headers=headers, body=doc1) self.assertEqual(falcon.HTTP_404, self.srmock.status) # replace metadata in non-existent queue doc4 = '[{"op":"replace", "path": "/metadata/key3", "value":2}]' self.simulate_patch(xyz_queue_path_metadata, headers=headers, body=doc4) self.assertEqual(falcon.HTTP_404, self.srmock.status) # remove metadata from non-existent queue doc5 = '[{"op":"remove", "path": "/metadata/key3"}]' self.simulate_patch(xyz_queue_path_metadata, headers=headers, body=doc5) self.assertEqual(falcon.HTTP_404, self.srmock.status) def test_list(self): arbitrary_number = 644079696574693 project_id = str(arbitrary_number) client_id = uuidutils.generate_uuid() header = { 'X-Project-ID': project_id, 'Client-ID': client_id } # NOTE(kgriffs): It's important that this one sort after the one # above. This is in order to prove that bug/1236605 is fixed, and # stays fixed! 
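        # (The alternate ID is derived as arbitrary_number + 1, so it has
        # the same length and sorts lexicographically just after
        # project_id; the listing below must not leak the alt project's
        # queue.)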
alt_project_id = str(arbitrary_number + 1) # List empty result = self.simulate_get(self.queue_path, headers=header) self.assertEqual(falcon.HTTP_200, self.srmock.status) results = jsonutils.loads(result[0]) self.assertEqual([], results['queues']) self.assertIn('links', results) self.assertEqual(0, len(results['links'])) # Payload exceeded self.simulate_get(self.queue_path, headers=header, query_string='limit=21') self.assertEqual(falcon.HTTP_400, self.srmock.status) # Create some def create_queue(name, project_id, body): altheader = {'Client-ID': client_id} if project_id is not None: altheader['X-Project-ID'] = project_id uri = self.queue_path + '/' + name self.simulate_put(uri, headers=altheader, body=body) create_queue('q1', project_id, '{"node": 31}') create_queue('q2', project_id, '{"node": 32}') create_queue('q3', project_id, '{"node": 33}') create_queue('q3', alt_project_id, '{"alt": 1}') # List (limit) result = self.simulate_get(self.queue_path, headers=header, query_string='limit=2') result_doc = jsonutils.loads(result[0]) self.assertEqual(2, len(result_doc['queues'])) # List (no metadata, get all) result = self.simulate_get(self.queue_path, headers=header, query_string='limit=5') result_doc = jsonutils.loads(result[0]) [target, params] = result_doc['links'][0]['href'].split('?') self.simulate_get(target, headers=header, query_string=params) self.assertEqual(falcon.HTTP_200, self.srmock.status) # Ensure we didn't pick up the queue from the alt project. queues = result_doc['queues'] self.assertEqual(3, len(queues)) # List with metadata result = self.simulate_get(self.queue_path, headers=header, query_string='detailed=true') self.assertEqual(falcon.HTTP_200, self.srmock.status) result_doc = jsonutils.loads(result[0]) [target, params] = result_doc['links'][0]['href'].split('?') queue = result_doc['queues'][0] result = self.simulate_get(queue['href'], headers=header) result_doc = jsonutils.loads(result[0]) self.assertEqual(queue['metadata'], result_doc) self.assertEqual({'node': 31, '_default_message_ttl': 3600, '_max_messages_post_size': 262144, '_default_message_delay': 0, '_dead_letter_queue': None, '_dead_letter_queue_messages_ttl': None, '_max_claim_count': None, '_enable_encrypt_messages': False}, result_doc) # queue filter result = self.simulate_get(self.queue_path, headers=header, query_string='node=34') self.assertEqual(falcon.HTTP_200, self.srmock.status) result_doc = jsonutils.loads(result[0]) self.assertEqual(0, len(result_doc['queues'])) # List tail self.simulate_get(target, headers=header, query_string=params) self.assertEqual(falcon.HTTP_200, self.srmock.status) # List manually-constructed tail self.simulate_get(target, headers=header, query_string='marker=zzz') self.assertEqual(falcon.HTTP_200, self.srmock.status) def test_list_returns_503_on_nopoolfound_exception(self): arbitrary_number = 644079696574693 project_id = str(arbitrary_number) client_id = uuidutils.generate_uuid() header = { 'X-Project-ID': project_id, 'Client-ID': client_id } queue_controller = self.boot.storage.queue_controller with mock.patch.object(queue_controller, 'list') as mock_queue_list: def queue_generator(): raise storage_errors.NoPoolFound() # This generator tries to be like queue controller list generator # in some ways. 
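            # It raises NoPoolFound on the first iteration, which the
            # WSGI transport is expected to translate into an HTTP 503.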
def fake_generator(): yield queue_generator() yield {} mock_queue_list.return_value = fake_generator() self.simulate_get(self.queue_path, headers=header) self.assertEqual(falcon.HTTP_503, self.srmock.status) def test_list_with_filter(self): arbitrary_number = 644079696574693 project_id = str(arbitrary_number) client_id = uuidutils.generate_uuid() header = { 'X-Project-ID': project_id, 'Client-ID': client_id } # Create some def create_queue(name, project_id, body): altheader = {'Client-ID': client_id} if project_id is not None: altheader['X-Project-ID'] = project_id uri = self.queue_path + '/' + name self.simulate_put(uri, headers=altheader, body=body) create_queue('q1', project_id, '{"test_metadata_key1": "value1"}') create_queue('q2', project_id, '{"_max_messages_post_size": 2000}') create_queue('q3', project_id, '{"test_metadata_key2": 30}') # List (filter query) result = self.simulate_get(self.queue_path, headers=header, query_string='name=q&test_metadata_key2=30') result_doc = jsonutils.loads(result[0]) self.assertEqual(1, len(result_doc['queues'])) self.assertEqual('q3', result_doc['queues'][0]['name']) # List (filter query) result = self.simulate_get(self.queue_path, headers=header, query_string='_max_messages_post_size=2000') result_doc = jsonutils.loads(result[0]) self.assertEqual(1, len(result_doc['queues'])) self.assertEqual('q2', result_doc['queues'][0]['name']) # List (filter query) result = self.simulate_get(self.queue_path, headers=header, query_string='name=q') result_doc = jsonutils.loads(result[0]) self.assertEqual(3, len(result_doc['queues'])) # List (filter query) result = self.simulate_get(self.queue_path, headers=header, query_string='with_count=true') result_doc = jsonutils.loads(result[0]) self.assertEqual(3, result_doc['count']) class TestQueueLifecycleFaultyDriver(base.V2BaseFaulty): config_file = 'wsgi_faulty.conf' def test_simple(self): self.headers = { 'Client-ID': uuidutils.generate_uuid(), 'X-Project-ID': '338730984abc_1' } gumshoe_queue_path = self.url_prefix + '/queues/gumshoe' doc = '{"messages": {"ttl": 600}}' self.simulate_put(gumshoe_queue_path, headers=self.headers, body=doc) self.assertEqual(falcon.HTTP_503, self.srmock.status) location = ('Location', gumshoe_queue_path) self.assertNotIn(location, self.srmock.headers) result = self.simulate_get(gumshoe_queue_path, headers=self.headers) result_doc = jsonutils.loads(result[0]) self.assertEqual(falcon.HTTP_503, self.srmock.status) self.assertNotEqual(result_doc, jsonutils.loads(doc)) self.simulate_get(gumshoe_queue_path + '/stats', headers=self.headers) self.assertEqual(falcon.HTTP_503, self.srmock.status) self.simulate_get(self.url_prefix + '/queues', headers=self.headers) self.assertEqual(falcon.HTTP_503, self.srmock.status) self.simulate_delete(gumshoe_queue_path, headers=self.headers) self.assertEqual(falcon.HTTP_503, self.srmock.status) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/transport/wsgi/v2_0/test_subscriptions.py0000664000175100017510000004712415033040005026300 0ustar00mylesmyles# Copyright (c) 2015 Catalyst IT Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. 
You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. from unittest import mock import ddt import falcon from oslo_serialization import jsonutils from oslo_utils import uuidutils from zaqar.common import auth from zaqar.notification import notifier from zaqar.storage import errors as storage_errors from zaqar import tests as testing from zaqar.tests.unit.transport.wsgi import base @ddt.ddt class TestSubscriptionsMongoDB(base.V2Base): config_file = 'wsgi_mongodb_pooled.conf' @testing.requires_mongodb def setUp(self): super(TestSubscriptionsMongoDB, self).setUp() if self.conf.pooling: for i in range(1): uri = self.conf['drivers:management_store:mongodb'].uri doc = {'weight': 100, 'uri': uri} self.simulate_put(self.url_prefix + '/pools/' + str(i), body=jsonutils.dumps(doc)) self.assertEqual(falcon.HTTP_201, self.srmock.status) self.addCleanup(self.simulate_delete, self.url_prefix + '/pools/' + str(i), headers=self.headers) self.project_id = '7e55e1a7exyz' self.headers = { 'Client-ID': uuidutils.generate_uuid(), 'X-Project-ID': self.project_id } self.queue = 'fake-topic' self.queue_path = self.url_prefix + '/queues/' + self.queue doc = '{"_ttl": 60}' self.simulate_put(self.queue_path, body=doc, headers=self.headers) self.subscription_path = (self.url_prefix + '/queues/' + self.queue + '/subscriptions') self.subscription = 'fake-id' self.confirm_path = (self.url_prefix + '/queues/' + self.queue + '/subscriptions/' + self.subscription + '/confirm') self.conf.signed_url.secret_key = 'test_key' def tearDown(self): resp = self.simulate_get(self.subscription_path, headers=self.headers) resp_doc = jsonutils.loads(resp[0]) for s in resp_doc['subscriptions']: self.simulate_delete(self.subscription_path + '/' + s['id'], headers=self.headers) self.simulate_delete(self.queue_path) super(TestSubscriptionsMongoDB, self).tearDown() def _create_subscription(self, subscriber='http://triger.me', ttl=600, options='{"a":1}'): doc = ('{"subscriber": "%s", "ttl": %s, "options": %s}' % (subscriber, ttl, options)) return self.simulate_post(self.subscription_path, body=doc, headers=self.headers) def test_create_works(self): resp = self._create_subscription() self.assertEqual(falcon.HTTP_201, self.srmock.status) resp_doc = jsonutils.loads(resp[0]) resp_list = self.simulate_get(self.subscription_path, headers=self.headers) resp_list_doc = jsonutils.loads(resp_list[0]) sid = resp_list_doc['subscriptions'][0]['id'] self.assertEqual(resp_doc['subscription_id'], sid) def test_create_duplicate_409(self): self._create_subscription(subscriber='http://CCC.com') self.assertEqual(falcon.HTTP_201, self.srmock.status) # the subscription is not confirmed, So the second request will # retry confirm and return 201 again. 
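        # (Once a subscription has been confirmed, recreating it is a
        # real duplicate and returns 409 instead; see
        # test_recreate_after_confirmed below.)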
self._create_subscription(subscriber='http://CCC.com') self.assertEqual(falcon.HTTP_201, self.srmock.status) @mock.patch.object(notifier.NotifierDriver, 'send_confirm_notification') def test_create_and_send_notification(self, mock_send_confirm): self._create_subscription(subscriber='http://CCC.com') self.assertEqual(1, mock_send_confirm.call_count) @mock.patch.object(notifier.NotifierDriver, 'send_confirm_notification') def test_recreate(self, mock_send_confirm): resp = self._create_subscription(subscriber='http://CCC.com') resp_doc = jsonutils.loads(resp[0]) s_id1 = resp_doc['subscription_id'] self.assertEqual(1, mock_send_confirm.call_count) resp = self._create_subscription(subscriber='http://CCC.com') resp_doc = jsonutils.loads(resp[0]) s_id2 = resp_doc['subscription_id'] self.assertEqual(2, mock_send_confirm.call_count) self.assertEqual(s_id1, s_id2) @mock.patch.object(notifier.NotifierDriver, 'send_confirm_notification') def test_recreate_after_confirmed(self, mock_send_confirm): resp = self._create_subscription(subscriber='http://CCC.com') self.assertEqual(falcon.HTTP_201, self.srmock.status) doc = '{"confirmed": true}' resp_doc = jsonutils.loads(resp[0]) confirm_path = (self.url_prefix + '/queues/' + self.queue + '/subscriptions/' + resp_doc['subscription_id'] + '/confirm') self.simulate_put(confirm_path, body=doc, headers=self.headers) self.assertEqual(falcon.HTTP_204, self.srmock.status) self.assertEqual(1, mock_send_confirm.call_count) self._create_subscription(subscriber='http://CCC.com') self.assertEqual(falcon.HTTP_409, self.srmock.status) def test_create_invalid_body_400(self): resp = self._create_subscription(options='xxx') self.assertEqual(falcon.HTTP_400, self.srmock.status) resp_doc = jsonutils.loads(resp[0]) self.assertIn('body could not be parsed', resp_doc['description']) def test_create_no_body(self): resp = self.simulate_post(self.subscription_path, headers=self.headers) self.assertEqual(falcon.HTTP_400, self.srmock.status) self.assertIn('Missing parameter', jsonutils.loads(resp[0])['description']) def test_create_invalid_subscriber_400(self): resp = self._create_subscription(subscriber='fake') self.assertEqual(falcon.HTTP_400, self.srmock.status) resp_doc = jsonutils.loads(resp[0]) self.assertIn('must be supported in the list', resp_doc['description']) def test_create_unsupported_subscriber_400(self): resp = self._create_subscription(subscriber='email://fake') self.assertEqual(falcon.HTTP_400, self.srmock.status) resp_doc = jsonutils.loads(resp[0]) self.assertIn('must be supported in the list', resp_doc['description']) def test_create_invalid_options_400(self): resp = self._create_subscription(options='1') self.assertEqual(falcon.HTTP_400, self.srmock.status) resp_doc = jsonutils.loads(resp[0]) self.assertIn('must be a dict', resp_doc['description']) def test_create_invalid_ttl(self): resp = self._create_subscription(ttl='"invalid"') self.assertEqual(falcon.HTTP_400, self.srmock.status) resp_doc = jsonutils.loads(resp[0]) self.assertIn('must be an integer', resp_doc['description']) def _list_subscription(self, count=10, limit=10, marker=None): for i in range(count): self._create_subscription(subscriber='http://' + str(i)) query = 'limit={0}'.format(limit) if marker: query += '&marker={0}'.format(marker) resp = self.simulate_get(self.subscription_path, query_string=query, headers=self.headers) self.assertEqual(falcon.HTTP_200, self.srmock.status) resp_doc = jsonutils.loads(resp[0]) self.assertIsInstance(resp_doc, dict) self.assertIn('subscriptions', resp_doc) 
self.assertIn('links', resp_doc) subscriptions_list = resp_doc['subscriptions'] link = resp_doc['links'][0] self.assertEqual('next', link['rel']) href = falcon.uri.parse_query_string(link['href'].split('?')[1]) self.assertIn('marker', href) self.assertEqual(str(limit), href['limit']) next_query_string = ('marker={marker}&limit={limit}').format(**href) next_result = self.simulate_get(link['href'].split('?')[0], query_string=next_query_string) next_subscriptions = jsonutils.loads(next_result[0]) next_subscriptions_list = next_subscriptions['subscriptions'] self.assertEqual(falcon.HTTP_200, self.srmock.status) self.assertIn('links', next_subscriptions) if limit < count: self.assertEqual(min(limit, count-limit), len(next_subscriptions_list)) else: self.assertEqual(0, len(next_subscriptions_list)) self.assertEqual(min(limit, count), len(subscriptions_list)) def test_list_works(self): self._list_subscription() def test_list_returns_503_on_nopoolfound_exception(self): arbitrary_number = 644079696574693 project_id = str(arbitrary_number) client_id = uuidutils.generate_uuid() header = { 'X-Project-ID': project_id, 'Client-ID': client_id } subscription_controller = self.boot.storage.subscription_controller with mock.patch.object(subscription_controller, 'list') as \ mock_subscription_list: def subscription_generator(): raise storage_errors.NoPoolFound() # This generator tries to be like subscription controller list # generator in some ways. def fake_generator(): yield subscription_generator() yield {} mock_subscription_list.return_value = fake_generator() self.simulate_get(self.subscription_path, headers=header) self.assertEqual(falcon.HTTP_503, self.srmock.status) def test_list_empty(self): resp = self.simulate_get(self.subscription_path, headers=self.headers) self.assertEqual(falcon.HTTP_200, self.srmock.status) resp_doc = jsonutils.loads(resp[0]) self.assertIsInstance(resp_doc, dict) self.assertIn('subscriptions', resp_doc) self.assertIn('links', resp_doc) self.assertEqual([], resp_doc['subscriptions']) self.assertEqual([], resp_doc['links']) @ddt.data(1, 5, 10, 15) def test_listing_works_with_limit(self, limit): self._list_subscription(count=15, limit=limit) def test_listing_marker_is_respected(self): for i in range(15): self._create_subscription(subscriber='http://' + str(i)) resp = self.simulate_get(self.subscription_path, query_string='limit=20', headers=self.headers) subscriptions_list = jsonutils.loads(resp[0])['subscriptions'] id_list = sorted([s['id'] for s in subscriptions_list]) resp = self.simulate_get(self.subscription_path, query_string='marker={0}'.format(id_list[9]), headers=self.headers) self.assertEqual(falcon.HTTP_200, self.srmock.status) next_subscriptions_list = jsonutils.loads(resp[0])['subscriptions'] self.assertEqual(5, len(next_subscriptions_list)) # The subscriptions's age should be 0 at this moment. But in some # unexpected case, such as slow test, the age maybe larger than 0. 
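        # Pop 'age' from both documents and only require the newer
        # listing's value to be >= the older one before comparing the
        # remaining fields for equality.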
self.assertGreaterEqual(next_subscriptions_list[0].pop('age'), subscriptions_list[10].pop('age')) self.assertEqual(subscriptions_list[10], next_subscriptions_list[0]) def test_get_works(self): self._create_subscription() resp = self.simulate_get(self.subscription_path, headers=self.headers) resp_doc = jsonutils.loads(resp[0]) sid = resp_doc['subscriptions'][0]['id'] subscriber = resp_doc['subscriptions'][0]['subscriber'] resp = self.simulate_get(self.subscription_path + '/' + sid, headers=self.headers) self.assertEqual(falcon.HTTP_200, self.srmock.status) resp_doc = jsonutils.loads(resp[0]) self.assertEqual(sid, resp_doc['id']) self.assertEqual(subscriber, resp_doc['subscriber']) def test_get_nonexisting_raise_404(self): self.simulate_get(self.subscription_path + '/fake', headers=self.headers) self.assertEqual(falcon.HTTP_404, self.srmock.status) def test_patch_works(self): self._create_subscription() resp = self.simulate_get(self.subscription_path, headers=self.headers) resp_doc = jsonutils.loads(resp[0]) sid = resp_doc['subscriptions'][0]['id'] resp = self.simulate_patch(self.subscription_path + '/' + sid, body='{"ttl": 300}', headers=self.headers) self.assertEqual(falcon.HTTP_204, self.srmock.status) resp = self.simulate_get(self.subscription_path + '/' + sid, headers=self.headers) self.assertEqual(falcon.HTTP_200, self.srmock.status) resp_doc = jsonutils.loads(resp[0]) self.assertEqual(300, resp_doc['ttl']) def test_patch_nonexisting_raise_404(self): self.simulate_patch(self.subscription_path + '/x', body='{"ttl": 300}', headers=self.headers) self.assertEqual(falcon.HTTP_404, self.srmock.status) def test_patch_to_duplicate_raise_409(self): self._create_subscription() toupdate = self._create_subscription(subscriber='http://update.me', ttl=600, options='{"a":1}') toupdate_sid = jsonutils.loads(toupdate[0])['subscription_id'] doc = {'subscriber': 'http://triger.me'} self.simulate_patch(self.subscription_path + '/' + toupdate_sid, body=jsonutils.dumps(doc), headers=self.headers) self.assertEqual(falcon.HTTP_409, self.srmock.status) def test_patch_no_body(self): self._create_subscription() resp = self.simulate_get(self.subscription_path, headers=self.headers) resp_doc = jsonutils.loads(resp[0]) sid = resp_doc['subscriptions'][0]['id'] resp = self.simulate_patch(self.subscription_path + '/' + sid, headers=self.headers) self.assertEqual(falcon.HTTP_400, self.srmock.status) resp_doc = jsonutils.loads(resp[0]) self.assertNotIn('{subscription_id}', resp_doc['description']) def test_patch_invalid_ttl(self): self.simulate_patch(self.subscription_path + '/x', body='{"ttl": "invalid"}', headers=self.headers) self.assertEqual(falcon.HTTP_400, self.srmock.status) def test_patch_invalid_body(self): resp = self.simulate_patch(self.subscription_path + '/x', body='[1]', headers=self.headers) self.assertEqual(falcon.HTTP_400, self.srmock.status) resp_doc = jsonutils.loads(resp[0]) self.assertEqual('Subscriptions must be a dict.', resp_doc['description']) def test_delete_works(self): self._create_subscription() resp = self.simulate_get(self.subscription_path, headers=self.headers) resp_doc = jsonutils.loads(resp[0]) sid = resp_doc['subscriptions'][0]['id'] resp = self.simulate_get(self.subscription_path + '/' + sid, headers=self.headers) self.assertEqual(falcon.HTTP_200, self.srmock.status) self.simulate_delete(self.subscription_path + '/' + sid, headers=self.headers) self.assertEqual(falcon.HTTP_204, self.srmock.status) resp = self.simulate_get(self.subscription_path + '/' + sid, headers=self.headers) 
self.assertEqual(falcon.HTTP_404, self.srmock.status) @mock.patch.object(auth, 'create_trust_id') def test_create_with_trust(self, create_trust): create_trust.return_value = 'trust_id' self.headers['X-USER-ID'] = 'user-id' self.headers['X-ROLES'] = 'my-roles' self._create_subscription('trust+http://example.com') self.assertEqual(falcon.HTTP_201, self.srmock.status) self.assertEqual('user-id', create_trust.call_args[0][1]) self.assertEqual(self.project_id, create_trust.call_args[0][2]) self.assertEqual(['my-roles'], create_trust.call_args[0][3]) resp_list = self.simulate_get(self.subscription_path, headers=self.headers) resp_list_doc = jsonutils.loads(resp_list[0]) options = resp_list_doc['subscriptions'][0]['options'] self.assertEqual({'a': 1, 'trust_id': 'trust_id'}, options) def test_confirm(self): doc = '{"confirmed": true}' resp = self._create_subscription() resp_doc = jsonutils.loads(resp[0]) confirm_path = (self.url_prefix + '/queues/' + self.queue + '/subscriptions/' + resp_doc['subscription_id'] + '/confirm') self.simulate_put(confirm_path, body=doc, headers=self.headers) self.assertEqual(falcon.HTTP_204, self.srmock.status) def test_confirm_with_invalid_body(self): doc = '{confirmed:123}' resp = self.simulate_put(self.confirm_path, body=doc, headers=self.headers) self.assertEqual(falcon.HTTP_400, self.srmock.status) resp_doc = jsonutils.loads(resp[0]) self.assertIn('body could not be parsed', resp_doc['description']) def test_confirm_without_boolean_body(self): doc = '{"confirmed":123}' resp = self.simulate_put(self.confirm_path, body=doc, headers=self.headers) self.assertEqual(falcon.HTTP_400, self.srmock.status) resp_doc = jsonutils.loads(resp[0]) self.assertEqual("The 'confirmed' should be boolean.", resp_doc['description']) def test_confirm_with_non_subscription(self): doc = '{"confirmed": true}' self.simulate_put(self.confirm_path, body=doc, headers=self.headers) self.assertEqual(falcon.HTTP_404, self.srmock.status) def test_confirm_with_extra_spec(self): self.headers['EXTRA-SPEC'] = 'messagecode:123456' doc = '{"confirmed": true}' resp = self._create_subscription() resp_doc = jsonutils.loads(resp[0]) confirm_path = (self.url_prefix + '/queues/' + self.queue + '/subscriptions/' + resp_doc['subscription_id'] + '/confirm') self.simulate_put(confirm_path, body=doc, headers=self.headers) self.assertEqual(falcon.HTTP_204, self.srmock.status) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/transport/wsgi/v2_0/test_topic_lifecycle.py0000664000175100017510000005712515033040005026530 0ustar00mylesmyles# Copyright (c) 2019 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. 
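# Topics mirror the queue lifecycle API, rooted at /v2/topics instead of
# /v2/queues. A minimal sketch of the round trip these tests repeat
# (values taken from the assertions below; the reserved defaults are the
# wsgi_mongodb.conf defaults assumed by the fixture, not universal ones):
#
#     PUT /v2/topics/mars     {"messages": {"ttl": 600}}        -> 201
#     GET /v2/topics/mars     -> {"messages": {"ttl": 600},
#                                 "_default_message_ttl": 3600,
#                                 "_max_messages_post_size": 262144,
#                                 "_default_message_delay": 0}
#     DELETE /v2/topics/mars                                    -> 204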
from unittest import mock import ddt import falcon from oslo_serialization import jsonutils from oslo_utils import uuidutils from zaqar.storage import errors as storage_errors from zaqar import tests as testing from zaqar.tests.unit.transport.wsgi import base @ddt.ddt class TestTopicLifecycleMongoDB(base.V2Base): config_file = 'wsgi_mongodb.conf' @testing.requires_mongodb def setUp(self): super(TestTopicLifecycleMongoDB, self).setUp() self.topic_path = self.url_prefix + '/topics' self.mars_topic_path = self.topic_path + '/mars' self.venus_topic_path = self.topic_path + '/venus' self.headers = { 'Client-ID': uuidutils.generate_uuid(), 'X-Project-ID': '3387309841abc_' } def tearDown(self): control = self.boot.control storage = self.boot.storage._storage connection = storage.connection connection.drop_database(control.topics_database) for db in storage.message_databases: connection.drop_database(db) super(TestTopicLifecycleMongoDB, self).tearDown() def test_without_project_id(self): headers = { 'Client-ID': uuidutils.generate_uuid(), } self.simulate_put(self.mars_topic_path, headers=headers, need_project_id=False) self.assertEqual(falcon.HTTP_400, self.srmock.status) self.simulate_delete(self.mars_topic_path, headers=headers, need_project_id=False) self.assertEqual(falcon.HTTP_400, self.srmock.status) def test_empty_project_id(self): headers = { 'Client-ID': uuidutils.generate_uuid(), 'X-Project-ID': '' } self.simulate_put(self.mars_topic_path, headers=headers) self.assertEqual(falcon.HTTP_400, self.srmock.status) self.simulate_delete(self.mars_topic_path, headers=headers) self.assertEqual(falcon.HTTP_400, self.srmock.status) @ddt.data('480924', 'foo') def test_basics_thoroughly(self, project_id): headers = { 'Client-ID': uuidutils.generate_uuid(), 'X-Project-ID': project_id } mars_topic_path_stats = self.mars_topic_path + '/stats' # Stats are empty - topic not created yet self.simulate_get(mars_topic_path_stats, headers=headers) self.assertEqual(falcon.HTTP_200, self.srmock.status) # Create doc = '{"messages": {"ttl": 600}}' self.simulate_put(self.mars_topic_path, headers=headers, body=doc) self.assertEqual(falcon.HTTP_201, self.srmock.status) location = self.srmock.headers_dict['Location'] self.assertEqual(location, self.mars_topic_path) # Fetch metadata result = self.simulate_get(self.mars_topic_path, headers=headers) result_doc = jsonutils.loads(result[0]) self.assertEqual(falcon.HTTP_200, self.srmock.status) ref_doc = jsonutils.loads(doc) ref_doc['_default_message_ttl'] = 3600 ref_doc['_max_messages_post_size'] = 262144 ref_doc['_default_message_delay'] = 0 self.assertEqual(ref_doc, result_doc) # Stats empty topic self.simulate_get(mars_topic_path_stats, headers=headers) self.assertEqual(falcon.HTTP_200, self.srmock.status) # Delete self.simulate_delete(self.mars_topic_path, headers=headers) self.assertEqual(falcon.HTTP_204, self.srmock.status) # Get non-existent stats self.simulate_get(mars_topic_path_stats, headers=headers) self.assertEqual(falcon.HTTP_200, self.srmock.status) @ddt.data('1234567890', '11111111111111111111111111111111111') def test_basics_thoroughly_with_different_client_id(self, client_id): self.conf.set_override('client_id_uuid_safe', 'off', 'transport') headers = { 'Client-ID': client_id, 'X-Project-ID': '480924' } mars_topic_path_stats = self.mars_topic_path + '/stats' # Stats are empty - topic not created yet self.simulate_get(mars_topic_path_stats, headers=headers) self.assertEqual(falcon.HTTP_200, self.srmock.status) # Create doc = '{"messages": {"ttl": 600}}' 
self.simulate_put(self.mars_topic_path, headers=headers, body=doc) self.assertEqual(falcon.HTTP_201, self.srmock.status) location = self.srmock.headers_dict['Location'] self.assertEqual(location, self.mars_topic_path) # Fetch metadata result = self.simulate_get(self.mars_topic_path, headers=headers) result_doc = jsonutils.loads(result[0]) self.assertEqual(falcon.HTTP_200, self.srmock.status) ref_doc = jsonutils.loads(doc) ref_doc['_default_message_ttl'] = 3600 ref_doc['_max_messages_post_size'] = 262144 ref_doc['_default_message_delay'] = 0 self.assertEqual(ref_doc, result_doc) # Stats empty topic self.simulate_get(mars_topic_path_stats, headers=headers) self.assertEqual(falcon.HTTP_200, self.srmock.status) # Delete self.simulate_delete(self.mars_topic_path, headers=headers) self.assertEqual(falcon.HTTP_204, self.srmock.status) # Get non-existent stats self.simulate_get(mars_topic_path_stats, headers=headers) self.assertEqual(falcon.HTTP_200, self.srmock.status) def test_name_restrictions(self): self.simulate_put(self.topic_path + '/Nice-Boat_2', headers=self.headers) self.assertEqual(falcon.HTTP_201, self.srmock.status) self.simulate_put(self.topic_path + '/Nice-Bo@t', headers=self.headers) self.assertEqual(falcon.HTTP_400, self.srmock.status) self.simulate_put(self.topic_path + '/_' + 'niceboat' * 8, headers=self.headers) self.assertEqual(falcon.HTTP_400, self.srmock.status) self.simulate_put(self.topic_path + '/Service.test_topic', headers=self.headers) self.assertEqual(falcon.HTTP_201, self.srmock.status) def test_project_id_restriction(self): muvluv_topic_path = self.topic_path + '/Muv-Luv' self.simulate_put(muvluv_topic_path, headers={'Client-ID': uuidutils.generate_uuid(), 'X-Project-ID': 'JAM Project' * 24}) self.assertEqual(falcon.HTTP_400, self.srmock.status) # no charset restrictions self.simulate_put(muvluv_topic_path, headers={'Client-ID': uuidutils.generate_uuid(), 'X-Project-ID': 'JAM Project'}) self.assertEqual(falcon.HTTP_201, self.srmock.status) def test_non_ascii_name(self): test_params = (('/topics/non-ascii-n\u0153me', 'utf-8'), ('/topics/non-ascii-n\xc4me', 'iso8859-1')) for uri, enc in test_params: uri = self.url_prefix + uri self.simulate_put(uri, headers=self.headers) self.assertEqual(falcon.HTTP_400, self.srmock.status) self.simulate_delete(uri, headers=self.headers) self.assertEqual(falcon.HTTP_400, self.srmock.status) def test_no_metadata(self): self.simulate_put(self.venus_topic_path, headers=self.headers) self.assertEqual(falcon.HTTP_201, self.srmock.status) self.simulate_put(self.venus_topic_path, body='', headers=self.headers) self.assertEqual(falcon.HTTP_204, self.srmock.status) result = self.simulate_get(self.venus_topic_path, headers=self.headers) result_doc = jsonutils.loads(result[0]) self.assertEqual(256 * 1024, result_doc.get('_max_messages_post_size')) self.assertEqual(3600, result_doc.get('_default_message_ttl')) self.assertEqual(0, result_doc.get('_default_message_delay')) @ddt.data('{', '[]', '.', ' ') def test_bad_metadata(self, document): self.simulate_put(self.venus_topic_path, headers=self.headers, body=document) self.assertEqual(falcon.HTTP_400, self.srmock.status) def test_too_much_metadata(self): self.simulate_put(self.venus_topic_path, headers=self.headers) self.assertEqual(falcon.HTTP_201, self.srmock.status) doc = '{{"messages": {{"ttl": 600}}, "padding": "{pad}"}}' max_size = self.transport_cfg.max_queue_metadata padding_len = max_size - (len(doc) - 10) + 1 doc = doc.format(pad='x' * padding_len) self.simulate_put(self.venus_topic_path, 
headers=self.headers, body=doc) self.assertEqual(falcon.HTTP_400, self.srmock.status) def test_way_too_much_metadata(self): self.simulate_put(self.venus_topic_path, headers=self.headers) self.assertEqual(falcon.HTTP_201, self.srmock.status) doc = '{{"messages": {{"ttl": 600}}, "padding": "{pad}"}}' max_size = self.transport_cfg.max_queue_metadata padding_len = max_size * 100 doc = doc.format(pad='x' * padding_len) self.simulate_put(self.venus_topic_path, headers=self.headers, body=doc) self.assertEqual(falcon.HTTP_400, self.srmock.status) def test_custom_metadata(self): # Set doc = '{{"messages": {{"ttl": 600}}, "padding": "{pad}"}}' max_size = self.transport_cfg.max_queue_metadata padding_len = max_size - (len(doc) - 2) doc = doc.format(pad='x' * padding_len) self.simulate_put(self.venus_topic_path, headers=self.headers, body=doc) self.assertEqual(falcon.HTTP_201, self.srmock.status) # Get result = self.simulate_get(self.venus_topic_path, headers=self.headers) result_doc = jsonutils.loads(result[0]) ref_doc = jsonutils.loads(doc) ref_doc['_default_message_ttl'] = 3600 ref_doc['_max_messages_post_size'] = 262144 ref_doc['_default_message_delay'] = 0 self.assertEqual(ref_doc, result_doc) self.assertEqual(falcon.HTTP_200, self.srmock.status) def test_update_metadata(self): xyz_topic_path = self.url_prefix + '/topics/xyz' xyz_topic_path_metadata = xyz_topic_path headers = { 'Client-ID': uuidutils.generate_uuid(), 'X-Project-ID': uuidutils.generate_uuid() } # Create self.simulate_put(xyz_topic_path, headers=headers) self.assertEqual(falcon.HTTP_201, self.srmock.status) headers.update({'Content-Type': "application/openstack-messaging-v2.0-json-patch"}) # add metadata doc1 = ('[{"op":"add", "path": "/metadata/key1", "value": 1},' '{"op":"add", "path": "/metadata/key2", "value": 1}]') self.simulate_patch(xyz_topic_path_metadata, headers=headers, body=doc1) self.assertEqual(falcon.HTTP_200, self.srmock.status) # remove reserved metadata, zaqar will do nothing and return 200, # because doc3 = '[{"op":"remove", "path": "/metadata/_default_message_ttl"}]' self.simulate_patch(xyz_topic_path_metadata, headers=headers, body=doc3) self.assertEqual(falcon.HTTP_200, self.srmock.status) # replace metadata doc2 = '[{"op":"replace", "path": "/metadata/key1", "value": 2}]' self.simulate_patch(xyz_topic_path_metadata, headers=headers, body=doc2) self.assertEqual(falcon.HTTP_200, self.srmock.status) # replace reserved metadata, zaqar will store the reserved metadata doc2 = ('[{"op":"replace", "path": "/metadata/_default_message_ttl",' '"value": 300}]') self.simulate_patch(xyz_topic_path_metadata, headers=headers, body=doc2) self.assertEqual(falcon.HTTP_200, self.srmock.status) # Get result = self.simulate_get(xyz_topic_path_metadata, headers=headers) result_doc = jsonutils.loads(result[0]) self.assertEqual({'key1': 2, 'key2': 1, '_default_message_ttl': 300, '_max_messages_post_size': 262144, '_default_message_delay': 0}, result_doc) # remove metadata doc3 = '[{"op":"remove", "path": "/metadata/key1"}]' self.simulate_patch(xyz_topic_path_metadata, headers=headers, body=doc3) self.assertEqual(falcon.HTTP_200, self.srmock.status) # remove reserved metadata doc3 = '[{"op":"remove", "path": "/metadata/_default_message_ttl"}]' self.simulate_patch(xyz_topic_path_metadata, headers=headers, body=doc3) self.assertEqual(falcon.HTTP_200, self.srmock.status) # Get result = self.simulate_get(xyz_topic_path_metadata, headers=headers) result_doc = jsonutils.loads(result[0]) self.assertEqual({'key2': 1, '_default_message_ttl': 
3600, '_max_messages_post_size': 262144, '_default_message_delay': 0}, result_doc) # replace non-existent metadata doc4 = '[{"op":"replace", "path": "/metadata/key3", "value":2}]' self.simulate_patch(xyz_topic_path_metadata, headers=headers, body=doc4) self.assertEqual(falcon.HTTP_409, self.srmock.status) # remove non-existent metadata doc5 = '[{"op":"remove", "path": "/metadata/key3"}]' self.simulate_patch(xyz_topic_path_metadata, headers=headers, body=doc5) self.assertEqual(falcon.HTTP_409, self.srmock.status) self.simulate_delete(xyz_topic_path, headers=headers) # add metadata to non-existent topic doc1 = ('[{"op":"add", "path": "/metadata/key1", "value": 1},' '{"op":"add", "path": "/metadata/key2", "value": 1}]') self.simulate_patch(xyz_topic_path_metadata, headers=headers, body=doc1) self.assertEqual(falcon.HTTP_404, self.srmock.status) # replace metadata in non-existent topic doc4 = '[{"op":"replace", "path": "/metadata/key3", "value":2}]' self.simulate_patch(xyz_topic_path_metadata, headers=headers, body=doc4) self.assertEqual(falcon.HTTP_404, self.srmock.status) # remove metadata from non-existent topic doc5 = '[{"op":"remove", "path": "/metadata/key3"}]' self.simulate_patch(xyz_topic_path_metadata, headers=headers, body=doc5) self.assertEqual(falcon.HTTP_404, self.srmock.status) def test_list(self): arbitrary_number = 644079696574693 project_id = str(arbitrary_number) client_id = uuidutils.generate_uuid() header = { 'X-Project-ID': project_id, 'Client-ID': client_id } # NOTE(kgriffs): It's important that this one sort after the one # above. This is in order to prove that bug/1236605 is fixed, and # stays fixed! alt_project_id = str(arbitrary_number + 1) # List empty result = self.simulate_get(self.topic_path, headers=header) self.assertEqual(falcon.HTTP_200, self.srmock.status) results = jsonutils.loads(result[0]) self.assertEqual([], results['topics']) self.assertIn('links', results) self.assertEqual(0, len(results['links'])) # Payload exceeded self.simulate_get(self.topic_path, headers=header, query_string='limit=21') self.assertEqual(falcon.HTTP_400, self.srmock.status) # Create some def create_topic(name, project_id, body): altheader = {'Client-ID': client_id} if project_id is not None: altheader['X-Project-ID'] = project_id uri = self.topic_path + '/' + name self.simulate_put(uri, headers=altheader, body=body) create_topic('q1', project_id, '{"node": 31}') create_topic('q2', project_id, '{"node": 32}') create_topic('q3', project_id, '{"node": 33}') create_topic('q3', alt_project_id, '{"alt": 1}') # List (limit) result = self.simulate_get(self.topic_path, headers=header, query_string='limit=2') result_doc = jsonutils.loads(result[0]) self.assertEqual(2, len(result_doc['topics'])) # List (no metadata, get all) result = self.simulate_get(self.topic_path, headers=header, query_string='limit=5') result_doc = jsonutils.loads(result[0]) [target, params] = result_doc['links'][0]['href'].split('?') self.simulate_get(target, headers=header, query_string=params) self.assertEqual(falcon.HTTP_200, self.srmock.status) # Ensure we didn't pick up the topic from the alt project. 
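        # (Four topics were created above, but 'q3' under alt_project_id
        # belongs to another project, so only three should be listed.)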
topics = result_doc['topics'] self.assertEqual(3, len(topics)) # List with metadata result = self.simulate_get(self.topic_path, headers=header, query_string='detailed=true') self.assertEqual(falcon.HTTP_200, self.srmock.status) result_doc = jsonutils.loads(result[0]) [target, params] = result_doc['links'][0]['href'].split('?') topic = result_doc['topics'][0] result = self.simulate_get(topic['href'], headers=header) result_doc = jsonutils.loads(result[0]) self.assertEqual(topic['metadata'], result_doc) self.assertEqual({'node': 31, '_default_message_ttl': 3600, '_max_messages_post_size': 262144, '_default_message_delay': 0}, result_doc) # topic filter result = self.simulate_get(self.topic_path, headers=header, query_string='node=34') self.assertEqual(falcon.HTTP_200, self.srmock.status) result_doc = jsonutils.loads(result[0]) self.assertEqual(0, len(result_doc['topics'])) # List tail self.simulate_get(target, headers=header, query_string=params) self.assertEqual(falcon.HTTP_200, self.srmock.status) # List manually-constructed tail self.simulate_get(target, headers=header, query_string='marker=zzz') self.assertEqual(falcon.HTTP_200, self.srmock.status) def test_list_returns_503_on_nopoolfound_exception(self): arbitrary_number = 644079696574693 project_id = str(arbitrary_number) client_id = uuidutils.generate_uuid() header = { 'X-Project-ID': project_id, 'Client-ID': client_id } topic_controller = self.boot.storage.topic_controller with mock.patch.object(topic_controller, 'list') as mock_topic_list: def topic_generator(): raise storage_errors.NoPoolFound() # This generator tries to be like topic controller list generator # in some ways. def fake_generator(): yield topic_generator() yield {} mock_topic_list.return_value = fake_generator() self.simulate_get(self.topic_path, headers=header) self.assertEqual(falcon.HTTP_503, self.srmock.status) def test_list_with_filter(self): arbitrary_number = 644079696574693 project_id = str(arbitrary_number) client_id = uuidutils.generate_uuid() header = { 'X-Project-ID': project_id, 'Client-ID': client_id } # Create some def create_topic(name, project_id, body): altheader = {'Client-ID': client_id} if project_id is not None: altheader['X-Project-ID'] = project_id uri = self.topic_path + '/' + name self.simulate_put(uri, headers=altheader, body=body) create_topic('q1', project_id, '{"test_metadata_key1": "value1"}') create_topic('q2', project_id, '{"_max_messages_post_size": 2000}') create_topic('q3', project_id, '{"test_metadata_key2": 30}') # List (filter query) result = self.simulate_get(self.topic_path, headers=header, query_string='name=q&test_metadata_key2=30') result_doc = jsonutils.loads(result[0]) self.assertEqual(1, len(result_doc['topics'])) self.assertEqual('q3', result_doc['topics'][0]['name']) # List (filter query) result = self.simulate_get(self.topic_path, headers=header, query_string='_max_messages_post_size=2000') result_doc = jsonutils.loads(result[0]) self.assertEqual(1, len(result_doc['topics'])) self.assertEqual('q2', result_doc['topics'][0]['name']) # List (filter query) result = self.simulate_get(self.topic_path, headers=header, query_string='name=q') result_doc = jsonutils.loads(result[0]) self.assertEqual(3, len(result_doc['topics'])) class TestTopicLifecycleFaultyDriver(base.V2BaseFaulty): config_file = 'wsgi_faulty.conf' def test_simple(self): self.headers = { 'Client-ID': uuidutils.generate_uuid(), 'X-Project-ID': '338730984abc_1' } mars_topic_path = self.url_prefix + '/topics/mars' doc = '{"messages": {"ttl": 600}}' 
self.simulate_put(mars_topic_path, headers=self.headers, body=doc) self.assertEqual(falcon.HTTP_503, self.srmock.status) location = ('Location', mars_topic_path) self.assertNotIn(location, self.srmock.headers) result = self.simulate_get(mars_topic_path, headers=self.headers) result_doc = jsonutils.loads(result[0]) self.assertEqual(falcon.HTTP_503, self.srmock.status) self.assertNotEqual(result_doc, jsonutils.loads(doc)) self.simulate_get(mars_topic_path + '/stats', headers=self.headers) self.assertEqual(falcon.HTTP_503, self.srmock.status) self.simulate_get(self.url_prefix + '/topics', headers=self.headers) self.assertEqual(falcon.HTTP_503, self.srmock.status) self.simulate_delete(mars_topic_path, headers=self.headers) self.assertEqual(falcon.HTTP_503, self.srmock.status) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/transport/wsgi/v2_0/test_urls.py0000664000175100017510000002130715033040005024351 0ustar00mylesmyles# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import falcon from oslo_serialization import jsonutils from oslo_utils import timeutils from zaqar.common import urls from zaqar.tests.unit.transport.wsgi import base class TestURL(base.V2Base): config_file = 'wsgi_mongodb.conf' def setUp(self): super(TestURL, self).setUp() self.signed_url_prefix = self.url_prefix + '/queues/shared_queue/share' def test_url_generation(self): timeutils.set_time_override() self.addCleanup(timeutils.clear_time_override) data = {'methods': ['GET', 'POST']} response = self.simulate_post(self.signed_url_prefix, body=jsonutils.dumps(data)) self.assertEqual(falcon.HTTP_200, self.srmock.status) content = jsonutils.loads(response[0]) expires = timeutils.utcnow(True) + datetime.timedelta(days=1) expires_str = expires.strftime(urls._DATE_FORMAT) for field in ['signature', 'project', 'methods', 'paths', 'expires']: self.assertIn(field, content) self.assertEqual(expires_str, content['expires']) self.assertEqual(data['methods'], content['methods']) self.assertEqual(['/v2/queues/shared_queue/messages'], content['paths']) def test_url_paths(self): timeutils.set_time_override() self.addCleanup(timeutils.clear_time_override) data = {'methods': ['GET', 'POST'], 'paths': ['messages', 'subscriptions']} response = self.simulate_post(self.signed_url_prefix, body=jsonutils.dumps(data)) self.assertEqual(falcon.HTTP_200, self.srmock.status) content = jsonutils.loads(response[0]) self.assertEqual( ['/v2/queues/shared_queue/messages', '/v2/queues/shared_queue/subscriptions'], content['paths']) def test_url_bad_request(self): self.simulate_post(self.signed_url_prefix, body='not json') self.assertEqual(falcon.HTTP_400, self.srmock.status) data = {'dummy': 'meh'} self.simulate_post(self.signed_url_prefix, body=jsonutils.dumps(data)) self.assertEqual(falcon.HTTP_400, self.srmock.status) data = {'expires': 'wrong date format'} self.simulate_post(self.signed_url_prefix, body=jsonutils.dumps(data)) 
self.assertEqual(falcon.HTTP_400, self.srmock.status) data = {'methods': 'methods not list'} self.simulate_post(self.signed_url_prefix, body=jsonutils.dumps(data)) self.assertEqual(falcon.HTTP_400, self.srmock.status) data = {'paths': ['notallowed']} self.simulate_post(self.signed_url_prefix, body=jsonutils.dumps(data)) self.assertEqual(falcon.HTTP_400, self.srmock.status) def test_url_verification_success(self): data = {'methods': ['GET', 'POST']} response = self.simulate_post(self.signed_url_prefix, body=jsonutils.dumps(data)) self.assertEqual(falcon.HTTP_200, self.srmock.status) content = jsonutils.loads(response[0]) headers = { 'URL-Signature': content['signature'], 'URL-Expires': content['expires'], 'URL-Methods': ','.join(content['methods']), 'URL-Paths': ','.join(content['paths']) } headers.update(self.headers) response = self.simulate_get(content['paths'][0], headers=headers) self.assertEqual(falcon.HTTP_200, self.srmock.status) def _get_msg_id(self, headers): return self._get_msg_ids(headers)[0] def _get_msg_ids(self, headers): return headers['location'].rsplit('=', 1)[-1].split(',') def test_url_verification_success_with_message_id(self): doc = {'messages': [{'body': 239, 'ttl': 300}]} body = jsonutils.dumps(doc) self.simulate_post(self.url_prefix + '/queues/shared_queue/messages', body=body, headers=self.headers) msg_id = self._get_msg_id(self.srmock.headers_dict) data = {'methods': ['GET', 'POST']} response = self.simulate_post(self.signed_url_prefix, body=jsonutils.dumps(data)) self.assertEqual(falcon.HTTP_200, self.srmock.status) content = jsonutils.loads(response[0]) headers = { 'URL-Signature': content['signature'], 'URL-Expires': content['expires'], 'URL-Methods': ','.join(content['methods']), 'URL-Paths': ','.join(content['paths']) } headers.update(self.headers) self.simulate_get(content['paths'][0] + '/' + msg_id, headers=headers) self.assertEqual(falcon.HTTP_200, self.srmock.status) def test_url_verification_bad_request(self): path = self.url_prefix + '/queues/shared_queue/messages' expires = timeutils.utcnow() + datetime.timedelta(days=1) expires_str = expires.strftime(urls._DATE_FORMAT) headers = { 'URL-Signature': 'dummy', 'URL-Expires': 'not a real date', 'URL-Methods': 'GET,POST', 'URL-Paths': '/v2/queues/shared_queue/messages' } headers.update(self.headers) self.simulate_get(path, headers=headers) self.assertEqual(falcon.HTTP_404, self.srmock.status) headers = { 'URL-Signature': 'dummy', 'URL-Expires': expires_str, 'URL-Methods': '', 'URL-Paths': '/v2/queues/shared_queue/messages' } headers.update(self.headers) self.simulate_get(path, headers=headers) self.assertEqual(falcon.HTTP_404, self.srmock.status) headers = { 'URL-Signature': 'dummy', 'URL-Expires': expires_str, 'URL-Methods': 'nothing here', 'URL-Paths': '/v2/queues/shared_queue/messages' } headers.update(self.headers) self.simulate_get(path, headers=headers) self.assertEqual(falcon.HTTP_404, self.srmock.status) headers = { 'URL-Signature': 'dummy', 'URL-Expires': expires_str, 'URL-Methods': 'POST,PUT', 'URL-Paths': '/v2/queues/shared_queue/messages' } headers.update(self.headers) self.simulate_get(path, headers=headers) self.assertEqual(falcon.HTTP_404, self.srmock.status) headers = { 'URL-Signature': 'wrong signature', 'URL-Expires': expires_str, 'URL-Methods': 'GET,POST', 'URL-Paths': '/v2/queues/shared_queue/messages' } headers.update(self.headers) self.simulate_get(path, headers=headers) self.assertEqual(falcon.HTTP_404, self.srmock.status) headers = { 'URL-Signature': 'will fail because of the old 
date', 'URL-Expires': '2015-01-01T00:00:00', 'URL-Methods': 'GET,POST', 'URL-Paths': '/v2/queues/shared_queue/messages' } headers.update(self.headers) self.simulate_get(path, headers=headers) self.assertEqual(falcon.HTTP_404, self.srmock.status) def test_url_verification_bad_with_message_id(self): doc = {'messages': [{'body': 239, 'ttl': 300}]} body = jsonutils.dumps(doc) self.simulate_post(self.url_prefix + '/queues/shared_queue/messages', body=body, headers=self.headers) msg_id = self._get_msg_id(self.srmock.headers_dict) data = {'methods': ['GET', 'POST']} response = self.simulate_post(self.signed_url_prefix, body=jsonutils.dumps(data)) self.assertEqual(falcon.HTTP_200, self.srmock.status) content = jsonutils.loads(response[0]) headers = { 'URL-Signature': content['signature'], 'URL-Expires': content['expires'], 'URL-Methods': ','.join(content['methods']), 'URL-Paths': ','.join('/queues/shared_queue/claims') } headers.update(self.headers) self.simulate_get(content['paths'][0] + '/' + msg_id, headers=headers) self.assertEqual(falcon.HTTP_404, self.srmock.status) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/tests/unit/transport/wsgi/v2_0/test_validation.py0000664000175100017510000002257315033040005025524 0ustar00mylesmyles# Copyright (c) 2013 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
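# ---------------------------------------------------------------------------
# Note on the size-limit tests below (illustrative, mirroring
# test_metadata_deserialization): each test computes the length of the JSON
# "envelope" -- the document rendered with an empty payload -- and then pads
# the payload one byte past the configured limit to force HTTP 400.  With
# max_queue_metadata = 64:
#
#     envelope = len('{"Dragon Torc":""}')     # 18 bytes of fixed JSON
#     payload  = '0' * (64 - envelope + 1)     # 47 bytes
#     total    = envelope + len(payload)       # 65 > 64  -> HTTP 400
# ---------------------------------------------------------------------------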
import falcon from oslo_serialization import jsonutils from oslo_utils import uuidutils from zaqar.tests.unit.transport.wsgi import base class TestValidation(base.V2Base): config_file = 'wsgi_mongodb_validation.conf' def setUp(self): super(TestValidation, self).setUp() self.project_id = '7e55e1a7e' self.queue_path = self.url_prefix + '/queues/noein' self.simulate_put(self.queue_path, self.project_id) self.headers = { 'Client-ID': uuidutils.generate_uuid(), } def tearDown(self): self.simulate_delete(self.queue_path, self.project_id) super(TestValidation, self).tearDown() def test_metadata_deserialization(self): # Normal case self.simulate_put(self.queue_path, self.project_id, body='{"timespace": "Shangri-la"}') self.assertEqual(falcon.HTTP_204, self.srmock.status) # Too long max_queue_metadata = 64 doc_tmpl = '{{"Dragon Torc":"{0}"}}' doc_tmpl_ws = '{{ "Dragon Torc" : "{0}" }}' # with whitespace envelope_length = len(doc_tmpl.format('')) for tmpl in doc_tmpl, doc_tmpl_ws: gen = '0' * (max_queue_metadata - envelope_length + 1) doc = tmpl.format(gen) self.simulate_put(self.queue_path, self.project_id, body=doc) self.assertEqual(falcon.HTTP_400, self.srmock.status) def test_message_deserialization(self): # Normal case body = '{"messages": [{"body": "Dragon Knights", "ttl": 100}]}' self.simulate_post(self.queue_path + '/messages', self.project_id, body=body, headers=self.headers) self.assertEqual(falcon.HTTP_201, self.srmock.status) # Both messages' size are too long max_messages_post_size = 256 obj = {'a': 0, 'b': ''} envelope_length = len(jsonutils.dumps(obj, separators=(',', ':'))) obj['b'] = 'x' * (max_messages_post_size - envelope_length + 1) for long_body in ('a' * (max_messages_post_size - 2 + 1), obj): doc = jsonutils.dumps([{'body': long_body, 'ttl': 100}]) self.simulate_post(self.queue_path + '/messages', self.project_id, body=doc, headers=self.headers) self.assertEqual(falcon.HTTP_400, self.srmock.status) def test_request_without_client_id(self): # No Client-ID in headers, it will raise 400 error. 
empty_headers = {} self.simulate_put(self.queue_path, self.project_id, body='{"timespace": "Shangri-la"}', headers=empty_headers) def test_subscription_ttl(self): # Normal case body = '{"subscriber": "http://trigger.she", "ttl": 100, "options":{}}' self.simulate_post(self.queue_path + '/subscriptions', self.project_id, body=body, headers=self.headers) self.assertEqual(falcon.HTTP_201, self.srmock.status) # Very big TTL body = ('{"subscriber": "http://a.c", "ttl": 99999999999999999' ', "options":{}}') self.simulate_post(self.queue_path + '/subscriptions', self.project_id, body=body, headers=self.headers) self.assertEqual(falcon.HTTP_400, self.srmock.status) def test_queue_metadata_putting(self): # Test _default_message_ttl # TTL normal case queue_1 = self.url_prefix + '/queues/queue1' self.simulate_put(queue_1, self.project_id, body='{"_default_message_ttl": 60}') self.addCleanup(self.simulate_delete, queue_1, self.project_id, headers=self.headers) self.assertEqual(falcon.HTTP_201, self.srmock.status) # TTL value is zero self.simulate_put(queue_1, self.project_id, body='{"_default_message_ttl": 0}') self.assertEqual(falcon.HTTP_400, self.srmock.status) # TTL under min self.simulate_put(queue_1, self.project_id, body='{"_default_message_ttl": 59}') self.assertEqual(falcon.HTTP_400, self.srmock.status) # TTL over max self.simulate_put(queue_1, self.project_id, body='{"_default_message_ttl": 1209601}') self.assertEqual(falcon.HTTP_400, self.srmock.status) # Delay TTL over max self.simulate_put(queue_1, self.project_id, body='{"_default_message_delay": 901}') self.assertEqual(falcon.HTTP_400, self.srmock.status) # Test _max_messages_post_size # Size normal case queue_2 = self.url_prefix + '/queues/queue2' self.simulate_put(queue_2, self.project_id, body='{"_max_messages_post_size": 255}') self.addCleanup(self.simulate_delete, queue_2, self.project_id, headers=self.headers) self.assertEqual(falcon.HTTP_201, self.srmock.status) # Size over max self.simulate_put(queue_2, self.project_id, body='{"_max_messages_post_size": 257}') self.assertEqual(falcon.HTTP_400, self.srmock.status) # Size value is zero self.simulate_put(queue_2, self.project_id, body='{"_max_messages_post_size": 0}') self.assertEqual(falcon.HTTP_400, self.srmock.status) # _dead_letter_queue_messages_ttl is not integer self.simulate_put(queue_2, self.project_id, body='{"_dead_letter_queue_messages_ttl": "123"}') self.assertEqual(falcon.HTTP_400, self.srmock.status) # _max_claim_count is not integer self.simulate_put(queue_2, self.project_id, body='{"_max_claim_count": "123"}') self.assertEqual(falcon.HTTP_400, self.srmock.status) # _dead_letter_queue_messages_ttl is out of range self.simulate_put(queue_2, self.project_id, body='{"_dead_letter_queue_messages_ttl": 59}') self.assertEqual(falcon.HTTP_400, self.srmock.status) # _dead_letter_queue_messages_ttl value is zero self.simulate_put(queue_2, self.project_id, body='{"_dead_letter_queue_messages_ttl": 0}') self.assertEqual(falcon.HTTP_400, self.srmock.status) def test_queue_patching(self): headers = { 'Client-ID': uuidutils.generate_uuid(), 'Content-Type': "application/openstack-messaging-v2.0-json-patch" } # Wrong JSON pointer self.simulate_patch(self.queue_path, self.project_id, headers=headers, body='[{"op":"add","path":"/a","value":2}]') self.assertEqual(falcon.HTTP_400, self.srmock.status) # Wrong op self.simulate_patch(self.queue_path, self.project_id, headers=headers, body='[{"op":"a","path":"/metadata/a","value":2}]') self.assertEqual(falcon.HTTP_400, self.srmock.status) 
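        # Valid patch: a supported op with a path rooted at /metadata is
        # applied and returns 200.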
self.simulate_patch(self.queue_path, self.project_id, headers=headers, body='[{"op":"add","path":"/metadata/a",' '"value":2}]') self.assertEqual(falcon.HTTP_200, self.srmock.status) def test_queue_purge(self): # Wrong key queue_1 = self.url_prefix + '/queues/queue1/purge' self.simulate_post(queue_1, self.project_id, body='{"wrong_key": ["messages"]}') self.addCleanup(self.simulate_delete, queue_1, self.project_id, headers=self.headers) self.assertEqual(falcon.HTTP_400, self.srmock.status) # Wrong value self.simulate_post(queue_1, self.project_id, body='{"resource_types": ["wrong_value"]}') self.assertEqual(falcon.HTTP_400, self.srmock.status) # Correct input self.simulate_post(queue_1, self.project_id, body='{"resource_types": ["messages"]}') self.assertEqual(falcon.HTTP_204, self.srmock.status) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5830135 zaqar-20.1.0.dev29/zaqar/transport/0000775000175100017510000000000015033040026016113 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/transport/__init__.py0000664000175100017510000000123715033040005020224 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Zaqar Transport Drivers""" from zaqar.transport import base # Hoist into package namespace DriverBase = base.DriverBase ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/transport/acl.py0000664000175100017510000000256715033040005017233 0ustar00mylesmyles# Copyright (c) 2015 Catalyst IT Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
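# Illustrative usage (a sketch; resource controllers in the WSGI transport
# guard their responders roughly like this, with rule names registered in
# zaqar.common.policies):
#
#     @acl.enforce("queues:get")
#     def on_get(self, req, resp, project_id, queue_name):
#         ...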
"""Policy enforcer of Zaqar""" import functools from oslo_policy import policy from zaqar.common import policies ENFORCER = None def setup_policy(conf): global ENFORCER ENFORCER = policy.Enforcer(conf) register_rules(ENFORCER) def register_rules(enforcer): enforcer.register_defaults(policies.list_rules()) def enforce(rule): # Late import to prevent cycles from zaqar.transport.wsgi import errors def decorator(func): @functools.wraps(func) def handler(*args, **kwargs): ctx = args[1].env['zaqar.context'] ENFORCER.enforce(rule, {}, ctx.to_dict(), do_raise=True, exc=errors.HTTPForbidden) return func(*args, **kwargs) return handler return decorator ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/transport/base.py0000664000175100017510000000440215033040005017374 0ustar00mylesmyles# Copyright (c) 2013 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import abc from zaqar.conf import default from zaqar.conf import transport class ResourceDefaults(object): """Registers and exposes defaults for resource fields.""" def __init__(self, conf): self._conf = conf self._conf.register_opts(transport.ALL_OPTS, group=transport.GROUP_NAME) self._defaults = self._conf[transport.GROUP_NAME] @property def message_ttl(self): return self._defaults.default_message_ttl @property def claim_ttl(self): return self._defaults.default_claim_ttl @property def claim_grace(self): return self._defaults.default_claim_grace @property def subscription_ttl(self): return self._defaults.default_subscription_ttl class DriverBase(object, metaclass=abc.ABCMeta): """Base class for Transport Drivers to document the expected interface. :param conf: configuration instance :type conf: oslo_config.cfg.CONF :param storage: The storage driver :type storage: zaqar.storage.base.DataDriverBase :param cache: caching object :type cache: dogpile.cache.region.CacheRegion :param control: Storage driver to handle the control plane :type control: zaqar.storage.base.ControlDriverBase """ def __init__(self, conf, storage, cache, control): self._conf = conf self._storage = storage self._cache = cache self._control = control self._conf.register_opts([default.auth_strategy]) self._defaults = ResourceDefaults(self._conf) @abc.abstractmethod def listen(self): """Start listening for client requests (self-hosting mode).""" raise NotImplementedError ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/transport/encryptor.py0000664000175100017510000002351215033040005020512 0ustar00mylesmyles# Copyright (c) 2020 Fiberhome Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ Encryption has a dependency on the pycrypto. If pycrypto is not available, CryptoUnavailableError will be raised. """ import base64 import functools import hashlib import os import pickle try: from cryptography.hazmat import backends as crypto_backends from cryptography.hazmat.primitives.asymmetric import padding as \ rsa_padding from cryptography.hazmat.primitives import ciphers from cryptography.hazmat.primitives.ciphers import algorithms from cryptography.hazmat.primitives.ciphers import modes from cryptography.hazmat.primitives import hashes from cryptography.hazmat.primitives import padding from cryptography.hazmat.primitives import serialization except ImportError: ciphers = None from zaqar.conf import transport from zaqar.i18n import _ class EncryptionFailed(ValueError): """Encryption failed when encrypting messages.""" def __init__(self, msg, *args, **kwargs): msg = msg.format(*args, **kwargs) super(EncryptionFailed, self).__init__(msg) class DecryptError(Exception): """raise when unable to decrypt encrypted data.""" pass class CryptoUnavailableError(Exception): """raise when Python Crypto module is not available.""" pass def assert_crypto_availability(f): """Ensure cryptography module is available.""" @functools.wraps(f) def wrapper(*args, **kwds): if ciphers is None: raise CryptoUnavailableError() return f(*args, **kwds) return wrapper class EncryptionFactory(object): def __init__(self, conf): self._conf = conf self._conf.register_opts(transport.ALL_OPTS, group=transport.GROUP_NAME) self._limits_conf = self._conf[transport.GROUP_NAME] self._algorithm = self._limits_conf.message_encryption_algorithms self._encryption_key = None if self._limits_conf.message_encryption_key: hash_function = hashlib.sha256() key = bytes(self._limits_conf.message_encryption_key, 'utf-8') hash_function.update(key) self._encryption_key = hash_function.digest() def getEncryptor(self): if self._algorithm == 'AES256' and self._encryption_key: return AES256Encryptor(self._encryption_key) if self._algorithm == 'RSA' and self._encryption_key: return RSAEncryptor(self._encryption_key) class Encryptor(object): def __init__(self, encryption_key): self._encryption_key = encryption_key def message_encrypted(self, messages): """Encrypting a list of messages. :param messages: A list of messages """ pass def message_decrypted(self, messages): """decrypting a list of messages. :param messages: A list of messages """ pass def get_cipher(self): pass def get_encryption_key(self): return self._encryption_key class AES256Encryptor(Encryptor): def get_cipher(self): iv = os.urandom(16) cipher = ciphers.Cipher( algorithms.AES(self.get_encryption_key()), modes.CBC(iv), backend=crypto_backends.default_backend()) # AES algorithm uses block size of 16 bytes = 128 bits, defined in # algorithms.AES.block_size. Using ``cryptography``, we will # analogously use hazmat.primitives.padding to pad it to # the 128-bit block size. 
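        # The random IV generated above is prepended to the ciphertext by
        # the _encrypt_*_message helpers, so _decrypt_message can recover
        # it from the first 16 bytes of the payload.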
padder = padding.PKCS7(algorithms.AES.block_size).padder() return iv, cipher, padder def _encrypt_string_message(self, message): """Encrypt the message type of string""" message = message.encode('utf-8') iv, cipher, padder = self.get_cipher() encryptor = cipher.encryptor() padded_data = padder.update(message) + padder.finalize() data = iv + encryptor.update(padded_data) + encryptor.finalize() return base64.b64encode(data) def _encrypt_other_types_message(self, message): """Encrypt the message type of other types""" iv, cipher, padder = self.get_cipher() encryptor = cipher.encryptor() padded_data = padder.update(message) + padder.finalize() data = iv + encryptor.update(padded_data) + encryptor.finalize() return base64.b64encode(data) def _encrypt_message(self, message): """Encrypt the message data with the given secret key. Padding is n bytes of the value n, where 1 <= n <= blocksize. """ if isinstance(message['body'], str): message['body'] = self._encrypt_string_message(message['body']) else: # For other types like dict or list, we need to serialize them # first. try: s_message = pickle.dumps(message['body']) except pickle.PickleError: return message['body'] = self._encrypt_other_types_message(s_message) def _decrypt_message(self, message): try: encrypted_message = base64.b64decode(message['body']) except (ValueError, TypeError): return iv = encrypted_message[:16] cipher = ciphers.Cipher( algorithms.AES(self._encryption_key), modes.CBC(iv), backend=crypto_backends.default_backend()) try: decryptor = cipher.decryptor() data = (decryptor.update(encrypted_message[16:]) + decryptor.finalize()) except Exception: raise DecryptError(_('Encrypted data appears to be corrupted.')) # Strip the last n padding bytes where n is the last value in # the plaintext unpadder = padding.PKCS7(algorithms.AES.block_size).unpadder() data = unpadder.update(data) + unpadder.finalize() try: message['body'] = pickle.loads(data) except pickle.UnpicklingError: # If the data is a string which didn't be serialized, there will # raise an exception. We just try to return the string itself. message['body'] = str(data, encoding="utf-8") @assert_crypto_availability def message_encrypted(self, messages): """Encrypting a list of messages. :param messages: A list of messages """ if self.get_encryption_key(): for msg in messages: self._encrypt_message(msg) else: msg = _('Now Zaqar only support AES-256 and need to specify the' 'key.') raise EncryptionFailed(msg) @assert_crypto_availability def message_decrypted(self, messages): """decrypting a list of messages. 
        :param messages: A list of messages
        """
        if self.get_encryption_key():
            for msg in messages:
                self._decrypt_message(msg)
        else:
            msg = _('Zaqar currently supports AES-256 only and requires '
                    'an encryption key to be specified.')
            raise EncryptionFailed(msg)


class RSAEncryptor(Encryptor):
    def get_cipher(self):
        # Load the PEM-encoded public key from the configured path.
        # NOTE: public keys take no password argument, and the helpers are
        # load_pem_public_key / default_backend (singular).
        public_key = None
        with open(self.get_encryption_key(), "rb") as key_file:
            public_key = serialization.load_pem_public_key(
                key_file.read(),
                backend=crypto_backends.default_backend())
        return public_key

    def _encrypt_string_message(self, message):
        """Encrypt a message body of type string."""
        message = message.encode('utf-8')
        public_key = self.get_cipher()
        data = public_key.encrypt(message, rsa_padding.OAEP(
            mgf=rsa_padding.MGF1(algorithm=hashes.SHA256()),
            algorithm=hashes.SHA256(), label=None))
        return base64.b64encode(data)

    def _encrypt_other_types_message(self, message):
        """Encrypt a message body of any other (serialized) type."""
        public_key = self.get_cipher()
        data = public_key.encrypt(message, rsa_padding.OAEP(
            mgf=rsa_padding.MGF1(algorithm=hashes.SHA256()),
            algorithm=hashes.SHA256(), label=None))
        return base64.b64encode(data)

    def _encrypt_message(self, message):
        """Encrypt the message body with the configured RSA public key."""
        if isinstance(message['body'], str):
            message['body'] = self._encrypt_string_message(message['body'])
        else:
            # For other types like dict or list, we need to serialize them
            # first.
            try:
                s_message = pickle.dumps(message['body'])
            except pickle.PickleError:
                return
            message['body'] = self._encrypt_other_types_message(s_message)

    def _decrypt_message(self, message):
        pass

    @assert_crypto_availability
    def message_encrypted(self, messages):
        """Encrypt a list of messages.

        :param messages: A list of messages
        """
        if self.get_encryption_key():
            for msg in messages:
                self._encrypt_message(msg)
        else:
            msg = _('An encryption key must be specified to encrypt '
                    'messages.')
            raise EncryptionFailed(msg)

    @assert_crypto_availability
    def message_decrypted(self, messages):
        """Decrypt a list of messages.

        :param messages: A list of messages
        """
        pass
././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5830135 zaqar-20.1.0.dev29/zaqar/transport/middleware/0000775000175100017510000000000015033040026020230 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/transport/middleware/__init__.py0000664000175100017510000000000015033040005022324 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/transport/middleware/auth.py0000664000175100017510000000413615033040005021544 0ustar00mylesmyles# Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
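# Selecting a strategy (illustrative; the transport drivers do the
# equivalent of this when conf.auth_strategy is set):
#
#     strategy_cls = strategy(conf.auth_strategy)    # e.g. 'keystone'
#     wrapped_app = strategy_cls.install(app, conf)
#
# An unknown strategy name raises RuntimeError from strategy() below.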
"""Middleware for handling authorization and authentication.""" from keystonemiddleware import auth_token from oslo_log import log STRATEGIES = {} LOG = log.getLogger(__name__) class SignedAndExtraSpecHeadersAuth(object): def __init__(self, app, auth_app): self._app = app self._auth_app = auth_app def __call__(self, environ, start_response): path = environ.get('PATH_INFO') extra_spec = environ.get('HTTP_EXTRA_SPEC') # NOTE(flwang): The root path of Zaqar service shouldn't require any # auth. if path == '/': return self._app(environ, start_response) signature = environ.get('HTTP_URL_SIGNATURE') if (signature is None and extra_spec is None) or \ path.startswith('/v1'): return self._auth_app(environ, start_response) return self._app(environ, start_response) class KeystoneAuth(object): @classmethod def install(cls, app, conf): """Install Auth check on application.""" LOG.debug('Installing Keystone\'s auth protocol') return auth_token.AuthProtocol(app, conf={"oslo-config-config": conf, "oslo-config-project": "zaqar"}) STRATEGIES['keystone'] = KeystoneAuth def strategy(strategy): """Returns the Auth Strategy. :param strategy: String representing the strategy to use """ try: return STRATEGIES[strategy] except KeyError: raise RuntimeError ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/transport/middleware/cors.py0000664000175100017510000000716115033040005021552 0ustar00mylesmyles# Copyright 2017 OpenStack, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from urllib import parse as urlparse import webob from oslo_log import log from oslo_middleware import cors LOG = log.getLogger(__name__) class Response(webob.Response): def __call__(self, environ, start_response): """WSGI application interface""" if self.conditional_response: return self.conditional_response_app(environ, start_response) headerlist = self._abs_headerlist(environ) start_response(self.status, headerlist) if environ['REQUEST_METHOD'] == 'HEAD': # Special case here... # NOTE(wangxiyuan): webob.response.Response always return # EmptyResponse here. This behavior breaks backward-compatibility. # so we need to 'fix' it here manually. return [] return self._app_iter def _abs_headerlist(self, *args, **kwargs): headerlist = super(Response, self)._abs_headerlist(*args, **kwargs) # NOTE(wangxiyuan): webob.response.Response always convert relative # path to absolute path given the request environ on location field in # the header of response. This behavior breaks backward-compatibility. # so we need to 'fix' it here manually. for i, (name, value) in enumerate(headerlist): if name.lower() == 'location': loc = urlparse.urlparse(value) relative_path = value[value.index(loc.path):] headerlist[i] = (name, relative_path) break return headerlist class Request(webob.Request): ResponseClass = Response class CORSMiddleware(object): def __init__(self, app, auth_app, conf): self._app = cors.CORS(app, conf) # We don't auth here. It's just used for keeping consistence. 
self._auth_app = auth_app @webob.dec.wsgify(RequestClass=Request) def __call__(self, request): return self._app(request) @classmethod def install(cls, app, auth_app, conf): LOG.debug('Installing CORS middleware.') cors.set_defaults( allow_headers=['X-Auth-Token', 'X-Identity-Status', 'X-Roles', 'X-Service-Catalog', 'X-User-Id', 'X-Tenant-Id', 'X-OpenStack-Request-ID', 'X-Trace-Info', 'X-Trace-HMAC', 'Client-id'], expose_headers=['X-Auth-Token', 'X-Subject-Token', 'X-Service-Token', 'X-OpenStack-Request-ID'], allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH', 'HEAD'] ) return CORSMiddleware(app, auth_app, conf) def install_cors(app, auth_app, conf): return CORSMiddleware.install(app, auth_app, conf) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/transport/middleware/profile.py0000664000175100017510000001071515033040005022243 0ustar00mylesmyles# Copyright 2016 OpenStack, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from urllib import parse as urlparse import webob from oslo_log import log from osprofiler import _utils as utils from osprofiler import notifier from osprofiler import profiler from osprofiler import web LOG = log.getLogger(__name__) def setup(conf, binary, host): if conf.profiler.enabled: # Note(wangxiyuan): OSprofiler now support some kind of backends, such # as Ceilometer, ElasticSearch, Messaging and MongoDB. # 1. Ceilometer is only used for data collection, and Messaging is only # used for data transfer. So Ceilometer only works when Messaging is # enabled. # 2. ElasticSearch and MongoDB support both data collection and # transfer. So they can be used standalone. # 3. Choose which backend depends on the config option # "connection_string" , and the default value is "messaging://". backend_uri = conf.profiler.connection_string if "://" not in backend_uri: backend_uri += "://" parsed_connection = urlparse.urlparse(backend_uri) backend_type = parsed_connection.scheme if backend_type == "messaging": import oslo_messaging _notifier = notifier.create( backend_uri, oslo_messaging, {}, oslo_messaging.get_notification_transport(conf), "Zaqar", binary, host) else: _notifier = notifier.create(backend_uri, project="Zaqar", service=binary, host=host) notifier.set(_notifier) LOG.warning("OSProfiler is enabled.\nIt means that person who " "knows any of hmac_keys that are specified in " "/etc/zaqar/zaqar.conf can trace his requests. \n In " "real life only operator can read this file so there " "is no security issue. 
Note that even if person can " "trigger profiler, only admin user can retrieve trace " "information.\n" "To disable OSprofiler set in zaqar.conf:\n" "[profiler]\nenabled=false") web.enable(conf.profiler.hmac_keys) else: web.disable() class ProfileWSGIMiddleware(object): def __init__(self, application, hmac_keys=None, enabled=False): self.application = application self.name = "wsgi" self.enabled = enabled self.hmac_keys = utils.split(hmac_keys or "") def _trace_is_valid(self, trace_info): if not isinstance(trace_info, dict): return False trace_keys = set(trace_info.keys()) if not all(k in trace_keys for k in web._REQUIRED_KEYS): return False if trace_keys.difference(web._REQUIRED_KEYS + web._OPTIONAL_KEYS): return False return True def __call__(self, environ, start_response): request = webob.Request(environ) trace_info = utils.signed_unpack(request.headers.get(web.X_TRACE_INFO), request.headers.get(web.X_TRACE_HMAC), self.hmac_keys) if not self._trace_is_valid(trace_info): return self.application(environ, start_response) profiler.init(**trace_info) info = { "request": { "path": request.path, "query": request.query_string, "method": request.method, "scheme": request.scheme } } with profiler.Trace(self.name, info=info): return self.application(environ, start_response) def install_wsgi_tracer(app, conf): enabled = conf.profiler.enabled and conf.profiler.trace_wsgi_transport if enabled: LOG.debug('Installing osprofiler\'s wsgi tracer') return ProfileWSGIMiddleware(app, conf.profiler.hmac_keys, enabled=enabled) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/transport/utils.py0000664000175100017510000000344715033040005017632 0ustar00mylesmyles# Copyright (c) 2013 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_serialization import jsonutils from oslo_utils import encodeutils class MalformedJSON(ValueError): """JSON string is not valid.""" pass class OverflowedJSONInteger(OverflowError): """JSON integer is too large.""" pass def _json_int(s): """Parse a string as a base 10 64-bit signed integer.""" i = int(s) if not (int(-2 ** 63) <= i <= int(2 ** 63 - 1)): raise OverflowedJSONInteger() return i def read_json(stream, len): """Like json.load, but converts ValueError to MalformedJSON upon failure. :param stream: a file-like object :param len: the number of bytes to read from stream """ try: content = encodeutils.safe_decode(stream.read(len), 'utf-8') result = jsonutils.loads(content, parse_int=_json_int) if not isinstance(result, dict) and not isinstance(result, list): raise MalformedJSON() return result except UnicodeDecodeError as ex: raise MalformedJSON(ex) except ValueError as ex: raise MalformedJSON(ex) def to_json(obj): """Like json.dumps, but outputs a UTF-8 encoded string. 
:param obj: a JSON-serializable object """ return jsonutils.dumps(obj, ensure_ascii=False) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/transport/validation.py0000664000175100017510000007154215033040005020625 0ustar00mylesmyles# Copyright (c) 2013 Rackspace, Inc. # Copyright (c) 2015 Catalyst IT Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import re import uuid from oslo_utils import timeutils from urllib import parse as urllib_parse from zaqar.common import consts from zaqar.conf import transport from zaqar.i18n import _ MIN_MESSAGE_TTL = 60 MIN_CLAIM_TTL = 60 MIN_CLAIM_GRACE = 60 MIN_DELAY_TTL = 0 MIN_SUBSCRIPTION_TTL = 60 _PURGBLE_RESOURCE_TYPES = {'messages', 'subscriptions'} # NOTE(kgriffs): Don't use \w because it isn't guaranteed to match # only ASCII characters. QUEUE_NAME_REGEX = re.compile(r'^[a-zA-Z0-9_\-.]+$') QUEUE_NAME_MAX_LEN = 64 PROJECT_ID_MAX_LEN = 256 class ValidationFailed(ValueError): """User input did not follow API restrictions.""" def __init__(self, msg, *args, **kwargs): msg = msg.format(*args, **kwargs) super(ValidationFailed, self).__init__(msg) class Validator(object): def __init__(self, conf): self._conf = conf self._conf.register_opts(transport.ALL_OPTS, group=transport.GROUP_NAME) self._limits_conf = self._conf[transport.GROUP_NAME] self._supported_operations = ('add', 'remove', 'replace') def queue_identification(self, queue, project): """Restrictions on a project id & queue name pair. :param queue: Name of the queue :param project: Project id :raises ValidationFailed: if the `name` is longer than 64 characters or contains anything other than ASCII digits and letters, underscores, and dashes. Also raises if `project` is not None but longer than 256 characters. """ if project is not None and len(project) > PROJECT_ID_MAX_LEN: msg = _('Project ids may not be more than {0} characters long.') raise ValidationFailed(msg, PROJECT_ID_MAX_LEN) if len(queue) > QUEUE_NAME_MAX_LEN: msg = _('Queue names may not be more than {0} characters long.') raise ValidationFailed(msg, QUEUE_NAME_MAX_LEN) if not QUEUE_NAME_REGEX.match(queue): raise ValidationFailed( _('Queue names may only contain ASCII letters, digits, ' 'underscores, and dashes.')) def _get_change_operation_d10(self, raw_change): op = raw_change.get('op') if op is None: msg = (_('Unable to find `op` in JSON Schema change. ' 'It must be one of the following: %(available)s.') % {'available': ', '.join(self._supported_operations)}) raise ValidationFailed(msg) if op not in self._supported_operations: msg = (_('Invalid operation: `%(op)s`. 
' 'It must be one of the following: %(available)s.') % {'op': op, 'available': ', '.join(self._supported_operations)}) raise ValidationFailed(msg) return op def _get_change_path_d10(self, raw_change): try: return raw_change['path'] except KeyError: msg = _("Unable to find '%s' in JSON Schema change") % 'path' raise ValidationFailed(msg) def _decode_json_pointer(self, pointer): """Parse a json pointer. Json Pointers are defined in http://tools.ietf.org/html/draft-pbryan-zyp-json-pointer . The pointers use '/' for separation between object attributes, such that '/A/B' would evaluate to C in {"A": {"B": "C"}}. A '/' character in an attribute name is encoded as "~1" and a '~' character is encoded as "~0". """ self._validate_json_pointer(pointer) ret = [] for part in pointer.lstrip('/').split('/'): ret.append(part.replace('~1', '/').replace('~0', '~').strip()) return ret def _validate_json_pointer(self, pointer): """Validate a json pointer. We only accept a limited form of json pointers. """ if not pointer.startswith('/'): msg = _('Pointer `%s` does not start with "/".') % pointer raise ValidationFailed(msg) if re.search(r'/\s*?/', pointer[1:]): msg = _('Pointer `%s` contains adjacent "/".') % pointer raise ValidationFailed(msg) if len(pointer) > 1 and pointer.endswith('/'): msg = _('Pointer `%s` end with "/".') % pointer raise ValidationFailed(msg) if pointer[1:].strip() == '/': msg = _('Pointer `%s` does not contains valid token.') % pointer raise ValidationFailed(msg) if re.search(r'~[^01]', pointer) or pointer.endswith('~'): msg = _('Pointer `%s` contains "~" not part of' ' a recognized escape sequence.') % pointer raise ValidationFailed(msg) def _get_change_value(self, raw_change, op): if 'value' not in raw_change: msg = _('Operation "{0}" requires a member named "value".') raise ValidationFailed(msg, op) return raw_change['value'] def _validate_change(self, change): if change['op'] == 'remove': return path_root = change['path'][0] if len(change['path']) >= 1 and path_root.lower() != 'metadata': msg = _("The root of path must be metadata, e.g /metadata/key.") raise ValidationFailed(msg) def _validate_path(self, op, path): limits = {'add': 2, 'remove': 2, 'replace': 2} if len(path) != limits.get(op, 2): msg = _("Invalid JSON pointer for this resource: " "'/%s, e.g /metadata/key'") % '/'.join(path) raise ValidationFailed(msg) def _parse_json_schema_change(self, raw_change, draft_version): if draft_version == 10: op = self._get_change_operation_d10(raw_change) path = self._get_change_path_d10(raw_change) else: msg = _('Unrecognized JSON Schema draft version') raise ValidationFailed(msg) path_list = self._decode_json_pointer(path) return op, path_list def _validate_retry_policy(self, metadata): retry_policy = metadata.get('_retry_policy') if metadata else None if retry_policy and not isinstance(retry_policy, dict): msg = _('retry_policy must be a dict.') raise ValidationFailed(msg) if retry_policy: valid_keys = ['retries_with_no_delay', 'minimum_delay_retries', 'minimum_delay', 'maximum_delay', 'maximum_delay_retries', 'retry_backoff_function', 'ignore_subscription_override'] for key in valid_keys: retry_value = retry_policy.get(key) if key == 'retry_backoff_function': if retry_value and not isinstance(retry_value, str): msg = _('retry_backoff_function must be a string.') raise ValidationFailed(msg) # Now we support linear, arithmetic, exponential # and geometric retry backoff function. 
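                    # Only these four backoff functions are recognized;
                    # anything else is rejected just below.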
fun = {'linear', 'arithmetic', 'exponential', 'geometric'} if retry_value and retry_value not in fun: msg = _('invalid retry_backoff_function.') raise ValidationFailed(msg) elif key == 'ignore_subscription_override': if retry_value and not isinstance(retry_value, bool): msg = _('ignore_subscription_override must be a ' 'boolean.') raise ValidationFailed(msg) else: if retry_value and not isinstance(retry_value, int): msg = _('Retry policy: %s must be a integer.') % key raise ValidationFailed(msg) min_delay = retry_policy.get('minimum_delay', consts.MINIMUM_DELAY) max_delay = retry_policy.get('maximum_delay', consts.MAXIMUM_DELAY) if max_delay < min_delay: msg = _('minimum_delay must less than maximum_delay.') raise ValidationFailed(msg) if ((max_delay - min_delay) < 2*consts.LINEAR_INTERVAL): msg = _('invalid minimum_delay and maximum_delay.') raise ValidationFailed(msg) def queue_patching(self, request, changes): washed_changes = [] content_types = { 'application/openstack-messaging-v2.0-json-patch': 10, } json_schema_version = content_types[request.content_type] if not isinstance(changes, list): msg = _('Request body must be a JSON array of operation objects.') raise ValidationFailed(msg) for raw_change in changes: if not isinstance(raw_change, dict): msg = _('Operations must be JSON objects.') raise ValidationFailed(msg) (op, path) = self._parse_json_schema_change(raw_change, json_schema_version) # NOTE(flwang): Now the 'path' is a list. self._validate_path(op, path) change = {'op': op, 'path': path, 'json_schema_version': json_schema_version} if not op == 'remove': change['value'] = self._get_change_value(raw_change, op) self._validate_change(change) washed_changes.append(change) return washed_changes def queue_listing(self, limit=None, **kwargs): """Restrictions involving a list of queues. :param limit: The expected number of queues in the list :param kwargs: Ignored arguments passed to storage API :raises ValidationFailed: if the limit is exceeded """ uplimit = self._limits_conf.max_queues_per_page if limit is not None and not (0 < limit <= uplimit): msg = _('Limit must be at least 1 and no greater than {0}.') raise ValidationFailed(msg, self._limits_conf.max_queues_per_page) def queue_metadata_length(self, content_length): """Restrictions on queue's length. :param content_length: Queue request's length. :raises ValidationFailed: if the metadata is oversize. """ if content_length is None: return if content_length > self._limits_conf.max_queue_metadata: msg = _('Queue metadata is too large. Max size: {0}') raise ValidationFailed(msg, self._limits_conf.max_queue_metadata) def queue_metadata_putting(self, queue_metadata): """Checking if the reserved attributes of the queue are valid. :param queue_metadata: Queue's metadata. :raises ValidationFailed: if any reserved attribute is invalid. 
""" if not queue_metadata: return queue_default_ttl = queue_metadata.get('_default_message_ttl') if queue_default_ttl and not isinstance(queue_default_ttl, int): msg = _('_default_message_ttl must be integer.') raise ValidationFailed(msg) if queue_default_ttl is not None: if not (MIN_MESSAGE_TTL <= queue_default_ttl <= self._limits_conf.max_message_ttl): msg = _('_default_message_ttl can not exceed {0} ' 'seconds, and must be at least {1} seconds long.') raise ValidationFailed( msg, self._limits_conf.max_message_ttl, MIN_MESSAGE_TTL) queue_max_msg_size = queue_metadata.get('_max_messages_post_size', None) if queue_max_msg_size and not isinstance(queue_max_msg_size, int): msg = _('_max_messages_post_size must be integer.') raise ValidationFailed(msg) if queue_max_msg_size is not None: if not (0 < queue_max_msg_size <= self._limits_conf.max_messages_post_size): raise ValidationFailed( _('_max_messages_post_size can not exceed {0}, ' ' and must be at least greater than 0.'), self._limits_conf.max_messages_post_size) max_claim_count = queue_metadata.get('_max_claim_count', None) if max_claim_count and not isinstance(max_claim_count, int): msg = _('_max_claim_count must be integer.') raise ValidationFailed(msg) dlq_ttl = queue_metadata.get('_dead_letter_queue_messages_ttl', None) if dlq_ttl and not isinstance(dlq_ttl, int): msg = _('_dead_letter_queue_messages_ttl must be integer.') raise ValidationFailed(msg) if dlq_ttl is not None and not (MIN_MESSAGE_TTL <= dlq_ttl <= self._limits_conf.max_message_ttl): msg = _('The TTL for a message may not exceed {0} seconds, ' 'and must be at least {1} seconds long.') raise ValidationFailed(msg, self._limits_conf.max_message_ttl, MIN_MESSAGE_TTL) queue_delay = queue_metadata.get('_default_message_delay', None) if queue_delay and not isinstance(queue_delay, int): msg = _('_default_message_delay must be integer.') raise ValidationFailed(msg) if queue_delay is not None: if not (MIN_DELAY_TTL <= queue_delay <= self._limits_conf.max_message_delay): msg = _('The TTL can not exceed {0} seconds, and must ' 'be at least {1} seconds long.') raise ValidationFailed( msg, self._limits_conf.max_message_delay, MIN_DELAY_TTL) encrypted_queue = queue_metadata.get('_enable_encrypt_messages', False) if encrypted_queue and not isinstance(encrypted_queue, bool): msg = _('_enable_encrypt_messages must be boolean.') raise ValidationFailed(msg) self._validate_retry_policy(queue_metadata) def queue_purging(self, document): """Restrictions the resource types to be purged for a queue. :param resource_types: Type list of all resource under a queue :raises ValidationFailed: if the resource types are invalid """ if 'resource_types' not in document: msg = _('Post body must contain key "resource_types".') raise ValidationFailed(msg) if (not set(document['resource_types']).issubset( _PURGBLE_RESOURCE_TYPES)): msg = _('Resource types must be a sub set of {0}.') raise ValidationFailed(msg, _PURGBLE_RESOURCE_TYPES) def message_posting(self, messages): """Restrictions on a list of messages. :param messages: A list of messages :raises ValidationFailed: if any message has a out-of-range TTL. """ if not messages: raise ValidationFailed(_('No messages to enqueu.')) for msg in messages: self.message_content(msg) def message_length(self, content_length, max_msg_post_size=None): """Restrictions on message post length. :param content_length: Queue request's length. :raises ValidationFailed: if the metadata is oversize. 
""" if content_length is None: return if max_msg_post_size: try: min_max_size = min(max_msg_post_size, self._limits_conf.max_messages_post_size) if content_length > min_max_size: raise ValidationFailed( _('Message collection size is too large. The max ' 'size for current queue is {0}. It is calculated ' 'by max size = min(max_messages_post_size_config: ' '{1}, max_messages_post_size_queue: {2}).'), min_max_size, self._limits_conf.max_messages_post_size, max_msg_post_size) except TypeError: # NOTE(flwang): If there is a type error when using min(), # it only happens in py3.x, it will be skipped and compare # the message length with the size defined in config file. pass if content_length > self._limits_conf.max_messages_post_size: raise ValidationFailed( _('Message collection size is too large. Max size {0}'), self._limits_conf.max_messages_post_size) def message_content(self, message): """Restrictions on each message.""" ttl = message['ttl'] if not (MIN_MESSAGE_TTL <= ttl <= self._limits_conf.max_message_ttl): msg = _('The TTL for a message may not exceed {0} seconds, and ' 'must be at least {1} seconds long.') raise ValidationFailed( msg, self._limits_conf.max_message_ttl, MIN_MESSAGE_TTL) delay = message.get('delay', 0) if not (MIN_DELAY_TTL <= delay <= self._limits_conf.max_message_delay): msg = _('The Delay TTL for a message may not exceed {0} seconds,' 'and must be at least {1} seconds long.') raise ValidationFailed( msg, self._limits_conf.max_message_delay, MIN_DELAY_TTL) def message_listing(self, limit=None, **kwargs): """Restrictions involving a list of messages. :param limit: The expected number of messages in the list :param kwargs: Ignored arguments passed to storage API :raises ValidationFailed: if the limit is exceeded """ uplimit = self._limits_conf.max_messages_per_page if limit is not None and not (0 < limit <= uplimit): msg = _('Limit must be at least 1 and may not ' 'be greater than {0}.') raise ValidationFailed( msg, self._limits_conf.max_messages_per_page) def message_deletion(self, ids=None, pop=None, claim_ids=None): """Restrictions involving deletion of messages. 
:param ids: message ids passed in by the delete request :param pop: count of messages to be POPped :param claim_ids: claim ids passed in by the delete request :raises ValidationFailed: if, pop AND id params are present together neither pop or id params are present message count to be popped > maximum allowed """ if pop is not None and ids is not None: msg = _('pop and id params cannot be present together in the ' 'delete request.') raise ValidationFailed(msg) if pop is None and ids is None: msg = _('The request should have either "ids" or "pop" ' 'parameter in the request, to be able to delete.') raise ValidationFailed(msg) if self._limits_conf.message_delete_with_claim_id: if (ids and claim_ids is None) or (ids is None and claim_ids): msg = _('The request should have both "ids" and "claim_ids" ' 'parameter in the request when ' 'message_delete_with_claim_id is True.') raise ValidationFailed(msg) pop_uplimit = self._limits_conf.max_messages_per_claim_or_pop if pop is not None and not (0 < pop <= pop_uplimit): msg = _('Pop value must be at least 1 and may not ' 'be greater than {0}.') raise ValidationFailed(msg, pop_uplimit) delete_uplimit = self._limits_conf.max_messages_per_page if ids is not None and not (0 < len(ids) <= delete_uplimit): msg = _('ids parameter should have at least 1 and not ' 'greater than {0} values.') raise ValidationFailed(msg, delete_uplimit) def claim_creation(self, metadata, limit=None): """Restrictions on the claim parameters upon creation. :param metadata: The claim metadata :param limit: The number of messages to claim :raises ValidationFailed: if either TTL or grace is out of range, or the expected number of messages exceed the limit. """ self.claim_updating(metadata) uplimit = self._limits_conf.max_messages_per_claim_or_pop if limit is not None and not (0 < limit <= uplimit): msg = _('Limit must be at least 1 and may not ' 'be greater than {0}.') raise ValidationFailed( msg, self._limits_conf.max_messages_per_claim_or_pop) grace = metadata['grace'] if not (MIN_CLAIM_GRACE <= grace <= self._limits_conf.max_claim_grace): msg = _('The grace for a claim may not exceed {0} seconds, and ' 'must be at least {1} seconds long.') raise ValidationFailed( msg, self._limits_conf.max_claim_grace, MIN_CLAIM_GRACE) def claim_updating(self, metadata): """Restrictions on the claim TTL. :param metadata: The claim metadata :raises ValidationFailed: if the TTL is out of range """ ttl = metadata['ttl'] if not (MIN_CLAIM_TTL <= ttl <= self._limits_conf.max_claim_ttl): msg = _('The TTL for a claim may not exceed {0} seconds, and ' 'must be at least {1} seconds long.') raise ValidationFailed( msg, self._limits_conf.max_claim_ttl, MIN_CLAIM_TTL) def subscription_posting(self, subscription): """Restrictions on a creation of subscription. :param subscription: dict of subscription :raises ValidationFailed: if the subscription is invalid. """ for p in ('subscriber',): if p not in subscription.keys(): raise ValidationFailed(_('Missing parameter %s in body.') % p) self.subscription_patching(subscription) def subscription_patching(self, subscription): """Restrictions on an update of subscription. :param subscription: dict of subscription :raises ValidationFailed: if the subscription is invalid. 
""" if not subscription: raise ValidationFailed(_('No subscription to create.')) if not isinstance(subscription, dict): msg = _('Subscriptions must be a dict.') raise ValidationFailed(msg) subscriber = subscription.get('subscriber') subscriber_type = None if subscriber: parsed_uri = urllib_parse.urlparse(subscriber) subscriber_type = parsed_uri.scheme if subscriber_type not in self._limits_conf.subscriber_types: msg = _('The subscriber type of subscription must be ' 'supported in the list {0}.') raise ValidationFailed(msg, self._limits_conf.subscriber_types) options = subscription.get('options') if options and not isinstance(options, dict): msg = _('Options must be a dict.') raise ValidationFailed(msg) self._validate_retry_policy(options) ttl = subscription.get('ttl') if ttl: if not isinstance(ttl, int): msg = _('TTL must be an integer.') raise ValidationFailed(msg) if ttl < MIN_SUBSCRIPTION_TTL: msg = _('The TTL for a subscription ' 'must be at least {0} seconds long.') raise ValidationFailed(msg, MIN_SUBSCRIPTION_TTL) # NOTE(flwang): By this change, technically, user can set a very # big TTL so as to get a very long subscription. now = timeutils.utcnow_ts() now_dt = datetime.datetime.fromtimestamp( now, tz=datetime.timezone.utc).replace(tzinfo=None) msg = _('The TTL seconds for a subscription plus current time' ' must be less than {0}.') try: # NOTE(flwang): If below expression works, then we believe the # ttl is acceptable otherwise it exceeds the max time of # python. now_dt + datetime.timedelta(seconds=ttl) except OverflowError: raise ValidationFailed(msg, datetime.datetime.max) def subscription_confirming(self, confirmed): confirmed = confirmed.get('confirmed') if not isinstance(confirmed, bool): msg = _(u"The 'confirmed' should be boolean.") raise ValidationFailed(msg) def subscription_listing(self, limit=None, **kwargs): """Restrictions involving a list of subscriptions. :param limit: The expected number of subscriptions in the list :param kwargs: Ignored arguments passed to storage API :raises ValidationFailed: if the limit is exceeded """ uplimit = self._limits_conf.max_subscriptions_per_page if limit is not None and not (0 < limit <= uplimit): msg = _('Limit must be at least 1 and may not ' 'be greater than {0}.') raise ValidationFailed( msg, self._limits_conf.max_subscriptions_per_page) def get_limit_conf_value(self, limit_conf_name=None): """Return the value of limit configuration. :param limit_conf_name: configuration name """ return self._limits_conf[limit_conf_name] def flavor_listing(self, limit=None, **kwargs): """Restrictions involving a list of pools. :param limit: The expected number of flavors in the list :param kwargs: Ignored arguments passed to storage API :raises ValidationFailed: if the limit is exceeded """ uplimit = self._limits_conf.max_flavors_per_page if limit is not None and not (0 < limit <= uplimit): msg = _('Limit must be at least 1 and no greater than {0}.') raise ValidationFailed(msg, self._limits_conf.max_flavors_per_page) def pool_listing(self, limit=None, **kwargs): """Restrictions involving a list of pools. 
:param limit: The expected number of pools in the list
        :param kwargs: Ignored arguments passed to storage API
        :raises ValidationFailed: if the limit is exceeded
        """

        uplimit = self._limits_conf.max_pools_per_page
        if limit is not None and not (0 < limit <= uplimit):
            msg = _('Limit must be at least 1 and no greater than {0}.')
            raise ValidationFailed(msg, self._limits_conf.max_pools_per_page)

    def client_id_uuid_safe(self, client_id):
        """Restrictions on the format of the client id.

        :param client_id: the client id of the request
        :raises ValidationFailed: if the client id is not a valid UUID in
            strict mode, or if its length is out of range otherwise
        """
        if self._limits_conf.client_id_uuid_safe == 'off':
            if (len(client_id) < self._limits_conf.min_length_client_id) or \
                    (len(client_id) > self._limits_conf.max_length_client_id):
                msg = _('Length of client id must be at least {0} and no '
                        'greater than {1}.')
                raise ValidationFailed(
                    msg, self._limits_conf.min_length_client_id,
                    self._limits_conf.max_length_client_id)
        if self._limits_conf.client_id_uuid_safe == 'strict':
            uuid.UUID(client_id)

    def topic_identification(self, topic, project):
        """Restrictions on a project id & topic name pair.

        :param topic: Name of the topic
        :param project: Project id
        :raises ValidationFailed: if the `topic` name is longer than 64
            characters or contains anything other than ASCII digits and
            letters, underscores, and dashes.  Also raises if `project`
            is not None but longer than 256 characters.
        """
        if project is not None and len(project) > PROJECT_ID_MAX_LEN:
            msg = _('Project ids may not be more than {0} characters long.')
            raise ValidationFailed(msg, PROJECT_ID_MAX_LEN)

        if len(topic) > QUEUE_NAME_MAX_LEN:
            msg = _('Topic names may not be more than {0} characters long.')
            raise ValidationFailed(msg, QUEUE_NAME_MAX_LEN)

        if not QUEUE_NAME_REGEX.match(topic):
            raise ValidationFailed(
                _('Topic names may only contain ASCII letters, digits, '
                  'underscores, and dashes.'))
././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5830135
zaqar-20.1.0.dev29/zaqar/transport/websocket/0000775000175100017510000000000015033040026020101 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0
zaqar-20.1.0.dev29/zaqar/transport/websocket/__init__.py0000664000175100017510000000131515033040005022207 0ustar00mylesmyles# Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Websocket Transport Driver"""

from zaqar.transport.websocket import driver

# Hoist into package namespace
Driver = driver.Driver
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0
zaqar-20.1.0.dev29/zaqar/transport/websocket/driver.py0000664000175100017510000000741215033040005021747 0ustar00mylesmyles# Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import asyncio
import socket

from oslo_log import log as logging
from oslo_utils import netutils

from zaqar.common import decorators
from zaqar.conf import drivers_transport_websocket
from zaqar.i18n import _
from zaqar.transport import base
from zaqar.transport.middleware import auth
from zaqar.transport.websocket import factory

LOG = logging.getLogger(__name__)


class Driver(base.DriverBase):

    def __init__(self, conf, api, cache):
        super(Driver, self).__init__(conf, None, None, None)
        self._api = api
        self._cache = cache

        self._conf.register_opts(drivers_transport_websocket.ALL_OPTS,
                                 group=drivers_transport_websocket.GROUP_NAME)
        self._ws_conf = self._conf[drivers_transport_websocket.GROUP_NAME]

        if self._conf.auth_strategy:
            auth_strategy = auth.strategy(self._conf.auth_strategy)
            self._auth_strategy = lambda app: auth_strategy.install(
                app, self._conf)
        else:
            self._auth_strategy = None

    @decorators.lazy_property(write=False)
    def factory(self):
        uri = 'ws://' + netutils.escape_ipv6(self._ws_conf.bind) + ':' + \
            str(self._ws_conf.port)
        return factory.ProtocolFactory(
            uri,
            handler=self._api,
            external_port=self._ws_conf.external_port,
            auth_strategy=self._auth_strategy,
            loop=asyncio.get_event_loop(),
            secret_key=self._conf.signed_url.secret_key)

    @decorators.lazy_property(write=False)
    def notification_factory(self):
        return factory.NotificationFactory(self.factory)

    def listen(self):
        """Self-host the WebSocket server.

        It runs the WebSocket server using 'bind' and 'port' options from
        the websocket config group, and the notification endpoint using
        the 'notification_bind' and 'notification_port' options.
        """
        msgtmpl = _('Serving on host %(bind)s:%(port)s')
        LOG.info(msgtmpl,
                 {'bind': self._ws_conf.bind, 'port': self._ws_conf.port})

        loop = asyncio.get_event_loop()
        coro_notification = loop.create_server(
            self.notification_factory,
            self._ws_conf.notification_bind,
            self._ws_conf.notification_port)
        coro = loop.create_server(
            self.factory,
            self._ws_conf.bind,
            self._ws_conf.port)

        def got_server(task):
            # Retrieve the port number of the listening socket
            port = task.result().sockets[0].getsockname()[1]
            if self._ws_conf.notification_bind is not None:
                host = self._ws_conf.notification_bind
            else:
                host = socket.gethostname()
            self.notification_factory.set_subscription_url(
                'http://%s:%s/' % (netutils.escape_ipv6(host), port))
            self._api.set_subscription_factory(self.notification_factory)

        task = asyncio.Task(coro_notification)
        task.add_done_callback(got_server)

        loop.run_until_complete(asyncio.gather(coro, task))

        try:
            loop.run_forever()
        except KeyboardInterrupt:
            pass
        finally:
            loop.close()
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0
zaqar-20.1.0.dev29/zaqar/transport/websocket/factory.py0000664000175100017510000000465115033040005022125 0ustar00mylesmyles# Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import txaio txaio.use_asyncio() from autobahn.asyncio import websocket import msgpack from oslo_serialization import jsonutils from oslo_utils import uuidutils from zaqar.transport.websocket import protocol class ProtocolFactory(websocket.WebSocketServerFactory): protocol = protocol.MessagingProtocol def __init__(self, uri, handler, external_port, auth_strategy, loop, secret_key): websocket.WebSocketServerFactory.__init__( self, url=uri, externalPort=external_port) self._handler = handler self._auth_strategy = auth_strategy self._loop = loop self._secret_key = secret_key self._protos = {} def __call__(self): proto_id = uuidutils.generate_uuid() proto = self.protocol(self._handler, proto_id, self._auth_strategy, self._loop) self._protos[proto_id] = proto proto.factory = self return proto def unregister(self, proto_id): self._protos.pop(proto_id) class NotificationFactory(object): protocol = protocol.NotificationProtocol def __init__(self, factory): self.message_factory = factory def set_subscription_url(self, url): self._subscription_url = url def get_subscriber(self, protocol): return '%s%s' % (self._subscription_url, protocol.proto_id) def send_data(self, data, proto_id): instance = self.message_factory._protos.get(proto_id) if instance: # NOTE(Eva-i): incoming data is encoded in JSON, let's convert it # to MsgPack, if notification should be encoded in binary format. if instance.notify_in_binary: data = msgpack.packb(jsonutils.loads(data)) instance.sendMessage(data, instance.notify_in_binary) def __call__(self): return self.protocol(self) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/transport/websocket/protocol.py0000664000175100017510000002437615033040005022325 0ustar00mylesmyles# Copyright (c) 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
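# A text-frame request handled by MessagingProtocol below is a JSON envelope
# with an "action", optional "headers", and an optional "body"; a binary
# frame carries the same structure encoded with MessagePack. Illustrative
# payloads (field values are examples only, not a fixed contract):
#
#     {"action": "authenticate",
#      "headers": {"X-Auth-Token": "<keystone token>"}}
#
#     {"action": "subscription_create",
#      "headers": {"Client-ID": "<uuid>", "X-Project-ID": "<project>"},
#      "body": {"queue_name": "demo", "ttl": 3600}}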
import asyncio import datetime import email import io import sys from autobahn.asyncio import websocket import msgpack from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import timeutils import txaio from zaqar.common import consts LOG = logging.getLogger(__name__) class MessagingProtocol(websocket.WebSocketServerProtocol): _fake_env = { 'REQUEST_METHOD': 'POST', 'SERVER_NAME': 'zaqar', 'SERVER_PORT': 80, 'SERVER_PROTOCOL': 'HTTP/1.1', 'PATH_INFO': '/', 'SCRIPT_NAME': '', 'wsgi.url_scheme': 'http' } def __init__(self, handler, proto_id, auth_strategy, loop): txaio.use_asyncio() websocket.WebSocketServerProtocol.__init__(self) self._handler = handler self.proto_id = proto_id self._auth_strategy = auth_strategy self._loop = loop self._authentified = False self._auth_env = None self._auth_app = None self._auth_in_binary = None self._deauth_handle = None self.notify_in_binary = None self._subscriptions = [] def onConnect(self, request): LOG.info("Client connecting: %s", request.peer) def onOpen(self): LOG.info("WebSocket connection open.") def onMessage(self, payload, isBinary): # Deserialize the request try: if isBinary: payload = msgpack.unpackb(payload) else: if isinstance(payload, bytes): payload = payload.decode() payload = jsonutils.loads(payload) except Exception: if isBinary: pack_name = 'binary (MessagePack)' else: pack_name = 'text (JSON)' ex_type, ex_value = sys.exc_info()[:2] ex_name = ex_type.__name__ msg = 'Can\'t decode {0} request. {1}: {2}'.format( pack_name, ex_name, ex_value) LOG.debug(msg) body = {'error': msg} resp = self._handler.create_response(400, body) return self._send_response(resp, isBinary) # Check if the request is dict if not isinstance(payload, dict): body = { 'error': 'Unexpected body type. Expected dict or dict like.' } resp = self._handler.create_response(400, body) return self._send_response(resp, isBinary) # Parse the request req = self._handler.create_request(payload, self._auth_env) # Validate and process the request resp = self._handler.validate_request(payload, req) if resp is None: if self._auth_strategy and not self._authentified: if self._auth_app or payload.get('action') != 'authenticate': if 'URL-Signature' in payload.get('headers', {}): if self._handler.verify_signature( self.factory._secret_key, payload): resp = self._handler.process_request(req, self) else: body = {'error': 'Not authentified.'} resp = self._handler.create_response( 403, body, req) else: body = {'error': 'Not authentified.'} resp = self._handler.create_response(403, body, req) else: return self._authenticate(payload, isBinary) elif payload.get('action') == 'authenticate': return self._authenticate(payload, isBinary) else: resp = self._handler.process_request(req, self) if payload.get('action') == consts.SUBSCRIPTION_CREATE: # NOTE(Eva-i): this will make further websocket # notifications encoded in the same format as the last # successful websocket subscription create request. if resp._headers['status'] == 201: subscriber = payload['body'].get('subscriber') # If there is no subscriber, the user has created websocket # subscription. 
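                        # Notifications for such a subscription are pushed
                        # back over this same connection, so remember whether
                        # the creating request was binary (MessagePack) or
                        # text (JSON).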
if not subscriber: self.notify_in_binary = isBinary self._subscriptions.append(resp) return self._send_response(resp, isBinary) def onClose(self, wasClean, code, reason): self._handler.clean_subscriptions(self._subscriptions) self.factory.unregister(self.proto_id) LOG.info("WebSocket connection closed: %s", reason) def _authenticate(self, payload, in_binary): self._auth_in_binary = in_binary self._auth_app = self._auth_strategy(self._auth_start) env = self._fake_env.copy() env.update( (self._header_to_env_var(key), value) for key, value in payload.get('headers').items()) self._auth_app(env, self._auth_response) def _auth_start(self, env, start_response): self._authentified = True self._auth_env = dict( (self._env_var_to_header(key), value) for key, value in env.items()) self._auth_app = None expire = env['keystone.token_info']['token']['expires_at'] expire_time = timeutils.parse_isotime(expire) now = datetime.datetime.now(tz=datetime.timezone.utc) delta = (expire_time - now).total_seconds() if self._deauth_handle is not None: self._deauth_handle.cancel() self._deauth_handle = self._loop.call_later( delta, self._deauthenticate) start_response('200 OK', []) def _deauthenticate(self): self._authentified = False self._auth_env = None self.sendClose(4003, 'Authentication expired.') def _auth_response(self, status, message): code = int(status.split()[0]) req = self._handler.create_request({'action': 'authenticate'}) if code != 200: # NOTE(wangxiyuan): _auth_app should be cleaned up the after the # authentication failure so that the client can be authenticated # again. self._auth_app = None body = {'error': 'Authentication failed.'} resp = self._handler.create_response(code, body, req) self._send_response(resp, self._auth_in_binary) else: body = {'message': 'Authentified.'} resp = self._handler.create_response(200, body, req) self._send_response(resp, self._auth_in_binary) def _header_to_env_var(self, key): return 'HTTP_%s' % key.replace('-', '_').upper() def _env_var_to_header(self, key): if key.startswith("HTTP_"): return key[5:].replace("_", "-") else: return key def _send_response(self, resp, in_binary): if in_binary: pack_name = 'bin' self.sendMessage(msgpack.packb(resp.get_response()), True) else: pack_name = 'txt' self.sendMessage(jsonutils.dump_as_bytes(resp.get_response()), False) if LOG.isEnabledFor(logging.INFO): api = resp._request._api status = resp._headers['status'] action = resp._request._action # Dump to JSON to print body without unicode prefixes on Python 2 body = jsonutils.dumps(resp._request._body) var_dict = {'api': api, 'pack_name': pack_name, 'status': status, 'action': action, 'body': body} LOG.info('Response: API %(api)s %(pack_name)s, %(status)s. 
' 'Request: action "%(action)s", body %(body)s.', var_dict) class NotificationProtocol(asyncio.Protocol): def __init__(self, factory): self._factory = factory def connection_made(self, transport): self._transport = transport self._data = bytearray() self._state = 'INIT' self._subscriber_id = None self._length = 0 def write_status(self, status): self._transport.write(b'HTTP/1.0 %s\r\n\r\n' % status) self._transport.close() def data_received(self, data): self._data.extend(data) if self._state == 'INIT' and b'\r\n' in self._data: first_line, self._data = self._data.split(b'\r\n', 1) verb, uri, version = first_line.split() if verb != b'POST': self.write_status(b'405 Not Allowed') return self._state = 'HEADERS' self._subscriber_id = uri[1:].decode('utf-8') if self._state == 'HEADERS' and b'\r\n\r\n' in self._data: headers, self._data = self._data.split(b'\r\n\r\n', 1) headers = email.message_from_binary_file(io.BytesIO(headers)) # try both cases of content-length for backwards compatibility length = headers.get(b'content-length', headers.get('Content-Length')) if not length: LOG.debug('Content-Length not provided in the data message') self.write_status(b'400 Bad Request') return self._length = int(length) self._state = 'BODY' if self._state == 'BODY': if len(self._data) >= self._length: if self._subscriber_id: self._factory.send_data(bytes(self._data), str(self._subscriber_id)) self.write_status(b'200 OK') else: self.write_status(b'400 Bad Request') def connection_lost(self, exc): self._data = self._subscriber_id = None self._length = 0 ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5830135 zaqar-20.1.0.dev29/zaqar/transport/wsgi/0000775000175100017510000000000015033040026017064 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/transport/wsgi/__init__.py0000664000175100017510000000123615033040005021174 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """WSGI Transport Driver""" from zaqar.transport.wsgi import driver # Hoist into package namespace Driver = driver.Driver ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/transport/wsgi/app.py0000664000175100017510000000333015033040005020212 0ustar00mylesmyles# Copyright (c) 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """WSGI App for WSGI Containers This app should be used by external WSGI containers. 
For example: $ gunicorn zaqar.transport.wsgi.app:app NOTE: As for external containers, it is necessary to put config files in the standard paths. There's no common way to specify / pass configuration files to the WSGI app when it is called from other apps. """ import threading from oslo_config import cfg from oslo_log import log from oslo_reports import guru_meditation_report as gmr from oslo_reports import opts as gmr_opts from zaqar import bootstrap from zaqar import version # Use the global CONF instance CONF = cfg.CONF def init_application(): gmr_opts.set_defaults(CONF) log.register_options(CONF) CONF(project='zaqar', prog='zaqar-queues', args=[]) log.setup(CONF, 'zaqar') gmr.TextGuruMeditation.setup_autorun(version, conf=CONF) boot = bootstrap.Bootstrap(CONF) CONF.drivers.transport = 'wsgi' return boot.transport.app app = application = None lock = threading.Lock() with lock: if application is None: application = init_application() # Keep the old name for compatibility app = application ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/transport/wsgi/driver.py0000664000175100017510000001544315033040005020735 0ustar00mylesmyles# Copyright (c) 2013 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
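# Each callable in ``Driver.before_hooks`` below is wrapped by
# ``FuncMiddleware`` so that Falcon invokes it before routing reaches a
# resource. A hook is therefore just a function taking (req, resp, params);
# a hypothetical custom hook (illustrative only, not part of this module)
# would look like:
#
#     def require_demo_header(req, resp, params):
#         if req.get_header('X-Demo') is None:
#             raise falcon.HTTPBadRequest(title='Missing header',
#                                         description='X-Demo is required')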
import falcon
import socket
from wsgiref import simple_server

from oslo_log import log as logging
from oslo_utils import netutils

from zaqar.common import decorators
from zaqar.common.transport.wsgi import helpers
from zaqar.conf import drivers_transport_wsgi
from zaqar.i18n import _
from zaqar import transport
from zaqar.transport import acl
from zaqar.transport import encryptor
from zaqar.transport.middleware import auth
from zaqar.transport.middleware import cors
from zaqar.transport.middleware import profile
from zaqar.transport import validation
from zaqar.transport.wsgi import v1_1
from zaqar.transport.wsgi import v2_0
from zaqar.transport.wsgi import version

LOG = logging.getLogger(__name__)


class FuncMiddleware(object):

    def __init__(self, func):
        self.func = func

    def process_resource(self, req, resp, resource, params):
        return self.func(req, resp, params)


class Driver(transport.DriverBase):

    def __init__(self, conf, storage, cache, control):
        super(Driver, self).__init__(conf, storage, cache, control)

        self._conf.register_opts(drivers_transport_wsgi.ALL_OPTS,
                                 group=drivers_transport_wsgi.GROUP_NAME)
        self._wsgi_conf = self._conf[drivers_transport_wsgi.GROUP_NAME]
        self._validate = validation.Validator(self._conf)
        self._encryptor_factory = encryptor.EncryptionFactory(self._conf)

        self.app = None
        self._init_routes()
        self._init_middleware()

    def _verify_pre_signed_url(self, req, resp, params):
        return helpers.verify_pre_signed_url(self._conf.signed_url.secret_key,
                                             req, resp, params)

    def _validate_queue_identification(self, req, resp, params):
        return helpers.validate_queue_identification(
            self._validate.queue_identification, req, resp, params)

    def _validate_topic_identification(self, req, resp, params):
        return helpers.validate_topic_identification(
            self._validate.topic_identification, req, resp, params)

    def _require_client_id(self, req, resp, params):
        return helpers.require_client_id(
            self._validate.client_id_uuid_safe, req, resp, params)

    @decorators.lazy_property(write=False)
    def before_hooks(self):
        """Exposed to facilitate unit testing."""
        return [
            self._verify_pre_signed_url,
            helpers.require_content_type_be_non_urlencoded,
            helpers.require_accepts_json,
            self._require_client_id,
            helpers.extract_project_id,

            # NOTE(jeffrey4l): Depends on the project_id and client_id being
            # extracted above
            helpers.inject_context,

            # NOTE(kgriffs): Depends on project_id being extracted, above
            self._validate_queue_identification,

            # NOTE(kgriffs): Depends on project_id being extracted, above
            self._validate_topic_identification,

            # NOTE(wanghao): verify the extra specs if they exist
            helpers.verify_extra_spec
        ]

    def _init_routes(self):
        """Initialize hooks and URI routes to resources."""

        catalog = [
            ('/v1.1', v1_1.public_endpoints(self, self._conf)),
            ('/v2', v2_0.public_endpoints(self, self._conf)),
            ('/', [('', version.Resource())])
        ]

        if self._conf.admin_mode:
            catalog.extend([
                ('/v1.1', v1_1.private_endpoints(self, self._conf)),
                ('/v2', v2_0.private_endpoints(self, self._conf)),
            ])

        middleware = [FuncMiddleware(hook) for hook in self.before_hooks]
        self.app = falcon.App(middleware=middleware)

        # Set options to keep behavior compatible to pre-2.0.0 falcon
        self.app.req_options.auto_parse_qs_csv = True
        self.app.req_options.keep_blank_qs_values = False

        self.app.add_error_handler(Exception, self._error_handler)

        for version_path, endpoints in catalog:
            if endpoints:
                for route, resource in endpoints:
                    self.app.add_route(version_path + route, resource)

    def _init_middleware(self):
        """Initialize WSGI middleware."""

        # NOTE(zhiyan): Install Profiler
        if (self._conf.profiler.enabled and
                self._conf.profiler.trace_wsgi_transport):
            self.app = profile.install_wsgi_tracer(self.app, self._conf)

        auth_app = self.app
        # NOTE(flaper87): Install Auth
        if self._conf.auth_strategy:
            strategy = auth.strategy(self._conf.auth_strategy)
            auth_app = strategy.install(self.app, self._conf)

        self.app = auth.SignedAndExtraSpecHeadersAuth(self.app, auth_app)

        # NOTE(wangxiyuan): Install CORS; this middleware should be called
        # before Keystone auth.
        self.app = cors.install_cors(self.app, auth_app, self._conf)

        acl.setup_policy(self._conf)

    def _error_handler(self, request, response, exc, params):
        if isinstance(exc, falcon.HTTPError):
            raise
        LOG.exception('Internal server error')
        raise falcon.HTTPInternalServerError(
            title='Internal server error',
            description=str(exc))

    def _get_server_cls(self, host):
        """Return an appropriate WSGI server class based on the provided host.

        :param host: The listen host for the zaqar API server.
        """
        server_cls = simple_server.WSGIServer
        if netutils.is_valid_ipv6(host):
            if getattr(server_cls, 'address_family') == socket.AF_INET:
                class server_cls(server_cls):
                    address_family = socket.AF_INET6
        return server_cls

    def listen(self):
        """Self-host using 'bind' and 'port' from the WSGI config group."""

        msgtmpl = _('Serving on host %(bind)s:%(port)s')
        LOG.info(msgtmpl,
                 {'bind': self._wsgi_conf.bind, 'port': self._wsgi_conf.port})

        server_cls = self._get_server_cls(self._wsgi_conf.bind)
        httpd = simple_server.make_server(self._wsgi_conf.bind,
                                          self._wsgi_conf.port,
                                          self.app,
                                          server_cls)
        httpd.serve_forever()
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0
zaqar-20.1.0.dev29/zaqar/transport/wsgi/errors.py0000664000175100017510000000604415033040005020753 0ustar00mylesmyles# Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
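# These wrappers only layer Zaqar-specific titles and descriptions on top of
# the stock falcon errors. Resource code raises them directly; for example
# (usage as it appears elsewhere in this package):
#
#     raise errors.HTTPBadRequestBody(_('Request body could not be parsed.'))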
import falcon from zaqar.i18n import _ class HTTPServiceUnavailable(falcon.HTTPServiceUnavailable): """Wraps falcon.HTTPServiceUnavailable with Zaqar messaging.""" TITLE = _('Service temporarily unavailable') DESCRIPTION = _('Please try again in a few seconds.') def __init__(self, description): description = description + ' ' + self.DESCRIPTION super(HTTPServiceUnavailable, self).__init__( title=self.TITLE, description=description) class HTTPBadRequestAPI(falcon.HTTPBadRequest): """Wraps falcon.HTTPBadRequest with a contextual title.""" TITLE = _('Invalid API request') def __init__(self, description): super(HTTPBadRequestAPI, self).__init__( title=self.TITLE, description=description) class HTTPBadRequestBody(falcon.HTTPBadRequest): """Wraps falcon.HTTPBadRequest with a contextual title.""" TITLE = _('Invalid request body') def __init__(self, description): super(HTTPBadRequestBody, self).__init__( title=self.TITLE, description=description) class HTTPDocumentTypeNotSupported(HTTPBadRequestBody): """Wraps HTTPBadRequestBody with a standard description.""" DESCRIPTION = _('Document type not supported.') def __init__(self): super(HTTPDocumentTypeNotSupported, self).__init__(self.DESCRIPTION) class HTTPForbidden(falcon.HTTPForbidden): """Wraps falcon.HTTPForbidden with a contextual title.""" TITLE = _('Not authorized') DESCRIPTION = _('You are not authorized to complete this action.') def __init__(self): super(HTTPForbidden, self).__init__( title=self.TITLE, description=self.DESCRIPTION) class HTTPConflict(falcon.HTTPConflict): """Wraps falcon.HTTPConflict with contextual title.""" TITLE = _('Resource conflict') def __init__(self, description, **kwargs): super(HTTPConflict, self).__init__( title=self.TITLE, description=description, **kwargs) class HTTPNotFound(falcon.HTTPNotFound): """Wraps falcon.HTTPConflict with contextual title.""" TITLE = _('Not found') def __init__(self, description): super(HTTPNotFound, self).__init__( title=self.TITLE, description=description) class HTTPUnsupportedMediaType(falcon.HTTPUnsupportedMediaType): """Wraps falcon.HTTPUnsupportedMediaType with contextual title.""" def __init__(self, description): super(HTTPUnsupportedMediaType, self).__init__( description=description) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/transport/wsgi/utils.py0000664000175100017510000002016215033040005020574 0ustar00mylesmyles# Copyright (c) 2013 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. import falcon import jsonschema from oslo_log import log as logging from zaqar.i18n import _ from zaqar.transport import utils from zaqar.transport.wsgi import errors JSONObject = dict """Represents a JSON object in Python.""" JSONArray = list """Represents a JSON array in Python.""" LOG = logging.getLogger(__name__) # # TODO(kgriffs): Create Falcon "before" hooks adapters for these functions # def deserialize(stream, len): """Deserializes JSON from a file-like stream. 
This function deserializes JSON from a stream, including translating read and parsing errors to HTTP error types. :param stream: file-like object from which to read an object or array of objects. :param len: number of bytes to read from stream :raises HTTPBadRequest: if the request is invalid :raises HTTPServiceUnavailable: if the http service is unavailable """ if len is None: description = _('Request body can not be empty') raise errors.HTTPBadRequestBody(description) try: # TODO(kgriffs): read_json should stream the resulting list # of messages, returning a generator rather than buffering # everything in memory (bp/streaming-serialization). return utils.read_json(stream, len) except utils.MalformedJSON as ex: LOG.debug(ex) description = _('Request body could not be parsed.') raise errors.HTTPBadRequestBody(description) except utils.OverflowedJSONInteger as ex: LOG.debug(ex) description = _('JSON contains integer that is too large.') raise errors.HTTPBadRequestBody(description) except Exception: # Error while reading from the network/server description = _('Request body could not be read.') LOG.exception(description) raise errors.HTTPServiceUnavailable(description) def sanitize(document, spec=None, doctype=JSONObject): """Validates a document and drops undesired fields. :param document: A dict to verify according to `spec`. :param spec: (Default None) Iterable describing expected fields, yielding tuples with the form of: (field_name, value_type, default_value) Note that value_type may either be a Python type, or the special string '*' to accept any type. default_value is the default to give the field if it is missing, or None to require that the field be present. If spec is None, the incoming documents will not be validated. :param doctype: type of document to expect; must be either JSONObject or JSONArray. :raises HTTPBadRequestBody: if the request is invalid :returns: A sanitized, filtered version of the document. If the document is a list of objects, each object will be filtered and returned in a new list. If, on the other hand, the document is expected to contain a single object, that object's fields will be filtered and the resulting object will be returned. """ if doctype is JSONObject: if not isinstance(document, JSONObject): raise errors.HTTPDocumentTypeNotSupported() return document if spec is None else filter(document, spec) if doctype is JSONArray: if not isinstance(document, JSONArray): raise errors.HTTPDocumentTypeNotSupported() if spec is None: return document return [filter(obj, spec) for obj in document] raise TypeError('doctype must be either a JSONObject or JSONArray') def filter(document, spec): """Validates and retrieves typed fields from a single document. Sanitizes a dict-like document by checking it against a list of field spec, and returning only those fields specified. :param document: dict-like object :param spec: iterable describing expected fields, yielding tuples with the form of: (field_name, value_type). Note that value_type may either be a Python type, or the special string '*' to accept any type. :raises HTTPBadRequest: if any field is missing or not an instance of the specified type :returns: A filtered dict containing only the fields listed in the spec """ filtered = {} for name, value_type, default_value in spec: filtered[name] = get_checked_field(document, name, value_type, default_value) return filtered def get_checked_field(document, name, value_type, default_value): """Validates and retrieves a typed field from a document. 
This function attempts to look up doc[name], and raises appropriate HTTP errors if the field is missing or not an instance of the given type. :param document: dict-like object :param name: field name :param value_type: expected value type, or '*' to accept any type :param default_value: Default value to use if the value is missing, or None to make the value required. :raises HTTPBadRequest: if the field is missing or not an instance of value_type :returns: value obtained from doc[name] """ try: value = document[name] except KeyError: if default_value is not None: value = default_value else: description = _('Missing "{name}" field.').format(name=name) raise errors.HTTPBadRequestBody(description) # PERF(kgriffs): We do our own little spec thing because it is way # faster than jsonschema. if value_type == '*' or isinstance(value, value_type): return value description = _('The value of the "{name}" field must be a {vtype}.') description = description.format(name=name, vtype=value_type.__name__) raise errors.HTTPBadRequestBody(description) def load(req): """Reads request body, raising an exception if it is not JSON. :param req: The request object to read from :type req: falcon.Request :return: a dictionary decoded from the JSON stream :rtype: dict :raises HTTPBadRequestBody: if JSON could not be parsed """ try: return utils.read_json(req.stream, req.content_length) except (utils.MalformedJSON, utils.OverflowedJSONInteger): message = 'JSON could not be parsed.' LOG.exception(message) raise errors.HTTPBadRequestBody(message) # TODO(cpp-cabrera): generalize this def validate(validator, document): """Verifies a document against a schema. :param validator: a validator to use to check validity :type validator: jsonschema.Draft4Validator :param document: document to check :type document: dict :raises HTTPBadRequestBody: if the request is invalid """ try: validator.validate(document) except jsonschema.ValidationError as ex: raise errors.HTTPBadRequestBody( '{0}: {1}'.format(ex.args, str(ex)) ) def message_url(message, base_path, claim_id=None): path = "/".join([base_path, 'messages', message['id']]) if claim_id: path += falcon.to_query_str({'claim_id': claim_id}) return path def format_message_v1(message, base_path, claim_id=None): return { 'href': message_url(message, base_path, claim_id), 'ttl': message['ttl'], 'age': message['age'], 'body': message['body'], } def format_message_v1_1(message, base_path, claim_id=None): url = message_url(message, base_path, claim_id) res = { 'id': message['id'], 'href': url, 'ttl': message['ttl'], 'age': message['age'], 'body': message['body'] } if message.get('checksum'): res['checksum'] = message.get('checksum') return res ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5840135 zaqar-20.1.0.dev29/zaqar/transport/wsgi/v1_1/0000775000175100017510000000000015033040026017632 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/transport/wsgi/v1_1/__init__.py0000664000175100017510000001002115033040005021732 0ustar00mylesmyles# Copyright (c) 2013 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. 
You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. from oslo_log import log as logging from zaqar.common import decorators from zaqar.transport.wsgi.v1_1 import claims from zaqar.transport.wsgi.v1_1 import flavors from zaqar.transport.wsgi.v1_1 import health from zaqar.transport.wsgi.v1_1 import homedoc from zaqar.transport.wsgi.v1_1 import messages from zaqar.transport.wsgi.v1_1 import ping from zaqar.transport.wsgi.v1_1 import pools from zaqar.transport.wsgi.v1_1 import queues from zaqar.transport.wsgi.v1_1 import stats LOG = logging.getLogger(__name__) VERSION = { 'id': '1.1', 'status': 'DEPRECATED', 'updated': '2016-7-29T02:22:47Z', 'media-types': [ { 'base': 'application/json', 'type': 'application/vnd.openstack.messaging-v1_1+json' } ], 'links': [ { 'href': '/v1.1/', 'rel': 'self' } ] } @decorators.api_version_manager(VERSION) def public_endpoints(driver, conf): queue_controller = driver._storage.queue_controller message_controller = driver._storage.message_controller claim_controller = driver._storage.claim_controller defaults = driver._defaults return [ # Home ('/', homedoc.Resource(conf)), # Queues Endpoints ('/queues', queues.CollectionResource(driver._validate, queue_controller)), ('/queues/{queue_name}', queues.ItemResource(driver._validate, queue_controller, message_controller)), ('/queues/{queue_name}/stats', stats.Resource(queue_controller)), # Messages Endpoints ('/queues/{queue_name}/messages', messages.CollectionResource(driver._wsgi_conf, driver._validate, message_controller, queue_controller, defaults.message_ttl)), ('/queues/{queue_name}/messages/{message_id}', messages.ItemResource(message_controller)), # Claims Endpoints ('/queues/{queue_name}/claims', claims.CollectionResource(driver._wsgi_conf, driver._validate, claim_controller, defaults.claim_ttl, defaults.claim_grace)), ('/queues/{queue_name}/claims/{claim_id}', claims.ItemResource(driver._wsgi_conf, driver._validate, claim_controller, defaults.claim_ttl, defaults.claim_grace)), # Ping ('/ping', ping.Resource(driver._storage)) ] @decorators.api_version_manager(VERSION) def private_endpoints(driver, conf): catalogue = [ # Health ('/health', health.Resource(driver._storage)), ] if conf.pooling: pools_controller = driver._control.pools_controller flavors_controller = driver._control.flavors_controller catalogue.extend([ ('/pools', pools.Listing(pools_controller)), ('/pools/{pool}', pools.Resource(pools_controller)), ('/flavors', flavors.Listing(flavors_controller)), ('/flavors/{flavor}', flavors.Resource(flavors_controller)), ]) return catalogue ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/transport/wsgi/v1_1/claims.py0000664000175100017510000001611615033040005021456 0ustar00mylesmyles# Copyright (c) 2013 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import falcon from oslo_log import log as logging from zaqar.common import decorators from zaqar.i18n import _ from zaqar.storage import errors as storage_errors from zaqar.transport import utils from zaqar.transport import validation from zaqar.transport.wsgi import errors as wsgi_errors from zaqar.transport.wsgi import utils as wsgi_utils LOG = logging.getLogger(__name__) class CollectionResource(object): __slots__ = ( '_claim_controller', '_validate', '_claim_post_spec', '_default_meta', ) def __init__(self, wsgi_conf, validate, claim_controller, default_claim_ttl, default_grace_ttl): self._claim_controller = claim_controller self._validate = validate self._claim_post_spec = ( ('ttl', int, default_claim_ttl), ('grace', int, default_grace_ttl), ) # NOTE(kgriffs): Create this once up front, rather than creating # a new dict every time, for the sake of performance. self._default_meta = { 'ttl': default_claim_ttl, 'grace': default_grace_ttl, } @decorators.TransportLog("Claims collection") def on_post(self, req, resp, project_id, queue_name): # Check for an explicit limit on the # of messages to claim limit = req.get_param_as_int('limit') claim_options = {} if limit is None else {'limit': limit} # NOTE(kgriffs): Clients may or may not actually include the # Content-Length header when the body is empty; the following # check works for both 0 and None. if not req.content_length: # No values given, so use defaults metadata = self._default_meta else: # Read claim metadata (e.g., TTL) and raise appropriate # HTTP errors as needed. document = wsgi_utils.deserialize(req.stream, req.content_length) metadata = wsgi_utils.sanitize(document, self._claim_post_spec) # Claim some messages try: self._validate.claim_creation(metadata, limit=limit) cid, msgs = self._claim_controller.create( queue_name, metadata=metadata, project=project_id, **claim_options) # Buffer claimed messages # TODO(kgriffs): optimize, along with serialization (below) resp_msgs = list(msgs) except validation.ValidationFailed as ex: LOG.debug(ex) raise wsgi_errors.HTTPBadRequestAPI(str(ex)) except Exception: description = _('Claim could not be created.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) # Serialize claimed messages, if any. This logic assumes # the storage driver returned well-formed messages. 
if len(resp_msgs) != 0: base_path = req.path.rpartition('/')[0] resp_msgs = [wsgi_utils.format_message_v1_1(msg, base_path, cid) for msg in resp_msgs] resp.location = req.path + '/' + cid resp.text = utils.to_json({'messages': resp_msgs}) resp.status = falcon.HTTP_201 else: resp.status = falcon.HTTP_204 class ItemResource(object): __slots__ = ('_claim_controller', '_validate', '_claim_patch_spec') def __init__(self, wsgi_conf, validate, claim_controller, default_claim_ttl, default_grace_ttl): self._claim_controller = claim_controller self._validate = validate self._claim_patch_spec = ( ('ttl', int, default_claim_ttl), ('grace', int, default_grace_ttl), ) @decorators.TransportLog("Claim item") def on_get(self, req, resp, project_id, queue_name, claim_id): try: meta, msgs = self._claim_controller.get( queue_name, claim_id=claim_id, project=project_id) # Buffer claimed messages # TODO(kgriffs): Optimize along with serialization (see below) meta['messages'] = list(msgs) except storage_errors.DoesNotExist as ex: LOG.debug(ex) raise wsgi_errors.HTTPNotFound(str(ex)) except Exception: description = _('Claim could not be queried.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) # Serialize claimed messages # TODO(kgriffs): Optimize base_path = req.path.rsplit('/', 2)[0] meta['messages'] = [wsgi_utils.format_message_v1_1(msg, base_path, claim_id) for msg in meta['messages']] meta['href'] = req.path del meta['id'] resp.text = utils.to_json(meta) # status defaults to 200 @decorators.TransportLog("Claim item") def on_patch(self, req, resp, project_id, queue_name, claim_id): # Read claim metadata (e.g., TTL) and raise appropriate # HTTP errors as needed. document = wsgi_utils.deserialize(req.stream, req.content_length) metadata = wsgi_utils.sanitize(document, self._claim_patch_spec) try: self._validate.claim_updating(metadata) self._claim_controller.update(queue_name, claim_id=claim_id, metadata=metadata, project=project_id) resp.status = falcon.HTTP_204 except validation.ValidationFailed as ex: LOG.debug(ex) raise wsgi_errors.HTTPBadRequestAPI(str(ex)) except storage_errors.DoesNotExist as ex: LOG.debug(ex) raise wsgi_errors.HTTPNotFound(str(ex)) except Exception: description = _('Claim could not be updated.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) @decorators.TransportLog("Claim item") def on_delete(self, req, resp, project_id, queue_name, claim_id): try: self._claim_controller.delete(queue_name, claim_id=claim_id, project=project_id) resp.status = falcon.HTTP_204 except Exception: description = _('Claim could not be deleted.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/transport/wsgi/v1_1/flavors.py0000664000175100017510000001442115033040005021657 0ustar00mylesmyles# Copyright (c) 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
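# The request bodies handled by these resources are small JSON documents.
# For example (illustrative flavor name and empty capabilities), a flavor
# is registered with:
#
#     PUT /v1.1/flavors/gold
#     {"capabilities": {}}
#
# and updated with a PATCH carrying the same "capabilities" field.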
import falcon
import jsonschema
from oslo_log import log

from zaqar.common.api.schemas.v1_1 import flavors as schema
from zaqar.common import utils as common_utils
from zaqar.i18n import _
from zaqar.storage import errors
from zaqar.transport import utils as transport_utils
from zaqar.transport.wsgi import errors as wsgi_errors
from zaqar.transport.wsgi import utils as wsgi_utils

LOG = log.getLogger(__name__)


class Listing(object):
    """A resource to list registered flavors

    :param flavors_controller: means to interact with storage
    """

    def __init__(self, flavors_controller):
        self._ctrl = flavors_controller

    def on_get(self, request, response, project_id):
        """Returns a flavor listing as objects embedded in an object:

        ::

            {
                "flavors": [
                    {"href": "", "capabilities": {}, "pool": ""},
                    ...
                ],
                "links": [
                    {"rel": "next", "href": ""},
                    ...
                ]
            }

        :returns: HTTP | 200
        """

        LOG.debug('LIST flavors for project_id %s', project_id)

        store = {}
        request.get_param('marker', store=store)
        request.get_param_as_int('limit', store=store)
        request.get_param_as_bool('detailed', store=store)

        cursor = self._ctrl.list(project=project_id, **store)
        flavors = list(next(cursor))

        results = {'links': []}

        if flavors:
            store['marker'] = next(cursor)

            for entry in flavors:
                entry['href'] = request.path + '/' + entry['name']

            results['links'] = [
                {
                    'rel': 'next',
                    'href': request.path + falcon.to_query_str(store)
                }
            ]

        results['flavors'] = flavors

        response.text = transport_utils.to_json(results)
        response.status = falcon.HTTP_200


class Resource(object):
    """A handler for individual flavor.

    :param flavors_controller: means to interact with storage
    """

    def __init__(self, flavors_controller):
        self._ctrl = flavors_controller
        validator_type = jsonschema.Draft4Validator
        self._validators = {
            'create': validator_type(schema.create),
            'capabilities': validator_type(schema.patch_capabilities),
        }

    def on_get(self, request, response, project_id, flavor):
        """Returns a JSON object for a single flavor entry:

        ::

            {"pool_group": "", "capabilities": {...}}

        :returns: HTTP | [200, 404]
        """

        LOG.debug('GET flavor - name: %s', flavor)
        data = None
        detailed = request.get_param_as_bool('detailed') or False

        try:
            data = self._ctrl.get(flavor,
                                  project=project_id,
                                  detailed=detailed)

        except errors.FlavorDoesNotExist as ex:
            LOG.debug(ex)
            raise wsgi_errors.HTTPNotFound(str(ex))

        data['href'] = request.path

        response.text = transport_utils.to_json(data)

    def on_put(self, request, response, project_id, flavor):
        """Registers a new flavor. Expects the following input:

        ::

            {"capabilities": {}}

        The capabilities object is required.

        :returns: HTTP | [201, 400]
        """

        LOG.debug('PUT flavor - name: %s', flavor)

        data = wsgi_utils.load(request)
        wsgi_utils.validate(self._validators['create'], data)

        try:
            self._ctrl.create(flavor,
                              project=project_id,
                              capabilities=data['capabilities'])
            response.status = falcon.HTTP_201
            response.location = request.path
        except errors.PoolGroupDoesNotExist:
            description = (_('Flavor %(flavor)s could not be created. ') %
                           dict(flavor=flavor))
            LOG.exception(description)
            raise falcon.HTTPBadRequest(
                title=_('Unable to create'), description=description)

    def on_delete(self, request, response, project_id, flavor):
        """Deregisters a flavor.

        :returns: HTTP | [204]
        """

        LOG.debug('DELETE flavor - name: %s', flavor)
        self._ctrl.delete(flavor, project=project_id)
        response.status = falcon.HTTP_204

    def on_patch(self, request, response, project_id, flavor):
        """Allows one to update a flavor's capabilities.
        This method expects the user to submit a JSON object containing
        the 'capabilities' field. If it is missing, the request is
        flagged as bad. There is also strict format checking through
        the use of jsonschema. Appropriate errors are returned in each
        case for badly formatted input.

        :returns: HTTP | [200, 400]
        """

        LOG.debug('PATCH flavor - name: %s', flavor)
        data = wsgi_utils.load(request)

        EXPECT = ('capabilities',)  # one-element tuple; note trailing comma
        if not any([(field in data) for field in EXPECT]):
            LOG.debug('PATCH flavor, bad params')
            raise wsgi_errors.HTTPBadRequestBody(
                '`capabilities` needs '
                'to be specified'
            )

        for field in EXPECT:
            wsgi_utils.validate(self._validators[field], data)

        fields = common_utils.fields(data, EXPECT,
                                     pred=lambda v: v is not None)

        try:
            self._ctrl.update(flavor, project=project_id, **fields)
        except errors.FlavorDoesNotExist as ex:
            LOG.exception('Flavor "%s" does not exist', flavor)
            raise wsgi_errors.HTTPNotFound(str(ex))
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0
zaqar-20.1.0.dev29/zaqar/transport/wsgi/v1_1/health.py0000664000175100017510000000236615033040005021455 0ustar00mylesmyles# Copyright (c) 2014 Rackspace, Inc.
# Copyright 2014 Catalyst IT Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.

from oslo_log import log as logging

from zaqar.i18n import _
from zaqar.transport import utils
from zaqar.transport.wsgi import errors as wsgi_errors


LOG = logging.getLogger(__name__)


class Resource(object):

    __slots__ = ('_driver',)

    def __init__(self, driver):
        self._driver = driver

    def on_get(self, req, resp, **kwargs):
        try:
            resp_dict = self._driver.health()
            resp.text = utils.to_json(resp_dict)
        except Exception:
            description = _('Health status could not be read.')
            LOG.exception(description)
            raise wsgi_errors.HTTPServiceUnavailable(description)
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0
zaqar-20.1.0.dev29/zaqar/transport/wsgi/v1_1/homedoc.py0000664000175100017510000002205315033040005021621 0ustar00mylesmyles# Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
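# The JSON_HOME document below follows draft-nottingham-json-home-03: each
# entry advertises an href-template that clients expand with the declared
# href-vars. For example (illustrative), expanding 'rel/queue' with
# {'queue_name': 'demo'} yields '/v1.1/queues/demo'. Resource.on_get serves
# the document with the 'application/json-home' content type.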
from oslo_serialization import jsonutils # NOTE(kgriffs): http://tools.ietf.org/html/draft-nottingham-json-home-03 JSON_HOME = { 'resources': { # ----------------------------------------------------------------- # Queues # ----------------------------------------------------------------- 'rel/queues': { 'href-template': '/v1.1/queues{?marker,limit,detailed}', 'href-vars': { 'marker': 'param/marker', 'limit': 'param/queue_limit', 'detailed': 'param/detailed', }, 'hints': { 'allow': ['GET'], 'formats': { 'application/json': {}, }, }, }, 'rel/queue': { 'href-template': '/v1.1/queues/{queue_name}', 'href-vars': { 'queue_name': 'param/queue_name', }, 'hints': { 'allow': ['PUT', 'DELETE'], 'formats': { 'application/json': {}, }, }, }, 'rel/queue_stats': { 'href-template': '/v1.1/queues/{queue_name}/stats', 'href-vars': { 'queue_name': 'param/queue_name', }, 'hints': { 'allow': ['GET'], 'formats': { 'application/json': {}, }, }, }, # ----------------------------------------------------------------- # Messages # ----------------------------------------------------------------- 'rel/messages': { 'href-template': ('/v1.1/queues/{queue_name}/messages' '{?marker,limit,echo,include_claimed}'), 'href-vars': { 'queue_name': 'param/queue_name', 'marker': 'param/marker', 'limit': 'param/messages_limit', 'echo': 'param/echo', 'include_claimed': 'param/include_claimed', }, 'hints': { 'allow': ['GET'], 'formats': { 'application/json': {}, }, }, }, 'rel/post_messages': { 'href-template': '/v1.1/queues/{queue_name}/messages', 'href-vars': { 'queue_name': 'param/queue_name', }, 'hints': { 'allow': ['POST'], 'formats': { 'application/json': {}, }, 'accept-post': ['application/json'], }, }, 'rel/messages_delete': { 'href-template': '/v1.1/queues/{queue_name}/messages{?ids,pop}', 'href-vars': { 'queue_name': 'param/queue_name', 'ids': 'param/ids', 'pop': 'param/pop' }, 'hints': { 'allow': [ 'DELETE' ], 'formats': { 'application/json': {} } } }, 'rel/message_delete': { 'href-template': '/v1.1/queues/{queue_name}/messages/{message_id}{?claim}', # noqa 'href-vars': { 'queue_name': 'param/queue_name', 'message_id': 'param/message_id', 'claim': 'param/claim_id' }, 'hints': { 'allow': [ 'DELETE' ], 'formats': { 'application/json': {} } } }, # ----------------------------------------------------------------- # Claims # ----------------------------------------------------------------- 'rel/claim': { 'href-template': '/v1.1/queues/{queue_name}/claims/{claim_id}', 'href-vars': { 'queue_name': 'param/queue_name', 'claim_id': 'param/claim_id', }, 'hints': { 'allow': ['GET'], 'formats': { 'application/json': {}, }, }, }, 'rel/post_claim': { 'href-template': '/v1.1/queues/{queue_name}/claims{?limit}', 'href-vars': { 'queue_name': 'param/queue_name', 'limit': 'param/claim_limit', }, 'hints': { 'allow': ['POST'], 'formats': { 'application/json': {}, }, 'accept-post': ['application/json'] }, }, 'rel/patch_claim': { 'href-template': '/v1.1/queues/{queue_name}/claims/{claim_id}', 'href-vars': { 'queue_name': 'param/queue_name', 'claim_id': 'param/claim_id', }, 'hints': { 'allow': ['PATCH'], 'formats': { 'application/json': {}, }, 'accept-post': ['application/json'] }, }, 'rel/delete_claim': { 'href-template': '/v1.1/queues/{queue_name}/claims/{claim_id}', 'href-vars': { 'queue_name': 'param/queue_name', 'claim_id': 'param/claim_id', }, 'hints': { 'allow': ['DELETE'], 'formats': { 'application/json': {}, }, }, }, # ----------------------------------------------------------------- # Ping # 
----------------------------------------------------------------- 'rel/ping': { 'href-template': '/v1.1/ping', 'hints': { 'allow': ['GET'], 'formats': { 'application/json': {}, } } } } } ADMIN_RESOURCES = { # ----------------------------------------------------------------- # Pools # ----------------------------------------------------------------- 'rel/pools': { 'href-template': '/v1.1/pools{?detailed,limit,marker}', 'href-vars': { 'detailed': 'param/detailed', 'limit': 'param/pool_limit', 'marker': 'param/marker', }, 'hints': { 'allow': ['GET'], 'formats': { 'application/json': {}, }, }, }, 'rel/pool': { 'href-template': '/v1.1/pools/{pool_name}', 'href-vars': { 'pool_name': 'param/pool_name', }, 'hints': { 'allow': ['GET', 'PUT', 'PATCH', 'DELETE'], 'formats': { 'application/json': {}, }, }, }, # ----------------------------------------------------------------- # Flavors # ----------------------------------------------------------------- 'rel/flavors': { 'href-template': '/v1.1/flavors{?detailed,limit,marker}', 'href-vars': { 'detailed': 'param/detailed', 'limit': 'param/flavor_limit', 'marker': 'param/marker', }, 'hints': { 'allow': ['GET'], 'formats': { 'application/json': {}, }, }, }, 'rel/flavor': { 'href-template': '/v1.1/flavors/{flavor_name}', 'href-vars': { 'flavor_name': 'param/flavor_name', }, 'hints': { 'allow': ['GET', 'PUT', 'PATCH', 'DELETE'], 'formats': { 'application/json': {}, }, }, }, # ----------------------------------------------------------------- # Health # ----------------------------------------------------------------- 'rel/health': { 'href': '/v1.1/health', 'hints': { 'allow': ['GET'], 'formats': { 'application/json': {}, }, }, }, } class Resource(object): def __init__(self, conf): if conf.admin_mode: JSON_HOME['resources'].update(ADMIN_RESOURCES) document = jsonutils.dumps(JSON_HOME, ensure_ascii=False, indent=4) self.document_utf8 = document.encode('utf-8') def on_get(self, req, resp, project_id): resp.data = self.document_utf8 resp.content_type = 'application/json-home' resp.cache_control = ['max-age=86400'] # status defaults to 200 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/transport/wsgi/v1_1/messages.py0000664000175100017510000003117715033040005022021 0ustar00mylesmyles# Copyright (c) 2013 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
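# CollectionResource.on_post below expects a JSON body with a "messages"
# array; each entry is sanitized against the ('ttl', 'body') spec, so the
# TTL falls back to the configured default when omitted. An illustrative
# request body (values are examples only):
#
#     {"messages": [{"ttl": 300, "body": {"event": "created"}},
#                   {"body": {"event": "deleted"}}]}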
import falcon from oslo_log import log as logging from zaqar.common import decorators from zaqar.common.transport.wsgi import helpers as wsgi_helpers from zaqar.i18n import _ from zaqar.storage import errors as storage_errors from zaqar.transport import utils from zaqar.transport import validation from zaqar.transport.wsgi import errors as wsgi_errors from zaqar.transport.wsgi import utils as wsgi_utils LOG = logging.getLogger(__name__) class CollectionResource(object): __slots__ = ( '_message_controller', '_queue_controller', '_wsgi_conf', '_validate', '_message_post_spec', ) def __init__(self, wsgi_conf, validate, message_controller, queue_controller, default_message_ttl): self._wsgi_conf = wsgi_conf self._validate = validate self._message_controller = message_controller self._queue_controller = queue_controller self._message_post_spec = ( ('ttl', int, default_message_ttl), ('body', '*', None), ) # ---------------------------------------------------------------------- # Helpers # ---------------------------------------------------------------------- def _get_by_id(self, base_path, project_id, queue_name, ids): """Returns one or more messages from the queue by ID.""" try: self._validate.message_listing(limit=len(ids)) messages = self._message_controller.bulk_get( queue_name, message_ids=ids, project=project_id) except validation.ValidationFailed as ex: LOG.debug(ex) raise wsgi_errors.HTTPBadRequestAPI(str(ex)) except Exception: description = _('Message could not be retrieved.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) # Prepare response messages = list(messages) if not messages: return None messages = [wsgi_utils.format_message_v1_1(m, base_path, m['claim_id']) for m in messages] return {'messages': messages} def _get(self, req, project_id, queue_name): client_uuid = wsgi_helpers.get_client_uuid(req) kwargs = {} # NOTE(kgriffs): This syntax ensures that # we don't clobber default values with None. 
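# For example, req.get_param('marker', store=kwargs) adds a 'marker' key to # kwargs only when the query string actually contains one; absent parameters # simply fall through to the storage driver's defaults.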
req.get_param('marker', store=kwargs) req.get_param_as_int('limit', store=kwargs) req.get_param_as_bool('echo', store=kwargs) req.get_param_as_bool('include_claimed', store=kwargs) try: self._validate.message_listing(**kwargs) results = self._message_controller.list( queue_name, project=project_id, client_uuid=client_uuid, **kwargs) # Buffer messages cursor = next(results) messages = list(cursor) except validation.ValidationFailed as ex: LOG.debug(ex) raise wsgi_errors.HTTPBadRequestAPI(str(ex)) except storage_errors.QueueDoesNotExist as ex: LOG.debug(ex) messages = None except Exception: description = _('Messages could not be listed.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) if not messages: messages = [] else: # Found some messages, so prepare the response kwargs['marker'] = next(results) base_path = req.path.rsplit('/', 1)[0] messages = [wsgi_utils.format_message_v1_1(m, base_path, m['claim_id']) for m in messages] links = [] if messages: links = [ { 'rel': 'next', 'href': req.path + falcon.to_query_str(kwargs) } ] return { 'messages': messages, 'links': links } # ---------------------------------------------------------------------- # Interface # ---------------------------------------------------------------------- @decorators.TransportLog("Messages collection") def on_post(self, req, resp, project_id, queue_name): client_uuid = wsgi_helpers.get_client_uuid(req) try: # Place JSON size restriction before parsing self._validate.message_length(req.content_length) except validation.ValidationFailed as ex: LOG.debug(ex) raise wsgi_errors.HTTPBadRequestAPI(str(ex)) # Deserialize and validate the incoming messages document = wsgi_utils.deserialize(req.stream, req.content_length) if 'messages' not in document: description = _('No messages were found in the request body.') raise wsgi_errors.HTTPBadRequestAPI(description) messages = wsgi_utils.sanitize(document['messages'], self._message_post_spec, doctype=wsgi_utils.JSONArray) try: self._validate.message_posting(messages) if not self._queue_controller.exists(queue_name, project_id): self._queue_controller.create(queue_name, project=project_id) message_ids = self._message_controller.post( queue_name, messages=messages, project=project_id, client_uuid=client_uuid) except validation.ValidationFailed as ex: LOG.debug(ex) raise wsgi_errors.HTTPBadRequestAPI(str(ex)) except storage_errors.DoesNotExist as ex: LOG.debug(ex) raise wsgi_errors.HTTPNotFound(str(ex)) except storage_errors.MessageConflict: description = _('No messages could be enqueued.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) except Exception: description = _('Messages could not be enqueued.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) # Prepare the response ids_value = ','.join(message_ids) resp.location = req.path + '?ids=' + ids_value hrefs = [req.path + '/' + id for id in message_ids] body = {'resources': hrefs} resp.text = utils.to_json(body) resp.status = falcon.HTTP_201 @decorators.TransportLog("Messages collection") def on_get(self, req, resp, project_id, queue_name): ids = req.get_param_as_list('ids') if ids is None: response = self._get(req, project_id, queue_name) else: response = self._get_by_id(req.path.rsplit('/', 1)[0], project_id, queue_name, ids) if response is None: # NOTE(TheSriram): Trying to get a message by ID should # return the message if it's present, otherwise a 404 since # the message might have been deleted.
msg = _('No messages with IDs: {ids} found in the queue {queue} ' 'for project {project}.') description = msg.format(queue=queue_name, project=project_id, ids=ids) raise wsgi_errors.HTTPNotFound(description) else: resp.text = utils.to_json(response) # status defaults to 200 @decorators.TransportLog("Messages collection") def on_delete(self, req, resp, project_id, queue_name): ids = req.get_param_as_list('ids') pop_limit = req.get_param_as_int('pop') try: self._validate.message_deletion(ids, pop_limit) except validation.ValidationFailed as ex: LOG.debug(ex) raise wsgi_errors.HTTPBadRequestAPI(str(ex)) if ids: resp.status = self._delete_messages_by_id(queue_name, ids, project_id) elif pop_limit: resp.status, resp.text = self._pop_messages(queue_name, project_id, pop_limit) def _delete_messages_by_id(self, queue_name, ids, project_id): try: self._message_controller.bulk_delete( queue_name, message_ids=ids, project=project_id) except Exception: description = _('Messages could not be deleted.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) return falcon.HTTP_204 def _pop_messages(self, queue_name, project_id, pop_limit): try: LOG.debug('POP messages - queue: %(queue)s, ' 'project: %(project)s', {'queue': queue_name, 'project': project_id}) messages = self._message_controller.pop( queue_name, project=project_id, limit=pop_limit) except Exception: description = _('Messages could not be popped.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) # Prepare response if not messages: messages = [] body = {'messages': messages} body = utils.to_json(body) return falcon.HTTP_200, body class ItemResource(object): __slots__ = '_message_controller' def __init__(self, message_controller): self._message_controller = message_controller @decorators.TransportLog("Messages item") def on_get(self, req, resp, project_id, queue_name, message_id): try: message = self._message_controller.get( queue_name, message_id, project=project_id) except storage_errors.DoesNotExist as ex: LOG.debug(ex) raise wsgi_errors.HTTPNotFound(str(ex)) except Exception: description = _('Message could not be retrieved.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) # Prepare response message['href'] = req.path message = wsgi_utils.format_message_v1_1(message, req.path.rsplit('/', 2)[0], message['claim_id']) resp.text = utils.to_json(message) # status defaults to 200 @decorators.TransportLog("Messages item") def on_delete(self, req, resp, project_id, queue_name, message_id): error_title = _('Unable to delete') try: self._message_controller.delete( queue_name, message_id=message_id, project=project_id, claim=req.get_param('claim_id')) except storage_errors.MessageNotClaimed as ex: LOG.debug(ex) description = _('A claim was specified, but the message ' 'is not currently claimed.') raise falcon.HTTPBadRequest( title=error_title, description=description) except storage_errors.ClaimDoesNotExist as ex: LOG.debug(ex) description = _('The specified claim does not exist or ' 'has expired.') raise falcon.HTTPBadRequest( title=error_title, description=description) except storage_errors.NotPermitted as ex: LOG.debug(ex) description = _('This message is claimed; it cannot be ' 'deleted without a valid claim ID.') raise falcon.HTTPForbidden( title=error_title, description=description) except Exception: description = _('Message could not be deleted.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) # All good resp.status
= falcon.HTTP_204 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/transport/wsgi/v1_1/ping.py0000664000175100017510000000170715033040005021143 0ustar00mylesmyles# Copyright 2014 IBM Corp. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. import falcon class Resource(object): __slots__ = ('_driver',) def __init__(self, driver): self._driver = driver def on_get(self, req, resp, **kwargs): resp.status = (falcon.HTTP_204 if self._driver.is_alive() else falcon.HTTP_503) def on_head(self, req, resp, **kwargs): resp.status = falcon.HTTP_204 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/transport/wsgi/v1_1/pools.py0000664000175100017510000001727715033040005021353 0ustar00mylesmyles# Copyright (c) 2013 Rackspace Hosting, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """pools: a resource to handle storage pool management A pool is added by an operator by interacting with the pooling-related endpoints. When specifying a pool, the following fields are required: :: { "name": string, "weight": integer, "uri": string::uri } Furthermore, depending on the underlying storage type of pool being registered, there is an optional field: :: { "options": {...} } """ import falcon import jsonschema from oslo_log import log from zaqar.common.api.schemas import pools as schema from zaqar.common import utils as common_utils from zaqar.i18n import _ from zaqar.storage import errors from zaqar.storage import utils as storage_utils from zaqar.transport import utils as transport_utils from zaqar.transport.wsgi import errors as wsgi_errors from zaqar.transport.wsgi import utils as wsgi_utils LOG = log.getLogger(__name__) class Listing(object): """A resource to list registered pools :param pools_controller: means to interact with storage """ def __init__(self, pools_controller): self._ctrl = pools_controller def on_get(self, request, response, project_id): """Returns a pool listing as objects embedded in an object: :: { "pools": [ {"href": "", "weight": 100, "uri": ""}, ... 
], "links": [ {"href": "", "rel": "next"} ] } :returns: HTTP | 200 """ LOG.debug('LIST pools') store = {} request.get_param('marker', store=store) request.get_param_as_int('limit', store=store) request.get_param_as_bool('detailed', store=store) cursor = self._ctrl.list(**store) pools = list(next(cursor)) results = {'links': []} if pools: store['marker'] = next(cursor) for entry in pools: entry['href'] = request.path + '/' + entry['name'] results['links'] = [ { 'rel': 'next', 'href': request.path + falcon.to_query_str(store) } ] results['pools'] = pools response.content_location = request.relative_uri response.text = transport_utils.to_json(results) response.status = falcon.HTTP_200 class Resource(object): """A handler for individual pool. :param pools_controller: means to interact with storage """ def __init__(self, pools_controller): self._ctrl = pools_controller validator_type = jsonschema.Draft4Validator self._validators = { 'weight': validator_type(schema.patch_weight), 'uri': validator_type(schema.patch_uri), 'group': validator_type(schema.patch_uri), 'options': validator_type(schema.patch_options), 'create': validator_type(schema.create) } def on_get(self, request, response, project_id, pool): """Returns a JSON object for a single pool entry: :: {"weight": 100, "uri": "", options: {...}} :returns: HTTP | [200, 404] """ LOG.debug('GET pool - name: %s', pool) data = None detailed = request.get_param_as_bool('detailed') or False try: data = self._ctrl.get(pool, detailed) except errors.PoolDoesNotExist as ex: LOG.debug(ex) raise wsgi_errors.HTTPNotFound(str(ex)) data['href'] = request.path response.text = transport_utils.to_json(data) def on_put(self, request, response, project_id, pool): """Registers a new pool. Expects the following input: :: {"weight": 100, "uri": ""} An options object may also be provided. :returns: HTTP | [201, 204] """ LOG.debug('PUT pool - name: %s', pool) conf = self._ctrl.driver.conf data = wsgi_utils.load(request) wsgi_utils.validate(self._validators['create'], data) if not storage_utils.can_connect(data['uri'], conf=conf): raise wsgi_errors.HTTPBadRequestBody( 'cannot connect to %s' % data['uri'] ) try: self._ctrl.create(pool, weight=data['weight'], uri=data['uri'], options=data.get('options', {})) response.status = falcon.HTTP_201 response.location = request.path except errors.PoolCapabilitiesMismatch as e: title = _('Unable to create pool') LOG.exception(title) raise falcon.HTTPBadRequest(title=title, description=str(e)) except errors.PoolAlreadyExists as e: LOG.exception('Pool "%s" already exists', pool) raise wsgi_errors.HTTPConflict(str(e)) def on_delete(self, request, response, project_id, pool): """Deregisters a pool. :returns: HTTP | [204, 403] """ LOG.debug('DELETE pool - name: %s', pool) try: self._ctrl.delete(pool) except errors.PoolInUseByFlavor as ex: title = _('Unable to delete') description = _('This pool is used by flavors {flavor}; ' 'It cannot be deleted.') description = description.format(flavor=ex.flavor) LOG.exception(description) raise falcon.HTTPForbidden(title=title, description=description) response.status = falcon.HTTP_204 def on_patch(self, request, response, project_id, pool): """Allows one to update a pool's weight, uri, and/or options. This method expects the user to submit a JSON object containing at least one of: 'uri', 'weight', 'group', 'options'. If none are found, the request is flagged as bad. There is also strict format checking through the use of jsonschema. 
Appropriate errors are returned in each case for badly formatted input. :returns: HTTP | [200, 400] """ LOG.debug('PATCH pool - name: %s', pool) data = wsgi_utils.load(request) EXPECT = ('weight', 'uri', 'options') if not any([(field in data) for field in EXPECT]): LOG.debug('PATCH pool, bad params') raise wsgi_errors.HTTPBadRequestBody( 'One of `uri`, `weight`, or `options` needs ' 'to be specified' ) for field in EXPECT: wsgi_utils.validate(self._validators[field], data) conf = self._ctrl.driver.conf if 'uri' in data and not storage_utils.can_connect(data['uri'], conf=conf): raise wsgi_errors.HTTPBadRequestBody( 'cannot connect to %s' % data['uri'] ) fields = common_utils.fields(data, EXPECT, pred=lambda v: v is not None) try: self._ctrl.update(pool, **fields) except errors.PoolDoesNotExist as ex: LOG.exception('Pool "%s" does not exist', pool) raise wsgi_errors.HTTPNotFound(str(ex)) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/transport/wsgi/v1_1/queues.py0000664000175100017510000001315615033040005021516 0ustar00mylesmyles# Copyright (c) 2013 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import falcon from oslo_log import log as logging from zaqar.common import decorators from zaqar.i18n import _ from zaqar.storage import errors as storage_errors from zaqar.transport import utils from zaqar.transport import validation from zaqar.transport.wsgi import errors as wsgi_errors from zaqar.transport.wsgi import utils as wsgi_utils LOG = logging.getLogger(__name__) class ItemResource(object): __slots__ = ('_validate', '_queue_controller', '_message_controller') def __init__(self, validate, queue_controller, message_controller): self._validate = validate self._queue_controller = queue_controller self._message_controller = message_controller @decorators.TransportLog("Queue metadata") def on_get(self, req, resp, project_id, queue_name): try: resp_dict = self._queue_controller.get(queue_name, project=project_id) except storage_errors.DoesNotExist as ex: LOG.debug(ex) raise wsgi_errors.HTTPNotFound(str(ex)) except Exception: description = _('Queue metadata could not be retrieved.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) resp.text = utils.to_json(resp_dict) # status defaults to 200 @decorators.TransportLog("Queue item") def on_put(self, req, resp, project_id, queue_name): try: # Place JSON size restriction before parsing self._validate.queue_metadata_length(req.content_length) # Deserialize queue metadata metadata = None if req.content_length: document = wsgi_utils.deserialize(req.stream, req.content_length) metadata = wsgi_utils.sanitize(document) # NOTE(Eva-i): reserved queue attributes are a Zaqar feature # introduced in API v2, but we have to ensure bad data will not # come from older APIs, so we validate metadata here.
self._validate.queue_metadata_putting(metadata) except validation.ValidationFailed as ex: LOG.debug(ex) raise wsgi_errors.HTTPBadRequestAPI(str(ex)) try: created = self._queue_controller.create(queue_name, metadata=metadata, project=project_id) except storage_errors.FlavorDoesNotExist as ex: LOG.exception('Flavor for queue "%s" does not exist', queue_name) raise wsgi_errors.HTTPBadRequestAPI(str(ex)) except Exception: description = _('Queue could not be created.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) resp.status = falcon.HTTP_201 if created else falcon.HTTP_204 resp.location = req.path @decorators.TransportLog("Queue item") def on_delete(self, req, resp, project_id, queue_name): try: self._queue_controller.delete(queue_name, project=project_id) except Exception: description = _('Queue could not be deleted.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) resp.status = falcon.HTTP_204 class CollectionResource(object): __slots__ = ('_queue_controller', '_validate') def __init__(self, validate, queue_controller): self._queue_controller = queue_controller self._validate = validate @decorators.TransportLog("Queue collection") def on_get(self, req, resp, project_id): kwargs = {} # NOTE(kgriffs): This syntax ensures that # we don't clobber default values with None. req.get_param('marker', store=kwargs) req.get_param_as_int('limit', store=kwargs) req.get_param_as_bool('detailed', store=kwargs) try: self._validate.queue_listing(**kwargs) results = self._queue_controller.list(project=project_id, **kwargs) # Buffer list of queues queues = list(next(results)) except validation.ValidationFailed as ex: LOG.debug(ex) raise wsgi_errors.HTTPBadRequestAPI(str(ex)) except Exception: description = _('Queues could not be listed.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) # Got some. Prepare the response. kwargs['marker'] = next(results) or kwargs.get('marker', '') for each_queue in queues: each_queue['href'] = req.path + '/' + each_queue['name'] links = [] if queues: links = [ { 'rel': 'next', 'href': req.path + falcon.to_query_str(kwargs) } ] response_body = { 'queues': queues, 'links': links } resp.text = utils.to_json(response_body) # status defaults to 200 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/transport/wsgi/v1_1/stats.py0000664000175100017510000000446515033040005021350 0ustar00mylesmyles# Copyright (c) 2013 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License.
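# ---------------------------------------------------------------------------
# Editor's illustration (not part of the original sources; numeric values are
# invented): the stats resource below renders a document shaped roughly like
#
#     {
#         "messages": {
#             "claimed": 2,
#             "free": 8,
#             "total": 10,
#             "oldest": {"href": ".../messages/<id>", ...},
#             "newest": {"href": ".../messages/<id>", ...}
#         }
#     }
#
# where 'oldest'/'newest' only appear for non-empty queues, and their 'id'
# fields are rewritten into 'href' links by Resource.on_get.
# ---------------------------------------------------------------------------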
from oslo_log import log as logging from zaqar.i18n import _ from zaqar.storage import errors as storage_errors from zaqar.transport import utils from zaqar.transport.wsgi import errors as wsgi_errors LOG = logging.getLogger(__name__) class Resource(object): __slots__ = '_queue_ctrl' def __init__(self, queue_controller): self._queue_ctrl = queue_controller def on_get(self, req, resp, project_id, queue_name): try: resp_dict = self._queue_ctrl.stats(queue_name, project=project_id) message_stats = resp_dict['messages'] if message_stats['total'] != 0: base_path = req.path[:req.path.rindex('/')] + '/messages/' newest = message_stats['newest'] newest['href'] = base_path + newest['id'] del newest['id'] oldest = message_stats['oldest'] oldest['href'] = base_path + oldest['id'] del oldest['id'] resp.text = utils.to_json(resp_dict) # status defaults to 200 except (storage_errors.QueueDoesNotExist, storage_errors.QueueIsEmpty): resp_dict = { 'messages': { 'claimed': 0, 'free': 0, 'total': 0 } } resp.text = utils.to_json(resp_dict) except storage_errors.DoesNotExist as ex: LOG.debug(ex) raise wsgi_errors.HTTPNotFound(str(ex)) except Exception: description = _('Queue stats could not be read.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5850134 zaqar-20.1.0.dev29/zaqar/transport/wsgi/v2_0/0000775000175100017510000000000015033040026017632 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/transport/wsgi/v2_0/__init__.py0000664000175100017510000001657115033040005021752 0ustar00mylesmyles# Copyright (c) 2013 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. 
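# ---------------------------------------------------------------------------
# Editor's note (illustration, not original text): each entry returned by
# public_endpoints()/private_endpoints() below is a (uri_template, resource)
# pair. A simplified sketch of how such a catalogue could be mounted on a
# Falcon app (the real driver adds version prefixes and middleware):
#
#     app = falcon.App()
#     for route, resource in public_endpoints(driver, conf):
#         app.add_route('/v2' + route, resource)
# ---------------------------------------------------------------------------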
from zaqar.common import decorators from zaqar.transport.wsgi.v2_0 import claims from zaqar.transport.wsgi.v2_0 import flavors from zaqar.transport.wsgi.v2_0 import health from zaqar.transport.wsgi.v2_0 import homedoc from zaqar.transport.wsgi.v2_0 import messages from zaqar.transport.wsgi.v2_0 import ping from zaqar.transport.wsgi.v2_0 import pools from zaqar.transport.wsgi.v2_0 import purge from zaqar.transport.wsgi.v2_0 import queues from zaqar.transport.wsgi.v2_0 import stats from zaqar.transport.wsgi.v2_0 import subscriptions from zaqar.transport.wsgi.v2_0 import topic from zaqar.transport.wsgi.v2_0 import topic_purge from zaqar.transport.wsgi.v2_0 import topic_stats from zaqar.transport.wsgi.v2_0 import urls VERSION = { 'id': '2', 'status': 'CURRENT', 'updated': '2014-09-24T04:06:47Z', 'media-types': [ { 'base': 'application/json', 'type': 'application/vnd.openstack.messaging-v2+json' } ], 'links': [ { 'href': '/v2/', 'rel': 'self' } ] } @decorators.api_version_manager(VERSION) def public_endpoints(driver, conf): queue_controller = driver._storage.queue_controller message_controller = driver._storage.message_controller claim_controller = driver._storage.claim_controller subscription_controller = driver._storage.subscription_controller topic_controller = driver._storage.topic_controller defaults = driver._defaults return [ # Home ('/', homedoc.Resource(conf)), # Queues Endpoints ('/queues', queues.CollectionResource(driver._validate, queue_controller)), ('/queues/{queue_name}', queues.ItemResource(driver._validate, queue_controller, message_controller)), ('/queues/{queue_name}/stats', stats.Resource(queue_controller)), ('/queues/{queue_name}/purge', purge.Resource(driver)), # Messages Endpoints ('/queues/{queue_name}/messages', messages.CollectionResource(driver._wsgi_conf, driver._validate, message_controller, queue_controller, defaults.message_ttl, driver._encryptor_factory)), ('/queues/{queue_name}/messages/{message_id}', messages.ItemResource(message_controller, queue_controller, driver._encryptor_factory)), # Claims Endpoints ('/queues/{queue_name}/claims', claims.CollectionResource(driver._wsgi_conf, driver._validate, claim_controller, defaults.claim_ttl, defaults.claim_grace)), ('/queues/{queue_name}/claims/{claim_id}', claims.ItemResource(driver._wsgi_conf, driver._validate, claim_controller, defaults.claim_ttl, defaults.claim_grace)), # Ping ('/ping', ping.Resource(driver._storage)), # Subscription Endpoints ('/queues/{queue_name}/subscriptions', subscriptions.CollectionResource(driver._validate, subscription_controller, defaults.subscription_ttl, queue_controller, conf)), ('/queues/{queue_name}/subscriptions/{subscription_id}', subscriptions.ItemResource(driver._validate, subscription_controller)), ('/queues/{queue_name}/subscriptions/{subscription_id}/confirm', subscriptions.ConfirmResource(driver._validate, subscription_controller, conf)), # Pre-Signed URL Endpoint ('/queues/{queue_name}/share', urls.Resource(driver)), # Topics Endpoints ('/topics', topic.CollectionResource(driver._validate, topic_controller)), ('/topics/{topic_name}', topic.ItemResource(driver._validate, topic_controller, message_controller)), ('/topics/{topic_name}/stats', topic_stats.Resource(topic_controller)), ('/topics/{topic_name}/purge', topic_purge.Resource(driver)), # Topic Messages Endpoints ('/topics/{topic_name}/messages', messages.CollectionResource(driver._wsgi_conf, driver._validate, message_controller, topic_controller, defaults.message_ttl, driver._encryptor_factory)),
('/topics/{topic_name}/messages/{message_id}', messages.ItemResource(message_controller, queue_controller, driver._encryptor_factory)), # Topic Subscription Endpoints ('/topics/{topic_name}/subscriptions', subscriptions.CollectionResource(driver._validate, subscription_controller, defaults.subscription_ttl, topic_controller, conf)), ('/topics/{topic_name}/subscriptions/{subscription_id}', subscriptions.ItemResource(driver._validate, subscription_controller)), ('/topics/{topic_name}/subscriptions/{subscription_id}/confirm', subscriptions.ConfirmResource(driver._validate, subscription_controller, conf)), ] @decorators.api_version_manager(VERSION) def private_endpoints(driver, conf): catalogue = [ # Health ('/health', health.Resource(driver._storage)), ] if conf.pooling: pools_controller = driver._control.pools_controller flavors_controller = driver._control.flavors_controller validate = driver._validate catalogue.extend([ ('/pools', pools.Listing(pools_controller, validate)), ('/pools/{pool}', pools.Resource(pools_controller)), ('/flavors', flavors.Listing(flavors_controller, pools_controller, validate)), ('/flavors/{flavor}', flavors.Resource(flavors_controller, pools_controller)), ]) return catalogue ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/transport/wsgi/v2_0/claims.py0000664000175100017510000001636615033040005021465 0ustar00mylesmyles# Copyright (c) 2013 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import falcon from oslo_log import log as logging from zaqar.common import decorators from zaqar.i18n import _ from zaqar.storage import errors as storage_errors from zaqar.transport import acl from zaqar.transport import utils from zaqar.transport import validation from zaqar.transport.wsgi import errors as wsgi_errors from zaqar.transport.wsgi import utils as wsgi_utils LOG = logging.getLogger(__name__) class CollectionResource(object): __slots__ = ( '_claim_controller', '_validate', '_claim_post_spec', '_default_meta', ) def __init__(self, wsgi_conf, validate, claim_controller, default_claim_ttl, default_grace_ttl): self._claim_controller = claim_controller self._validate = validate self._claim_post_spec = ( ('ttl', int, default_claim_ttl), ('grace', int, default_grace_ttl), ) # NOTE(kgriffs): Create this once up front, rather than creating # a new dict every time, for the sake of performance. self._default_meta = { 'ttl': default_claim_ttl, 'grace': default_grace_ttl, } @decorators.TransportLog("Claims collection") @acl.enforce("claims:create") def on_post(self, req, resp, project_id, queue_name): # Check for an explicit limit on the # of messages to claim limit = req.get_param_as_int('limit') claim_options = {} if limit is None else {'limit': limit} # NOTE(kgriffs): Clients may or may not actually include the # Content-Length header when the body is empty; the following # check works for both 0 and None. 
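# In other words, both "Content-Length: 0" and a missing header # (req.content_length is None) fall into the default-metadata branch below.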
if not req.content_length: # No values given, so use defaults metadata = self._default_meta else: # Read claim metadata (e.g., TTL) and raise appropriate # HTTP errors as needed. document = wsgi_utils.deserialize(req.stream, req.content_length) metadata = wsgi_utils.sanitize(document, self._claim_post_spec) # Claim some messages try: self._validate.claim_creation(metadata, limit=limit) cid, msgs = self._claim_controller.create( queue_name, metadata=metadata, project=project_id, **claim_options) # Buffer claimed messages # TODO(kgriffs): optimize, along with serialization (below) resp_msgs = list(msgs) except validation.ValidationFailed as ex: LOG.debug(ex) raise wsgi_errors.HTTPBadRequestAPI(str(ex)) except Exception: description = _('Claim could not be created.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) # Serialize claimed messages, if any. This logic assumes # the storage driver returned well-formed messages. if len(resp_msgs) != 0: base_path = req.path.rpartition('/')[0] resp_msgs = [wsgi_utils.format_message_v1_1(msg, base_path, cid) for msg in resp_msgs] resp.location = req.path + '/' + cid resp.text = utils.to_json({'messages': resp_msgs}) resp.status = falcon.HTTP_201 else: resp.status = falcon.HTTP_204 class ItemResource(object): __slots__ = ('_claim_controller', '_validate', '_claim_patch_spec') def __init__(self, wsgi_conf, validate, claim_controller, default_claim_ttl, default_grace_ttl): self._claim_controller = claim_controller self._validate = validate self._claim_patch_spec = ( ('ttl', int, default_claim_ttl), ('grace', int, default_grace_ttl), ) @decorators.TransportLog("Claims item") @acl.enforce("claims:get") def on_get(self, req, resp, project_id, queue_name, claim_id): try: meta, msgs = self._claim_controller.get( queue_name, claim_id=claim_id, project=project_id) # Buffer claimed messages # TODO(kgriffs): Optimize along with serialization (see below) meta['messages'] = list(msgs) except storage_errors.DoesNotExist as ex: LOG.debug(ex) raise wsgi_errors.HTTPNotFound(str(ex)) except Exception: description = _('Claim could not be queried.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) # Serialize claimed messages # TODO(kgriffs): Optimize base_path = req.path.rsplit('/', 2)[0] meta['messages'] = [wsgi_utils.format_message_v1_1(msg, base_path, claim_id) for msg in meta['messages']] meta['href'] = req.path del meta['id'] resp.text = utils.to_json(meta) # status defaults to 200 @decorators.TransportLog("Claims item") @acl.enforce("claims:update") def on_patch(self, req, resp, project_id, queue_name, claim_id): # Read claim metadata (e.g., TTL) and raise appropriate # HTTP errors as needed. 
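# The patch spec mirrors the post spec, so a partial body such as # {"ttl": 300} is padded with the configured default grace before # validation.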
document = wsgi_utils.deserialize(req.stream, req.content_length) metadata = wsgi_utils.sanitize(document, self._claim_patch_spec) try: self._validate.claim_updating(metadata) self._claim_controller.update(queue_name, claim_id=claim_id, metadata=metadata, project=project_id) resp.status = falcon.HTTP_204 except validation.ValidationFailed as ex: LOG.debug(ex) raise wsgi_errors.HTTPBadRequestAPI(str(ex)) except storage_errors.DoesNotExist as ex: LOG.debug(ex) raise wsgi_errors.HTTPNotFound(str(ex)) except Exception: description = _('Claim could not be updated.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) @decorators.TransportLog("Claims item") @acl.enforce("claims:delete") def on_delete(self, req, resp, project_id, queue_name, claim_id): try: self._claim_controller.delete(queue_name, claim_id=claim_id, project=project_id) resp.status = falcon.HTTP_204 except Exception: description = _('Claim could not be deleted.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/transport/wsgi/v2_0/flavors.py0000664000175100017510000003430515033040005021662 0ustar00mylesmyles# Copyright (c) 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import falcon import jsonschema from oslo_log import log from zaqar.common.api.schemas import flavors as schema from zaqar.common import decorators from zaqar.i18n import _ from zaqar.storage import errors from zaqar.transport import acl from zaqar.transport import utils as transport_utils from zaqar.transport import validation from zaqar.transport.wsgi import errors as wsgi_errors from zaqar.transport.wsgi import utils as wsgi_utils LOG = log.getLogger(__name__) class Listing(object): """A resource to list registered flavors :param flavors_controller: means to interact with storage """ def __init__(self, flavors_controller, pools_controller, validate): self._ctrl = flavors_controller self._pools_ctrl = pools_controller self._validate = validate @decorators.TransportLog("Flavors collection") @acl.enforce("flavors:get_all") def on_get(self, request, response, project_id): """Returns a flavor listing as objects embedded in an object: :: { "flavors": [ {"href": "", "capabilities": {}, "pool_list": ""}, ... ], "links": [ {"rel": "next", "href": ""}, ... 
] } :returns: HTTP | 200 """ LOG.debug('LIST flavors for project_id %s', project_id) store = {} request.get_param('marker', store=store) request.get_param_as_int('limit', store=store) detailed = request.get_param_as_bool('detailed') try: self._validate.flavor_listing(**store) except validation.ValidationFailed as ex: LOG.debug(ex) raise wsgi_errors.HTTPBadRequestAPI(str(ex)) cursor = self._ctrl.list(project=project_id, **store) flavors = list(next(cursor)) results = {'links': []} if flavors: store['marker'] = next(cursor) for entry in flavors: entry['href'] = request.path + '/' + entry['name'] data = {} data['name'] = entry['name'] pool_list = \ list(self._pools_ctrl.get_pools_by_flavor(flavor=data)) pool_name_list = [] if len(pool_list) > 0: pool_name_list = [x['name'] for x in pool_list] entry['pool_list'] = pool_name_list if detailed: caps = self._pools_ctrl.capabilities(flavor=entry) entry['capabilities'] = [cap.name for cap in caps] if detailed is not None: store['detailed'] = detailed if flavors: results['links'] = [ { 'rel': 'next', 'href': request.path + falcon.to_query_str(store) } ] results['flavors'] = flavors response.text = transport_utils.to_json(results) response.status = falcon.HTTP_200 class Resource(object): """A handler for an individual flavor. :param flavors_controller: means to interact with storage """ def __init__(self, flavors_controller, pools_controller): self._ctrl = flavors_controller self._pools_ctrl = pools_controller validator_type = jsonschema.Draft4Validator self._validators = { 'create': validator_type(schema.create), 'pool_list': validator_type(schema.patch_pool_list), 'capabilities': validator_type(schema.patch_capabilities), } @decorators.TransportLog("Flavors item") @acl.enforce("flavors:get") def on_get(self, request, response, project_id, flavor): """Returns a JSON object for a single flavor entry: :: {"pool": "", "pool_list": [], "capabilities": {...}} :returns: HTTP | [200, 404] """ LOG.debug('GET flavor - name: %s', flavor) data = None try: data = self._ctrl.get(flavor, project=project_id) capabilities = self._pools_ctrl.capabilities(flavor=data) data['capabilities'] = [cap.name for cap in capabilities] pool_list =\ list(self._pools_ctrl.get_pools_by_flavor(flavor=data)) pool_name_list = [] if len(pool_list) > 0: pool_name_list = [x['name'] for x in pool_list] data['pool_list'] = pool_name_list except errors.FlavorDoesNotExist as ex: LOG.debug(ex) raise wsgi_errors.HTTPNotFound(str(ex)) data['href'] = request.path response.text = transport_utils.to_json(data) def _check_pools_exists(self, pool_list): if pool_list is not None: for pool in pool_list: if not self._pools_ctrl.exists(pool): raise errors.PoolDoesNotExist(pool) def _update_pools_by_flavor(self, flavor, pool_list): if pool_list is not None: for pool in pool_list: self._pools_ctrl.update(pool, flavor=flavor) def _clean_pools_by_flavor(self, flavor, pool_list=None): if pool_list is None: flavor_obj = {} flavor_obj['name'] = flavor pllt = list(self._pools_ctrl.get_pools_by_flavor( flavor=flavor_obj)) pool_list = [x['name'] for x in pllt] if pool_list is not None: for pool in pool_list: self._pools_ctrl.update(pool, flavor="") def _on_put_by_pool_list(self, request, response, project_id, flavor, pool_list): LOG.debug('PUT flavor by pool list - name: %s', flavor) # NOTE(gengchc2): If the flavor is configured using the new schema, # a list of pools is required.
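# An empty pool_list is therefore rejected below before any storage # calls are made.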
if len(pool_list) == 0: response.status = falcon.HTTP_400 response.location = request.path raise falcon.HTTPBadRequest( title=_('Unable to create'), description=_('pool_list cannot be empty')) # NOTE(gengchc2): Check if pools in the pool_list exist. try: self._check_pools_exists(pool_list) except errors.PoolDoesNotExist as ex: description = (_('Flavor %(flavor)s could not be created, ' 'error:%(msg)s') % dict(flavor=flavor, msg=str(ex))) LOG.exception(description) raise falcon.HTTPBadRequest( title=_('Unable to create'), description=description) capabilities = self._pools_ctrl.capabilities(name=pool_list[0]) try: self._ctrl.create(flavor, project=project_id, capabilities=capabilities) response.status = falcon.HTTP_201 response.location = request.path except errors.ConnectionError as ex: description = (_('Flavor %(flavor)s could not be created, ' 'error:%(msg)s') % dict(flavor=flavor, msg=str(ex))) LOG.exception(description) raise falcon.HTTPBadRequest( title=_('Unable to create'), description=description) # NOTE(gengchc2): Update the 'flavor' field in pools tables. try: self._update_pools_by_flavor(flavor, pool_list) except errors.ConnectionError as ex: description = (_('Flavor %(flavor)s could not be created, ' 'error:%(msg)s') % dict(flavor=flavor, msg=str(ex))) LOG.exception(description) raise falcon.HTTPBadRequest( title=_('Unable to create'), description=description) @decorators.TransportLog("Flavors item") @acl.enforce("flavors:create") def on_put(self, request, response, project_id, flavor): """Registers a new flavor. Expects the following input: :: {"pool_list": [], "capabilities": {}} A capabilities object may also be provided. :returns: HTTP | [201, 400] """ LOG.debug('PUT flavor - name: %s', flavor) data = wsgi_utils.load(request) wsgi_utils.validate(self._validators['create'], data) pool_list = data.get('pool_list') if pool_list is not None: self._on_put_by_pool_list(request, response, project_id, flavor, pool_list) @decorators.TransportLog("Flavors item") @acl.enforce("flavors:delete") def on_delete(self, request, response, project_id, flavor): """Deregisters a flavor. :returns: HTTP | [204] """ LOG.debug('DELETE flavor - name: %s', flavor) # NOTE(gengchc2): If the flavor is configured using the new schema, # the flavor field in the pools needs to be cleaned. try: self._clean_pools_by_flavor(flavor) except errors.ConnectionError: description = (_('Flavor %(flavor)s could not be deleted.') % dict(flavor=flavor)) LOG.exception(description) raise falcon.HTTPBadRequest( title=_('Unable to delete'), description=description) self._ctrl.delete(flavor, project=project_id) response.status = falcon.HTTP_204 def _on_patch_by_pool_list(self, request, response, project_id, flavor, pool_list): if len(pool_list) == 0: response.status = falcon.HTTP_400 response.location = request.path raise falcon.HTTPBadRequest( title=_('Unable to update'), description=_('pool_list cannot be empty')) # NOTE(gengchc2): If the flavor does not exist, return try: self._ctrl.get(flavor, project=project_id) except errors.FlavorDoesNotExist as ex: LOG.debug(ex) raise wsgi_errors.HTTPNotFound(str(ex)) flavor_obj = {} flavor_obj['name'] = flavor # NOTE(gengchc2): Get the list of pools currently associated with # the flavor. pool_list_old = list(self._pools_ctrl.get_pools_by_flavor( flavor=flavor_obj)) # NOTE(gengchc2): Check that the new pools in the pool_list exist.
try: self._check_pools_exists(pool_list) except errors.PoolDoesNotExist as ex: description = (_('Flavor %(flavor)s cannot be updated, ' 'error:%(msg)s') % dict(flavor=flavor, msg=str(ex))) LOG.exception(description) raise falcon.HTTPBadRequest( title=_('Unable to update'), description=description) capabilities = self._pools_ctrl.capabilities(name=pool_list[0]) try: self._ctrl.update(flavor, project=project_id, capabilities=capabilities) resp_data = self._ctrl.get(flavor, project=project_id) resp_data['capabilities'] = [cap.name for cap in capabilities] except errors.FlavorDoesNotExist as ex: LOG.exception('Flavor "%s" does not exist', flavor) raise wsgi_errors.HTTPNotFound(str(ex)) # (gengchc) Update the flavor field in the new pool list. try: self._update_pools_by_flavor(flavor, pool_list) except errors.ConnectionError as ex: description = (_('Flavor %(flavor)s could not be updated, ' 'error:%(msg)s') % dict(flavor=flavor, msg=str(ex))) LOG.exception(description) raise falcon.HTTPBadRequest( title=_('Unable to update'), description=description) # (gengchc) Remove the flavor from the old pool list. try: pool_list_removed = [] for pool_old in pool_list_old: if pool_old['name'] not in pool_list: pool_list_removed.append(pool_old['name']) self._clean_pools_by_flavor(flavor, pool_list_removed) except errors.ConnectionError as ex: description = (_('Flavor %(flavor)s could not be updated, ' 'error:%(msg)s') % dict(flavor=flavor, msg=str(ex))) LOG.exception(description) raise falcon.HTTPBadRequest( title=_('Unable to update'), description=description) resp_data['pool_list'] = pool_list resp_data['href'] = request.path response.text = transport_utils.to_json(resp_data) @decorators.TransportLog("Flavors item") @acl.enforce("flavors:update") def on_patch(self, request, response, project_id, flavor): """Allows one to update a flavor's pool list. This method expects the user to submit a JSON object containing 'pool_list'. If it is not found, the request is flagged as bad. There is also strict format checking through the use of jsonschema. Appropriate errors are returned in each case for badly formatted input. :returns: HTTP | [200, 400] """ LOG.debug('PATCH flavor - name: %s', flavor) data = wsgi_utils.load(request) field = 'pool_list' if field not in data: LOG.debug('PATCH flavor, bad params') raise wsgi_errors.HTTPBadRequestBody( '`pool_list` needs to be specified' ) wsgi_utils.validate(self._validators[field], data) pool_list = data.get('pool_list') # NOTE(gengchc2): If pool_list is not None, the flavor is configured # using the new schema, and a list of pools is required. if pool_list is not None: self._on_patch_by_pool_list(request, response, project_id, flavor, pool_list) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/transport/wsgi/v2_0/health.py0000664000175100017510000000260515033040005021451 0ustar00mylesmyles# Copyright (c) 2014 Rackspace, Inc. # Copyright 2014 Catalyst IT Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License.
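# ---------------------------------------------------------------------------
# Editor's illustration (not part of the original sources; flavor and pool
# names are hypothetical): the flavors resource above accepts payloads like
#
#     PUT /v2/flavors/gold
#     {"pool_list": ["pool-a", "pool-b"]}
#
#     PATCH /v2/flavors/gold
#     {"pool_list": ["pool-b", "pool-c"]}
#
# On PATCH, pools dropped from the list ("pool-a" here) get their 'flavor'
# field cleared via _clean_pools_by_flavor(), while the listed pools are
# re-pointed at the flavor via _update_pools_by_flavor().
# ---------------------------------------------------------------------------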
from oslo_log import log as logging from zaqar.common import decorators from zaqar.i18n import _ from zaqar.transport import acl from zaqar.transport import utils from zaqar.transport.wsgi import errors as wsgi_errors LOG = logging.getLogger(__name__) class Resource(object): __slots__ = ('_driver',) def __init__(self, driver): self._driver = driver @decorators.TransportLog("Health item") @acl.enforce("health:get") def on_get(self, req, resp, **kwargs): try: resp_dict = self._driver.health() resp.text = utils.to_json(resp_dict) except Exception: description = _('Health status could not be read.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/transport/wsgi/v2_0/homedoc.py0000664000175100017510000003054515033040005021626 0ustar00mylesmyles# Copyright (c) 2013 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. from oslo_serialization import jsonutils # NOTE(kgriffs): http://tools.ietf.org/html/draft-nottingham-json-home-03 JSON_HOME = { 'resources': { # ----------------------------------------------------------------- # Queues # ----------------------------------------------------------------- 'rel/queues': { 'href-template': '/v2/queues{?marker,limit,detailed}', 'href-vars': { 'marker': 'param/marker', 'limit': 'param/queue_limit', 'detailed': 'param/detailed', }, 'hints': { 'allow': ['GET'], 'formats': { 'application/json': {}, }, }, }, 'rel/queue': { 'href-template': '/v2/queues/{queue_name}', 'href-vars': { 'queue_name': 'param/queue_name', }, 'hints': { 'allow': ['GET', 'PUT', 'DELETE', 'PATCH'], 'formats': { 'application/json': {}, }, }, }, 'rel/queue_stats': { 'href-template': '/v2/queues/{queue_name}/stats', 'href-vars': { 'queue_name': 'param/queue_name', }, 'hints': { 'allow': ['GET'], 'formats': { 'application/json': {}, }, }, }, 'rel/queue_share': { 'href-template': '/v2/queues/{queue_name}/share', 'href-vars': { 'queue_name': 'param/queue_name', }, 'hints': { 'allow': ['POST'], 'formats': { 'application/json': {}, }, 'accept-post': ['application/json'], }, }, 'rel/queue_purge': { 'href-template': '/v2/queues/{queue_name}/purge', 'href-vars': { 'queue_name': 'param/queue_name', }, 'hints': { 'allow': ['POST'], 'formats': { 'application/json': {}, }, 'accept-post': ['application/json'], }, }, # ----------------------------------------------------------------- # Messages # ----------------------------------------------------------------- 'rel/messages': { 'href-template': ('/v2/queues/{queue_name}/messages' '{?marker,limit,echo,include_claimed}'), 'href-vars': { 'queue_name': 'param/queue_name', 'marker': 'param/marker', 'limit': 'param/messages_limit', 'echo': 'param/echo', 'include_claimed': 'param/include_claimed', }, 'hints': { 'allow': ['GET'], 'formats': { 'application/json': {}, }, }, }, 'rel/post_messages': { 'href-template': '/v2/queues/{queue_name}/messages', 'href-vars': { 'queue_name': 'param/queue_name', }, 'hints': { 
'allow': ['POST'], 'formats': { 'application/json': {}, }, 'accept-post': ['application/json'], }, }, 'rel/messages_delete': { 'href-template': '/v2/queues/{queue_name}/messages{?ids,pop}', 'href-vars': { 'queue_name': 'param/queue_name', 'ids': 'param/ids', 'pop': 'param/pop' }, 'hints': { 'allow': [ 'DELETE' ], 'formats': { 'application/json': {} } } }, 'rel/message_delete': { 'href-template': '/v2/queues/{queue_name}/messages/{message_id}{?claim}', # noqa 'href-vars': { 'queue_name': 'param/queue_name', 'message_id': 'param/message_id', 'claim': 'param/claim_id' }, 'hints': { 'allow': [ 'DELETE' ], 'formats': { 'application/json': {} } } }, 'rel/message_get': { 'href-template': '/v2/queues/{queue_name}/messages/{message_id}', 'href-vars': { 'queue_name': 'param/queue_name', 'message_id': 'param/message_id' }, 'hints': { 'allow': [ 'GET' ], 'formats': { 'application/json': {} } } }, # ----------------------------------------------------------------- # Claims # ----------------------------------------------------------------- 'rel/claim': { 'href-template': '/v2/queues/{queue_name}/claims/{claim_id}', 'href-vars': { 'queue_name': 'param/queue_name', 'claim_id': 'param/claim_id', }, 'hints': { 'allow': ['GET'], 'formats': { 'application/json': {}, }, }, }, 'rel/post_claim': { 'href-template': '/v2/queues/{queue_name}/claims{?limit}', 'href-vars': { 'queue_name': 'param/queue_name', 'limit': 'param/claim_limit', }, 'hints': { 'allow': ['POST'], 'formats': { 'application/json': {}, }, 'accept-post': ['application/json'] }, }, 'rel/patch_claim': { 'href-template': '/v2/queues/{queue_name}/claims/{claim_id}', 'href-vars': { 'queue_name': 'param/queue_name', 'claim_id': 'param/claim_id', }, 'hints': { 'allow': ['PATCH'], 'formats': { 'application/json': {}, }, 'accept-post': ['application/json'] }, }, 'rel/delete_claim': { 'href-template': '/v2/queues/{queue_name}/claims/{claim_id}', 'href-vars': { 'queue_name': 'param/queue_name', 'claim_id': 'param/claim_id', }, 'hints': { 'allow': ['DELETE'], 'formats': { 'application/json': {}, }, }, }, # ----------------------------------------------------------------- # Subscriptions # ----------------------------------------------------------------- 'rel/subscriptions_get': { 'href-template': '/v2/queues/{queue_name}/subscriptions{?marker,limit}', # noqa 'href-vars': { 'queue_name': 'param/queue_name', 'marker': 'param/marker', 'limit': 'param/subscription_limit', }, 'hints': { 'allow': ['GET'], 'formats': { 'application/json': {}, } } }, 'rel/subscriptions_post': { 'href-template': '/v2/queues/{queue_name}/subscriptions', 'href-vars': { 'queue_name': 'param/queue_name', 'limit': 'param/subscription_limit', }, 'hints': { 'allow': ['POST'], 'formats': { 'application/json': {}, }, 'accept-post': ['application/json'] } }, 'rel/subscription': { 'href-template': '/v2/queues/{queue_name}/subscriptions/{subscriptions_id}', # noqa 'href-vars': { 'queue_name': 'param/queue_name', 'subscriptions_id': 'param/subscriptions_id', }, 'hints': { 'allow': ['GET', 'DELETE'], 'formats': { 'application/json': {}, } } }, 'rel/subscription_patch': { 'href-template': '/v2/queues/{queue_name}/subscriptions/{subscriptions_id}', # noqa 'href-vars': { 'queue_name': 'param/queue_name', 'subscriptions_id': 'param/subscriptions_id', }, 'hints': { 'allow': ['PATCH'], 'formats': { 'application/json': {}, }, 'accept-post': ['application/json'] } }, # ----------------------------------------------------------------- # Ping # 
----------------------------------------------------------------- 'rel/ping': { 'href-template': '/v2/ping', 'hints': { 'allow': ['GET'], 'formats': { 'application/json': {}, } } } } } ADMIN_RESOURCES = { # ----------------------------------------------------------------- # Pools # ----------------------------------------------------------------- 'rel/pools': { 'href-template': '/v2/pools{?detailed,limit,marker}', 'href-vars': { 'detailed': 'param/detailed', 'limit': 'param/pool_limit', 'marker': 'param/marker', }, 'hints': { 'allow': ['GET'], 'formats': { 'application/json': {}, }, }, }, 'rel/pool': { 'href-template': '/v2/pools/{pool_name}', 'href-vars': { 'pool_name': 'param/pool_name', }, 'hints': { 'allow': ['GET', 'PUT', 'PATCH', 'DELETE'], 'formats': { 'application/json': {}, }, }, }, # ----------------------------------------------------------------- # Flavors # ----------------------------------------------------------------- 'rel/flavors': { 'href-template': '/v2/flavors{?detailed,limit,marker}', 'href-vars': { 'detailed': 'param/detailed', 'limit': 'param/flavor_limit', 'marker': 'param/marker', }, 'hints': { 'allow': ['GET'], 'formats': { 'application/json': {}, }, }, }, 'rel/flavor': { 'href-template': '/v2/flavors/{flavor_name}', 'href-vars': { 'flavor_name': 'param/flavor_name', }, 'hints': { 'allow': ['GET', 'PUT', 'PATCH', 'DELETE'], 'formats': { 'application/json': {}, }, }, }, # ----------------------------------------------------------------- # Health # ----------------------------------------------------------------- 'rel/health': { 'href': '/v2/health', 'hints': { 'allow': ['GET'], 'formats': { 'application/json': {}, }, }, }, } class Resource(object): def __init__(self, conf): if conf.admin_mode: JSON_HOME['resources'].update(ADMIN_RESOURCES) document = jsonutils.dumps(JSON_HOME, ensure_ascii=False, indent=4) self.document_utf8 = document.encode('utf-8') def on_get(self, req, resp, project_id): resp.data = self.document_utf8 resp.content_type = 'application/json-home' resp.cache_control = ['max-age=86400'] # status defaults to 200 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/transport/wsgi/v2_0/messages.py0000664000175100017510000004117615033040005022021 0ustar00mylesmyles# Copyright (c) 2013 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
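# ---------------------------------------------------------------------------
# Editor's note (observation on homedoc.py above, not original text): when
# conf.admin_mode is set, Resource.__init__ merges ADMIN_RESOURCES into the
# module-level JSON_HOME dict in place, so any Resource built later in the
# same process also advertises the admin relations.
# ---------------------------------------------------------------------------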
import falcon from oslo_log import log as logging from zaqar.common import decorators from zaqar.common.transport.wsgi import helpers as wsgi_helpers from zaqar.i18n import _ from zaqar.storage import errors as storage_errors from zaqar.transport import acl from zaqar.transport import utils from zaqar.transport import validation from zaqar.transport.wsgi import errors as wsgi_errors from zaqar.transport.wsgi import utils as wsgi_utils LOG = logging.getLogger(__name__) class CollectionResource(object): __slots__ = ( '_message_controller', '_queue_controller', '_wsgi_conf', '_validate', '_default_message_ttl', '_encryptor' ) def __init__(self, wsgi_conf, validate, message_controller, queue_controller, default_message_ttl, encryptor_factory): self._wsgi_conf = wsgi_conf self._validate = validate self._message_controller = message_controller self._queue_controller = queue_controller self._default_message_ttl = default_message_ttl self._encryptor = encryptor_factory.getEncryptor() # ---------------------------------------------------------------------- # Helpers # ---------------------------------------------------------------------- def _get_by_id(self, base_path, project_id, queue_name, ids): """Returns one or more messages from the queue by ID.""" try: self._validate.message_listing(limit=len(ids)) messages = self._message_controller.bulk_get( queue_name, message_ids=ids, project=project_id) queue_meta = self._queue_controller.get_metadata(queue_name, project_id) except validation.ValidationFailed as ex: LOG.debug(ex) raise wsgi_errors.HTTPBadRequestAPI(str(ex)) except storage_errors.QueueDoesNotExist: LOG.exception('Queue name "%s" does not exist', queue_name) queue_meta = None except Exception: description = _('Message could not be retrieved.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) # Prepare response messages = list(messages) if not messages: return None # Decrypt messages if queue_meta and queue_meta.get('_enable_encrypt_messages', False): self._encryptor.message_decrypted(messages) messages = [wsgi_utils.format_message_v1_1(m, base_path, m['claim_id']) for m in messages] return {'messages': messages} def _get(self, req, project_id, queue_name): client_uuid = wsgi_helpers.get_client_uuid(req) kwargs = {} # NOTE(kgriffs): This syntax ensures that # we don't clobber default values with None. req.get_param('marker', store=kwargs) req.get_param_as_int('limit', store=kwargs) req.get_param_as_bool('echo', store=kwargs) req.get_param_as_bool('include_claimed', store=kwargs) req.get_param_as_bool('include_delayed', store=kwargs) try: queue_meta = {} try: # NOTE(cdyangzhenyu): In order to determine whether the # queue has a delay attribute, the metadata of the queue # is obtained here. This may have a little performance impact. # So maybe a refactor is needed in the future. queue_meta = self._queue_controller.get_metadata(queue_name, project_id) except storage_errors.DoesNotExist: LOG.exception('Queue name "%s" does not exist', queue_name) queue_delay = queue_meta.get('_default_message_delay') if not queue_delay: # NOTE(cdyangzhenyu): If the queue does not have the metadata # attribute _default_message_delay, we don't filter # out delayed messages.
kwargs['include_delayed'] = True self._validate.message_listing(**kwargs) results = self._message_controller.list( queue_name, project=project_id, client_uuid=client_uuid, **kwargs) # Buffer messages cursor = next(results) messages = list(cursor) # Decrypt messages if queue_meta.get('_enable_encrypt_messages', False): self._encryptor.message_decrypted(messages) except validation.ValidationFailed as ex: LOG.debug(ex) raise wsgi_errors.HTTPBadRequestAPI(str(ex)) except storage_errors.QueueDoesNotExist as ex: LOG.debug(ex) messages = None except Exception: description = _('Messages could not be listed.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) if not messages: messages = [] else: # Found some messages, so prepare the response kwargs['marker'] = next(results) base_path = req.path.rsplit('/', 1)[0] messages = [wsgi_utils.format_message_v1_1(m, base_path, m['claim_id']) for m in messages] links = [] if messages: links = [ { 'rel': 'next', 'href': req.path + falcon.to_query_str(kwargs) } ] return { 'messages': messages, 'links': links } # ---------------------------------------------------------------------- # Interface # ---------------------------------------------------------------------- @decorators.TransportLog("Messages collection") @acl.enforce("messages:create") def on_post(self, req, resp, project_id, queue_name): client_uuid = wsgi_helpers.get_client_uuid(req) try: # NOTE(flwang): Replacing 'exists' with 'get_metadata' won't impact # the performance, since both of them call # collection.find_one() queue_meta = None try: queue_meta = self._queue_controller.get_metadata(queue_name, project_id) except storage_errors.DoesNotExist: self._validate.queue_identification(queue_name, project_id) self._queue_controller.create(queue_name, project=project_id) # NOTE(flwang): Queue is created in lazy mode, so no metadata is # set.
queue_meta = {} queue_max_msg_size = queue_meta.get('_max_messages_post_size') queue_default_ttl = queue_meta.get('_default_message_ttl') queue_delay = queue_meta.get('_default_message_delay') queue_encrypted = queue_meta.get('_enable_encrypt_messages', False) if queue_default_ttl: message_post_spec = (('ttl', int, queue_default_ttl), ('body', '*', None),) else: message_post_spec = (('ttl', int, self._default_message_ttl), ('body', '*', None),) if queue_delay: message_post_spec += (('delay', int, queue_delay),) # Place JSON size restriction before parsing self._validate.message_length(req.content_length, max_msg_post_size=queue_max_msg_size) except validation.ValidationFailed as ex: LOG.debug(ex) raise wsgi_errors.HTTPBadRequestAPI(str(ex)) # Deserialize and validate the incoming messages document = wsgi_utils.deserialize(req.stream, req.content_length) if 'messages' not in document: description = _('No messages were found in the request body.') raise wsgi_errors.HTTPBadRequestAPI(description) messages = wsgi_utils.sanitize(document['messages'], message_post_spec, doctype=wsgi_utils.JSONArray) try: self._validate.message_posting(messages) if queue_encrypted: self._encryptor.message_encrypted(messages) message_ids = self._message_controller.post( queue_name, messages=messages, project=project_id, client_uuid=client_uuid) except validation.ValidationFailed as ex: LOG.debug(ex) raise wsgi_errors.HTTPBadRequestAPI(str(ex)) except storage_errors.DoesNotExist as ex: LOG.debug(ex) raise wsgi_errors.HTTPNotFound(str(ex)) except storage_errors.MessageConflict: description = _('No messages could be enqueued.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) except Exception: description = _('Messages could not be enqueued.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) # Prepare the response ids_value = ','.join(message_ids) resp.location = req.path + '?ids=' + ids_value hrefs = [req.path + '/' + id for id in message_ids] body = {'resources': hrefs} resp.text = utils.to_json(body) resp.status = falcon.HTTP_201 @decorators.TransportLog("Messages collection") @acl.enforce("messages:get_all") def on_get(self, req, resp, project_id, queue_name): ids = req.get_param_as_list('ids') if ids is None: response = self._get(req, project_id, queue_name) else: response = self._get_by_id(req.path.rsplit('/', 1)[0], project_id, queue_name, ids) if response is None: # NOTE(TheSriram): Trying to get a message by ID should # return the message if it's present, otherwise a 404, since # the message might have been deleted.
msg = _('No messages with IDs: {ids} found in the queue {queue} ' 'for project {project}.') description = msg.format(queue=queue_name, project=project_id, ids=ids) raise wsgi_errors.HTTPNotFound(description) else: resp.text = utils.to_json(response) # status defaults to 200 @decorators.TransportLog("Messages collection") @acl.enforce("messages:delete_all") def on_delete(self, req, resp, project_id, queue_name): ids = req.get_param_as_list('ids') claim_ids = None if self._validate.get_limit_conf_value('message_delete_with_claim_id'): claim_ids = req.get_param_as_list('claim_ids') pop_limit = req.get_param_as_int('pop') try: self._validate.message_deletion(ids, pop_limit, claim_ids) except validation.ValidationFailed as ex: LOG.debug(ex) raise wsgi_errors.HTTPBadRequestAPI(str(ex)) if ids: resp.status = self._delete_messages_by_id(queue_name, ids, project_id, claim_ids) elif pop_limit: resp.status, resp.text = self._pop_messages(queue_name, project_id, pop_limit) def _delete_messages_by_id(self, queue_name, ids, project_id, claim_ids=None): try: self._message_controller.bulk_delete( queue_name, message_ids=ids, project=project_id, claim_ids=claim_ids) except Exception: description = _('Messages could not be deleted.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) return falcon.HTTP_204 def _pop_messages(self, queue_name, project_id, pop_limit): try: LOG.debug('POP messages - queue: %(queue)s, ' 'project: %(project)s', {'queue': queue_name, 'project': project_id}) messages = self._message_controller.pop( queue_name, project=project_id, limit=pop_limit) except Exception: description = _('Messages could not be popped.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) # Prepare response if not messages: messages = [] body = {'messages': messages} body = utils.to_json(body) return falcon.HTTP_200, body class ItemResource(object): __slots__ = ( '_message_controller', '_queue_controller', '_encryptor' ) def __init__(self, message_controller, queue_controller, encryptor_factory): self._message_controller = message_controller self._queue_controller = queue_controller self._encryptor = encryptor_factory.getEncryptor() @decorators.TransportLog("Messages item") @acl.enforce("messages:get") def on_get(self, req, resp, project_id, queue_name, message_id): try: message = self._message_controller.get( queue_name, message_id, project=project_id) queue_meta = self._queue_controller.get_metadata(queue_name, project_id) # Decrypt messages if queue_meta.get('_enable_encrypt_messages', False): self._encryptor.message_decrypted([message]) except storage_errors.DoesNotExist as ex: LOG.debug(ex) raise wsgi_errors.HTTPNotFound(str(ex)) except Exception: description = _('Message could not be retrieved.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) # Prepare response message['href'] = req.path message = wsgi_utils.format_message_v1_1(message, req.path.rsplit('/', 2)[0], message['claim_id']) resp.text = utils.to_json(message) # status defaults to 200 @decorators.TransportLog("Messages item") @acl.enforce("messages:delete") def on_delete(self, req, resp, project_id, queue_name, message_id): error_title = _('Unable to delete') try: self._message_controller.delete( queue_name, message_id=message_id, project=project_id, claim=req.get_param('claim_id')) except storage_errors.MessageNotClaimed as ex: LOG.debug(ex) description = _('A claim was specified, but the message ' 'is not currently claimed.') raise 
falcon.HTTPBadRequest( title=error_title, description=description) except storage_errors.ClaimDoesNotExist as ex: LOG.debug(ex) description = _('The specified claim does not exist or ' 'has expired.') raise falcon.HTTPBadRequest( title=error_title, description=description) except storage_errors.NotPermitted as ex: LOG.debug(ex) description = _('This message is claimed; it cannot be ' 'deleted without a valid claim ID.') raise falcon.HTTPForbidden( title=error_title, description=description) except Exception: description = _('Message could not be deleted.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) # All good resp.status = falcon.HTTP_204 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/transport/wsgi/v2_0/ping.py0000664000175100017510000000223215033040005021145 0ustar00mylesmyles# Copyright 2014 IBM Corp. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. import falcon from zaqar.common import decorators from zaqar.transport import acl class Resource(object): __slots__ = ('_driver',) def __init__(self, driver): self._driver = driver @decorators.TransportLog("Ping item") @acl.enforce("ping:get") def on_get(self, req, resp, **kwargs): resp.status = (falcon.HTTP_204 if self._driver.is_alive() else falcon.HTTP_503) @decorators.TransportLog("Ping item") @acl.enforce("ping:get") def on_head(self, req, resp, **kwargs): resp.status = falcon.HTTP_204 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/transport/wsgi/v2_0/pools.py0000664000175100017510000002126615033040005021344 0ustar00mylesmyles# Copyright (c) 2013 Rackspace Hosting, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """pools: a resource to handle storage pool management A pool is added by an operator by interacting with the pooling-related endpoints.
When specifying a pool, the following fields are required: :: { "name": string, "weight": integer, "uri": string::uri } Furthermore, depending on the underlying storage type of pool being registered, there is an optional field: :: { "options": {...} } """ import falcon import jsonschema from oslo_log import log from zaqar.common.api.schemas import pools as schema from zaqar.common import decorators from zaqar.common import utils as common_utils from zaqar.i18n import _ from zaqar.storage import errors from zaqar.storage import utils as storage_utils from zaqar.transport import acl from zaqar.transport import utils as transport_utils from zaqar.transport import validation from zaqar.transport.wsgi import errors as wsgi_errors from zaqar.transport.wsgi import utils as wsgi_utils LOG = log.getLogger(__name__) class Listing(object): """A resource to list registered pools :param pools_controller: means to interact with storage """ def __init__(self, pools_controller, validate): self._ctrl = pools_controller self._validate = validate @decorators.TransportLog("Pools collection") @acl.enforce("pools:get_all") def on_get(self, request, response, project_id): """Returns a pool listing as objects embedded in an object: :: { "pools": [ {"href": "", "weight": 100, "uri": ""}, ... ], "links": [ {"href": "", "rel": "next"} ] } :returns: HTTP | 200 """ LOG.debug('LIST pools') store = {} request.get_param('marker', store=store) request.get_param_as_int('limit', store=store) request.get_param_as_bool('detailed', store=store) try: self._validate.pool_listing(**store) except validation.ValidationFailed as ex: LOG.debug(ex) raise wsgi_errors.HTTPBadRequestAPI(str(ex)) cursor = self._ctrl.list(**store) pools = list(next(cursor)) results = {'links': []} if pools: store['marker'] = next(cursor) for entry in pools: entry['href'] = request.path + '/' + entry['name'] results['links'] = [ { 'rel': 'next', 'href': request.path + falcon.to_query_str(store) } ] results['pools'] = pools response.content_location = request.relative_uri response.text = transport_utils.to_json(results) response.status = falcon.HTTP_200 class Resource(object): """A handler for an individual pool. :param pools_controller: means to interact with storage """ def __init__(self, pools_controller): self._ctrl = pools_controller validator_type = jsonschema.Draft4Validator self._validators = { 'weight': validator_type(schema.patch_weight), 'uri': validator_type(schema.patch_uri), 'flavor': validator_type(schema.patch_flavor), 'options': validator_type(schema.patch_options), 'create': validator_type(schema.create) } @decorators.TransportLog("Pools item") @acl.enforce("pools:get") def on_get(self, request, response, project_id, pool): """Returns a JSON object for a single pool entry: :: {"weight": 100, "uri": "", options: {...}} :returns: HTTP | [200, 404] """ LOG.debug('GET pool - name: %s', pool) data = None detailed = request.get_param_as_bool('detailed') or False try: data = self._ctrl.get(pool, detailed) except errors.PoolDoesNotExist as ex: LOG.debug(ex) raise wsgi_errors.HTTPNotFound(str(ex)) data['href'] = request.path response.text = transport_utils.to_json(data) @decorators.TransportLog("Pools item") @acl.enforce("pools:create") def on_put(self, request, response, project_id, pool): """Registers a new pool. Expects the following input: :: {"weight": 100, "uri": ""} An options object may also be provided.
:returns: HTTP | [201, 204] """ LOG.debug('PUT pool - name: %s', pool) conf = self._ctrl.driver.conf data = wsgi_utils.load(request) wsgi_utils.validate(self._validators['create'], data) if not storage_utils.can_connect(data['uri'], conf=conf): raise wsgi_errors.HTTPBadRequestBody( 'cannot connect to %s' % data['uri'] ) try: self._ctrl.create(pool, weight=data['weight'], uri=data['uri'], flavor=data.get('flavor', None), options=data.get('options', {})) response.status = falcon.HTTP_201 response.location = request.path except errors.PoolCapabilitiesMismatch as e: title = _('Unable to create pool') LOG.exception(title) raise falcon.HTTPBadRequest(title=title, description=str(e)) except errors.PoolAlreadyExists as e: LOG.exception('Pool "%s" already exists', pool) raise wsgi_errors.HTTPConflict(str(e)) @decorators.TransportLog("Pools item") @acl.enforce("pools:delete") def on_delete(self, request, response, project_id, pool): """Deregisters a pool. :returns: HTTP | [204, 403] """ LOG.debug('DELETE pool - name: %s', pool) try: self._ctrl.delete(pool) except errors.PoolInUseByFlavor as ex: title = _('Unable to delete') description = _('This pool is used by flavors {flavor}; ' 'it cannot be deleted.') description = description.format(flavor=ex.flavor) LOG.exception(description) raise falcon.HTTPForbidden(title=title, description=description) response.status = falcon.HTTP_204 @decorators.TransportLog("Pools item") @acl.enforce("pools:update") def on_patch(self, request, response, project_id, pool): """Allows one to update a pool's weight, uri, and/or options. This method expects the user to submit a JSON object containing at least one of: 'uri', 'weight', 'flavor', 'options'. If none are found, the request is flagged as bad. There is also strict format checking through the use of jsonschema. Appropriate errors are returned in each case for badly formatted input. :returns: HTTP | 200,400 """ LOG.debug('PATCH pool - name: %s', pool) data = wsgi_utils.load(request) EXPECT = ('weight', 'uri', 'flavor', 'options') if not any([(field in data) for field in EXPECT]): LOG.debug('PATCH pool, bad params') raise wsgi_errors.HTTPBadRequestBody( 'One of `uri`, `weight`, `flavor`,' ' or `options` needs ' 'to be specified' ) for field in EXPECT: wsgi_utils.validate(self._validators[field], data) conf = self._ctrl.driver.conf if 'uri' in data and not storage_utils.can_connect(data['uri'], conf=conf): raise wsgi_errors.HTTPBadRequestBody( 'cannot connect to %s' % data['uri'] ) fields = common_utils.fields(data, EXPECT, pred=lambda v: v is not None) resp_data = None try: self._ctrl.update(pool, **fields) resp_data = self._ctrl.get(pool, False) except errors.PoolDoesNotExist as ex: LOG.exception('Pool "%s" does not exist', pool) raise wsgi_errors.HTTPNotFound(str(ex)) resp_data['href'] = request.path response.text = transport_utils.to_json(resp_data) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/transport/wsgi/v2_0/purge.py0000664000175100017510000000647715033040005021331 0ustar00mylesmyles# Copyright 2016 Catalyst IT Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License.
You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. import falcon from oslo_log import log as logging from zaqar.common import decorators from zaqar.i18n import _ from zaqar.transport import acl from zaqar.transport import validation from zaqar.transport.wsgi import errors as wsgi_errors from zaqar.transport.wsgi import utils as wsgi_utils LOG = logging.getLogger(__name__) class Resource(object): __slots__ = ('_driver', '_conf', '_queue_ctrl', '_message_ctrl', '_subscription_ctrl', '_validate') def __init__(self, driver): self._driver = driver self._conf = driver._conf self._queue_ctrl = driver._storage.queue_controller self._message_ctrl = driver._storage.message_controller self._subscription_ctrl = driver._storage.subscription_controller self._validate = driver._validate @decorators.TransportLog("Queues item") @acl.enforce("queues:purge") def on_post(self, req, resp, project_id, queue_name): try: if req.content_length: document = wsgi_utils.deserialize(req.stream, req.content_length) self._validate.queue_purging(document) else: document = {'resource_types': ['messages', 'subscriptions']} except validation.ValidationFailed as ex: LOG.debug(ex) raise wsgi_errors.HTTPBadRequestAPI(str(ex)) try: if "messages" in document['resource_types']: pop_limit = 100 LOG.debug("Purge all messages under queue %s", queue_name) messages = self._message_ctrl.pop(queue_name, pop_limit, project=project_id) while messages: messages = self._message_ctrl.pop(queue_name, pop_limit, project=project_id) if "subscriptions" in document['resource_types']: LOG.debug("Purge all subscriptions under queue %s", queue_name) results = self._subscription_ctrl.list(queue_name, project=project_id) subscriptions = list(next(results)) for sub in subscriptions: self._subscription_ctrl.delete(queue_name, sub['id'], project=project_id) except ValueError as err: raise wsgi_errors.HTTPBadRequestAPI(str(err)) except Exception: description = _('Queue could not be purged.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) resp.status = falcon.HTTP_204 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/transport/wsgi/v2_0/queues.py0000664000175100017510000003244215033040005021515 0ustar00mylesmyles# Copyright (c) 2013 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
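# -----------------------------------------------------------------------
# NOTE: a sketch (illustrative only) of the reserved-metadata document that
# _get_reserved_metadata() below folds into every queue representation. The
# numeric values are assumptions; the real defaults come from the transport
# limits configuration via validate.get_limit_conf_value().
def _example_reserved_metadata():
    return {
        '_max_messages_post_size': 262144,  # assumed configured limit
        '_default_message_ttl': 3600,       # assumed configured limit
        '_default_message_delay': 0,        # assumed configured limit
        '_dead_letter_queue': None,
        '_dead_letter_queue_messages_ttl': None,
        '_max_claim_count': None,
        '_enable_encrypt_messages': False,
    }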
import copy import falcon from oslo_log import log as logging from zaqar.common import decorators from zaqar.i18n import _ from zaqar.storage import errors as storage_errors from zaqar.transport import acl from zaqar.transport import utils from zaqar.transport import validation from zaqar.transport.wsgi import errors as wsgi_errors from zaqar.transport.wsgi import utils as wsgi_utils LOG = logging.getLogger(__name__) def _get_reserved_metadata(validate): _reserved_metadata = ['max_messages_post_size', 'default_message_ttl', 'default_message_delay'] reserved_metadata = { '_%s' % meta: validate.get_limit_conf_value(meta) for meta in _reserved_metadata } for metadata in ['_dead_letter_queue', '_dead_letter_queue_messages_ttl', '_max_claim_count']: reserved_metadata.update({metadata: None}) reserved_metadata.update({'_enable_encrypt_messages': False}) return reserved_metadata class ItemResource(object): __slots__ = ('_validate', '_queue_controller', '_message_controller', '_reserved_metadata') def __init__(self, validate, queue_controller, message_controller): self._validate = validate self._queue_controller = queue_controller self._message_controller = message_controller @decorators.TransportLog("Queues item") @acl.enforce("queues:get") def on_get(self, req, resp, project_id, queue_name): try: resp_dict = self._queue_controller.get(queue_name, project=project_id) for meta, value in _get_reserved_metadata(self._validate).items(): if not resp_dict.get(meta): resp_dict[meta] = value except storage_errors.DoesNotExist as ex: LOG.debug(ex) raise wsgi_errors.HTTPNotFound(str(ex)) except Exception: description = _('Queue metadata could not be retrieved.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) resp.text = utils.to_json(resp_dict) # status defaults to 200 @decorators.TransportLog("Queues item") @acl.enforce("queues:create") def on_put(self, req, resp, project_id, queue_name): try: # Place JSON size restriction before parsing self._validate.queue_metadata_length(req.content_length) # Deserialize queue metadata metadata = None if req.content_length: document = wsgi_utils.deserialize(req.stream, req.content_length) metadata = wsgi_utils.sanitize(document) self._validate.queue_metadata_putting(metadata) except validation.ValidationFailed as ex: LOG.debug(ex) raise wsgi_errors.HTTPBadRequestAPI(str(ex)) try: created = self._queue_controller.create(queue_name, metadata=metadata, project=project_id) except storage_errors.FlavorDoesNotExist as ex: LOG.exception('Flavor "%s" does not exist', queue_name) raise wsgi_errors.HTTPBadRequestAPI(str(ex)) except Exception: description = _('Queue could not be created.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) resp.status = falcon.HTTP_201 if created else falcon.HTTP_204 resp.location = req.path @decorators.TransportLog("Queues item") @acl.enforce("queues:delete") def on_delete(self, req, resp, project_id, queue_name): LOG.debug('Queue item DELETE - queue: %(queue)s, ' 'project: %(project)s', {'queue': queue_name, 'project': project_id}) try: self._queue_controller.delete(queue_name, project=project_id) except Exception: description = _('Queue could not be deleted.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) resp.status = falcon.HTTP_204 @decorators.TransportLog("Queues item") @acl.enforce("queues:update") def on_patch(self, req, resp, project_id, queue_name): """Allows one to update a queue's metadata. 
This method expects the user to submit a JSON object. There is also strict format checking through the use of jsonschema. Appropriate errors are returned in each case for badly formatted input. :returns: HTTP | 200,400,409,503 """ LOG.debug('PATCH queue - name: %s', queue_name) try: # Place JSON size restriction before parsing self._validate.queue_metadata_length(req.content_length) except validation.ValidationFailed as ex: LOG.debug(ex) raise wsgi_errors.HTTPBadRequestBody(str(ex)) # NOTE(flwang): See the link below for more details about draft 10, # tools.ietf.org/html/draft-ietf-appsawg-json-patch-10 content_types = { 'application/openstack-messaging-v2.0-json-patch': 10, } if req.content_type not in content_types: headers = {'Accept-Patch': ', '.join(sorted(content_types.keys()))} msg = _("Accepted media type for PATCH: %s.") LOG.debug(msg, headers) raise wsgi_errors.HTTPUnsupportedMediaType(msg % headers) if req.content_length: try: changes = utils.read_json(req.stream, req.content_length) changes = wsgi_utils.sanitize(changes, doctype=list) except utils.MalformedJSON as ex: LOG.debug(ex) description = _('Request body could not be parsed.') raise wsgi_errors.HTTPBadRequestBody(description) except utils.OverflowedJSONInteger as ex: LOG.debug(ex) description = _('JSON contains an integer that is too large.') raise wsgi_errors.HTTPBadRequestBody(description) except Exception: # Error while reading from the network/server description = _('Request body could not be read.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) else: msg = _("PATCH body must not be empty for update.") LOG.debug(msg) raise wsgi_errors.HTTPBadRequestBody(msg) try: changes = self._validate.queue_patching(req, changes) # NOTE(Eva-i): using 'get_metadata' instead of 'get', so # QueueDoesNotExist error will be thrown in case of non-existent # queue.
metadata = self._queue_controller.get_metadata(queue_name, project=project_id) reserved_metadata = _get_reserved_metadata(self._validate) for change in changes: change_method_name = '_do_%s' % change['op'] change_method = getattr(self, change_method_name) change_method(req, metadata, reserved_metadata, change) self._validate.queue_metadata_putting(metadata) self._queue_controller.set_metadata(queue_name, metadata, project_id) except storage_errors.DoesNotExist as ex: LOG.debug(ex) raise wsgi_errors.HTTPNotFound(str(ex)) except validation.ValidationFailed as ex: LOG.debug(ex) raise wsgi_errors.HTTPBadRequestBody(str(ex)) except wsgi_errors.HTTPConflict: raise except Exception: description = _('Queue could not be updated.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) for meta, value in _get_reserved_metadata(self._validate).items(): if not metadata.get(meta): metadata[meta] = value resp.text = utils.to_json(metadata) def _do_replace(self, req, metadata, reserved_metadata, change): path = change['path'] path_child = path[1] value = change['value'] if path_child in metadata or path_child in reserved_metadata: metadata[path_child] = value else: msg = _("Can't replace non-existent object %s.") raise wsgi_errors.HTTPConflict(msg % path_child) def _do_add(self, req, metadata, reserved_metadata, change): path = change['path'] path_child = path[1] value = change['value'] metadata[path_child] = value def _do_remove(self, req, metadata, reserved_metadata, change): path = change['path'] path_child = path[1] if path_child in metadata: metadata.pop(path_child) elif path_child not in reserved_metadata: msg = _("Can't remove non-existent object %s.") raise wsgi_errors.HTTPConflict(msg % path_child) class CollectionResource(object): __slots__ = ('_queue_controller', '_validate', '_reserved_metadata') def __init__(self, validate, queue_controller): self._queue_controller = queue_controller self._validate = validate def _queue_list(self, project_id, path, kfilter, **kwargs): try: self._validate.queue_listing(**kwargs) with_count = kwargs.pop('with_count', False) results = self._queue_controller.list(project=project_id, kfilter=kfilter, **kwargs) # Buffer list of queues queues = list(next(results)) total_number = None if with_count: total_number = self._queue_controller.calculate_resource_count( project=project_id) except validation.ValidationFailed as ex: LOG.debug(ex) raise wsgi_errors.HTTPBadRequestAPI(str(ex)) except Exception: description = _('Queues could not be listed.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) # Got some. Prepare the response. kwargs['marker'] = next(results) or kwargs.get('marker', '') reserved_metadata = _get_reserved_metadata(self._validate).items() for each_queue in queues: each_queue['href'] = path + '/' + each_queue['name'] if kwargs.get('detailed'): for meta, value in reserved_metadata: if not each_queue.get('metadata', {}).get(meta): each_queue['metadata'][meta] = value return queues, kwargs['marker'], total_number def _on_get_with_kfilter(self, req, resp, project_id, kfilter={}): kwargs = {} # NOTE(kgriffs): This syntax ensures that # we don't clobber default values with None. 
req.get_param('marker', store=kwargs) req.get_param_as_int('limit', store=kwargs) req.get_param_as_bool('detailed', store=kwargs) req.get_param('name', store=kwargs) req.get_param_as_bool('with_count', store=kwargs) queues, marker, total_number = self._queue_list(project_id, req.path, kfilter, **kwargs) links = [] kwargs['marker'] = marker if queues: links = [ { 'rel': 'next', 'href': req.path + falcon.to_query_str(kwargs) } ] response_body = { 'queues': queues, 'links': links } if total_number: response_body['count'] = total_number resp.text = utils.to_json(response_body) # status defaults to 200 @decorators.TransportLog("Queues collection") @acl.enforce("queues:get_all") def on_get(self, req, resp, project_id): field = ('marker', 'limit', 'detailed', 'name', 'with_count') kfilter = copy.deepcopy(req.params) for key in req.params.keys(): if key in field: kfilter.pop(key) kfilter = kfilter if len(kfilter) > 0 else {} for key in kfilter.keys(): # Since the filter value comes from the URL, we need to # turn the string into an integer if an integer filter # value is used. try: kfilter[key] = int(kfilter[key]) except ValueError: continue self._on_get_with_kfilter(req, resp, project_id, kfilter) # status defaults to 200 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/transport/wsgi/v2_0/stats.py0000664000175100017510000000471415033040005021345 0ustar00mylesmyles# Copyright (c) 2013 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License.
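# -----------------------------------------------------------------------
# NOTE: a sketch (counts and hrefs are assumed values) of the response
# document the stats resource below builds; each 'href' is derived from a
# message 'id' exactly as done in on_get().
def _example_stats_response():
    return {
        'messages': {
            'claimed': 2,
            'free': 8,
            'total': 10,
            'oldest': {'age': 60, 'href': '/v2/queues/demo/messages/1234'},
            'newest': {'age': 5, 'href': '/v2/queues/demo/messages/5678'},
        }
    }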
from oslo_log import log as logging from zaqar.common import decorators from zaqar.i18n import _ from zaqar.storage import errors as storage_errors from zaqar.transport import acl from zaqar.transport import utils from zaqar.transport.wsgi import errors as wsgi_errors LOG = logging.getLogger(__name__) class Resource(object): __slots__ = '_queue_ctrl' def __init__(self, queue_controller): self._queue_ctrl = queue_controller @decorators.TransportLog("Queues stats item") @acl.enforce("queues:stats") def on_get(self, req, resp, project_id, queue_name): try: resp_dict = self._queue_ctrl.stats(queue_name, project=project_id) message_stats = resp_dict['messages'] if message_stats['total'] != 0: base_path = req.path[:req.path.rindex('/')] + '/messages/' newest = message_stats['newest'] newest['href'] = base_path + newest['id'] del newest['id'] oldest = message_stats['oldest'] oldest['href'] = base_path + oldest['id'] del oldest['id'] resp.text = utils.to_json(resp_dict) # status defaults to 200 except (storage_errors.QueueDoesNotExist, storage_errors.QueueIsEmpty): resp_dict = { 'messages': { 'claimed': 0, 'free': 0, 'total': 0 } } resp.text = utils.to_json(resp_dict) except storage_errors.DoesNotExist as ex: LOG.debug(ex) raise wsgi_errors.HTTPNotFound(str(ex)) except Exception: description = _('Queue stats could not be read.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/transport/wsgi/v2_0/subscriptions.py0000664000175100017510000003152215033040005023113 0ustar00mylesmyles# Copyright (c) 2015 Catalyst IT Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
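# -----------------------------------------------------------------------
# NOTE: a sketch (values are assumptions) of the document that
# CollectionResource.on_post below expects. 'subscriber' must be a URI whose
# scheme maps to a driver in the 'zaqar.notification.tasks' namespace; 'ttl'
# falls back to the configured default when omitted.
def _example_subscription_document():
    return {
        'subscriber': 'http://example.com/notifications',
        'ttl': 3600,
        'options': {},
    }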
import datetime import falcon from oslo_log import log as logging from oslo_utils import netutils from oslo_utils import timeutils from stevedore import driver from zaqar.common import decorators from zaqar.i18n import _ from zaqar.notification import notifier from zaqar.storage import errors as storage_errors from zaqar.transport import acl from zaqar.transport import utils from zaqar.transport import validation from zaqar.transport.wsgi import errors as wsgi_errors from zaqar.transport.wsgi import utils as wsgi_utils LOG = logging.getLogger(__name__) class ItemResource(object): __slots__ = ('_validate', '_subscription_controller') def __init__(self, validate, subscription_controller): self._validate = validate self._subscription_controller = subscription_controller @decorators.TransportLog("Subscriptions item") @acl.enforce("subscription:get") def on_get(self, req, resp, project_id, queue_name, subscription_id): try: resp_dict = self._subscription_controller.get(queue_name, subscription_id, project=project_id) except storage_errors.DoesNotExist as ex: LOG.debug(ex) raise wsgi_errors.HTTPNotFound(str(ex)) except Exception: description = _('Subscription could not be retrieved.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) resp.text = utils.to_json(resp_dict) # status defaults to 200 @decorators.TransportLog("Subscriptions item") @acl.enforce("subscription:delete") def on_delete(self, req, resp, project_id, queue_name, subscription_id): try: self._subscription_controller.delete(queue_name, subscription_id, project=project_id) except Exception: description = _('Subscription could not be deleted.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) resp.status = falcon.HTTP_204 @decorators.TransportLog("Subscriptions item") @acl.enforce("subscription:update") def on_patch(self, req, resp, project_id, queue_name, subscription_id): if req.content_length: document = wsgi_utils.deserialize(req.stream, req.content_length) else: document = {} try: self._validate.subscription_patching(document) self._subscription_controller.update(queue_name, subscription_id, project=project_id, **document) resp.status = falcon.HTTP_204 resp.location = req.path except storage_errors.SubscriptionDoesNotExist as ex: LOG.debug(ex) raise wsgi_errors.HTTPNotFound(str(ex)) except storage_errors.SubscriptionAlreadyExists as ex: LOG.debug(ex) raise wsgi_errors.HTTPConflict(str(ex)) except validation.ValidationFailed as ex: LOG.debug(ex) raise wsgi_errors.HTTPBadRequestAPI(str(ex)) except Exception: description = (_('Subscription %(subscription_id)s could not be' ' updated.') % dict(subscription_id=subscription_id)) LOG.exception(description) raise falcon.HTTPBadRequest( title=_('Unable to update subscription'), description=description) class CollectionResource(object): __slots__ = ('_subscription_controller', '_validate', '_default_subscription_ttl', '_queue_controller', '_conf', '_notification') def __init__(self, validate, subscription_controller, default_subscription_ttl, queue_controller, conf): self._subscription_controller = subscription_controller self._validate = validate self._default_subscription_ttl = default_subscription_ttl self._queue_controller = queue_controller self._conf = conf self._notification = notifier.NotifierDriver() @decorators.TransportLog("Subscriptions collection") @acl.enforce("subscription:get_all") def on_get(self, req, resp, project_id, queue_name): kwargs = {} # NOTE(kgriffs): This syntax ensures that # we don't clobber 
default values with None. req.get_param('marker', store=kwargs) req.get_param_as_int('limit', store=kwargs) try: self._validate.subscription_listing(**kwargs) results = self._subscription_controller.list(queue_name, project=project_id, **kwargs) # Buffer list of subscriptions. Can raise NoPoolFound error. subscriptions = list(next(results)) except validation.ValidationFailed as ex: LOG.debug(ex) raise wsgi_errors.HTTPBadRequestAPI(str(ex)) except Exception: description = _('Subscriptions could not be listed.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) # Got some. Prepare the response. kwargs['marker'] = next(results) or kwargs.get('marker', '') links = [] if subscriptions: links = [ { 'rel': 'next', 'href': req.path + falcon.to_query_str(kwargs) } ] response_body = { 'subscriptions': subscriptions, 'links': links } resp.text = utils.to_json(response_body) # status defaults to 200 @decorators.TransportLog("Subscriptions collection") @acl.enforce("subscription:create") def on_post(self, req, resp, project_id, queue_name): if req.content_length: document = wsgi_utils.deserialize(req.stream, req.content_length) else: document = {} try: if not self._queue_controller.exists(queue_name, project_id): self._queue_controller.create(queue_name, project=project_id) self._validate.subscription_posting(document) subscriber = document['subscriber'] options = document.get('options', {}) url = netutils.urlsplit(subscriber) ttl = document.get('ttl', self._default_subscription_ttl) mgr = driver.DriverManager('zaqar.notification.tasks', url.scheme, invoke_on_load=True) req_data = req.headers.copy() req_data.update(req.env) mgr.driver.register(subscriber, options, ttl, project_id, req_data) created = self._subscription_controller.create(queue_name, subscriber, ttl, options, project=project_id) except validation.ValidationFailed as ex: LOG.debug(ex) raise wsgi_errors.HTTPBadRequestAPI(str(ex)) except Exception: description = _('Subscription could not be created.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) now = timeutils.utcnow_ts() now_dt = datetime.datetime.fromtimestamp( now, tz=datetime.timezone.utc).replace(tzinfo=None) expires = now_dt + datetime.timedelta(seconds=ttl) api_version = req.path.split('/')[1] if created: subscription = self._subscription_controller.get(queue_name, created, project_id) # send confirm notification self._notification.send_confirm_notification( queue_name, subscription, self._conf, project_id, str(expires), api_version) resp.location = req.path resp.status = falcon.HTTP_201 resp.text = utils.to_json( {'subscription_id': str(created)}) else: subscription = self._subscription_controller.get_with_subscriber( queue_name, subscriber, project_id) confirmed = subscription.get('confirmed', True) if confirmed: description = _('Such subscription already exists.' 
' Subscriptions are unique by project + queue ' '+ subscriber URI.') raise wsgi_errors.HTTPConflict(description, headers={'location': req.path}) else: # The subscription is not confirmed, re-send confirm # notification self._notification.send_confirm_notification( queue_name, subscription, self._conf, project_id, str(expires), api_version) resp.location = req.path resp.status = falcon.HTTP_201 resp.text = utils.to_json( {'subscription_id': str(subscription['id'])}) class ConfirmResource(object): __slots__ = ('_subscription_controller', '_validate', '_notification', '_conf') def __init__(self, validate, subscription_controller, conf): self._subscription_controller = subscription_controller self._validate = validate self._notification = notifier.NotifierDriver() self._conf = conf @decorators.TransportLog("Subscriptions confirmation item") @acl.enforce("subscription:confirm") def on_put(self, req, resp, project_id, queue_name, subscription_id): if req.content_length: document = wsgi_utils.deserialize(req.stream, req.content_length) else: document = {} try: self._validate.subscription_confirming(document) confirmed = document.get('confirmed') self._subscription_controller.confirm(queue_name, subscription_id, project=project_id, confirmed=confirmed) if confirmed is False: now = timeutils.utcnow_ts() now_dt = datetime.datetime.fromtimestamp( now, tz=datetime.timezone.utc).replace(tzinfo=None) ttl = self._conf.transport.default_subscription_ttl expires = now_dt + datetime.timedelta(seconds=ttl) api_version = req.path.split('/')[1] sub = self._subscription_controller.get(queue_name, subscription_id, project=project_id) self._notification.send_confirm_notification(queue_name, sub, self._conf, project_id, str(expires), api_version, True) resp.status = falcon.HTTP_204 resp.location = req.path except storage_errors.SubscriptionDoesNotExist as ex: LOG.debug(ex) raise wsgi_errors.HTTPNotFound(str(ex)) except validation.ValidationFailed as ex: LOG.debug(ex) raise wsgi_errors.HTTPBadRequestAPI(str(ex)) except Exception: description = (_('Subscription %(subscription_id)s could not be' ' confirmed.') % dict(subscription_id=subscription_id)) LOG.exception(description) raise falcon.HTTPBadRequest( title=_('Unable to confirm subscription'), description=description) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/transport/wsgi/v2_0/topic.py0000664000175100017510000003112315033040005021319 0ustar00mylesmyles# Copyright (c) 2019 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License.
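# -----------------------------------------------------------------------
# NOTE: a sketch of the JSON-patch document consumed by the on_patch
# handlers in this module and in queues.py; it must be sent with the
# 'application/openstack-messaging-v2.0-json-patch' content type. The
# metadata key and value shown are assumptions for illustration.
def _example_metadata_patch():
    return [
        {'op': 'replace',
         'path': '/metadata/_default_message_ttl',
         'value': 300},
    ]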
import copy import falcon from oslo_log import log as logging from zaqar.common import decorators from zaqar.i18n import _ from zaqar.storage import errors as storage_errors from zaqar.transport import acl from zaqar.transport import utils from zaqar.transport import validation from zaqar.transport.wsgi import errors as wsgi_errors from zaqar.transport.wsgi import utils as wsgi_utils LOG = logging.getLogger(__name__) def _get_reserved_metadata(validate): _reserved_metadata = ['max_messages_post_size', 'default_message_ttl', 'default_message_delay'] reserved_metadata = { '_%s' % meta: validate.get_limit_conf_value(meta) for meta in _reserved_metadata } return reserved_metadata class ItemResource(object): __slots__ = ('_validate', '_topic_controller', '_message_controller', '_reserved_metadata') def __init__(self, validate, topic_controller, message_controller): self._validate = validate self._topic_controller = topic_controller self._message_controller = message_controller @decorators.TransportLog("Topics item") @acl.enforce("topics:get") def on_get(self, req, resp, project_id, topic_name): try: resp_dict = self._topic_controller.get(topic_name, project=project_id) for meta, value in _get_reserved_metadata(self._validate).items(): if not resp_dict.get(meta): resp_dict[meta] = value except storage_errors.DoesNotExist as ex: LOG.debug(ex) raise wsgi_errors.HTTPNotFound(str(ex)) except Exception: description = _('Topic metadata could not be retrieved.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) resp.text = utils.to_json(resp_dict) # status defaults to 200 @decorators.TransportLog("Topics item") @acl.enforce("topics:create") def on_put(self, req, resp, project_id, topic_name): try: # Place JSON size restriction before parsing self._validate.queue_metadata_length(req.content_length) # Deserialize Topic metadata metadata = None if req.content_length: document = wsgi_utils.deserialize(req.stream, req.content_length) metadata = wsgi_utils.sanitize(document) self._validate.queue_metadata_putting(metadata) except validation.ValidationFailed as ex: LOG.debug(ex) raise wsgi_errors.HTTPBadRequestAPI(str(ex)) try: created = self._topic_controller.create(topic_name, metadata=metadata, project=project_id) except storage_errors.FlavorDoesNotExist as ex: LOG.exception('Flavor "%s" does not exist', topic_name) raise wsgi_errors.HTTPBadRequestAPI(str(ex)) except Exception: description = _('Topic could not be created.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) resp.status = falcon.HTTP_201 if created else falcon.HTTP_204 resp.location = req.path @decorators.TransportLog("Topics item") @acl.enforce("topics:delete") def on_delete(self, req, resp, project_id, topic_name): LOG.debug('Topic item DELETE - topic: %(topic)s, ' 'project: %(project)s', {'topic': topic_name, 'project': project_id}) try: self._topic_controller.delete(topic_name, project=project_id) except Exception: description = _('Topic could not be deleted.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) resp.status = falcon.HTTP_204 @decorators.TransportLog("Topics item") @acl.enforce("topics:update") def on_patch(self, req, resp, project_id, topic_name): """Allows one to update a topic's metadata. This method expects the user to submit a JSON object. There is also strict format checking through the use of jsonschema. Appropriate errors are returned in each case for badly formatted input. 
:returns: HTTP | 200,400,409,503 """ LOG.debug('PATCH topic - name: %s', topic_name) try: # Place JSON size restriction before parsing self._validate.queue_metadata_length(req.content_length) except validation.ValidationFailed as ex: LOG.debug(ex) raise wsgi_errors.HTTPBadRequestBody(str(ex)) # NOTE(flwang): See the link below for more details about draft 10, # tools.ietf.org/html/draft-ietf-appsawg-json-patch-10 content_types = { 'application/openstack-messaging-v2.0-json-patch': 10, } if req.content_type not in content_types: headers = {'Accept-Patch': ', '.join(sorted(content_types.keys()))} msg = _("Accepted media type for PATCH: %s.") LOG.debug(msg, headers) raise wsgi_errors.HTTPUnsupportedMediaType(msg % headers) if req.content_length: try: changes = utils.read_json(req.stream, req.content_length) changes = wsgi_utils.sanitize(changes, doctype=list) except utils.MalformedJSON as ex: LOG.debug(ex) description = _('Request body could not be parsed.') raise wsgi_errors.HTTPBadRequestBody(description) except utils.OverflowedJSONInteger as ex: LOG.debug(ex) description = _('JSON contains an integer that is too large.') raise wsgi_errors.HTTPBadRequestBody(description) except Exception: # Error while reading from the network/server description = _('Request body could not be read.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) else: msg = _("PATCH body must not be empty for update.") LOG.debug(msg) raise wsgi_errors.HTTPBadRequestBody(msg) try: changes = self._validate.queue_patching(req, changes) # NOTE(Eva-i): using 'get_metadata' instead of 'get', so # QueueDoesNotExist error will be thrown in case of non-existent # queue. metadata = self._topic_controller.get_metadata(topic_name, project=project_id) reserved_metadata = _get_reserved_metadata(self._validate) for change in changes: change_method_name = '_do_%s' % change['op'] change_method = getattr(self, change_method_name) change_method(req, metadata, reserved_metadata, change) self._validate.queue_metadata_putting(metadata) self._topic_controller.set_metadata(topic_name, metadata, project_id) except storage_errors.DoesNotExist as ex: LOG.debug(ex) raise wsgi_errors.HTTPNotFound(str(ex)) except validation.ValidationFailed as ex: LOG.debug(ex) raise wsgi_errors.HTTPBadRequestBody(str(ex)) except wsgi_errors.HTTPConflict: raise except Exception: description = _('Topic could not be updated.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) for meta, value in _get_reserved_metadata(self._validate).items(): if not metadata.get(meta): metadata[meta] = value resp.text = utils.to_json(metadata) def _do_replace(self, req, metadata, reserved_metadata, change): path = change['path'] path_child = path[1] value = change['value'] if path_child in metadata or path_child in reserved_metadata: metadata[path_child] = value else: msg = _("Can't replace non-existent object %s.") raise wsgi_errors.HTTPConflict(msg % path_child) def _do_add(self, req, metadata, reserved_metadata, change): path = change['path'] path_child = path[1] value = change['value'] metadata[path_child] = value def _do_remove(self, req, metadata, reserved_metadata, change): path = change['path'] path_child = path[1] if path_child in metadata: metadata.pop(path_child) elif path_child not in reserved_metadata: msg = _("Can't remove non-existent object %s.") raise wsgi_errors.HTTPConflict(msg % path_child) class CollectionResource(object): __slots__ = ('_topic_controller', '_validate', '_reserved_metadata') def
__init__(self, validate, topic_controller): self._topic_controller = topic_controller self._validate = validate def _topic_list(self, project_id, path, kfilter, **kwargs): try: self._validate.queue_listing(**kwargs) results = self._topic_controller.list(project=project_id, kfilter=kfilter, **kwargs) # Buffer list of topics topics = list(next(results)) except validation.ValidationFailed as ex: LOG.debug(ex) raise wsgi_errors.HTTPBadRequestAPI(str(ex)) except Exception: description = _('Topics could not be listed.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) # Got some. Prepare the response. kwargs['marker'] = next(results) or kwargs.get('marker', '') reserved_metadata = _get_reserved_metadata(self._validate).items() for each_topic in topics: each_topic['href'] = path + '/' + each_topic['name'] if kwargs.get('detailed'): for meta, value in reserved_metadata: if not each_topic.get('metadata', {}).get(meta): each_topic['metadata'][meta] = value return topics, kwargs['marker'] def _on_get_with_kfilter(self, req, resp, project_id, kfilter={}): kwargs = {} # NOTE(kgriffs): This syntax ensures that # we don't clobber default values with None. req.get_param('marker', store=kwargs) req.get_param_as_int('limit', store=kwargs) req.get_param_as_bool('detailed', store=kwargs) req.get_param('name', store=kwargs) topics, marker = self._topic_list(project_id, req.path, kfilter, **kwargs) links = [] kwargs['marker'] = marker if topics: links = [ { 'rel': 'next', 'href': req.path + falcon.to_query_str(kwargs) } ] response_body = { 'topics': topics, 'links': links } resp.text = utils.to_json(response_body) # status defaults to 200 @decorators.TransportLog("Topics collection") @acl.enforce("topics:get_all") def on_get(self, req, resp, project_id): field = ('marker', 'limit', 'detailed', 'name') kfilter = copy.deepcopy(req.params) for key in req.params.keys(): if key in field: kfilter.pop(key) kfilter = kfilter if len(kfilter) > 0 else {} for key in kfilter.keys(): # Since the filter value comes from the URL, we need to # turn the string into an integer if an integer filter # value is used. try: kfilter[key] = int(kfilter[key]) except ValueError: continue self._on_get_with_kfilter(req, resp, project_id, kfilter) # status defaults to 200 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/transport/wsgi/v2_0/topic_purge.py0000664000175100017510000000636415033040005022532 0ustar00mylesmyles# Copyright 2019 Catalyst IT Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License.
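# -----------------------------------------------------------------------
# NOTE: the purge resource below accepts an optional body selecting what to
# drop; an empty body defaults to purging both resource types. A minimal
# sketch of an explicit request document:
def _example_purge_document():
    return {'resource_types': ['messages', 'subscriptions']}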
import falcon from oslo_log import log as logging from zaqar.common import decorators from zaqar.i18n import _ from zaqar.transport import acl from zaqar.transport import validation from zaqar.transport.wsgi import errors as wsgi_errors from zaqar.transport.wsgi import utils as wsgi_utils LOG = logging.getLogger(__name__) class Resource(object): __slots__ = ('_driver', '_conf', '_message_ctrl', '_subscription_ctrl', '_validate') def __init__(self, driver): self._driver = driver self._conf = driver._conf self._message_ctrl = driver._storage.message_controller self._subscription_ctrl = driver._storage.subscription_controller self._validate = driver._validate @decorators.TransportLog("Topics item") @acl.enforce("topics:purge") def on_post(self, req, resp, project_id, topic_name): try: if req.content_length: document = wsgi_utils.deserialize(req.stream, req.content_length) self._validate.queue_purging(document) else: document = {'resource_types': ['messages', 'subscriptions']} except validation.ValidationFailed as ex: LOG.debug(ex) raise wsgi_errors.HTTPBadRequestAPI(str(ex)) try: if "messages" in document['resource_types']: pop_limit = 100 LOG.debug("Purge all messages under topic %s", topic_name) messages = self._message_ctrl.pop(topic_name, pop_limit, project=project_id) while messages: messages = self._message_ctrl.pop(topic_name, pop_limit, project=project_id) if "subscriptions" in document['resource_types']: LOG.debug("Purge all subscriptions under topic %s", topic_name) results = self._subscription_ctrl.list(topic_name, project=project_id) subscriptions = list(next(results)) for sub in subscriptions: self._subscription_ctrl.delete(topic_name, sub['id'], project=project_id) except ValueError as err: raise wsgi_errors.HTTPBadRequestAPI(str(err)) except Exception: description = _('Topic could not be purged.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) resp.status = falcon.HTTP_204 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/transport/wsgi/v2_0/topic_stats.py0000664000175100017510000000471415033040005022543 0ustar00mylesmyles# Copyright (c) 2019 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
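# -----------------------------------------------------------------------
# NOTE: a runnable sketch of how the stats resources derive message hrefs
# from the request path (mirrors the slicing in on_get below); the topic
# name and message id are assumed values.
def _example_message_href(req_path='/v2/topics/demo/stats',
                          message_id='1234'):
    base_path = req_path[:req_path.rindex('/')] + '/messages/'
    return base_path + message_id  # -> '/v2/topics/demo/messages/1234'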
from oslo_log import log as logging from zaqar.common import decorators from zaqar.i18n import _ from zaqar.storage import errors as storage_errors from zaqar.transport import acl from zaqar.transport import utils from zaqar.transport.wsgi import errors as wsgi_errors LOG = logging.getLogger(__name__) class Resource(object): __slots__ = '_topic_ctrl' def __init__(self, topic_controller): self._topic_ctrl = topic_controller @decorators.TransportLog("Topics stats item") @acl.enforce("topics:stats") def on_get(self, req, resp, project_id, topic_name): try: resp_dict = self._topic_ctrl.stats(topic_name, project=project_id) message_stats = resp_dict['messages'] if message_stats['total'] != 0: base_path = req.path[:req.path.rindex('/')] + '/messages/' newest = message_stats['newest'] newest['href'] = base_path + newest['id'] del newest['id'] oldest = message_stats['oldest'] oldest['href'] = base_path + oldest['id'] del oldest['id'] resp.text = utils.to_json(resp_dict) # status defaults to 200 except (storage_errors.TopicDoesNotExist, storage_errors.TopicIsEmpty): resp_dict = { 'messages': { 'claimed': 0, 'free': 0, 'total': 0 } } resp.text = utils.to_json(resp_dict) except storage_errors.DoesNotExist as ex: LOG.debug(ex) raise wsgi_errors.HTTPNotFound(str(ex)) except Exception: description = _('Topic stats could not be read.') LOG.exception(description) raise wsgi_errors.HTTPServiceUnavailable(description) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/transport/wsgi/v2_0/urls.py0000664000175100017510000000507015033040005021170 0ustar00mylesmyles# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. 
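# ---------------------------------------------------------------------
# Illustrative usage sketch (an editorial addition; not part of the
# original module). It builds a share request for the Resource below;
# the document keys ('methods', 'expires', 'paths') mirror _KNOWN_KEYS
# and _VALID_PATHS in this module, while the endpoint, queue name,
# expiry, and token are hypothetical placeholders.
# ---------------------------------------------------------------------
import requests


def share_queue(base_url, queue_name, token):
    """Request a pre-signed URL document for a queue (sketch)."""
    document = {
        'methods': ['GET', 'POST'],
        'expires': '2035-01-01T00:00:00',
        'paths': ['messages', 'claims'],
    }
    resp = requests.post(
        '%s/v2/queues/%s/share' % (base_url, queue_name),
        json=document,
        headers={'X-Auth-Token': token})
    resp.raise_for_status()
    # The response carries the signed paths, expiry, and signature.
    return resp.json()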
import os from oslo_log import log as logging from zaqar.common import decorators from zaqar.common import urls from zaqar.transport import acl from zaqar.transport import utils from zaqar.transport.wsgi import errors as wsgi_errors from zaqar.transport.wsgi import utils as wsgi_utils LOG = logging.getLogger(__name__) _KNOWN_KEYS = {'methods', 'expires', 'paths'} _VALID_PATHS = {'messages', 'subscriptions', 'claims'} class Resource(object): __slots__ = ('_driver', '_conf') def __init__(self, driver): self._driver = driver self._conf = driver._conf @decorators.TransportLog("Queues share item") @acl.enforce("queues:share") def on_post(self, req, resp, project_id, queue_name): LOG.debug('Pre-Signed URL Creation for queue: %(queue)s, ' 'project: %(project)s', {'queue': queue_name, 'project': project_id}) try: document = wsgi_utils.deserialize(req.stream, req.content_length) except ValueError as ex: LOG.debug(ex) raise wsgi_errors.HTTPBadRequestAPI(str(ex)) diff = set(document.keys()) - _KNOWN_KEYS if diff: msg = str('Unknown keys: %s' % diff) raise wsgi_errors.HTTPBadRequestAPI(msg) key = self._conf.signed_url.secret_key paths = document.pop('paths', None) if not paths: paths = [os.path.join(req.path[:-6], 'messages')] else: diff = set(paths) - _VALID_PATHS if diff: msg = str('Invalid paths: %s' % diff) raise wsgi_errors.HTTPBadRequestAPI(msg) paths = [os.path.join(req.path[:-6], path) for path in paths] try: data = urls.create_signed_url(key, paths, project=project_id, **document) except ValueError as err: raise wsgi_errors.HTTPBadRequestAPI(str(err)) resp.text = utils.to_json(data) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/transport/wsgi/version.py0000664000175100017510000000176715033040005021133 0ustar00mylesmyles# Copyright (c) 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. import falcon from zaqar.transport import utils from zaqar.transport.wsgi import v1_1 from zaqar.transport.wsgi import v2_0 VERSIONS = { 'versions': [ v1_1.VERSION, v2_0.VERSION ] } class Resource(object): def __init__(self): self.versions = utils.to_json(VERSIONS) def on_get(self, req, resp, project_id): resp.text = self.versions resp.status = falcon.HTTP_300 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924741.0 zaqar-20.1.0.dev29/zaqar/version.py0000664000175100017510000000400215033040005016107 0ustar00mylesmyles# Copyright (c) 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
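# NOTE: Illustrative usage sketch (an editorial addition; not part of
# the original module). A CI script could call verify_sha() below to
# confirm that the zaqar package installed in an environment was built
# from the expected commit. The hash shown matches the git_version
# recorded in this build's pbr.json, but any expected commit hash of at
# least 7 hex digits works:
#
#     from zaqar import version
#
#     if not version.verify_sha('10fdc647'):
#         raise RuntimeError('unexpected zaqar build installed')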
import pbr.version version_info = pbr.version.VersionInfo('zaqar') version_string = version_info.version_string def verify_sha(expected): """Verifies the commit hash for an interim Zaqar build. This function may be used to verify that the version of the zaqar package, as imported from an environment's site-packages, is the expected build. This allows continuous integration scripts to detect out-of-date installations of the package. Note that this function will ALWAYS return False for Zaqar packages that were not installed from git. :param expected: The expected commit object name. May be either a full or abbreviated SHA hash. If abbreviated, at least 7 digits are required. :returns: True if the package's version string contains a hash, and that hash matches `expected`. Otherwise returns False. """ # NOTE(kgriffs): Require 7 digits to avoid false positives. In practice, # Git's abbreviated commit object names will always include at least # 7 digits. assert len(expected) >= 7 # NOTE(kgriffs): Git usually abbreviates hashes to 7 digits, but also # check 8 digits in case git decides just 7 is ambiguous. Accordingly, # try the longer one first since it is more specific than the other. for abbreviated in (expected[:8], expected[:7]): if ('.g' + abbreviated) in version_info.release_string(): return True return False ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751924757.5620136 zaqar-20.1.0.dev29/zaqar.egg-info/0000775000175100017510000000000015033040026015551 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924742.0 zaqar-20.1.0.dev29/zaqar.egg-info/PKG-INFO0000644000175100017510000001326615033040006016652 0ustar00mylesmylesMetadata-Version: 2.2 Name: zaqar Version: 20.1.0.dev29 Summary: OpenStack Queuing and Notification Service Home-page: https://docs.openstack.org/zaqar/latest/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org Project-URL: Source, https://opendev.org/openstack/zaqar Project-URL: Tracker, https://bugs.launchpad.net/zaqar Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3.12 Requires-Python: >=3.10 License-File: LICENSE License-File: AUTHORS.rst Requires-Dist: pbr!=2.1.0,>=2.0.0 Requires-Dist: alembic>=0.9.6 Requires-Dist: cryptography>=2.7 Requires-Dist: falcon>=3.0.0 Requires-Dist: jsonschema>=3.2.0 Requires-Dist: keystonemiddleware>=9.1.0 Requires-Dist: msgpack>=1.0.0 Requires-Dist: python-swiftclient>=3.10.1 Requires-Dist: WebOb>=1.7.1 Requires-Dist: stevedore>=3.2.2 Requires-Dist: oslo.cache>=1.26.0 Requires-Dist: oslo.concurrency>=5.0.1 Requires-Dist: oslo.config>=8.3.2 Requires-Dist: oslo.context>=2.19.2 Requires-Dist: oslo.db>=11.0.0 Requires-Dist: oslo.i18n>=3.15.3 Requires-Dist: oslo.log>=4.6.1 Requires-Dist: oslo.messaging>=12.5.0 Requires-Dist: oslo.reports>=2.2.0 Requires-Dist: oslo.serialization>=4.2.0 Requires-Dist: oslo.upgradecheck>=1.3.0 Requires-Dist: oslo.utils>=4.12.1 Requires-Dist: 
oslo.policy>=4.5.0 Requires-Dist: osprofiler>=1.4.0 Requires-Dist: SQLAlchemy>=1.3.19 Requires-Dist: autobahn>=22.3.2 Requires-Dist: requests>=2.25.0 Requires-Dist: futurist>=1.2.0 Provides-Extra: mongodb Requires-Dist: pymongo>=3.6.0; extra == "mongodb" Provides-Extra: redis Requires-Dist: redis>=3.4.0; extra == "redis" Provides-Extra: mysql Requires-Dist: PyMySQL>=0.8.0; extra == "mysql" Provides-Extra: test Requires-Dist: hacking<6.2.0,>=6.1.0; extra == "test" Requires-Dist: redis>=3.4.0; extra == "test" Requires-Dist: pymongo>=3.6.0; extra == "test" Requires-Dist: python-swiftclient>=3.10.1; extra == "test" Requires-Dist: websocket-client>=0.44.0; extra == "test" Requires-Dist: PyMySQL>=0.8.0; extra == "test" Requires-Dist: coverage!=4.4,>=4.0; extra == "test" Requires-Dist: cryptography>=2.7; extra == "test" Requires-Dist: ddt>=1.0.1; extra == "test" Requires-Dist: doc8>=0.8.1; extra == "test" Requires-Dist: Pygments>=2.2.0; extra == "test" Requires-Dist: fixtures>=3.0.0; extra == "test" Requires-Dist: testscenarios>=0.4; extra == "test" Requires-Dist: testtools>=2.2.0; extra == "test" Requires-Dist: testresources>=2.0.0; extra == "test" Requires-Dist: oslotest>=3.2.0; extra == "test" Requires-Dist: stestr>=2.0.0; extra == "test" Requires-Dist: osprofiler>=1.4.0; extra == "test" Dynamic: author Dynamic: author-email Dynamic: classifier Dynamic: description Dynamic: home-page Dynamic: project-url Dynamic: provides-extra Dynamic: requires-dist Dynamic: requires-python Dynamic: summary ======================== Team and repository tags ======================== .. image:: https://governance.openstack.org/tc/badges/zaqar.svg :target: https://governance.openstack.org/tc/reference/tags/index.html .. Change things from this point on ===== Zaqar ===== Zaqar is a multi-tenant cloud messaging and notification service for web and mobile developers. It combines the ideas pioneered by Amazon's SQS product with additional semantics to support event broadcasting. The service features a fully RESTful API, which developers can use to send messages between various components of their SaaS and mobile applications, by using a variety of communication patterns. Underlying this API is an efficient messaging engine designed with scalability and security in mind. Other OpenStack components can integrate with Zaqar to surface events to end users and to communicate with guest agents that run in the "over-cloud" layer. Cloud operators can leverage Zaqar to provide equivalents of SQS and SNS to their customers. 
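As a brief illustration of the API described above (an editorial sketch; the endpoint, queue name, and token are placeholders), a client can post a message to a queue with nothing more than a generic HTTP library, for example Python's ``requests``::

    import requests

    resp = requests.post(
        'http://localhost:8888/v2/queues/demo/messages',
        json={'messages': [{'ttl': 300, 'body': {'event': 'created'}}]},
        headers={'Client-ID': '3381af92-2b9e-11e3-b191-71861300734c',
                 'X-Auth-Token': '<auth token>'})
    resp.raise_for_status()  # expect 201 Created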
General information is available in the wiki: https://wiki.openstack.org/wiki/Zaqar The API v2.0 (stable) specification and documentation are available at: https://wiki.openstack.org/wiki/Zaqar/specs/api/v2.0 Zaqar's documentation, the source of which is in ``doc/source/``, is available at: https://docs.openstack.org/zaqar/latest Zaqar's release notes are available at: https://docs.openstack.org/releasenotes/zaqar/ Contributors are encouraged to join IRC (``#openstack-zaqar`` channel on ``OFTC``): https://wiki.openstack.org/wiki/IRC Information on how to run unit and functional tests is available at: https://docs.openstack.org/zaqar/latest/contributor/running_tests.html Information on how to run the benchmarking tool is available at: https://docs.openstack.org/zaqar/latest/admin/running_benchmark.html Zaqar's design specifications are tracked at: https://specs.openstack.org/openstack/zaqar-specs/ Using Zaqar ----------- If you are new to Zaqar and just want to try it, you can set up Zaqar in a development environment. Using Zaqar in a production environment: Coming soon! Using Zaqar in a development environment: Instructions are available at: https://docs.openstack.org/zaqar/latest/contributor/development.environment.html This will allow you to run a local Zaqar server with MongoDB as the database. This is the easiest and quickest way to get started, and the most suitable for beginners. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924742.0 zaqar-20.1.0.dev29/zaqar.egg-info/SOURCES.txt0000664000175100017510000005471015033040006017442 0ustar00mylesmyles.coveragerc .stestr.conf .zuul.yaml AUTHORS AUTHORS.rst CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE README.rst bench-requirements.txt bindep.txt pyproject.toml requirements.txt setup.cfg setup.py test-requirements.txt tox.ini api-ref/source/claims.inc api-ref/source/conf.py api-ref/source/flavors.inc api-ref/source/health.inc api-ref/source/index.rst api-ref/source/messages.inc api-ref/source/parameters.yaml api-ref/source/pools.inc api-ref/source/queues.inc api-ref/source/status.yaml api-ref/source/subscription.inc api-ref/source/versions.inc api-ref/source/samples/claim_messages_request.json api-ref/source/samples/claim_messages_response.json api-ref/source/samples/claim_query_response.json api-ref/source/samples/claim_update_request.json api-ref/source/samples/flavor-create-request-new.json api-ref/source/samples/flavor-create-request.json api-ref/source/samples/flavor-list-response-new.json api-ref/source/samples/flavor-list-response.json api-ref/source/samples/flavor-show-response-new.json api-ref/source/samples/flavor-show-response.json api-ref/source/samples/flavor-update-request-new.json api-ref/source/samples/flavor-update-request.json api-ref/source/samples/flavor-update-response-new.json api-ref/source/samples/flavor-update-response.json api-ref/source/samples/health-response.json api-ref/source/samples/messages-delete-bypop-response.json api-ref/source/samples/messages-get-byids-response.json api-ref/source/samples/messages-get-response.json api-ref/source/samples/messages-list-response.json api-ref/source/samples/messages-post-request.json api-ref/source/samples/messages-post-response.json api-ref/source/samples/pool-create-request-new.json api-ref/source/samples/pool-create-request.json api-ref/source/samples/pool-list-response-new.json api-ref/source/samples/pool-list-response.json api-ref/source/samples/pool-show-response-new.json api-ref/source/samples/pool-show-response.json
api-ref/source/samples/pool-update-request-new.json api-ref/source/samples/pool-update-request.json api-ref/source/samples/pool-update-response-new.json api-ref/source/samples/pool-update-response.json api-ref/source/samples/purge-queue-request.json api-ref/source/samples/queue-create-request.json api-ref/source/samples/queue-pre-signed-request.json api-ref/source/samples/queue-pre-signed-response.json api-ref/source/samples/queue-show-response.json api-ref/source/samples/queue-stats-response.json api-ref/source/samples/queue-update-request.json api-ref/source/samples/queue-update-response.json api-ref/source/samples/queues-list-response.json api-ref/source/samples/subscription-confirm-request.json api-ref/source/samples/subscription-create-request-http.json api-ref/source/samples/subscription-create-request-mail.json api-ref/source/samples/subscription-create-response.json api-ref/source/samples/subscription-show-response.json api-ref/source/samples/subscription-update-request.json api-ref/source/samples/subscriptions-list-response.json api-ref/source/samples/versions-list-response.json devstack/README.rst devstack/plugin.sh devstack/settings devstack/upgrade/resource.sh devstack/upgrade/settings devstack/upgrade/shutdown.sh devstack/upgrade/upgrade.sh doc/README.md doc/requirements.txt doc/source/conf.py doc/source/glossary.rst doc/source/index.rst doc/source/_static/.placeholder doc/source/admin/CORS.rst doc/source/admin/OSprofiler.rst doc/source/admin/gmr.rst doc/source/admin/index.rst doc/source/admin/running_benchmark.rst doc/source/admin/subscription_confirm.rst doc/source/admin/writing_pipeline_stages.rst doc/source/cli/index.rst doc/source/cli/zaqar-status.rst doc/source/configuration/configuring.rst doc/source/configuration/index.rst doc/source/configuration/sample-configuration.rst doc/source/configuration/zaqar.rst doc/source/contributor/contributing.rst doc/source/contributor/development.environment.rst doc/source/contributor/first_patch.rst doc/source/contributor/first_review.rst doc/source/contributor/gerrit.rst doc/source/contributor/index.rst doc/source/contributor/jenkins.rst doc/source/contributor/launchpad.rst doc/source/contributor/project_info.rst doc/source/contributor/reviewer_guide.rst doc/source/contributor/running_tests.rst doc/source/contributor/storage.rst doc/source/contributor/test_suite.rst doc/source/contributor/transport.rst doc/source/contributor/welcome.rst doc/source/contributor/images/zaqar_review_id.png doc/source/install/get_started.rst doc/source/install/index.rst doc/source/install/install-obs.rst doc/source/install/install-rdo.rst doc/source/install/install-ubuntu.rst doc/source/install/install.rst doc/source/install/next-steps.rst doc/source/install/verify.rst doc/source/user/authentication_tokens.rst doc/source/user/getting_started.rst doc/source/user/headers_queue_api_working.rst doc/source/user/index.rst doc/source/user/notification_delivery_policy.rst doc/source/user/send_request_api.rst etc/README-policy.json.sample etc/logging.conf.sample etc/uwsgi.conf etc/zaqar-benchmark-messages.json etc/zaqar-benchmark.conf.sample etc/zaqar-policy-generator.conf etc/oslo-config-generator/zaqar.conf releasenotes/notes/.gitignore releasenotes/notes/Integrate-OSprofiler-with-zaqar-59d0dc3d0326947d.yaml releasenotes/notes/add-a-notifier-using-trust-271d9cd1d2b4cdeb.yaml releasenotes/notes/add-swift-backend-4eb9b43913f39d18.yaml releasenotes/notes/allow-configuration-of-websocket-notification-fa542fbf761378d3.yaml 
releasenotes/notes/configuration-refactor-0ff219ac59c96347.yaml releasenotes/notes/delete_messages_with_claim_ids-64bb8105de3768b1.yaml releasenotes/notes/deprecate-json-formatted-policy-file-f2abc160715c3f9b.yaml releasenotes/notes/deprecate-v11-976cccc1b56a28e7.yaml releasenotes/notes/drop-py-2-7-09cf95d7d843d8f6.yaml releasenotes/notes/email-notification-by-internal-tool-08910ab2247c3864.yaml releasenotes/notes/encrypted-messages-in-queue-d7438d4f185be444.yaml releasenotes/notes/falcon-4-e4b5aab856e3228c.yaml releasenotes/notes/fix-detailed-queue-without-reserved-metadata-b53857ed9821fe76.yaml releasenotes/notes/fix_auth_issue_for_root_path-b15e1c4e92e4e8b1.yaml releasenotes/notes/fix_subscription_limit-c3cdc9385825285a.yaml releasenotes/notes/introduce-guru-to-zaqar-ac7b51c764503829.yaml releasenotes/notes/introduce-topic-resource-9b40674cac06bdc2.yaml releasenotes/notes/lazy-queues-in-subscriptions-6bade4a1b8eca3e5.yaml releasenotes/notes/purge-queue-6788a249ee59d55a.yaml releasenotes/notes/queue-filter-support-b704a1c27f7473b9.yaml releasenotes/notes/redis-sentinel-authentication-93fa9b1846979e41.yaml releasenotes/notes/redis-username-98a265f61fca6a1c.yaml releasenotes/notes/remove-format-contraint-of-client-id-ab787960df6e1606.yaml releasenotes/notes/remove-pool-group-00f2e69682c48131.yaml releasenotes/notes/remove-pool-group-totally-062ecfccd90a6725.yaml releasenotes/notes/remove-py38-005b0eda63232532.yaml releasenotes/notes/remove-py39-cd35d7feff4be5fb.yaml releasenotes/notes/remove-strict-redis-e50cccbdf4a86f76.yaml releasenotes/notes/remove_pool_group_from_zaqar-f8eafeed21779959.yaml releasenotes/notes/return_reserved_metdata_for_dead_letter_queue-da160301f6d8cfa4.yaml releasenotes/notes/show_default_attributes_for_queue-3d87333752484c87.yaml releasenotes/notes/sql_init-c9b3883241631f24.yaml releasenotes/notes/sqlalchemy-migration-6b4eaebb6e02a449.yaml releasenotes/notes/subscription-confirmation-support-email-0c2a56cfedc5d1e2.yaml releasenotes/notes/support-cors-af8349382a44aa0d.yaml releasenotes/notes/support-dot-in-queue-name-bd2b3d523f55451f.yaml releasenotes/notes/support-extra-specs-to-subscription-confirming-edbdbebbdcd0cd74.yaml releasenotes/notes/support-more-backoff-functions-41e02a5977341576.yaml releasenotes/notes/support-notification-delivery-policy-fbc94083b4e6b8d0.yaml releasenotes/notes/support-query-quques-with-count-4453825671bb5298.yaml releasenotes/notes/support-redis-as-management-storage-backend-a205e3c4c4d01584.yaml releasenotes/notes/support-turnoff-deprecated-versions-44656aeb8ebb8881.yaml releasenotes/notes/support_dead_letter_queue-c8b7303319e7f920.yaml releasenotes/notes/support_delayed_queues-1babcaa3f056a39d.yaml releasenotes/notes/support_md5_of_body-84c1cdc6809b6417.yaml releasenotes/notes/support_password_configure_for_redis_connection-6f169db73ca80416.yaml releasenotes/notes/update-mongo-driver-with-new-version-of-pymongo-ebd82e428bb57ebd.yaml releasenotes/notes/user_ipv6_sockets-1e1b436de6b81ae3.yaml releasenotes/notes/victoria-release-prelude-330129ef9dfd6c03.yaml releasenotes/notes/webhook_subscription_confirmation-883cb7f325885ef0.yaml releasenotes/notes/zaqar-status-upgrade-check-framework-09caa1f741f6119d.yaml releasenotes/source/2023.1.rst releasenotes/source/2023.2.rst releasenotes/source/2024.1.rst releasenotes/source/2024.2.rst releasenotes/source/2025.1.rst releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/liberty.rst releasenotes/source/mitaka.rst releasenotes/source/newton.rst 
releasenotes/source/ocata.rst releasenotes/source/pike.rst releasenotes/source/queens.rst releasenotes/source/rocky.rst releasenotes/source/stein.rst releasenotes/source/train.rst releasenotes/source/unreleased.rst releasenotes/source/ussuri.rst releasenotes/source/victoria.rst releasenotes/source/wallaby.rst releasenotes/source/xena.rst releasenotes/source/yoga.rst releasenotes/source/zed.rst releasenotes/source/_static/.gitignore releasenotes/source/_templates/.gitignore releasenotes/source/locale/de/LC_MESSAGES/releasenotes.po releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po releasenotes/source/locale/id/LC_MESSAGES/releasenotes.po samples/html/confirmation_web_service_sample.py samples/html/subscriptionConfirmation.html samples/html/unsubscriptionConfirmation.html samples/java-api-for-websocket/receive_message/JsonDecoder.java samples/java-api-for-websocket/receive_message/SampleZaqarEndpoint.java samples/java-api-for-websocket/send_message/SampleZaqarEndpoint.java samples/javascript/websocket.html samples/javascript/receive_message/zaqar_sample.js samples/javascript/send_message/zaqar_sample.js samples/jaxrs/receive_message/SampleZaqarServlet.java samples/jaxrs/send_message/SampleZaqarServlet.java samples/nodejs/receive_message/zaqar_sample.js samples/nodejs/send_message/zaqar_sample.js samples/python-zaqarclient/receive_message/zaqar_sample.py samples/python-zaqarclient/send_message/zaqar_sample.py samples/zaqar/sendmail.py samples/zaqar/subscriber_service_sample.py tools/test-setup.sh tools/doc/find_autodoc_modules.sh tools/doc/generate_autodoc_index.sh zaqar/__init__.py zaqar/bootstrap.py zaqar/context.py zaqar/i18n.py zaqar/version.py zaqar.egg-info/PKG-INFO zaqar.egg-info/SOURCES.txt zaqar.egg-info/dependency_links.txt zaqar.egg-info/entry_points.txt zaqar.egg-info/not-zip-safe zaqar.egg-info/pbr.json zaqar.egg-info/requires.txt zaqar.egg-info/top_level.txt zaqar/api/__init__.py zaqar/api/handler.py zaqar/api/v1_1/__init__.py zaqar/api/v1_1/request.py zaqar/api/v1_1/response.py zaqar/api/v2/__init__.py zaqar/api/v2/endpoints.py zaqar/api/v2/request.py zaqar/api/v2/response.py zaqar/bench/__init__.py zaqar/bench/conductor.py zaqar/bench/config.py zaqar/bench/consumer.py zaqar/bench/helpers.py zaqar/bench/observer.py zaqar/bench/producer.py zaqar/cmd/__init__.py zaqar/cmd/gc.py zaqar/cmd/server.py zaqar/cmd/status.py zaqar/common/__init__.py zaqar/common/access.py zaqar/common/auth.py zaqar/common/cache.py zaqar/common/cli.py zaqar/common/consts.py zaqar/common/decorators.py zaqar/common/errors.py zaqar/common/pipeline.py zaqar/common/urls.py zaqar/common/utils.py zaqar/common/api/__init__.py zaqar/common/api/api.py zaqar/common/api/errors.py zaqar/common/api/request.py zaqar/common/api/response.py zaqar/common/api/utils.py zaqar/common/api/schemas/__init__.py zaqar/common/api/schemas/flavors.py zaqar/common/api/schemas/pools.py zaqar/common/api/schemas/v1_1/__init__.py zaqar/common/api/schemas/v1_1/flavors.py zaqar/common/policies/__init__.py zaqar/common/policies/base.py zaqar/common/policies/claims.py zaqar/common/policies/flavors.py zaqar/common/policies/health.py zaqar/common/policies/messages.py zaqar/common/policies/pools.py zaqar/common/policies/queues.py zaqar/common/policies/subscription.py zaqar/common/policies/topics.py zaqar/common/storage/__init__.py zaqar/common/storage/select.py zaqar/common/transport/__init__.py zaqar/common/transport/wsgi/__init__.py zaqar/common/transport/wsgi/helpers.py 
zaqar/conf/__init__.py zaqar/conf/default.py zaqar/conf/drivers.py zaqar/conf/drivers_management_store_mongodb.py zaqar/conf/drivers_management_store_redis.py zaqar/conf/drivers_management_store_sqlalchemy.py zaqar/conf/drivers_message_store_mongodb.py zaqar/conf/drivers_message_store_redis.py zaqar/conf/drivers_message_store_swift.py zaqar/conf/drivers_transport_websocket.py zaqar/conf/drivers_transport_wsgi.py zaqar/conf/notification.py zaqar/conf/opts.py zaqar/conf/pooling_catalog.py zaqar/conf/profiler.py zaqar/conf/signed_url.py zaqar/conf/storage.py zaqar/conf/transport.py zaqar/extraspec/__init__.py zaqar/extraspec/tasks/__init__.py zaqar/extraspec/tasks/messagecode.py zaqar/hacking/__init__.py zaqar/hacking/checks.py zaqar/locale/de/LC_MESSAGES/zaqar.po zaqar/locale/en_GB/LC_MESSAGES/zaqar.po zaqar/locale/es/LC_MESSAGES/zaqar.po zaqar/locale/id/LC_MESSAGES/zaqar.po zaqar/notification/__init__.py zaqar/notification/notifier.py zaqar/notification/tasks/__init__.py zaqar/notification/tasks/mailto.py zaqar/notification/tasks/trust.py zaqar/notification/tasks/webhook.py zaqar/storage/__init__.py zaqar/storage/base.py zaqar/storage/configuration.py zaqar/storage/errors.py zaqar/storage/pipeline.py zaqar/storage/pooling.py zaqar/storage/utils.py zaqar/storage/mongodb/__init__.py zaqar/storage/mongodb/catalogue.py zaqar/storage/mongodb/claims.py zaqar/storage/mongodb/controllers.py zaqar/storage/mongodb/driver.py zaqar/storage/mongodb/flavors.py zaqar/storage/mongodb/messages.py zaqar/storage/mongodb/pools.py zaqar/storage/mongodb/queues.py zaqar/storage/mongodb/subscriptions.py zaqar/storage/mongodb/topic_messages.py zaqar/storage/mongodb/topics.py zaqar/storage/mongodb/utils.py zaqar/storage/redis/__init__.py zaqar/storage/redis/catalogue.py zaqar/storage/redis/claims.py zaqar/storage/redis/controllers.py zaqar/storage/redis/driver.py zaqar/storage/redis/flavors.py zaqar/storage/redis/messages.py zaqar/storage/redis/models.py zaqar/storage/redis/pools.py zaqar/storage/redis/queues.py zaqar/storage/redis/scripting.py zaqar/storage/redis/subscriptions.py zaqar/storage/redis/utils.py zaqar/storage/redis/scripts/claim_messages.lua zaqar/storage/redis/scripts/index_messages.lua zaqar/storage/sqlalchemy/__init__.py zaqar/storage/sqlalchemy/catalogue.py zaqar/storage/sqlalchemy/controllers.py zaqar/storage/sqlalchemy/driver.py zaqar/storage/sqlalchemy/flavors.py zaqar/storage/sqlalchemy/pools.py zaqar/storage/sqlalchemy/queues.py zaqar/storage/sqlalchemy/tables.py zaqar/storage/sqlalchemy/utils.py zaqar/storage/sqlalchemy/migration/__init__.py zaqar/storage/sqlalchemy/migration/alembic.ini zaqar/storage/sqlalchemy/migration/cli.py zaqar/storage/sqlalchemy/migration/alembic_migrations/README.md zaqar/storage/sqlalchemy/migration/alembic_migrations/env.py zaqar/storage/sqlalchemy/migration/alembic_migrations/script.py.mako zaqar/storage/sqlalchemy/migration/alembic_migrations/versions/001_liberty.py zaqar/storage/sqlalchemy/migration/alembic_migrations/versions/002_placeholder.py zaqar/storage/sqlalchemy/migration/alembic_migrations/versions/003_placeholder.py zaqar/storage/sqlalchemy/migration/alembic_migrations/versions/004_placeholder.py zaqar/storage/sqlalchemy/migration/alembic_migrations/versions/005_placeholder.py zaqar/storage/sqlalchemy/migration/alembic_migrations/versions/006_queens.py zaqar/storage/sqlalchemy/migration/alembic_migrations/versions/007_stein.py zaqar/storage/swift/__init__.py zaqar/storage/swift/claims.py zaqar/storage/swift/controllers.py zaqar/storage/swift/driver.py 
zaqar/storage/swift/messages.py zaqar/storage/swift/subscriptions.py zaqar/storage/swift/utils.py zaqar/tests/__init__.py zaqar/tests/base.py zaqar/tests/faulty_storage.py zaqar/tests/helpers.py zaqar/tests/etc/drivers_storage_invalid.conf zaqar/tests/etc/drivers_transport_invalid.conf zaqar/tests/etc/functional-tests.conf zaqar/tests/etc/functional-zaqar.conf zaqar/tests/etc/keystone_auth.conf zaqar/tests/etc/policy.yaml zaqar/tests/etc/websocket_mongodb.conf zaqar/tests/etc/websocket_mongodb_keystone_auth.conf zaqar/tests/etc/websocket_mongodb_subscriptions.conf zaqar/tests/etc/wsgi_faulty.conf zaqar/tests/etc/wsgi_fifo_mongodb.conf zaqar/tests/etc/wsgi_mongodb.conf zaqar/tests/etc/wsgi_mongodb_default_limits.conf zaqar/tests/etc/wsgi_mongodb_pooled.conf zaqar/tests/etc/wsgi_mongodb_pooled_disable_virtual_pool.conf zaqar/tests/etc/wsgi_mongodb_validation.conf zaqar/tests/etc/wsgi_redis.conf zaqar/tests/etc/wsgi_redis_pooled.conf zaqar/tests/etc/wsgi_sqlalchemy.conf zaqar/tests/etc/wsgi_sqlalchemy_pooled.conf zaqar/tests/etc/wsgi_swift.conf zaqar/tests/functional/__init__.py zaqar/tests/functional/base.py zaqar/tests/functional/config.py zaqar/tests/functional/helpers.py zaqar/tests/functional/http.py zaqar/tests/functional/websocket/__init__.py zaqar/tests/functional/websocket/test_queues.py zaqar/tests/functional/wsgi/__init__.py zaqar/tests/functional/wsgi/test_versions.py zaqar/tests/functional/wsgi/v1_1/__init__.py zaqar/tests/functional/wsgi/v1_1/test_claims.py zaqar/tests/functional/wsgi/v1_1/test_health.py zaqar/tests/functional/wsgi/v1_1/test_messages.py zaqar/tests/functional/wsgi/v1_1/test_pools.py zaqar/tests/functional/wsgi/v1_1/test_queues.py zaqar/tests/functional/wsgi/v2/__init__.py zaqar/tests/functional/wsgi/v2/test_subscriptions.py zaqar/tests/unit/__init__.py zaqar/tests/unit/test_bootstrap.py zaqar/tests/unit/cmd/__init__.py zaqar/tests/unit/common/__init__.py zaqar/tests/unit/common/test_api.py zaqar/tests/unit/common/test_decorators.py zaqar/tests/unit/common/test_pipeline.py zaqar/tests/unit/common/test_request.py zaqar/tests/unit/common/test_urls.py zaqar/tests/unit/common/storage/__init__.py zaqar/tests/unit/common/storage/test_select.py zaqar/tests/unit/common/storage/test_utils.py zaqar/tests/unit/hacking/__init__.py zaqar/tests/unit/hacking/test_hacking.py zaqar/tests/unit/notification/__init__.py zaqar/tests/unit/notification/test_notifier.py zaqar/tests/unit/storage/__init__.py zaqar/tests/unit/storage/base.py zaqar/tests/unit/storage/test_impl_mongodb.py zaqar/tests/unit/storage/test_impl_redis.py zaqar/tests/unit/storage/test_impl_sqlalchemy.py zaqar/tests/unit/storage/test_impl_swift.py zaqar/tests/unit/storage/test_pool_catalog_new.py zaqar/tests/unit/storage/test_utils.py zaqar/tests/unit/storage/sqlalchemy_migration/__init__.py zaqar/tests/unit/storage/sqlalchemy_migration/test_db_manage_cli.py zaqar/tests/unit/storage/sqlalchemy_migration/test_migrations.py zaqar/tests/unit/storage/sqlalchemy_migration/test_migrations_base.py zaqar/tests/unit/transport/__init__.py zaqar/tests/unit/transport/test_acl.py zaqar/tests/unit/transport/websocket/__init__.py zaqar/tests/unit/transport/websocket/base.py zaqar/tests/unit/transport/websocket/test_protocol.py zaqar/tests/unit/transport/websocket/utils.py zaqar/tests/unit/transport/websocket/v2/__init__.py zaqar/tests/unit/transport/websocket/v2/test_auth.py zaqar/tests/unit/transport/websocket/v2/test_claims.py zaqar/tests/unit/transport/websocket/v2/test_messages.py 
zaqar/tests/unit/transport/websocket/v2/test_queue_lifecycle.py zaqar/tests/unit/transport/websocket/v2/test_subscriptions.py zaqar/tests/unit/transport/wsgi/__init__.py zaqar/tests/unit/transport/wsgi/base.py zaqar/tests/unit/transport/wsgi/test_utils.py zaqar/tests/unit/transport/wsgi/test_version.py zaqar/tests/unit/transport/wsgi/v1_1/__init__.py zaqar/tests/unit/transport/wsgi/v1_1/test_auth.py zaqar/tests/unit/transport/wsgi/v1_1/test_claims.py zaqar/tests/unit/transport/wsgi/v1_1/test_default_limits.py zaqar/tests/unit/transport/wsgi/v1_1/test_health.py zaqar/tests/unit/transport/wsgi/v1_1/test_home.py zaqar/tests/unit/transport/wsgi/v1_1/test_media_type.py zaqar/tests/unit/transport/wsgi/v1_1/test_messages.py zaqar/tests/unit/transport/wsgi/v1_1/test_ping.py zaqar/tests/unit/transport/wsgi/v1_1/test_queue_lifecycle.py zaqar/tests/unit/transport/wsgi/v1_1/test_validation.py zaqar/tests/unit/transport/wsgi/v2_0/__init__.py zaqar/tests/unit/transport/wsgi/v2_0/test_auth.py zaqar/tests/unit/transport/wsgi/v2_0/test_claims.py zaqar/tests/unit/transport/wsgi/v2_0/test_default_limits.py zaqar/tests/unit/transport/wsgi/v2_0/test_flavors_new.py zaqar/tests/unit/transport/wsgi/v2_0/test_health.py zaqar/tests/unit/transport/wsgi/v2_0/test_home.py zaqar/tests/unit/transport/wsgi/v2_0/test_media_type.py zaqar/tests/unit/transport/wsgi/v2_0/test_messages.py zaqar/tests/unit/transport/wsgi/v2_0/test_ping.py zaqar/tests/unit/transport/wsgi/v2_0/test_pools_new.py zaqar/tests/unit/transport/wsgi/v2_0/test_purge.py zaqar/tests/unit/transport/wsgi/v2_0/test_queue_lifecycle.py zaqar/tests/unit/transport/wsgi/v2_0/test_subscriptions.py zaqar/tests/unit/transport/wsgi/v2_0/test_topic_lifecycle.py zaqar/tests/unit/transport/wsgi/v2_0/test_urls.py zaqar/tests/unit/transport/wsgi/v2_0/test_validation.py zaqar/transport/__init__.py zaqar/transport/acl.py zaqar/transport/base.py zaqar/transport/encryptor.py zaqar/transport/utils.py zaqar/transport/validation.py zaqar/transport/middleware/__init__.py zaqar/transport/middleware/auth.py zaqar/transport/middleware/cors.py zaqar/transport/middleware/profile.py zaqar/transport/websocket/__init__.py zaqar/transport/websocket/driver.py zaqar/transport/websocket/factory.py zaqar/transport/websocket/protocol.py zaqar/transport/wsgi/__init__.py zaqar/transport/wsgi/app.py zaqar/transport/wsgi/driver.py zaqar/transport/wsgi/errors.py zaqar/transport/wsgi/utils.py zaqar/transport/wsgi/version.py zaqar/transport/wsgi/v1_1/__init__.py zaqar/transport/wsgi/v1_1/claims.py zaqar/transport/wsgi/v1_1/flavors.py zaqar/transport/wsgi/v1_1/health.py zaqar/transport/wsgi/v1_1/homedoc.py zaqar/transport/wsgi/v1_1/messages.py zaqar/transport/wsgi/v1_1/ping.py zaqar/transport/wsgi/v1_1/pools.py zaqar/transport/wsgi/v1_1/queues.py zaqar/transport/wsgi/v1_1/stats.py zaqar/transport/wsgi/v2_0/__init__.py zaqar/transport/wsgi/v2_0/claims.py zaqar/transport/wsgi/v2_0/flavors.py zaqar/transport/wsgi/v2_0/health.py zaqar/transport/wsgi/v2_0/homedoc.py zaqar/transport/wsgi/v2_0/messages.py zaqar/transport/wsgi/v2_0/ping.py zaqar/transport/wsgi/v2_0/pools.py zaqar/transport/wsgi/v2_0/purge.py zaqar/transport/wsgi/v2_0/queues.py zaqar/transport/wsgi/v2_0/stats.py zaqar/transport/wsgi/v2_0/subscriptions.py zaqar/transport/wsgi/v2_0/topic.py zaqar/transport/wsgi/v2_0/topic_purge.py zaqar/transport/wsgi/v2_0/topic_stats.py zaqar/transport/wsgi/v2_0/urls.py././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924742.0 
zaqar-20.1.0.dev29/zaqar.egg-info/dependency_links.txt0000664000175100017510000000000115033040006021615 0ustar00mylesmyles ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924742.0 zaqar-20.1.0.dev29/zaqar.egg-info/entry_points.txt0000664000175100017510000000416415033040006021052 0ustar00mylesmyles[console_scripts] zaqar-bench = zaqar.bench.conductor:main zaqar-gc = zaqar.cmd.gc:run zaqar-server = zaqar.cmd.server:run zaqar-sql-db-manage = zaqar.storage.sqlalchemy.migration.cli:main zaqar-status = zaqar.cmd.status:main [oslo.config.opts] zaqar = zaqar.conf.opts:list_opts [oslo.policy.policies] zaqar = zaqar.common.policies:list_rules [zaqar.control.storage] faulty = zaqar.tests.faulty_storage:ControlDriver mongodb = zaqar.storage.mongodb.driver:ControlDriver redis = zaqar.storage.redis.driver:ControlDriver sqlalchemy = zaqar.storage.sqlalchemy.driver:ControlDriver [zaqar.data.storage] faulty = zaqar.tests.faulty_storage:DataDriver mongodb = zaqar.storage.mongodb.driver:DataDriver mongodb.fifo = zaqar.storage.mongodb.driver:FIFODataDriver redis = zaqar.storage.redis.driver:DataDriver swift = zaqar.storage.swift.driver:DataDriver [zaqar.extraspec.tasks] messagecode = zaqar.extraspec.tasks.messagecode:MessageCodeAuthentication [zaqar.notification.tasks] http = zaqar.notification.tasks.webhook:WebhookTask https = zaqar.notification.tasks.webhook:WebhookTask mailto = zaqar.notification.tasks.mailto:MailtoTask trust+http = zaqar.notification.tasks.trust:TrustTask trust+https = zaqar.notification.tasks.trust:TrustTask [zaqar.storage.mongodb.driver.queue.stages] message_queue_handler = zaqar.storage.mongodb.messages:MessageQueueHandler [zaqar.storage.mongodb.driver.topic.stages] message_queue_handler = zaqar.storage.mongodb.topic_messages:MessageTopicHandler [zaqar.storage.redis.driver.queue.stages] message_queue_handler = zaqar.storage.redis.messages:MessageQueueHandler [zaqar.storage.redis.driver.topic.stages] message_queue_handler = zaqar.storage.redis.messages:MessageTopicHandler [zaqar.storage.stages] zaqar.notification.notifier = zaqar.notification.notifier:NotifierDriver [zaqar.storage.swift.driver.queue.stages] message_queue_handler = zaqar.storage.swift.messages:MessageQueueHandler [zaqar.storage.swift.driver.topic.stages] message_queue_handler = zaqar.storage.swift.messages:MessageTopicHandler [zaqar.transport] websocket = zaqar.transport.websocket.driver:Driver wsgi = zaqar.transport.wsgi.driver:Driver ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924742.0 zaqar-20.1.0.dev29/zaqar.egg-info/not-zip-safe0000664000175100017510000000000115033040006017775 0ustar00mylesmyles ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924742.0 zaqar-20.1.0.dev29/zaqar.egg-info/pbr.json0000664000175100017510000000006015033040006017221 0ustar00mylesmyles{"git_version": "10fdc647", "is_release": false}././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924742.0 zaqar-20.1.0.dev29/zaqar.egg-info/requires.txt0000664000175100017510000000164115033040006020151 0ustar00mylesmylespbr!=2.1.0,>=2.0.0 alembic>=0.9.6 cryptography>=2.7 falcon>=3.0.0 jsonschema>=3.2.0 keystonemiddleware>=9.1.0 msgpack>=1.0.0 python-swiftclient>=3.10.1 WebOb>=1.7.1 stevedore>=3.2.2 oslo.cache>=1.26.0 oslo.concurrency>=5.0.1 oslo.config>=8.3.2 oslo.context>=2.19.2 oslo.db>=11.0.0 oslo.i18n>=3.15.3 oslo.log>=4.6.1 oslo.messaging>=12.5.0 oslo.reports>=2.2.0 oslo.serialization>=4.2.0 
oslo.upgradecheck>=1.3.0 oslo.utils>=4.12.1 oslo.policy>=4.5.0 osprofiler>=1.4.0 SQLAlchemy>=1.3.19 autobahn>=22.3.2 requests>=2.25.0 futurist>=1.2.0 [mongodb] pymongo>=3.6.0 [mysql] PyMySQL>=0.8.0 [redis] redis>=3.4.0 [test] hacking<6.2.0,>=6.1.0 redis>=3.4.0 pymongo>=3.6.0 python-swiftclient>=3.10.1 websocket-client>=0.44.0 PyMySQL>=0.8.0 coverage!=4.4,>=4.0 cryptography>=2.7 ddt>=1.0.1 doc8>=0.8.1 Pygments>=2.2.0 fixtures>=3.0.0 testscenarios>=0.4 testtools>=2.2.0 testresources>=2.0.0 oslotest>=3.2.0 stestr>=2.0.0 osprofiler>=1.4.0 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751924742.0 zaqar-20.1.0.dev29/zaqar.egg-info/top_level.txt0000664000175100017510000000000615033040006020275 0ustar00mylesmyleszaqar