aodh-19.0.0/.coveragerc:

[run]
branch = True
source = aodh
omit = aodh/tests/*

[report]
ignore_errors = True

aodh-19.0.0/.mailmap:

# Format is:
#
#
Adam Gandelman Alan Pevec Alexei Kornienko ChangBo Guo(gcb) Chang Bo Guo Chinmaya Bharadwaj chinmay Clark Boylan Doug Hellmann Fei Long Wang Fengqian Gao Fengqian Fengqian Gao Fengqian.Gao Gordon Chung gordon chung Gordon Chung Gordon Chung Gordon Chung gordon chung Ildiko Vancsa Ildiko John H. Tran John Tran Julien Danjou LiuSheng liu-sheng Mehdi Abaakouk Nejc Saje Nejc Saje Nicolas Barcet (nijaba) Pádraig Brady Rich Bowen Sandy Walsh Sascha Peilicke Sean Dague Shengjie Min shengjie-min Shuangtai Tian shuangtai Swann Croiset ZhiQiang Fan

aodh-19.0.0/.stestr.conf:

[DEFAULT]
test_path=${OS_TEST_PATH:-./aodh/tests/unit}
top_dir=./
# The group_regex describes how stestr will group tests into the same process
# when running concurrently. The following ensures that gabbi tests coming from
# the same YAML file are all in the same process. This is important because
# each YAML file represents an ordered sequence of HTTP requests. Note that
# tests which do not match this regex will not be grouped in any special way.
# See the following for more details.
# http://stestr.readthedocs.io/en/latest/MANUAL.html#grouping-tests
# https://gabbi.readthedocs.io/en/latest/#purpose
group_regex=(gabbi\.(suitemaker|driver)\.test_gabbi_([^_]+))_
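The comment block above explains the grouping rule: stestr keys tests on the portion of the test id matched by group_regex, so every test generated from one gabbi YAML file runs in the same worker process. The following minimal Python sketch illustrates that keying; the test ids are hypothetical examples of gabbi-generated names, and the "matched portion" behaviour is paraphrased from the stestr manual linked above rather than taken from this repository.

import re

# Same expression as group_regex in .stestr.conf above.
GROUP_REGEX = re.compile(r"(gabbi\.(suitemaker|driver)\.test_gabbi_([^_]+))_")

def group_key(test_id):
    """Return the grouping key for a test id, or None if it is not grouped."""
    match = GROUP_REGEX.match(test_id)
    return match.group(0) if match else None

# Hypothetical ids: two tests from one YAML file, one from another.
ids = [
    "gabbi.suitemaker.test_gabbi_alarms_create_alarm.test_request",
    "gabbi.suitemaker.test_gabbi_alarms_list_alarms.test_request",
    "gabbi.suitemaker.test_gabbi_capabilities_get.test_request",
]
for test_id in ids:
    print(group_key(test_id), "<-", test_id)
# The first two ids share the key "gabbi.suitemaker.test_gabbi_alarms_", so
# their ordered HTTP requests stay in one process; the third id gets its own key.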
aodh-19.0.0/.zuul.yaml:

- project:
    queue: telemetry
    templates:
      - openstack-python3-jobs
      - publish-openstack-docs-pti
      - release-notes-jobs-python3
      - check-requirements
    check:
      jobs:
        - aodh-tempest-plugin-mysql
        - aodh-tempest-plugin-postgresql
        - telemetry-dsvm-integration:
            irrelevant-files: &aodh-irrelevant-files
              - ^(test-|)requirements.txt$
              - ^setup.cfg$
              - ^doc/.*$
              - ^.*\.rst$
              - ^releasenotes/.*$
              - ^aodh/tests/.*$
              - ^aodh/locale/.*$
              - ^tools/.*$
              - ^tox.ini$
            vars:
              # TODO: Update the plugins so they work with global venv
              devstack_localrc:
                GLOBAL_VENV: False
              devstack_local_conf:
                test-config:
                  $TEMPEST_CONFIG:
                    telemetry_services:
                      metric_backends: gnocchi
        - telemetry-dsvm-integration-ipv6-only:
            irrelevant-files: *aodh-irrelevant-files
            vars:
              # TODO: Update the plugins so they work with global venv
              devstack_localrc:
                GLOBAL_VENV: False
              devstack_local_conf:
                test-config:
                  $TEMPEST_CONFIG:
                    telemetry_services:
                      metric_backends: gnocchi
        - telemetry-dsvm-integration-centos-9s:
            irrelevant-files: *aodh-irrelevant-files
            vars:
              devstack_local_conf:
                test-config:
                  $TEMPEST_CONFIG:
                    telemetry_services:
                      metric_backends: gnocchi
        - telemetry-dsvm-integration-centos-9s-fips:
            irrelevant-files: *aodh-irrelevant-files
            vars:
              devstack_local_conf:
                test-config:
                  $TEMPEST_CONFIG:
                    telemetry_services:
                      metric_backends: gnocchi
    gate:
      jobs:
        - aodh-tempest-plugin-mysql
        - aodh-tempest-plugin-postgresql
        - telemetry-dsvm-integration:
            irrelevant-files: *aodh-irrelevant-files
            vars:
              # TODO: Update the plugins so they work with global venv
              devstack_localrc:
                GLOBAL_VENV: False
              devstack_local_conf:
                test-config:
                  $TEMPEST_CONFIG:
                    telemetry_services:
                      metric_backends: gnocchi, prometheus
                    service_available:
                      gnocchi: True
                      sg-core: True
        - telemetry-dsvm-integration-ipv6-only:
            irrelevant-files: *aodh-irrelevant-files
            vars:
              # TODO: Update the plugins so they work with global venv
              devstack_localrc:
                GLOBAL_VENV: False
              devstack_local_conf:
                test-config:
                  $TEMPEST_CONFIG:
                    telemetry_services:
                      metric_backends: gnocchi
        - telemetry-dsvm-integration-centos-9s:
            irrelevant-files: *aodh-irrelevant-files
            vars:
              devstack_local_conf:
                test-config:
                  $TEMPEST_CONFIG:
                    telemetry_services:
                      metric_backends: gnocchi
        - telemetry-dsvm-integration-centos-9s-fips:
            irrelevant-files: *aodh-irrelevant-files
            vars:
              devstack_local_conf:
                test-config:
                  $TEMPEST_CONFIG:
                    telemetry_services:
                      metric_backends: gnocchi

- job:
    name: aodh-tempest-plugin-postgresql
    parent: telemetry-tempest-base
    irrelevant-files: *aodh-irrelevant-files
    vars:
      devstack_services:
        postgresql: true
        mysql: false
      devstack_localrc:
        DATABASE_TYPE: postgresql
        AODH_BACKEND: postgresql
        USE_PYTHON3: True
        # TODO: Update the plugins so they work with global venv
        GLOBAL_VENV: False
      devstack_local_conf:
        test-config:
          $TEMPEST_CONFIG:
            telemetry_services:
              metric_backends: gnocchi
              alarm_backend: postgresql
            service_available:
              gnocchi: True
              sg-core: False
      tempest_test_regex: telemetry_tempest_plugin.aodh

- job:
    name: aodh-tempest-plugin-mysql
    parent: telemetry-tempest-base
    irrelevant-files: *aodh-irrelevant-files
    vars:
      devstack_localrc:
        AODH_BACKEND: "mysql"
        USE_PYTHON3: True
        # TODO: Update the plugins so they work with global venv
        GLOBAL_VENV: False
      devstack_local_conf:
        test-config:
          $TEMPEST_CONFIG:
            telemetry_services:
              metric_backends: gnocchi
              alarm_backend: mysql
            service_available:
              gnocchi: True
              sg-core: False
      tempest_test_regex: telemetry_tempest_plugin.aodh

aodh-19.0.0/AUTHORS:

Abhishek Chanda Abhishek Lekshmanan Abhishek Lekshmanan Adelina Tuvenie Ajaya Agrawal Akhil Hingane Ala Rezmerita Alessandro Pilotti Alexei Kornienko Alexey Weyl Alfredo Moralejo Ana Malagon Andreas Jaeger Andreas Jaeger Andrew Hutchings Andrew Melton Angus Lees Angus Salkeld Anh Tran Ann Kamyshnikova Artur Svechnikov Balazs Gibizer Bartosz Górski Ben Nemec Ben Nemec Bhagyashri Shewale Boden R Boris Pavlovic Brad Pokorny Brant Knudson Brian Cline Brooklyn Chen Béla Vancsics Can ZHANG Cedric Soulas Chad Lung Chandan Kumar ChangBo Guo(gcb) Chaozhe.Chen ChenZheng Chinmaya Bharadwaj Chmouel Boudjnah Chris Dent Chris Dent Chris Sibbitt Christian Berendt Christian Martinez Christian Schwede Chuck Short Clark Boylan Claudiu Belu Corey Bryant Cyril Roelandt Dai Dang Van Damian Van Vuuren Dan Florea Dan Prince Dao Cong Tien Darren Birkett Davanum Srinivas David Peraza David Wahlstrom Dazhao Debo~ Dutta Dina Belova Dirk Mueller Divya Dmitriy Rabotyagov Doug Hellmann Dr. Jens Harbott Dung Ha Duong Ha-Quang Edwin Zhai Emilien Macchi Emma Foley Endre Karlson Eoghan Glynn Eoghan Glynn Eric Brown Erno Kuvaja Eyal Fabio Giannetti Fei Long Wang Feng Xi Yan Fengqian Gao Flavio Percoco François Charlier François Rossigneux Frederic FAURE Gangyi Luo Gauvain Pocentek Gerard Garcia Ghanshyam Mann Gordon Chung Graham Binns Guangyu Suo Hang Liu Hangdong Zhang Hanxi Hanxi Liu Haomeng, Wang Harri Hämäläinen Hervé Beraud Hisashi Osanai Hoang Trung Hieu Ian Wienand Igor Degtiarov Ihar Hrachyshka Ildiko Vancsa Ilya Sviridov Ilya Tyaptin Ionuț Arțăriși Jake Liu James E. Blair James E. Blair James Page Jaromir Wysoglad Jaromír Wysoglad Jason Myers Jason Zhang Javier Pena Jay Lau Jay Pipes Jeffrey Guan Jeremy Stanley Jim Rollenhagen Jiong Liu Joanna H. Huang Joe Gordon Joe H. Rahme John H. Tran John Herndon JordanP Jorge Niedbalski Juan Antonio Osorio Robles Julien Danjou Justin SB KIYOHIRO ADACHI Kamil Rykowski Keith Byrne Ken Pepple Ken'ichi Ohmichi Ken'ichi Ohmichi Kennan Kennan Kevin_Zheng Kishore Juigil Koert van der Veer Komei Shimamura Ladislav Smola Lan Qi song Lance Bragstad Lena Novokshonova LiZekun <2954674728@qq.com> Lianhao Lu Lingxian Kong LinuxJedi LiuNanke LiuSheng Luis A.
Garcia Lukas Ruzicka Luong Anh Tuan Maho Koshiya Marios Andreou Mark McClain Mark McLoughlin Martin Geisler Martin Kletzander Martin Mágr Mathew Odden Mathieu Gagné Matt Riedemann Matthias Runge Mehdi Abaakouk Mehdi Abaakouk Michael Krotscheck Michael Still Michał Jastrzębski Mike Spreitzer Monsyne Dragon Monty Taylor Nadya Privalova Nadya Shakhat Nagasai Vinaykumar Kapalavai Nejc Saje Nguyen Hung Phuong Nguyen Van Trung Nick Barcet Nick Wiecha Nicolas Barcet (nijaba) Noorul Islam K M Octavian Ciuhandu OpenStack Release Bot Paul Belanger Pavlo Shchelokovskyy Pawel Suder Peter Portante Petr Kovar Phil Neal Piyush Masrani Pradeep Kilambi Pradeep Kilambi Pradeep Kumar Singh Pradyumna Sampath Pádraig Brady Qiaowei Ren Rabi Mishra Rafael Folco Rafael Rivero Remo Mattei Rich Bowen Rikimaru Honjo Rob Raymond Robert Mizielski Rocky Rohit Jaiswal Romain Soufflet Roman Bogorodskiy Rosario Di Somma Ruslan Aliev Russell Bryant Ryan Petrello Ryota MIBU Saba Ahmed Sachin Sam Morrison Samta Samuel Merritt Sandy Walsh Sanja Nosan Sascha Peilicke Sean Dague Sean McGinnis Sergey Lukjanov Sergey Vilgelm Seyeong Kim Shane Wang Shengjie Min Shilla Saebi Shuangtai Tian Shuquan Huang Simona Iuliana Toader Sofer Athlan-Guyot Srinivas Sakhamuri Stas Maksimov Stephen Balukoff Stephen Finucane Stephen Gran Steve Lewis Steve Martinelli Steven Berler Surya Prabhakar Svetlana Shturm Swami Reddy Swann Croiset Swapnil Kulkarni (coolsvap) Sylvain Afchain Takashi Kajinami Takashi Kajinami Tatsuro Makita Terri Yu Thierry Carrez Thomas Bechtold Thomas Goirand Thomas Herve Thomas Herve Thomas Maddox Tong Li Tony Breeds Tovin Seven Trinh Nguyen Tuan Do Anh Ubuntu Uday T Kumar Victor Stinner Victor Stinner Vinay Kapalavai Vitalii Lebedynskyi Vitaly Gridnev Vladislav Kuzmin Vu Cong Tuan Wu Wenxiang XieYingYun Yaguang Tang Yanyan Hu Yarko Tymciurak Yassine Lamgarchal Yathiraj Udupi You Yamagata Yuanbin.Chen Yunhong, Jiang Zane Bitter Zhi Kun Liu Zhi Yan Liu ZhiQiang Fan ZhongShengping Zhongyue Luo akhiljain23 alextricity25 annegentle astacksu caoyuan ccrouch changyufei chenxing deepakmourya dongwenjuan eNovance emilienm florent fujioka yuuichi gecong1973 gengchc2 gengjh ghanshyam ghanshyam gord chung gugug guillaume pernot heha jacky06 joyce keliang kiwik-chenrui leizhang lijian lingyongxu lipan liujiong liuqing liusheng liyi lizheming lqslan lrqrun ls1175 lvdongbing lzhijun melissaml mizeng nellysmitt pangliye pengyuesheng prankul prankul mahajan qiaojian replay rtmdk sanjana sanuptpm sh.huang shangxiaobj shengjie min songwenping srsakhamuri tanlin terriyu unknown vagrant venkatamahesh vivek.nandavanam vivek.nandavanam wangqi wangxiyuan wangzihao wolfgang wu.shiming xhzhf xiaozhuangqing xingzhou xugang yangyawei yanheven yuqian yuyafei zhang-jinnan zhang.lei zhangguoqing zhangyangyang zhangyanxian zhouxinyong zhurong zjingbj ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/CONTRIBUTING.rst0000664000175000017500000000105700000000000015203 0ustar00zuulzuul00000000000000If you would like to contribute to the development of OpenStack, you must follow the steps documented at: https://docs.openstack.org/infra/manual/developers.html#development-workflow Once those steps have been completed, changes to OpenStack should be submitted for review via the Gerrit tool, following the workflow documented at: https://docs.openstack.org/infra/manual/developers.html#development-workflow Pull requests submitted through GitHub will be ignored. 
Bugs should be filed on Launchpad, not GitHub: https://bugs.launchpad.net/aodh ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866890.0 aodh-19.0.0/ChangeLog0000664000175000017500000037043500000000000014325 0ustar00zuulzuul00000000000000CHANGES ======= 19.0.0 ------ * Keep new RBAC disable by default * Isolate project scope and system scope * Add tempest configs to jobs * Remove SQLAlchemy tips jobs * Fix db upgrade with SQLAlchemy 2.0 * Remove old excludes * reno: Update master for unmaintained/zed * Imported Translations from Zanata * reno: Update master for unmaintained/xena * reno: Update master for unmaintained/wallaby * reno: Update master for unmaintained/victoria * Update master for stable/2024.1 18.0.0 ------ * Add job to test with SQLAlchemy master (2.x) * db: Replace use of Engine.execute() method * tests: Enable SQLAlchemy 2.0 deprecation warnings * Configure 'cache\_ok' for TypeDecorator implementation * db: Don't pass strings to 'Connection.execute' * db: Remove use of autocommit * db: Replace use of LegacyEngineFacade * Ensure [telemetry\_services] alarm\_backend is set properly * Fix releasenotes build af yoga moved to unmaintained * tox: Drop envdir * Replace md5 with oslo version * Bump hacking * Use zoneinfo instead of pytz if available * Catch specific exception when validating time zone * Add GITREPO for aodhclient * Update python classifier in setup.cfg * doc: Drop DJANGO\_SETTINGS\_MODULE environment * Dump loaded options during start up * Move functional tests to tempest\_plugin * setup: Re-add Python version classifiers * tests: Remove duplicate fixture config * setup: Remove unused dependencies * setup: Remove sqlalchemy extra dep * Add Prometheus evaluator * Remove python2 shebang from some files * Fix py311 job * Revert "Fix docs job" * Use devstack helper for installation * Disable GLOBAL\_VENV * Fix docs job * tox: Remove unnecessary configuration * Update master for stable/2023.2 * Fix bindep.txt for python 3.11 job(Debian Bookworm) 17.0.0 ------ * Imported Translations from Zanata * Add check-requirements job * Remove "test" extra deps from setup.cfg * Drop tenacity cap to align with global\_requirements * [coordination] backend\_url should be secret * Update master for stable/2023.1 * Add oslo.policy.enforcer entry point 16.0.0 ------ * Imported Translations from Zanata * Complete the revert of "Support Ceilometer API" * Make tox.ini tox 4.0 compatible * Imported Translations from Zanata * Switch to 2023.1 Python3 unit tests and generic template name * Update master for stable/zed 15.0.0 ------ * Imported Translations from Zanata * Imported Translations from Zanata * Fix compatibility with oslo.db 12.1.0 * Revert "Temporarily disable fips job" * Imported Translations from Zanata * Imported Translations from Zanata * Imported Translations from Zanata * Imported Translations from Zanata * Imported Translations from Zanata * Remove [coordination] check\_watchers * zuul: Use telemetry queue * Ignore Gnocchi API error when the metric is not yet created * Remove TripleO job * Temporarily disable fips job * Update python testing as per zed cycle testing runtime * Add Python 3.9 to supported runtimes * Imported Translations from Zanata * Replace CentOS Stream 8 by 9 * Update python testing as per zed cycle teting runtime * Remove unnecessary unicode prefixes * Bump minimum version of gnocchiclient for aggregats API support * Migrate evaluation\_interval to [evaluator] * Deprecate unused [DEFAULT] http\_timeout * 
Migrate to stestr * Add Python3 zed unit tests * Update master for stable/yoga 14.0.0 ------ * gnocchi: Use Dynamic Aggregates API * Load api-paste.ini from configuration directories first * Rename [coordination] heartbeat to hearbeat\_interval * Imported Translations from Zanata * Add Python3 yoga unit tests * Update master for stable/xena * Run TripleO jobs on CentOS8 instead of CentOS7 13.0.0 ------ * Adding FIPS job * Introduce Guru Meditation Reports into Aodh * Replace oslo\_utils.fnmatch with fnmatch * Fix oslo policy DeprecatedRule warnings * Suppress policy default change warnings * Changed minversion in tox to 3.18.0 * Bugs are in launchpad * Switch testing to Xena testing runtime * Deprecate unused [coordination] check\_watchers * sqlalchemy is used but not declared * setup.cfg: Replace dashes with underscores * Limit number of records deleted by aodh-expirer * Use py3 as the default runtime for tox * Imported Translations from Zanata * Cap tenacity < 7.0.0 * Add Python3 xena unit tests * Update master for stable/wallaby * Use python 3.8 for tox * Revert "Support Ceilometer API" 12.0.0 ------ * Imported Translations from Zanata * Imported Translations from Zanata * [goal] Deprecate the JSON formatted policy file * Update TOX\_CONSTRAINTS\_FILE * Remove six * Implement secure RBAC for alarms and quota policies * Add common personas to base policies * Update requirements to implement secure RBAC * Remove deprecated tail\_log function * Imported Translations from Zanata * Add Python3 wallaby unit tests * Update master for stable/victoria 11.0.0 ------ * Use SETUPTOOLS\_USE\_DISTUTILS=stdlib for global pip installs * Stop to use the \_\_future\_\_ module * Cap jsonschema 3.2.0 as the minimal version * Switch to newer openstackdocstheme and reno versions * Fix pygments style * Imported Translations from Zanata * Remove translation sections from setup.cfg * Imported Translations from Zanata * Add py38 package metadata * Add Python3 victoria unit tests * Update master for stable/ussuri * Add irrelevant-files for tempest jobs 10.0.0 ------ * Imported Translations from Zanata * Imported Translations from Zanata * Fix py38 error * Cleanup py27 support * Imported Translations from Zanata * Imported Translations from Zanata * Update hacking for Python3 * Imported Translations from Zanata * Replace third party mock with unittest.mock * Release note for quota API * Delete quota API * Imported Translations from Zanata * Documentation about quota management * Improve the quota check * Support quota API * Use config options when creating keystone session * Improve the description of aodh notifiers * Imported Translations from Zanata * Fix getting alarms * Imported Translations from Zanata * Support aodh-evaluator built-in active/active deployment mode * Some minor improvements for doc and log * Imported Translations from Zanata * Support Ceilometer API * Imported Translations from Zanata * Drop python 2.7 support and testing * heat notifier: Fix getting unhealthy resource names * Update master for stable/train 9.0.0 ----- * Update the constraints url * PDF documentation build * Update jsonschema according to requirements * Run 'telemetry-dsvm-integration-ipv6-only' job in gate * Add the native zuulv3 jobs instead of legacy way * Add \_static folder to fix docs build error * Imported Translations from Zanata * Heat notifier: notify alarm without Octavia tags * Bump the openstackdocstheme extension to 1.20 * Blacklist sphinx 2.1.0 (autodoc bug) * Add Python 3 Train unit tests * Byte encoding of 
member Id * Sync Sphinx requirement * Add user\_domain\_name to keystone client * Support Heat auto-healing notifier * Add install\_command in tox.ini * Replace git.openstack.org URLs with opendev.org URLs * Add loadbalancer\_member\_health type alarm rule * OpenDev Migration Patch * Add load balancer pool member evaluator * Dropping the py35 testing * inspect.getargspec is deprecated in py3 * Set Tempest's service\_availability setting for Aodh * Remove telemetry-tox-py37 * Add framework for aodh-status upgrade check * Integrate OSprofiler in Aodh * Replace openstack.org git:// URLs with https:// * Update master for stable/stein * add python 3.7 unit test job * Imported Translations from Zanata 8.0.0 ----- * Change in Aodh docs: * remove redundant line * Replace tripleo-scenario002-multinode with scenario002-standalone * Replace scenario001-multinode with scenario001-standalone * Change openstack-dev to openstack-discuss * remove those copy words occured twice times in newton.rst * PY3: Ensure \_refresh is passed str type for reason * update aodh notifier when there're multiple regions * Use standard py37 jobs, remove py35 * Imported Translations from Zanata * add python 3.6 unit test job * switch documentation job to new PTI * import zuul job settings from project-config * Imported Translations from Zanata * Imported Translations from Zanata * Update reno for stable/rocky 7.0.0 ----- * Add python 3.7 gating * Follow the new PTI for document build * remove outdated evaluation\_service documentation * Validation Check for 'query' params of alarm type 'event' * Update pypi url to new url * Fixing small typo * Invalid link to static file in doc * Support same projects in different domain * Revert "Support same projects in different domain" * Support same projects in different domain * fix tox python3 overrides * fix the loaction of api/v2 file * Fix the incorrect content indents * Trivial: Update pypi url to new url * Update "auth\_url" in install docs * Remove MIA maintainer * Update auth\_uri option to www\_authenticate\_uri * add "severity" value for alarm\_history * fix value of severity field * Fix conf not exist zaqar\_client not define error * Fix doc title format error * Don't include trust IDs in Alarm action output * change doc build command * Imported Translations from Zanata * Remove pbr warnerrors in favor of sphinx check * Imported Translations from Zanata * Imported Translations from Zanata * Update reno for stable/queens * Enable more extensions pep8 * fix internal doc links * Imported Translations from Zanata 6.0.0 ----- * Imported Translations from Zanata * Imported Translations from Zanata * fix partitioning documentation * remove unrelated ceilometer definition * Zuul: Remove project name * Remove use of unsupported TEMPEST\_SERVICES variable * remove remaining ceilometer threshold references * tempest: use new plugin + tripleo to experimental * Remove ceilometer-api alarm type * Remove ceilometer-api from test\_composite * Remove ceilometer-api from test\_threshold * Remove ceilometer-api from test\_complex\_query\_scenarios * Remove ceilometer-api from test\_storage\_scenarios * Remove ceilometer-api from test\_evaluator * Remove ceilometer-api from test\_alarm\_scenarios * change doc aodh bug tracker url * Imported Translations from Zanata * Revert "Replace jsonutils by ujson" * Imported Translations from Zanata * Remove aodh intree tempest plugin * Add missing dependency * Imported Translations from Zanata * tests: fix unexisting method self.fail * don't use last 
keystonemiddleware * update README.rst * Imported Translations from Zanata * storage: fix upgrade when password contains a % * Remove setting of version/release from releasenotes * Fix releasenotes builds * zuul: run TripleO jobs with new zuulv3 layout * Imported Translations from Zanata * Imported Translations from Zanata * Imported Translations from Zanata * Implement policy in code - reno and doc (end) * Implement policy in code (3) * Implement policy in code (2) * Implement policy in code (1) * Zuul: add file extension to playbook path * Replace jsonutils by ujson * revise the spelling of 'event' * remove branch specific job * Use oslo.config-sphinxext to build the conf options * Imported Translations from Zanata * [doc] fix word usage of composite alarm * doc: remove mention of combination alarms * Move legacy jobs to project * Add tox whitelist\_externals * Imported Translations from Zanata * Imported Translations from Zanata * Add granularity value to Gnocchi evaluators * Imported Translations from Zanata * Imported Translations from Zanata * Imported Translations from Zanata * Cleanup setup.cfg * Imported Translations from Zanata * fix gate * remove gnocchiclient cap * support new gnocchiclient interface * Imported Translations from Zanata * Update reno for stable/pike 5.0.0 ----- * Imported Translations from Zanata * Don't allow the user to pass in a trust ID * Replace the old auth\_opts to the new in notifier/zaqar.py * Imported Translations from Zanata * Typo input in "aodh alarm create" for param "--name" * gnocchi: set start/stop when validating alarm * Update and replace http with https for doc links in aodh * adjust section titles * add configuration folder * Replace deprecated test.attr with decorators.attr * remove custom theming * remove templates * Update aodh commands in doc * Fix releasenote RST syntax * Update and optimize documentation links * Added examples to create event based alarm * Update URL home-page in documents according to document migration * turn on warning-is-error for doc builds * add missing sphinx extension * move class api reference into contributor docs * Imported Translations from Zanata * Update the response table info of some sample commands in doc * tests: rework functional live tests * copy admin-guide * move install-guide under install dir * move developer docs under contributor * switch to openstackdocstheme * Clean maintainer list * Imported Translations from Zanata * gnocchi: return better 'insufficient data' reason * Don't translate alarm reason * Expose alarm state reason to API * Fix aodh-config-generator * Enable some off-by-default checks * Remove genconfig tox target * Add a description of the parameters to function * add sqlalchemy-utils * simplify crud notification test * Fix Zaqar notifier doc * gnocchi: fix alarms for unpriviledged user * Fix html\_last\_updated\_fmt for Python3 * Remove oslo.msg deprecation * Remove keystoneclient deprecation * cleanup aodh config instructions in install guide * tempest: remove deprecation * Deprecate olsotest.mockpatch in favor of native fixtures * Add \`aodh-config-generator' tool to generate sample configuration file * tests: simplify tox config * storage: Ensure pymysql is prefered * Remove deprecated oslo.config messages * tests: fix MultiStrOpt value * Remove sqlalchemy upper constraints * Imported Translations from Zanata * Optimize the link address * Fix expecting content-type headers * Fix some reST field lists in docstrings * [install-guide] Minor edits * Remove log translations * 
Gnocchi: don't fail to create valid alarm * Imported Translations from Zanata * Useing fixtures.MockPatch instead of mockpatch.Patch * Switch to use stable data\_utils * Update pbr requirement to match global requirement * deprecate threshold rule alarms * Fix reno title format * Install test dependencies in docs * Add keystoneauth1 in requirements * Update the doc about manually installation * Remove unused logging import * Trivial-fix: use domain\_id instead of domain\_name * Correct the doc link * gabbi: use history * Remove support for py34 * Fix the migration to use alarm\_history * Use more specific asserts in tests * Avoiding duplication of service available group in tempest plugin * Use https instead of http for git.openstack.org * Remove deprecated combination alarms * Update reno for stable/ocata 4.0.0 ----- * Stop shipping Apache2 configuration file * Switch to decorators.idempotent\_id * Fix all current typo bugs on Aodh project * Add sem-ver flag so pbr generates correct version * Move policy.json out of etc * Move api-paste file to common code location * install-guide: remove useless step * api: add auth\_mode option * enable cachetools for gnocchi alarms * modernise gabbi usage * property refactoring * Simple pip install fails for python3 * Simple pip install fails under python3 * composite: fix evaluation of trending state alarms * Enable coverage report in console output * Fix expecting content-type headers * Don't create multiple trust IDs per alarm * [doc] Note lack of constraints is a choice * Enable healthcheck app to check API status * Remove legacy policy file test * Replaces uuid.uuid4 with uuidutils.generate\_uuid() * Add missing webtest dependency in test * Remove API workers option * Remove notes about MongoDB * Replace retrying with tenacity * Add trust+zaqar:// action * Refactor ZaqarAlarmNotifier and fix tests * Change for more useful output in log notifier * Improve docs for ZaqarAlarmNotifier * Fix typo in plugin.sh * add testresources req * cli: Fix --alarm-id in alarm conversion * Replaced e.message with str(e) * Bump hacking to 0.12 * Imported Translations from Zanata * [instll] Update a more simple rabbitmq configuration * cors: update default configuration using cors' own set\_defaults funtion * Fix the endpoint type of zaqar notifier * add alarm.deletion notification * read data from stdout instead of stderr * Support keystone v3 for Zaqar notifier * devstack: fix mispelling of aodh-api in ENABLED\_SERVICES * Remove testtools dependency * Remove deprecated non-SQL drivers * Add http\_proxy\_to\_wsgi to config-generator * sqlalchemy: use DATETIME(fsp=6) rather than DECIMAL * Remove pecan\_debug option * Add http\_proxy\_to\_wsgi to api-paste * Handle case where sample-api is disabled * Enable release notes translation * Fix typo * Adds notes on how to launch aodh-api with uwsgi * Remove default=None when set value in Config * sqlalchemy: remove Ceilometer upgrade workaround * Fix locale builds * Update reno for stable/newton 3.0.0 ----- * Add pre-signed support for zaqar notifier * Imported Translations from Zanata * standardize release note page ordering * devstack: set correct port number for aodh-api * aodh-notifier: fix the launch bug * devstack: fix aodh-api launch 3.0.0.0b3 --------- * inmemory: add % parameter to formating string * add url in setup.cfg * Clean imports in code * Revert "Fix config group for SSL in tempest test" * remove default=None for config options * Fix config group for SSL in tempest test * Remove unnecessary confirm prompt * 
Limit Happybase to < 1.0.0 * Fix tempest.conf generation * Fix French locale build * Use proper international logging message * fix the %{} when string formating * Imported Translations from Zanata * Trival: Remove unused logging import * Delete openstack/common from the exclude list of flake8 in tox * Allow to extends the evaluator lookback window * Remove Nadya Privalova from core reviewers * Add Python 3.5 classifiers for Aodh 3.0.0.0b2 --------- * update .gitignore for install-guide/build * Imported Translations from Zanata * gnocchi: always set needed\_overlap for aggregation * Remove unused LOG object * Record the state transition reason in alarm's history data when evaluating * use Cotyledon lib * Add \_\_ne\_\_ built-in function * Add install-guide for aodh * Replace raw\_input with input to make PY3 compatible * sqlalchemy: allow to upgrade schema from Ceilometer Liberty * Make help string more accurate for rest notifier * Correct the order when sorting by "severity" * Fixing ordering of 'severity' in alarms * doc: remove leftover from docbookrestapi * Correct concurrency of gabbi tests for gabbi 1.22.0 * Fix trust notifier * Use "topics" instead of "topic" in Notifier initialization * Clean deprecated "rpc\_backend" in tests * Support combination alarms to composite alarms conversion * Imported Translations from Zanata * Imported Translations from Zanata * Add ca\_bundle path in ssl request * Add indexs of alarm.enabled and alarm.type * Catch DriverLoadFailure for get\_transport optional * Bump the oslo.messaging version * gabbi: fail test if no backend configured * Imported Translations from Zanata * Replace overtest by pifpaf * Make some tests more like Aodh tests * skip test\_create\_delete\_alarm\_with\_combination\_rule * tests/functional: enable Gabbi for all backends * Imported Translations from Zanata * fix typos in our doc, comment and releasenotes * Use pbr wsgi\_scripts to build aodh-api * Add pagination support for Aodh * Add a tool for migrating alarms data from NoSQL to SQL * api: deprecate and disable combination alarms * Update the home-page with developer documentation * Clean unrelated error of two tests * Remove unused option \`host' * Remove the unused dict\_to\_keyval and its test * gnocchi: log on warning level, not exception * Don't notify alarm on each refresh * remove alarm name unique constraint in each project * Update to hacking 0.11.0 * generate latest sample config file along with document * move aodh-config-generator.conf to etc/aodh dir * [Trivial] Remove an unused exception definition * Trival fix bug in docs * service: fix typo in option help for listener workers * Add batch listener support for event evaluator * [Trivial] Remove two unused methods of PartitionCoordinator * Add missing Python 3 classifiers * log alarm rest notifier response * Fix and improve the partition coordinator * support batch listener for aodh-notifier * Enable aodh service to be multi-processes * [Trivial] Improve alarm reason text * [Trivial] Use local conf instead of global conf * [Trivial] Remove api bin unit test * [Trivial] Add zaqar options to list\_opts * [Trivial] Remove AODH\_API\_LOG\_DIR option for devstack * Update the default log levels * Replace logging with oslo\_log * Remove the notify\_alarm method and refactor related tests * Add documentation about event alarm * promote log level to warning for invalid event * remove unused file pylintrc * remove todo for OS\_TEST\_PATH * remove local hacking check for oslo namespace and log debug * rm functions.sh * 
remove deprecated auth type password-aodh-legacy * update document * Clean and reorganize the API config options * replace eventlet timer with thread manner * Imported Translations from Zanata * use thread safe fnmatch * use static timestamps for api samples * add tempest to test requirement * document how to enable aodh stable branch in devstack * remove deprecated option alarm\_connection * add default value to functional test environment variables * fix some message string * Remove an unrelated comment * remove store\_events option in devstack/plugin.sh * install aodhclient instead of ceilometerclient * Imported Translations from Zanata * fix release note link in README.rst * Fix doc build if git is absent * Fix dependency from tempest-lib to os-testr * use default option for notification topics * Remove oslo.context dependency * Clean deprecations from old "alarm" group * Fix Aodh-alarm-evaluator recreates deleted alarms in some cases * Remove the deprecated RPC IPC code * remove non ascii character in doc * api: rename \_alarm to \_enforce\_rbac * api: stop relying on side-effect of \_alarm() * Raise Error when query history of an alarm that are not existed * Update reno for stable/mitaka 2.0.0 ----- * add missing hbase requirement * devstack: allow uwsgi deployments * cleanup core list * Use assertIn and assertNotIn for test * It is easy to understand to use words no numbers * register the config generator default hook with the right name * Replace deprecated LOG.warn with LOG.warning * Properly retrieve keystone user from admin client * Fixed tempest error due to upstream change * Record all the fired alarm state for event-alarm * tempest: migrate api tests from tempest tree * add missing mitaka-3 release notes * A little typo of doc * Moved CORS middleware configuration into oslo-config-generator 2.0.0.0b3 --------- * Add composite alarm usage description * Remove unused pngmath Sphinx extension * Fix py34 error of indexing 'dict\_keys' object * Add releasenote for composite alarm feature * Change the SERVICE\_TENANT\_NAME to SERVICE\_PROJECT\_NAME * Fix tempest test path * Add composite rule alarm API support * Add composite rule alarm evaluator * Remove ceilometer-alarm-\* related content of installation * Clean etc directory * Install configuration files by default * KEYSTONE\_CATALOG\_BACKEND is deprecated * Added CORS support to Aodh * devstack: Fix Keystone v3 configuration typo * Fix alarm reason * Clean config in source code * tempest: add aodh tempest plugin * gabbi's own paste.ini file * Log deprecation message if users use nosql backend * devstack: use password with version discovery * devstack: support publicURL retrieval in both keystone v2/v3 format * Load zaqar client outside init * Update alarm history only if change in alarm property * functional tests: fix publicURL retrieval * threshold: fix statistics empty case * tempest: migrate codes from tempest tree 2.0.0.0b2 --------- * Zaqar notifier for alarms * tox: change default target from MongoDB to MySQL * tests: replace bash scripts with overtest * Imported Translations from Zanata * add release notes for mitaka-2 * Refactor Gnocchi and threshold evaluators * gnocchi: use gnocchiclient instead of requests * Use keystoneauth1 instead of manual setup * Replace deprecated library function os.popen() with subprocess * Use assertTrue/False instead of assertEqual(T/F) * Test: make enforce\_type=True in CONF.set\_override * devstack: add support for Gnocchi * Replace LOG.warn with LOG.warning * Trivial: Remove vim header 
from source files * Trival: Remove unused logging import * Fix an minor error in test\_hbase\_table\_utils.py * Don't need a metaclass for AlarmEvaluationService * Use extras for dependency installation * Support newer versions of MySQL * rbac: add some backport compat tests * Fix rbac system * MAINTAINERS: remove outdated data * Replace stackforge with openstack * messaging: remove most oslo.context usage 2.0.0.0b1 --------- * add initial release notes * Put py34 first in the env order of tox * Update policy.json.sample with correct values * deprecate timeutils.total\_seconds() * clean up integration test urls * initialize ceilometerclient when we use it * fix some test cases wrongly skipped for mysql backend * support queue based communication between evaluator and notifier * remove unnecessary mock for rpc server start * Move the content of ReleaseNotes to README.rst * devstack: fix HBase functional tests * don't pass aodh options to oslo.db engine facade * gnocchi: only evaluate the required eval\_periods * Fix combination alarms * Fixing evaluation of gnocchi aggregation-by-metric * add reno for release notes management * Revert "Revert "Use oslo\_config PortOpt support"" * Do not use oslo.messaging 2.8.0 * utils: move code where it's actually used and remove * hbase: add functional testing * tests: remove testscenarios usage * Remove eventlet usage * remove default=None for config options * Do not use system config file for test * devstack: install PostgreSQL devel tool for psycopg2 * Move evaluator tests into the unit folder * Revert "Use oslo\_config PortOpt support" * Use oslo\_config PortOpt support * Add deprecated group for gnocchi\_url * Fix indent of code blocks in Devstack plugin README file * Imported Translations from Zanata * devstack: Fix some comments * remove unused configuration options * devstack/plugin.sh: fix typo * monkeypatch thread for oslo.messaging tests * Remove dependency on sphinxcontrib-docbookrestapi * Using oslo-config-generator to instead of generate-config-file.sh * Added README.rst and corrected the rally repository * proposal to add Ryota Mibu to Aodh core * Fix the gabbi target in tox.ini to use correct path 1.1.0 ----- * Avoid oslo.messaging 2.6.0 and 2.6.1 * update ceilometerclient requirement * re-organise tests * Imported Translations from Zanata * Cleanup of Translations * Remove unused file * Add test to cover history rule change * Change ignore-errors to ignore\_errors * tox: Allow to pass some OS\_\* variables * Imported Translations from Zanata * gnocchi: Fix typo for needed\_overlap * Cleanup keystonemiddleware configuration * event-alarm: add unit tests for various trait types * event-alarm: add alarm wrapper class * event-alarm: add event wrapper class * Refactor event-alarm caching * event-alarm: fix unit tests to check stored alrams * event-alarm: fix the order of alarms in unit test * event-alarm: delete debug message to show # of alarms 1.0.0 ----- * remove db2 nosql driver * storage: remove unused classes * storage: remove unused CLI option * tests: use requests rather than httplib2 * Remove unused tests requirements * percent\_of\_overlap=0 to validate gnocchi alarm * Adding liusheng to MAINTAINERS * Fix the aodh api port * Use new location of subunit2html * Add storage documentation * Fix args for get\_notification\_listener() * Create conf directory during devstack install phase * event-alarm: devstack plugin support * Update tests to reflect WSME 0.8.0 changes * Make event-alarm evaluator caching alarms * Add listener service 
for event alarm evaluation * Add evaluator for event alarm * doc: Fix the devstack configuration * Revert "Use generic keystone uri in devstack ..." * Imported Translations from Transifex * Exclude event type from targets of alarm evaluator * tox: generate sample config file on default target * Refactor api tests (\_update\_alarm) * Storage: add 'exclude' constraint to get\_alarms() * Use generic keystone uri in devstack config * Avoid translating debug log * Use the Serializer from oslo.messaging * Fixes querying alarm history with severity field * Remove the unused cpu\_count utils method * api: move API options to their own api group * storage: remove mongodb\_replica\_set option * service: stop supporting deprecated group for auth option * storage: remove unused option db2nosql\_resource\_id\_maxlen * Stop registering oslo.messaging option * Move import to local to resolve circular dependency failure * Refactor api tests for alarm history * Move ceilometerclient mock to evaluator/base * Correct database functional tests * Correct thread handling in TranslationHook * storage: re-add and deprecate alarm\_connection * Fix TestEvaluatorBase.prepare\_alarms() * Make ConnectionRetryTest more reliable * storage: remove deprecated database\_connection * Use storage scenario test base to test migration * devstack: use $API\_WORKERS to set the number of WSGI workers in Apache * Add 'event' type and 'event\_rule' to alarm API * Refactor alarm scenario tests (RuleCombination) * gnocchi: percent\_of\_overlap=0 for agg. alarms * Drop downgrade field in alembic script.py.mako * Imported Translations from Transifex * Refactor alarm scenario tests (RuleGnocchi) * Add alembic support for aodh * Use mocked object to test log message * storage: only retry connection, not driver finding * Stop using global conf object * gnocchi: stop using global conf object for Gnocchi evaluator * api: fix alarm group declaration * mongodb: stop relying on global conf object in utils * mongodb: replace custom retry code by retrying * evaluator: remove global conf usage from threshold evaluator * rpc: remove global conf usage from notifier * api: remove global conf and local pecan config * api: remove force\_canonical option * tests.api: remove unused argument/config option * api: stop using a global Enforcer object * api.hooks: stop using global conf object * Port remaining tests to Python 3 * Keep alarm other attrs constantly after evaluating an alarm * tests: ensure gabbi live test fail * api: fix alarm deletion and update * functionnal: fix gating * Imported Translations from Transifex * mongodb: stop using global config object * tests.db: simplify connection handling * storage: always use get\_connection\_from\_config() * Add keystone V3 support for service credentials * Delete its corresponding history data when deleting an alarm * Avoid getting alarm change notifier repeatedly * Use user\_id/project\_id from service\_credentials in alarm\_change * Refactor alarm scenario tests (RuleThreshold) * Fix the service entry of evaluator and notifier * Use stevedore directive to document plugins * Add basic gate functional testing jobs for aodh * notifier: stop using global conf object * tests: use config fixture in evaluator tests * coordination: stop using global conf object * storage: pass conf rather at \_\_init\_\_ than using a global one * evaluator: stop using global conf in evaluator service * evaluator: stop using global conf in Evaluator * notifier: stop relying on global conf object * api: stop using cfg.CONF and 
use request local conf * keystone\_client: stop using cfg.CONF * Move service classes to their correct subdir * api: use oslo.config to validate data for worker * rpc: stop using global conf object in some functions * tests: remove unused fake class * Switch to oslo.utils.fileutils * Move Gnocchi options out of the "alarms" group * Remove aodh/tests/alarm, move last test out * evaluator: move to top-level * notifier: move from alarm to top-level * Close and dispose test database setup connections * Remove remnants of ceilometer from mongodb data file * Make py27 run tests on all backends by default * Imported Translations from Transifex * Move aodh.alarm.rpc to aodh.rpc * Move alarm.service to service * Allow aodh directly acessing to its storage * Refactor alarm scenario tests (TestAlarmsHistory) * trust: remove useless conf imports * api: Add location alarm creation * Add devstack plugin * Use the right sqla type for JSONEncodedDict * Refactor alarm scenario tests (TestAlarmsBase) * Imported Translations from Transifex * Make GnocchiThreshold evaluator derived from ThresholdEvaluator * Tolerate alarm actions set to None * Optionally create trust for alarm actions * Imported Translations from Transifex * doc: use pbr autodoc feature to build api doc * Remove code related to metadata/metaquery * messaging: remove unused cleanup function * impl\_log: make methods static * Remove useless migration module * Minor changes for evaluator service * Update the requirements * notifier: tests stop method * api: remove v1 handling * api: remove unused extra\_hooks * Move 'alarm\_connection' to 'connection' * Move aodh.alarm.storage to aodh.storage * Replaces methods deprecated in pymongo3.0 * Fix options registeration in tests * Change the default api server port * Initial translation import * Rename to aodh * Remove locale * Remove code unrelated to alarming * remove unused notifier * Add support for posting samples to notification-agent via API * Stop dropping deprecated tables while upgrade in mongodb and db2 * Add handler of sample creation notification * Remove the unused get\_targets method of plugin base * add oslo.service options * Restricts pipeline to have unique source names * drop use of oslo.db private attribute * Fix oslo.service configuration options building * Add fileutils to openstack-common.conf * Remove unnecessary executable permission * Switch to oslo.service * Remove unnecessary wrapping of transformer ExtentionManager * Port test\_complex\_query to Python 3 * Fix expected error message on Python 3 * Fix usage of iterator/list on Python 3 * Replaces ensure\_index for create\_index * pip has its own download cache by default * For sake of future python3 encode FakeMemcache hashes * Make acl\_scenarios tests' keystonemiddleware cache work flexibly * Update version for Liberty * Gnocchi Dispatcher support in Ceilometer * Updated from global requirements * Fix alarm rest notifier logging to include severity * Remove useless execute bit on rst file * Fix unicode/bytes issues in API v2 tests * Fix script name in tox.ini for Elasticsearch * Fix the meter unit types to be consistent * tests: use policy\_file in group oslo\_policy * Fix publisher test\_udp on Python 3 * Fix Ceph object store tests on Python 3 * Port IPMI to Python 3 * Port middleware to Python 3 * [elasticsearch] default trait type to string * Updated from global requirements * Lower down the range for columns which are being used as uuid * Sync with latest oslo-incubator * Fix testing of agent manager with tooz * 
Remove deprecated Swift middleware * add DNS events * Handle database failures on api startup * Fix more tests on Python 3 * Remove old oslo.messaging aliases * Remove useless versioninfo and clean ceilometer.conf git exclusion * Register oslo\_log options before using them * Add running functional scripts for defined backend * Remove snapshot.update events as they are not sent * WSME version >=0.7 correctly returns a 405 * TraitText value restricted to max length 255 * Cause gabbi to skip on no storage sooner * Updated from global requirements * Move eventlet using commands into own directory * adjust alarm post ut code to adapt to upstream wsme * Disable rgw pollster when aws module not found * Fixes DiskInfoPollster AttributeError exception * remove useless log message * use oslo.log instead of oslo-incubator code * Port test\_inspector to Python 3 * Fix usage of dictionary methods on Python 3 * Imported Translations from Transifex * Add oslo.vmware to Python 3 test dependencies * Remove iso8601 dependency * Enable test\_swift\_middleware on Python 3 * Enable more tests on Python 3 * Skip hbase tests on Python 3 * Clear useless exclude from flake8 ignore in tox * Remove pagination code * Stop importing print\_function * Remove useless release script in tools * Remove useless dependency on posix\_ipc * Remove exceute bit on HTTP dispatcher * Remove oslo.messaging compat from Havana * Fixing event types pattern for Role Noti. handler * Mask database.event\_connection details in logs * Switch from MySQL-python to PyMySQL * Python 3: replace long with int * Python 3: Replace unicode with six.text\_type * Python 3: generalize the usage of the six module * Update Python 3 requirements * Python 3: set \_\_bool\_\_() method on Namespace * Python 3: encode to UTF-8 when needed * Python 3: sort tables by their full name * Python 3: replace sys.maxint with sys.maxsize * Initial commit for functional tests * Update a test to properly anticipate HTTP 405 for RestController * proposal to add Chris Dent to Ceilometer core * rebuild event model only for database writes * cleanup problem events logic in event db storage * fix incorrect docstring for dispatcher * Imported Translations from Transifex * api: record severity change in alarm history * VMware: verify vCenter server certificate * Add hardware memory buffer and cache metrics * Make interval optional in pipeline * Improve ceilometer-api install documentation * empty non-string values are returned as string traits * Trait\_\* models have incorrect type for key * small change to development.rst file * Drop use of 'oslo' namespace package * [unittests] Increase agent module unittests coverage * stop mocking os.path in test\_setup\_events\_default\_config * Remove py33 tox target * made change to mod\_wsgi.rst file * ensure collections created on upgrade * Fix raise error when run "tox -egenconfig" * Updated from global requirements * Fix None TypeError in neutron process notifications * Have eventlet monkeypatch the time module * Have eventlet monkeypatch the time module * Add the function of deleting alarm history * Updated from global requirements * Fix valueerror when ceilometer-api start * Override gnocchi\_url configuration in test * Move ceilometer/cli.py to ceilometer/cmd/sample.py * Fix valueerror when ceilometer-api start * remove deprecated partitioned alarm service * use message id to generate hbase unique key * gnocchi: fix typo in the aggregation endpoint * Release Import of Translations from Transifex * Fix Copyright date in docs * 
Replace 'metrics' with 'meters' in option and doc * use message id to generate hbase unique key * update .gitreview for stable/kilo * gnocchi: fix typo in the aggregation endpoint * broadcast data to relevant queues only * Imported Translations from Transifex * fix combination alarm with operator == 'or' * Updated from global requirements * proposal to add ZhiQiang Fan to Ceilometer core * Open Liberty development * Fix a samples xfail test that now succeeds * Cosmetic changes for system architecture docs * Fix a issue for kafka-publisher and refactor the test code * pymongo 3.0 breaks ci gate * use oslo.messaging dispatch filter * Further mock adjustments to deal with intermittent failure * Adds support for default rule in ceilometer policy.json * Updated from global requirements * limit alarm actions * Use oslo\_vmware instead of deprecated oslo.vmware * Remove 'samples:groupby' from the Capabilities list * Use old name of 'hardware.ipmi.node.temperature' * Revert "remove instance: meter" * Tweak authenticate event definition * Add project and domain ID to event definition for identity CRUD * Fix the event type for trusts * reset croniter to avoid cur time shift * Imported Translations from Transifex * Avoid a error when py27 and py-mysql tests run in sequence * Stop using PYTHONHASHSEED=0 in ceilometer tests * remove instance: meter * Added ipv6 support for udp publisher * Remove the unnecessary dependency to netaddr * Optimize the flow of getting pollster resources * support ability to skip message signing * Avoid conflict with existing gnocchi\_url conf value * Using oslo.db retry decorator for sample create * alarm: Use new gnocchi aggregation API * collector: enable the service to listen on IPv6 * minimise the use of hmac * Typo in pylintrc * Ceilometer retrieve all images by 'all-tenants' * fix incorrect key check in swift notifications * support disabling profiler and http meters * ensure collections created on upgrade * Fix common misspellings * Updated from global requirements * refuse to post sample which is not supported * Enable collector to requeue samples when enabled * drop deprecated novaclient.v1\_1 * exclude precise metaquery in query field * Imported Translations from Transifex * remove log message when process notification * Add gabbi tests for resources * Fix typos and format in docstrings in http dispatcher * add ability to dispatch events to http target * doc: fix class name * add ability to publish to multiple topics * make field and value attributes mandatory in API Query * Fix db2 upgrade in multi-thread run issue * Add memory.resident libvirt meter for Ceilometer * Update reference * Check the namespaces duplication for ceilometer-polling * Add gabbi tests to explore the Meter and MetersControllers * Imported Translations from Transifex * mysql doesn't understand intersect * order traits returned within events * add network, kv-store, and http events * Add support for additional identity events * Add a Kafka publisher as a Ceilometer publisher * Fix response POST /v2/meters/(meter\_name) to 201 status * Attempt to set user\_id for identity events * Switch to oslo.policy 0.3.0 * normalise timestamp in query * Add more power and thermal data * Updated from global requirements * Fix formatting error in licence * Added option to allow sample expiration more frequently * add option to store raw notification * use mongodb distinct * remove event\_types ordering assumption * Add gabbi tests to cover the SamplesController * api: fix alarm creation if time\_constraint 
is null * fix log message format in event.storage.impl\_sqlalchemy * Remove duplications from docco * Tidy up clean-samples.yaml * Fix a few typos in the docs * use default trait type in event list query * fix wrong string format in libvirt inspector * create a developer section and refactor * Do not default pecan\_debug to CONF.debug * Adding Gabbi Tests to Events API * fix config opts in objectstore.rgw * Updated from global requirements * support time to live on event database for sql backend * add an option to disable non-metric meters * add missing objectstore entry points * Initial gabbi testing for alarms * reorganise architecture page * Add ceph object storage meters * Use oslo\_config choices support * fix inline multiple assignment * alarming: add gnocchi alarm rules * Protect agent startup from import errors in plugins * Revert "Add ceph object storage meters" * api: move alarm rules into they directory * compress events notes * Destroy fixture database after each gabbi TestSuite * Fix unittests for supporting py-pgsql env * Adding links API and CLI query examples * correct column types in events * Be explicit about using /tmp for temporary datafiles * Patch for fixing hardware.memory.used metric * Add ceph object storage meters * [PostgreSQL] Fix regexp operator * Add clean\_exit for py-pgsql unit tests * modify events sql schema to reduce empty columns * Remove duplicated resource when pollster polling * check metering\_connection attribute by default * unicode error in event converter * cleanup measurements page * api: add missing combination\_rule field in sample * Fix test case of self-disabled pollster * update event architecture diagram * use configured max\_retries and retry\_interval for database connection * Updated from global requirements * Making utilization the default spelling * Add Disk Meters for ceilometer * correctly leave group when process is stopped * Updated from global requirements * enable oslo namespace check for ceilometer project * Add doc for version list API * Enabling self-disabled pollster * Use werkzeug to run the developement API server * Imported Translations from Transifex * switch to oslo\_serialization * move non-essential libs to test-requirements * Validate default values in config * fix the value of query\_spec.maxSample to advoid to be zero * clean up to use common service code * Add more sql test scenarios * [SQLalchemy] Add regex to complex queries * Fix duplication in sinks names * metering data ttl sql backend breaks resource metadata * Refactor unit test code for disk pollsters * start recording error notifications * Remove no\_resource hack for IPMI pollster * Add local node resource for IPMI pollsters * Use stevedore to load alarm rules api * [MongoDB] Add regex to complex queries * Imported Translations from Transifex * support time to live on event database for MongoDB * split api.controllers.v2 * add elasticsearch events db * use debug value for pecan\_debug default * Shuffle agents to send request * Updated from global requirements * Adds disk iops metrics implementation in Hyper-V Inspector * discovery: allow to discover all endpoints * Declarative HTTP testing for the Ceilometer API * add listener to pick up notification from ceilometermiddleware * Drop deprecated namespace for oslo.rootwrap * remove empty module tests.collector * Add disk latency metrics implementation in Hyper-V Inspector * add event listener to collector * add notifier publisher for events * enable event pipeline * Imported Translations from Transifex * 
deprecate swift middleware * sync oslo and bring in versionutils * Expose alarm severity in Alarm Model * Hyper-V: Adds memory metrics implementation * Remove mox from requirements * Fix IPMI unit test to cover different platforms * adjust import group order in db2 ut code * add event pipeline * remove unexistent module from doc/source/conf.py * Upgrade to hacking 0.10 * Remove the Nova notifier * Remove argparse from requirements * [MongoDB] Improves get\_meter\_statistics method * Fix docs repeating measuring units * [DB2 nosql] Create TIMESTAMP type index for 'timestamp' field * remove pytidylib and netifaces from tox.ini external dependency * Avoid unnecessary API dependency on tooz & ceilometerclient * Correct name of "ipmi" options group * Fix Opencontrail pollster according the API changes * enable tests.storage.test\_impl\_mongodb * Remove lockfile from requirements * Disable eventlet monkey-patching of DNS * Expose vm's metadata to metrics * Adding build folders & sorting gitignore * Disable proxy in unit test case of test\_bin * Add Event and Trait API to document * Refactor ipmi agent manager * Use alarm's evaluation periods in sufficient test * Use oslo\_config instead of deprecated oslo.config * Avoid executing ipmitool in IPMI unit test * Updated from global requirements * Add a direct to database publisher * Fixed MagnetoDB metrics title * Imported Translations from Transifex * Fix incorrect test case name in test\_net.py * Updated from global requirements * notification agent missing CONF option * switch to oslo\_i18n * Use right function to create extension list for agent test * Imported Translations from Transifex * Add an exchange for Zaqar in profiler notification plugin * Remove unused pecan configuration options * Updated from global requirements * Use oslo\_utils instead of deprecated oslo.utils * Match the meter names for network services * stop using private timeutils attribute * Update measurement docs for network services * Catch exception when evaluate single alarm * Return a meaningful value or raise an excpetion for libvirt * Imported Translations from Transifex * make transformers optional in pipeline * Added metering for magnetodb * Add release notes URL for Juno * Fix release notes URL for Icehouse * remove unnecessary str method when log messages * Revert "Remove Sphinx from py33 requirements" * untie pipeline manager from samples * reset listeners on agent refresh * Remove inspect\_instances method from virt * Optimize resource list query * Synchronize Python 3 requirements * Remove unnecessary import\_opt|group * Add test data generator via oslo messaging * Check to skip to poll and publish when no resource * Add oslo.concurrency module to tox --env genconfig * add glance events * add cinder events * Manual update from global requirements * Add cmd.polling.CLI\_OPTS to option list * Ignore ceilometer.conf * Switch to oslo.context library * Revert "Skip to poll and publish when no resources found" * Added missing measurements and corrected errors in doc * Remove Sphinx from py33 requirements * Clean up bin directory * Improve tools/make\_test\_data.sh correctness * ensure unique pipeline names * implement notification coordination * Make methods static where possible (except openstack.common) * Fix docs to suit merged compute/central agents concept * Drop anyjson * Move central agent code to the polling agent module * RBAC Support for Ceilometer API Implementation * [SQLalchemy] Add groupby ability resource\_metadata * Improve links in config docs * Make 
LBaaS total\_connections cumulative * remove useless looping in pipeline * Encompassing one source pollsters with common context * Modify tests to support ordering of wsme types * Make compute discovery pollster-based, not agent-level * Add docs about volume/snapshot measurements * Port to graduated library oslo.i18n * Retry to connect database when DB2 or mongodb is restarted * Updated from global requirements * Standardize timestamp fields of ceilometer API * Workflow documentation is now in infra-manual * Add alarm\_name field to alarm notification * Updated from global requirements * Rely on VM UUID to fetch metrics in libvirt * Imported Translations from Transifex * Initializing a longer resource id in DB2 nosql backend * Sync oslo-incubator code to latest * ensure unique list of consumers created * fix import oslo.concurrency issue * Add some rally scenarios * Do not print snmpd password in logs * Miniscule typo in metering\_connection help string * add http dispatcher * [MongoDB] Add groupby ability on resource\_metadata * [MongoDB] Fix bug with 'bad' chars in metadatas keys * Override retry\_interval in MongoAutoReconnectTest * Exclude tools/lintstack.head.py for pep8 check * Add encoding of rows and qualifiers in impl\_hbase * Database.max\_retries only override on sqlalchemy side * Support to capture network services notifications * Internal error with period overflow * Remove Python 2.6 classifier * Enable pep8 on ./tools directory * Imported Translations from Transifex * Fixes Hyper-V Inspector disk metrics cache issue * fix swift middleware parsing * Fix order of arguments in assertEqual * Updated from global requirements * Adapting pylint runner to the new message format * Validate AdvEnum & return an InvalidInput on error * add sahara and heat events * add keystone events to definitions * Add timeout to all http requests * [MongoDB] Refactor time to live feature * transform samples only when transformers exist * Updated from global requirements * Remove module not really used by Ceilometer * Switch to oslo.concurrency * Skip to poll and publish when no resources found * Change event type for identity trust notifications * Add mysql and postgresql in tox for debug env * Add new notifications types for volumes/snapshots * Add encoding to keys in compute\_signature * Tests for system and network aggregate pollsters * Add bandwidth to measurements * Fix wrong example of capabilities * Correct the mongodb\_replica\_set option's description * Alarms listing based on "timestamp" * Use 'pg\_ctl' utility to start and stop database * Correct alarm timestamp field in unittest code * Refactor kwapi unit test * Remove duplicated config doc * VMware: Enable VMware inspector to support any port * Clean event method difinition in meter storage base * Fix some nits or typos found by chance * Add Sample ReST API path in webapi document * Enable filter alarms by their type * Fix storage.hbase.util.prepare\_key() for 32-bits system * Add event storage for test\_hbase\_table\_utils * Add per device rate metrics for instances * Fix hacking rule H305 imports not grouped correctly * Add \_\_repr\_\_ method for sample.Sample * remove ordereddict requirement * Improve manual.rst file * Imported Translations from Transifex * Fix columns migrating for PostgreSQL * Updated from global requirements * Updated from global requirements * [MongoDB] Fix bug with reconnection to new master node * Updated from global requirements * support request-id * Update coverage job to references correct file * remove 
reference to model in migration * Use oslo\_debug\_helper and remove our own version * Allow collector service database connection retry * refresh ceilometer architecture documentation * Edits assert methods * Adds memory stats meter to libvirt inspector * Edits assert methods * Edits assert methods * Edits assert methods * Edits assert method * Imported Translations from Transifex * Imported Translations from Transifex * Updated from global requirements * add script to generate test event data * Handle poorly formed individual sensor readings * refactor hbase storage code * Avoid clobbering existing class definition * Hoist duplicated AlarmService initialization to super * Clarify deprecation comment to be accurate * Work toward Python 3.4 support and testing * Fix recording failure for system pollster * sync and clean up oslo * Add missing notification options to the documentation * Add missing alarm options to the documentation * Add oslo.db to config generator * Add missed control exchange options to the documentation * Add coordination related options to the documentation * Add missing collector options to the documentation * switch to oslo-config-generator * Edit docs for docs.opentack.org/developer/ * Add oslo.db to config generator * Fix signature validation failure when using qpid message queue * clean capabilities * move db2 and mongo driver to event tree * move sql event driver to event tree * move hbase event driver to event tree * Sets default encoding for PostgreSQL testing * update database dispatcher to use events db * Add role assignment notifications for identity * add mailmap to avoid dup of authors * Add user\_metadata to network samples * Fix recording failure for system pollster * Manually updated translations * Updated from global requirements * Creates one database per sql test * Adds pylint check for critical error in new patches * Fix neutron client to catch 404 exceptions * Fix OrderedDict usage for Python 2.6 * Include a 'node' key and value in ipmi metadata * clean path in swift middleware * Implement redesigned separator in names of columns in HBase * [HBase] Add migration script for new row separate design * Imported Translations from Transifex * Include a 'node' key and value in ipmi metadata * Updated from global requirements * Run unit tests against PostgreSQL * create skeleton files for event storage backends * Imported Translations from Transifex * isolate event storage models * Fix neutron client to catch 404 exceptions * Run unit tests against MySQL * Updated from global requirements * Correct JSON-based query examples in documentation * Open Kilo development * Add cfg.CONF.import\_group for service\_credentials * Fix OrderedDict usage for Python 2.6 * clean path in swift middleware * Partition static resources defined in pipeline.yaml * Per-source separation of static resources & discovery * dbsync: Acknowledge 'metering\_connection' option * Fix bug in the documentation * Use oslo.msg retry API in rpc publisher * Describe API versions * Change compute agent recurring logs from INFO to DEBUG * Fix bug with wrong bool opt value interpolation * [HBase] Improves speed of unit tests on real HBase backend * Imported Translations from Transifex * Removed unused abc meta class * update references to auth\_token middleware * clean up swift middleware to avoid unicode errors * [HBase] Catch AlreadyExists error in Connection upgrade * Use None instead of mutables in method params default values * Updated from global requirements * Enable to get service types 
from configuration file * test db2 driver code * Docs: Add description of pipeline discovery section * Typo "possibilites" should be "possibilities" * Modified docs to update DevStack's config filename * Add an API configuration section to docs * Tune up mod\_wsgi settings in example configuration * Allow pecan debug middleware to be turned off * Provide \_\_repr\_\_ for SampleFilter * Eliminate unnecessary search for test cases * Switch to a custom NotImplementedError * minimise ceilometer memory usage * Partition swift pollster resources by tenant * Add IPMI pollster * Add IPMI support * Stop using intersphinx * Use central agent manager's keystone token in discoveries * Handle invalid JSON filters from the input gracefully * Sync jsonutils for namedtuple\_as\_object fix * ceilometer spamming syslog * Timestamp bounds need not be tight (per ceilometer 1288372) * Allow to pass dict from resource discovery * fix network discovery meters * switch to sqlalchemy core * Imported Translations from Transifex * Improve the timestamp validation of ceilometer API * Update docs with Sahara notifications configuration * Migrate the rest of the central agent pollsters to use discoveries * Add documentation for implemented identity meters * Fix tests with testtools>=0.9.39 * Document the standard for PaaS service notifications * Returns 401 when unauthorized project access occurs * Adding another set of hardware metrics * normalise resource data * warn against sorting requirements * Add validate alarm\_actions schema in alarm API * Fix help strings * Imported Translations from Transifex * Switch partitioned alarm evaluation to a hash-based approach * Central agent work-load partitioning * collector: Allows to requeue a sample * Typo fixed * Switch to oslo.serialization * Document pipeline publishers configuration * Alarm: Use stevedore to load the service class * Enhance compute diskio tests to handle multi instance * Adding comparison operators in query for event traits * XenAPI support: Update measurements documentation * update requirements * add documentation for setting up api pipeline * Permit usage of notifications for metering * XenAPI support: Disk rates * XenAPI support: Changes for networking metrics * XenAPI support: Memory Usage * XenAPI support: Changes for cpu\_util * XenAPI support: List the instances * Rebase hardware pollsters to use new inspector interface * Switch to use oslo.db * Remove oslo middleware * Adding quotas on alarms * Add an exchange for Trove in profiler notification plugin * Simplify chained comparisons * In-code comments should start with \`#\`, not with \`"""\` * Remove redundant parentheses * skip polls if service is not registered * re-add hashseed to avoid gate error * Switch to oslo.utils * Switch to oslotest * Handle sqlalchemy connection strings with drivers * Rewrite list creation as a list literal * Rewrite dictionary creation as a dictionary literal * Triple double-quoted strings should be used for docstrings * Add upgrading alarm storage in dbsync * Improving of configuration.rst * Fix typos in transformer docstrings * Update tox.ini pep8 config to ignore i18n functions * Added new hardware inspector interface * compute: fix wrong test assertion * sync olso-incubator code * VMware: Support secret host\_password option * refactor filter code in sql backend * Support for per disk volume measurements * Use a FakeRequest object to test middleware * Imported Translations from Transifex * Improve api\_paste\_config file searching * [Hbase] Add column for source 
filter in \_get\_meter\_samples * Issue one SQL statement per execute() call * Allow tests to run outside tox * [HBase] Refactor hbase.utils * Set page size when Glance API request is called * Adding init into tools folder * Enhancing the make\_test\_data script * correct DB2 installation supported features documentation * Avoid duplication of discovery for multi-sink sources * Improve performance of libvirt inspector requests * Documented Stevedore usage and source details * Add notifications for identity authenticate events * Add message translate module in vmware inspector * Handle Cinder attach and detach notifications * [HBase] Improve uniqueness for row in meter table * Doc enhancement for API service deployment with mod\_wsgi * Update documentation for new transformer * Add the arithmetic transformer endpoint to setup.cfg * Imported Translations from Transifex * Fix unit for vpn connection metric * Debug env for tox * Change spelling mistakes * Use auth\_token from keystonemiddleware * Fix dict and set order related issues in tests * Fix listener for update.start notifications * Sahara integration with Ceilometer * Add notifications for identity CRUD events * Extracting make\_resource\_metadata method * Fix make\_test\_data tools script * Add cumulative and gauge to aggregator transformer * Enable some tests against py33 * Remove --tmpdir from mktemp * Replace dict.iteritems() with six.iteritems(dict) * Replace iterator.next() with next(iterator) * Fix aggregator flush method * Automatic discovery of TripleO Overcloud hardware * Set python hash seed to 0 in tox.ini * Don't override the original notification message * Remove ConnectionProxy temporary class * Move sqlalchemy alarms driver code to alarm tree * basestring replaced with six.string\_types * Correct misspelled words * Add retry function for alarm REST notifier * Move hbase alarms driver code to alarm tree * Update measurement docs for FWaaS * Update measurement docs for VPNaaS * Follow up fixes to network services pollsters * Updated from global requirements * Implement consuming ipmi notifications from Ironic * Support for metering FWaaS * Adds Content-Type to alarm REST notifier * Multi meter arithmetic transformer * Remove redudent space in doc string * Use None instead of mutables in test method params defaults * Add support for metering VPNaaS * Use resource discovery for Network Services * Change of get\_events and get\_traits method in MongoDB and Hbase * Fix two out-dated links in doc * Move log alarms driver code to alarm tree * Separate the console scripts * clean up event model * improve expirer performance for sql backend * Move mongodb/db2 alarms driver code to alarm tree * Allow to have different DB for alarm and metering * Replace datetime of time\_constraints by aware object * Sync oslo log module and its dependencies * Use hmac.compare\_digest to compare signature * Add testcase for multiple discovery-driven sources * Fixes aggregator transformer timestamp and user input handling * Improves pipeline transformer documentation * Fix incorrect use of timestamp in test * Add keystone control exchange * Fix call to meter-list in measurements doc * Remove redundant parentheses * [Mongodb] Implement events on Mongodb and DB2 * Fix typos in code comments & docstrings * Make the error message of alarm-not-found clear * Fix SQL exception getting statitics with metaquery * Remove docutils pin * update default\_log\_levels set by ceilometer * Fix annoying typo in partition coordinator test * Transform sample\_cnt type 
to int * Remove useless sources.json * Fix H405 violations and re-enable gating * Fix H904 violations and re-enable gating * Fix H307 violations and re-enable gating * Fix the section name in CONTRIBUTING.rst * Added osprofiler notifications plugin * Improve a bit performance of Ceilometer * Revert "Align to openstack python package index mirror" * Fix aggregator \_get\_unique\_key method * Remove meter hardware.network.bandwidth.bytes * Fix F402 violations and re-enable gating * Fix E265 violations and re-enable gating * Fix E251 violations and re-enable gating * Fix E128 violations and re-enable gating * Fix E126,H104 violations and re-enable gating * Bump hacking to 0.9.x * Fixed various import issues exposed by unittest * use urlparse from six * clean up sample index * Fix HBase available capabilities list * Updated from global requirements * VMware:Update the ceilometer doc with VMware opts * Handle non-ascii character in meter name * Add log output of "x-openstack-request-id" from nova * Imported Translations from Transifex * fix StringIO errors in unit test * Fix hacking rule 302 and enable it * Imported Translations from Transifex * sync oslo code * Fixes ceilometer-compute service start failure * Reenables the testr per test timeout * Avoid reading real config files in unit test * Clean up oslo.middleware.{audit,notifier} * Use hacking from test-requirements * Splits hbase storage code base * Splits mongo storage code base * Separate alarm storage models from other models * Iterates swift response earlier to get the correct status * Fix messaging.get\_transport caching * Fix method mocked in a test * Don't keep a single global TRANSPORT object * Clean up .gitignore * Fix Sphinx directive name in session.py * Fix list of modules not included in auto-gen docs * Downgrade publisher logging to debug level again * remove default=None for config options * [HBase] get\_resource optimization * Fix incorrect trait initialization * Remove unused logging in tests * Revert "Fix the floatingip pollster" * Remove low-value logging from publication codepath * Fix LBaaS connection meter docs * Fix the meter type for LB Bytes * Adding alarm list filtering by state and meter * Adds caches for image and flavor in compute agent * [HBase] Implement events on HBase * Skipping central agent pollster when keystone not available * Respect $TMPDIR environment variable to run tests * Fixed unit test TestRealNotification * Update Measurement Docs for LBaaS * Metering LoadBalancer as a Service * Removes per test testr timeout * Change pipeline\_manager to instance attribute in hooks * Change using of limit argument in get\_sample * Refactor tests to remove direct access to test DBManagers * Fix notification for NotImplemented record\_events * Add missing explicit cfg option import * Fix ceilometer.alarm.notifier.trust import * Use TYPE\_GAUGE rather than TYPE\_CUMULATIVE * Update doc for sample config file issue * Corrects a flaw in the treatment of swift endpoints * use LOG instead of logger as name for the Logger object * Fix doc gate job false success * Improve performance of api requests with hbase scan * Add new 'storage': {'production\_ready': True} capability * Clean tox.ini * Remove (c) and remove unnecessary encoding lines * Fix testing gate due to new keystoneclient release * Ignore the generated file ceilometer.conf.sample * Update the copyright date in doc * Updated from global requirements * reconnect to mongodb on connection failure * refactor sql backend to improve write speed * Don't rely on 
oslomsg configuration options * replaced unicode() with six.text\_type() * Synced jsonutils from oslo-incubator * Fix the floatingip pollster * Fix project authorization check * Update testrepository configuration * Implemented metering for Cinder's snapshots * Use joins instead of subqueries for metadata filtering * Use None instead of mutables in method params defaults * Remove all mostly untranslated PO files * switch SplitResult to use six * Remove unused db code due to api v1 drop * Updated from global requirements * oslo.messaging context must be a dict * Drop deprecated api v1 * Fix network notifications of neutron bulk creation * mongo: remove \_id in inserted alarm changes * Clean up openstack-common.conf * Revert "oslo.messaging context must be a dict" * Correct class when stopping partitioned alarm eval svc * oslo.messaging context must be a dict * Corrections of spelling, rephrasing for clarity * Adapt failing tests for latest wsme version * Removed StorageEngine class and it's hierarchy * Correcting formatting and adding period in measurement doc * Initialize dispatcher manager in event endpoint * Replaced CONF object with url in storage engine creation * Synced jsonutils from oslo-incubator * Remove gettextutils.\_ imports where they are not used * Remove "# noqa" leftovers for gettextutils.\_ * transformer: Add aggregator transformer * Remove conversion debug message * Fix the return of statistic with getting no sample * Remove eventlet.sleep(0) in collector tests * Don't allow queries with 'IN' predicate with an empty sequence * Check if samples returned by get\_sample\_data are not None * Opencontrail network statistics driver * Add a alarm notification using trusts * Replace hard coded WSGI application creation * Describe storage backends in the collector installation guide * Made get\_capabilities a classmethod instead of object method * Disable reverse dns lookup * Consume notif. 
from multiple message bus * Use NotificationPlugin as an oslo.msg endpoint * Improve combination rule validation * Remove ceilometer.conf.sample * Use known protocol scheme in keystone tests * cleanup virt pollster code * Add encoding argument to deserialising udp packets in collector * Made get\_engine method module-private * Make entities (Resource, User, Project) able to store lists * Remove duplicate alarm from alarm\_ids * More accurate meter name and unit for host load averages * Replace oslo.rpc by oslo.messaging * Fix a response header bug in the error middleware * Remove unnecessary escape character in string format * Optimize checks to set image properties in metadata * fix statistics query in postgres * Removed useless code from \_\_init\_\_ method * Refactored fake connection URL classes * Replace assert statements with assert methods * Removes direct access of timeutils.override\_time * Disable specifying alarm itself in combination rule * Include instance state in metadata * Allowed nested resource metadata in POST'd samples * Sync oslo-incubator code * Updated from global requirements * Refactor the DB implementation of Capabilities API * Fix Jenkins translation jobs * Align to openstack python package index mirror * User a more accurate max\_delay for reconnects * Open Juno development * Imported Translations from Transifex * Add note on aggregate duplication to API docco * Use ConectionPool instead of one Connection in HBase * remove dump tables from previous migrations * De-dupe selectable aggregate list in statistics API * ensure dispatcher service is configured before rpc * improve performance of resource-list in sql * SSL errors thrown with Postgres on multi workers * Remove escape character in string format * Verify user/project ID for alarm created by non-admin user * enable a single worker by default * Fix ceilometer.conf.sample mismatch * Metadata in compute.instance.exists fix * Fix order of arguments in assertEquals * Documenting hypervisor support for nova meters * Ensure idempotency of cardinality reduction in mongo * VMware vSphere: Improve the accuracy of queried samples * Use swob instead of webob in swift unit tests * Disable oslo.messaging debug logs * Fix validation error for invalid field name in simple query * fix create\_or\_update logic to avoid rollbacks * Avoid swallowing AssertionError in test skipping logic * Fix hardware pollster to inspect multiple resources * spawn multiple workers in services * Install global lazy \_() * Fixes Hyper-V metrics units * Ensure intended indices on project\_id are created for mongo * Fix the type of the disk IO rate measurements * Change the sample\_type from tuple to string * Fix order of arguments in assertEquals * Ensure alarm rule conform to alarm type * insecure flag added to novaclient * Fixes duplicated names in alarm time constraints * Use the list when get information from libvirt * Eventlet monkeypatch must be done before anything * 028 migration script incorrectly skips over section * Fix bug in get\_capabilities behavior in DB drivers * Added documentation for selectable aggregates * Make sure use IPv6 sockets for ceilometer in IPv6 environment * VMware vSphere: Bug fixes * Ensure insecure config option propagated by alarm evaluator * Fix order of arguments in assertEquals * Fix order of arguments in assertEquals * Fix order of arguments in assertEquals * Rationalize get\_resources for mongodb * Ensure insecure config option propagated by alarm service * add host meters to doc * Add field translation to 
complex query from OldSample to Sample * Extend test case to cover old alarm style conversion * Updated doc with debug instructions * Refactored the way how testscenarios tests are run * Corrected the sample names in hardware pollsters * Prevent alarm\_id in query field of getting history * Make ceilometer work with sqla 0.9.x * Implements monitoring-network-from-opendaylight * Add user-supplied arguments in log\_handler * VMware vSphere support: Disk rates * Fix updating alarm can specify existing alarm name * Changes for networking metrics support for vSphere * VMware vSphere: Changes for cpu\_util * VMware vSphere support: Memory Usage * Fix broken statistics in sqlalchemy * Fixes Hyper-V Inspector network metrics values * Set storage engine for the trait\_type table * Enable monkeypatch for select module * Rename id to alarm\_id of Alarm in SqlAlchemy * Fix some spelling mistakes and a incorrect url * Skip central agent interval\_task when keystone fails * Ensure user metadata mapped for instance notifications * Per pipeline pluggable resource discovery * Wider selection of aggregates for sqlalchemy * Wider selection of aggregates for mongodb * Adds time constraints to alarms * Remove code duplication Part 3 * Decouple source and sink configuration for pipelines * Selectable aggregate support in mongodb * Selectable aggregation functions for statistics * Add simple capabilities API * Removed global state modification by api test * VMware vSphere support: Performance Mgr APIs * Fix typo * move databases to test requirements * Make recording and scanning data more determined * Implements "not" operator for complex query * Implements metadata query for complex query feature * Alarms support in HBase Part 2 * Alarm support in HBase Part 1 * Remove unused variable * Added hardware pollsters for the central agent * Added hardware agent's inspector and snmp implementation * Updated from global requirements * Pluggable resource discovery for agents * Remove code duplication Part 2 * Imported Translations from Transifex * remove audit logging on flush * Tolerate absent recorded\_at on older mongo/db2 samples * api: export recorded\_at in returned samples * Fix the way how metadata is stored in HBase * Set default log level of iso8601 to WARN * Sync latest config file generator from oslo-incubator * Fix typo on testing doc page * Remove code duplication * sample table contains redundant/duplicate data * rename meter table to sample * storage: store recording timestamp * Fixed spelling error in Ceilometer * Adds doc string to query validate functions in V2 API * Updated from global requirements * Remove code that works around a (now-resolved) bug in pecan * Fix missing source field content on /v2/samples API * Refactor timestamp existence validation in V2 API * Use the module units to refer bytes type * sync units.py from oslo to ceilometer * Add comments for \_build\_paginate\_query * Implements monitoring-network * Handle Heat notifications for stack CRUD * Alembic migrations not tested * Modify the discription of combination alarm * check domain state before inspecting nics/disks * Adds gettextutils module in converter * Keep py3.X compatibility for urllib.urlencode * Added missing import * Removed useless prints that pollute tests log * Implements in operator for complex query functionality * Implements field validation for complex query functionality * allow hacking to set dependencies * Implements complex query functionality for alarm history * Implements complex query functionality for 
alarms * Remove None for dict.get() * Replace assertEqual(None, \*) with assertIsNone in tests * Update notification\_driver * Switch over to oslosphinx * Fix some flaws in ceilometer docstrings * Rename Openstack to OpenStack * Remove start index 0 in range() * Updated from global requirements * Remove blank line in docstring * Use six.moves.urllib.parse instead of urlparse * Propogate cacert and insecure flags to glanceclient * Test case for creating an alarm without auth headers * Refactored run-tests script * Implements complex query functionality for samples * fix column name and alignment * Remove tox locale overrides * Updated from global requirements * Adds flavor\_id in the nova\_notifier * Improve help strings * service: re-enable eventlet just for sockets * Fixes invalid key in Neutron notifications * Replace BoundedInt with WSME's IntegerType * Replace 'Ceilometer' by 'Telemetry' in the generated doc * Doc: Add OldSample to v2.rst * Fixing some simple documentation typos * Updated from global requirements * Fix for a simple typo * Replace 'a alarm' by 'an alarm' * Move ceilometer-send-counter to a console script * sync oslo common code * Handle engine creation inside of Connection object * Adds additional details to alarm notifications * Fix formating of compute-nova measurements table * Fix string-to-boolean casting in queries * nova notifier: disable tests + update sample conf * Update oslo * Refactored session access * Fix the py27 failure because of "ephemeral\_key\_uuid" error * Correct a misuse of RestController in the Event API * Fix docs on what an instance meter represents * Fix measurement docs to correctly represent Existance meters * samples: fix test case status code check * Replace non-ascii symbols in docs * Use swift master * Add table prefix for unit tests with hbase * Add documentation for pipeline configuration * Remove unnecessary code from alarm test * Updated from global requirements * Use stevedore's make\_test\_instance * use common code for migrations * Use explicit http error code for api v2 * Clean .gitignore * Remove unused db engine variable in api * Revert "Ensure we are not exhausting the sqlalchemy pool" * eventlet: stop monkey patching * Update dev docs to include notification-agent * Change meter\_id to meter\_name in generated docs * Correct spelling of logger for dispatcher.file * Fix some typos in architecture doc * Drop foreign key contraints of alarm in sqlalchemy * Re-enable lazy translation * Sync gettextutils from Oslo * Fix wrong doc string for meter type * Fix recursive\_keypairs output * Added abc.ABCMeta metaclass for abstract classes * Removes use of timeutils.set\_time\_override * tests: kill all started processes on exit * Exclude weak datapoints from alarm threshold evaluation * Move enable\_acl and debug config to ceilometer.conf * Fix the Alarm documentation of Web API V2 * StringIO compatibility for python3 * Set the SQL Float precision * Convert alarm timestamp to PrecisionTimestamp * use six.move.xrange replace xrange * Exit expirer earlier if db-ttl is disabled * Added resources support in pollster's interface * Improve consistency of help strings * assertTrue(isinstance) replace by assertIsInstance * Return trait type from Event api * Add new rate-based disk and network pipelines * Name and unit mapping for rate\_of\_change transformer * Update oslo * Remove dependencies on pep8, pyflakes and flake8 * Implement the /v2/samples/ API * Fix to handle null threshold\_rule values * Use DEFAULT section for dispatcher in doc * 
Insertion in HBase should be fixed * Trivial typo * Update ceilometer.conf.sample * Fix use the fact that empty sequences are false * Remove unused imports * Replace mongo aggregation with plain ol' map-reduce * Remove redundant meter (name,type,unit) tuples from Resource model * Fix work of udp publisher * tests: pass /dev/null as config for mongod * requirements: drop netaddr * tests: allow to skip if no database URL * Fix to tackle instances without an image assigned * Check for pep8 E226 and E24 * Fixed spelling mistake * AlarmChange definition added to doc/source/webapi/v2.rst * 1st & last sample timestamps in Resource representation * Avoid false negatives on message signature comparison * cacert is not picked up correctly by alarm services * Change endpoint\_type parameter * Utilizes assertIsNone and assertIsNotNone * Add missing gettextutils import to ceilometer.storage.base * Remove redundant code in nova\_client.Client * Allow customized reseller\_prefix in Ceilometer middleware for Swift * Fix broken i18n support * Empty files should no longer contain copyright * Add Event API * Ensure we are not exhausting the sqlalchemy pool * Add new meters for swift * Sync config generator workaround from oslo * storage: factorize not implemented methods * Don't assume alarms are returned in insert order * Correct env variable in file oslo.config.generator.rc * Handle the metrics sent by nova notifier * Add a wadl target to the documentation * Sync config generator from oslo-incubator * Convert event timestamp to PrecisionTimestamp * Add metadata query validation limitation * Ensure the correct error message is displayed * Imported Translations from Transifex * Move sphinxcontrib-httpdomain to test-requirements * Ensure that the user/project exist on alarm update * api: raise ClientSideError rather than ValueError * Implement the /v2/sample API * service: fix service alive checking * Oslo sync to recover from db2 server disconnects * Event Storage Layer * config: specify a template for mktemp * test code should be excluded from test coverage summary * doc: remove note about Nova plugin framework * doc: fix formatting of alarm action types * Updated from global requirements * Add configuration-driven conversion to Events * add newly added constraints to expire clear\_expired\_metering\_data * fix unit * Add import for publisher\_rpc option * add more test cases to improve the test code coverage #5 * Create a shared queue for QPID topic consumers * Properly reconnect subscribing clients when QPID broker restarts * Don't need session.flush in context managed by session * sql migration error in 020\_add\_metadata\_tables * Remove rpc service from agent manager * Imported Translations from Transifex * organise requirements files * Add a Trait Type model and db table * No module named MySQLdb bug * Add a note about permissions to ceilometer logging directory * sync with oslo-incubator * Rename OpenStack Metering to OpenStack Telemetry * update docs to adjust for naming change * Add i18n warpping for all LOG messages * Imported Translations from Transifex * Removed unused method in compute agent manger * connection is not close in migration script * Fixed a bug in sql migration script 020 * Fixed nova notifier test * Added resources definition in the pipeline * Change metadata\_int's value field to type bigint * Avoid intermittent integrity error on alarm creation * Simplify the dispatcher method prototype * Use map\_method from stevedore 0.12 * Remove the collector submodule * Move dispatcher a 
level up * Split collector * Add a specialized Event Type model and db table * Remove old sqlalchemy-migrate workaround * Revert "Support building wheels (PEP-427)" * full pep8 compliance (part 2) * Selectively import RPC backend retry config * Fixes Hyper-V Inspector disk metrics bug * Imported Translations from Transifex * full pep8 compliance (part1) * Replace mox with mock in alarm,central,image tests * Stop ignoring H506 errors * Update hacking for real * Replace mox with mock in tests.collector * Replace mox with mock in publisher and pipeline * Replace mox with mock in novaclient and compute * Remove useless defined Exception in tests * Support building wheels (PEP-427) * Fixes Hyper-V Inspector cpu metrics bug * Replace mox with mock in tests.storage * Document user-defined metadata for swift samples * Replace mox with mock in energy and objectstore * Updated from global requirements * Replace mox with mock in tests.api.v2 * Refactor API error handling * make record\_metering\_data concurrency safe * Move tests into ceilometer module * Replace mox with mock in tests.api.v1 * Replace mox with mock in tests.api.v2.test\_compute * Corrected import order * Use better predicates from testtools instead of plain assert * Stop using openstack.common.exception * Replace mox with mock in tests.network * Replace mox with mocks in test\_inspector * Fix failing nova\_tests tests * Replace mox with mocks in tests.compute.pollsters * Add an insecure option for Keystone client * Sync log from oslo * Cleanup tests.publisher tests * mongodb, db2: do not print full URL in logs * Use wsme ClientSideError to handle unicode string * Use consistant cache key for swift pollster * Fix the developer documentation of the alarm API * Fix the default rpc policy value * Allow Events without traits to be returned * Replace tests.base part8 * Replace tests.base part7 * Replace tests.base part6 * Imported Translations from Transifex * Imported Translations from Transifex * Sync log\_handler from Oslo * Don't use sqlachemy Metadata as global var * enable sql metadata query * Replace tests.base part5 * Replace tests.base part4 * Imported Translations from Transifex * Updated from global requirements * Fix doc typo in volume meter description * Updated from global requirements * Add source to Resource API object * compute: virt: Fix Instance creation * Fix for get\_resources with postgresql * Updated from global requirements * Add tests when admin set alarm owner to its own * Replace tests.base part3 * Replace tests.base part2 * Replace tests.base part1 * Fix wrong using of Metadata in 15,16 migrations * api: update for WSME 0.5b6 compliance * Changes FakeMemcache to set token to expire on utcnow + 5 mins * Change test case get\_alarm\_history\_on\_create * Change alarm\_history.detail to text type * Add support for keystoneclient 0.4.0 * Ceilometer has no such project-list subcommand * Avoid leaking admin-ness into combination alarms * Updated from global requirements * Avoid leaking admin-ness into threshold-oriented alarms * Update Oslo * Set python-six minimum version * Ensure combination alarms can be evaluated * Ensure combination alarm evaluator can be loaded * Apply six for metaclass * add more test cases to improve the test code coverage #6 * Update python-ceilometerclient lower bound to 1.0.6 * Imported Translations from Transifex * add more test cases to improve the test code coverage #4 * db2 does not allow None as a key for user\_id in user collection * Start Icehouse development * Imported Translations 
from Transifex * Disable lazy translation * Add notifications for alarm changes * Updated from global requirements * api: allow alarm creation for others project by admins * assertEquals is deprecated, use assertEqual * Imported Translations from Transifex * update alarm service setup in dev doc * Add bug number of some wsme issue * api: remove useless comments * issue an error log when cannot import libvirt * add coverage config file to control module coverage report * tests: fix rounding issue in timestamp comparison * api: return 404 if a alarm is not found * remove locals() for stringformat * add more test cases to improve the test code coverage #3 * Remove extraneous vim configuration comments * Return 401 when action is not authorized * api: return 404 if a resource is not found * keystone client changes in AuthProtocol made our test cases failing * Don't load into alarms evaluators disabled alarms * Remove MANIFEST.in * Allow to get a disabled alarm * Add example with return values in API v2 docs * Avoid imposing alembic 6.0 requirement on all distros * tests: fix places check for timestamp equality * Don't publish samples if resource\_id in missing * Require oslo.config 1.2.0 final * Don't send unuseful rpc alarm notification * service: check that timestamps are almost equals * Test the response body when deleting a alarm * Change resource.resource\_metadata to text type * Adding region name to service credentials * Fail tests early if mongod is not found * add more test cases to improve the test code coverage #2 * add more test cases to improve the test code coverage #1 * Imported Translations from Transifex * Replace OpenStack LLC with OpenStack Foundation * Use built-in print() instead of print statement * Simple alarm partitioning protocol based on AMQP fanout RPC * Handle manually mandatory field * Provide new API endpoint for alarm state * Implement the combination evaluator * Add alarm combination API * Notify with string representation of alarm reason * Convert BoundedInt value from json into int * Fix for timestamp precision in SQLAlchemy * Add source field to Meter model * Refactor threshold evaluator * Alarm API update * Update requirements * WSME 0.5b5 breaking unit tests * Fix failed downgrade in migrations * refactor db2 get\_meter\_statistics method to support mongodb and db2 * tests: import pipeline config * Fix a tiny mistake in api doc * collector-udp: use dispatcher rather than storage * Imported Translations from Transifex * Drop sitepackages=False from tox.ini * Update sphinxcontrib-pecanwsme to 0.3 * Architecture enhancements * Force MySQL to use InnoDB/utf8 * Update alembic requirement to 0.6.0 version * Correctly output the sample content in the file publisher * Pecan assuming meter names are extensions * Handle inst not found exceptions in pollsters * Catch exceptions from nova client in poll\_and\_publish * doc: fix storage backend features status * Add timestamp filtering cases in storage tests * Imported Translations from Transifex * Use global openstack requirements * Add group by statistics examples in API v2 docs * Add docstrings to some methods * add tests for \_query\_to\_kwargs func * validate counter\_type when posting samples * Include auth\_token middleware in sample config * Update config generator * run-tests: fix MongoDB start wait * Imported Translations from Transifex * Fix handling of bad paths in Swift middleware * Drop the \*.create.start notification for Neutron * Make the Swift-related doc more explicit * Fix to return latest resource 
metadata * Update the high level architecture * Alarm history storage implementation for sqlalchemy * Improve libvirt vnic parsing with missing mac! * Handle missing libvirt vnic targets! * Make type guessing for query args more robust * add MAINTAINERS file * nova\_notifier: fix tests * Update openstack.common.policy from oslo-incubator * Clean-ups related to alarm history patches * Improved MongoClient pooling to avoid out of connections error * Disable the pymongo pooling feature for tests * Fix wrong migrations * Fixed nova notifier unit test * Add group by statistics in API v2 * Update to tox 1.6 and setup.py develop * Add query support to alarm history API * Reject duplicate events * Fixes a bug in Kwapi pollster * alarm api: rename counter\_name to meter\_name * Fixes service startup issue on Windows * Handle volume.resize.\* notifications * Network: process metering reports from Neutron * Alarm history storage implementation for mongodb * Fix migration with fkeys * Fixes two typos in this measurements.rst * Add a fake UUID to Meter on API level * Append /usr/sbin:/sbin to the path for searching mongodb * Plug alarm history logic into the API * Added upper version boundry for six * db2 distinct call results are different from mongodb call * Sync rpc from oslo-incubator * Imported Translations from Transifex * Add pagination parameter to the database backends of storage * Base Alarm history persistence model * Fix empty metadata issue of instance * alarm: generate alarm\_id in API * Import middleware from Oslo * Imported Translations from Transifex * Adds group by statistics for MongoDB driver * Fix wrong UniqueConstraint name * Adds else and TODO in statistics storage tests * Imported Translations from Transifex * Extra indexes cleanup * API FunctionalTest class lacks doc strings * install manual last few sections format needs to be fixed * api: update v1 for Flask >= 0.10 * Use system locale when Accept-Language header is not provided * Adds Hyper-V compute inspector * missing resource in middleware notification * Support for wildcard in pipeline * Refactored storage tests to use testscenarios * doc: replace GitHub by git.openstack.org * api: allow usage of resource\_metadata in query * Remove useless doc/requirements * Fixes non-string metadata query issue * rpc: reduce sleep time * Move sqlachemy tests only in test\_impl\_sqlachemy * Raise Error when pagination/groupby is missing * Raise Error when pagination support is missing * Use timeutils.utcnow in alarm threshold evaluation * db2 support * plugin: remove is\_enabled * Doc: improve doc about Nova measurements * Storing events via dispatchers * Imported Translations from Transifex * ceilometer-agent-compute did not catch exception for disk error * Change counter to sample in network tests * Change counter to sample in objectstore tests * Remove no more used code in test\_notifier * Change counter to sample vocable in cm.transformer * Change counter to sample vocable in cm.publisher * Change counter to sample vocable in cm.image * Change counter to sample vocable in cm.compute * Change counter to sample vocable in cm.energy * Use samples vocable in cm.publisher.test * Change counter to sample vocable in volume tests * Change counter to sample vocable in api tests * Add the source=None to from\_notification * Make RPCPublisher flush method threadsafe * Enhance delayed message translation when \_ is imported * Remove use\_greenlets argument to MongoClient * Enable concurrency on nova notifier tests * Imported Translations from 
Transifex * Close database connection for alembic env * Fix typo in 17738166b91 migration * Don't call publisher without sample * message\_id is not allowed to be submitted via api * Api V2 post sample refactoring * Add SQLAlchemy implementation of groupby * Fixes failed notification when deleting instance * Reinitialize pipeline manager for service restart * Sync gettextutils from oslo-incubator * Doc: clearly state that one can filter on metadata * Add HTTP request/reply samples * Use new olso fixture in CM tests * Imported Translations from Transifex * Bump hacking to 0.7.0 * Fix the dict type metadata missing issue * Raise error when period with negative value * Imported Translations from Transifex * Import missing gettext \_ * Remove 'counter' occurences in pipeline * Remove the mongo auth warning during tests * Change the error message of resource listing in mongodb * Change test\_post\_alarm case in test\_alarm\_scenarios * Skeletal alarm history API * Reorg alarms controller to facilitate history API * Fix Jenkins failed due to missing \_ * Fix nova test\_notifier wrt new notifier API * Remove counter occurences from documentation * Updated from global requirements * Fixes dict metadata query issue of HBase * s/alarm/alarm\_id/ in alarm notification * Remove unused abstract class definitions * Removed unused self.counters in storage test class * Initial alarming documentation * Include previous state in alarm notification * Consume notification from the default queue * Change meter.resource\_metadata column type * Remove MongoDB TTL support for MongoDB < 2.2 * Add first and last sample timestamp * Use MongoDB aggregate to get resources list * Fix resources/meters pagination test * Handle more Nova and Neutron events * Add support for API message localization * Add the alarm id to the rest notifier body * fix alarm notifier tests * Sync gettextutils from oslo * Fix generating coverage on MacOSX * Use the new nova Instance class * Return message\_id in POSTed samples * rpc: remove source argument from message conversion * Remove source as a publisher argument * Add repeat\_actions to alarm * Rename get\_counters to get\_samples * Add pagination support for MongoDB * Doc: measurements: add doc on Cinder/Swift config * Update nova\_client.py * objectstore: trivial cleanup in \_Base * Add support for CA authentication in Keystone * add unit attribute to statistics * Fix notify method signature on LogAlarmNotifier * Fix transformer's LOG TypeError * Update openstack.common * Fixes Hbase metadata query return wrong result * Fix Hacking 0.6 warnings * Make middleware.py Python 2.6 compatible * Call alembic migrations after sqlalchemy-migrate * Rename ceilometer.counter to ceilometer.sample * Added separate MongoDB database for each test * Relax OpenStack upper capping of client versions * Refactored MongoDB connection pool to use weakrefs * Centralized backends tests scenarios in one place * Added tests to verify that local time is correctly handled * Refactored impl\_mongodb to use full connection url * calling distinct on \_id field against a collection is slow * Use configured endpoint\_type everywhere * Allow use of local conductor * Update nova configuration doc to use notify\_on\_state\_change * doc: how to inject user-defined data * Add documentation on nova user defined metadata * Refactored API V2 tests to use testscenarios * Refactored API V1 tests to use testscenarios * alarm: Per user setting to disable ssl verify * alarm: Global setting to disable ssl verification * Imported 
Translations from Transifex * Implementation of the alarm RPCAlarmNotifier * Always init cfg.CONF before running a test * Sets storage\_conn in CollectorService * Remove replace/preserve logic from rate of change transformer * storage: remove per-driver options * hbase: do not register table\_prefix as a global option * mongodb: do not set replica\_set as a global option * Change nose to testr in the documentation * Fixed timestamp creation in MongoDB mapreduce * Ensure url is a string for requests.post * Implement a https:// in REST alarm notification * Implement dot in matching\_metadata key for mongodb * trailing slash in url causes 404 error * Fix missing foreign keys * Add cleanup migration for indexes * Sync models with migrations * Avoid dropping cpu\_util for multiple instances * doc: /statistics fields are not queryable (you cannot filter on them) * fix resource\_metadata failure missing image data * Standardize on X-Project-Id over X-Tenant-Id * Default to ctx user/project ID in sample POST API * Multiple dispatcher enablement * storage: fix clear/upgrade order * Lose weight for Ceilometer log in verbose mode * publisher.rpc: queing policies * Remove useless mongodb connection pool comment * Add index for db.meter by descending timestamp * doc: add a bunch of functional examples for the API * api: build the storage connection once and for all * Fix the argument of UnknownArgument exception * make publisher procedure call configurable * Disable mongod prealloc, wait for it to start * Added alembic migrations * Allow to enable time to live on metering sample * Implement a basic REST alarm notification * Imported Translations from Transifex * Ensure correct return code of run-tests.sh * File based publisher * Unset OS\_xx variable before generate configuration * Use run-tests.sh for tox coverage tests * Emit cpu\_util from transformer instead of pollster * Allow simpler scale exprs in transformer.conversions * Use a real MongoDB instance to run unit tests * Allow to specify the endpoint type to use * Rename README.md to README.rst * Use correct hostname to get instances * Provide CPU number as additional metadata * Remove get\_counter\_names from the pollster plugins * Sync SQLAlchemy models with migrations * Transformer to measure rate of change * Make sure plugins are named after their meters * Break up the swift pollsters * Split up the glance pollsters * Make visual coding style consistent * Separate power and energy pollsters * Break up compute pollsters * Implement a basic alarm notification service * Optionally store Events in Collector * Fix issue with pip installing oslo.config-1.2.0 * Transformer to convert between units * publisher.rpc: make per counter topic optional * ceilometer tests need to be enabled/cleaned * Also accept timeout parameter in FakeMemCache * Fix MongoDB backward compat wrt units * Use oslo.sphinx and remove local copy of doc theme * Reference setuptools and not distribute * enable v2 api hbase tests * Register all interesting events * Unify Counter generation from notifications * doc: enhance v2 examples * Update glossary * Imported Translations from Transifex * Imported Translations from Transifex * Filter query op:gt does not work as expected * sqlalchemy: fix performance issue on get\_meters() * enable v2 api sqlalchemy tests * Update compute vnic pollster to use cache * Update compute CPU pollster to use cache * Update compute disk I/O pollster to use cache * update Quantum references to Neutron * Update swift pollster to use cache * Update kwapi 
pollster to use cache * Update floating-ip pollster to use cache * Update glance pollster to use cache * Add pollster data cache * Fix flake8 errors * Update Oslo * Enable Ceilometer to support mongodb replication set * Fix return error when resource can't be found * Simple service for singleton threshold eval * Basic alarm threshold evaluation logic * add metadata to nova\_client results * Bring in oslo-common rpc ack() changes * Pin the keystone client version * Fix auth logic for PUT /v2/alarms * Imported Translations from Transifex * Change period type in alarms API to int * mongodb: fix limit value not being an integer * Check that the config file sample is always up to date * api: enable v2 tests on SQLAlchemy & HBase * Remove useless periodic\_interval option * doc: be more explicit about network counters * Capture instance metadata in reserved namespace * Imported Translations from Transifex * pep8: enable E125 checks * pep8: enable F403 checks * pep8: enable H302 checks * pep8: enable H304 checks * pep8: enable H401 * pep8: enable H402 checks * Rename the MeterPublisher to RPCPublisher * Replace publisher name by URL * Enable pep8 H403 checks * Activate H404 checks * Ceilometer may generate wrong format swift url in some situations * Code cleanup * Update Oslo * Use Flake8 gating for bin/ceilometer-\* * Update requirements to fix devstack installation * Update to the latest stevedore * Start gating on H703 * Remove disabled\_notification\_listeners option * Remove disabled\_compute\_pollsters option * Remove disabled\_central\_pollsters option * Longer string columns for Trait and UniqueNames * Fix nova notifier tests * pipeline: switch publisher loading model to driver * Enforce reverse time-order for sample return * Remove explicit distribute depend * Use Python 3.x compatible octal literals * Improve Python 3.x compatibility * Fix requirements * Corrected path for test requirements in docs * Fix some typo in documentation * Add instance\_scheduled in entry points * fix session connection * Remove useless imports, reenable F401 checks * service: run common initialization stuff * Use console scripts for ceilometer-api * Use console scripts for ceilometer-dbsync * Use console scripts for ceilometer-agent-compute * Use console scripts for ceilometer-agent-central * agent-central: use CONF.import\_opt rather than import * Move os\_\* options into a group * Use console scripts for ceilometer-collector * sqlalchemy: migration error when running db-sync * session flushing error * api: add limit parameters to meters * python3: Introduce py33 to tox.ini * Start to use Hacking * Session does not use ceilometer.conf's database\_connection * Add support for limiting the number of samples returned * Imported Translations from Transifex * Add support policy to installation instructions * sql: fix 003 downgrade * service: remove useless PeriodicService class * Fix nova notifier tests * Explicitly set downloadcache in tox.ini * Imported Translations from Transifex * Switch to sphinxcontrib-pecanwsme for API docs * Update oslo, use new configuration generator * doc: fix hyphens instead of underscores for 'os\*' conf options * Allow specifying a listen IP * Log configuration values on API startup * Don't use pecan to configure logging * Mark sensitive config options as secret * Imported Translations from Transifex * ImagePollster record duplicate counter during one poll * Rename requires files to standard names * Add an UDP publisher and receiver * hbase metaquery support * Imported Translations 
from Transifex * Fix and update extract\_opts group extraction * Fix the sample name of 'resource\_metadata' * Added missing source variable in storage drivers * Add Event methods to db api * vnics: don't presume existence of filterref/filter * force the test path to a str (sometimes is unicode) * Make sure that v2 api tests have the policy file configured * Imported Translations from Transifex * setup.cfg misses swift filter * Add a counter for instance scheduling * Move recursive\_keypairs into utils * Replace nose with testr * Use fixtures in the tests * fix compute units in measurement doc * Allow suppression of v1 API * Restore default interval * Change from unittest to testtools * remove unused tests/skip module * Imported Translations from Transifex * Get all tests to use tests.base.TestCase * Allow just a bit longer to wait for the server to startup * Document keystone\_authtoken section * Restore test dependency on Ming * Set the default pipline config file for tests * Imported Translations from Transifex * Fix cross-document references * Fix config setting references in API tests * Restrict pep8 & co to pep8 target * Fix meter\_publisher in setup.cfg * Use flake8 instead of pep8 * Imported Translations from Transifex * Use sqlalchemy session code from oslo * Switch to pbr * fix the broken ceilometer.conf.sample link * Add a direct Ceilometer notifier * Do the same auth checks in the v2 API as in the v1 API * Add the sqlalchemy implementation of the alarms collection * Allow posting samples via the rest API (v2) * Updated the ceilometer.conf.sample * Don't use trivial alarm\_id's like "1" in the test cases * Fix the nova notifier tests after a nova rename * Document HBase configuration * alarm: fix MongoDB alarm id * Use jsonutils instead of json in test/api.py * Connect the Alarm API to the db * Add the mongo implementation of alarms collection * Move meter signature computing into meter\_publish * Update WSME dependency * Imported Translations from Transifex * Add Alarm DB API and models * Imported Translations from Transifex * Remove "extras" again * add links to return values from API methods * Modify limitation on request version * Doc improvements * Rename EventFilter to SampleFilter * Fixes AttributeError of FloatingIPPollster * Add just the most minimal alarm API * Update oslo before bringing in exceptions * Enumerate the meter type in the API Meter class * Remove "extras" as it is not used * Adds examples of CLI and API queries to the V2 documentation * Measurements documentation update * update the ceilometer.conf.sample * Set hbase table\_prefix default to None * glance/cinder/quantum counter units are not accurate/consistent * Add some recommendations about database * Pin SQLAlchemy to 0.7.x * Ceilometer configuration.rst file not using right param names for logging * Fix require\_map\_reduce mim import * Extend swift middleware to collect number of requests * instances: fix counter unit * Remove Folsom support * transformer, publisher: move down base plugin classes * pipeline, publisher, transformer: reorganize code * Fix tests after nova changes * Update to the lastest loopingcall from oslo * Imported Translations from Transifex * update devstack instructions for cinder * Update openstack.common * Reformat openstack-common.conf * storage: move nose out of global imports * storage: get rid of get\_event\_interval * Remove gettext.install from ceilometer/\_\_init\_\_.py * Prepare for future i18n use of \_() in nova notifier * Update part of openstack.common * Convert 
storage drivers to return models * Adpated to nova's gettext changes * add v2 query examples * storage: remove get\_volume\_sum and get\_volume\_max * api: run tests against HBase too * api: run sum unit tests against SQL backend too * Split and fix live db tests * Remove impl\_test * api: run max\_resource\_volume test on SQL backend * Refactor DB tests * fix volume tests to utilize VOLUME\_DELETE notification * Open havana development, bump to 2013.2 * Change the column counter\_volume to Float * tests: disable Ming test if Ming unavailable * Imported Translations from Transifex * enable arguments in tox * api: run max\_volume tests on SQL backend too * api: run list\_sources tests on SQL and Mongo backend * api: run list\_resources test against SQL * api: handle case where metadata is None * Fix statistics period computing with start/end time * Allow publishing arbitrary headers via the "storage.objects.\*.bytes" counter * Updated the description of get\_counters routine * enable xml error message response * Swift pollster silently return no counter if keystone endpoint is not present * Try to get rid of the "events" & "raw events" naming in the code * Switch to python-keystoneclient 0.2.3 * include a copy of the ASL 2.0 * add keystone configuration instructions to manual install docs * Update openstack.common * remove unused dependencies * Set the default\_log\_levels to include keystoneclient * Switch to final 1.1.0 oslo.config release * Add deprecation warnings for V1 API * Raise stevedore requirement to 0.7 * Fixed the blocking unittest issues * Fix a pep/hacking error in a swift import * Add sample configuration files for mod\_wsgi * Add a tox target for building documentation * Use a non-standard port for the test server * Ensure the statistics are sorted * Start both v1 and v2 api from one daemon * Handle missing units values in mongodb data * Imported Translations from Transifex * Make HACKING compliant * Update manual installation instructions * Fix oslo.config and unittest * Return something sane from the log impl * Fix an invalid test in the storage test suite * Add the etc directory to the sdist manifest * api: run compute duration by resource on SQL backend * api: run list\_projects tests against SQL backend too * api: run list users test against SQL backend too * api: run list meters tests against SQL backend too * Kwapi pollster silently return no probre if keystone endpoint is not present * HBase storage driver, initial version * Exclude tests directory from installation * Ensure missing period is treated consistently * Exclude tests when installing ceilometer * Run some APIv1 tests on different backends * Remove old configuration metering\_storage\_engine * Set where=tests * Decouple the nova notifier from ceilometer code * send-counter: fix & test * Remove nose wrapper script * Fix count type in MongoDB * Make sure that the period is returned as an int as the api expects an int * Imported Translations from Transifex * Remove compat cfg wrapper * compute: fix unknown flavor handling * Allow empty dict as metaquery param for sqlalchemy * Add glossary definitions for additional terms * Support different publisher interval * Fix message envelope keys * Revert recent rpc wire format changes * Document the rules for units * Fix a bug in compute manager test case * plugin: don't use @staticmethod with abc * Support list/tuple as meter message value * Imported Translations from Transifex * Update common to get new kombu serialization code * Disable notifier tests * pipeline: 
manager publish multiple counters * Imported Translations from Transifex * Use oslo-config-2013.1b3 * mongodb: make count an integer explicitely * tests: allow to run API tests on live db * Update to latest oslo-version * Imported Translations from Transifex * Add directive to MANIFEST.in to include all the html files * Use join\_consumer\_pool() for notifications * Update openstack.common * Add period support in storage drivers and API * Update openstack/common tree * storage: fix mongo live tests * swift: configure RPC service correctly * Fix tox python version for Folsom * api: use delta\_seconds() * transformer: add acculumator transformer * Import service when cfg.CONF.os\_\* is used * pipeline: flush after publishing call * plugin: format docstring as rst * Use Mongo finalize to compute avg and duration * Code cleanup, remove useless import * api: fix a test * compute: fix notifications test * Move counter\_source definition * Allow to publish several counters in a row * Fixed resource api in v2-api * Update meter publish with pipeline framework * Use the same Keystone client instance for pollster * pipeline: fix format error in logging * More robust mocking of nova conductor * Mock more conductor API methods to unblock tests * Update pollsters to return counter list * Update V2 API documentation * Added hacking.py support to pep8 portion of tox * setup: fix typo in package data * Fix formatting issue with v1 API parameters * Multiple publisher pipeline framework * Remove setuptools\_git from setup\_requires * Removed unused param for get\_counters() * Use WSME 0.5b1 * Factorize agent code * Fixed the TemplateNotFound error in v1 api * Ceilometer-api is crashing due to pecan module missing * Clean class variable in compute manager test case * Update nova notifier test after nova change * Fix documentation formatting issues * Simplify ceilometer-api and checks Keystone middleware parsing * Fix nova conf compute\_manager unavailable * Rename run\_tests.sh to wrap\_nosetests.sh * Update openstack.common * Corrected get\_raw\_event() in sqlalchemy * Higher level test for db backends * Remove useless imports * Flatten the v2 API * Update v2 API for WSME code reorg * Update WebOb version specification * Remove the ImageSizePollster * Add Kwapi pollster (energy monitoring) * Fixes a minor documentation typo * Peg the version of Ming used in tests * Update pep8 to 1.3.3 * Remove leftover useless import * Enhance policy test for init() * Provide the meters unit's in /meters * Fix keystoneclient auth\_token middleware changes * policy: fix policy\_file finding * Remove the \_initialize\_config\_options * Add pyflakes * Make the v2 API date query parameters consistent * Fix test blocking issue and pin docutils version * Apply the official OpenStack stylesheets and templates to the Doc build * Fixed erroneous source filter in SQLAlchemy * Fix warnings in the documentation build * Handle finish and revert resize notifications * Add support for Folsom version of Swift * Implement user-api * Add support for Swift incoming/outgoing trafic metering * Pass a dict configuration file to auth\_keystone * Import only once in nova\_notifier * Fix MySQL charset error * Use default configuration file to make test data * Fix Glance control exchange * Move back api-v1 to the main api * Fix WSME arguments handling change * Remove useless gettext call in sql engine * Ground work for transifex-ify ceilometer * Add instance\_type information to NetPollster * Fix dbsync API change * Fix image\_id in instance 
resource metadata * Instantiate inspector in compute manager * remove direct nova db access from ceilometer * Make debugging the wsme app a bit easier * Implements database upgrade as storage engine independent * Fix the v1 api importing of acl * Add the ability to filter on metadata * Virt inspector directly layered over hypervisor API * Move meter.py into collector directory * Change mysql schema from latin1 to utf8 * Change default os-username to 'ceilometer' * Restore some metadata to the events and resources * Update documentation URL * Add sql db option to devstack for ceilometer * Remove debug print in V2 API * Start updating documentation for V2 API * Implement V2 API with Pecan and WSME * Move v1 API files into a subdirectory * Add test storage driver * Implement /meters to make discovery "nicer" from the client * Fix sqlalchemy for show\_data and v1 web api * Implement object store metering * Make Impl of mongodb and sqlalchemy consistent * add migration migrate.cfg file to the python package * Fixes to enable the jenkins doc job to work * Lower the minimum required version of anyjson * Fix blocking test for nova notifier * network: remove left-over useless nova import * tools: set novaclient minimum version * libvirt: fix Folsom compatibility * Lower pymongo dependency * Remove rickshaw subproject * Remove unused rpc import * Adapted to nova's compute\_driver moving * doc: fix cpu counter unit * tools: use tarballs rather than git for Folsom tests * Used auth\_token middleware from keystoneclient * Remove cinderclient dependency * Fix latest nova changes * api: replace minified files by complete version * Add Folsom tests to tox * Handle nova.flags removal * Provide default configuration file * Fix mysql\_engine option type * Remove nova.flags usage * api: add support for timestamp in \_list\_resources() * api: add timestamp interval support in \_list\_events() * tests: simplify api list\_resources * Update openstack.common(except policy) * Adopted the oslo's rpc.Service change * Use libvirt num\_cpu for CPU utilization calculation * Remove obsolete reference to instance.vcpus * Change references of /etc/ceilometer-{agent,collector}.conf to /etc/ceilometer/ceilometer.conf * Determine instance cores from public flavors API * Determine flavor type from the public nova API * Add comment about folsom compatibility change * Add keystone requirement for doc build * Avoid TypeError when loading libvirt.LibvirtDriver * Don't re-import flags and do parse\_args instead of flags.FLAGS() * doc: rename stackforge to openstack * Fix pymongo requirements * Update .gitreview for openstack * Update use of nova config to work with folsom * compute: remove get\_disks work-around * Use openstack versioning * Fix documentation build * document utc naive timestamp * Remove database access from agent pollsters * Fix merge error in central/manager.py * Fix nova config parsing * pollster trap error due to zero floating ip * Use the service.py in openstack-common * Allow no configured sources, provide a default file * Add service.py from openstack-common * Update common (except policy) * nova fake libvirt library breaking tests * Move db access out into a seperate file * Remove invalid fixme comments * Add new cpu\_util meter recording CPU utilization % * Fix TypeError from old-style publish\_counter calls * Fix auth middleware configuration * pin sqlalchemy to 0.7.x but not specifically 0.7.8 * add mongo index names * set tox to ignore global packages * Provide a way to disable some plugins * Use 
stevedore to load all plugins * implement get\_volume\_max for sqlalchemy * Add basic text/html renderer * network: floating IP account in Quantum * add unit test for CPUPollster * Clean up context usage * Add dependencies on clients used by pollsters * add ceilometer-send-counter * Update openstack.common.cfg * Fix tests broken by API change with Counter class * api: add source detail retrieval * Set source at publish time * Instance pollster emits instance. meter * timestamp columns in sqlalchemy not timezone aware * Remove obsolete/incorrect install instructions * network: emit router meter * Fix sqlalchemy performance problem * Added a working release-bugs.py script to tools/ * Change default API port * sqlalchemy record\_meter merge objs not string * Use glance public API as opposed to registry API * Add OpenStack trove classifier for PyPI * bump version number to 0.2 * Nova libvirt release note * Update metadata for PyPI registration * tox: add missing venv * Fixes a couple typos * Counter renaming * Set correct timestamp on floatingip counter * Fix API change in make\_test\_data.py * Fix Nova URL in doc * Some more doc fixes * Ignore instances in the ERROR state * Use the right version number in documentation * doc: fix network.\*.\* resource id * image: handle glance delete notifications * image: handle glance upload notifications * image: add update event, fix ImageServe owner * network: fix create/update counter type & doc * Assorted doc fixes * add max/sum project volume and fix tests * Add general options * compute.libvirt: split read/write counters * API: add Keystone ACL and policy support * Add documentation for configuration options * network: do not emit counter on exists event, fix resource id * Move net function in class method and fix instance id * Prime counter table * Fix the configuration for the nova notifier * Initialize the control\_exchange setting * Set version 0.1 * Make the instance counters use the same type * Restore manual install documentation * add quantum release note * Add release notes to docs * Update readme and create release notes * Remove duration field in Counter * Add counter for number of packets per vif * Move instance counter into its own pollster * Add a request counter for instance I/O * Rename instance disk I/O counter * Rename instances network counters * Use constant rather than string from counter type * Update the architecture diagram * Increase default polling interval * Fix compute agent publishing call * network: listen for Quantum exists event * Correct requirements filename * Fix notification subscription logic * Fix quantum notification subscriptions * Split meter publishing from the global config obj * network: add counter for actions * network: listen for Quantum notifications * Rename absolute to gauge * Fix typo in control exchanges help texts * Rework RPC notification mechanism * Update packaging files * Update URL list * Update openstack.common * Add volume/sum API endpoint for resource meters * Add resource volume/max api call * Fix dependency on anyjson * Listen for volume.delete.start instead of end * implement sqlalchemy dbengine backend * Add a notification handler for image downloads * Allow glance pollster tests to run * Create tox env definition for using a live db * Picking up dependencies from pip-requires file * Specify a new queue in manager * Rework RPC connection * Stop using nova's rpc module * Add configuration script to turn on notifications * Pep8 fixes, implement pep8 check on tests subdir * Use standard 
CLI options & env vars for creds * compute: remove get\_metadata\_from\_event() * Listen for volume notifications * Add pollster for Glance * Fix Nova notifier test case * Fix nova flag parsing * Add nova\_notifier notification driver for nova * Split instance polling code * Use stevedore to load storage engine drivers * Implement duration calculation API * Create tool for generating test meter data * Update openstack-common code to latest * Add bin/ceilometer-api for convenience * Add local copy of architecture diagram * Add timestamp parameters to the API docs * Check for doc build dependency before building * Pollster for network internal traffic (n1,n2) * Fix PEP8 issues * Add archicture diagram to documentation * added mongodb auth * Change timestamp management for resources * Log the instance causing the error when a pollster fails * Document how to install with devstack * Remove test skipping logic * Remove dependency on nova test modules * Add date range parameters to resource API * Add setuptools-git support * Add separate notification handler for instance flavor * Change instance meter type * Split the existing notification handlers up * Remove redundancy in the API * Separate the tox coverage test setup from py27 * Do not require user or project argument for event query * Add pymongo dependency for readthedocs.org build * Update openstack.common * Add API documentation * Be explicit about test dir * Add list projects API * Sort list of users and projects returned from queries * Add project arg to event and resource queries * Fix "meter" literal in event list API * collector exception on record\_metering\_data * Add API endpoint for listing raw event data * Change compute pollster API to work on one instance at a time * Create "central" agent * Skeleton for API server * fix use of source value in mongdb driver * Add {root,ephemeral}\_disk\_size counters * Implements vcpus counter * Fix nova configuration loading * Implements memory counter * Fix and document counter types * Check compute driver using new flag * Add openstack.common.{context,notifier,log} and update .rpc * Update review server link * Add link to roadmap * Add indexes to MongoDB driver * extend developer documentation * Reset the correct nova dependency URL * Switch .gitreview to use OpenStack gerrit * Add MongoDB engine * Convert timestamps to datetime objects before storing * Reduce complexity of storage engine API * Remove usage of nova.log * Documentation edits: * fix typo in instance properties list * Add Sphinx wrapper around existing docs * Configure nova.flags as well as openstack.common.cfg * First draft of plugin/agent documentation. 
Fixes bug 1018311 * Essex: update Nova to 2012.1.1, add python-novaclient * Split service preparation, periodic interval configurable * Use the same instance metadata everywhere * Emit meter event for instance "exists" * Start defining DB engine API * Fallback on nova.rpc for Essex * Add instance metadata from notification events * Combined fix to get past broken state of repo * Add more metadata to instance counter * Register storage options on import * Add Essex tests * log more than ceilometer * Remove event\_type field from meter messages * fix message signatures for nested dicts * Remove nova.flags usage * Copy openstack.common.cfg * check message signatures in the collector * Sketch out a plugin system for saving metering data * refactor meter event publishing code * Add and use ceilometer own log module * add counter type field * Use timestamp instead of datetime when creating Counter * Use new flag API * Fix a PEP8 error * Make the stand-alone test script mimic tox * Remove unneeded eventlet test requirement * Add listeners for other instance-related events * Add tox configuration * Use openstack.common.cfg for ceilometer options * Publish and receive metering messages * Add floating IP pollster * Fix tests based on DB by importing nova.tests * make the pollsters in the agent plugins * Build ceilometer-agent and ceilometer-collector * Add plugin support to the notification portion of the collector daemon * Add CPU time fetching * Add an example function for converting a nova notification to a counter * add a tool for recording notifications and replaying them * Add an exception handler to deal with errors that occur when the info in nova is out of sync with reality (as on my currently broken system). Also adds a nova prefix to the logger for now so messages from this module make it into the log file * Periodically fetch for disk io stats * Use nova.service, add a manager class * Change license to Apache 2.0 * Add setup.py * Import ceilometer-nova-compute * Ignore pyc files * Add link to blueprint * Add .gitreview file * initial commit ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/HACKING.rst0000664000175000017500000000032700000000000014337 0ustar00zuulzuul00000000000000Aodh Style Commandments ======================= - Step 1: Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/ - Step 2: Read on Aodh Specific Commandments -------------------------- ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/LICENSE0000664000175000017500000002363700000000000013557 0ustar00zuulzuul00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. 
You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/MAINTAINERS0000664000175000017500000000060300000000000014233 0ustar00zuulzuul00000000000000= Generalist Code Reviewers = The current members of aodh-core are listed here: https://launchpad.net/~aodh-drivers/+members#active This group can +2 and approve patches in aodh. However, they may choose to seek feedback from the appropriate specialist maintainer before approving a patch if it is in any way controversial or risky. = IRC handles of maintainers = gordc jd_ sileht ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.1230197 aodh-19.0.0/PKG-INFO0000664000175000017500000000364100000000000013640 0ustar00zuulzuul00000000000000Metadata-Version: 2.1 Name: aodh Version: 19.0.0 Summary: OpenStack Telemetry Alarming Home-page: https://docs.openstack.org/aodh/latest/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org License: UNKNOWN Description: aodh ==== Aodh is the alarming service for OpenStack. ------------- Documentation ------------- Documentation for the project can be found at: https://docs.openstack.org/aodh/latest/ Release notes can be read online at: https://docs.openstack.org/aodh/latest/contributor/releasenotes/index.html Code Repository --------------- - Server: https://opendev.org/openstack/aodh/ Bug Tracking ------------ Bugs and feature requests are tracked on Launchpad at: https://bugs.launchpad.net/aodh/ IRC --- IRC Channel: #openstack-telemetry on `OFTC`_. .. 
_OFTC: https://oftc.net/ Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Topic :: System :: Monitoring Requires-Python: >=3.8 Provides-Extra: mysql Provides-Extra: postgresql Provides-Extra: test Provides-Extra: zaqar ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/README.rst0000664000175000017500000000111300000000000014222 0ustar00zuulzuul00000000000000aodh ==== Aodh is the alarming service for OpenStack. ------------- Documentation ------------- Documentation for the project can be found at: https://docs.openstack.org/aodh/latest/ Release notes can be read online at: https://docs.openstack.org/aodh/latest/contributor/releasenotes/index.html Code Repository --------------- - Server: https://opendev.org/openstack/aodh/ Bug Tracking ------------ Bugs and feature requests are tracked on Launchpad at: https://bugs.launchpad.net/aodh/ IRC --- IRC Channel: #openstack-telemetry on `OFTC`_. .. _OFTC: https://oftc.net/ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0630102 aodh-19.0.0/aodh/0000775000175000017500000000000000000000000013452 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/__init__.py0000664000175000017500000000146100000000000015565 0ustar00zuulzuul00000000000000# Copyright 2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class NotImplementedError(NotImplementedError): # FIXME(jd) This is used by WSME to return a correct HTTP code. We should # not expose it here but wrap our methods in the API to convert it to a # proper HTTP error. code = 501 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0670109 aodh-19.0.0/aodh/api/0000775000175000017500000000000000000000000014223 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/api/__init__.py0000664000175000017500000000170000000000000016332 0ustar00zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg # Register options for the service OPTS = [ cfg.StrOpt('paste_config', default='api-paste.ini', help="Configuration file for WSGI definition of API."), cfg.StrOpt( 'auth_mode', default="keystone", help="Authentication mode to use. Unset to disable authentication"), ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/api/api-paste.ini0000664000175000017500000000257400000000000016617 0ustar00zuulzuul00000000000000[composite:aodh+noauth] use = egg:Paste#urlmap / = aodhversions_pipeline /v2 = aodhv2_noauth_pipeline /healthcheck = healthcheck [composite:aodh+keystone] use = egg:Paste#urlmap / = aodhversions_pipeline /v2 = aodhv2_keystone_pipeline /healthcheck = healthcheck [app:healthcheck] use = egg:oslo.middleware#healthcheck oslo_config_project = aodh [pipeline:aodhversions_pipeline] pipeline = cors http_proxy_to_wsgi aodhversions [app:aodhversions] paste.app_factory = aodh.api.app:app_factory root = aodh.api.controllers.root.VersionsController [pipeline:aodhv2_keystone_pipeline] pipeline = cors http_proxy_to_wsgi request_id osprofiler authtoken aodhv2 [pipeline:aodhv2_noauth_pipeline] pipeline = cors http_proxy_to_wsgi request_id osprofiler aodhv2 [app:aodhv2] paste.app_factory = aodh.api.app:app_factory root = aodh.api.controllers.v2.root.V2Controller [filter:authtoken] paste.filter_factory = keystonemiddleware.auth_token:filter_factory oslo_config_project = aodh [filter:request_id] paste.filter_factory = oslo_middleware:RequestId.factory [filter:cors] paste.filter_factory = oslo_middleware.cors:filter_factory oslo_config_project = aodh [filter:http_proxy_to_wsgi] paste.filter_factory = oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory oslo_config_project = aodh [filter:osprofiler] paste.filter_factory = aodh.profiler:WsgiMiddleware.factory oslo_config_project = aodh ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/api/app.py0000664000175000017500000000546600000000000015370 0ustar00zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2015-2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import uuid from oslo_log import log from paste import deploy import pecan from aodh.api import hooks from aodh.api import middleware from aodh import service from aodh import storage LOG = log.getLogger(__name__) # NOTE(sileht): pastedeploy uses ConfigParser to handle # global_conf, since python 3 ConfigParser doesn't # allow storing object as config value, only strings are # permit, so to be able to pass an object created before paste load # the app, we store them into a global var. But the each loaded app # store it's configuration in unique key to be concurrency safe. global APPCONFIGS APPCONFIGS = {} def setup_app(root, conf): app_hooks = [hooks.ConfigHook(conf), hooks.DBHook( storage.get_connection_from_config(conf)), hooks.TranslationHook()] return pecan.make_app( root, hooks=app_hooks, wrap_app=middleware.ParsableErrorMiddleware, guess_content_type_from_ext=False ) def load_app(conf): global APPCONFIGS # Build the WSGI app cfg_path = conf.api.paste_config if not os.path.isabs(cfg_path): cfg_path = conf.find_file(cfg_path) if cfg_path is None or not os.path.exists(cfg_path): LOG.debug("No api-paste configuration file found! Using default.") cfg_path = os.path.abspath( os.path.join( os.path.dirname(__file__), "api-paste.ini")) config = dict(conf=conf) configkey = str(uuid.uuid4()) APPCONFIGS[configkey] = config LOG.info("WSGI config used: %s", cfg_path) return deploy.loadapp("config:" + cfg_path, name="aodh+" + ( conf.api.auth_mode if conf.api.auth_mode else "noauth" ), global_conf={'configkey': configkey}) def app_factory(global_config, **local_conf): global APPCONFIGS appconfig = APPCONFIGS.get(global_config.get('configkey')) return setup_app(root=local_conf.get('root'), **appconfig) def build_wsgi_app(argv=None): conf = service.prepare_service(argv=argv) conf.log_opt_values(LOG, log.DEBUG) return load_app(conf) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/api/app.wsgi0000664000175000017500000000152700000000000015703 0ustar00zuulzuul00000000000000# -*- mode: python -*- # # Copyright 2013 New Dream Network, LLC (DreamHost) # Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Use this file for deploying the API under mod_wsgi. See http://pecan.readthedocs.org/en/latest/deployment.html for details. 
""" from aodh.api import app application = app.build_wsgi_app(argv=[]) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0670109 aodh-19.0.0/aodh/api/controllers/0000775000175000017500000000000000000000000016571 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/api/controllers/__init__.py0000664000175000017500000000000000000000000020670 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/api/controllers/root.py0000664000175000017500000000336400000000000020134 0ustar00zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pecan MEDIA_TYPE_JSON = 'application/vnd.openstack.telemetry-%s+json' MEDIA_TYPE_XML = 'application/vnd.openstack.telemetry-%s+xml' class VersionsController(object): @pecan.expose('json') def index(self): base_url = pecan.request.host_url available = [{'tag': 'v2', 'date': '2013-02-13T00:00:00Z', }] collected = [version_descriptor(base_url, v['tag'], v['date']) for v in available] versions = {'versions': {'values': collected}} return versions def version_descriptor(base_url, version, released_on): url = version_url(base_url, version) return { 'id': version, 'links': [ {'href': url, 'rel': 'self', }, {'href': 'https://docs.openstack.org/', 'rel': 'describedby', 'type': 'text/html', }], 'media-types': [ {'base': 'application/json', 'type': MEDIA_TYPE_JSON % version, }, {'base': 'application/xml', 'type': MEDIA_TYPE_XML % version, }], 'status': 'stable', 'updated': released_on, } def version_url(base_url, version_number): return '%s/%s' % (base_url, version_number) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0710115 aodh-19.0.0/aodh/api/controllers/v2/0000775000175000017500000000000000000000000017120 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/api/controllers/v2/__init__.py0000664000175000017500000000000000000000000021217 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0710115 aodh-19.0.0/aodh/api/controllers/v2/alarm_rules/0000775000175000017500000000000000000000000021426 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/api/controllers/v2/alarm_rules/__init__.py0000664000175000017500000000000000000000000023525 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/api/controllers/v2/alarm_rules/composite.py0000664000175000017500000000765600000000000024020 0ustar00zuulzuul00000000000000# # Licensed 
under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json from stevedore import named from wsme.rest import json as wjson from wsme import types as wtypes from aodh.api.controllers.v2 import base from aodh.i18n import _ class InvalidCompositeRule(base.ClientSideError): def __init__(self, error): err = _('Invalid input composite rule: %s, it should ' 'be a dict with an "and" or "or" as key, and the ' 'value of dict should be a list of basic threshold ' 'rules or sub composite rules, can be nested.') % error super(InvalidCompositeRule, self).__init__(err) class CompositeRule(wtypes.UserType): """Composite alarm rule. A simple dict type to preset composite rule. """ basetype = wtypes.text name = 'composite_rule' threshold_plugins = None def __init__(self): threshold_rules = ('prometheus', 'gnocchi_resources_threshold', 'gnocchi_aggregation_by_metrics_threshold', 'gnocchi_aggregation_by_resources_threshold') CompositeRule.threshold_plugins = named.NamedExtensionManager( "aodh.alarm.rule", threshold_rules) super(CompositeRule, self).__init__() @staticmethod def valid_composite_rule(rules): if isinstance(rules, dict) and len(rules) == 1: and_or_key = list(rules)[0] if and_or_key not in ('and', 'or'): raise base.ClientSideError( _('Threshold rules should be combined with "and" or "or"')) if isinstance(rules[and_or_key], list): for sub_rule in rules[and_or_key]: CompositeRule.valid_composite_rule(sub_rule) else: raise InvalidCompositeRule(rules) elif isinstance(rules, dict): rule_type = rules.pop('type', None) if not rule_type: raise base.ClientSideError(_('type must be set in every rule')) if rule_type not in CompositeRule.threshold_plugins: plugins = sorted(CompositeRule.threshold_plugins.names()) err = _('Unsupported sub-rule type :%(rule)s in composite ' 'rule, should be one of: %(plugins)s') % { 'rule': rule_type, 'plugins': plugins} raise base.ClientSideError(err) plugin = CompositeRule.threshold_plugins[rule_type].plugin wjson.fromjson(plugin, rules) rule_dict = plugin(**rules).as_dict() rules.update(rule_dict) rules.update(type=rule_type) else: raise InvalidCompositeRule(rules) @staticmethod def validate(value): try: json.dumps(value) except TypeError: raise base.ClientSideError(_('%s is not JSON serializable') % value) else: CompositeRule.valid_composite_rule(value) return value @staticmethod def frombasetype(value): return CompositeRule.validate(value) @staticmethod def create_hook(alarm): pass @staticmethod def validate_alarm(alarm): pass @staticmethod def update_hook(alarm): pass @staticmethod def as_dict(): pass @staticmethod def __call__(**rule): return rule composite_rule = CompositeRule() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/api/controllers/v2/alarm_rules/event.py0000664000175000017500000000506400000000000023126 0ustar00zuulzuul00000000000000# # Copyright 2015 NEC Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import voluptuous import wsme from wsme import types as wtypes from aodh.api.controllers.v2 import base from aodh.i18n import _ # Schema validation for the event type query. _q_validator = voluptuous.Schema( {"field": voluptuous.Match(r"^[a-zA-Z.',0-9_-]*$"), "op": voluptuous.In(base.operation_kind), "value": voluptuous.In(["string", "integer", "float", "boolean", ""])}) class AlarmEventRule(base.AlarmRule): """Alarm Event Rule. Describe when to trigger the alarm based on an event """ event_type = wsme.wsattr(wtypes.text) "The type of event (default is '*')" query = wsme.wsattr([base.Query]) "The query to find the event (default is [])" def __init__(self, event_type=None, query=None): event_type = event_type or '*' query = [base.Query(**q) for q in query or []] super(AlarmEventRule, self).__init__(event_type=event_type, query=query) @classmethod def validate_alarm(cls, alarm): super(AlarmEventRule, cls).validate_alarm(alarm) for i in alarm.event_rule.query: i.get_value() try: _q_validator({"field": i.field, "op": i.op, "value": i.type}) except voluptuous.MultipleInvalid as e: raise base.ClientSideError( _("Query value or traits invalid: %s") % str(e)) @property def default_description(self): return _('Alarm when %s event occurred.') % self.event_type def as_dict(self): rule = self.as_dict_from_keys(['event_type']) rule['query'] = [q.as_dict() for q in self.query] return rule @classmethod def sample(cls): return cls(event_type='compute.instance.update', query=[{'field': 'traits.instance_id"', 'value': '153462d0-a9b8-4b5b-8175-9e4b05e9b856', 'op': 'eq', 'type': 'string'}]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/api/controllers/v2/alarm_rules/gnocchi.py0000664000175000017500000002164500000000000023422 0ustar00zuulzuul00000000000000# # Copyright 2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import threading import cachetools from gnocchiclient import client from gnocchiclient import exceptions from keystoneauth1 import exceptions as ka_exceptions from oslo_config import cfg from oslo_log import log import pecan import wsme from wsme import types as wtypes from aodh.api.controllers.v2 import base from aodh.api.controllers.v2 import utils as v2_utils from aodh import keystone_client LOG = log.getLogger(__name__) GNOCCHI_OPTS = [ cfg.StrOpt('gnocchi_external_project_owner', default="service", help='Project name of resources creator in Gnocchi. ' '(For example the Ceilometer project name'), cfg.StrOpt('gnocchi_external_domain_name', default="Default", help='Domain name of resources creator in Gnocchi. 
' '(For example, default or service_domain'), ] class GnocchiUnavailable(Exception): code = 503 class AlarmGnocchiThresholdRule(base.AlarmRule): comparison_operator = base.AdvEnum('comparison_operator', str, 'lt', 'le', 'eq', 'ne', 'ge', 'gt', default='eq') "The comparison against the alarm threshold" threshold = wsme.wsattr(float, mandatory=True) "The threshold of the alarm" aggregation_method = wsme.wsattr(wtypes.text, mandatory=True) "The aggregation_method to compare to the threshold" evaluation_periods = wsme.wsattr(wtypes.IntegerType(minimum=1), default=1) "The number of historical periods to evaluate the threshold" granularity = wsme.wsattr(wtypes.IntegerType(minimum=1), default=60) "The time range in seconds over which query" cache = cachetools.TTLCache(maxsize=1, ttl=3600) lock = threading.RLock() @classmethod def validate_alarm(cls, alarm): alarm_rule = getattr(alarm, "%s_rule" % alarm.type) aggregation_method = alarm_rule.aggregation_method if aggregation_method not in cls._get_aggregation_methods(): raise base.ClientSideError( 'aggregation_method should be in %s not %s' % ( cls._get_aggregation_methods(), aggregation_method)) @staticmethod @cachetools.cached(cache, lock=lock) def _get_aggregation_methods(): conf = pecan.request.cfg gnocchi_client = client.Client( '1', keystone_client.get_session(conf), adapter_options={ 'interface': conf.service_credentials.interface, 'region_name': conf.service_credentials.region_name}) try: return gnocchi_client.capabilities.list().get( 'aggregation_methods', []) except exceptions.ClientException as e: raise base.ClientSideError(e.message, status_code=e.code) except Exception as e: raise GnocchiUnavailable(e) class MetricOfResourceRule(AlarmGnocchiThresholdRule): metric = wsme.wsattr(wtypes.text, mandatory=True) "The name of the metric" resource_id = wsme.wsattr(wtypes.text, mandatory=True) "The id of a resource" resource_type = wsme.wsattr(wtypes.text, mandatory=True) "The resource type" def as_dict(self): rule = self.as_dict_from_keys(['granularity', 'comparison_operator', 'threshold', 'aggregation_method', 'evaluation_periods', 'metric', 'resource_id', 'resource_type']) return rule class AggregationMetricByResourcesLookupRule(AlarmGnocchiThresholdRule): metric = wsme.wsattr(wtypes.text, mandatory=True) "The name of the metric" query = wsme.wsattr(wtypes.text, mandatory=True) ('The query to filter the metric, Don\'t forget to filter out ' 'deleted resources (example: {"and": [{"=": {"ended_at": null}}, ...]}), ' 'Otherwise Gnocchi will try to create the aggregate against obsolete ' 'resources') resource_type = wsme.wsattr(wtypes.text, mandatory=True) "The resource type" def as_dict(self): rule = self.as_dict_from_keys(['granularity', 'comparison_operator', 'threshold', 'aggregation_method', 'evaluation_periods', 'metric', 'query', 'resource_type']) return rule cache = cachetools.TTLCache(maxsize=1, ttl=3600) lock = threading.RLock() @staticmethod @cachetools.cached(cache, lock=lock) def get_external_project_owner(): kc = keystone_client.get_client(pecan.request.cfg) project_name = pecan.request.cfg.api.gnocchi_external_project_owner domain_name = pecan.request.cfg.api.gnocchi_external_domain_name try: domains = kc.domains.list(name=domain_name) project = kc.projects.find( name=project_name, domain_id=domains[0].id) return project.id except ka_exceptions.NotFound: LOG.warning("Unable to get domain or project information. 
" "domain_name : %(domain_name)s, " "project_name : %(project_name)s", {'domain_name': domain_name, 'project_name': project_name}) return None @classmethod def validate_alarm(cls, alarm): super(AggregationMetricByResourcesLookupRule, cls).validate_alarm(alarm) rule = alarm.gnocchi_aggregation_by_resources_threshold_rule # check the query string is a valid json try: query = json.loads(rule.query) except ValueError: raise wsme.exc.InvalidInput('rule/query', rule.query) conf = pecan.request.cfg # Scope the alarm to the project id if needed auth_project = v2_utils.get_auth_project(alarm.project_id) if auth_project: perms_filter = {"=": {"created_by_project_id": auth_project}} external_project_owner = cls.get_external_project_owner() if external_project_owner: perms_filter = {"or": [ perms_filter, {"and": [ {"=": {"created_by_project_id": external_project_owner}}, {"=": {"project_id": auth_project}}]} ]} query = {"and": [perms_filter, query]} rule.query = json.dumps(query) gnocchi_client = client.Client( '1', keystone_client.get_session(conf), adapter_options={ 'interface': conf.service_credentials.interface, 'region_name': conf.service_credentials.region_name}) try: gnocchi_client.aggregates.fetch( operations=[ 'aggregate', rule.aggregation_method, [ 'metric', rule.metric, rule.aggregation_method.lstrip('rate:') ] ], search=query, needed_overlap=0, start="-1 day", stop="now", resource_type=rule.resource_type) except exceptions.ClientException as e: if e.code == 404: # NOTE(sileht): We are fine here, we just want to ensure the # 'query' payload is valid for Gnocchi If the metric # doesn't exists yet, it doesn't matter. return if e.code == 400 and 'Metrics not found' in e.message["cause"]: # NOTE(tkajinam): Gnocchi<4.5 returns 400 instead of 404 return raise base.ClientSideError(e.message, status_code=e.code) except Exception as e: raise GnocchiUnavailable(e) class AggregationMetricsByIdLookupRule(AlarmGnocchiThresholdRule): metrics = wsme.wsattr([wtypes.text], mandatory=True) "A list of metric Ids" def as_dict(self): rule = self.as_dict_from_keys(['granularity', 'comparison_operator', 'threshold', 'aggregation_method', 'evaluation_periods', 'metrics']) return rule ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/api/controllers/v2/alarm_rules/loadbalancer.py0000664000175000017500000000250200000000000024406 0ustar00zuulzuul00000000000000# Copyright 2019 Catalyst Cloud Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import wsme from wsme import types as wtypes from aodh.api.controllers.v2 import base class LoadBalancerMemberHealthRule(base.AlarmRule): pool_id = wsme.wsattr(wtypes.text, mandatory=True) "ID of a load balancer pool the members belongs to." stack_id = wsme.wsattr(wtypes.text, mandatory=True) "ID of a Heat stack which contains the load balancer member." autoscaling_group_id = wsme.wsattr(wtypes.text, mandatory=True) "ID of a Heat autoscaling group that contains the load balancer member." 
def as_dict(self): rule = self.as_dict_from_keys( ['pool_id', 'stack_id', 'autoscaling_group_id'] ) return rule @staticmethod def create_hook(alarm): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/api/controllers/v2/alarm_rules/prometheus.py0000664000175000017500000000266600000000000024205 0ustar00zuulzuul00000000000000# # Copyright 2023 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log import wsme from wsme import types as wtypes from aodh.api.controllers.v2 import base LOG = log.getLogger(__name__) class PrometheusRule(base.AlarmRule): comparison_operator = base.AdvEnum('comparison_operator', str, 'lt', 'le', 'eq', 'ne', 'ge', 'gt', default='eq') "The comparison against the alarm threshold" threshold = wsme.wsattr(float, mandatory=True) "The threshold of the alarm" query = wsme.wsattr(wtypes.text, mandatory=True) "The Prometheus query" @staticmethod def validate(rule): # TO-DO(mmagr): validate Prometheus query maybe? return rule def as_dict(self): rule = self.as_dict_from_keys(['comparison_operator', 'threshold', 'query']) return rule ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/api/controllers/v2/alarms.py0000664000175000017500000010313200000000000020751 0ustar00zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 IBM Corp. # Copyright 2013 eNovance # Copyright Ericsson AB 2013. All rights reserved # Copyright 2014 Hewlett-Packard Company # Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
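# Illustrative sketch (not part of aodh): a hypothetical rule body for the
# PrometheusRule type defined in prometheus.py above -- a Prometheus query
# string plus a threshold and comparison operator. The metric and label shown
# are examples only.
#
# "prometheus_rule": {
#     "comparison_operator": "gt",
#     "threshold": 90.0,
#     "query": "ceilometer_cpu{resource_name='my-instance'}"
# }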
import datetime import itertools import json import croniter from oslo_config import cfg from oslo_log import log from oslo_utils import netutils from oslo_utils import timeutils from oslo_utils import uuidutils import pecan from pecan import rest from stevedore import extension from urllib import parse as urlparse import wsme from wsme import types as wtypes import wsmeext.pecan as wsme_pecan try: import zoneinfo except ImportError: # zoneinfo is available in Python >= 3.9 import pytz import pytz.exceptions zoneinfo = None import aodh from aodh.api.controllers.v2 import base from aodh.api.controllers.v2 import utils as v2_utils from aodh.api import rbac from aodh.i18n import _ from aodh import keystone_client from aodh import messaging from aodh import notifier from aodh import profiler from aodh.storage import models LOG = log.getLogger(__name__) ALARM_API_OPTS = [ cfg.IntOpt('user_alarm_quota', deprecated_group='DEFAULT', default=-1, help='Maximum number of alarms defined for a user.' ), cfg.IntOpt('project_alarm_quota', deprecated_group='DEFAULT', default=-1, help='Maximum number of alarms defined for a project.' ), cfg.IntOpt('alarm_max_actions', default=-1, deprecated_group='DEFAULT', help='Maximum count of actions for each state of an alarm, ' 'non-positive number means no limit.'), ] state_kind = ["ok", "alarm", "insufficient data"] state_kind_enum = wtypes.Enum(str, *state_kind) severity_kind = ["low", "moderate", "critical"] severity_kind_enum = wtypes.Enum(str, *severity_kind) ALARM_REASON_DEFAULT = "Not evaluated yet" ALARM_REASON_MANUAL = "Manually set via API" ALARM_QUERY_FIELDS_ALLOWED = set([ 'all_projects', 'user_id', 'project_id', 'type', 'name', 'enabled', 'state', 'severity', 'timestamp', 'repeat_actions' ]) ALARM_QUERY_OPS_ALLOWED = set(['eq']) class OverQuota(base.ClientSideError): def __init__(self, data): d = { 'u': data.user_id, 'p': data.project_id } super(OverQuota, self).__init__( _("Alarm quota exceeded for user %(u)s on project %(p)s") % d, status_code=403) def is_over_quota(conn, project_id, user_id): """Returns False if an alarm is within the set quotas, True otherwise. :param conn: a backend connection object :param project_id: the ID of the project setting the alarm :param user_id: the ID of the user setting the alarm """ over_quota = False project_quotas = conn.get_quotas(project_id) project_alarms = conn.get_alarms(project_id=project_id) user_alarms = conn.get_alarms(user_id=user_id) user_default_alarm_quota = pecan.request.cfg.api.user_alarm_quota project_default_alarm_quota = pecan.request.cfg.api.project_alarm_quota # 1. Check project quota if len(project_quotas) > 0: for quota in project_quotas: if quota.resource == 'alarms': over_quota = len(user_alarms) >= quota.limit else: if project_default_alarm_quota != -1: over_quota = len(project_alarms) >= project_default_alarm_quota # 2. 
Check user quota if not over_quota and user_default_alarm_quota != -1: over_quota = len(user_alarms) >= user_default_alarm_quota return over_quota class CronType(wtypes.UserType): """A user type that represents a cron format.""" basetype = str name = 'cron' @staticmethod def validate(value): # raises ValueError if invalid croniter.croniter(value) return value class AlarmTimeConstraint(base.Base): """Representation of a time constraint on an alarm.""" name = wsme.wsattr(wtypes.text, mandatory=True) "The name of the constraint" _description = None # provide a default def get_description(self): if not self._description: return ('Time constraint at %s lasting for %s seconds' % (self.start, self.duration)) return self._description def set_description(self, value): self._description = value description = wsme.wsproperty(wtypes.text, get_description, set_description) "The description of the constraint" start = wsme.wsattr(CronType(), mandatory=True) "Start point of the time constraint, in cron format" duration = wsme.wsattr(wtypes.IntegerType(minimum=0), mandatory=True) "How long the constraint should last, in seconds" timezone = wsme.wsattr(wtypes.text, default="") "Timezone of the constraint" def as_dict(self): return self.as_dict_from_keys(['name', 'description', 'start', 'duration', 'timezone']) @staticmethod def validate(tc): if tc.timezone: checker = zoneinfo.ZoneInfo if zoneinfo else pytz.timezone exc = (zoneinfo.ZoneInfoNotFoundError if zoneinfo else pytz.exceptions.UnknownTimeZoneError) try: checker(tc.timezone) except exc: raise base.ClientSideError(_("Timezone %s is not valid") % tc.timezone) return tc @classmethod def sample(cls): return cls(name='SampleConstraint', description='nightly build every night at 23h for 3 hours', start='0 23 * * *', duration=10800, timezone='Europe/Ljubljana') ALARMS_RULES = extension.ExtensionManager("aodh.alarm.rule") LOG.debug("alarm rules plugin loaded: %s" % ",".join(ALARMS_RULES.names())) ACTIONS_SCHEMA = extension.ExtensionManager( notifier.AlarmNotifierService.NOTIFIER_EXTENSIONS_NAMESPACE).names() @profiler.trace_cls('api') class Alarm(base.Base): """Representation of an alarm.""" alarm_id = wtypes.text "The UUID of the alarm" name = wsme.wsattr(wtypes.text, mandatory=True) "The name for the alarm" _description = None # provide a default def get_description(self): rule = getattr(self, '%s_rule' % self.type, None) if not self._description: if hasattr(rule, 'default_description'): return str(rule.default_description) return "%s alarm rule" % self.type return self._description def set_description(self, value): self._description = value description = wsme.wsproperty(wtypes.text, get_description, set_description) "The description of the alarm" enabled = wsme.wsattr(bool, default=True) "This alarm is enabled?" ok_actions = wsme.wsattr([wtypes.text], default=[]) "The actions to do when alarm state change to ok" alarm_actions = wsme.wsattr([wtypes.text], default=[]) "The actions to do when alarm state change to alarm" insufficient_data_actions = wsme.wsattr([wtypes.text], default=[]) "The actions to do when alarm state change to insufficient data" repeat_actions = wsme.wsattr(bool, default=False) "The actions should be re-triggered on each evaluation cycle" type = base.AdvEnum('type', str, *ALARMS_RULES.names(), mandatory=True) "Explicit type specifier to select which rule to follow below." 
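# Illustrative sketch (not part of aodh): a hypothetical "time_constraints"
# entry for the attribute below, as validated by AlarmTimeConstraint above --
# "start" is a cron expression checked by CronType, "duration" is in seconds,
# and "timezone" is verified against zoneinfo (or pytz on older interpreters).
#
# "time_constraints": [{
#     "name": "offpeak",
#     "start": "0 22 * * *",
#     "duration": 7200,
#     "timezone": "Europe/Paris"
# }]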
time_constraints = wtypes.wsattr([AlarmTimeConstraint], default=[]) """Describe time constraints for the alarm""" # These settings are ignored in the PUT or POST operations, but are # filled in for GET project_id = wtypes.text "The ID of the project or tenant that owns the alarm" user_id = wtypes.text "The ID of the user who created the alarm" timestamp = datetime.datetime "The date of the last alarm definition update" state = base.AdvEnum('state', str, *state_kind, default='insufficient data') "The state offset the alarm" state_timestamp = datetime.datetime "The date of the last alarm state changed" state_reason = wsme.wsattr(wtypes.text, default=ALARM_REASON_DEFAULT) "The reason of the current state" severity = base.AdvEnum('severity', str, *severity_kind, default='low') "The severity of the alarm" evaluate_timestamp = datetime.datetime "The latest alarm evaluation time" def __init__(self, rule=None, time_constraints=None, **kwargs): super(Alarm, self).__init__(**kwargs) if rule: setattr(self, '%s_rule' % self.type, ALARMS_RULES[self.type].plugin(**rule)) if time_constraints: self.time_constraints = [AlarmTimeConstraint(**tc) for tc in time_constraints] @classmethod def from_db_model_scrubbed(cls, m): # Return an Alarm from a DB model with trust IDs scrubbed from actions data = m.as_dict() for field in ('ok_actions', 'alarm_actions', 'insufficient_data_actions'): if data.get(field) is not None: data[field] = [cls._scrub_action_url(action) for action in data[field]] return cls(**data) @staticmethod def validate(alarm): Alarm.check_rule(alarm) Alarm.check_alarm_actions(alarm) ALARMS_RULES[alarm.type].plugin.validate_alarm(alarm) if alarm.time_constraints: tc_names = [tc.name for tc in alarm.time_constraints] if len(tc_names) > len(set(tc_names)): error = _("Time constraint names must be " "unique for a given alarm.") raise base.ClientSideError(error) return alarm @staticmethod def check_rule(alarm): rule = '%s_rule' % alarm.type if getattr(alarm, rule) in (wtypes.Unset, None): error = _("%(rule)s must be set for %(type)s" " type alarm") % {"rule": rule, "type": alarm.type} raise base.ClientSideError(error) rule_set = None for ext in ALARMS_RULES: name = "%s_rule" % ext.name if getattr(alarm, name): if rule_set is None: rule_set = name else: error = _("%(rule1)s and %(rule2)s cannot be set at the " "same time") % {'rule1': rule_set, 'rule2': name} raise base.ClientSideError(error) @staticmethod def check_alarm_actions(alarm): max_actions = pecan.request.cfg.api.alarm_max_actions for state in state_kind: actions_name = state.replace(" ", "_") + '_actions' actions = getattr(alarm, actions_name) if not actions: continue action_set = set(actions) if len(actions) != len(action_set): LOG.info('duplicate actions are found: %s, ' 'remove duplicate ones', actions) actions = list(action_set) setattr(alarm, actions_name, actions) if 0 < max_actions < len(actions): error = _('%(name)s count exceeds maximum value ' '%(maximum)d') % {"name": actions_name, "maximum": max_actions} raise base.ClientSideError(error) limited = rbac.get_limited_to_project(pecan.request.headers, pecan.request.enforcer) for action in actions: try: url = netutils.urlsplit(action) except Exception: error = _("Unable to parse action %s") % action raise base.ClientSideError(error) if url.scheme not in ACTIONS_SCHEMA: error = _("Unsupported action %s") % action raise base.ClientSideError(error) if limited and url.scheme in ('log', 'test'): error = _('You are not authorized to create ' 'action: %s') % action raise 
base.ClientSideError(error, status_code=401) @classmethod def sample(cls): return cls(alarm_id=None, name="SwiftObjectAlarm", description="An alarm", type='gnocchi_aggregation_by_metrics_threshold', time_constraints=[AlarmTimeConstraint.sample().as_dict()], user_id="c96c887c216949acbdfbd8b494863567", project_id="c96c887c216949acbdfbd8b494863567", enabled=True, timestamp=datetime.datetime(2015, 1, 1, 12, 0, 0, 0), state="ok", severity="moderate", state_reason="threshold over 90%", state_timestamp=datetime.datetime(2015, 1, 1, 12, 0, 0, 0), ok_actions=["http://site:8000/ok"], alarm_actions=["http://site:8000/alarm"], insufficient_data_actions=["http://site:8000/nodata"], repeat_actions=False, ) def as_dict(self, db_model): d = super(Alarm, self).as_dict(db_model) for k in d: if k.endswith('_rule'): del d[k] rule = getattr(self, "%s_rule" % self.type) d['rule'] = rule if isinstance(rule, dict) else rule.as_dict() if self.time_constraints: d['time_constraints'] = [tc.as_dict() for tc in self.time_constraints] return d @staticmethod def _is_trust_url(url): return url.scheme.startswith('trust+') @staticmethod def _scrub_action_url(action): """Remove trust ID from a URL.""" url = netutils.urlsplit(action) if Alarm._is_trust_url(url): netloc = url.netloc.rsplit('@', 1)[-1] url = urlparse.SplitResult(url.scheme, netloc, url.path, url.query, url.fragment) return url.geturl() def _get_existing_trust_ids(self): for action in itertools.chain(self.ok_actions or [], self.alarm_actions or [], self.insufficient_data_actions or []): url = netutils.urlsplit(action) if self._is_trust_url(url): trust_id = url.username if trust_id and url.password == 'delete': yield trust_id def update_actions(self, old_alarm=None): trustor_user_id = pecan.request.headers.get('X-User-Id') trustor_project_id = pecan.request.headers.get('X-Project-Id') roles = pecan.request.headers.get('X-Roles', '') if roles: roles = roles.split(',') else: roles = [] auth_plugin = pecan.request.environ.get('keystone.token_auth') if old_alarm: prev_trust_ids = set(old_alarm._get_existing_trust_ids()) else: prev_trust_ids = set() trust_id = prev_trust_ids.pop() if prev_trust_ids else None trust_id_used = False for actions in (self.ok_actions, self.alarm_actions, self.insufficient_data_actions): if actions is not None: for index, action in enumerate(actions[:]): url = netutils.urlsplit(action) if self._is_trust_url(url): if '@' in url.netloc: errmsg = _("trust URL cannot contain a trust ID.") raise base.ClientSideError(errmsg) if trust_id is None: # We have a trust action without a trust ID, # create it trust_id = keystone_client.create_trust_id( pecan.request.cfg, trustor_user_id, trustor_project_id, roles, auth_plugin) if trust_id_used: pw = '' else: pw = ':delete' trust_id_used = True netloc = '%s%s@%s' % (trust_id, pw, url.netloc) url = urlparse.SplitResult(url.scheme, netloc, url.path, url.query, url.fragment) actions[index] = url.geturl() if trust_id is not None and not trust_id_used: prev_trust_ids.add(trust_id) for old_trust_id in prev_trust_ids: keystone_client.delete_trust_id(pecan.request.cfg, old_trust_id, auth_plugin) def delete_actions(self): auth_plugin = pecan.request.environ.get('keystone.token_auth') for trust_id in self._get_existing_trust_ids(): keystone_client.delete_trust_id(pecan.request.cfg, trust_id, auth_plugin) Alarm.add_attributes(**{"%s_rule" % ext.name: ext.plugin for ext in ALARMS_RULES}) class AlarmChange(base.Base): """Representation of an event in an alarm's history.""" event_id = wtypes.text "The UUID of the 
change event" alarm_id = wtypes.text "The UUID of the alarm" type = wtypes.Enum(str, 'creation', 'rule change', 'state transition', 'deletion') "The type of change" detail = wtypes.text "JSON fragment describing change" project_id = wtypes.text "The project ID of the initiating identity" user_id = wtypes.text "The user ID of the initiating identity" on_behalf_of = wtypes.text "The tenant on behalf of which the change is being made" timestamp = datetime.datetime "The time/date of the alarm change" @classmethod def sample(cls): return cls(alarm_id='e8ff32f772a44a478182c3fe1f7cad6a', type='rule change', detail='{"threshold": 42.0, "evaluation_periods": 4}', user_id="3e5d11fda79448ac99ccefb20be187ca", project_id="b6f16144010811e387e4de429e99ee8c", on_behalf_of="92159030020611e3b26dde429e99ee8c", timestamp=datetime.datetime(2015, 1, 1, 12, 0, 0, 0), ) def _send_notification(event, payload): notification = event.replace(" ", "_") notification = "alarm.%s" % notification transport = messaging.get_transport(pecan.request.cfg) notifier = messaging.get_notifier(transport, publisher_id="aodh.api") # FIXME(sileht): perhaps we need to copy some infos from the # pecan request headers like nova does notifier.info({}, notification, payload) def stringify_timestamps(data): """Stringify any datetimes in given dict.""" return dict((k, v.isoformat() if isinstance(v, datetime.datetime) else v) for (k, v) in data.items()) @profiler.trace_cls('api') class AlarmController(rest.RestController): """Manages operations on a single alarm.""" _custom_actions = { 'history': ['GET'], 'state': ['PUT', 'GET'], } def __init__(self, alarm_id): pecan.request.context['alarm_id'] = alarm_id self._id = alarm_id def _enforce_rbac(self, rbac_directive): auth_project = pecan.request.headers.get('X-Project-Id') filters = {'alarm_id': self._id} if not rbac.is_admin(pecan.request.headers): filters['project_id'] = auth_project alarms = pecan.request.storage.get_alarms(**filters) if not alarms: raise base.AlarmNotFound(alarm=self._id, auth_project=None) alarm = alarms[0] target = {'user_id': alarm.user_id, 'project_id': alarm.project_id} rbac.enforce(rbac_directive, pecan.request.headers, pecan.request.enforcer, target) return alarm def _record_change(self, data, now, on_behalf_of=None, type=None): if not pecan.request.cfg.record_history: return if not data: return type = type or models.AlarmChange.RULE_CHANGE scrubbed_data = stringify_timestamps(data) detail = json.dumps(scrubbed_data) user_id = pecan.request.headers.get('X-User-Id') project_id = pecan.request.headers.get('X-Project-Id') on_behalf_of = on_behalf_of or project_id severity = scrubbed_data.get('severity') payload = dict(event_id=uuidutils.generate_uuid(), alarm_id=self._id, type=type, detail=detail, user_id=user_id, project_id=project_id, on_behalf_of=on_behalf_of, timestamp=now, severity=severity) try: pecan.request.storage.record_alarm_change(payload) except aodh.NotImplementedError: pass # Revert to the pre-json'ed details ... 
payload['detail'] = scrubbed_data _send_notification(type, payload) def _record_delete(self, alarm): if not alarm: return type = models.AlarmChange.DELETION detail = {'state': alarm.state} user_id = pecan.request.headers.get('X-User-Id') project_id = pecan.request.headers.get('X-Project-Id') payload = dict(event_id=uuidutils.generate_uuid(), alarm_id=self._id, type=type, detail=detail, user_id=user_id, project_id=project_id, on_behalf_of=project_id, timestamp=timeutils.utcnow(), severity=alarm.severity) pecan.request.storage.delete_alarm(alarm.alarm_id) _send_notification(type, payload) @wsme_pecan.wsexpose(Alarm) def get(self): """Return this alarm.""" return Alarm.from_db_model_scrubbed(self._enforce_rbac('get_alarm')) @wsme_pecan.wsexpose(Alarm, body=Alarm) def put(self, data): """Modify this alarm. :param data: an alarm within the request body. """ # Ensure alarm exists alarm_in = self._enforce_rbac('change_alarm') now = timeutils.utcnow() data.alarm_id = self._id user, project = rbac.get_limited_to(pecan.request.headers, pecan.request.enforcer) if user: data.user_id = user elif data.user_id == wtypes.Unset: data.user_id = alarm_in.user_id if project: data.project_id = project elif data.project_id == wtypes.Unset: data.project_id = alarm_in.project_id data.timestamp = now if alarm_in.state != data.state: data.state_timestamp = now data.state_reason = ALARM_REASON_MANUAL else: data.state_timestamp = alarm_in.state_timestamp data.state_reason = alarm_in.state_reason ALARMS_RULES[data.type].plugin.update_hook(data) old_data = Alarm.from_db_model(alarm_in) old_alarm = old_data.as_dict(models.Alarm) data.update_actions(old_data) updated_alarm = data.as_dict(models.Alarm) try: alarm_in = models.Alarm(**updated_alarm) except Exception: LOG.exception("Error while putting alarm: %s", updated_alarm) raise base.ClientSideError(_("Alarm incorrect")) alarm = pecan.request.storage.update_alarm(alarm_in) change = dict((k, v) for k, v in updated_alarm.items() if v != old_alarm[k] and k not in ['timestamp', 'state_timestamp']) self._record_change(change, now, on_behalf_of=alarm.project_id) return Alarm.from_db_model_scrubbed(alarm) @wsme_pecan.wsexpose(None, status_code=204) def delete(self): """Delete this alarm.""" # ensure alarm exists before deleting alarm = self._enforce_rbac('delete_alarm') self._record_delete(alarm) alarm_object = Alarm.from_db_model(alarm) alarm_object.delete_actions() @wsme_pecan.wsexpose([AlarmChange], [base.Query], [str], int, str) def history(self, q=None, sort=None, limit=None, marker=None): """Assembles the alarm history requested. :param q: Filter rules for the changes to be described. :param sort: A list of pairs of sort key and sort dir. :param limit: The maximum number of items to be return. :param marker: The pagination query marker. 
""" # Ensure alarm exists self._enforce_rbac('alarm_history') q = q or [] # allow history to be returned for deleted alarms, but scope changes # returned to those carried out on behalf of the auth'd tenant, to # avoid inappropriate cross-tenant visibility of alarm history auth_project = rbac.get_limited_to_project(pecan.request.headers, pecan.request.enforcer) conn = pecan.request.storage kwargs = v2_utils.query_to_kwargs( q, conn.get_alarm_changes, ['on_behalf_of', 'alarm_id']) if sort or limit or marker: kwargs['pagination'] = v2_utils.get_pagination_options( sort, limit, marker, models.AlarmChange) return [AlarmChange.from_db_model(ac) for ac in conn.get_alarm_changes(self._id, auth_project, **kwargs)] @wsme.validate(state_kind_enum) @wsme_pecan.wsexpose(state_kind_enum, body=state_kind_enum) def put_state(self, state): """Set the state of this alarm. :param state: an alarm state within the request body. """ alarm = self._enforce_rbac('change_alarm_state') # note(sileht): body are not validated by wsme # Workaround for https://bugs.launchpad.net/wsme/+bug/1227229 if state not in state_kind: raise base.ClientSideError(_("state invalid")) now = timeutils.utcnow() alarm.state = state alarm.state_timestamp = now alarm.state_reason = ALARM_REASON_MANUAL alarm = pecan.request.storage.update_alarm(alarm) change = {'state': alarm.state, 'state_reason': alarm.state_reason} self._record_change(change, now, on_behalf_of=alarm.project_id, type=models.AlarmChange.STATE_TRANSITION) return alarm.state @wsme_pecan.wsexpose(state_kind_enum) def get_state(self): """Get the state of this alarm.""" return self._enforce_rbac('get_alarm_state').state @profiler.trace_cls('api') class AlarmsController(rest.RestController): """Manages operations on the alarms collection.""" @pecan.expose() def _lookup(self, alarm_id, *remainder): return AlarmController(alarm_id), remainder @staticmethod def _record_creation(conn, data, alarm_id, now): if not pecan.request.cfg.record_history: return type = models.AlarmChange.CREATION scrubbed_data = stringify_timestamps(data) detail = json.dumps(scrubbed_data) user_id = pecan.request.headers.get('X-User-Id') project_id = pecan.request.headers.get('X-Project-Id') severity = scrubbed_data.get('severity') payload = dict(event_id=uuidutils.generate_uuid(), alarm_id=alarm_id, type=type, detail=detail, user_id=user_id, project_id=project_id, on_behalf_of=project_id, timestamp=now, severity=severity) try: conn.record_alarm_change(payload) except aodh.NotImplementedError: pass # Revert to the pre-json'ed details ... payload['detail'] = scrubbed_data _send_notification(type, payload) @wsme_pecan.wsexpose(Alarm, body=Alarm, status_code=201) def post(self, data): """Create a new alarm. :param data: an alarm within the request body. 
""" rbac.enforce('create_alarm', pecan.request.headers, pecan.request.enforcer, {}) conn = pecan.request.storage now = timeutils.utcnow() data.alarm_id = uuidutils.generate_uuid() user_limit, project_limit = rbac.get_limited_to(pecan.request.headers, pecan.request.enforcer) def _set_ownership(aspect, owner_limitation, header): attr = '%s_id' % aspect requested_owner = getattr(data, attr) explicit_owner = requested_owner != wtypes.Unset caller = pecan.request.headers.get(header) if (owner_limitation and explicit_owner and requested_owner != caller): raise base.ProjectNotAuthorized(requested_owner, aspect) actual_owner = (owner_limitation or requested_owner if explicit_owner else caller) setattr(data, attr, actual_owner) _set_ownership('user', user_limit, 'X-User-Id') _set_ownership('project', project_limit, 'X-Project-Id') # Check if there's room for one more alarm if is_over_quota(conn, data.project_id, data.user_id): raise OverQuota(data) data.timestamp = now data.state_timestamp = now data.state_reason = ALARM_REASON_DEFAULT ALARMS_RULES[data.type].plugin.create_hook(data) change = data.as_dict(models.Alarm) data.update_actions() try: alarm_in = models.Alarm(**change) except Exception: LOG.exception("Error while posting alarm: %s", change) raise base.ClientSideError(_("Alarm incorrect")) alarm = conn.create_alarm(alarm_in) self._record_creation(conn, change, alarm.alarm_id, now) v2_utils.set_resp_location_hdr("/alarms/" + alarm.alarm_id) return Alarm.from_db_model_scrubbed(alarm) @wsme_pecan.wsexpose([Alarm], [base.Query], [str], int, str) def get_all(self, q=None, sort=None, limit=None, marker=None): """Return all alarms, based on the query provided. :param q: Filter rules for the alarms to be returned. :param sort: A list of pairs of sort key and sort dir. :param limit: The maximum number of items to be return. :param marker: The pagination query marker. 
""" target = rbac.target_from_segregation_rule( pecan.request.headers, pecan.request.enforcer) rbac.enforce('get_alarms', pecan.request.headers, pecan.request.enforcer, target) q = q or [] filters = {} # Check field keys = set([query.field for query in q]) if not keys.issubset(ALARM_QUERY_FIELDS_ALLOWED): raise wsme.exc.InvalidInput( 'field', keys, 'only fields %s are allowed' % ALARM_QUERY_FIELDS_ALLOWED ) # Check op ops = set([query.op for query in q]) if any([op not in ALARM_QUERY_OPS_ALLOWED for op in ops]): raise wsme.exc.InvalidInput( 'op', ops, 'only operations %s are allowed' % ALARM_QUERY_OPS_ALLOWED ) if 'all_projects' in keys: if v2_utils.get_query_value(q, 'all_projects', 'boolean'): rbac.enforce('get_alarms:all_projects', pecan.request.headers, pecan.request.enforcer, target) keys.remove('all_projects') else: project_id = pecan.request.headers.get('X-Project-Id') is_admin = rbac.is_admin(pecan.request.headers) if not v2_utils.is_field_exist(q, 'project_id'): q.append( base.Query(field='project_id', op='eq', value=project_id) ) keys.add('project_id') else: request_project = v2_utils.get_query_value(q, 'project_id') if not is_admin and request_project != project_id: raise base.ProjectNotAuthorized(request_project) for query in q: if query.field in keys: filters[query.field] = {query.op: query.get_value(query.type)} if sort or limit or marker: filters['pagination'] = v2_utils.get_pagination_options( sort, limit, marker, models.Alarm) LOG.debug('Getting alarms from database, filters: %s', filters) return [Alarm.from_db_model_scrubbed(m) for m in pecan.request.storage.get_alarms(**filters)] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/api/controllers/v2/base.py0000664000175000017500000002064300000000000020411 0ustar00zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 IBM Corp. # Copyright 2013 eNovance # Copyright Ericsson AB 2013. All rights reserved # Copyright 2014 Hewlett-Packard Company # Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import ast import datetime import functools from oslo_utils import strutils from oslo_utils import timeutils import pecan import wsme from wsme import types as wtypes from aodh.i18n import _ from aodh.utils import get_func_valid_keys operation_kind = ('lt', 'le', 'eq', 'ne', 'ge', 'gt') operation_kind_enum = wtypes.Enum(str, *operation_kind) class ClientSideError(wsme.exc.ClientSideError): def __init__(self, error, status_code=400): pecan.response.translatable_error = error super(ClientSideError, self).__init__(error, status_code) class ProjectNotAuthorized(ClientSideError): def __init__(self, id, aspect='project'): params = dict(aspect=aspect, id=id) super(ProjectNotAuthorized, self).__init__( _("Not Authorized to access %(aspect)s %(id)s") % params, status_code=401) class AdvEnum(wtypes.wsproperty): """Handle default and mandatory for wtypes.Enum.""" def __init__(self, name, *args, **kwargs): self._name = '_advenum_%s' % name self._default = kwargs.pop('default', None) mandatory = kwargs.pop('mandatory', False) enum = wtypes.Enum(*args, **kwargs) super(AdvEnum, self).__init__(datatype=enum, fget=self._get, fset=self._set, mandatory=mandatory) def _get(self, parent): if hasattr(parent, self._name): value = getattr(parent, self._name) return value or self._default return self._default def _set(self, parent, value): try: if self.datatype.validate(value): setattr(parent, self._name, value) except ValueError as e: raise wsme.exc.InvalidInput(self._name.replace('_advenum_', '', 1), value, e) class Base(wtypes.DynamicBase): _wsme_attributes = [] @classmethod def from_db_model(cls, m): return cls(**(m.as_dict())) @classmethod def from_db_and_links(cls, m, links): return cls(links=links, **(m.as_dict())) def as_dict(self, db_model): valid_keys = get_func_valid_keys(db_model.__init__) if 'self' in valid_keys: valid_keys.remove('self') return self.as_dict_from_keys(valid_keys) def as_dict_from_keys(self, keys): return dict((k, getattr(self, k)) for k in keys if hasattr(self, k) and getattr(self, k) != wsme.Unset) def to_dict(self): d = {} for attr in self._wsme_attributes: attr_val = getattr(self, attr.name) if not isinstance(attr_val, wtypes.UnsetType): d[attr.name] = attr_val return d class Query(Base): """Query filter.""" # The data types supported by the query. _supported_types = ['integer', 'float', 'string', 'boolean', 'datetime'] # Functions to convert the data field to the correct type. _type_converters = {'integer': int, 'float': float, 'boolean': functools.partial( strutils.bool_from_string, strict=True), 'string': str, 'datetime': timeutils.parse_isotime} _op = None # provide a default def get_op(self): return self._op or 'eq' def set_op(self, value): self._op = value field = wsme.wsattr(wtypes.text, mandatory=True) "The name of the field to test" # op = wsme.wsattr(operation_kind, default='eq') # this ^ doesn't seem to work. op = wsme.wsproperty(operation_kind_enum, get_op, set_op) "The comparison operator. Defaults to 'eq'." 
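# Illustrative sketch (not part of aodh): one hypothetical query filter as it
# appears in request bodies handled by this Query type. "op" defaults to "eq"
# when omitted, and get_value() casts "value" according to "type" (falling
# back to ast.literal_eval when no type is given).
#
# {"field": "state", "op": "eq", "value": "alarm", "type": "string"}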
value = wsme.wsattr(wtypes.text, mandatory=True) "The value to compare against the stored data" type = wtypes.text "The data type of value to compare against the stored data" def __repr__(self): # for logging calls return '' % (self.field, self.op, self.value, self.type) @classmethod def sample(cls): return cls(field='resource_id', op='eq', value='bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', type='string' ) def as_dict(self): return self.as_dict_from_keys(['field', 'op', 'type', 'value']) def get_value(self, forced_type=None): """Convert metadata value to the specified data type. This method is called during metadata query to help convert the querying metadata to the data type specified by user. If there is no data type given, the metadata will be parsed by ast.literal_eval to try to do a smart converting. NOTE (flwang) Using "_" as prefix to avoid an InvocationError raised from wsmeext/sphinxext.py. It's OK to call it outside the Query class. Because the "public" side of that class is actually the outside of the API, and the "private" side is the API implementation. The method is only used in the API implementation, so it's OK. :returns: metadata value converted with the specified data type. """ type = forced_type or self.type try: converted_value = self.value if not type: try: converted_value = ast.literal_eval(self.value) except (ValueError, SyntaxError): # Unable to convert the metadata value automatically # let it default to self.value pass else: if type not in self._supported_types: # Types must be explicitly declared so the # correct type converter may be used. Subclasses # of Query may define _supported_types and # _type_converters to define their own types. raise TypeError() converted_value = self._type_converters[type](self.value) if isinstance(converted_value, datetime.datetime): converted_value = timeutils.normalize_time(converted_value) except ValueError: msg = (_('Unable to convert the value %(value)s' ' to the expected data type %(type)s.') % {'value': self.value, 'type': type}) raise ClientSideError(msg) except TypeError: msg = (_('The data type %(type)s is not supported. The supported' ' data type list is: %(supported)s') % {'type': type, 'supported': self._supported_types}) raise ClientSideError(msg) except Exception: msg = (_('Unexpected exception converting %(value)s to' ' the expected data type %(type)s.') % {'value': self.value, 'type': type}) raise ClientSideError(msg) return converted_value class AlarmNotFound(ClientSideError): def __init__(self, alarm, auth_project): if not auth_project: msg = _('Alarm %s not found') % alarm else: msg = _('Alarm %(alarm_id)s not found in project %' '(project)s') % { 'alarm_id': alarm, 'project': auth_project} super(AlarmNotFound, self).__init__(msg, status_code=404) class AlarmRule(Base): """Base class Alarm Rule extension and wsme.types.""" @staticmethod def validate_alarm(alarm): pass @staticmethod def create_hook(alarm): pass @staticmethod def update_hook(alarm): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/api/controllers/v2/capabilities.py0000664000175000017500000001013200000000000022120 0ustar00zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 IBM Corp. # Copyright 2013 eNovance # Copyright Ericsson AB 2013. All rights reserved # Copyright 2014 Hewlett-Packard Company # Copyright 2015 Huawei Technologies Co., Ltd. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pecan from pecan import rest from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from aodh.api.controllers.v2 import base from aodh import profiler def _decode_unicode(input): """Decode the unicode of the message, and encode it into utf-8.""" if isinstance(input, dict): temp = {} # If the input data is a dict, create an equivalent dict with a # predictable insertion order to avoid inconsistencies in the # message signature computation for equivalent payloads modulo # ordering for key, value in sorted(input.items()): temp[_decode_unicode(key)] = _decode_unicode(value) return temp elif isinstance(input, (tuple, list)): # When doing a pair of JSON encode/decode operations to the tuple, # the tuple would become list. So we have to generate the value as # list here. return [_decode_unicode(element) for element in input] elif isinstance(input, str): return input.encode('utf-8') else: return input def _recursive_keypairs(d, separator=':'): """Generator that produces sequence of keypairs for nested dictionaries.""" for name, value in sorted(d.items()): if isinstance(value, dict): for subname, subvalue in _recursive_keypairs(value, separator): yield ('%s%s%s' % (name, separator, subname), subvalue) elif isinstance(value, (tuple, list)): yield name, _decode_unicode(value) else: yield name, value def _flatten_capabilities(capabilities): return dict((k, v) for k, v in _recursive_keypairs(capabilities)) class Capabilities(base.Base): """A representation of the API and storage capabilities. Usually constrained by restrictions imposed by the storage driver. """ api = {wtypes.text: bool} "A flattened dictionary of API capabilities" alarm_storage = {wtypes.text: bool} "A flattened dictionary of alarm storage capabilities" @classmethod def sample(cls): return cls( api=_flatten_capabilities({ 'alarms': {'query': {'simple': True, 'complex': True}, 'history': {'query': {'simple': True, 'complex': True}}}, }), alarm_storage=_flatten_capabilities( {'storage': {'production_ready': True}}), ) @profiler.trace_cls('api') class CapabilitiesController(rest.RestController): """Manages capabilities queries.""" @wsme_pecan.wsexpose(Capabilities) def get(self): """Returns a flattened dictionary of API capabilities. Capabilities supported by the currently configured storage driver. 
""" # variation in API capabilities is effectively determined by # the lack of strict feature parity across storage drivers alarm_conn = pecan.request.storage driver_capabilities = { 'alarms': alarm_conn.get_capabilities()['alarms'], } alarm_driver_perf = alarm_conn.get_storage_capabilities() return Capabilities(api=_flatten_capabilities(driver_capabilities), alarm_storage=_flatten_capabilities( alarm_driver_perf)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/api/controllers/v2/query.py0000664000175000017500000003557700000000000020660 0ustar00zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 IBM Corp. # Copyright 2013 eNovance # Copyright Ericsson AB 2013. All rights reserved # Copyright 2014 Hewlett-Packard Company # Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import json import jsonschema from oslo_log import log from oslo_utils import timeutils import pecan from pecan import rest from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from aodh.api.controllers.v2 import alarms from aodh.api.controllers.v2 import base from aodh.api import rbac from aodh.i18n import _ from aodh import profiler from aodh.storage import models LOG = log.getLogger(__name__) class ComplexQuery(base.Base): """Holds a sample query encoded in json.""" filter = wtypes.text "The filter expression encoded in json." orderby = wtypes.text "List of single-element dicts for specifing the ordering of the results." limit = int "The maximum number of results to be returned." 
@classmethod def sample(cls): return cls(filter='{"and": [{"and": [{"=": ' + '{"counter_name": "cpu_util"}}, ' + '{">": {"counter_volume": 0.23}}, ' + '{"<": {"counter_volume": 0.26}}]}, ' + '{"or": [{"and": [{">": ' + '{"timestamp": "2013-12-01T18:00:00"}}, ' + '{"<": ' + '{"timestamp": "2013-12-01T18:15:00"}}]}, ' + '{"and": [{">": ' + '{"timestamp": "2013-12-01T18:30:00"}}, ' + '{"<": ' + '{"timestamp": "2013-12-01T18:45:00"}}]}]}]}', orderby='[{"counter_volume": "ASC"}, ' + '{"timestamp": "DESC"}]', limit=42 ) def _list_to_regexp(items, regexp_prefix=""): regexp = ["^%s$" % item for item in items] regexp = regexp_prefix + "|".join(regexp) return regexp @profiler.trace_cls('api') class ValidatedComplexQuery(object): complex_operators = ["and", "or"] order_directions = ["asc", "desc"] simple_ops = ["=", "!=", "<", ">", "<=", "=<", ">=", "=>", "=~"] regexp_prefix = "(?i)" complex_ops = _list_to_regexp(complex_operators, regexp_prefix) simple_ops = _list_to_regexp(simple_ops, regexp_prefix) order_directions = _list_to_regexp(order_directions, regexp_prefix) timestamp_fields = ["timestamp", "state_timestamp"] def __init__(self, query, db_model, additional_name_mapping=None, metadata_allowed=False): additional_name_mapping = additional_name_mapping or {} self.name_mapping = {"user": "user_id", "project": "project_id"} self.name_mapping.update(additional_name_mapping) valid_keys = db_model.get_field_names() valid_keys = list(valid_keys) + list(self.name_mapping.keys()) valid_fields = _list_to_regexp(valid_keys) if metadata_allowed: valid_filter_fields = valid_fields + r"|^metadata\.[\S]+$" else: valid_filter_fields = valid_fields schema_value = { "oneOf": [{"type": "string"}, {"type": "number"}, {"type": "boolean"}], "minProperties": 1, "maxProperties": 1} schema_value_in = { "type": "array", "items": {"oneOf": [{"type": "string"}, {"type": "number"}]}, "minItems": 1} schema_field = { "type": "object", "patternProperties": {valid_filter_fields: schema_value}, "additionalProperties": False, "minProperties": 1, "maxProperties": 1} schema_field_in = { "type": "object", "patternProperties": {valid_filter_fields: schema_value_in}, "additionalProperties": False, "minProperties": 1, "maxProperties": 1} schema_leaf_in = { "type": "object", "patternProperties": {"(?i)^in$": schema_field_in}, "additionalProperties": False, "minProperties": 1, "maxProperties": 1} schema_leaf_simple_ops = { "type": "object", "patternProperties": {self.simple_ops: schema_field}, "additionalProperties": False, "minProperties": 1, "maxProperties": 1} schema_and_or_array = { "type": "array", "items": {"$ref": "#"}, "minItems": 2} schema_and_or = { "type": "object", "patternProperties": {self.complex_ops: schema_and_or_array}, "additionalProperties": False, "minProperties": 1, "maxProperties": 1} schema_not = { "type": "object", "patternProperties": {"(?i)^not$": {"$ref": "#"}}, "additionalProperties": False, "minProperties": 1, "maxProperties": 1} self.schema = { "oneOf": [{"$ref": "#/definitions/leaf_simple_ops"}, {"$ref": "#/definitions/leaf_in"}, {"$ref": "#/definitions/and_or"}, {"$ref": "#/definitions/not"}], "minProperties": 1, "maxProperties": 1, "definitions": {"leaf_simple_ops": schema_leaf_simple_ops, "leaf_in": schema_leaf_in, "and_or": schema_and_or, "not": schema_not}} self.orderby_schema = { "type": "array", "items": { "type": "object", "patternProperties": {valid_fields: {"type": "string", "pattern": self.order_directions}}, "additionalProperties": False, "minProperties": 1, "maxProperties": 1}} 
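# Illustrative sketch (not part of aodh): hypothetical inputs accepted by the
# jsonschema definitions built above -- a filter combining simple operators
# with "and"/"in", and an orderby list of single-key dicts whose values match
# order_directions. Field names are examples drawn from the alarm model.
#
# filter  = {"and": [{"=": {"type": "gnocchi_resources_threshold"}},
#                    {"in": {"state": ["alarm", "ok"]}}]}
# orderby = [{"severity": "asc"}, {"timestamp": "desc"}]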
self.original_query = query def validate(self, visibility_field): """Validates the query content and does the necessary conversions.""" if self.original_query.filter is wtypes.Unset: self.filter_expr = None else: try: self.filter_expr = json.loads(self.original_query.filter) self._validate_filter(self.filter_expr) except (ValueError, jsonschema.exceptions.ValidationError) as e: raise base.ClientSideError( _("Filter expression not valid: %s") % str(e)) self._replace_isotime_with_datetime(self.filter_expr) self._convert_operator_to_lower_case(self.filter_expr) self._normalize_field_names_for_db_model(self.filter_expr) self._force_visibility(visibility_field) if self.original_query.orderby is wtypes.Unset: self.orderby = None else: try: self.orderby = json.loads(self.original_query.orderby) self._validate_orderby(self.orderby) except (ValueError, jsonschema.exceptions.ValidationError) as e: raise base.ClientSideError( _("Order-by expression not valid: %s") % e) self._convert_orderby_to_lower_case(self.orderby) self._normalize_field_names_in_orderby(self.orderby) if self.original_query.limit is wtypes.Unset: self.limit = None else: self.limit = self.original_query.limit if self.limit is not None and self.limit <= 0: msg = _('Limit should be positive') raise base.ClientSideError(msg) @staticmethod def lowercase_values(mapping): """Converts the values in the mapping dict to lowercase.""" items = mapping.items() for key, value in items: mapping[key] = value.lower() def _convert_orderby_to_lower_case(self, orderby): for orderby_field in orderby: self.lowercase_values(orderby_field) def _normalize_field_names_in_orderby(self, orderby): for orderby_field in orderby: self._replace_field_names(orderby_field) def _traverse_postorder(self, tree, visitor): op = list(tree.keys())[0] if op.lower() in self.complex_operators: for i, operand in enumerate(tree[op]): self._traverse_postorder(operand, visitor) if op.lower() == "not": self._traverse_postorder(tree[op], visitor) visitor(tree) def _check_cross_project_references(self, own_project_id, visibility_field): """Do not allow other than own_project_id.""" def check_project_id(subfilter): op, value = list(subfilter.items())[0] if (op.lower() not in self.complex_operators and list(value.keys())[0] == visibility_field and value[visibility_field] != own_project_id): raise base.ProjectNotAuthorized(value[visibility_field]) self._traverse_postorder(self.filter_expr, check_project_id) def _force_visibility(self, visibility_field): """Force visibility field. If the tenant is not admin insert an extra "and =" clause to the query. 
""" authorized_project = rbac.get_limited_to_project( pecan.request.headers, pecan.request.enforcer) is_admin = authorized_project is None if not is_admin: self._restrict_to_project(authorized_project, visibility_field) self._check_cross_project_references(authorized_project, visibility_field) def _restrict_to_project(self, project_id, visibility_field): restriction = {"=": {visibility_field: project_id}} if self.filter_expr is None: self.filter_expr = restriction else: self.filter_expr = {"and": [restriction, self.filter_expr]} def _replace_isotime_with_datetime(self, filter_expr): def replace_isotime(subfilter): op, value = list(subfilter.items())[0] if op.lower() not in self.complex_operators: field = list(value.keys())[0] if field in self.timestamp_fields: date_time = self._convert_to_datetime(subfilter[op][field]) subfilter[op][field] = date_time self._traverse_postorder(filter_expr, replace_isotime) def _normalize_field_names_for_db_model(self, filter_expr): def _normalize_field_names(subfilter): op, value = list(subfilter.items())[0] if op.lower() not in self.complex_operators: self._replace_field_names(value) self._traverse_postorder(filter_expr, _normalize_field_names) def _replace_field_names(self, subfilter): field, value = list(subfilter.items())[0] if field in self.name_mapping: del subfilter[field] subfilter[self.name_mapping[field]] = value if field.startswith("metadata."): del subfilter[field] subfilter["resource_" + field] = value @staticmethod def lowercase_keys(mapping): """Converts the values of the keys in mapping to lowercase.""" loop_mapping = copy.deepcopy(mapping) items = loop_mapping.items() for key, value in items: del mapping[key] mapping[key.lower()] = value def _convert_operator_to_lower_case(self, filter_expr): self._traverse_postorder(filter_expr, self.lowercase_keys) @staticmethod def _convert_to_datetime(isotime): try: date_time = timeutils.parse_isotime(isotime) date_time = date_time.replace(tzinfo=None) return date_time except ValueError: LOG.exception("String %s is not a valid isotime", isotime) msg = _('Failed to parse the timestamp value %s') % isotime raise base.ClientSideError(msg) def _validate_filter(self, filter_expr): jsonschema.validate(filter_expr, self.schema) def _validate_orderby(self, orderby_expr): jsonschema.validate(orderby_expr, self.orderby_schema) class QueryAlarmHistoryController(rest.RestController): """Provides complex query possibilities for alarm history.""" @wsme_pecan.wsexpose([alarms.AlarmChange], body=ComplexQuery) def post(self, body): """Define query for retrieving AlarmChange data. :param body: Query rules for the alarm history to be returned. """ target = rbac.target_from_segregation_rule( pecan.request.headers, pecan.request.enforcer) rbac.enforce('query_alarm_history', pecan.request.headers, pecan.request.enforcer, target) query = ValidatedComplexQuery(body, models.AlarmChange) query.validate(visibility_field="on_behalf_of") conn = pecan.request.storage return [alarms.AlarmChange.from_db_model(s) for s in conn.query_alarm_history(query.filter_expr, query.orderby, query.limit)] class QueryAlarmsController(rest.RestController): """Provides complex query possibilities for alarms.""" history = QueryAlarmHistoryController() @wsme_pecan.wsexpose([alarms.Alarm], body=ComplexQuery) def post(self, body): """Define query for retrieving Alarm data. :param body: Query rules for the alarms to be returned. 
""" target = rbac.target_from_segregation_rule( pecan.request.headers, pecan.request.enforcer) rbac.enforce('query_alarm', pecan.request.headers, pecan.request.enforcer, target) query = ValidatedComplexQuery(body, models.Alarm) query.validate(visibility_field="project_id") conn = pecan.request.storage return [alarms.Alarm.from_db_model(s) for s in conn.query_alarms(query.filter_expr, query.orderby, query.limit)] class QueryController(rest.RestController): alarms = QueryAlarmsController() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/api/controllers/v2/quotas.py0000664000175000017500000000640600000000000021014 0ustar00zuulzuul00000000000000# Copyright 2020 Catalyst Cloud LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log import pecan from pecan import rest import wsme from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from aodh.api.controllers.v2 import base from aodh.api import rbac LOG = log.getLogger(__name__) ALLOWED_RESOURCES = ('alarms',) class Quota(base.Base): resource = wtypes.wsattr(wtypes.Enum(str, *ALLOWED_RESOURCES), mandatory=True) limit = wsme.wsattr(wtypes.IntegerType(minimum=-1), mandatory=True) class Quotas(base.Base): project_id = wsme.wsattr(wtypes.text, mandatory=True) quotas = [Quota] class QuotasController(rest.RestController): """Quota API controller.""" @wsme_pecan.wsexpose(Quotas, str, ignore_extra_args=True) def get_all(self, project_id=None): """Get resource quotas of a project. - If no project given, get requested user's quota. - Admin user can get resource quotas of any project. 
""" request_project = pecan.request.headers.get('X-Project-Id') project_id = project_id if project_id else request_project is_admin = rbac.is_admin(pecan.request.headers) if project_id != request_project and not is_admin: raise base.ProjectNotAuthorized(project_id) LOG.debug('Getting resource quotas for project %s', project_id) db_quotas = pecan.request.storage.get_quotas(project_id=project_id) if len(db_quotas) == 0: project_alarm_quota = pecan.request.cfg.api.project_alarm_quota quotas = [{'resource': 'alarms', 'limit': project_alarm_quota}] db_quotas = pecan.request.storage.set_quotas(project_id, quotas) quotas = [Quota.from_db_model(i) for i in db_quotas] return Quotas(project_id=project_id, quotas=quotas) @wsme_pecan.wsexpose(Quotas, body=Quotas, status_code=201) def post(self, body): """Create or update quota.""" rbac.enforce('update_quotas', pecan.request.headers, pecan.request.enforcer, {}) params = body.to_dict() project_id = params['project_id'] input_quotas = [] for i in params.get('quotas', []): input_quotas.append(i.to_dict()) db_quotas = pecan.request.storage.set_quotas(project_id, input_quotas) quotas = [Quota.from_db_model(i) for i in db_quotas] return Quotas(project_id=project_id, quotas=quotas) @wsme_pecan.wsexpose(None, str, status_code=204) def delete(self, project_id): """Delete quotas for the given project.""" rbac.enforce('delete_quotas', pecan.request.headers, pecan.request.enforcer, {}) pecan.request.storage.delete_quotas(project_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/api/controllers/v2/root.py0000664000175000017500000000232600000000000020460 0ustar00zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 IBM Corp. # Copyright 2013 eNovance # Copyright Ericsson AB 2013. All rights reserved # Copyright 2014 Hewlett-Packard Company # Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from aodh.api.controllers.v2 import alarms from aodh.api.controllers.v2 import capabilities from aodh.api.controllers.v2 import query from aodh.api.controllers.v2 import quotas class V2Controller(object): """Version 2 API controller root.""" alarms = alarms.AlarmsController() query = query.QueryController() capabilities = capabilities.CapabilitiesController() quotas = quotas.QuotasController() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/api/controllers/v2/utils.py0000664000175000017500000003267600000000000020650 0ustar00zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 IBM Corp. # Copyright 2013 eNovance # Copyright Ericsson AB 2013. All rights reserved # Copyright 2014 Hewlett-Packard Company # Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import datetime from oslo_utils import timeutils import pecan from urllib import parse as urllib_parse import wsme from aodh.api.controllers.v2 import base from aodh.api import rbac from aodh.utils import get_func_valid_keys def get_auth_project(on_behalf_of=None): # when an alarm is created by an admin on behalf of another tenant # we must ensure for: # - threshold alarm, that an implicit query constraint on project_id is # added so that admin-level visibility on statistics is not leaked # Hence, for null auth_project (indicating admin-ness) we check if # the creating tenant differs from the tenant on whose behalf the # alarm is being created auth_project = rbac.get_limited_to_project(pecan.request.headers, pecan.request.enforcer) created_by = pecan.request.headers.get('X-Project-Id') is_admin = auth_project is None if is_admin and on_behalf_of != created_by: auth_project = on_behalf_of return auth_project def sanitize_query(query, db_func, on_behalf_of=None): """Check the query. See if: 1) the request is coming from admin - then allow full visibility 2) non-admin - make sure that the query includes the requester's project. """ q = copy.copy(query) auth_project = get_auth_project(on_behalf_of) if auth_project: _verify_query_segregation(q, auth_project) proj_q = [i for i in q if i.field == 'project_id'] valid_keys = get_func_valid_keys(db_func) if not proj_q and 'on_behalf_of' not in valid_keys: # The user is restricted, but they didn't specify a project # so add it for them. q.append(base.Query(field='project_id', op='eq', value=auth_project)) return q def _verify_query_segregation(query, auth_project=None): """Ensure non-admin queries are not constrained to another project.""" auth_project = (auth_project or rbac.get_limited_to_project(pecan.request.headers, pecan.request.enforcer)) if not auth_project: return for q in query: if q.field in ('project', 'project_id') and auth_project != q.value: raise base.ProjectNotAuthorized(q.value) def validate_query(query, db_func, internal_keys=None, allow_timestamps=True): """Validates the syntax of the query and verifies the query. Verification check if the query request is authorized for the included project. 
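get_auth_project() and sanitize_query() above implement the project segregation: an admin keeps full visibility, while a restricted caller gets an implicit project_id constraint appended to the query. A rough, self-contained illustration of that behaviour, using a simplified stand-in for the real Query type from aodh.api.controllers.v2.base (only the attributes the helper looks at are modelled):

import collections

# Simplified stand-in for the WSME Query type (assumption for illustration).
Query = collections.namedtuple('Query', 'field op value')

def scope_query(query, auth_project):
    """Mimic sanitize_query(): append project_id for restricted callers."""
    if auth_project is None:
        # Admin-level request: leave the query untouched.
        return list(query)
    q = list(query)
    if not any(item.field == 'project_id' for item in q):
        q.append(Query(field='project_id', op='eq', value=auth_project))
    return q

user_query = [Query('state', 'eq', 'alarm')]
print(scope_query(user_query, 'tenant-a'))
# [... Query(field='project_id', op='eq', value='tenant-a')]
print(scope_query(user_query, None))   # admin: query returned as-is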
:param query: Query expression that should be validated :param db_func: the function on the storage level, of which arguments will form the valid_keys list, which defines the valid fields for a query expression :param internal_keys: internally used field names, that should not be used for querying :param allow_timestamps: defines whether the timestamp-based constraint is applicable for this query or not :raises InvalidInput: if an operator is not supported for a given field :raises InvalidInput: if timestamp constraints are allowed, but search_offset was included without timestamp constraint :raises UnknownArgument: if a field name is not a timestamp field, nor in the list of valid keys """ internal_keys = internal_keys or [] _verify_query_segregation(query) valid_keys = get_func_valid_keys(db_func) if 'alarm_type' in valid_keys: valid_keys.remove('alarm_type') valid_keys.append('type') if 'pagination' in valid_keys: valid_keys.remove('pagination') internal_timestamp_keys = ['end_timestamp', 'start_timestamp', 'end_timestamp_op', 'start_timestamp_op'] if 'start_timestamp' in valid_keys: internal_keys += internal_timestamp_keys valid_keys += ['timestamp', 'search_offset'] internal_keys.append('self') internal_keys.append('metaquery') valid_keys = set(valid_keys) - set(internal_keys) translation = {'user_id': 'user', 'project_id': 'project', 'resource_id': 'resource'} has_timestamp_query = _validate_timestamp_fields(query, 'timestamp', ('lt', 'le', 'gt', 'ge'), allow_timestamps) has_search_offset_query = _validate_timestamp_fields(query, 'search_offset', 'eq', allow_timestamps) if has_search_offset_query and not has_timestamp_query: raise wsme.exc.InvalidInput('field', 'search_offset', "search_offset cannot be used without " + "timestamp") def _is_field_metadata(field): return (field.startswith('metadata.') or field.startswith('resource_metadata.')) for i in query: if i.field not in ('timestamp', 'search_offset'): key = translation.get(i.field, i.field) operator = i.op if key in valid_keys or _is_field_metadata(i.field): if operator == 'eq': if key == 'enabled': i.get_value('boolean') elif _is_field_metadata(key): i.get_value() else: raise wsme.exc.InvalidInput('op', i.op, 'unimplemented operator for ' '%s' % i.field) else: msg = ("unrecognized field in query: %s, " "valid keys: %s") % (query, sorted(valid_keys)) raise wsme.exc.UnknownArgument(key, msg) def _validate_timestamp_fields(query, field_name, operator_list, allow_timestamps): """Validates the timestamp related constraints in a query if there are any. :param query: query expression that may contain the timestamp fields :param field_name: timestamp name, which should be checked (timestamp, search_offset) :param operator_list: list of operators that are supported for that timestamp, which was specified in the parameter field_name :param allow_timestamps: defines whether the timestamp-based constraint is applicable to this query or not :returns: True, if there was a timestamp constraint, containing a timestamp field named as defined in field_name, in the query and it was allowed and syntactically correct. 
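One consequence of the validation above that is easy to trip over: search_offset is only accepted together with a timestamp constraint, and only with the 'eq' operator. A hedged sketch of that guard in isolation, reusing the same InvalidInput exception the real code raises:

import wsme.exc

def check_search_offset(query_fields):
    """Mimic the guard in validate_query(): search_offset needs timestamp."""
    if 'search_offset' in query_fields and 'timestamp' not in query_fields:
        raise wsme.exc.InvalidInput('field', 'search_offset',
                                    'search_offset cannot be used without '
                                    'timestamp')

check_search_offset(['timestamp', 'search_offset'])   # accepted
check_search_offset(['search_offset'])                # raises InvalidInput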
:returns: False, if there wasn't timestamp constraint, containing a timestamp field named as defined in field_name, in the query :raises InvalidInput: if an operator is unsupported for a given timestamp field :raises UnknownArgument: if the timestamp constraint is not allowed in the query """ for item in query: if item.field == field_name: # If *timestamp* or *search_offset* field was specified in the # query, but timestamp is not supported on that resource, on # which the query was invoked, then raise an exception. if not allow_timestamps: raise wsme.exc.UnknownArgument(field_name, "not valid for " + "this resource") if item.op not in operator_list: raise wsme.exc.InvalidInput('op', item.op, 'unimplemented operator for %s' % item.field) return True return False def query_to_kwargs(query, db_func, internal_keys=None, allow_timestamps=True): validate_query(query, db_func, internal_keys=internal_keys, allow_timestamps=allow_timestamps) query = sanitize_query(query, db_func) translation = {'user_id': 'user', 'project_id': 'project', 'resource_id': 'resource', 'type': 'alarm_type'} stamp = {} kwargs = {} for i in query: if i.field == 'timestamp': if i.op in ('lt', 'le'): stamp['end_timestamp'] = i.value stamp['end_timestamp_op'] = i.op elif i.op in ('gt', 'ge'): stamp['start_timestamp'] = i.value stamp['start_timestamp_op'] = i.op else: if i.op == 'eq': if i.field == 'search_offset': stamp['search_offset'] = i.value elif i.field == 'enabled': kwargs[i.field] = i.get_value('boolean') else: key = translation.get(i.field, i.field) kwargs[key] = i.value if stamp: kwargs.update(_get_query_timestamps(stamp)) return kwargs def _get_query_timestamps(args=None): """Return any optional timestamp information in the request. Determine the desired range, if any, from the GET arguments. Set up the query range using the specified offset. [query_start ... start_timestamp ... end_timestamp ... 
query_end] Returns a dictionary containing: start_timestamp: First timestamp to use for query start_timestamp_op: First timestamp operator to use for query end_timestamp: Final timestamp to use for query end_timestamp_op: Final timestamp operator to use for query """ if args is None: return {} search_offset = int(args.get('search_offset', 0)) def _parse_timestamp(timestamp): if not timestamp: return None try: iso_timestamp = timeutils.parse_isotime(timestamp) iso_timestamp = iso_timestamp.replace(tzinfo=None) except ValueError: raise wsme.exc.InvalidInput('timestamp', timestamp, 'invalid timestamp format') return iso_timestamp start_timestamp = _parse_timestamp(args.get('start_timestamp')) end_timestamp = _parse_timestamp(args.get('end_timestamp')) start_timestamp = start_timestamp - datetime.timedelta( minutes=search_offset) if start_timestamp else None end_timestamp = end_timestamp + datetime.timedelta( minutes=search_offset) if end_timestamp else None return {'start_timestamp': start_timestamp, 'end_timestamp': end_timestamp, 'start_timestamp_op': args.get('start_timestamp_op'), 'end_timestamp_op': args.get('end_timestamp_op')} def set_resp_location_hdr(location): location = '%s%s' % (pecan.request.script_name, location) # NOTE(sileht): according the pep-3333 the headers must be # str in py2 and py3 even this is not the same thing in both # version # see: http://legacy.python.org/dev/peps/pep-3333/#unicode-issues location = urllib_parse.quote(location) pecan.response.headers['Location'] = location def get_pagination_options(sort, limit, marker, api_model): sorts = list() if limit and limit <= 0: raise wsme.exc.InvalidInput('limit', limit, 'it should be a positive integer.') for s in sort or []: sort_key, __, sort_dir = s.partition(':') if sort_key not in api_model.SUPPORT_SORT_KEYS: raise wsme.exc.InvalidInput( 'sort', s, "the sort parameter should be a pair of sort " "key and sort dir combined with ':', or only" " sort key specified and sort dir will be default " "'asc', the supported sort keys are: %s" % str(api_model.SUPPORT_SORT_KEYS)) # the default sort direction is 'asc' sorts.append((sort_key, sort_dir or 'asc')) return {'limit': limit, 'marker': marker, 'sort': sorts} def get_query_value(queries, field, type=None): """Get value of the specified query field. :param queries: A list of Query object. :param field: Field name. """ for q in queries: if q.field == field: return q.get_value(type) raise wsme.exc.InvalidInput( 'field', field, "field %s is not provided" % field ) def is_field_exist(queries, field): """Check if a given field exists in a query list. :param queries: A list of Query object. :param field: Field name. """ for q in queries: if q.field == field: return True return False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/api/hooks.py0000664000175000017500000000422200000000000015720 0ustar00zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
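_get_query_timestamps() widens the requested window by search_offset minutes on both ends before handing it to the storage layer. A small worked example of that arithmetic (the timestamps are arbitrary):

import datetime

from oslo_utils import timeutils

search_offset = 10  # minutes, as passed in the 'search_offset' query field

start = timeutils.parse_isotime('2024-01-01T12:00:00').replace(tzinfo=None)
end = timeutils.parse_isotime('2024-01-01T13:00:00').replace(tzinfo=None)

# Same adjustment the helper applies to the parsed timestamps:
query_start = start - datetime.timedelta(minutes=search_offset)
query_end = end + datetime.timedelta(minutes=search_offset)

print(query_start, query_end)
# 2024-01-01 11:50:00  2024-01-01 13:10:00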
See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_policy import opts from pecan import hooks from aodh.api import policies # TODO(gmann): Remove overriding the default value of config options: # - 'policy_file' once oslo_policy change their default value to what is # overridden here. # - 'enforce_scope', and 'enforce_new_defaults' once aodh is ready with the # new RBAC (oslo_policy enable them by default) DEFAULT_POLICY_FILE = 'policy.yaml' opts.set_defaults( cfg.CONF, DEFAULT_POLICY_FILE, enforce_scope=False, enforce_new_defaults=False) class ConfigHook(hooks.PecanHook): """Attach the configuration and policy enforcer object to the request. That allows controllers to get it. """ def __init__(self, conf): self.conf = conf self.enforcer = policies.init(conf) def before(self, state): state.request.cfg = self.conf state.request.enforcer = self.enforcer class DBHook(hooks.PecanHook): def __init__(self, alarm_conn): self.storage = alarm_conn def before(self, state): state.request.storage = self.storage class TranslationHook(hooks.PecanHook): def after(self, state): # After a request has been done, we need to see if # ClientSideError has added an error onto the response. # If it has we need to get it info the thread-safe WSGI # environ to be used by the ParsableErrorMiddleware. if hasattr(state.response, 'translatable_error'): state.request.environ['translatable_error'] = ( state.response.translatable_error) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/api/middleware.py0000664000175000017500000001206100000000000016712 0ustar00zuulzuul00000000000000# # Copyright 2013 IBM Corp. # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Middleware to replace the plain text message body of an error response with one formatted so the client can parse it. Based on pecan.middleware.errordocument """ import json from lxml import etree from oslo_log import log import webob from aodh import i18n LOG = log.getLogger(__name__) class ParsableErrorMiddleware(object): """Replace error body with something the client can parse.""" @staticmethod def best_match_language(accept_language): """Determines best available locale from the Accept-Language header. :returns: the best language match or None if the 'Accept-Language' header was not available in the request. """ if not accept_language: return None all_languages = i18n.get_available_languages() return accept_language.best_match(all_languages) def __init__(self, app): self.app = app def __call__(self, environ, start_response): # Request for this state, modified by replace_start_response() # and used when an error is being reported. 
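ConfigHook and DBHook above simply stash shared objects on each request so controllers can reach them through pecan.request, and TranslationHook copies error details back into the WSGI environ. A minimal sketch of how such hooks are wired into a Pecan application; the root controller and hook here are illustrative placeholders, and aodh's real application factory is not shown in this section.

import time

import pecan
from pecan import hooks


class TimerHook(hooks.PecanHook):
    """Illustrative hook in the same style as ConfigHook/DBHook above."""

    def before(self, state):
        # Attach data to the request, just like ConfigHook attaches cfg
        # and the policy enforcer.
        state.request.started_at = time.time()


class RootController(object):
    @pecan.expose('json')
    def index(self):
        return {'started_at': pecan.request.started_at}


# Hooks are handed to the WSGI app factory; aodh does the equivalent with
# ConfigHook, DBHook and TranslationHook when building its API application.
application = pecan.make_app(RootController(), hooks=[TimerHook()])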
state = {} def replacement_start_response(status, headers, exc_info=None): """Overrides the default response to make errors parsable.""" try: status_code = int(status.split(' ')[0]) state['status_code'] = status_code except (ValueError, TypeError): # pragma: nocover raise Exception(( 'ErrorDocumentMiddleware received an invalid ' 'status %s' % status )) else: if (state['status_code'] // 100) not in (2, 3): # Remove some headers so we can replace them later # when we have the full error message and can # compute the length. headers = [(h, v) for (h, v) in headers if h not in ('Content-Length', 'Content-Type') ] # Save the headers in case we need to modify them. state['headers'] = headers return start_response(status, headers, exc_info) app_iter = self.app(environ, replacement_start_response) if (state['status_code'] // 100) not in (2, 3): req = webob.Request(environ) error = environ.get('translatable_error') user_locale = self.best_match_language(req.accept_language) if (req.accept.best_match(['application/json', 'application/xml']) == 'application/xml'): content_type = 'application/xml' try: # simple check xml is valid fault = etree.fromstring(b'\n'.join(app_iter)) # Add the translated error to the xml data if error is not None: for fault_string in fault.findall('faultstring'): fault_string.text = i18n.translate(error, user_locale) error_message = etree.tostring(fault) body = b''.join((b'', error_message, b'')) except etree.XMLSyntaxError as err: LOG.error('Error parsing HTTP response: %s', err) error_message = state['status_code'] body = '%s' % error_message body = body.encode('utf-8') else: content_type = 'application/json' app_data = b'\n'.join(app_iter) app_data = app_data.decode('utf-8') try: fault = json.loads(app_data) if error is not None and 'faultstring' in fault: fault['faultstring'] = i18n.translate(error, user_locale) except ValueError: fault = app_data body = json.dumps({'error_message': fault}) body = body.encode('utf-8') state['headers'].append(('Content-Length', str(len(body)))) state['headers'].append(('Content-Type', content_type)) body = [body] else: body = app_iter return body ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/api/policies.py0000664000175000017500000002537100000000000016414 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import versionutils from oslo_policy import policy RULE_CONTEXT_IS_ADMIN = 'rule:context_is_admin' RULE_ADMIN_OR_OWNER = 'rule:context_is_admin or project_id:%(project_id)s' UNPROTECTED = '' # Constants that represent common personas. PROJECT_ADMIN = 'role:admin and project_id:%(project_id)s' PROJECT_MEMBER = 'role:member and project_id:%(project_id)s' PROJECT_READER = 'role:reader and project_id:%(project_id)s' DEPRECATED_REASON = """ The alarm and quota APIs now support system-scope and default roles. 
""" deprecated_get_alarm = policy.DeprecatedRule( name="telemetry:get_alarm", check_str=RULE_ADMIN_OR_OWNER, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_get_alarms = policy.DeprecatedRule( name="telemetry:get_alarms", check_str=RULE_ADMIN_OR_OWNER, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_get_all_alarms = policy.DeprecatedRule( name="telemetry:get_alarms:all_projects", check_str=RULE_CONTEXT_IS_ADMIN, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_query_alarm = policy.DeprecatedRule( name="telemetry:query_alarm", check_str=RULE_ADMIN_OR_OWNER, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_create_alarm = policy.DeprecatedRule( name="telemetry:create_alarm", check_str=UNPROTECTED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_change_alarm = policy.DeprecatedRule( name="telemetry:change_alarm", check_str=RULE_ADMIN_OR_OWNER, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_delete_alarm = policy.DeprecatedRule( name="telemetry:delete_alarm", check_str=RULE_ADMIN_OR_OWNER, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_get_alarm_state = policy.DeprecatedRule( name="telemetry:get_alarm_state", check_str=RULE_ADMIN_OR_OWNER, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_change_alarm_state = policy.DeprecatedRule( name="telemetry:change_alarm_state", check_str=RULE_ADMIN_OR_OWNER, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_alarm_history = policy.DeprecatedRule( name="telemetry:alarm_history", check_str=RULE_ADMIN_OR_OWNER, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_query_alarm_history = policy.DeprecatedRule( name="telemetry:query_alarm_history", check_str=RULE_ADMIN_OR_OWNER, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_update_quotas = policy.DeprecatedRule( name="telemetry:update_quotas", check_str=RULE_CONTEXT_IS_ADMIN, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_delete_quotas = policy.DeprecatedRule( name="telemetry:delete_quotas", check_str=RULE_CONTEXT_IS_ADMIN, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) rules = [ # This policy can be removed once all the policies in this file are no # longer deprecated and are using the new default policies with proper # scope support. policy.RuleDefault( name="context_is_admin", check_str="role:admin" ), policy.RuleDefault( name="segregation", check_str=RULE_CONTEXT_IS_ADMIN), # This policy can be removed once all the policies in this file are no # longer deprecated and are using the new default policies with proper # scope support. policy.RuleDefault( name="admin_or_owner", check_str=RULE_ADMIN_OR_OWNER ), # This policy can be removed once all the policies in this file are no # longer deprecated and are using the new default policies with proper # scope support. We shouldn't need a "default" policy if each policy has a # reasonable default. This concept of a broad "default" existed prior to # registering policies in code with their own default values. 
policy.RuleDefault( name="default", check_str=RULE_ADMIN_OR_OWNER ), policy.DocumentedRuleDefault( name="telemetry:get_alarm", check_str=PROJECT_READER, scope_types=['project'], description='Get an alarm.', operations=[ { 'path': '/v2/alarms/{alarm_id}', 'method': 'GET' } ], deprecated_rule=deprecated_get_alarm ), policy.DocumentedRuleDefault( name="telemetry:get_alarms", check_str=PROJECT_READER, scope_types=['project'], description='Get all alarms, based on the query provided.', operations=[ { 'path': '/v2/alarms', 'method': 'GET' } ], deprecated_rule=deprecated_get_alarms ), policy.DocumentedRuleDefault( name="telemetry:get_alarms:all_projects", check_str=PROJECT_ADMIN, scope_types=['project'], description='Get alarms of all projects.', operations=[ { 'path': '/v2/alarms', 'method': 'GET' } ], deprecated_rule=deprecated_get_all_alarms ), policy.DocumentedRuleDefault( name="telemetry:query_alarm", check_str=PROJECT_READER, scope_types=['project'], description='Get all alarms, based on the query provided.', operations=[ { 'path': '/v2/query/alarms', 'method': 'POST' } ], deprecated_rule=deprecated_query_alarm ), policy.DocumentedRuleDefault( name="telemetry:create_alarm", check_str=PROJECT_MEMBER, scope_types=['project'], description='Create a new alarm.', operations=[ { 'path': '/v2/alarms', 'method': 'POST' } ], deprecated_rule=deprecated_create_alarm ), policy.DocumentedRuleDefault( name="telemetry:change_alarm", check_str=PROJECT_MEMBER, scope_types=['project'], description='Modify this alarm.', operations=[ { 'path': '/v2/alarms/{alarm_id}', 'method': 'PUT' } ], deprecated_rule=deprecated_change_alarm ), policy.DocumentedRuleDefault( name="telemetry:delete_alarm", check_str=PROJECT_MEMBER, scope_types=['project'], description='Delete this alarm.', operations=[ { 'path': '/v2/alarms/{alarm_id}', 'method': 'DELETE' } ], deprecated_rule=deprecated_delete_alarm ), policy.DocumentedRuleDefault( name="telemetry:get_alarm_state", check_str=PROJECT_READER, scope_types=['project'], description='Get the state of this alarm.', operations=[ { 'path': '/v2/alarms/{alarm_id}/state', 'method': 'GET' } ], deprecated_rule=deprecated_get_alarm_state ), policy.DocumentedRuleDefault( name="telemetry:change_alarm_state", check_str=PROJECT_MEMBER, scope_types=['project'], description='Set the state of this alarm.', operations=[ { 'path': '/v2/alarms/{alarm_id}/state', 'method': 'PUT' } ], deprecated_rule=deprecated_change_alarm_state ), policy.DocumentedRuleDefault( name="telemetry:alarm_history", check_str=PROJECT_READER, scope_types=['project'], description='Assembles the alarm history requested.', operations=[ { 'path': '/v2/alarms/{alarm_id}/history', 'method': 'GET' } ], deprecated_rule=deprecated_alarm_history ), policy.DocumentedRuleDefault( name="telemetry:query_alarm_history", check_str=PROJECT_READER, scope_types=['project'], description='Define query for retrieving AlarmChange data.', operations=[ { 'path': '/v2/query/alarms/history', 'method': 'POST' } ], deprecated_rule=deprecated_query_alarm_history ), policy.DocumentedRuleDefault( name="telemetry:update_quotas", check_str=PROJECT_ADMIN, scope_types=['project'], description='Update resources quotas for project.', operations=[ { 'path': '/v2/quotas', 'method': 'POST' } ], deprecated_rule=deprecated_update_quotas ), policy.DocumentedRuleDefault( name="telemetry:delete_quotas", check_str=PROJECT_ADMIN, scope_types=['project'], description='Delete resources quotas for project.', operations=[ { 'path': '/v2/quotas/{project_id}', 'method': 
'DELETE' } ], deprecated_rule=deprecated_delete_quotas ) ] def list_rules(): return rules def init(conf): enforcer = policy.Enforcer(conf, default_rule="default") # NOTE(gmann): Explictly disable the warnings for policies # changing their default check_str. With new RBAC policy # work, all the policy defaults have been changed and warning for # each policy started filling the logs limit for various tool. # Once we move to new defaults only world then we can enable these # warning again. enforcer.suppress_default_change_warnings = True enforcer.register_defaults(list_rules()) return enforcer def get_enforcer(): # This method is used by oslopolicy CLI scripts in order to generate policy # files from overrides on disk and defaults in code. cfg.CONF([], project='aodh') return init(cfg.CONF) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/api/rbac.py0000664000175000017500000000667600000000000015523 0ustar00zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2014 Hewlett-Packard Company # Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Access Control Lists (ACL's) control access the API server.""" import pecan def target_from_segregation_rule(headers, enforcer): """Return a target corresponding to an alarm returned by segregation rule This allows to use project_id: in an oslo_policy rule for query/listing. :param headers: HTTP headers dictionary :param enforcer: policy enforcer :returns: target """ project_id = get_limited_to_project(headers, enforcer) if project_id is not None: return {'project_id': project_id} return {} def enforce(policy_name, headers, enforcer, target): """Return the user and project the request should be limited to. :param policy_name: the policy name to validate authz against. :param headers: HTTP headers dictionary :param enforcer: policy enforcer :param target: the alarm or "auto" to """ rule_method = "telemetry:" + policy_name credentials = { 'roles': headers.get('X-Roles', "").split(","), 'user_id': headers.get('X-User-Id'), 'project_id': headers.get('X-Project-Id'), } if not enforcer.enforce(rule_method, target, credentials): pecan.core.abort(status_code=403, detail='RBAC Authorization Failed') # TODO(fabiog): these methods are still used because the scoping part is really # convoluted and difficult to separate out. def get_limited_to(headers, enforcer): """Return the user and project the request should be limited to. :param headers: HTTP headers dictionary :param enforcer: policy enforcer :return: A tuple of (user, project), set to None if there's no limit on one of these. """ # TODO(sileht): Only filtering on role work currently for segregation # oslo.policy expects the target to be the alarm. That will allow # creating more enhanced rbac. But for now we enforce the # scoping of request to the project-id, so... 
target = {} credentials = { 'roles': headers.get('X-Roles', "").split(","), } # maintain backward compat with Juno and previous by using context_is_admin # rule if the segregation rule (added in Kilo) is not defined rules = enforcer.rules.keys() rule_name = 'segregation' if 'segregation' in rules else 'context_is_admin' if not enforcer.enforce(rule_name, target, credentials): return headers.get('X-User-Id'), headers.get('X-Project-Id') return None, None def get_limited_to_project(headers, enforcer): """Return the project the request should be limited to. :param headers: HTTP headers dictionary :param enforcer: policy enforcer :return: A project, or None if there's no limit on it. """ return get_limited_to(headers, enforcer)[1] def is_admin(headers): return 'admin' in headers.get('X-Roles', "").split(",") ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0710115 aodh-19.0.0/aodh/cmd/0000775000175000017500000000000000000000000014215 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/cmd/__init__.py0000664000175000017500000000171300000000000016330 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Copyright 2017 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import sys def config_generator(): try: from oslo_config import generator generator.main( ['--config-file', '%s/aodh-config-generator.conf' % os.path.dirname(__file__)] + sys.argv[1:]) except Exception as e: print("Unable to build sample configuration file: %s" % e) return 1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/cmd/alarm.py0000664000175000017500000000311500000000000015663 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Copyright 2014 OpenStack Foundation # Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
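The enforce() helper above builds its credentials purely from the Keystone middleware headers (X-Roles, X-User-Id, X-Project-Id) and prefixes the policy name with "telemetry:". A rough, self-contained sketch of the same check done directly against an enforcer loaded with the defaults from aodh.api.policies; configuration handling is simplified, and the header values are made up.

from oslo_config import cfg

from aodh.api import policies

conf = cfg.ConfigOpts()
conf([], project='aodh')            # parse an empty command line
enforcer = policies.init(conf)      # same helper the API ConfigHook uses

headers = {'X-Roles': 'reader,member',
           'X-User-Id': 'some-user',
           'X-Project-Id': 'some-project'}

credentials = {'roles': headers['X-Roles'].split(','),
               'user_id': headers['X-User-Id'],
               'project_id': headers['X-Project-Id']}
target = {'project_id': headers['X-Project-Id']}

# "telemetry:get_alarm" defaults to the project reader persona, so a
# reader acting on their own project should pass this check.
print(enforcer.enforce('telemetry:get_alarm', target, credentials))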
import cotyledon from oslo_log import log from aodh import evaluator as evaluator_svc from aodh import event as event_svc from aodh import notifier as notifier_svc from aodh import service LOG = log.getLogger(__name__) def notifier(): conf = service.prepare_service() conf.log_opt_values(LOG, log.DEBUG) sm = cotyledon.ServiceManager() sm.add(notifier_svc.AlarmNotifierService, workers=conf.notifier.workers, args=(conf,)) sm.run() def evaluator(): conf = service.prepare_service() conf.log_opt_values(LOG, log.DEBUG) sm = cotyledon.ServiceManager() sm.add(evaluator_svc.AlarmEvaluationService, workers=conf.evaluator.workers, args=(conf,)) sm.run() def listener(): conf = service.prepare_service() conf.log_opt_values(LOG, log.DEBUG) sm = cotyledon.ServiceManager() sm.add(event_svc.EventAlarmEvaluationService, workers=conf.listener.workers, args=(conf,)) sm.run() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/cmd/aodh-config-generator.conf0000664000175000017500000000053000000000000021224 0ustar00zuulzuul00000000000000[DEFAULT] wrap_width = 79 namespace = aodh namespace = aodh-auth namespace = oslo.db namespace = oslo.log namespace = oslo.messaging namespace = oslo.middleware.cors namespace = oslo.middleware.healthcheck namespace = oslo.middleware.http_proxy_to_wsgi namespace = oslo.policy namespace = oslo.reports namespace = keystonemiddleware.auth_token ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/cmd/aodh-policy-generator.conf0000664000175000017500000000003300000000000021254 0ustar00zuulzuul00000000000000[DEFAULT] namespace = aodh ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/cmd/status.py0000664000175000017500000000237000000000000016114 0ustar00zuulzuul00000000000000# Copyright (c) 2018 NEC, Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from oslo_config import cfg from oslo_upgradecheck import common_checks from oslo_upgradecheck import upgradecheck from aodh.i18n import _ CONF = cfg.CONF class Checks(upgradecheck.UpgradeCommands): """Contains upgrade checks Various upgrade checks should be added as separate methods in this class and added to _upgrade_checks tuple. """ _upgrade_checks = ( (_('policy File JSON to YAML Migration'), (common_checks.check_policy_json, {'conf': CONF})), ) def main(): return upgradecheck.main( CONF, project='aodh', upgrade_command=Checks()) if __name__ == '__main__': sys.exit(main()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/cmd/storage.py0000664000175000017500000000351200000000000016234 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
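notifier(), evaluator() and listener() above all follow the same pattern: prepare the configuration, then hand a service class to a cotyledon.ServiceManager, which forks and supervises the worker processes. A stripped-down sketch of that pattern with a dummy service; DemoService and its greeting argument are illustrative, not part of aodh.

import time

import cotyledon


class DemoService(cotyledon.Service):
    """Toy worker, analogous to AlarmNotifierService and friends."""

    def __init__(self, worker_id, greeting):
        super(DemoService, self).__init__(worker_id)
        self.worker_id = worker_id
        self.greeting = greeting

    def run(self):
        while True:
            print('%s from worker %s' % (self.greeting, self.worker_id))
            time.sleep(10)


def main():
    sm = cotyledon.ServiceManager()
    # Same call shape as in notifier()/evaluator()/listener() above.
    sm.add(DemoService, workers=2, args=('hello',))
    sm.run()


if __name__ == '__main__':
    main()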
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from aodh import service from aodh import storage LOG = log.getLogger(__name__) def dbsync(): conf = service.prepare_service() storage.get_connection_from_config(conf).upgrade() def expirer(): conf = service.prepare_service() if conf.database.alarm_history_time_to_live > 0: LOG.debug("Clearing expired alarm history data") conn = storage.get_connection_from_config(conf) max_count = conf.database.alarm_histories_delete_batch_size try: if max_count > 0: conn.clear_expired_alarm_history_data( conf.database.alarm_history_time_to_live, max_count) else: deleted = max_count = 100 while deleted and deleted > 0: deleted = conn.clear_expired_alarm_history_data( conf.database.alarm_history_time_to_live, max_count) except TypeError: LOG.warning("Storage driver does not support " "'alarm_histories_delete_batch_size' config option.") else: LOG.info("Nothing to clean, database alarm history time to live " "is disabled") ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0750122 aodh-19.0.0/aodh/conf/0000775000175000017500000000000000000000000014377 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/conf/__init__.py0000664000175000017500000000000000000000000016476 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/conf/defaults.py0000664000175000017500000000362500000000000016566 0ustar00zuulzuul00000000000000# Copyright 2016 Hewlett Packard Enterprise Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_middleware import cors from oslo_policy import opts as policy_opts def set_lib_defaults(): """Update default value for configuration options from other namespace. Example, oslo lib config options. This is needed for config generator tool to pick these default value changes. https://docs.openstack.org/oslo.config/latest/cli/ generator.html#modifying-defaults-from-other-namespaces """ set_cors_middleware_defaults() # Update default value of oslo.policy policy_file, , # enforce_scope, and enforce_new_defaults config options. 
policy_opts.set_defaults(cfg.CONF, 'policy.yaml', enforce_scope=False, enforce_new_defaults=False) def set_cors_middleware_defaults(): """Update default configuration options for oslo.middleware.""" cors.set_defaults( allow_headers=['X-Auth-Token', 'X-Openstack-Request-Id', 'X-Subject-Token'], expose_headers=['X-Auth-Token', 'X-Openstack-Request-Id', 'X-Subject-Token'], allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH'] ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/coordination.py0000664000175000017500000002173500000000000016524 0ustar00zuulzuul00000000000000# # Copyright 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import bisect import struct from oslo_config import cfg from oslo_log import log from oslo_utils import encodeutils from oslo_utils.secretutils import md5 from oslo_utils import uuidutils import tenacity import tooz.coordination LOG = log.getLogger(__name__) OPTS = [ cfg.StrOpt('backend_url', secret=True, help='The backend URL to use for distributed coordination. If ' 'left empty, alarm evaluation won\'t do workload ' 'partitioning and will only function correctly if a ' 'single instance of the service is running.'), cfg.FloatOpt('heartbeat_interval', default=1.0, deprecated_name='heartbeat', help='Number of seconds between heartbeats for distributed ' 'coordination.'), cfg.IntOpt('retry_backoff', default=1, help='Retry backoff factor when retrying to connect with' ' coordination backend'), cfg.IntOpt('max_retry_interval', default=30, help='Maximum number of seconds between retry to join ' 'partitioning group') ] class ErrorJoiningPartitioningGroup(Exception): def __init__(self): super(ErrorJoiningPartitioningGroup, self).__init__(( 'Error occurred when joining partitioning group')) class MemberNotInGroupError(Exception): def __init__(self, group_id, members, my_id): super(MemberNotInGroupError, self).__init__(( 'Group ID: %(group_id)s, Members: %(members)s, Me: %(me)s: ' 'Current agent is not part of group and cannot take tasks') % {'group_id': group_id, 'members': members, 'me': my_id}) class HashRing(object): def __init__(self, nodes, replicas=100): self._ring = dict() self._sorted_keys = [] for node in nodes: for r in range(replicas): hashed_key = self._hash('%s-%s' % (node, r)) self._ring[hashed_key] = node self._sorted_keys.append(hashed_key) self._sorted_keys.sort() @staticmethod def _hash(key): return struct.unpack_from( '>I', md5(str(key).encode(), usedforsecurity=False).digest())[0] def _get_position_on_ring(self, key): hashed_key = self._hash(key) position = bisect.bisect(self._sorted_keys, hashed_key) return position if position < len(self._sorted_keys) else 0 def get_node(self, key): if not self._ring: return None pos = self._get_position_on_ring(key) return self._ring[self._sorted_keys[pos]] class PartitionCoordinator(object): """Workload partitioning coordinator. This class uses the `tooz` library to manage group membership. 
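The HashRing above is what actually spreads alarms across evaluators: each member is hashed onto the ring many times (100 replicas by default) and every alarm id is routed to whichever member owns the nearest ring position. A quick illustration of using it directly; the member and alarm ids below are made up.

from aodh.coordination import HashRing

members = ['evaluator-1', 'evaluator-2', 'evaluator-3']
ring = HashRing(members)

alarm_ids = ['alarm-%d' % i for i in range(10)]
assignment = {m: [] for m in members}
for alarm_id in alarm_ids:
    assignment[ring.get_node(alarm_id)].append(alarm_id)

# Every alarm lands on exactly one member; the split is roughly even and
# stays mostly stable when a single member joins or leaves the ring.
for member, owned in assignment.items():
    print(member, owned)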
To ensure that the other agents know this agent is still alive, the `heartbeat` method should be called periodically. Coordination errors and reconnects are handled under the hood, so the service using the partition coordinator need not care whether the coordination backend is down. The `extract_my_subset` will simply return an empty iterable in this case. """ def __init__(self, conf, my_id=None): self.conf = conf self.backend_url = self.conf.coordination.backend_url self._coordinator = None self._groups = set() self._my_id = my_id or \ encodeutils.safe_encode(uuidutils.generate_uuid()) def start(self): if self.backend_url: try: self._coordinator = tooz.coordination.get_coordinator( self.backend_url, self._my_id) self._coordinator.start() LOG.info('Coordination backend started successfully.') except tooz.coordination.ToozError: LOG.exception('Error connecting to coordination backend.') def stop(self): if not self._coordinator: return for group in list(self._groups): self.leave_group(group) try: self._coordinator.stop() except tooz.coordination.ToozError: LOG.exception('Error connecting to coordination backend.') finally: self._coordinator = None def is_active(self): return self._coordinator is not None def heartbeat(self): if self._coordinator: if not self._coordinator.is_started: # re-connect self.start() try: self._coordinator.heartbeat() except tooz.coordination.ToozError: LOG.exception('Error sending a heartbeat to coordination ' 'backend.') def join_group(self, group_id): if (not self._coordinator or not self._coordinator.is_started or not group_id): return @tenacity.retry( wait=tenacity.wait_exponential( multiplier=self.conf.coordination.retry_backoff, max=self.conf.coordination.max_retry_interval), retry=tenacity.retry_if_exception_type( ErrorJoiningPartitioningGroup)) def _inner(): try: join_req = self._coordinator.join_group(group_id) join_req.get() LOG.info('Joined partitioning group %s', group_id) except tooz.coordination.MemberAlreadyExist: return except tooz.coordination.GroupNotCreated: create_grp_req = self._coordinator.create_group(group_id) try: create_grp_req.get() except tooz.coordination.GroupAlreadyExist: pass raise ErrorJoiningPartitioningGroup() except tooz.coordination.ToozError: LOG.exception('Error joining partitioning group %s,' ' re-trying', group_id) raise ErrorJoiningPartitioningGroup() self._groups.add(group_id) return _inner() def leave_group(self, group_id): if group_id not in self._groups: return if self._coordinator: self._coordinator.leave_group(group_id) self._groups.remove(group_id) LOG.info('Left partitioning group %s', group_id) def _get_members(self, group_id): if not self._coordinator: return [self._my_id] while True: get_members_req = self._coordinator.get_members(group_id) try: return get_members_req.get() except tooz.coordination.GroupNotCreated: self.join_group(group_id) @tenacity.retry( wait=tenacity.wait_random(max=2), stop=tenacity.stop_after_attempt(5), retry=tenacity.retry_if_exception_type(MemberNotInGroupError), reraise=True) def extract_my_subset(self, group_id, universal_set): """Filters an iterable, returning only objects assigned to this agent. We have a list of objects and get a list of active group members from `tooz`. We then hash all the objects into buckets and return only the ones that hashed into *our* bucket. 
""" if not group_id or not self.is_active(): return universal_set if group_id not in self._groups: self.join_group(group_id) try: members = self._get_members(group_id) LOG.debug('Members of group: %s, Me: %s', members, self._my_id) if self._my_id not in members: LOG.warning('Cannot extract tasks because agent failed to ' 'join group properly. Rejoining group.') self.join_group(group_id) members = self._get_members(group_id) if self._my_id not in members: raise MemberNotInGroupError(group_id, members, self._my_id) LOG.debug('Members of group: %s, Me: %s', members, self._my_id) hr = HashRing(members) LOG.debug('Universal set: %s', universal_set) my_subset = [v for v in universal_set if hr.get_node(str(v)) == self._my_id] LOG.debug('My subset: %s', my_subset) return my_subset except tooz.coordination.ToozError: LOG.exception('Error getting group membership info from ' 'coordination backend.') return [] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0750122 aodh-19.0.0/aodh/evaluator/0000775000175000017500000000000000000000000015454 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/evaluator/__init__.py0000664000175000017500000002620600000000000017573 0ustar00zuulzuul00000000000000# # Copyright 2013-2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import datetime import json import threading from concurrent import futures import cotyledon import croniter from futurist import periodics from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils from oslo_utils import uuidutils from stevedore import extension try: import zoneinfo except ImportError: # zoneinfo is available in Python >= 3.9 import pytz zoneinfo = None import aodh from aodh import coordination from aodh import keystone_client from aodh import messaging from aodh import queue from aodh import storage from aodh.storage import models from aodh.storage.sqlalchemy import models as sql_models LOG = log.getLogger(__name__) UNKNOWN = 'insufficient data' OK = 'ok' ALARM = 'alarm' OPTS = [ cfg.BoolOpt('record_history', default=True, help='Record alarm change events.' 
), ] class Evaluator(object, metaclass=abc.ABCMeta): """Base class for alarm rule evaluator plugins.""" def __init__(self, conf): self.conf = conf self.notifier = queue.AlarmNotifier(self.conf) self.storage_conn = None self._ks_client = None self._alarm_change_notifier = None @property def ks_client(self): if self._ks_client is None: self._ks_client = keystone_client.get_client(self.conf) return self._ks_client @property def _storage_conn(self): if not self.storage_conn: self.storage_conn = storage.get_connection_from_config(self.conf) return self.storage_conn @property def alarm_change_notifier(self): if not self._alarm_change_notifier: transport = messaging.get_transport(self.conf) self._alarm_change_notifier = messaging.get_notifier( transport, publisher_id="aodh.evaluator") return self._alarm_change_notifier def _record_change(self, alarm, reason): if not self.conf.record_history: return type = models.AlarmChange.STATE_TRANSITION detail = json.dumps({'state': alarm.state, 'transition_reason': reason}) user_id, project_id = self.ks_client.user_id, self.ks_client.project_id on_behalf_of = alarm.project_id now = timeutils.utcnow() severity = alarm.severity payload = dict(event_id=uuidutils.generate_uuid(), alarm_id=alarm.alarm_id, type=type, detail=detail, user_id=user_id, project_id=project_id, on_behalf_of=on_behalf_of, timestamp=now, severity=severity) try: self._storage_conn.record_alarm_change(payload) except aodh.NotImplementedError: pass notification = "alarm.state_transition" self.alarm_change_notifier.info({}, notification, payload) def _refresh(self, alarm, state, reason, reason_data, always_record=False): """Refresh alarm state.""" try: previous = alarm.state alarm.state = state alarm.state_reason = reason if previous != state or always_record: LOG.info('alarm %(id)s transitioning to %(state)s because ' '%(reason)s', {'id': alarm.alarm_id, 'state': state, 'reason': reason}) try: self._storage_conn.update_alarm(alarm) except storage.AlarmNotFound: LOG.warning("Skip updating this alarm's state, the" "alarm: %s has been deleted", alarm.alarm_id) else: self._record_change(alarm, reason) self.notifier.notify(alarm, previous, reason, reason_data) elif alarm.repeat_actions: self.notifier.notify(alarm, previous, reason, reason_data) except Exception: # retry will occur naturally on the next evaluation # cycle (unless alarm state reverts in the meantime) LOG.exception('alarm state update failed') @classmethod def within_time_constraint(cls, alarm): """Check whether the alarm is within at least one of its time limits. If there are none, then the answer is yes. """ if not alarm.time_constraints: return True now_utc = timeutils.utcnow().replace(tzinfo=datetime.timezone.utc) for tc in alarm.time_constraints: if zoneinfo: tz = (zoneinfo.ZoneInfo(tc['timezone']) if tc['timezone'] else None) else: tz = pytz.timezone(tc['timezone']) if tc['timezone'] else None now_tz = now_utc.astimezone(tz) if tz else now_utc start_cron = croniter.croniter(tc['start'], now_tz) if cls._is_exact_match(start_cron, now_tz): return True # start_cron.cur has changed in _is_exact_match(), # croniter cannot recover properly in some corner case. start_cron = croniter.croniter(tc['start'], now_tz) latest_start = start_cron.get_prev(datetime.datetime) duration = datetime.timedelta(seconds=tc['duration']) if latest_start <= now_tz <= latest_start + duration: return True return False @staticmethod def _is_exact_match(cron, ts): """Handle edge in case when both parameters are equal. 
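within_time_constraint() above treats each time constraint as a cron "start" expression plus a duration in seconds, and asks croniter for the most recent start. A hedged sketch of the same window check outside the Evaluator class; the constraint values are examples, and the exact-match and timezone handling done by the real method is omitted.

import datetime

import croniter
from oslo_utils import timeutils

# Example constraint: active for one hour starting 09:00 UTC every day.
tc = {'start': '0 9 * * *', 'duration': 3600, 'timezone': ''}

now = timeutils.utcnow().replace(tzinfo=datetime.timezone.utc)
latest_start = croniter.croniter(tc['start'], now).get_prev(datetime.datetime)
duration = datetime.timedelta(seconds=tc['duration'])

in_window = latest_start <= now <= latest_start + duration
print('within time constraint:', in_window)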
Handle edge case where if the timestamp is the same as the cron point in time to the minute, croniter returns the previous start, not the current. We can check this by first going one step back and then one step forward and check if we are at the original point in time. """ cron.get_prev() diff = (ts - cron.get_next(datetime.datetime)).total_seconds() return abs(diff) < 60 # minute precision @abc.abstractmethod def evaluate(self, alarm): """Interface definition. evaluate an alarm alarm Alarm: an instance of the Alarm """ class AlarmEvaluationService(cotyledon.Service): PARTITIONING_GROUP_NAME = "alarm_evaluator" EVALUATOR_EXTENSIONS_NAMESPACE = "aodh.evaluator" def __init__(self, worker_id, conf): super(AlarmEvaluationService, self).__init__(worker_id) self.conf = conf ef = lambda: futures.ThreadPoolExecutor(max_workers=10) # noqa: E731 self.periodic = periodics.PeriodicWorker.create( [], executor_factory=ef) self.evaluators = extension.ExtensionManager( namespace=self.EVALUATOR_EXTENSIONS_NAMESPACE, invoke_on_load=True, invoke_args=(self.conf,) ) self.storage_conn = storage.get_connection_from_config(self.conf) self.partition_coordinator = coordination.PartitionCoordinator( self.conf) self.partition_coordinator.start() self.partition_coordinator.join_group(self.PARTITIONING_GROUP_NAME) # allow time for coordination if necessary delay_start = self.partition_coordinator.is_active() evaluation_interval = self.conf.evaluator.evaluation_interval if self.evaluators: @periodics.periodic(spacing=evaluation_interval, run_immediately=not delay_start) def evaluate_alarms(): self._evaluate_assigned_alarms() self.periodic.add(evaluate_alarms) if self.partition_coordinator.is_active(): heartbeat_interval = min(self.conf.coordination.heartbeat_interval, evaluation_interval / 4) @periodics.periodic(spacing=heartbeat_interval, run_immediately=True) def heartbeat(): self.partition_coordinator.heartbeat() self.periodic.add(heartbeat) t = threading.Thread(target=self.periodic.start) t.daemon = True t.start() def terminate(self): self.periodic.stop() self.partition_coordinator.stop() self.periodic.wait() def _evaluate_assigned_alarms(self): try: alarms = self._assigned_alarms() LOG.info('initiating evaluation cycle on %d alarms', len(alarms)) for alarm in alarms: self._evaluate_alarm(alarm) except Exception: LOG.exception('alarm evaluation cycle failed') def _evaluate_alarm(self, alarm): """Evaluate the alarms assigned to this evaluator.""" if alarm.type not in self.evaluators: LOG.warning('Skipping alarm %s, unsupported type: %s', alarm.alarm_id, alarm.type) return # If the coordinator is not available, fallback to database non-locking # mechanism in order to support aodh-evaluator active/active # deployment. 
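AlarmEvaluationService above drives both the evaluation loop and the coordination heartbeat with futurist periodic tasks executed on a thread pool, started from a daemon thread. A minimal, standalone sketch of that scheduling machinery; the intervals and task bodies here are placeholders.

import threading
import time
from concurrent import futures

from futurist import periodics


def executor_factory():
    return futures.ThreadPoolExecutor(max_workers=10)


worker = periodics.PeriodicWorker.create([], executor_factory=executor_factory)


@periodics.periodic(spacing=60.0, run_immediately=True)
def evaluate():
    print('would evaluate the assigned alarms here')


@periodics.periodic(spacing=15.0, run_immediately=True)
def heartbeat():
    print('would send a coordination heartbeat here')


worker.add(evaluate)
worker.add(heartbeat)

# Same pattern as the service: run the periodic worker in a daemon thread.
t = threading.Thread(target=worker.start)
t.daemon = True
t.start()
time.sleep(120)   # keep the sketch alive long enough to see a few runs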
if not self.partition_coordinator.is_active(): modified = self.storage_conn.conditional_update( sql_models.Alarm, {'evaluate_timestamp': timeutils.utcnow()}, { 'alarm_id': alarm.alarm_id, 'evaluate_timestamp': alarm.evaluate_timestamp }, ) if not modified: LOG.debug( 'Alarm %s has been already handled by another evaluator', alarm.alarm_id ) return LOG.debug('Evaluating alarm %s', alarm.alarm_id) try: self.evaluators[alarm.type].obj.evaluate(alarm) except Exception: LOG.exception('Failed to evaluate alarm %s', alarm.alarm_id) def _assigned_alarms(self): before = (timeutils.utcnow() - datetime.timedelta( seconds=self.conf.evaluator.evaluation_interval / 2)) selected = self.storage_conn.get_alarms( enabled=True, type={'ne': 'event'}, evaluate_timestamp={'lt': before}, ) if self.partition_coordinator.is_active(): all_alarm_ids = [a.alarm_id for a in selected] selected_ids = self.partition_coordinator.extract_my_subset( self.PARTITIONING_GROUP_NAME, all_alarm_ids ) selected = [a for a in selected if a.alarm_id in selected_ids] return selected ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/evaluator/composite.py0000664000175000017500000002203300000000000020030 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from oslo_log import log import stevedore from aodh import evaluator from aodh.evaluator import threshold LOG = log.getLogger(__name__) STATE_CHANGE = {evaluator.ALARM: 'outside their threshold.', evaluator.OK: 'inside their threshold.', evaluator.UNKNOWN: 'state evaluated to unknown.'} class RuleTarget(object): def __init__(self, rule, rule_evaluator, rule_name): self.rule = rule self.type = rule.get('type') self.rule_evaluator = rule_evaluator self.rule_name = rule_name self.state = None self.trending_state = None self.statistics = None self.evaluated = False def evaluate(self): # Evaluate a sub-rule of composite rule if not self.evaluated: LOG.debug('Evaluating %(type)s rule: %(rule)s', {'type': self.type, 'rule': self.rule}) try: self.state, self.trending_state, self.statistics, __, __ = \ self.rule_evaluator.evaluate_rule(self.rule) except threshold.InsufficientDataError as e: self.state = evaluator.UNKNOWN self.trending_state = None self.statistics = e.statistics self.evaluated = True class RuleEvaluationBase(object): def __init__(self, rule_target): self.rule_target = rule_target def __str__(self): return self.rule_target.rule_name class OkEvaluation(RuleEvaluationBase): def __bool__(self): self.rule_target.evaluate() return self.rule_target.state == evaluator.OK __nonzero__ = __bool__ class AlarmEvaluation(RuleEvaluationBase): def __bool__(self): self.rule_target.evaluate() return self.rule_target.state == evaluator.ALARM __nonzero__ = __bool__ class AndOp(object): def __init__(self, rule_targets): self.rule_targets = rule_targets def __bool__(self): return all(self.rule_targets) def __str__(self): return '(' + ' and '.join(map(str, self.rule_targets)) + ')' __nonzero__ = __bool__ class OrOp(object): def __init__(self, rule_targets): self.rule_targets = rule_targets def __bool__(self): return any(self.rule_targets) def __str__(self): return '(' + ' or '.join(map(str, self.rule_targets)) + ')' __nonzero__ = __bool__ class CompositeEvaluator(evaluator.Evaluator): def __init__(self, conf): super(CompositeEvaluator, self).__init__(conf) self.conf = conf self._threshold_evaluators = None self.rule_targets = [] self.rule_name_prefix = 'rule' self.rule_num = 0 @property def threshold_evaluators(self): if not self._threshold_evaluators: threshold_types = ('prometheus', 'gnocchi_resources_threshold', 'gnocchi_aggregation_by_metrics_threshold', 'gnocchi_aggregation_by_resources_threshold') self._threshold_evaluators = stevedore.NamedExtensionManager( 'aodh.evaluator', threshold_types, invoke_on_load=True, invoke_args=(self.conf,)) return self._threshold_evaluators def _parse_composite_rule(self, alarm_rule): """Parse the composite rule. The composite rule is assembled by sub threshold rules with 'and', 'or', the form can be nested. e.g. 
the form of composite rule can be like this: { "and": [threshold_rule0, threshold_rule1, {'or': [threshold_rule2, threshold_rule3, threshold_rule4, threshold_rule5]}] } """ if (isinstance(alarm_rule, dict) and len(alarm_rule) == 1 and list(alarm_rule)[0] in ('and', 'or')): and_or_key = list(alarm_rule)[0] if and_or_key == 'and': rules = (self._parse_composite_rule(r) for r in alarm_rule['and']) rules_alarm, rules_ok = zip(*rules) return AndOp(rules_alarm), OrOp(rules_ok) else: rules = (self._parse_composite_rule(r) for r in alarm_rule['or']) rules_alarm, rules_ok = zip(*rules) return OrOp(rules_alarm), AndOp(rules_ok) elif alarm_rule['type'] in self.threshold_evaluators: rule_evaluator = self.threshold_evaluators[alarm_rule['type']].obj self.rule_num += 1 name = self.rule_name_prefix + str(self.rule_num) rule = RuleTarget(alarm_rule, rule_evaluator, name) self.rule_targets.append(rule) return AlarmEvaluation(rule), OkEvaluation(rule) else: LOG.error("Invalid rule type: %s" % alarm_rule['type']) return False, False def _reason(self, alarm, new_state, rule_target_alarm): transition = alarm.state != new_state reason_data = { 'type': 'composite', 'composition_form': str(rule_target_alarm)} root_cause_rules = {} for rule in self.rule_targets: if rule.state == new_state: root_cause_rules.update({rule.rule_name: rule.rule}) reason_data.update(causative_rules=root_cause_rules) params = {'state': new_state, 'expression': str(rule_target_alarm), 'rules': ', '.join(sorted(root_cause_rules)), 'description': STATE_CHANGE[new_state]} if transition: reason = (('Composite rule alarm with composition form: ' '%(expression)s transition to %(state)s, due to ' 'rules: %(rules)s %(description)s') % params) else: reason = (('Composite rule alarm with composition form: ' '%(expression)s remaining as %(state)s, due to ' 'rules: %(rules)s %(description)s') % params) return reason, reason_data def _evaluate_sufficient(self, alarm, rule_target_alarm, rule_target_ok): # Some of evaluated rules are unknown states or trending states. for rule in self.rule_targets: if rule.trending_state is not None: if alarm.state == evaluator.UNKNOWN: rule.state = rule.trending_state elif rule.trending_state == evaluator.ALARM: rule.state = evaluator.OK elif rule.trending_state == evaluator.OK: rule.state = evaluator.ALARM else: rule.state = alarm.state alarm_triggered = bool(rule_target_alarm) if alarm_triggered: reason, reason_data = self._reason(alarm, evaluator.ALARM, rule_target_alarm) self._refresh(alarm, evaluator.ALARM, reason, reason_data) return True ok_result = bool(rule_target_ok) if ok_result: reason, reason_data = self._reason(alarm, evaluator.OK, rule_target_alarm) self._refresh(alarm, evaluator.OK, reason, reason_data) return True return False def evaluate(self, alarm): if not self.within_time_constraint(alarm): LOG.debug('Attempted to evaluate alarm %s, but it is not ' 'within its time constraint.', alarm.alarm_id) return LOG.debug("Evaluating composite rule alarm %s ...", alarm.alarm_id) self.rule_targets = [] self.rule_num = 0 rule_target_alarm, rule_target_ok = self._parse_composite_rule( alarm.rule) sufficient = self._evaluate_sufficient(alarm, rule_target_alarm, rule_target_ok) if not sufficient: for rule in self.rule_targets: rule.evaluate() sufficient = self._evaluate_sufficient(alarm, rule_target_alarm, rule_target_ok) if not sufficient: # The following unknown situations is like these: # 1. 'unknown' and 'alarm' # 2. 
'unknown' or 'ok' reason, reason_data = self._reason(alarm, evaluator.UNKNOWN, rule_target_alarm) if alarm.state != evaluator.UNKNOWN: self._refresh(alarm, evaluator.UNKNOWN, reason, reason_data) else: LOG.debug(reason) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/evaluator/event.py0000664000175000017500000002260100000000000017150 0ustar00zuulzuul00000000000000# # Copyright 2015 NEC Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import fnmatch import json import operator from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils from aodh import evaluator LOG = log.getLogger(__name__) COMPARATORS = { 'gt': operator.gt, 'lt': operator.lt, 'ge': operator.ge, 'le': operator.le, 'eq': operator.eq, 'ne': operator.ne, } OPTS = [ cfg.IntOpt('event_alarm_cache_ttl', default=60, help='TTL of event alarm caches, in seconds. ' 'Set to 0 to disable caching.'), ] def _sanitize_trait_value(value, trait_type): if trait_type in (2, 'integer'): return int(value) elif trait_type in (3, 'float'): return float(value) elif trait_type in (4, 'datetime'): return timeutils.normalize_time(timeutils.parse_isotime(value)) else: return str(value) class InvalidEvent(Exception): """Error raised when the received event is missing mandatory fields.""" class Event(object): """Wrapped event object to hold converted values for this evaluator.""" TRAIT_FIELD = 0 TRAIT_TYPE = 1 TRAIT_VALUE = 2 def __init__(self, event): self.obj = event self._validate() self.id = event.get('message_id') self._parse_traits() def _validate(self): """Validate received event has mandatory parameters.""" if not self.obj: LOG.error('Received invalid event (empty or None)') raise InvalidEvent() if not self.obj.get('event_type'): LOG.error('Failed to extract event_type from event = %s', self.obj) raise InvalidEvent() if not self.obj.get('message_id'): LOG.error('Failed to extract message_id from event = %s', self.obj) raise InvalidEvent() def _parse_traits(self): self.traits = {} self.project = '' for t in self.obj.get('traits', []): k = t[self.TRAIT_FIELD] v = _sanitize_trait_value(t[self.TRAIT_VALUE], t[self.TRAIT_TYPE]) self.traits[k] = v if k in ('tenant_id', 'project_id'): self.project = v def get_value(self, field): if field.startswith('traits.'): key = field.split('.', 1)[-1] return self.traits.get(key) v = self.obj for f in field.split('.'): if hasattr(v, 'get'): v = v.get(f) else: return None return v class Alarm(object): """Wrapped alarm object to hold converted values for this evaluator.""" TRAIT_TYPES = { 'none': 0, 'string': 1, 'integer': 2, 'float': 3, 'datetime': 4, } def __init__(self, alarm): self.obj = alarm self.id = alarm.alarm_id self._parse_query() def _parse_query(self): self.query = [] for q in self.obj.rule.get('query', []): if not q['field'].startswith('traits.'): self.query.append(q) continue type_num = self.TRAIT_TYPES[q.get('type') or 'string'] field = q['field'] value = _sanitize_trait_value(q.get('value'), type_num) 
op = COMPARATORS[q.get('op', 'eq')] self.query.append({'field': field, 'value': value, 'op': op}) def fired_and_no_repeat(self): return (not self.obj.repeat_actions and self.obj.state == evaluator.ALARM) def event_type_to_watch(self, event_type): return fnmatch.fnmatch(event_type, self.obj.rule['event_type']) class EventAlarmEvaluator(evaluator.Evaluator): def __init__(self, conf): super(EventAlarmEvaluator, self).__init__(conf) self.caches = {} def evaluate_events(self, events): """Evaluate the events by referring related alarms.""" if not isinstance(events, list): events = [events] LOG.debug('Starting event alarm evaluation: #events = %d', len(events)) for e in events: LOG.debug('Evaluating event: event = %s', e) try: event = Event(e) except InvalidEvent: LOG.warning('Event <%s> is invalid, aborting evaluation ' 'for it.', e) continue for id, alarm in self._get_project_alarms(event.project).items(): try: self._evaluate_alarm(alarm, event) except Exception: LOG.exception('Failed to evaluate alarm (id=%(a)s) ' 'triggered by event = %(e)s.', {'a': id, 'e': e}) LOG.debug('Finished event alarm evaluation.') def _get_project_alarms(self, project): if self.conf.event_alarm_cache_ttl and project in self.caches: if timeutils.is_older_than(self.caches[project]['updated'], self.conf.event_alarm_cache_ttl): del self.caches[project] else: return self.caches[project]['alarms'] # TODO(r-mibu): Implement "changes-since" at the storage API and make # this function update only alarms changed from the last access. alarms = {a.alarm_id: Alarm(a) for a in self._storage_conn.get_alarms(enabled=True, type='event', project_id=project)} if self.conf.event_alarm_cache_ttl: self.caches[project] = { 'alarms': alarms, 'updated': timeutils.utcnow() } return alarms def _evaluate_alarm(self, alarm, event): """Evaluate the alarm by referring the received event. This function compares each condition of the alarm on the assumption that all conditions are combined by AND operator. When the received event met conditions defined in alarm 'event_type' and 'query', the alarm will be fired and updated to state='alarm' (alarmed). Note: by this evaluator, the alarm won't be changed to state='ok' nor state='insufficient data'. 
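Illustrative example (not taken from this module): an alarm whose rule is
{'event_type': 'compute.instance.update',
 'query': [{'field': 'traits.state', 'op': 'eq',
            'value': 'error', 'type': 'string'}]}
fires as soon as a compute.instance.update event arrives whose 'state'
trait equals 'error', while an event whose 'state' trait is 'active'
leaves the alarm untouched.  Because event_type is matched with fnmatch,
a wildcard pattern such as 'compute.instance.*' is also accepted.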
""" LOG.debug('Evaluating alarm (id=%(a)s) triggered by event ' '(message_id=%(e)s).', {'a': alarm.id, 'e': event.id}) if alarm.fired_and_no_repeat(): LOG.debug('Skip evaluation of the alarm id=%s which have already ' 'fired.', alarm.id) return if not alarm.event_type_to_watch(event.obj['event_type']): LOG.debug('Aborting evaluation of the alarm (id=%s) since ' 'event_type is not matched.', alarm.id) return def _compare(condition): v = event.get_value(condition['field']) LOG.debug('Comparing value=%(v)s against condition=%(c)s .', {'v': v, 'c': condition}) return condition['op'](v, condition['value']) for condition in alarm.query: if not _compare(condition): LOG.info('Aborting evaluation of the alarm %s due to ' 'unmet condition=%s .', alarm.id, condition) return LOG.info('Triggering the alarm %s by event for project %s, ' 'event_type: %s', alarm.id, event.project, event.obj.get('event_type')) self._fire_alarm(alarm, event) def _fire_alarm(self, alarm, event): """Update alarm state and fire alarm via alarm notifier.""" state = evaluator.ALARM reason = (('Event hits the ' 'query .') % {'id': event.id, 'event_type': event.get_value('event_type'), 'alarm_query': json.dumps(alarm.obj.rule['query'], sort_keys=True)}) reason_data = {'type': 'event', 'event': event.obj} always_record = alarm.obj.repeat_actions self._refresh(alarm.obj, state, reason, reason_data, always_record) def _refresh(self, alarm, state, reason, reason_data, always_record): super(EventAlarmEvaluator, self)._refresh(alarm, state, reason, reason_data, always_record) project = alarm.project_id if self.conf.event_alarm_cache_ttl and project in self.caches: self.caches[project]['alarms'][alarm.alarm_id].obj.state = state # NOTE(r-mibu): This method won't be used, but we have to define here in # order to overwrite the abstract method in the super class. # TODO(r-mibu): Change the base (common) class design for evaluators. def evaluate(self, alarm): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/evaluator/gnocchi.py0000664000175000017500000002011400000000000017436 0ustar00zuulzuul00000000000000# # Copyright 2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import json from gnocchiclient import client from gnocchiclient import exceptions from oslo_log import log from aodh.evaluator import threshold from aodh import keystone_client LOG = log.getLogger(__name__) # The list of points that Gnocchi API returned is composed # of tuples with (timestamp, granularity, value) GRANULARITY = 1 VALUE = 2 class GnocchiBase(threshold.ThresholdEvaluator): def __init__(self, conf): super(GnocchiBase, self).__init__(conf) self._gnocchi_client = client.Client( '1', keystone_client.get_session(conf), adapter_options={ 'interface': conf.service_credentials.interface, 'region_name': conf.service_credentials.region_name}) @staticmethod def _sanitize(rule, statistics): """Return the datapoints that correspond to the alarm granularity""" # TODO(sileht): if there's no direct match, but there is an archive # policy with granularity that's an even divisor or the period, # we could potentially do a mean-of-means (or max-of-maxes or whatever, # but not a stddev-of-stddevs). # TODO(sileht): support alarm['exclude_outliers'] LOG.debug('sanitize stats %s', statistics) # NOTE(jamespage) # Dynamic Aggregates are returned in a dict struct so # check for this first. if isinstance(statistics, dict): # Pop array of measures from aggregated subdict statistics = statistics['measures']['aggregated'] statistics = [stats[VALUE] for stats in statistics if stats[GRANULARITY] == rule['granularity']] if not statistics: raise threshold.InsufficientDataError( "No datapoint for granularity %s" % rule['granularity'], []) statistics = statistics[-rule['evaluation_periods']:] LOG.debug('pruned statistics to %d', len(statistics)) return statistics class GnocchiResourceThresholdEvaluator(GnocchiBase): def _statistics(self, rule, start, end): try: return self._gnocchi_client.metric.get_measures( metric=rule['metric'], granularity=rule['granularity'], start=start, stop=end, resource_id=rule['resource_id'], aggregation=rule['aggregation_method']) except exceptions.MetricNotFound: raise threshold.InsufficientDataError( 'metric %s for resource %s does not exists' % (rule['metric'], rule['resource_id']), []) except exceptions.ResourceNotFound: raise threshold.InsufficientDataError( 'resource %s does not exists' % rule['resource_id'], []) except exceptions.NotFound: # TODO(sileht): gnocchiclient should raise a explicit # exception for AggregationNotFound, this API endpoint # can only raise 3 different 404, so we are safe to # assume this is an AggregationNotFound for now. raise threshold.InsufficientDataError( 'aggregation %s does not exist for ' 'metric %s of resource %s' % (rule['aggregation_method'], rule['metric'], rule['resource_id']), []) except Exception as e: msg = 'alarm statistics retrieval failed: %s' % e LOG.warning(msg) raise threshold.InsufficientDataError(msg, []) class GnocchiAggregationMetricsThresholdEvaluator(GnocchiBase): def _statistics(self, rule, start, end): try: _operations = [ 'aggregate', rule['aggregation_method'] ] for metric in rule['metrics']: _operations.append( [ 'metric', metric, rule['aggregation_method'].lstrip('rate:') ] ) # FIXME(sileht): In case of a heat autoscaling stack decide to # delete an instance, the gnocchi metrics associated to this # instance will be no more updated and when the alarm will ask # for the aggregation, gnocchi will raise a 'No overlap' # exception. # So temporary set 'needed_overlap' to 0 to disable the # gnocchi checks about missing points. 
For more detail see: # https://bugs.launchpad.net/gnocchi/+bug/1479429 return self._gnocchi_client.aggregates.fetch( operations=_operations, granularity=rule['granularity'], start=start, stop=end, needed_overlap=0) except exceptions.MetricNotFound: raise threshold.InsufficientDataError( 'At least of metrics in %s does not exist' % rule['metrics'], []) except exceptions.NotFound: # TODO(sileht): gnocchiclient should raise a explicit # exception for AggregationNotFound, this API endpoint # can only raise 3 different 404, so we are safe to # assume this is an AggregationNotFound for now. raise threshold.InsufficientDataError( 'aggregation %s does not exist for at least one ' 'metrics in %s' % (rule['aggregation_method'], rule['metrics']), []) except Exception as e: msg = 'alarm statistics retrieval failed: %s' % e LOG.warning(msg) raise threshold.InsufficientDataError(msg, []) class GnocchiAggregationResourcesThresholdEvaluator(GnocchiBase): def _statistics(self, rule, start, end): try: # FIXME(sileht): In case of a heat autoscaling stack decide to # delete an instance, the gnocchi metrics associated to this # instance will be no more updated and when the alarm will ask # for the aggregation, gnocchi will raise a 'No overlap' # exception. # So temporary set 'needed_overlap' to 0 to disable the # gnocchi checks about missing points. For more detail see: # https://bugs.launchpad.net/gnocchi/+bug/1479429 return self._gnocchi_client.aggregates.fetch( operations=[ 'aggregate', rule['aggregation_method'], [ 'metric', rule['metric'], rule['aggregation_method'].lstrip('rate:') ] ], granularity=rule['granularity'], search=json.loads(rule['query']), resource_type=rule["resource_type"], start=start, stop=end, needed_overlap=0) except exceptions.MetricNotFound: raise threshold.InsufficientDataError( 'metric %s does not exists' % rule['metric'], []) except exceptions.NotFound: # TODO(sileht): gnocchiclient should raise a explicit # exception for AggregationNotFound, this API endpoint # can only raise 3 different 404, so we are safe to # assume this is an AggregationNotFound for now. raise threshold.InsufficientDataError( 'aggregation %s does not exist for at least one ' 'metric of the query' % rule['aggregation_method'], []) except Exception as e: msg = 'alarm statistics retrieval failed: %s' % e LOG.warning(msg) raise threshold.InsufficientDataError(msg, []) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/evaluator/loadbalancer.py0000664000175000017500000001256500000000000020446 0ustar00zuulzuul00000000000000# Copyright 2019 Catalyst Cloud Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
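# Illustrative example (an assumption, not taken from this module): a
# loadbalancer_member_health alarm rule identifies the Octavia pool to watch
# and the Heat stack/autoscaling group that the alarm action is expected to
# heal.  The UUIDs are placeholders.
#
#   example_rule = {
#       "pool_id": "<octavia pool uuid>",
#       "stack_id": "<heat stack uuid>",
#       "autoscaling_group_id": "<heat autoscaling group uuid>",
#   }
#
# The evaluator below lists the pool members, skips members created less than
# member_creation_time seconds ago, and raises the alarm when any remaining
# member is admin_state_up with operating_status == "ERROR".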
import datetime from dateutil import parser from octaviaclient.api.v2 import octavia from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils from aodh import evaluator from aodh.evaluator import threshold from aodh import keystone_client as aodh_keystone LOG = log.getLogger(__name__) ALARM_TYPE = "loadbalancer_member_health" OPTS = [ cfg.IntOpt('member_creation_time', default=120, help='The time in seconds to wait for the load balancer ' 'member creation.' ), ] class LoadBalancerMemberHealthEvaluator(evaluator.Evaluator): def __init__(self, conf): super(LoadBalancerMemberHealthEvaluator, self).__init__(conf) self._lb_client = None @property def lb_client(self): if self._lb_client is None: endpoint = aodh_keystone.url_for( self.conf, service_type='load-balancer', interface=self.conf.service_credentials.interface, region_name=self.conf.service_credentials.region_name ) self._lb_client = octavia.OctaviaAPI( session=aodh_keystone.get_session(self.conf), service_type='load-balancer', endpoint=endpoint ) return self._lb_client def _get_unhealthy_members(self, pool_id): """Get number of unhealthy members in a pool. The member(virutual machine) operating_status keeps ERROR after creation before the application is up and running inside, it should be ignored during the check. """ unhealthy_members = [] try: ret = self.lb_client.member_list(pool_id) except Exception as e: LOG.warning("Failed to communicate with load balancing service, " "error: %s", str(e)) raise threshold.InsufficientDataError( 'failed to communicate with load balancing service', [] ) if getattr(ret, 'status_code', None): # Some error happened raise threshold.InsufficientDataError(ret.content, []) for m in ret.get("members", []): try: created_time = parser.parse(m['created_at'], ignoretz=True) except ValueError: LOG.warning('Failed to parse the member created time.') continue now = timeutils.utcnow() t = self.conf.member_creation_time if now - created_time < datetime.timedelta(seconds=t): LOG.debug("Ignore member which was created within %ss", t) continue if m["admin_state_up"] and m["operating_status"] == "ERROR": unhealthy_members.append(m) return unhealthy_members def _transition_alarm(self, alarm, new_state, members, count, unknown_reason, pool_id=None, stack_id=None, asg_id=None): transition = alarm.state != new_state last = members[-1] if members else None reason_data = { 'type': ALARM_TYPE, 'count': count, 'most_recent': last, 'unhealthy_members': members, "pool_id": pool_id, "stack_id": stack_id, "asg_id": asg_id } if transition: reason = ('Transition to %(state)s due to %(count)d members' ' unhealthy, most recent: %(most_recent)s' % dict(state=new_state, count=count, most_recent=last)) else: reason = ('Remaining as %(state)s' % dict(state=new_state)) reason = unknown_reason or reason # Refresh and trigger alarm based on state transition. 
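# For example (illustrative values), an OK -> alarm transition is recorded
# with a reason such as "Transition to alarm due to 2 members unhealthy,
# most recent: {'name': 'member-1', 'operating_status': 'ERROR', ...}"
# before notifications are sent.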
self._refresh(alarm, new_state, reason, reason_data) def evaluate(self, alarm): if not self.within_time_constraint(alarm): LOG.debug('Attempted to evaluate alarm %s, but it is not ' 'within its time constraint.', alarm.alarm_id) return LOG.debug("Evaluating %s rule alarm %s ...", ALARM_TYPE, alarm.alarm_id) pool_id = alarm.rule["pool_id"] error_mems = [] try: error_mems = self._get_unhealthy_members(pool_id) except threshold.InsufficientDataError as e: evaluation = (evaluator.UNKNOWN, e.statistics, 0, e.reason) else: state = evaluator.ALARM if len(error_mems) > 0 else evaluator.OK evaluation = (state, error_mems, len(error_mems), None) self._transition_alarm(alarm, *evaluation, pool_id=pool_id, stack_id=alarm.rule.get("stack_id"), asg_id=alarm.rule.get("autoscaling_group_id")) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/evaluator/prometheus.py0000664000175000017500000000541500000000000020226 0ustar00zuulzuul00000000000000# # Copyright 2023 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log from observabilityclient import client from aodh.evaluator import threshold from aodh import keystone_client LOG = log.getLogger(__name__) OPTS = [ cfg.BoolOpt('prometheus_disable_rbac', default=False, help='Disable RBAC for Prometheus evaluator.'), ] class PrometheusBase(threshold.ThresholdEvaluator): def __init__(self, conf): super(PrometheusBase, self).__init__(conf) self._set_obsclient(conf) self._no_rbac = conf.prometheus_disable_rbac def _set_obsclient(self, conf): session = keystone_client.get_session(conf) opts = {'interface': conf.service_credentials.interface, 'region_name': conf.service_credentials.region_name} self._prom = client.Client('1', session, adapter_options=opts) def _get_metric_data(self, query): LOG.debug(f'Querying Prometheus instance on: {query}') return self._prom.query.query(query, disable_rbac=self._no_rbac) class PrometheusEvaluator(PrometheusBase): def _sanitize(self, metric_data): sanitized = [float(m.value) for m in metric_data] LOG.debug(f'Sanited Prometheus metric data: {metric_data}' f' to statistics: {sanitized}') return sanitized def evaluate_rule(self, alarm_rule): """Evaluate alarm rule. 
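Illustrative example (an assumption, not taken from this module):
alarm_rule['query'] holds a PromQL expression such as
'ceilometer_memory_usage{resource_name="my-instance"}'; every sample
value returned by Prometheus is converted to float by _sanitize() and
then compared against the rule threshold by _process_statistics().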
:returns: state, trending state, statistics, number of samples outside threshold and reason """ metrics = self._get_metric_data(alarm_rule['query']) if not metrics: LOG.warning("Empty result fetched from Prometheus for query" f" {alarm_rule['query']}") statistics = self._sanitize(metrics) if not statistics: raise threshold.InsufficientDataError('datapoints are unknown', statistics) return self._process_statistics(alarm_rule, statistics) def _unknown_reason_data(self, alarm, statistics): LOG.warning(f'Transfering alarm {alarm} on unknown reason') last = None if not statistics else statistics[-1] return self._reason_data('unknown', len(statistics), last) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/evaluator/threshold.py0000664000175000017500000001564400000000000020034 0ustar00zuulzuul00000000000000# # Copyright 2013-2015 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import operator from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils from aodh import evaluator LOG = log.getLogger(__name__) COMPARATORS = { 'gt': operator.gt, 'lt': operator.lt, 'ge': operator.ge, 'le': operator.le, 'eq': operator.eq, 'ne': operator.ne, } OPTS = [ cfg.IntOpt('additional_ingestion_lag', min=0, default=0, help='The number of seconds to extend the evaluation windows ' 'to compensate the reporting/ingestion lag.') ] class InsufficientDataError(Exception): def __init__(self, reason, statistics): self.reason = reason self.statistics = statistics super(InsufficientDataError, self).__init__(reason) class ThresholdEvaluator(evaluator.Evaluator): # the sliding evaluation window is extended to allow # the reporting/ingestion lag this can be increased # with 'additional_ingestion_lag' seconds if needed. 
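# For example (illustrative numbers): with granularity=300,
# evaluation_periods=3, look_back=1 and additional_ingestion_lag=30 the
# window computed by _bound_duration() below is 300 * (3 + 1) + 30 = 1230
# seconds, ending at the current time.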
look_back = 1 def _bound_duration(self, rule): """Bound the duration of the statistics query.""" now = timeutils.utcnow() # when exclusion of weak datapoints is enabled, we extend # the look-back period so as to allow a clearer sample count # trend to be established window = ((rule.get('period', None) or rule['granularity']) * (rule['evaluation_periods'] + self.look_back) + self.conf.additional_ingestion_lag) start = now - datetime.timedelta(seconds=window) LOG.debug('query stats from %(start)s to ' '%(now)s', {'start': start, 'now': now}) return start.isoformat(), now.isoformat() @staticmethod def _reason_data(disposition, count, most_recent): """Create a reason data dictionary for this evaluator type.""" return {'type': 'threshold', 'disposition': disposition, 'count': count, 'most_recent': most_recent} @classmethod def _reason(cls, alarm, statistics, state, count): """Fabricate reason string.""" if state == evaluator.OK: disposition = 'inside' count = len(statistics) - count else: disposition = 'outside' last = statistics[-1] if statistics else None transition = alarm.state != state reason_data = cls._reason_data(disposition, count, last) if transition: return ('Transition to %(state)s due to %(count)d samples' ' %(disposition)s threshold, most recent:' ' %(most_recent)s' % dict(reason_data, state=state), reason_data) return ('Remaining as %(state)s due to %(count)d samples' ' %(disposition)s threshold, most recent: %(most_recent)s' % dict(reason_data, state=state), reason_data) def _process_statistics(self, alarm_rule, statistics): def _compare(value): op = COMPARATORS[alarm_rule['comparison_operator']] limit = alarm_rule['threshold'] LOG.debug('comparing value %(value)s against threshold' ' %(limit)s', {'value': value, 'limit': limit}) return op(value, limit) compared = list(map(_compare, statistics)) distilled = all(compared) unequivocal = distilled or not any(compared) number_outside = len([c for c in compared if c]) if unequivocal: state = evaluator.ALARM if distilled else evaluator.OK return state, None, statistics, number_outside, None else: trending_state = evaluator.ALARM if compared[-1] else evaluator.OK return None, trending_state, statistics, number_outside, None def evaluate_rule(self, alarm_rule): """Evaluate alarm rule. :returns: state, trending state and statistics. """ start, end = self._bound_duration(alarm_rule) statistics = self._statistics(alarm_rule, start, end) statistics = self._sanitize(alarm_rule, statistics) sufficient = len(statistics) >= alarm_rule['evaluation_periods'] if not sufficient: raise InsufficientDataError( '%d datapoints are unknown' % alarm_rule['evaluation_periods'], statistics) return self._process_statistics(alarm_rule, statistics) def _unknown_reason_data(self, alarm, statistics): LOG.warning(f'Expecting {alarm.rule["evaluation_periods"]} datapoints' f' but only get {len(statistics)}') # Reason is not same as log message because we want to keep # consistent since thirdparty software may depend on old format. 
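# For example (illustrative): with evaluation_periods=3 and a single
# retrieved datapoint of 41.0, the reason data returned below is
# {'type': 'threshold', 'disposition': 'unknown', 'count': 3,
#  'most_recent': 41.0}.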
last = None if not statistics else statistics[-1] return self._reason_data('unknown', alarm.rule['evaluation_periods'], last) def _transition_alarm(self, alarm, state, trending_state, statistics, outside_count, unknown_reason): unknown = alarm.state == evaluator.UNKNOWN continuous = alarm.repeat_actions if trending_state: if unknown or continuous: state = trending_state if unknown else alarm.state reason, reason_data = self._reason(alarm, statistics, state, outside_count) self._refresh(alarm, state, reason, reason_data) return if state == evaluator.UNKNOWN and not unknown: reason_data = self._unknown_reason_data(alarm, statistics) self._refresh(alarm, state, unknown_reason, reason_data) elif state and (alarm.state != state or continuous): reason, reason_data = self._reason(alarm, statistics, state, outside_count) self._refresh(alarm, state, reason, reason_data) def evaluate(self, alarm): if not self.within_time_constraint(alarm): LOG.debug('Attempted to evaluate alarm %s, but it is not ' 'within its time constraint.', alarm.alarm_id) return try: evaluation = self.evaluate_rule(alarm.rule) except InsufficientDataError as e: evaluation = (evaluator.UNKNOWN, None, e.statistics, 0, e.reason) self._transition_alarm(alarm, *evaluation) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/evaluator/utils.py0000664000175000017500000000317500000000000017174 0ustar00zuulzuul00000000000000# # Copyright 2014 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import math def mean(s, key=lambda x: x): """Calculate the mean of a numeric list.""" count = float(len(s)) if count: return math.fsum(map(key, s)) / count return 0.0 def deltas(s, key, m=None): """Calculate the squared distances from mean for a numeric list.""" m = m or mean(s, key) return [(key(i) - m) ** 2 for i in s] def variance(s, key, m=None): """Calculate the variance of a numeric list.""" return mean(deltas(s, key, m)) def stddev(s, key, m=None): """Calculate the standard deviation of a numeric list.""" return math.sqrt(variance(s, key, m)) def outside(s, key, lower=0.0, upper=0.0): """Determine if value falls outside upper and lower bounds.""" v = key(s) return v < lower or v > upper def anomalies(s, key, lower=0.0, upper=0.0): """Separate anomalous data points from the in-liers.""" inliers = [] outliers = [] for i in s: if outside(i, key, lower, upper): outliers.append(i) else: inliers.append(i) return inliers, outliers ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/event.py0000664000175000017500000000465000000000000015152 0ustar00zuulzuul00000000000000# # Copyright 2015 NEC Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import cotyledon from oslo_config import cfg from oslo_log import log import oslo_messaging from aodh.evaluator import event from aodh import messaging from aodh import storage LOG = log.getLogger(__name__) OPTS = [ cfg.StrOpt('event_alarm_topic', default='alarm.all', deprecated_group='DEFAULT', help='The topic that aodh uses for event alarm evaluation.'), cfg.IntOpt('batch_size', default=1, help='Number of notification messages to wait before ' 'dispatching them.'), cfg.IntOpt('batch_timeout', help='Number of seconds to wait before dispatching samples ' 'when batch_size is not reached (None means indefinitely).'), ] class EventAlarmEndpoint(object): def __init__(self, evaluator): self.evaluator = evaluator def sample(self, notifications): LOG.debug('Received %s messages in batch.', len(notifications)) for notification in notifications: self.evaluator.evaluate_events(notification['payload']) class EventAlarmEvaluationService(cotyledon.Service): def __init__(self, worker_id, conf): super(EventAlarmEvaluationService, self).__init__(worker_id) self.conf = conf self.storage_conn = storage.get_connection_from_config(self.conf) self.evaluator = event.EventAlarmEvaluator(self.conf) self.listener = messaging.get_batch_notification_listener( messaging.get_transport(self.conf), [oslo_messaging.Target( topic=self.conf.listener.event_alarm_topic)], [EventAlarmEndpoint(self.evaluator)], False, self.conf.listener.batch_size, self.conf.listener.batch_timeout) self.listener.start() def terminate(self): self.listener.stop() self.listener.wait() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/i18n.py0000664000175000017500000000204100000000000014600 0ustar00zuulzuul00000000000000# Copyright 2014 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """oslo.i18n integration module. 
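A minimal usage sketch (illustrative): importing the primary translator
with 'from aodh.i18n import _' lets code build translatable messages,
e.g. _('Alarm %s not found') % alarm_id, and translate() can later render
such a message in the locale requested by an API client.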
See https://docs.openstack.org/oslo.i18n/latest/user/usage.html """ import oslo_i18n DOMAIN = 'aodh' _translators = oslo_i18n.TranslatorFactory(domain=DOMAIN) # The primary translation function using the well-known name "_" _ = _translators.primary def translate(value, user_locale): return oslo_i18n.translate(value, user_locale) def get_available_languages(): return oslo_i18n.get_available_languages(DOMAIN) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/keystone_client.py0000664000175000017500000001140500000000000017224 0ustar00zuulzuul00000000000000# # Copyright 2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from heatclient import client as heatclient from keystoneauth1 import exceptions as ka_exception from keystoneauth1.identity.generic import password from keystoneauth1 import loading as ka_loading from keystoneclient.v3 import client as ks_client_v3 from oslo_config import cfg CFG_GROUP = "service_credentials" def get_session(conf): """Get an aodh service credentials auth session.""" auth_plugin = ka_loading.load_auth_from_conf_options(conf, CFG_GROUP) return ka_loading.load_session_from_conf_options( conf, CFG_GROUP, auth=auth_plugin ) def get_client(conf): """Return a client for keystone v3 endpoint.""" sess = get_session(conf) return ks_client_v3.Client(session=sess) def get_trusted_client(conf, trust_id): # Ideally we would use load_session_from_conf_options, but we can't do that # *and* specify a trust, so let's create the object manually. 
auth_plugin = password.Password( username=conf[CFG_GROUP].username, password=conf[CFG_GROUP].password, auth_url=conf[CFG_GROUP].auth_url, user_domain_id=conf[CFG_GROUP].user_domain_id, user_domain_name=conf[CFG_GROUP].user_domain_name, trust_id=trust_id) sess = ka_loading.load_session_from_conf_options(conf, CFG_GROUP, auth=auth_plugin) return ks_client_v3.Client(session=sess) def get_auth_token(client): return client.session.auth.get_access(client.session).auth_token def get_client_on_behalf_user(conf, auth_plugin): """Return a client for keystone v3 endpoint.""" sess = ka_loading.load_session_from_conf_options(conf, CFG_GROUP, auth=auth_plugin) return ks_client_v3.Client(session=sess) def create_trust_id(conf, trustor_user_id, trustor_project_id, roles, auth_plugin): """Create a new trust using the aodh service user.""" admin_client = get_client(conf) trustee_user_id = admin_client.session.get_user_id() client = get_client_on_behalf_user(conf, auth_plugin) trust = client.trusts.create(trustor_user=trustor_user_id, trustee_user=trustee_user_id, project=trustor_project_id, impersonation=True, role_names=roles) return trust.id def delete_trust_id(conf, trust_id, auth_plugin): """Delete a trust previously setup for the aodh user.""" client = get_client_on_behalf_user(conf, auth_plugin) try: client.trusts.delete(trust_id) except ka_exception.NotFound: pass def url_for(conf, **kwargs): sess = get_session(conf) return sess.get_endpoint(**kwargs) def get_heat_client_from_trust(conf, trust_id): ks_client = get_trusted_client(conf, trust_id) sess = ks_client.session endpoint = sess.get_endpoint( service_type='orchestration', interface=conf.service_credentials.interface, region_name=conf.service_credentials.region_name ) return heatclient.Client("1", endpoint=endpoint, session=sess) OPTS = [ cfg.StrOpt('region-name', default=os.environ.get('OS_REGION_NAME'), deprecated_name="os-region-name", help='Region name to use for OpenStack service endpoints.'), cfg.StrOpt('interface', default=os.environ.get( 'OS_INTERFACE', os.environ.get('OS_ENDPOINT_TYPE', 'public')), deprecated_name="os-endpoint-type", choices=('public', 'internal', 'admin', 'auth', 'publicURL', 'internalURL', 'adminURL'), help='Type of endpoint in Identity service catalog to use for ' 'communication with OpenStack services.'), ] def register_keystoneauth_opts(conf): ka_loading.register_auth_conf_options(conf, CFG_GROUP) ka_loading.register_session_conf_options( conf, CFG_GROUP, deprecated_opts={'cacert': [ cfg.DeprecatedOpt('os-cacert', group=CFG_GROUP), cfg.DeprecatedOpt('os-cacert', group="DEFAULT")] }) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0510082 aodh-19.0.0/aodh/locale/0000775000175000017500000000000000000000000014711 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0470076 aodh-19.0.0/aodh/locale/de/0000775000175000017500000000000000000000000015301 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0750122 aodh-19.0.0/aodh/locale/de/LC_MESSAGES/0000775000175000017500000000000000000000000017066 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/locale/de/LC_MESSAGES/aodh.po0000664000175000017500000001041600000000000020343 0ustar00zuulzuul00000000000000# OpenStack Infra , 2015. 
#zanata # Tom Cocozzello , 2015. #zanata # Andreas Jaeger , 2016. #zanata # Robert Simai , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: aodh VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2021-09-15 16:30+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-10-07 06:30+0000\n" "Last-Translator: Robert Simai \n" "Language-Team: German\n" "Language: de\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" #, python-format msgid "%(name)s count exceeds maximum value %(maximum)d" msgstr "%(name)s Anzahl überschreitet den Maximalwert %(maximum)d" #, python-format msgid "%(rule)s must be set for %(type)s type alarm" msgstr "%(rule)s muss für den Alarmtyp %(type)s gesetzt sein" #, python-format msgid "%(rule1)s and %(rule2)s cannot be set at the same time" msgstr "%(rule1)s und %(rule2)s können nicht gleichzeitig festgelegt werden" #, python-format msgid "%s is not JSON serializable" msgstr "%s ist nicht JSON-serialisierbar" #, python-format msgid "Alarm %(alarm_id)s not found in project %(project)s" msgstr "Alarm %(alarm_id)s nicht gefunden in Projekt %(project)s" #, python-format msgid "Alarm %s not found" msgstr "Alarm %s nicht gefunden" msgid "Alarm incorrect" msgstr "Alaram inkorrekt" #, python-format msgid "Alarm quota exceeded for user %(u)s on project %(p)s" msgstr "Alarmquote überschritten für Benutzer %(u)s bei Projekt %(p)s" #, python-format msgid "Alarm when %s event occurred." msgstr "Alarm wenn %s Ereignis auftritt." #, python-format msgid "Failed to parse the timestamp value %s" msgstr "Zeitmarkenwert %s konnte nicht analysiert werden" #, python-format msgid "Filter expression not valid: %s" msgstr "Filterausdruck nicht gültig: %s" msgid "Limit should be positive" msgstr "Begrenzung muss positiv sein" #, python-format msgid "Not Authorized to access %(aspect)s %(id)s" msgstr "Nicht berechtigt für den Zugriff auf %(aspect)s %(id)s" #, python-format msgid "" "Notifying alarm %(alarm_name)s %(alarm_id)s of %(severity)s priority from " "%(previous)s to %(current)s with action %(action)s because %(reason)s." msgstr "" "Benachrichtigung von Alarm %(alarm_name)s %(alarm_id)s mit Priorität " "%(severity)s von %(previous)s in %(current)s mit Aktion %(action)s wegen " "%(reason)s." #, python-format msgid "Order-by expression not valid: %s" msgstr "Ausdruck für 'Sortieren nach' nicht gültig: %s" #, python-format msgid "" "The data type %(type)s is not supported. The supported data type list is: " "%(supported)s" msgstr "" "Der Datentyp %(type)s wird nicht unterstützt. Die Liste der unterstützten " "Datentypen lautet: %(supported)s" msgid "Threshold rules should be combined with \"and\" or \"or\"" msgstr "Schwellenregeln sollten mit \"und\" oder \"oder\" kombiniert werden." msgid "Time constraint names must be unique for a given alarm." msgstr "Zeitvorgabennamen müssen für einen angegebenen Alarm eindeutig sein." #, python-format msgid "Timezone %s is not valid" msgstr "Zeitzone %s ist nicht gültig" #, python-format msgid "" "Unable to convert the value %(value)s to the expected data type %(type)s." msgstr "" "Wert %(value)s kann nicht in den erwarteten Datentyp %(type)s umgewandelt " "werden." #, python-format msgid "Unable to parse action %s" msgstr "Aktion %s konnte nicht analysiert werden" #, python-format msgid "" "Unexpected exception converting %(value)s to the expected data type %(type)s." 
msgstr "" "Unerwartete Ausnahme beim Konvertieren von %(value)s in den erwarteten " "Datentyp %(type)s." #, python-format msgid "Unsupported action %s" msgstr "Nicht unterstützte Aktion %s" #, python-format msgid "You are not authorized to create action: %s" msgstr "Sie sind nicht zur Erstellung der Aktion berechtigt: %s" msgid "state invalid" msgstr "Zustand ungültig" msgid "state_timestamp should be datetime object" msgstr "state_timestamp sollte ein datetime-Objekt sein" msgid "timestamp should be datetime object" msgstr "timestamp sollte ein datetime-Objekt sein" msgid "type must be set in every rule" msgstr "Typ muss in jeder Regel gesetzt werden" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0470076 aodh-19.0.0/aodh/locale/en_GB/0000775000175000017500000000000000000000000015663 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0750122 aodh-19.0.0/aodh/locale/en_GB/LC_MESSAGES/0000775000175000017500000000000000000000000017450 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/locale/en_GB/LC_MESSAGES/aodh.po0000664000175000017500000001204200000000000020722 0ustar00zuulzuul00000000000000# OpenStack Infra , 2015. #zanata # Andi Chandler , 2016. #zanata # Andreas Jaeger , 2016. #zanata # Andi Chandler , 2017. #zanata # Andi Chandler , 2018. #zanata # Andi Chandler , 2021. #zanata msgid "" msgstr "" "Project-Id-Version: aodh VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2021-09-15 16:30+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2021-01-13 10:46+0000\n" "Last-Translator: Andi Chandler \n" "Language-Team: English (United Kingdom)\n" "Language: en_GB\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" #, python-format msgid "%(name)s count exceeds maximum value %(maximum)d" msgstr "%(name)s count exceeds maximum value %(maximum)d" #, python-format msgid "%(rule)s must be set for %(type)s type alarm" msgstr "%(rule)s must be set for %(type)s type alarm" #, python-format msgid "%(rule1)s and %(rule2)s cannot be set at the same time" msgstr "%(rule1)s and %(rule2)s cannot be set at the same time" #, python-format msgid "%s is not JSON serializable" msgstr "%s is not JSON serialisable" #, python-format msgid "Alarm %(alarm_id)s not found in project %(project)s" msgstr "Alarm %(alarm_id)s not found in project %(project)s" #, python-format msgid "Alarm %s not found" msgstr "Alarm %s not found" msgid "Alarm incorrect" msgstr "Alarm incorrect" #, python-format msgid "Alarm quota exceeded for user %(u)s on project %(p)s" msgstr "Alarm quota exceeded for user %(u)s on project %(p)s" #, python-format msgid "Alarm when %s event occurred." msgstr "Alarm when %s event occurred." #, python-format msgid "Failed to parse the timestamp value %s" msgstr "Failed to parse the timestamp value %s" #, python-format msgid "Filter expression not valid: %s" msgstr "Filter expression not valid: %s" #, python-format msgid "" "Invalid input composite rule: %s, it should be a dict with an \"and\" or \"or" "\" as key, and the value of dict should be a list of basic threshold rules " "or sub composite rules, can be nested." 
msgstr "" "Invalid input composite rule: %s, it should be a dict with an \"and\" or \"or" "\" as key, and the value of dict should be a list of basic threshold rules " "or sub composite rules, can be nested." msgid "Limit should be positive" msgstr "Limit should be positive" #, python-format msgid "Not Authorized to access %(aspect)s %(id)s" msgstr "Not Authorised to access %(aspect)s %(id)s" #, python-format msgid "" "Notifying alarm %(alarm_name)s %(alarm_id)s of %(severity)s priority from " "%(previous)s to %(current)s with action %(action)s because %(reason)s." msgstr "" "Notifying alarm %(alarm_name)s %(alarm_id)s of %(severity)s priority from " "%(previous)s to %(current)s with action %(action)s because %(reason)s." #, python-format msgid "Order-by expression not valid: %s" msgstr "Order-by expression not valid: %s" #, python-format msgid "Query value or traits invalid: %s" msgstr "Query value or traits invalid: %s" #, python-format msgid "" "The data type %(type)s is not supported. The supported data type list is: " "%(supported)s" msgstr "" "The data type %(type)s is not supported. The supported data type list is: " "%(supported)s" msgid "Threshold rules should be combined with \"and\" or \"or\"" msgstr "Threshold rules should be combined with \"and\" or \"or\"" msgid "Time constraint names must be unique for a given alarm." msgstr "Time constraint names must be unique for a given alarm." #, python-format msgid "Timezone %s is not valid" msgstr "Timezone %s is not valid" #, python-format msgid "" "Unable to convert the value %(value)s to the expected data type %(type)s." msgstr "" "Unable to convert the value %(value)s to the expected data type %(type)s." #, python-format msgid "Unable to parse action %s" msgstr "Unable to parse action %s" #, python-format msgid "" "Unexpected exception converting %(value)s to the expected data type %(type)s." msgstr "" "Unexpected exception converting %(value)s to the expected data type %(type)s." #, python-format msgid "Unsupported action %s" msgstr "Unsupported action %s" #, python-format msgid "" "Unsupported sub-rule type :%(rule)s in composite rule, should be one of: " "%(plugins)s" msgstr "" "Unsupported sub-rule type :%(rule)s in composite rule, should be one of: " "%(plugins)s" #, python-format msgid "You are not authorized to create action: %s" msgstr "You are not authorised to create action: %s" msgid "policy File JSON to YAML Migration" msgstr "policy File JSON to YAML Migration" msgid "state invalid" msgstr "state invalid" msgid "state_timestamp should be datetime object" msgstr "state_timestamp should be datetime object" msgid "timestamp should be datetime object" msgstr "timestamp should be datetime object" msgid "trust URL cannot contain a trust ID." msgstr "trust URL cannot contain a trust ID." 
msgid "type must be set in every rule" msgstr "type must be set in every rule" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0470076 aodh-19.0.0/aodh/locale/es/0000775000175000017500000000000000000000000015320 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0750122 aodh-19.0.0/aodh/locale/es/LC_MESSAGES/0000775000175000017500000000000000000000000017105 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/locale/es/LC_MESSAGES/aodh.po0000664000175000017500000000707700000000000020373 0ustar00zuulzuul00000000000000# Tom Cocozzello , 2015. #zanata # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: aodh VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2021-09-15 16:30+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 04:26+0000\n" "Last-Translator: Copied by Zanata \n" "Language-Team: Spanish\n" "Language: es\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" #, python-format msgid "%(rule)s must be set for %(type)s type alarm" msgstr "%(rule)s debe establecerse para la alarma de tipo %(type)s" #, python-format msgid "%(rule1)s and %(rule2)s cannot be set at the same time" msgstr "%(rule1)s y %(rule2)s no se pueden establecer al mismo tiempo" #, python-format msgid "%s is not JSON serializable" msgstr "%s no es serializable en JSON" #, python-format msgid "Alarm %(alarm_id)s not found in project %(project)s" msgstr "La alarma %(alarm_id)s no se ha encontrado en el proyecto %(project)s" #, python-format msgid "Alarm %s not found" msgstr "No se ha encontrado la alarma %s" msgid "Alarm incorrect" msgstr "Alarma incorrecta" #, python-format msgid "Alarm quota exceeded for user %(u)s on project %(p)s" msgstr "" "La cuota de alarma se ha excedido para el usuario %(u)s en el proyecto %(p)s" #, python-format msgid "Failed to parse the timestamp value %s" msgstr "No se ha podido analizar el valor de indicación de fecha y hora %s" #, python-format msgid "Filter expression not valid: %s" msgstr "Expresión de filtro no válida: %s" #, python-format msgid "Not Authorized to access %(aspect)s %(id)s" msgstr "No está autorizado para acceder a %(aspect)s %(id)s" #, python-format msgid "" "Notifying alarm %(alarm_name)s %(alarm_id)s of %(severity)s priority from " "%(previous)s to %(current)s with action %(action)s because %(reason)s." msgstr "" "Notificando la alarma %(alarm_name)s %(alarm_id)s de prioridad %(severity)s " "de %(previous)s a %(current)s con la acción %(action)s debido a %(reason)s." #, python-format msgid "Order-by expression not valid: %s" msgstr "Expresión de ordenar por no válida: %s" #, python-format msgid "" "The data type %(type)s is not supported. The supported data type list is: " "%(supported)s" msgstr "" "El tipo de datos %(type)s no es compatible. La lista de tipo de datos " "admitido es: %(supported)s" msgid "Time constraint names must be unique for a given alarm." msgstr "" "Los nombres de restricción de tiempo deben ser exclusivos para una " "determinada alarma." 
#, python-format msgid "Timezone %s is not valid" msgstr "El huso horario %s no es válido" #, python-format msgid "" "Unable to convert the value %(value)s to the expected data type %(type)s." msgstr "" "No se ha podido convertir el valor %(value)s al tipo de datos esperado " "%(type)s." #, python-format msgid "Unable to parse action %s" msgstr "No se puede analizar la acción %s" #, python-format msgid "" "Unexpected exception converting %(value)s to the expected data type %(type)s." msgstr "" "Excepción inesperada al convertir %(value)s al tipo de dato esperado " "%(type)s." #, python-format msgid "Unsupported action %s" msgstr "Acción %s no admitida" msgid "state invalid" msgstr "estado no válido" msgid "state_timestamp should be datetime object" msgstr "state_timestamp debe ser el objeto datetime" msgid "timestamp should be datetime object" msgstr "" "La indicación de fecha y hora debe ser el objeto datetime (fecha y hora)" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0470076 aodh-19.0.0/aodh/locale/fr/0000775000175000017500000000000000000000000015320 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0750122 aodh-19.0.0/aodh/locale/fr/LC_MESSAGES/0000775000175000017500000000000000000000000017105 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/locale/fr/LC_MESSAGES/aodh.po0000664000175000017500000000714000000000000020362 0ustar00zuulzuul00000000000000# OpenStack Infra , 2015. #zanata # Tom Cocozzello , 2015. #zanata # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: aodh VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2021-09-15 16:30+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 04:26+0000\n" "Last-Translator: Copied by Zanata \n" "Language-Team: French\n" "Language: fr\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n > 1)\n" #, python-format msgid "%(rule)s must be set for %(type)s type alarm" msgstr "%(rule)s doit être défini pour l'alarme de type %(type)s" #, python-format msgid "%(rule1)s and %(rule2)s cannot be set at the same time" msgstr "%(rule1)s et %(rule2)s ne peuvent pas être définis en même temps" #, python-format msgid "%s is not JSON serializable" msgstr "%s n'est pas sérialisable en JSON" #, python-format msgid "Alarm %(alarm_id)s not found in project %(project)s" msgstr "Alarme %(alarm_id)s introuvable dans le projet %(project)s" #, python-format msgid "Alarm %s not found" msgstr "Alarme: %s non trouvé" msgid "Alarm incorrect" msgstr "Alarme incorrecte" #, python-format msgid "Alarm quota exceeded for user %(u)s on project %(p)s" msgstr "Quota d'alarme dépassé pour l'utilisateur %(u)s sur le projet %(p)s" #, python-format msgid "Failed to parse the timestamp value %s" msgstr "Echec de l'analyse syntaxique de la valeur d'horodatage %s" #, python-format msgid "Filter expression not valid: %s" msgstr "Filtre de l'expression n'est pas valide: %s" #, python-format msgid "Not Authorized to access %(aspect)s %(id)s" msgstr "Non autorisé à accéder %(aspect)s %(id)s " #, python-format msgid "" "Notifying alarm %(alarm_name)s %(alarm_id)s of %(severity)s priority from " "%(previous)s to %(current)s with action %(action)s because %(reason)s." 
msgstr "" "Notification de l'alarme %(alarm_name)s %(alarm_id)s de priorité " "%(severity)s de %(previous)s à %(current)s avec l'action %(action)s. Cause : " "%(reason)s." #, python-format msgid "Order-by expression not valid: %s" msgstr "L'expression de tri n'est pas valide : %s" #, python-format msgid "" "The data type %(type)s is not supported. The supported data type list is: " "%(supported)s" msgstr "" "Le type de données %(type)s n'est pas supporté. Les types de données " "supportés sont: %(supported)s" msgid "Time constraint names must be unique for a given alarm." msgstr "" "Les noms de contrainte de temps doivent être uniques pour une alarme donnée." #, python-format msgid "Timezone %s is not valid" msgstr "La timezone %s n'est pas valide" #, python-format msgid "" "Unable to convert the value %(value)s to the expected data type %(type)s." msgstr "" "Impossible de convertir la valeur %(value)s vers le type de données attendu " "%(type)s." #, python-format msgid "Unable to parse action %s" msgstr "Impossible d'analyser l'action %s" #, python-format msgid "" "Unexpected exception converting %(value)s to the expected data type %(type)s." msgstr "" "Exception inattendue lors de la conversion de %(value)s dans le type de " "donnée attendue %(type)s." #, python-format msgid "Unsupported action %s" msgstr "Action non supporté %s" msgid "state invalid" msgstr "Etat non valide" msgid "state_timestamp should be datetime object" msgstr "state_timestamp doit correspondre à l'objet date-heure" msgid "timestamp should be datetime object" msgstr "timestamp doit correspondre à l'objet date-heure" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0470076 aodh-19.0.0/aodh/locale/it/0000775000175000017500000000000000000000000015325 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0750122 aodh-19.0.0/aodh/locale/it/LC_MESSAGES/0000775000175000017500000000000000000000000017112 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/locale/it/LC_MESSAGES/aodh.po0000664000175000017500000000711000000000000020364 0ustar00zuulzuul00000000000000# Tom Cocozzello , 2015. #zanata # Andreas Jaeger , 2016. #zanata # KATO Tomoyuki , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: aodh VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2021-09-15 16:30+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-06-03 06:58+0000\n" "Last-Translator: KATO Tomoyuki \n" "Language-Team: Italian\n" "Language: it\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" #, python-format msgid "%(rule)s must be set for %(type)s type alarm" msgstr "%(rule)s deve essere impostata per la segnalazione di tipo %(type)s" #, python-format msgid "%(rule1)s and %(rule2)s cannot be set at the same time" msgstr "%(rule1)s e %(rule2)s non possono essere impostate contemporaneamente" #, python-format msgid "%s is not JSON serializable" msgstr "%s non è serializzabile mediante JSON" #, python-format msgid "Alarm %(alarm_id)s not found in project %(project)s" msgstr "Segnalazione %(alarm_id)s non trovata nel progetto %(project)s" #, python-format msgid "Alarm %s not found" msgstr "Segnalazione %s non trovata" msgid "Alarm incorrect" msgstr "Segnalazione non corretta" #, python-format msgid "Alarm quota exceeded for user %(u)s on project %(p)s" msgstr "Quota di segnalazione superata per l'utente %(u)s nel progetto %(p)s" #, python-format msgid "Failed to parse the timestamp value %s" msgstr "Impossibile analizzare il valore data/ora %s" #, python-format msgid "Filter expression not valid: %s" msgstr "Espressione del filtro non valida: %s" #, python-format msgid "Not Authorized to access %(aspect)s %(id)s" msgstr "Non autorizzato ad accedere %(aspect)s %(id)s" #, python-format msgid "" "Notifying alarm %(alarm_name)s %(alarm_id)s of %(severity)s priority from " "%(previous)s to %(current)s with action %(action)s because %(reason)s." msgstr "" "Notifica dell'allarme %(alarm_name)s %(alarm_id)s di priorità %(severity)s " "da %(previous)s a %(current)s con azione %(action)s a causa di %(reason)s." #, python-format msgid "Order-by expression not valid: %s" msgstr "L'espressione ordina per non è valida: %s" #, python-format msgid "" "The data type %(type)s is not supported. The supported data type list is: " "%(supported)s" msgstr "" "Il tipo di dati %(type)s non è supportato. L'elenco dei tipi di dati " "supportati è: %(supported)s" msgid "Time constraint names must be unique for a given alarm." msgstr "" "I nomi dei limiti di tempo devono essere univoci per una data segnalazione." #, python-format msgid "Timezone %s is not valid" msgstr "Fuso orario %s non valido" #, python-format msgid "" "Unable to convert the value %(value)s to the expected data type %(type)s." msgstr "" "Impossibile convertire il valore %(value)s nel tipo di dati previsto " "%(type)s." #, python-format msgid "Unable to parse action %s" msgstr "Impossibile analizzare l'azione %s" #, python-format msgid "" "Unexpected exception converting %(value)s to the expected data type %(type)s." msgstr "" "Eccezione non prevista durante la conversione di %(value)s per il tipo di " "dati previsto %(type)s." 
#, python-format msgid "Unsupported action %s" msgstr "Azione non supportata %s" msgid "state invalid" msgstr "stato non valido" msgid "state_timestamp should be datetime object" msgstr "state_timestamp deve essere un oggetto data/ora" msgid "timestamp should be datetime object" msgstr "timestamp deve essere un oggetto data/ora" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0510082 aodh-19.0.0/aodh/locale/ja/0000775000175000017500000000000000000000000015303 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0750122 aodh-19.0.0/aodh/locale/ja/LC_MESSAGES/0000775000175000017500000000000000000000000017070 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/locale/ja/LC_MESSAGES/aodh.po0000664000175000017500000001053100000000000020343 0ustar00zuulzuul00000000000000# Akihiro Motoki , 2015. #zanata # KATO Tomoyuki , 2015. #zanata # Tom Cocozzello , 2015. #zanata # Andreas Jaeger , 2016. #zanata # Shinichi Take , 2016. #zanata # Yuta Hono , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: aodh VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2021-09-15 16:30+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 04:26+0000\n" "Last-Translator: Copied by Zanata \n" "Language-Team: Japanese\n" "Language: ja\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=1; plural=0\n" #, python-format msgid "%(name)s count exceeds maximum value %(maximum)d" msgstr "%(name)s が最大値 %(maximum)d を超えています" #, python-format msgid "%(rule)s must be set for %(type)s type alarm" msgstr "%(type)s タイプのアラームに %(rule)s を設定する必要があります" #, python-format msgid "%(rule1)s and %(rule2)s cannot be set at the same time" msgstr "%(rule1)s と %(rule2)s を同時に設定することはできません" #, python-format msgid "%s is not JSON serializable" msgstr "%s が JSON シリアライズ可能ではありません" #, python-format msgid "Alarm %(alarm_id)s not found in project %(project)s" msgstr "アラーム %(alarm_id)s がプロジェクト %(project)s には見つかりません" #, python-format msgid "Alarm %s not found" msgstr "アラーム %s が見つかりません" msgid "Alarm incorrect" msgstr "アラームが正しくありません" #, python-format msgid "Alarm quota exceeded for user %(u)s on project %(p)s" msgstr "プロジェクト %(p)s のユーザー %(u)s のアラームクォータを超過しました" #, python-format msgid "Failed to parse the timestamp value %s" msgstr "タイムスタンプ値 %s を解析できませんでした" #, python-format msgid "Filter expression not valid: %s" msgstr "フィルター式が無効です: %s" #, python-format msgid "Not Authorized to access %(aspect)s %(id)s" msgstr "%(aspect)s %(id)s にアクセスする権限がありません" #, python-format msgid "" "Notifying alarm %(alarm_name)s %(alarm_id)s of %(severity)s priority from " "%(previous)s to %(current)s with action %(action)s because %(reason)s." msgstr "" "優先順位 %(severity)s のアラーム %(alarm_name)s %(alarm_id)s をアクション " "%(action)s によって %(previous)s から %(current)s へ通知中。理由: " "%(reason)s。" #, python-format msgid "Order-by expression not valid: %s" msgstr "order-by 式が無効です: %s" #, python-format msgid "" "The data type %(type)s is not supported. The supported data type list is: " "%(supported)s" msgstr "" "データ型 %(type)s はサポートされていません。サポートされているデータ型のリス" "ト: %(supported)s" msgid "Time constraint names must be unique for a given alarm." 
msgstr "時間制約の名前は、指定されたアラームで一意でなければなりません。" #, python-format msgid "Timezone %s is not valid" msgstr "タイムゾーン %s が無効です" #, python-format msgid "" "Unable to convert the value %(value)s to the expected data type %(type)s." msgstr "値 %(value)s を、想定されるデータ型 %(type)s に変換できません。" #, python-format msgid "Unable to parse action %s" msgstr "アクション %s を解析できません" #, python-format msgid "" "Unexpected exception converting %(value)s to the expected data type %(type)s." msgstr "" "%(value)s を想定されるデータ型 %(type)s に変換する際に、想定しない例外が発生" "しました。" #, python-format msgid "Unsupported action %s" msgstr "サポートされないアクション %s" #, python-format msgid "You are not authorized to create action: %s" msgstr "アクションの作成を許可されていません: %s" msgid "state invalid" msgstr "状態が無効です" msgid "state_timestamp should be datetime object" msgstr "state_timestamp は datetime オブジェクトでなければなりません" msgid "timestamp should be datetime object" msgstr "タイムスタンプは datetime オブジェクトでなければなりません" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0510082 aodh-19.0.0/aodh/locale/ko_KR/0000775000175000017500000000000000000000000015716 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0790129 aodh-19.0.0/aodh/locale/ko_KR/LC_MESSAGES/0000775000175000017500000000000000000000000017503 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/locale/ko_KR/LC_MESSAGES/aodh.po0000664000175000017500000001162200000000000020760 0ustar00zuulzuul00000000000000# Lucas Palm , 2015. #zanata # OpenStack Infra , 2015. #zanata # Andreas Jaeger , 2016. #zanata # ByungYeol Woo , 2017. #zanata # Heetae Ahn , 2017. #zanata # SEOKJAE BARK , 2017. #zanata # johjuhyun , 2017. #zanata # Taehee Jang , 2018. #zanata msgid "" msgstr "" "Project-Id-Version: aodh VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2021-09-15 16:30+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2018-09-02 01:31+0000\n" "Last-Translator: Taehee Jang \n" "Language-Team: Korean (South Korea)\n" "Language: ko_KR\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=1; plural=0\n" #, python-format msgid "%(name)s count exceeds maximum value %(maximum)d" msgstr "%(name)s 카운트가 최대값 %(maximum)d을 초과하였습니다." #, python-format msgid "%(rule)s must be set for %(type)s type alarm" msgstr "%(type)s 유형 알람에 %(rule)s을(를) 설정해야 함" #, python-format msgid "%(rule1)s and %(rule2)s cannot be set at the same time" msgstr "%(rule1)s 및 %(rule2)s을(를) 동시에 설정할 수 없음" #, python-format msgid "%s is not JSON serializable" msgstr "%s은(는) JSON 직렬화 할 수 없음" #, python-format msgid "Alarm %(alarm_id)s not found in project %(project)s" msgstr "%(alarm_id)s 알람이 %(project)s 프로젝트에 없음" #, python-format msgid "Alarm %s not found" msgstr "%s 알람을 찾을 수 없음" msgid "Alarm incorrect" msgstr "알림이 올바르지 않습니다" #, python-format msgid "Alarm quota exceeded for user %(u)s on project %(p)s" msgstr "%(p)s 프로젝트의 %(u)s 사용자에 대한 알람 할당량 초과" #, python-format msgid "Alarm when %s event occurred." msgstr "%s의 event가 발생했을 때 알람을 발생" #, python-format msgid "Failed to parse the timestamp value %s" msgstr "시간소인 값 %s 구문 분석 실패" #, python-format msgid "Filter expression not valid: %s" msgstr "필터 표현식이 올바르지 않음: %s" msgid "Limit should be positive" msgstr "제한 값은 양수여야 합니다." 
#, python-format msgid "Not Authorized to access %(aspect)s %(id)s" msgstr "%(aspect)s %(id)s에 대한 액세스 권한이 부여되지 않음" #, python-format msgid "" "Notifying alarm %(alarm_name)s %(alarm_id)s of %(severity)s priority from " "%(previous)s to %(current)s with action %(action)s because %(reason)s." msgstr "" "%(severity)s 우선순위에 대한 알람 %(alarm_name)s %(alarm_id)s 알림, " "%(previous)s부터 %(current)s까지, 조치 %(action)s 사용. 이유: %(reason)s." #, python-format msgid "Order-by expression not valid: %s" msgstr "Order-by 표현식이 올바르지 않음: %s" #, python-format msgid "Query value or traits invalid: %s" msgstr "쿼리 값 혹은 특성이 유효하지 않음: %s" #, python-format msgid "" "The data type %(type)s is not supported. The supported data type list is: " "%(supported)s" msgstr "" "데이터 유형 %(type)s이(가) 지원되지 않습니다. 지원되는 데이터 유형 목록은 " "%(supported)s입니다." msgid "Threshold rules should be combined with \"and\" or \"or\"" msgstr "임계점 규칙은 \"and\" 혹은 \"or\"과 함께 결합되어야 합니다" msgid "Time constraint names must be unique for a given alarm." msgstr "시간 제한조건 이름은 지정된 알람에 고유해야 합니다." #, python-format msgid "Timezone %s is not valid" msgstr "시간대 %s이(가) 올바르지 않음" #, python-format msgid "" "Unable to convert the value %(value)s to the expected data type %(type)s." msgstr "%(value)s 값을 예상 데이터 유형 %(type)s(으)로 변환할 수 없습니다." #, python-format msgid "Unable to parse action %s" msgstr "%s 동작을 구문 분석할 수 없음" #, python-format msgid "" "Unexpected exception converting %(value)s to the expected data type %(type)s." msgstr "" "%(value)s을(를) 예상된 데이터 유형으로 변환하는 중에 예상치 않은 예외 발생 " "%(type)s." #, python-format msgid "Unsupported action %s" msgstr "지원되지 않는 동작 %s" #, python-format msgid "You are not authorized to create action: %s" msgstr "다음 동작을 수행할 권한이 없습니다: %s" msgid "state invalid" msgstr "상태가 잘못되었습니다" msgid "state_timestamp should be datetime object" msgstr "state_timestamp는 Datetime 오브젝트여야 함" msgid "timestamp should be datetime object" msgstr "시간소인은 Datetime 오브젝트여야 함" msgid "trust URL cannot contain a trust ID." msgstr "신뢰하는 URL은 신뢰하는 ID를 포함할 수 없습니다." msgid "type must be set in every rule" msgstr "타입은 모든 rule에 반드시 설정되어야 한다" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0510082 aodh-19.0.0/aodh/locale/pt/0000775000175000017500000000000000000000000015334 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0790129 aodh-19.0.0/aodh/locale/pt/LC_MESSAGES/0000775000175000017500000000000000000000000017121 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/locale/pt/LC_MESSAGES/aodh.po0000664000175000017500000000770000000000000020400 0ustar00zuulzuul00000000000000# Translations template for aodh. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the aodh project. # # Translators: # AnaFonseca , 2015 # Andreas Jaeger , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: aodh VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2021-09-15 16:30+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 03:59+0000\n" "Last-Translator: Copied by Zanata \n" "Language: pt\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Portuguese\n" #, python-format msgid "%(name)s count exceeds maximum value %(maximum)d" msgstr "a contagem %(name)s excede o valor máximo %(maximum)d" #, python-format msgid "%(rule)s must be set for %(type)s type alarm" msgstr "%(rule)s devem ser definidas para o tipo de aviso %(type)s" #, python-format msgid "%(rule1)s and %(rule2)s cannot be set at the same time" msgstr "%(rule1)s e %(rule2)s não podem ser programadas ao mesmo tempo" #, python-format msgid "Alarm %(alarm_id)s not found in project %(project)s" msgstr "Alarme %(alarm_id)s não encontrado no projeto %(project)s" #, python-format msgid "Alarm %s not found" msgstr "Alarme %s não encontrado" msgid "Alarm incorrect" msgstr "Alarme incorreto" #, python-format msgid "Alarm quota exceeded for user %(u)s on project %(p)s" msgstr "Aviso de quota excedida para o utilizador %(u)s no projeto %(p)s" #, python-format msgid "Alarm when %s event occurred." msgstr "Alarme quando evento %s ocorreu." #, python-format msgid "Failed to parse the timestamp value %s" msgstr "Erro ao analisar o valor data/hora %s" #, python-format msgid "Filter expression not valid: %s" msgstr "Expressão filtro inválida: %s" msgid "Limit should be positive" msgstr "O limite deve ser positivo" #, python-format msgid "Not Authorized to access %(aspect)s %(id)s" msgstr "Não Autorizado o acesso a %(aspect)s %(id)s" #, python-format msgid "" "Notifying alarm %(alarm_name)s %(alarm_id)s of %(severity)s priority from " "%(previous)s to %(current)s with action %(action)s because %(reason)s." msgstr "" "Notificar alarme %(alarm_name)s %(alarm_id)s de %(severity)s prioridade de " "%(previous)s a %(current)s com a ação %(action)s devido a %(reason)s." #, python-format msgid "Order-by expression not valid: %s" msgstr "Expressão ordenar por inválida: %s" #, python-format msgid "" "The data type %(type)s is not supported. The supported data type list is: " "%(supported)s" msgstr "" "O tipo de dados %(type)s não é suportado. A lista do tipo de dados " "suportados é: %(supported)s" msgid "Time constraint names must be unique for a given alarm." msgstr "" "Os nomes das restrições de tempo deve ser únicos para um determinado aviso." #, python-format msgid "Timezone %s is not valid" msgstr "Fuso horário %s inválido" #, python-format msgid "" "Unable to convert the value %(value)s to the expected data type %(type)s." msgstr "" "Incapaz de converter o valor %(value)s para o tipo de dados esperados " "%(type)s." #, python-format msgid "Unable to parse action %s" msgstr "Incapaz de analisar a ação %s" #, python-format msgid "" "Unexpected exception converting %(value)s to the expected data type %(type)s." msgstr "" "Exceção inesperada ao converter %(value)s para o tipo de dados esperado " "%(type)s." 
#, python-format msgid "Unsupported action %s" msgstr "Ação não suportada %s" #, python-format msgid "You are not authorized to create action: %s" msgstr "Não tem permissão para criar a ação: %s" msgid "state invalid" msgstr "estato inválido" msgid "state_timestamp should be datetime object" msgstr "state_timestamp deve ser um objeto data/hora" msgid "timestamp should be datetime object" msgstr "o timestamp deve ser um objeto data/hora" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0510082 aodh-19.0.0/aodh/locale/pt_BR/0000775000175000017500000000000000000000000015717 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0790129 aodh-19.0.0/aodh/locale/pt_BR/LC_MESSAGES/0000775000175000017500000000000000000000000017504 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/locale/pt_BR/LC_MESSAGES/aodh.po0000664000175000017500000001121600000000000020760 0ustar00zuulzuul00000000000000# Lucas Palm , 2015. #zanata # OpenStack Infra , 2015. #zanata # Andreas Jaeger , 2016. #zanata # Ricardo Bras , 2017. #zanata # Rodrigo Loures , 2018. #zanata msgid "" msgstr "" "Project-Id-Version: aodh VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2021-09-15 16:30+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2018-01-23 03:52+0000\n" "Last-Translator: Rodrigo Loures \n" "Language-Team: Portuguese (Brazil)\n" "Language: pt_BR\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" #, python-format msgid "%(name)s count exceeds maximum value %(maximum)d" msgstr "%(name)s Contagem excede o valor maximo de: %(maximum)d" #, python-format msgid "%(rule)s must be set for %(type)s type alarm" msgstr "%(rule)s deve ser definido para alarme de tipo %(type)s" #, python-format msgid "%(rule1)s and %(rule2)s cannot be set at the same time" msgstr "%(rule1)s e %(rule2)s não podem ser configurados ao mesmo tempo" #, python-format msgid "%s is not JSON serializable" msgstr "%s não é JSON serializável" #, python-format msgid "Alarm %(alarm_id)s not found in project %(project)s" msgstr "Alarme%(alarm_id)s não localizado no projeto%(project)s" #, python-format msgid "Alarm %s not found" msgstr "Alarme %s não localizado" msgid "Alarm incorrect" msgstr "Alarme incorreto" #, python-format msgid "Alarm quota exceeded for user %(u)s on project %(p)s" msgstr "Cota de alarme excedida para usuário %(u)s no projeto %(p)s" #, python-format msgid "Alarm when %s event occurred." msgstr "Alerta de evento ocorrido: %s" #, python-format msgid "Failed to parse the timestamp value %s" msgstr "Falha ao analisar o valor do registro de data e hora %s" #, python-format msgid "Filter expression not valid: %s" msgstr "Expressão de filtro inválida: %s" msgid "Limit should be positive" msgstr "O limite deve ser positivo" #, python-format msgid "Not Authorized to access %(aspect)s %(id)s" msgstr "Não Autorizado a acessar %(aspect)s %(id)s" #, python-format msgid "" "Notifying alarm %(alarm_name)s %(alarm_id)s of %(severity)s priority from " "%(previous)s to %(current)s with action %(action)s because %(reason)s." 
msgstr "" "Notificando alarme %(alarm_name)s %(alarm_id)s da prioridade %(severity)s do " "%(previous)s para %(current)s com ação %(action)s porque %(reason)s." #, python-format msgid "Order-by expression not valid: %s" msgstr "Expressão solicitada inválida: %s" #, python-format msgid "" "The data type %(type)s is not supported. The supported data type list is: " "%(supported)s" msgstr "" "O tipo de dados %(type)s não é suportado. A lista de tipos de dados " "suportados é: %(supported)s" msgid "Threshold rules should be combined with \"and\" or \"or\"" msgstr "As regras de limiar devem ser combinadas com \"and\" ou \"or\"" msgid "Time constraint names must be unique for a given alarm." msgstr "" "Nomes de restrição de tempo devem ser exclusivos para um determinado alarme." #, python-format msgid "Timezone %s is not valid" msgstr "Fuso horário %s não é válido" #, python-format msgid "" "Unable to convert the value %(value)s to the expected data type %(type)s." msgstr "" "Não é possível converter o valor %(value)s para o tipo de dados esperado " "%(type)s." #, python-format msgid "Unable to parse action %s" msgstr "Não é possível analisar ação %s" #, python-format msgid "" "Unexpected exception converting %(value)s to the expected data type %(type)s." msgstr "" "Exceção inesperada convertendo %(value)s para o tipo de dado esperado " "%(type)s." #, python-format msgid "Unsupported action %s" msgstr "Ação não suportada %s" #, python-format msgid "" "Unsupported sub-rule type :%(rule)s in composite rule, should be one of: " "%(plugins)s" msgstr "" "Tipo de sub-regra não suportado: %(rule)s em regras compostas, deve estar " "fora de: %(plugins)s" #, fuzzy, python-format msgid "You are not authorized to create action: %s" msgstr "Você não têm autorização para criar a ação: %s" msgid "state invalid" msgstr "estado inválido" msgid "state_timestamp should be datetime object" msgstr "state_timestamp precisa ser objeto de data/hora" msgid "timestamp should be datetime object" msgstr "registro de data e hora precisa ser objeto de data/hora" msgid "trust URL cannot contain a trust ID." msgstr "a URL de confiança não pode conter um ID de segurança." msgid "type must be set in every rule" msgstr "o tipo deve ser definido para todas as regras" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0510082 aodh-19.0.0/aodh/locale/ru/0000775000175000017500000000000000000000000015337 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0790129 aodh-19.0.0/aodh/locale/ru/LC_MESSAGES/0000775000175000017500000000000000000000000017124 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/locale/ru/LC_MESSAGES/aodh.po0000664000175000017500000001537400000000000020411 0ustar00zuulzuul00000000000000# Translations template for aodh. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the aodh project. # # Translators: # Altinbek , 2015 # Lucas Palm , 2015. #zanata # Andreas Jaeger , 2016. #zanata # Roman Gorshunov , 2019. 
#zanata msgid "" msgstr "" "Project-Id-Version: aodh VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2021-09-15 16:30+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2019-08-20 06:45+0000\n" "Last-Translator: Roman Gorshunov \n" "Language: ru\n" "Plural-Forms: nplurals=4; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" "%10<=4 && (n%100<12 || n%100>14) ? 1 : n%10==0 || (n%10>=5 && n%10<=9) || (n" "%100>=11 && n%100<=14)? 2 : 3);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Russian\n" #, python-format msgid "%(name)s count exceeds maximum value %(maximum)d" msgstr "%(name)s превышает максимальное значение %(maximum)d" #, python-format msgid "%(rule)s must be set for %(type)s type alarm" msgstr "%(rule)s должны быть установлены для сигналов тревоги типа%(type)s" #, python-format msgid "%(rule1)s and %(rule2)s cannot be set at the same time" msgstr "%(rule1)s и %(rule2)s нельзя задавать одновременно" #, python-format msgid "%s is not JSON serializable" msgstr "%s не является сериализуемым с помощью JSON" #, python-format msgid "Alarm %(alarm_id)s not found in project %(project)s" msgstr "Предупреждение %(alarm_id)s не найдено в проекте %(project)s" #, python-format msgid "Alarm %s not found" msgstr "Предупреждение %s не найдено" msgid "Alarm incorrect" msgstr "Некорректное предупреждение" #, python-format msgid "Alarm quota exceeded for user %(u)s on project %(p)s" msgstr "Количество ошибок пользователя %(u)s превысило норму %(p)s" #, python-format msgid "Alarm when %s event occurred." msgstr "Сработал сигнал тревоги по событию %s." #, python-format msgid "Failed to parse the timestamp value %s" msgstr "Ошибка анализа значения времени %s" #, python-format msgid "Filter expression not valid: %s" msgstr "Недопустимое выражение фильтра: %s" #, python-format msgid "" "Invalid input composite rule: %s, it should be a dict with an \"and\" or \"or" "\" as key, and the value of dict should be a list of basic threshold rules " "or sub composite rules, can be nested." msgstr "" "Некорректное композитное правило: %s, оно должно быть типа dict (словарь) с " "ключём \"and\" или \"or\" в качестве ключа, и значение в словаре должно быть " "списком основных граничных правил или частью композитного правила (они могут " "быть вложенными)." msgid "Limit should be positive" msgstr "Ограничение должно быть положительным" #, python-format msgid "Not Authorized to access %(aspect)s %(id)s" msgstr "Нет прав доступа к %(aspect)s %(id)s" #, python-format msgid "" "Notifying alarm %(alarm_name)s %(alarm_id)s of %(severity)s priority from " "%(previous)s to %(current)s with action %(action)s because %(reason)s." msgstr "" "Сигнал тревоги %(alarm_name)s %(alarm_id)s с приоритетом %(severity)s от " "%(previous)s и до %(current)s с действием %(action)s по причине %(reason)s." #, python-format msgid "Order-by expression not valid: %s" msgstr "Недопустимое выражение сортировки: %s" #, python-format msgid "Query value or traits invalid: %s" msgstr "Значение запроса или признаки недействительны: %s" #, python-format msgid "" "The data type %(type)s is not supported. The supported data type list is: " "%(supported)s" msgstr "" "Тип данных %(type)s не поддерживается. 
Список поддерживаемых типов данных: " "%(supported)s" msgid "Threshold rules should be combined with \"and\" or \"or\"" msgstr "Граничные правила должны комбинироваться с помощью \"and\" или \"or\"" msgid "Time constraint names must be unique for a given alarm." msgstr "" "Названия диапазонов времени должны отличаться для данного сигнала тревоги." #, python-format msgid "Timezone %s is not valid" msgstr "Временная зона %s не корректна" #, python-format msgid "" "Unable to convert the value %(value)s to the expected data type %(type)s." msgstr "" "Преобразовать значение %(value)s в ожидаемый тип данных %(type)s невозможно." #, python-format msgid "Unable to parse action %s" msgstr "Ошибка анализа действия %s" #, python-format msgid "" "Unexpected exception converting %(value)s to the expected data type %(type)s." msgstr "" "Возникла непредвиденная исключительная ситуация при преобразовании %(value)s " "в ожидаемый тип данных %(type)s." #, python-format msgid "Unsupported action %s" msgstr "Неподдерживаемое действие %s" #, python-format msgid "" "Unsupported sub-rule type :%(rule)s in composite rule, should be one of: " "%(plugins)s" msgstr "" "Неподдерживаемый тип дочернего правила: %(rule)s в композитном правиле " "должно быть одним из %(plugins)s" #, python-format msgid "You are not authorized to create action: %s" msgstr "Вы не авторизованы, чтобы выполнять действие: %s" msgid "state invalid" msgstr "недопустимое состояние" msgid "state_timestamp should be datetime object" msgstr "state_timestamp должен быть объектом datetime" msgid "timestamp should be datetime object" msgstr "системное время должно быть объектом datetime" msgid "trust URL cannot contain a trust ID." msgstr "URL доверия не может содержать ID доверия." msgid "type must be set in every rule" msgstr "тип должен быть указан для каждого правила" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0510082 aodh-19.0.0/aodh/locale/zh_CN/0000775000175000017500000000000000000000000015712 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0790129 aodh-19.0.0/aodh/locale/zh_CN/LC_MESSAGES/0000775000175000017500000000000000000000000017477 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/locale/zh_CN/LC_MESSAGES/aodh.po0000664000175000017500000000710700000000000020757 0ustar00zuulzuul00000000000000# Lucas Palm , 2015. #zanata # OpenStack Infra , 2015. #zanata # Andreas Jaeger , 2016. #zanata # KATO Tomoyuki , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: aodh VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2021-09-15 16:30+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-06-17 02:24+0000\n" "Last-Translator: KATO Tomoyuki \n" "Language-Team: Chinese (China)\n" "Language: zh_CN\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=1; plural=0\n" #, python-format msgid "%(name)s count exceeds maximum value %(maximum)d" msgstr "%(name)s数量超过最大值%(maximum)d" #, python-format msgid "%(rule)s must be set for %(type)s type alarm" msgstr "类型为%(type)s的告警必须设置%(rule)s" #, python-format msgid "%(rule1)s and %(rule2)s cannot be set at the same time" msgstr "%(rule1)s和%(rule2)s无法同时被设置" #, python-format msgid "%s is not JSON serializable" msgstr "%s 不是可序列化 JSON" #, python-format msgid "Alarm %(alarm_id)s not found in project %(project)s" msgstr "告警%(alarm_id)s在项目%(project)s中未找到" #, python-format msgid "Alarm %s not found" msgstr "告警%s没有找到" msgid "Alarm incorrect" msgstr "警报不正确" #, python-format msgid "Alarm quota exceeded for user %(u)s on project %(p)s" msgstr "用户%(u)s在项目%(p)s中的告警配额已溢出" #, python-format msgid "Failed to parse the timestamp value %s" msgstr "解析时间戳%s失败" #, python-format msgid "Filter expression not valid: %s" msgstr "过滤表达式不合法:%s" #, python-format msgid "Not Authorized to access %(aspect)s %(id)s" msgstr "权限不足以访问%(aspect)s %(id)s" #, python-format msgid "" "Notifying alarm %(alarm_name)s %(alarm_id)s of %(severity)s priority from " "%(previous)s to %(current)s with action %(action)s because %(reason)s." msgstr "" "正在通知警报%(alarm_name)s %(alarm_id)s,警报级别%(severity)s,状态" "从%(previous)s变为%(current)s,动作为%(action)s,原因是%(reason)s。" #, python-format msgid "Order-by expression not valid: %s" msgstr "orderby表达式不合法:%s" #, python-format msgid "" "The data type %(type)s is not supported. The supported data type list is: " "%(supported)s" msgstr "数据类型%(type)s不被支持。支持的数据类型列表:%(supported)s" msgid "Time constraint names must be unique for a given alarm." msgstr "一个指定的告警的时间约束名称必须唯一" #, python-format msgid "Timezone %s is not valid" msgstr "时区%s不合法" #, python-format msgid "" "Unable to convert the value %(value)s to the expected data type %(type)s." msgstr "无法转换%(value)s到预期的数据类型%(type)s。" #, python-format msgid "Unable to parse action %s" msgstr "无法解析动作%s" #, python-format msgid "" "Unexpected exception converting %(value)s to the expected data type %(type)s." 
msgstr "在转换%(value)s到预期的数据类型%(type)s时发生了未预料的异常。" #, python-format msgid "Unsupported action %s" msgstr "动作%s不支持" #, python-format msgid "You are not authorized to create action: %s" msgstr "你没有权限创建动作:%s" msgid "state invalid" msgstr "状态无效" msgid "state_timestamp should be datetime object" msgstr "state_timestamp必须是datetime对象" msgid "timestamp should be datetime object" msgstr "timestamp必须是datatime对象" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0510082 aodh-19.0.0/aodh/locale/zh_TW/0000775000175000017500000000000000000000000015744 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0790129 aodh-19.0.0/aodh/locale/zh_TW/LC_MESSAGES/0000775000175000017500000000000000000000000017531 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/locale/zh_TW/LC_MESSAGES/aodh.po0000664000175000017500000000654300000000000021014 0ustar00zuulzuul00000000000000# Lucas Palm , 2015. #zanata # Jennifer , 2016. #zanata # KATO Tomoyuki , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: aodh VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2021-09-15 16:30+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-06-03 07:04+0000\n" "Last-Translator: KATO Tomoyuki \n" "Language-Team: Chinese (Taiwan)\n" "Language: zh_TW\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=1; plural=0\n" #, python-format msgid "%(rule)s must be set for %(type)s type alarm" msgstr "必須為 %(type)s 類型警示設定 %(rule)s" #, python-format msgid "%(rule1)s and %(rule2)s cannot be set at the same time" msgstr "無法同時設定 %(rule1)s 和 %(rule2)s" #, python-format msgid "%s is not JSON serializable" msgstr "%s 不可進行 JSON 序列化" #, python-format msgid "Alarm %(alarm_id)s not found in project %(project)s" msgstr "在專案 %(project)s 中找不到警示 %(alarm_id)s" #, python-format msgid "Alarm %s not found" msgstr "找不到警示 %s" msgid "Alarm incorrect" msgstr "警示不正確" #, python-format msgid "Alarm quota exceeded for user %(u)s on project %(p)s" msgstr "在專案 %(p)s 上,針對使用者 %(u)s 已超出的警示配額" #, python-format msgid "Failed to parse the timestamp value %s" msgstr "無法剖析時間戳記值 %s" #, python-format msgid "Filter expression not valid: %s" msgstr "過濾表示式無效:%s" #, python-format msgid "Not Authorized to access %(aspect)s %(id)s" msgstr "未獲授權來存取 %(aspect)s %(id)s" #, python-format msgid "" "Notifying alarm %(alarm_name)s %(alarm_id)s of %(severity)s priority from " "%(previous)s to %(current)s with action %(action)s because %(reason)s." msgstr "" "正在以動作 %(action)s 通知優先順序為 %(severity)s 的警示 %(alarm_name)s " "%(alarm_id)s(從 %(previous)s 至 %(current)s),因為 %(reason)s。" #, python-format msgid "Order-by expression not valid: %s" msgstr "排序方式表示式無效:%s" #, python-format msgid "" "The data type %(type)s is not supported. The supported data type list is: " "%(supported)s" msgstr "不支援資料類型 %(type)s。支援的資料類型清單為:%(supported)s" msgid "Time constraint names must be unique for a given alarm." msgstr "針對給定的警示,時間限制名稱必須是唯一的。" #, python-format msgid "Timezone %s is not valid" msgstr "時區 %s 無效" #, python-format msgid "" "Unable to convert the value %(value)s to the expected data type %(type)s." 
msgstr "無法將值 %(value)s 轉換成預期的資料類型 %(type)s。" #, python-format msgid "Unable to parse action %s" msgstr "無法剖析動作 %s" #, python-format msgid "" "Unexpected exception converting %(value)s to the expected data type %(type)s." msgstr "將 %(value)s 轉換為預期的資料類型%(type)s 時發生非預期的異常狀況。" #, python-format msgid "Unsupported action %s" msgstr "不受支援的動作 %s" msgid "state invalid" msgstr "狀態無效" msgid "state_timestamp should be datetime object" msgstr "state_timestamp 應該為日期時間物件" msgid "timestamp should be datetime object" msgstr "時間戳記應該為日期時間物件" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/messaging.py0000664000175000017500000000444600000000000016011 0ustar00zuulzuul00000000000000# -*- coding: utf-8 -*- # Copyright 2013-2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import oslo_messaging from oslo_messaging import serializer as oslo_serializer DEFAULT_URL = "__default__" TRANSPORTS = {} _SERIALIZER = oslo_serializer.JsonPayloadSerializer() def setup(): oslo_messaging.set_transport_defaults('aodh') def get_transport(conf, url=None, optional=False, cache=True): """Initialise the oslo_messaging layer.""" global TRANSPORTS, DEFAULT_URL cache_key = url or DEFAULT_URL transport = TRANSPORTS.get(cache_key) if not transport or not cache: try: transport = oslo_messaging.get_notification_transport(conf, url) except (oslo_messaging.InvalidTransportURL, oslo_messaging.DriverLoadFailure): if not optional or url: # NOTE(sileht): oslo_messaging is configured but unloadable # so reraise the exception raise return None else: if cache: TRANSPORTS[cache_key] = transport return transport def get_batch_notification_listener(transport, targets, endpoints, allow_requeue=False, batch_size=1, batch_timeout=None): """Return a configured oslo_messaging notification listener.""" return oslo_messaging.get_batch_notification_listener( transport, targets, endpoints, executor='threading', allow_requeue=allow_requeue, batch_size=batch_size, batch_timeout=batch_timeout) def get_notifier(transport, publisher_id): """Return a configured oslo_messaging notifier.""" notifier = oslo_messaging.Notifier(transport, serializer=_SERIALIZER) return notifier.prepare(publisher_id=publisher_id) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0790129 aodh-19.0.0/aodh/notifier/0000775000175000017500000000000000000000000015271 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/notifier/__init__.py0000664000175000017500000001340600000000000017406 0ustar00zuulzuul00000000000000# # Copyright 2013-2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import cotyledon from oslo_config import cfg from oslo_log import log import oslo_messaging from oslo_utils import netutils from stevedore import extension from aodh import messaging LOG = log.getLogger(__name__) OPTS = [ cfg.IntOpt('batch_size', default=1, help='Number of notification messages to wait before ' 'dispatching them.'), cfg.IntOpt('batch_timeout', help='Number of seconds to wait before dispatching samples ' 'when batch_size is not reached (None means indefinitely).' ), ] class AlarmNotifier(object, metaclass=abc.ABCMeta): """Base class for alarm notifier plugins.""" @staticmethod def __init__(conf): pass @abc.abstractmethod def notify(self, action, alarm_id, alarm_name, severity, previous, current, reason, reason_data): """Notify that an alarm has been triggered. :param action: The action that is being attended, as a parsed URL. :param alarm_id: The triggered alarm. :param alarm_name: The name of triggered alarm. :param severity: The level of triggered alarm :param previous: The previous state of the alarm. :param current: The current state of the alarm. :param reason: The reason the alarm changed its state. :param reason_data: A dict representation of the reason. """ class AlarmNotifierService(cotyledon.Service): NOTIFIER_EXTENSIONS_NAMESPACE = "aodh.notifier" def __init__(self, worker_id, conf): super(AlarmNotifierService, self).__init__(worker_id) self.conf = conf transport = messaging.get_transport(self.conf) self.notifiers = extension.ExtensionManager( self.NOTIFIER_EXTENSIONS_NAMESPACE, invoke_on_load=True, invoke_args=(self.conf,)) target = oslo_messaging.Target(topic=self.conf.notifier_topic) self.listener = messaging.get_batch_notification_listener( transport, [target], [AlarmEndpoint(self.notifiers)], False, self.conf.notifier.batch_size, self.conf.notifier.batch_timeout) self.listener.start() def terminate(self): self.listener.stop() self.listener.wait() class AlarmEndpoint(object): def __init__(self, notifiers): self.notifiers = notifiers def sample(self, notifications): """Endpoint for alarm notifications""" LOG.debug('Received %s messages in batch.', len(notifications)) for notification in notifications: self._process_alarm(self.notifiers, notification['payload']) @staticmethod def _handle_action(notifiers, action, alarm_id, alarm_name, severity, previous, current, reason, reason_data): """Process action on alarm :param notifiers: list of possible notifiers. :param action: The action that is being attended, as a parsed URL. :param alarm_id: The triggered alarm. :param alarm_name: The name of triggered alarm. :param severity: The level of triggered alarm :param previous: The previous state of the alarm. :param current: The current state of the alarm. :param reason: The reason the alarm changed its state. :param reason_data: A dict representation of the reason. 
""" try: action = netutils.urlsplit(action) except Exception: LOG.error( ("Unable to parse action %(action)s for alarm " "%(alarm_id)s"), {'action': action, 'alarm_id': alarm_id}) return try: notifier = notifiers[action.scheme].obj except KeyError: scheme = action.scheme LOG.error( ("Action %(scheme)s for alarm %(alarm_id)s is unknown, " "cannot notify"), {'scheme': scheme, 'alarm_id': alarm_id}) return try: LOG.debug("Notifying alarm %(id)s with action %(act)s", {'id': alarm_id, 'act': action}) notifier.notify(action, alarm_id, alarm_name, severity, previous, current, reason, reason_data) except Exception: LOG.exception("Unable to notify alarm %s", alarm_id) @staticmethod def _process_alarm(notifiers, data): """Notify that alarm has been triggered. :param notifiers: list of possible notifiers :param data: (dict): alarm data """ actions = data.get('actions') if not actions: LOG.error("Unable to notify for an alarm with no action") return for action in actions: AlarmEndpoint._handle_action(notifiers, action, data.get('alarm_id'), data.get('alarm_name'), data.get('severity'), data.get('previous'), data.get('current'), data.get('reason'), data.get('reason_data')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/notifier/heat.py0000664000175000017500000001051000000000000016561 0ustar00zuulzuul00000000000000# Copyright 2019 Catalyst Cloud Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log from aodh import keystone_client as aodh_keystone from aodh import notifier LOG = log.getLogger(__name__) class TrustHeatAlarmNotifier(notifier.AlarmNotifier): """Heat autohealing notifier. The auto-healing notifier works together with loadbalancer_member_health evaluator. Presumably, the end user defines a Heat template which contains an autoscaling group and all the members in the group are joined in an Octavia load balancer in order to expose service to the outside, so that when the stack scales up or scales down, Heat makes sure the new members are joining the load balancer automatically and the old members are removed. However, this notifier deals with the situation that when some member fails, the stack could be recovered by marking the given autoscaling group member unhealthy, then update Heat stack in place. In order to do that, the notifier needs to know: - Heat top/root stack ID. - Heat autoscaling group ID. - The failed Octavia pool members. """ def __init__(self, conf): super(TrustHeatAlarmNotifier, self).__init__(conf) self.conf = conf def notify(self, action, alarm_id, alarm_name, severity, previous, current, reason, reason_data): LOG.info( "Notifying alarm %(alarm_name)s %(alarm_id)s of %(severity)s " "priority from %(previous)s to %(current)s with action %(action)s" " because %(reason)s." 
% {'alarm_name': alarm_name, 'alarm_id': alarm_id, 'severity': severity, 'previous': previous, 'current': current, 'action': action.geturl(), 'reason': reason} ) trust_id = action.username stack_id = reason_data.get("stack_id") asg_id = reason_data.get("asg_id") unhealthy_members = reason_data.get("unhealthy_members", []) unhealthy_resources = [] if not stack_id or not asg_id: LOG.error( "stack_id and asg_id must exist to notify alarm %s", alarm_id ) return heat_client = aodh_keystone.get_heat_client_from_trust( self.conf, trust_id ) for member in unhealthy_members: target_resources = heat_client.resources.list( stack_id, nested_depth=3, filters={"physical_resource_id": member["id"]} ) if len(target_resources) > 0: # There should be only one item. unhealthy_resources.append( target_resources[0].parent_resource ) if not unhealthy_resources: LOG.warning("No unhealthy resource found for the alarm %s", alarm_id) return try: for res in unhealthy_resources: heat_client.resources.mark_unhealthy( asg_id, res, True, "unhealthy load balancer member" ) LOG.info( "Heat resource %(resource_id)s is marked as unhealthy " "for alarm %(alarm_id)s", {"resource_id": res, "alarm_id": alarm_id} ) heat_client.stacks.update(stack_id, existing=True) LOG.info( "Heat stack %(stack_id)s is updated for alarm " "%(alarm_id)s", {"stack_id": stack_id, "alarm_id": alarm_id} ) except Exception as e: LOG.exception("Failed to communicate with Heat service for alarm " "%s, error: %s", alarm_id, str(e)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/notifier/log.py0000664000175000017500000000277200000000000016434 0ustar00zuulzuul00000000000000# # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Log alarm notifier.""" from oslo_log import log from aodh.i18n import _ from aodh import notifier LOG = log.getLogger(__name__) class LogAlarmNotifier(notifier.AlarmNotifier): "Log alarm notifier.""" @staticmethod def notify(action, alarm_id, alarm_name, severity, previous, current, reason, reason_data): LOG.info(_( "Notifying alarm %(alarm_name)s %(alarm_id)s of %(severity)s " "priority from %(previous)s to %(current)s with action %(action)s" " because %(reason)s.") % ({'alarm_name': alarm_name, 'alarm_id': alarm_id, 'severity': severity, 'previous': previous, 'current': current, 'action': action.geturl(), 'reason': reason})) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/notifier/rest.py0000664000175000017500000001061200000000000016620 0ustar00zuulzuul00000000000000# # Copyright 2013-2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Rest alarm notifier.""" import json from oslo_config import cfg from oslo_log import log from oslo_utils import uuidutils import requests import urllib.parse as urlparse from aodh import notifier LOG = log.getLogger(__name__) OPTS = [ cfg.StrOpt('rest_notifier_certificate_file', default='', help='SSL Client certificate file for REST notifier.' ), cfg.StrOpt('rest_notifier_certificate_key', default='', help='SSL Client private key file for REST notifier.' ), cfg.StrOpt('rest_notifier_ca_bundle_certificate_path', help='SSL CA_BUNDLE certificate for REST notifier', ), cfg.BoolOpt('rest_notifier_ssl_verify', default=True, help='Whether to verify the SSL Server certificate when ' 'calling alarm action.' ), cfg.IntOpt('rest_notifier_max_retries', default=0, help='Number of retries for REST notifier', ), ] class RestAlarmNotifier(notifier.AlarmNotifier): """Rest alarm notifier.""" def __init__(self, conf): super(RestAlarmNotifier, self).__init__(conf) self.conf = conf def notify(self, action, alarm_id, alarm_name, severity, previous, current, reason, reason_data, headers=None): headers = headers or {} if 'x-openstack-request-id' not in headers: headers['x-openstack-request-id'] = b'req-' + \ uuidutils.generate_uuid().encode('ascii') LOG.info( "Notifying alarm %(alarm_name)s %(alarm_id)s with severity" " %(severity)s from %(previous)s to %(current)s with action " "%(action)s because %(reason)s. request-id: %(request_id)s " % ({'alarm_name': alarm_name, 'alarm_id': alarm_id, 'severity': severity, 'previous': previous, 'current': current, 'action': action, 'reason': reason, 'request_id': headers['x-openstack-request-id']})) body = {'alarm_name': alarm_name, 'alarm_id': alarm_id, 'severity': severity, 'previous': previous, 'current': current, 'reason': reason, 'reason_data': reason_data} headers['content-type'] = 'application/json' kwargs = {'data': json.dumps(body), 'headers': headers} if action.scheme == 'https': default_verify = int(self.conf.rest_notifier_ssl_verify) options = urlparse.parse_qs(action.query) verify = bool(int(options.get('aodh-alarm-ssl-verify', [default_verify])[-1])) if verify and self.conf.rest_notifier_ca_bundle_certificate_path: verify = self.conf.rest_notifier_ca_bundle_certificate_path kwargs['verify'] = verify cert = self.conf.rest_notifier_certificate_file key = self.conf.rest_notifier_certificate_key if cert: kwargs['cert'] = (cert, key) if key else cert # FIXME(rhonjo): Retries are automatically done by urllib3 in requests # library. However, there's no interval between retries in urllib3 # implementation. It will be better to put some interval between # retries (future work). 
max_retries = self.conf.rest_notifier_max_retries session = requests.Session() session.mount(action.geturl(), requests.adapters.HTTPAdapter(max_retries=max_retries)) resp = session.post(action.geturl(), **kwargs) LOG.info('Notifying alarm <%(id)s> gets response: %(status_code)s ' '%(reason)s.', {'id': alarm_id, 'status_code': resp.status_code, 'reason': resp.reason}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/notifier/test.py0000664000175000017500000000243600000000000016627 0ustar00zuulzuul00000000000000# # Copyright 2013-2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test alarm notifier.""" from aodh import notifier class TestAlarmNotifier(notifier.AlarmNotifier): "Test alarm notifier.""" def __init__(self, conf): super(TestAlarmNotifier, self).__init__(conf) self.notifications = [] def notify(self, action, alarm_id, alarm_name, severity, previous, current, reason, reason_data): self.notifications.append((action, alarm_id, alarm_name, severity, previous, current, reason, reason_data)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/notifier/trust.py0000664000175000017500000000414500000000000017030 0ustar00zuulzuul00000000000000# # Copyright 2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Rest alarm notifier with trusted authentication.""" from urllib import parse from aodh import keystone_client from aodh.notifier import rest class TrustAlarmNotifierMixin(object): """Mixin class to add Keystone trust support to an AlarmNotifier. Provides a notify() method that interprets the trust ID and then calls the parent class's notify(), passing the necessary authentication data in the headers. """ def notify(self, action, alarm_id, alarm_name, severity, previous, current, reason, reason_data): trust_id = action.username client = keystone_client.get_trusted_client(self.conf, trust_id) # Remove the fake user netloc = action.netloc.split("@")[1] # Remove the trust prefix scheme = action.scheme[6:] action = parse.SplitResult(scheme, netloc, action.path, action.query, action.fragment) headers = {'X-Auth-Token': keystone_client.get_auth_token(client)} super(TrustAlarmNotifierMixin, self).notify( action, alarm_id, alarm_name, severity, previous, current, reason, reason_data, headers) class TrustRestAlarmNotifier(TrustAlarmNotifierMixin, rest.RestAlarmNotifier): """Notifier supporting keystone trust authentication. 
This alarm notifier is intended to be used to call an endpoint using keystone authentication. It uses the aodh service user to authenticate using the trust ID provided. The URL must be in the form ``trust+http://host/action``. """ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/notifier/zaqar.py0000664000175000017500000002124700000000000016767 0ustar00zuulzuul00000000000000# # Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Zaqar alarm notifier.""" from oslo_config import cfg from oslo_log import log from urllib import parse as urlparse from aodh import keystone_client from aodh import notifier from aodh.notifier import trust LOG = log.getLogger(__name__) SERVICE_OPTS = [ cfg.StrOpt('zaqar', default='messaging', help='Message queue service type.'), ] class ZaqarAlarmNotifier(notifier.AlarmNotifier): """Zaqar notifier. This notifier posts alarm notifications either to a Zaqar subscription or to an existing Zaqar queue with a pre-signed URL. To create a new subscription in the service project, use a notification URL of the form:: zaqar://?topic=example&subscriber=mailto%3A//test%40example.com&ttl=3600 Multiple subscribers are allowed. ``ttl`` is the time to live of the subscription. The queue will be created automatically, in the service project, with a name based on the topic and the alarm ID. 
To use a pre-signed URL for an existing queue, use a notification URL with the scheme ``zaqar://`` and the pre-signing data from Zaqar in the query string:: zaqar://?queue_name=example&project_id=foo& paths=/messages&methods=POST&expires=1970-01-01T00:00Z& signature=abcdefg """ def __init__(self, conf): super(ZaqarAlarmNotifier, self).__init__(conf) self.conf = conf self._zclient = None self._zendpoint = None def _get_endpoint(self): if self._zendpoint is None: try: ks_client = keystone_client.get_client(self.conf) z_srv = ks_client.services.find( type=self.conf.service_types.zaqar) endpoint_type = self.conf.service_credentials.interface region_name = self.conf.service_credentials.region_name z_endpoint = ks_client.endpoints.find(service_id=z_srv.id, region_id=region_name, interface=endpoint_type) self._zendpoint = z_endpoint.url except Exception: LOG.error("Aodh was configured to use zaqar:// action," " but Zaqar endpoint could not be found in" " Keystone service catalog.") return self._zendpoint def _get_client_conf(self): conf = self.conf.service_credentials return { 'auth_opts': { 'backend': 'keystone', 'options': { 'os_username': conf.username, 'os_password': conf.password, 'os_project_name': conf.project_name, 'os_auth_url': conf.auth_url, 'insecure': '' } } } def get_zaqar_client(self, conf): try: from zaqarclient.queues import client as zaqar_client return zaqar_client.Client(self._get_endpoint(), version=2, conf=conf) except Exception: LOG.error("Failed to connect to Zaqar service ", exc_info=True) def _get_presigned_client_conf(self, queue_info): queue_name = queue_info.get('queue_name', [''])[0] if not queue_name: return None, None signature = queue_info.get('signature', [''])[0] expires = queue_info.get('expires', [''])[0] paths = queue_info.get('paths', [''])[0].split(',') methods = queue_info.get('methods', [''])[0].split(',') project_id = queue_info.get('project_id', [''])[0] conf = { 'auth_opts': { 'backend': 'signed-url', 'options': { 'signature': signature, 'expires': expires, 'methods': methods, 'paths': paths, 'os_project_id': project_id } } } return conf, queue_name def notify(self, action, alarm_id, alarm_name, severity, previous, current, reason, reason_data, headers=None): LOG.info( "Notifying alarm %(alarm_name)s %(alarm_id)s of %(severity)s " "priority from %(previous)s to %(current)s with action %(action)s" " because %(reason)s." % ({'alarm_name': alarm_name, 'alarm_id': alarm_id, 'severity': severity, 'previous': previous, 'current': current, 'action': action, 'reason': reason})) body = {'alarm_name': alarm_name, 'alarm_id': alarm_id, 'severity': severity, 'previous': previous, 'current': current, 'reason': reason, 'reason_data': reason_data} message = dict(body=body) self.notify_zaqar(action, message, headers) @property def client(self): if self._zclient is None: self._zclient = self.get_zaqar_client(self._get_client_conf()) return self._zclient def notify_zaqar(self, action, message, headers=None): queue_info = urlparse.parse_qs(action.query) try: # NOTE(flwang): Try to get build a pre-signed client if user has # provide enough information about that. Otherwise, go to build # a client with service account and queue name for this alarm. 
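# Illustrative sketch (not part of the original module): the two action forms
# from the class docstring map onto the branches below, e.g.
#     zaqar://?topic=example&subscriber=mailto%3A//test%40example.com&ttl=3600
# carries no queue_name, so the service-account client is used and the queue
# is named "<alarm_id>-<topic>", whereas
#     zaqar://?queue_name=example&project_id=foo&paths=/messages&methods=POST&expires=1970-01-01T00:00Z&signature=abcdefg
# carries pre-signing data and is posted with a signed-url client.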
conf, queue_name = self._get_presigned_client_conf(queue_info) if conf is None: zaqar_client = self.client else: zaqar_client = self.get_zaqar_client(conf) if queue_name is None: # queue_name is a combination of - queue_name = "%s-%s" % (message['body']['alarm_id'], queue_info.get('topic')[-1]) # create a queue in zaqar queue = zaqar_client.queue(queue_name) subscriber_list = queue_info.get('subscriber', []) ttl = int(queue_info.get('ttl', ['3600'])[-1]) for subscriber in subscriber_list: # add subscriber to the zaqar queue subscription_data = dict(subscriber=subscriber, ttl=ttl) zaqar_client.subscription(queue_name, **subscription_data) # post the message to the queue queue.post(message) except IndexError: LOG.error("Required query option missing in action %s", action) except Exception: LOG.error("Unknown error occurred; Failed to post message to" " Zaqar queue", exc_info=True) class TrustZaqarAlarmNotifier(trust.TrustAlarmNotifierMixin, ZaqarAlarmNotifier): """Zaqar notifier using a Keystone trust to post to user-defined queues. The URL must be in the form ``trust+zaqar://?queue_name=example``. """ def _get_client_conf(self, auth_token): return { 'auth_opts': { 'backend': 'keystone', 'options': { 'os_auth_token': auth_token, } } } def notify_zaqar(self, action, message, headers): queue_info = urlparse.parse_qs(action.query) try: queue_name = queue_info.get('queue_name')[-1] except IndexError: LOG.error("Required 'queue_name' query option missing in" " action %s", action) return try: conf = self._get_client_conf(headers['X-Auth-Token']) client = self.get_zaqar_client(conf) queue = client.queue(queue_name) queue.post(message) except Exception: LOG.error("Unknown error occurred; Failed to post message to" " Zaqar queue", exc_info=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/opts.py0000664000175000017500000000500400000000000015010 0ustar00zuulzuul00000000000000# Copyright 2014-2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
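# Illustrative note (an assumption, not stated in this file): list_opts()
# below follows the usual oslo.config convention and is typically referenced
# from setup.cfg so that oslo-config-generator can emit a sample aodh.conf
# covering every option group registered here.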
import itertools from keystoneauth1 import loading import aodh.api import aodh.api.controllers.v2.alarm_rules.gnocchi import aodh.api.controllers.v2.alarms import aodh.coordination import aodh.evaluator import aodh.evaluator.event import aodh.evaluator.gnocchi import aodh.evaluator.loadbalancer import aodh.evaluator.prometheus import aodh.evaluator.threshold import aodh.event import aodh.keystone_client import aodh.notifier.rest import aodh.notifier.zaqar import aodh.service import aodh.storage def list_opts(): return [ ('DEFAULT', itertools.chain( aodh.evaluator.OPTS, aodh.evaluator.event.OPTS, aodh.evaluator.prometheus.OPTS, aodh.evaluator.threshold.OPTS, aodh.evaluator.loadbalancer.OPTS, aodh.notifier.rest.OPTS, aodh.queue.OPTS, aodh.service.OPTS)), ('api', itertools.chain( aodh.api.OPTS, aodh.api.controllers.v2.alarm_rules.gnocchi.GNOCCHI_OPTS, aodh.api.controllers.v2.alarms.ALARM_API_OPTS)), ('coordination', aodh.coordination.OPTS), ('database', aodh.storage.OPTS), ('evaluator', aodh.service.EVALUATOR_OPTS), ('listener', itertools.chain(aodh.service.LISTENER_OPTS, aodh.event.OPTS)), ('notifier', itertools.chain(aodh.notifier.OPTS, aodh.service.NOTIFIER_OPTS)), ('service_credentials', aodh.keystone_client.OPTS), ('service_types', aodh.notifier.zaqar.SERVICE_OPTS), ] def list_keystoneauth_opts(): # NOTE(sileht): the configuration file contains only the options # for the password plugin that handles keystone v2 and v3 API # with discovery. But other options are possible. return [('service_credentials', ( loading.get_auth_common_conf_options() + loading.get_auth_plugin_conf_options('password')))] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/profiler.py0000664000175000017500000000434500000000000015654 0ustar00zuulzuul00000000000000# Copyright 2017 Fujitsu Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import socket from oslo_log import log from oslo_utils import importutils import webob.dec profiler = importutils.try_import('osprofiler.profiler') profiler_initializer = importutils.try_import('osprofiler.initializer') profiler_web = importutils.try_import('osprofiler.web') LOG = log.getLogger(__name__) class WsgiMiddleware(object): def __init__(self, application, **kwargs): self.application = application @classmethod def factory(cls, global_conf, **local_conf): if profiler_web: return profiler_web.WsgiMiddleware.factory(global_conf) def filter_(app): return cls(app) return filter_ @webob.dec.wsgify def __call__(self, request): return request.get_response(self.application) def setup(conf): if hasattr(conf, 'profiler') and conf.profiler.enabled: profiler_initializer.init_from_conf( conf=conf, context={}, project=conf.project, service=conf.prog, host=socket.gethostbyname(socket.gethostname())) LOG.info('OSprofiler is enabled.') def trace_cls(name, **kwargs): """Wrap the OSprofiler trace_cls. Wrap the OSprofiler trace_cls decorator so that it will not try to patch the class unless OSprofiler is present. 
:param name: The name of action. For example, wsgi, rpc, db, ... :param kwargs: Any other keyword args used by profiler.trace_cls """ def decorator(cls): if profiler: trace_decorator = profiler.trace_cls(name, **kwargs) return trace_decorator(cls) return cls return decorator ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/queue.py0000664000175000017500000000407500000000000015156 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log import oslo_messaging from aodh import messaging from aodh.storage import models OPTS = [ cfg.StrOpt('notifier_topic', default='alarming', help='The topic that aodh uses for alarm notifier ' 'messages.'), ] LOG = log.getLogger(__name__) class AlarmNotifier(object): def __init__(self, conf): self.notifier = oslo_messaging.Notifier( messaging.get_transport(conf), driver='messagingv2', publisher_id="alarming.evaluator", topics=[conf.notifier_topic]) def notify(self, alarm, previous, reason, reason_data): actions = getattr(alarm, models.Alarm.ALARM_ACTIONS_MAP[alarm.state]) if not actions: LOG.debug('alarm %(alarm_id)s has no action configured ' 'for state transition from %(previous)s to ' 'state %(state)s, skipping the notification.', {'alarm_id': alarm.alarm_id, 'previous': previous, 'state': alarm.state}) return payload = {'actions': actions, 'alarm_id': alarm.alarm_id, 'alarm_name': alarm.name, 'severity': alarm.severity, 'previous': previous, 'current': alarm.state, 'reason': str(reason), 'reason_data': reason_data} self.notifier.sample({}, 'alarm.update', payload) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/service.py0000664000175000017500000000745200000000000015474 0ustar00zuulzuul00000000000000# # Copyright 2013-2017 Red Hat, Inc # Copyright 2012-2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
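# Illustrative usage (a sketch, not part of the original module): callers
# bootstrap configuration with something like
#     from aodh import service
#     conf = service.prepare_service([])
# as the alembic env.py elsewhere in this tree does, so that all option
# groups are registered before the storage or messaging layers are touched.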
import os from keystoneauth1 import loading as ka_loading from oslo_config import cfg from oslo_db import options as db_options import oslo_i18n from oslo_log import log from oslo_policy import opts as policy_opts from oslo_reports import guru_meditation_report as gmr from oslo_utils import importutils from aodh.conf import defaults from aodh import keystone_client from aodh import messaging from aodh import profiler from aodh import version profiler_opts = importutils.try_import('osprofiler.opts') OPTS = [ cfg.IntOpt('http_timeout', default=600, deprecated_for_removal=True, deprecated_reason=('This parameter is not used now and has no ' 'effect.'), help='Timeout seconds for HTTP requests. Set it to None to ' 'disable timeout.'), ] EVALUATOR_OPTS = [ cfg.IntOpt('workers', default=1, min=1, help='Number of workers for evaluator service. ' 'default value is 1.'), cfg.IntOpt('evaluation_interval', default=60, deprecated_group='DEFAULT', help='Period of evaluation cycle, should' ' be >= than configured pipeline interval for' ' collection of underlying meters.'), ] NOTIFIER_OPTS = [ cfg.IntOpt('workers', default=1, min=1, help='Number of workers for notifier service. ' 'default value is 1.') ] LISTENER_OPTS = [ cfg.IntOpt('workers', default=1, min=1, help='Number of workers for listener service. ' 'default value is 1.') ] def prepare_service(argv=None, config_files=None): conf = cfg.ConfigOpts() oslo_i18n.enable_lazy() log.register_options(conf) log_levels = ( conf.default_log_levels + [ 'futurist=INFO', 'keystoneclient=INFO', 'oslo_db.sqlalchemy=WARN', 'cotyledon=INFO' ] ) log.set_defaults(default_log_levels=log_levels) defaults.set_cors_middleware_defaults() db_options.set_defaults(conf) if profiler_opts: profiler_opts.set_defaults(conf) policy_opts.set_defaults(conf, policy_file=os.path.abspath( os.path.join(os.path.dirname(__file__), "api", "policy.yaml"))) from aodh import opts # Register our own Aodh options for group, options in opts.list_opts(): conf.register_opts(list(options), group=None if group == "DEFAULT" else group) keystone_client.register_keystoneauth_opts(conf) conf(argv, project='aodh', validate_default_values=True, default_config_files=config_files, version=version.version_info.version_string()) ka_loading.load_auth_from_conf_options(conf, "service_credentials") log.setup(conf, 'aodh') # NOTE(tkajinam): guru cannot run with service under apache daemon, so when # aod-api running with mod_wsgi, the argv is [], we don't start guru. if argv: gmr.TextGuruMeditation.setup_autorun(version) profiler.setup(conf) messaging.setup() return conf ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0790129 aodh-19.0.0/aodh/storage/0000775000175000017500000000000000000000000015116 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/storage/__init__.py0000664000175000017500000001175200000000000017235 0ustar00zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Storage backend management """ import datetime from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils from stevedore import driver import tenacity from urllib import parse as urlparse _NAMESPACE = 'aodh.storage' LOG = log.getLogger(__name__) OPTS = [ cfg.IntOpt('alarm_history_time_to_live', default=-1, help=("Number of seconds that alarm histories are kept " "in the database for (<= 0 means forever).")), cfg.IntOpt('alarm_histories_delete_batch_size', default=0, min=0, help=("Number of alarm histories to be deleted in one " "iteration from the database (0 means all).")), ] class StorageBadVersion(Exception): """Error raised when the storage backend version is not good enough.""" class AlarmNotFound(Exception): """Error raised when the needed resource not found.""" def __init__(self, alarm_id): self.alarm_id = alarm_id super(AlarmNotFound, self).__init__("Alarm %s not found" % alarm_id) class InvalidMarker(Exception): """Invalid pagination marker parameters""" def get_connection_from_config(conf): retries = conf.database.max_retries url = conf.database.connection connection_scheme = urlparse.urlparse(url).scheme LOG.debug('looking for %(name)r driver in %(namespace)r', {'name': connection_scheme, 'namespace': _NAMESPACE}) mgr = driver.DriverManager(_NAMESPACE, connection_scheme) @tenacity.retry( wait=tenacity.wait_fixed(conf.database.retry_interval), stop=tenacity.stop_after_attempt(retries if retries >= 0 else 5), reraise=True) def _get_connection(): """Return an open connection to the database.""" return mgr.driver(conf) return _get_connection() class SampleFilter(object): """Holds the properties for building a query from a meter/sample filter. :param user: The sample owner. :param project: The sample project. :param start_timestamp: Earliest time point in the request. :param start_timestamp_op: Earliest timestamp operation in the request. :param end_timestamp: Latest time point in the request. :param end_timestamp_op: Latest timestamp operation in the request. :param resource: Optional filter for resource id. :param meter: Optional filter for meter type using the meter name. :param source: Optional source filter. :param message_id: Optional sample_id filter. 
:param metaquery: Optional filter on the metadata """ def __init__(self, user=None, project=None, start_timestamp=None, start_timestamp_op=None, end_timestamp=None, end_timestamp_op=None, resource=None, meter=None, source=None, message_id=None, metaquery=None): self.user = user self.project = project self.start_timestamp = self.sanitize_timestamp(start_timestamp) self.start_timestamp_op = start_timestamp_op self.end_timestamp = self.sanitize_timestamp(end_timestamp) self.end_timestamp_op = end_timestamp_op self.resource = resource self.meter = meter self.source = source self.metaquery = metaquery or {} self.message_id = message_id @staticmethod def sanitize_timestamp(timestamp): """Return a naive utc datetime object.""" if not timestamp: return timestamp if not isinstance(timestamp, datetime.datetime): timestamp = timeutils.parse_isotime(timestamp) return timeutils.normalize_time(timestamp) def __repr__(self): return ("" % (self.user, self.project, self.start_timestamp, self.start_timestamp_op, self.end_timestamp, self.end_timestamp_op, self.resource, self.meter, self.source, self.metaquery, self.message_id)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/storage/base.py0000664000175000017500000001732400000000000016411 0ustar00zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Base classes for storage engines """ import copy import aodh from aodh.utils import get_func_valid_keys def update_nested(original_dict, updates): """Updates the leaf nodes in a nest dict. Updates occur without replacing entire sub-dicts. """ dict_to_update = copy.deepcopy(original_dict) for key, value in updates.items(): if isinstance(value, dict): sub_dict = update_nested(dict_to_update.get(key, {}), value) dict_to_update[key] = sub_dict else: dict_to_update[key] = updates[key] return dict_to_update class Model(object): """base class for storage api models.""" def __init__(self, **kwds): self.fields = list(kwds) for k, v in kwds.items(): setattr(self, k, v) def as_dict(self): d = {} for f in self.fields: v = getattr(self, f) if isinstance(v, Model): v = v.as_dict() elif isinstance(v, list) and v and isinstance(v[0], Model): v = [sub.as_dict() for sub in v] d[f] = v return d def __eq__(self, other): return self.as_dict() == other.as_dict() def __ne__(self, other): return not self.__eq__(other) @classmethod def get_field_names(cls): fields = get_func_valid_keys(cls.__init__) return set(fields) - set(["self"]) class Connection(object): """Base class for alarm storage system connections.""" # A dictionary representing the capabilities of this driver. 
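# Illustrative example (not from the original source): concrete drivers merge
# their supported features into this baseline with update_nested(), as the
# SQLAlchemy backend does, e.g.
#     update_nested(Connection.CAPABILITIES,
#                   {'alarms': {'query': {'simple': True}}})
# returns a deep copy in which only that leaf is switched to True.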
CAPABILITIES = { 'alarms': {'query': {'simple': False, 'complex': False}, 'history': {'query': {'simple': False, 'complex': False}}}, } STORAGE_CAPABILITIES = { 'storage': {'production_ready': False}, } def __init__(self, conf): pass @staticmethod def upgrade(): """Migrate the database to `version` or the most recent version.""" @staticmethod def get_alarms(*args, **kwargs): """Yields a lists of alarms that match filters.""" raise aodh.NotImplementedError('Alarms not implemented') @staticmethod def create_alarm(alarm): """Create an alarm. Returns the alarm as created. :param alarm: The alarm to create. """ raise aodh.NotImplementedError('Alarms not implemented') @staticmethod def update_alarm(alarm): """Update alarm.""" raise aodh.NotImplementedError('Alarms not implemented') @staticmethod def delete_alarm(alarm_id): """Delete an alarm and its history data.""" raise aodh.NotImplementedError('Alarms not implemented') @staticmethod def get_alarm_changes(alarm_id, on_behalf_of, user=None, project=None, alarm_type=None, severity=None, start_timestamp=None, start_timestamp_op=None, end_timestamp=None, end_timestamp_op=None, pagination=None): """Yields list of AlarmChanges describing alarm history Changes are always sorted in reverse order of occurrence, given the importance of currency. Segregation for non-administrative users is done on the basis of the on_behalf_of parameter. This allows such users to have visibility on both the changes initiated by themselves directly (generally creation, rule changes, or deletion) and also on those changes initiated on their behalf by the alarming service (state transitions after alarm thresholds are crossed). :param alarm_id: ID of alarm to return changes for :param on_behalf_of: ID of tenant to scope changes query (None for administrative user, indicating all projects) :param user: Optional ID of user to return changes for :param project: Optional ID of project to return changes for :param alarm_type: Optional change type :param severity: Optional change severity :param start_timestamp: Optional modified timestamp start range :param start_timestamp_op: Optional timestamp start range operation :param end_timestamp: Optional modified timestamp end range :param end_timestamp_op: Optional timestamp end range operation :param pagination: Pagination parameters. """ raise aodh.NotImplementedError('Alarm history not implemented') @staticmethod def record_alarm_change(alarm_change): """Record alarm change event.""" raise aodh.NotImplementedError('Alarm history not implemented') @staticmethod def clear(): """Clear database.""" @staticmethod def query_alarms(filter_expr=None, orderby=None, limit=None): """Return an iterable of model.Alarm objects. :param filter_expr: Filter expression for query. :param orderby: List of field name and direction pairs for order by. :param limit: Maximum number of results to return. """ raise aodh.NotImplementedError('Complex query for alarms ' 'is not implemented.') @staticmethod def query_alarm_history(filter_expr=None, orderby=None, limit=None): """Return an iterable of model.AlarmChange objects. :param filter_expr: Filter expression for query. :param orderby: List of field name and direction pairs for order by. :param limit: Maximum number of results to return. 
""" raise aodh.NotImplementedError('Complex query for alarms ' 'history is not implemented.') @classmethod def get_capabilities(cls): """Return an dictionary with the capabilities of each driver.""" return cls.CAPABILITIES @classmethod def get_storage_capabilities(cls): """Return a dictionary representing the performance capabilities. This is needed to evaluate the performance of each driver. """ return cls.STORAGE_CAPABILITIES @staticmethod def clear_expired_alarm_history_data(ttl, max_count=None): """Clear expired alarm history data from the backend storage system. Clearing occurs according to the time-to-live. :param ttl: Number of seconds to keep alarm history records for. :param max_count: Number of records to delete. """ raise aodh.NotImplementedError('Clearing alarm history ' 'not implemented') @staticmethod def get_quotas(project_id): """Get resource quota for the given project.""" raise aodh.NotImplementedError('Getting resource quota not ' 'implemented') @staticmethod def set_quotas(project_id, quotas): """Set resource quota for the given user.""" raise aodh.NotImplementedError('Setting resource quota not ' 'implemented') @staticmethod def delete_quotas(project_id): raise aodh.NotImplementedError('Deleting resource quota not ' 'implemented') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/storage/impl_log.py0000664000175000017500000000363200000000000017276 0ustar00zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Simple logging storage backend. """ from oslo_log import log from aodh.storage import base LOG = log.getLogger(__name__) class Connection(base.Connection): """Log the data.""" @staticmethod def upgrade(): pass @staticmethod def clear(): pass @staticmethod def get_alarms(name=None, user=None, state=None, meter=None, project=None, enabled=None, alarm_id=None, alarm_type=None, severity=None, exclude=None, pagination=None): """Yields a lists of alarms that match filters.""" return [] @staticmethod def create_alarm(alarm): """Create alarm.""" return alarm @staticmethod def update_alarm(alarm): """Update alarm.""" return alarm @staticmethod def delete_alarm(alarm_id): """Delete an alarm and its history data.""" @staticmethod def clear_expired_alarm_history_data(ttl, max_count=None): """Clear expired alarm history data from the backend storage system. Clearing occurs according to the time-to-live. :param ttl: Number of seconds to keep alarm history records for. :param max_count: Number of records to delete. """ LOG.info('Dropping alarm history %d data with TTL %d', max_count, ttl) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/storage/impl_sqlalchemy.py0000664000175000017500000004677700000000000020700 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """SQLAlchemy storage backend.""" import copy import datetime import os.path import threading from alembic import command from alembic import config from alembic import migration from oslo_db.sqlalchemy import enginefacade from oslo_db.sqlalchemy import utils as oslo_sql_utils from oslo_log import log from oslo_utils import importutils from oslo_utils import timeutils import sqlalchemy from sqlalchemy import asc from sqlalchemy import desc from sqlalchemy.engine import url as sqlalchemy_url from sqlalchemy import func from sqlalchemy.orm import exc import aodh from aodh import storage from aodh.storage import base from aodh.storage import models as alarm_api_models from aodh.storage.sqlalchemy import models from aodh.storage.sqlalchemy import utils as sql_utils DB_CONFIGURED = False osprofiler_sqlalchemy = importutils.try_import('osprofiler.sqlalchemy') _CONTEXT = threading.local() LOG = log.getLogger(__name__) AVAILABLE_CAPABILITIES = { 'alarms': {'query': {'simple': True, 'complex': True}, 'history': {'query': {'simple': True, 'complex': True}}}, } AVAILABLE_STORAGE_CAPABILITIES = { 'storage': {'production_ready': True}, } def apply_filters(query, model, **filters): filter_dict = {} for key, value in filters.items(): column_attr = getattr(model, key) if isinstance(value, dict): if 'in' in value: query = query.filter(column_attr.in_(value['in'])) elif 'nin' in value: query = query.filter(~column_attr.in_(value['nin'])) elif 'ne' in value: query = query.filter(column_attr != value['ne']) elif 'gt' in value: query = query.filter(column_attr > value['gt']) elif 'ge' in value: query = query.filter(column_attr >= value['ge']) elif 'lt' in value: query = query.filter(column_attr < value['lt']) elif 'le' in value: query = query.filter(column_attr <= value['le']) elif 'eq' in value: query = query.filter(column_attr == value['eq']) elif 'has' in value: like_pattern = '%{0}%'.format(value['has']) query = query.filter(column_attr.like(like_pattern)) else: filter_dict[key] = value if filter_dict: query = query.filter_by(**filter_dict) return query def _session_for_read(): session = enginefacade.reader.using(_CONTEXT) if osprofiler_sqlalchemy: session = osprofiler_sqlalchemy.wrap_session(sqlalchemy, session) return session def _session_for_write(): session = enginefacade.writer.using(_CONTEXT) if osprofiler_sqlalchemy: session = osprofiler_sqlalchemy.wrap_session(sqlalchemy, session) return session class Connection(base.Connection): """Put the data into a SQLAlchemy database. 
""" CAPABILITIES = base.update_nested(base.Connection.CAPABILITIES, AVAILABLE_CAPABILITIES) STORAGE_CAPABILITIES = base.update_nested( base.Connection.STORAGE_CAPABILITIES, AVAILABLE_STORAGE_CAPABILITIES, ) def __init__(self, conf): # Set max_retries to 0, since oslo.db in certain cases may attempt # to retry making the db connection retried max_retries ^ 2 times # in failure case and db reconnection has already been implemented # in storage.__init__.get_connection_from_config function global DB_CONFIGURED if not DB_CONFIGURED: options = dict(conf.database.items()) options['connection'] = self.dress_url(conf.database.connection) options['max_retries'] = 0 options['sqlite_fk'] = True # oslo.db doesn't support options defined by Aodh for opt in storage.OPTS: options.pop(opt.name, None) enginefacade.configure(**options) DB_CONFIGURED = True self.conf = conf @staticmethod def dress_url(url): # If no explicit driver has been set, we default to pymysql if url.startswith("mysql://"): url = sqlalchemy_url.make_url(url) url = url.set(drivername="mysql+pymysql") return str(url) return url def _get_alembic_config(self): cfg = config.Config( "%s/sqlalchemy/alembic/alembic.ini" % os.path.dirname(__file__)) cfg.set_main_option('sqlalchemy.url', self.conf.database.connection.replace("%", "%%")) return cfg def upgrade(self, nocreate=False): cfg = self._get_alembic_config() cfg.conf = self.conf engine = enginefacade.writer.get_engine() with engine.connect() as conn, conn.begin(): cfg.attributes['connection'] = conn if nocreate: command.upgrade(cfg, "head") else: ctxt = migration.MigrationContext.configure(conn) current_version = ctxt.get_current_revision() if current_version is None: models.Base.metadata.create_all(conn, checkfirst=False) command.stamp(cfg, "head") else: command.upgrade(cfg, "head") def clear(self): engine = enginefacade.writer.get_engine() for table in reversed(models.Base.metadata.sorted_tables): with engine.connect() as conn, conn.begin(): conn.execute(table.delete()) engine.dispose() def _retrieve_data(self, filter_expr, orderby, limit, table): if limit == 0: return [] engine = enginefacade.reader.get_engine() with _session_for_read() as session: query = session.query(table) transformer = sql_utils.QueryTransformer(table, query, dialect=engine.dialect.name) if filter_expr is not None: transformer.apply_filter(filter_expr) transformer.apply_options(orderby, limit) retrieve = {models.Alarm: self._retrieve_alarms, models.AlarmChange: self._retrieve_alarm_history} return retrieve[table](transformer.get_query()) @staticmethod def _row_to_alarm_model(row): return alarm_api_models.Alarm( alarm_id=row.alarm_id, enabled=row.enabled, type=row.type, name=row.name, description=row.description, timestamp=row.timestamp, user_id=row.user_id, project_id=row.project_id, state=row.state, state_timestamp=row.state_timestamp, state_reason=row.state_reason, ok_actions=row.ok_actions, alarm_actions=row.alarm_actions, insufficient_data_actions=(row.insufficient_data_actions), rule=row.rule, time_constraints=row.time_constraints, repeat_actions=row.repeat_actions, severity=row.severity, evaluate_timestamp=row.evaluate_timestamp ) def _retrieve_alarms(self, query): return [self._row_to_alarm_model(x) for x in query.all()] @staticmethod def _get_pagination_query(session, query, pagination, api_model, model): if not pagination.get('sort'): pagination['sort'] = api_model.DEFAULT_SORT marker = None if pagination.get('marker'): key_attr = getattr(model, api_model.PRIMARY_KEY) marker_query = copy.copy(query) 
marker_query = marker_query.filter( key_attr == pagination['marker']) try: marker = marker_query.limit(1).one() except exc.NoResultFound: raise storage.InvalidMarker( 'Marker %s not found.' % pagination['marker']) limit = pagination.get('limit') # we sort by "severity" by its semantic than its alphabetical # order when "severity" specified in sorts. for sort_key, sort_dir in pagination['sort'][::-1]: if sort_key == 'severity': engine = session.connection() if engine.dialect.name != "mysql": raise aodh.NotImplementedError sort_dir_func = {'asc': asc, 'desc': desc}[sort_dir] query = query.order_by(sort_dir_func( func.field(getattr(model, sort_key), 'low', 'moderate', 'critical'))) pagination['sort'].remove((sort_key, sort_dir)) sort_keys = [s[0] for s in pagination['sort']] sort_dirs = [s[1] for s in pagination['sort']] return oslo_sql_utils.paginate_query( query, model, limit, sort_keys, sort_dirs=sort_dirs, marker=marker) def get_alarms(self, meter=None, pagination=None, **kwargs): """Yields a lists of alarms that match filters.""" pagination = pagination or {} with _session_for_read() as session: query = session.query(models.Alarm) query = apply_filters(query, models.Alarm, **kwargs) query = self._get_pagination_query( session, query, pagination, alarm_api_models.Alarm, models.Alarm, ) alarms = [self._row_to_alarm_model(x) for x in query.all()] # TODO(cmart): improve this by using sqlalchemy.func factory if meter is not None: alarms = filter(lambda row: row.rule.get('meter_name', None) == meter, alarms) return alarms def create_alarm(self, alarm): """Create an alarm. :param alarm: The alarm to create. """ with _session_for_write() as session: alarm_row = models.Alarm(alarm_id=alarm.alarm_id) alarm_row.update(alarm.as_dict()) session.add(alarm_row) return self._row_to_alarm_model(alarm_row) def update_alarm(self, alarm): """Update an alarm. :param alarm: the new Alarm to update """ with _session_for_write() as session: count = session.query(models.Alarm).filter( models.Alarm.alarm_id == alarm.alarm_id, ).update(alarm.as_dict()) if not count: raise storage.AlarmNotFound(alarm.alarm_id) return alarm def delete_alarm(self, alarm_id): """Delete an alarm and its history data. 
:param alarm_id: ID of the alarm to delete """ with _session_for_write() as session: session.query(models.Alarm).filter( models.Alarm.alarm_id == alarm_id, ).delete() # FIXME(liusheng): we should use delete cascade session.query(models.AlarmChange).filter( models.AlarmChange.alarm_id == alarm_id, ).delete() @staticmethod def _row_to_alarm_change_model(row): return alarm_api_models.AlarmChange(event_id=row.event_id, alarm_id=row.alarm_id, type=row.type, detail=row.detail, user_id=row.user_id, project_id=row.project_id, on_behalf_of=row.on_behalf_of, timestamp=row.timestamp, severity=row.severity) def query_alarms(self, filter_expr=None, orderby=None, limit=None): """Yields a lists of alarms that match filter.""" return self._retrieve_data(filter_expr, orderby, limit, models.Alarm) def _retrieve_alarm_history(self, query): return (self._row_to_alarm_change_model(x) for x in query.all()) def query_alarm_history(self, filter_expr=None, orderby=None, limit=None): """Return an iterable of model.AlarmChange objects.""" return self._retrieve_data(filter_expr, orderby, limit, models.AlarmChange) def get_alarm_changes(self, alarm_id, on_behalf_of, user=None, project=None, alarm_type=None, severity=None, start_timestamp=None, start_timestamp_op=None, end_timestamp=None, end_timestamp_op=None, pagination=None): """Yields list of AlarmChanges describing alarm history Changes are always sorted in reverse order of occurrence, given the importance of currency. Segregation for non-administrative users is done on the basis of the on_behalf_of parameter. This allows such users to have visibility on both the changes initiated by themselves directly (generally creation, rule changes, or deletion) and also on those changes initiated on their behalf by the alarming service (state transitions after alarm thresholds are crossed). :param alarm_id: ID of alarm to return changes for :param on_behalf_of: ID of tenant to scope changes query (None for administrative user, indicating all projects) :param user: Optional ID of user to return changes for :param project: Optional ID of project to return changes for :param alarm_type: Optional change type :param severity: Optional alarm severity :param start_timestamp: Optional modified timestamp start range :param start_timestamp_op: Optional timestamp start range operation :param end_timestamp: Optional modified timestamp end range :param end_timestamp_op: Optional timestamp end range operation :param pagination: Pagination query parameters. 
""" pagination = pagination or {} with _session_for_read() as session: query = session.query(models.AlarmChange) query = query.filter(models.AlarmChange.alarm_id == alarm_id) if on_behalf_of is not None: query = query.filter( models.AlarmChange.on_behalf_of == on_behalf_of) if user is not None: query = query.filter(models.AlarmChange.user_id == user) if project is not None: query = query.filter(models.AlarmChange.project_id == project) if alarm_type is not None: query = query.filter(models.AlarmChange.type == alarm_type) if severity is not None: query = query.filter(models.AlarmChange.severity == severity) if start_timestamp: if start_timestamp_op == 'gt': query = query.filter( models.AlarmChange.timestamp > start_timestamp) else: query = query.filter( models.AlarmChange.timestamp >= start_timestamp) if end_timestamp: if end_timestamp_op == 'le': query = query.filter( models.AlarmChange.timestamp <= end_timestamp) else: query = query.filter( models.AlarmChange.timestamp < end_timestamp) query = self._get_pagination_query( session, query, pagination, alarm_api_models.AlarmChange, models.AlarmChange) return (self._row_to_alarm_change_model(x) for x in query.all()) def record_alarm_change(self, alarm_change): """Record alarm change event.""" with _session_for_write() as session: alarm_change_row = models.AlarmChange( event_id=alarm_change['event_id']) alarm_change_row.update(alarm_change) session.add(alarm_change_row) def clear_expired_alarm_history_data(self, ttl, max_count=100): """Clear expired alarm history data from the backend storage system. Clearing occurs according to the time-to-live. :param ttl: Number of seconds to keep alarm history records for. :param max_count: Number of records to delete. """ with _session_for_write() as session: end = timeutils.utcnow() - datetime.timedelta(seconds=ttl) alarm_history_q = (session.query(models.AlarmChange.event_id) .filter(models.AlarmChange.timestamp < end)) event_ids = [i[0] for i in alarm_history_q.limit(max_count)] deleted_rows = session.query(models.AlarmChange).filter( models.AlarmChange.event_id.in_(event_ids) ).delete(synchronize_session="fetch") LOG.info("%d alarm histories are removed from database", deleted_rows) def conditional_update(self, model, values, expected_values, filters=None): """Compare-and-swap conditional update SQLAlchemy implementation.""" filters = filters or {} filters.update(expected_values) with _session_for_write() as session: query = session.query(model) if filters: query = query.filter_by(**filters) update_args = {'synchronize_session': False} result = query.update(values, **update_args) return 0 != result @staticmethod def _row_to_quota_model(row): return alarm_api_models.Quota( project_id=row.project_id, resource=row.resource, limit=row.limit, ) def get_quotas(self, project_id): """Get resource quota for the given project.""" filters = {'project_id': project_id} with _session_for_read() as session: query = session.query(models.Quota).filter_by(**filters) return [self._row_to_quota_model(x) for x in query.all()] def set_quotas(self, project_id, quotas): """Set resource quota for the given user.""" with _session_for_write() as session: for q in quotas: values = { 'project_id': project_id, 'resource': q['resource'], } quota = session.query(models.Quota).filter_by(**values).first() if not quota: new_quota = models.Quota(project_id=project_id, resource=q['resource'], limit=q['limit']) session.add(new_quota) else: values['limit'] = q['limit'] quota.update(values.copy()) filters = {'project_id': project_id} query = 
session.query(models.Quota).filter_by(**filters) return [self._row_to_quota_model(x) for x in query.all()] def delete_quotas(self, project_id): filters = {'project_id': project_id} with _session_for_write() as session: session.query(models.Quota).filter_by(**filters).delete() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/storage/models.py0000664000175000017500000001342300000000000016756 0ustar00zuulzuul00000000000000# # Copyright 2013 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Model classes for use in the storage API. """ import datetime from aodh.i18n import _ from aodh.storage import base class Alarm(base.Model): ALARM_INSUFFICIENT_DATA = 'insufficient data' ALARM_OK = 'ok' ALARM_ALARM = 'alarm' ALARM_ACTIONS_MAP = { ALARM_INSUFFICIENT_DATA: 'insufficient_data_actions', ALARM_OK: 'ok_actions', ALARM_ALARM: 'alarm_actions', } ALARM_LEVEL_LOW = 'low' ALARM_LEVEL_MODERATE = 'moderate' ALARM_LEVEL_CRITICAL = 'critical' SUPPORT_SORT_KEYS = ( 'alarm_id', 'enabled', 'name', 'type', 'severity', 'timestamp', 'user_id', 'project_id', 'state', 'repeat_actions', 'state_timestamp') DEFAULT_SORT = [('timestamp', 'desc')] PRIMARY_KEY = 'alarm_id' """ An alarm to monitor. :param alarm_id: UUID of the alarm :param type: type of the alarm :param name: The Alarm name :param description: User friendly description of the alarm :param enabled: Is the alarm enabled :param state: Alarm state (ok/alarm/insufficient data) :param state_reason: Alarm state reason :param rule: A rule that defines when the alarm fires :param user_id: the owner/creator of the alarm :param project_id: the project_id of the creator :param evaluation_periods: the number of periods :param period: the time period in seconds :param time_constraints: the list of the alarm's time constraints, if any :param timestamp: the timestamp when the alarm was last updated :param state_timestamp: the timestamp of the last state change :param ok_actions: the list of webhooks to call when entering the ok state :param alarm_actions: the list of webhooks to call when entering the alarm state :param insufficient_data_actions: the list of webhooks to call when entering the insufficient data state :param repeat_actions: Is the actions should be triggered on each alarm evaluation. :param severity: Alarm level (low/moderate/critical) :param evaluate_timestamp: The timestamp when the alarm is finished evaluating. 
""" def __init__(self, alarm_id, type, enabled, name, description, timestamp, user_id, project_id, state, state_timestamp, state_reason, ok_actions, alarm_actions, insufficient_data_actions, repeat_actions, rule, time_constraints, severity=None, evaluate_timestamp=None): if not isinstance(timestamp, datetime.datetime): raise TypeError(_("timestamp should be datetime object")) if not isinstance(state_timestamp, datetime.datetime): raise TypeError(_("state_timestamp should be datetime object")) base.Model.__init__( self, alarm_id=alarm_id, type=type, enabled=enabled, name=name, description=description, timestamp=timestamp, user_id=user_id, project_id=project_id, state=state, state_timestamp=state_timestamp, state_reason=state_reason, ok_actions=ok_actions, alarm_actions=alarm_actions, insufficient_data_actions=insufficient_data_actions, repeat_actions=repeat_actions, rule=rule, time_constraints=time_constraints, severity=severity, evaluate_timestamp=evaluate_timestamp) class AlarmChange(base.Model): """Record of an alarm change. :param event_id: UUID of the change event :param alarm_id: UUID of the alarm :param type: The type of change :param severity: The severity of alarm :param detail: JSON fragment describing change :param user_id: the user ID of the initiating identity :param project_id: the project ID of the initiating identity :param on_behalf_of: the tenant on behalf of which the change is being made :param timestamp: the timestamp of the change """ CREATION = 'creation' RULE_CHANGE = 'rule change' STATE_TRANSITION = 'state transition' DELETION = 'deletion' SUPPORT_SORT_KEYS = ( 'event_id', 'alarm_id', 'on_behalf_of', 'project_id', 'user_id', 'type', 'timestamp', 'severity') DEFAULT_SORT = [('timestamp', 'desc')] PRIMARY_KEY = 'event_id' def __init__(self, event_id, alarm_id, type, detail, user_id, project_id, on_behalf_of, severity=None, timestamp=None ): base.Model.__init__( self, event_id=event_id, alarm_id=alarm_id, type=type, severity=severity, detail=detail, user_id=user_id, project_id=project_id, on_behalf_of=on_behalf_of, timestamp=timestamp) class Quota(base.Model): def __init__(self, project_id, resource, limit): base.Model.__init__( self, project_id=project_id, resource=resource, limit=limit) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0830133 aodh-19.0.0/aodh/storage/sqlalchemy/0000775000175000017500000000000000000000000017260 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/storage/sqlalchemy/__init__.py0000664000175000017500000000000000000000000021357 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0830133 aodh-19.0.0/aodh/storage/sqlalchemy/alembic/0000775000175000017500000000000000000000000020654 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/storage/sqlalchemy/alembic/alembic.ini0000664000175000017500000000105300000000000022750 0ustar00zuulzuul00000000000000[alembic] script_location = aodh.storage.sqlalchemy:alembic sqlalchemy.url = [loggers] keys = root,sqlalchemy,alembic [handlers] keys = console [formatters] keys = generic [logger_root] level = WARN handlers = console qualname = [logger_sqlalchemy] level = WARN handlers = qualname = sqlalchemy.engine [logger_alembic] level = WARN handlers = qualname = 
alembic [handler_console] class = StreamHandler args = (sys.stderr,) level = NOTSET formatter = generic [formatter_generic] format = %(levelname)-5.5s [%(name)s] %(message)s datefmt = %H:%M:%S ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/storage/sqlalchemy/alembic/env.py0000664000175000017500000000554300000000000022025 0ustar00zuulzuul00000000000000# # Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from logging.config import fileConfig from alembic import context from oslo_db.sqlalchemy import enginefacade from aodh.storage.sqlalchemy import models # this is the Alembic Config object, which provides # access to the values within the .ini file in use. config = context.config # Interpret the config file for Python logging. # This line sets up loggers basically. fileConfig(config.config_file_name) # add your model's MetaData object here # for 'autogenerate' support # from myapp import mymodel # target_metadata = mymodel.Base.metadata target_metadata = models.Base.metadata # other values from the config, defined by the needs of env.py, # can be acquired: # my_important_option = config.get_main_option("my_important_option") # ... etc. def run_migrations_offline(): """Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output. """ conf = config.conf context.configure(url=conf.database.connection, target_metadata=target_metadata) with context.begin_transaction(): context.run_migrations() def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. """ connectable = config.attributes.get('connection', None) if connectable is None: engine = enginefacade.writer.get_engine() with engine.connect() as connection: context.configure( connection=connection, target_metadata=target_metadata ) with context.begin_transaction(): context.run_migrations() else: context.configure( connection=connectable, target_metadata=target_metadata ) with context.begin_transaction(): context.run_migrations() if not hasattr(config, "conf"): from aodh import service config.conf = service.prepare_service([]) if context.is_offline_mode(): run_migrations_offline() else: run_migrations_online() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/storage/sqlalchemy/alembic/script.py.mako0000664000175000017500000000204500000000000023461 0ustar00zuulzuul00000000000000# Copyright ${create_date.year} OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """${message} Revision ID: ${up_revision} Revises: ${down_revision | comma,n} Create Date: ${create_date} """ # revision identifiers, used by Alembic. revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} branch_labels = ${repr(branch_labels)} depends_on = ${repr(depends_on)} from alembic import op import sqlalchemy as sa ${imports if imports else ""} def upgrade(): ${upgrades if upgrades else "pass"} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0830133 aodh-19.0.0/aodh/storage/sqlalchemy/alembic/versions/0000775000175000017500000000000000000000000022524 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/storage/sqlalchemy/alembic/versions/006_add_evaluate_timestamp_to_alarm.py0000664000175000017500000000211500000000000032041 0ustar00zuulzuul00000000000000# Copyright 2019 Catalyst Cloud Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Add evaluate_timestamp column to alarm table Revision ID: 006 Revises: 6ae0d05d9451 Create Date: 2019-12-05 11:23:42.379029 """ # revision identifiers, used by Alembic. revision = '006' down_revision = '6ae0d05d9451' from alembic import op from oslo_utils import timeutils import sqlalchemy as sa def upgrade(): op.add_column( 'alarm', sa.Column('evaluate_timestamp', sa.DateTime(), nullable=True, server_default=str(timeutils.utcnow())) ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/storage/sqlalchemy/alembic/versions/007_add_quota_table.py0000664000175000017500000000247600000000000026605 0ustar00zuulzuul00000000000000# Copyright 2020 Catalyst Cloud Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Add quota table Revision ID: 007 Revises: 006 Create Date: 2020-01-28 """ # revision identifiers, used by Alembic. 
revision = '007' down_revision = '006' from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'quota', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('resource', sa.String(length=50), nullable=False), sa.Column('project_id', sa.String(length=128), nullable=False), sa.Column('limit', sa.Integer, nullable=False), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('project_id', 'resource'), sa.Index( 'ix_quota_project_id_resource', 'project_id', 'resource' ) ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/storage/sqlalchemy/alembic/versions/12fe8fac9fe4_initial_base.py0000664000175000017500000000763500000000000027603 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """initial base Revision ID: 12fe8fac9fe4 Revises: Create Date: 2015-07-28 17:38:37.022899 """ # revision identifiers, used by Alembic. revision = '12fe8fac9fe4' down_revision = None branch_labels = None depends_on = None from alembic import op import sqlalchemy as sa from sqlalchemy import types import aodh.storage.sqlalchemy.models class PreciseTimestamp(types.TypeDecorator): """Represents a timestamp precise to the microsecond.""" impl = sa.DateTime def load_dialect_impl(self, dialect): if dialect.name == 'mysql': return dialect.type_descriptor( types.DECIMAL(precision=20, scale=6, asdecimal=True)) return dialect.type_descriptor(self.impl) def upgrade(): op.create_table( 'alarm_history', sa.Column('event_id', sa.String(length=128), nullable=False), sa.Column('alarm_id', sa.String(length=128), nullable=True), sa.Column('on_behalf_of', sa.String(length=128), nullable=True), sa.Column('project_id', sa.String(length=128), nullable=True), sa.Column('user_id', sa.String(length=128), nullable=True), sa.Column('type', sa.String(length=20), nullable=True), sa.Column('detail', sa.Text(), nullable=True), sa.Column('timestamp', PreciseTimestamp(), nullable=True), sa.PrimaryKeyConstraint('event_id') ) op.create_index( 'ix_alarm_history_alarm_id', 'alarm_history', ['alarm_id'], unique=False) op.create_table( 'alarm', sa.Column('alarm_id', sa.String(length=128), nullable=False), sa.Column('enabled', sa.Boolean(), nullable=True), sa.Column('name', sa.Text(), nullable=True), sa.Column('type', sa.String(length=50), nullable=True), sa.Column('severity', sa.String(length=50), nullable=True), sa.Column('description', sa.Text(), nullable=True), sa.Column('timestamp', PreciseTimestamp(), nullable=True), sa.Column('user_id', sa.String(length=128), nullable=True), sa.Column('project_id', sa.String(length=128), nullable=True), sa.Column('state', sa.String(length=255), nullable=True), sa.Column('state_timestamp', PreciseTimestamp(), nullable=True), sa.Column('ok_actions', aodh.storage.sqlalchemy.models.JSONEncodedDict(), nullable=True), sa.Column('alarm_actions', aodh.storage.sqlalchemy.models.JSONEncodedDict(), nullable=True), sa.Column('insufficient_data_actions', 
aodh.storage.sqlalchemy.models.JSONEncodedDict(), nullable=True), sa.Column('repeat_actions', sa.Boolean(), nullable=True), sa.Column('rule', aodh.storage.sqlalchemy.models.JSONEncodedDict(), nullable=True), sa.Column('time_constraints', aodh.storage.sqlalchemy.models.JSONEncodedDict(), nullable=True), sa.PrimaryKeyConstraint('alarm_id') ) op.create_index( 'ix_alarm_project_id', 'alarm', ['project_id'], unique=False) op.create_index( 'ix_alarm_user_id', 'alarm', ['user_id'], unique=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/storage/sqlalchemy/alembic/versions/367aadf5485f_precisetimestamp_to_datetime.py0000664000175000017500000000466000000000000032747 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Copyright 2016 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """precisetimestamp_to_datetime Revision ID: 367aadf5485f Revises: f8c31b1ffe11 Create Date: 2016-09-19 16:43:34.379029 """ # revision identifiers, used by Alembic. revision = '367aadf5485f' down_revision = 'f8c31b1ffe11' branch_labels = None depends_on = None from alembic import op import sqlalchemy as sa from sqlalchemy import func from aodh.storage.sqlalchemy import models def upgrade(): bind = op.get_bind() if bind and bind.engine.name == "mysql": # NOTE(jd) So that crappy engine that is MySQL does not have "ALTER # TABLE … USING …". We need to copy everything and convert… for table_name, column_name in (("alarm", "timestamp"), ("alarm", "state_timestamp"), ("alarm_history", "timestamp")): existing_type = sa.types.DECIMAL( precision=20, scale=6, asdecimal=True) existing_col = sa.Column( column_name, existing_type, nullable=True) temp_col = sa.Column( column_name + "_ts", models.TimestampUTC(), nullable=True) op.add_column(table_name, temp_col) t = sa.sql.table(table_name, existing_col, temp_col) op.execute(t.update().values( **{column_name + "_ts": func.from_unixtime(existing_col)})) op.drop_column(table_name, column_name) op.alter_column(table_name, column_name + "_ts", nullable=True, type_=models.TimestampUTC(), existing_nullable=True, existing_type=existing_type, new_column_name=column_name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/storage/sqlalchemy/alembic/versions/6ae0d05d9451_add_reason_column.py0000664000175000017500000000201700000000000030365 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Copyright 2017 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # """add_reason_column Revision ID: 6ae0d05d9451 Revises: 367aadf5485f Create Date: 2017-06-05 16:42:42.379029 """ # revision identifiers, used by Alembic. revision = '6ae0d05d9451' down_revision = '367aadf5485f' branch_labels = None depends_on = None from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('alarm', sa.Column('state_reason', sa.Text, nullable=True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/storage/sqlalchemy/alembic/versions/bb07adac380_add_severity_to_alarm_history.py0000664000175000017500000000204000000000000033071 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """add severity to alarm history Revision ID: bb07adac380 Revises: 12fe8fac9fe4 Create Date: 2015-08-06 15:15:43.717068 """ # revision identifiers, used by Alembic. revision = 'bb07adac380' down_revision = '12fe8fac9fe4' branch_labels = None depends_on = None from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('alarm_history', sa.Column('severity', sa.String(length=50), nullable=True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/storage/sqlalchemy/alembic/versions/f8c31b1ffe11_add_index_for_enabled_and_type.py0000664000175000017500000000207400000000000033175 0ustar00zuulzuul00000000000000# Copyright 2016 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """add index for enabled and type Revision ID: f8c31b1ffe11 Revises: bb07adac380 Create Date: 2016-06-02 19:39:42.495020 """ # revision identifiers, used by Alembic. revision = 'f8c31b1ffe11' down_revision = 'bb07adac380' branch_labels = None depends_on = None from alembic import op def upgrade(): op.create_index( 'ix_alarm_enabled', 'alarm', ['enabled'], unique=False) op.create_index( 'ix_alarm_type', 'alarm', ['type'], unique=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/storage/sqlalchemy/models.py0000664000175000017500000001032100000000000021112 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ SQLAlchemy models for aodh data. """ import json from oslo_utils import timeutils from oslo_utils import uuidutils import sqlalchemy as sa from sqlalchemy import Column, String, Index, Boolean, Text, DateTime, Integer from sqlalchemy.dialects import mysql from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.types import TypeDecorator class JSONEncodedDict(TypeDecorator): """Represents an immutable structure as a json-encoded string.""" impl = Text cache_ok = True @staticmethod def process_bind_param(value, dialect): if value is not None: value = json.dumps(value) return value @staticmethod def process_result_value(value, dialect): if value is not None: value = json.loads(value) return value class TimestampUTC(TypeDecorator): """Represents a timestamp precise to the microsecond.""" impl = DateTime cache_ok = True def load_dialect_impl(self, dialect): if dialect.name == 'mysql': return dialect.type_descriptor(mysql.DATETIME(fsp=6)) return self.impl class AodhBase(object): """Base class for Aodh Models.""" __table_args__ = {'mysql_charset': "utf8", 'mysql_engine': "InnoDB"} __table_initialized__ = False def __setitem__(self, key, value): setattr(self, key, value) def __getitem__(self, key): return getattr(self, key) def update(self, values): """Make the model object behave like a dict.""" for k, v in values.items(): setattr(self, k, v) Base = declarative_base(cls=AodhBase) class Alarm(Base): """Define Alarm data.""" __tablename__ = 'alarm' __table_args__ = ( Index('ix_alarm_user_id', 'user_id'), Index('ix_alarm_project_id', 'project_id'), Index('ix_alarm_enabled', 'enabled'), Index('ix_alarm_type', 'type'), ) alarm_id = Column(String(128), primary_key=True) enabled = Column(Boolean) name = Column(Text) type = Column(String(50)) severity = Column(String(50)) description = Column(Text) timestamp = Column(TimestampUTC, default=lambda: timeutils.utcnow()) user_id = Column(String(128)) project_id = Column(String(128)) state = Column(String(255)) state_reason = Column(Text) state_timestamp = Column(TimestampUTC, default=lambda: timeutils.utcnow()) ok_actions = Column(JSONEncodedDict) alarm_actions = Column(JSONEncodedDict) insufficient_data_actions = Column(JSONEncodedDict) repeat_actions = Column(Boolean) rule = Column(JSONEncodedDict) time_constraints = Column(JSONEncodedDict) evaluate_timestamp = Column(DateTime, default=lambda: timeutils.utcnow()) class AlarmChange(Base): """Define AlarmChange data.""" __tablename__ = 'alarm_history' __table_args__ = ( Index('ix_alarm_history_alarm_id', 'alarm_id'), ) event_id = Column(String(128), primary_key=True) alarm_id = Column(String(128)) on_behalf_of = Column(String(128)) project_id = Column(String(128)) user_id = Column(String(128)) type = Column(String(20)) detail = Column(Text) timestamp = Column(TimestampUTC, default=lambda: timeutils.utcnow()) severity = Column(String(50)) class Quota(Base): __tablename__ = 'quota' __table_args__ = ( sa.UniqueConstraint('project_id', 'resource'), Index('ix_%s_project_id_resource' % __tablename__, 'project_id', 'resource'), ) id = Column(String(36), primary_key=True, 
default=uuidutils.generate_uuid) project_id = Column(String(128)) resource = Column(String(50)) limit = Column(Integer) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/storage/sqlalchemy/utils.py0000664000175000017500000000723500000000000021001 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import operator from sqlalchemy import and_ from sqlalchemy import asc from sqlalchemy import desc from sqlalchemy import func from sqlalchemy import not_ from sqlalchemy import or_ class QueryTransformer(object): operators = {"=": operator.eq, "<": operator.lt, ">": operator.gt, "<=": operator.le, "=<": operator.le, ">=": operator.ge, "=>": operator.ge, "!=": operator.ne, "in": lambda field_name, values: field_name.in_(values), "=~": lambda field, value: field.op("regexp")(value)} # operators which are different for different dialects dialect_operators = {'postgresql': {'=~': (lambda field, value: field.op("~")(value))}} complex_operators = {"or": or_, "and": and_, "not": not_} ordering_functions = {"asc": asc, "desc": desc} def __init__(self, table, query, dialect='mysql'): self.table = table self.query = query self.dialect_name = dialect def _get_operator(self, op): return (self.dialect_operators.get(self.dialect_name, {}).get(op) or self.operators[op]) def _handle_complex_op(self, complex_op, nodes): op = self.complex_operators[complex_op] if op == not_: nodes = [nodes] element_list = [] for node in nodes: element = self._transform(node) element_list.append(element) return op(*element_list) def _handle_simple_op(self, simple_op, nodes): op = self._get_operator(simple_op) field_name, value = list(nodes.items())[0] return op(getattr(self.table, field_name), value) def _transform(self, sub_tree): operator, nodes = list(sub_tree.items())[0] if operator in self.complex_operators: return self._handle_complex_op(operator, nodes) else: return self._handle_simple_op(operator, nodes) def apply_filter(self, expression_tree): condition = self._transform(expression_tree) self.query = self.query.filter(condition) def apply_options(self, orderby, limit): self._apply_order_by(orderby) if limit is not None: self.query = self.query.limit(limit) def _apply_order_by(self, orderby): if orderby is not None: for field in orderby: attr, order = list(field.items())[0] ordering_function = self.ordering_functions[order] if attr == 'severity': self.query = self.query.order_by(ordering_function( func.field(getattr(self.table, attr), 'low', 'moderate', 'critical'))) else: self.query = self.query.order_by(ordering_function( getattr(self.table, attr))) else: self.query = self.query.order_by(desc(self.table.timestamp)) def get_query(self): return self.query ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0830133 aodh-19.0.0/aodh/tests/0000775000175000017500000000000000000000000014614 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/__init__.py0000664000175000017500000000000000000000000016713 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/base.py0000664000175000017500000001510300000000000016100 0ustar00zuulzuul00000000000000# # Copyright 2012 New Dream Network (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test base classes. """ import fixtures import functools import os.path import unittest import warnings import oslo_messaging.conffixture from oslo_utils import timeutils from oslotest import base from sqlalchemy import exc as sqla_exc import webtest import aodh from aodh import messaging class WarningsFixture(fixtures.Fixture): """Filters out warnings during test runs.""" def setUp(self): super().setUp() self._original_warning_filters = warnings.filters[:] warnings.simplefilter('once', DeprecationWarning) # FIXME(stephenfin): Determine if we need to replace use of best_match warnings.filterwarnings( 'ignore', module='webob', message='The behavior of AcceptValidHeader.best_match is ', category=DeprecationWarning, ) # FIXME(stephenfin): Determine if we need to replace use of best_match warnings.filterwarnings( 'ignore', module='webob', message='The behavior of .best_match for the Accept classes is ', category=DeprecationWarning, ) # FIXME(stephenfin): Update tests to resolve these issues warnings.filterwarnings( 'ignore', module='oslo_policy', message='Policy ".*": ".*" failed scope check. ', category=UserWarning, ) # Enable deprecation warnings for aodh itself to capture upcoming # SQLAlchemy changes warnings.filterwarnings( 'ignore', category=sqla_exc.SADeprecationWarning, ) warnings.filterwarnings( 'error', module='aodh', category=sqla_exc.SADeprecationWarning, ) # Enable general SQLAlchemy warnings also to ensure we're not doing # silly stuff. It's possible that we'll need to filter things out here # with future SQLAlchemy versions, but that's a good thing warnings.filterwarnings( 'error', module='aodh', category=sqla_exc.SAWarning, ) self.addCleanup(self._reset_warning_filters) def _reset_warning_filters(self): warnings.filters[:] = self._original_warning_filters class BaseTestCase(base.BaseTestCase): def setup_messaging(self, conf, exchange=None): self.useFixture(oslo_messaging.conffixture.ConfFixture(conf)) self.useFixture(WarningsFixture()) conf.set_override("notification_driver", ["messaging"]) if not exchange: exchange = 'aodh' conf.set_override("control_exchange", exchange) # NOTE(sileht): Ensure a new oslo.messaging driver is loaded # between each tests self.transport = messaging.get_transport(conf, "fake://", cache=False) self.useFixture(fixtures.MockPatch( 'aodh.messaging.get_transport', return_value=self.transport)) def assertTimestampEqual(self, first, second, msg=None): """Checks that two timestamps are equals. This relies on assertAlmostEqual to avoid rounding problem, and only checks up the first microsecond values. 
""" return self.assertAlmostEqual( timeutils.delta_seconds(first, second), 0.0, places=5) def assertIsEmpty(self, obj): try: if len(obj) != 0: self.fail("%s is not empty" % type(obj)) except (TypeError, AttributeError): self.fail("%s doesn't have length" % type(obj)) def assertIsNotEmpty(self, obj): try: if len(obj) == 0: self.fail("%s is empty" % type(obj)) except (TypeError, AttributeError): self.fail("%s doesn't have length" % type(obj)) def assertDictContains(self, parent, child): """Checks whether child dict is a subset of parent. assertDictContainsSubset() in standard Python 2.7 has been deprecated since Python 3.2 """ self.assertEqual(parent, dict(parent, **child)) @staticmethod def path_get(project_file=None): root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', ) ) if project_file: return os.path.join(root, project_file) else: return root def assert_single_item(self, items, **filters): return self.assert_multiple_items(items, 1, **filters)[0] def assert_multiple_items(self, items, count, **filters): def _matches(item, **props): for prop_name, prop_val in props.items(): v = (item[prop_name] if isinstance(item, dict) else getattr(item, prop_name)) if v != prop_val: return False return True filtered_items = list( [item for item in items if _matches(item, **filters)] ) found = len(filtered_items) if found != count: self.fail("Wrong number of items found [filters=%s, " "expected=%s, found=%s]" % (filters, count, found)) return filtered_items def _skip_decorator(func): @functools.wraps(func) def skip_if_not_implemented(*args, **kwargs): try: return func(*args, **kwargs) except aodh.NotImplementedError as e: raise unittest.SkipTest(str(e)) except webtest.app.AppError as e: if 'not implemented' in str(e): raise unittest.SkipTest(str(e)) raise return skip_if_not_implemented class SkipNotImplementedMeta(type): def __new__(cls, name, bases, local): for attr in local: value = local[attr] if callable(value) and ( attr.startswith('test_') or attr == 'setUp'): local[attr] = _skip_decorator(value) return type.__new__(cls, name, bases, local) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/constants.py0000664000175000017500000000123500000000000017203 0ustar00zuulzuul00000000000000# Copyright 2014 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime MIN_DATETIME = datetime.datetime(datetime.MINYEAR, 1, 1) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727866891.087014 aodh-19.0.0/aodh/tests/functional/0000775000175000017500000000000000000000000016756 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/functional/__init__.py0000664000175000017500000000000000000000000021055 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727866891.087014 aodh-19.0.0/aodh/tests/functional/api/0000775000175000017500000000000000000000000017527 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/functional/api/__init__.py0000664000175000017500000001445100000000000021645 0ustar00zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Base classes for API tests. """ import webtest from aodh.api import app from aodh.tests.functional import db as db_test_base class FunctionalTest(db_test_base.TestBase): """Used for functional tests of Pecan controllers. Used in case when you need to test your literal application and its integration with the framework. """ PATH_PREFIX = '' def setUp(self): super(FunctionalTest, self).setUp() self.setup_messaging(self.CONF) self.CONF.set_override('auth_mode', None, group='api') self.app = webtest.TestApp(app.load_app(self.CONF)) def put_json(self, path, params, expect_errors=False, headers=None, extra_environ=None, status=None): """Sends simulated HTTP PUT request to Pecan test app. :param path: url path of target service :param params: content for wsgi.input of request :param expect_errors: boolean value whether an error is expected based on request :param headers: A dictionary of headers to send along with the request :param extra_environ: A dictionary of environ variables to send along with the request :param status: Expected status code of response """ return self.post_json(path=path, params=params, expect_errors=expect_errors, headers=headers, extra_environ=extra_environ, status=status, method="put") def post_json(self, path, params, expect_errors=False, headers=None, method="post", extra_environ=None, status=None): """Sends simulated HTTP POST request to Pecan test app. :param path: url path of target service :param params: content for wsgi.input of request :param expect_errors: boolean value whether an error is expected based on request :param headers: A dictionary of headers to send along with the request :param method: Request method type. Appropriate method function call should be used rather than passing attribute in. 
:param extra_environ: A dictionary of environ variables to send along with the request :param status: Expected status code of response """ full_path = self.PATH_PREFIX + path response = getattr(self.app, "%s_json" % method)( str(full_path), params=params, headers=headers, status=status, extra_environ=extra_environ, expect_errors=expect_errors ) return response def delete(self, path, expect_errors=False, headers=None, extra_environ=None, status=None): """Sends simulated HTTP DELETE request to Pecan test app. :param path: url path of target service :param expect_errors: boolean value whether an error is expected based on request :param headers: A dictionary of headers to send along with the request :param extra_environ: A dictionary of environ variables to send along with the request :param status: Expected status code of response """ full_path = self.PATH_PREFIX + path response = self.app.delete(str(full_path), headers=headers, status=status, extra_environ=extra_environ, expect_errors=expect_errors) return response def get_json(self, path, expect_errors=False, headers=None, extra_environ=None, q=None, groupby=None, status=None, override_params=None, **params): """Sends simulated HTTP GET request to Pecan test app. :param path: url path of target service :param expect_errors: boolean value whether an error is expected based on request :param headers: A dictionary of headers to send along with the request :param extra_environ: A dictionary of environ variables to send along with the request :param q: list of queries consisting of: field, value, op, and type keys :param groupby: list of fields to group by :param status: Expected status code of response :param override_params: literally encoded query param string :param params: content for wsgi.input of request """ q = q or [] groupby = groupby or [] full_path = self.PATH_PREFIX + path if override_params: all_params = override_params else: query_params = {'q.field': [], 'q.value': [], 'q.op': [], 'q.type': [], } for query in q: for name in ['field', 'op', 'value', 'type']: query_params['q.%s' % name].append(query.get(name, '')) all_params = {} all_params.update(params) if q: all_params.update(query_params) if groupby: all_params.update({'groupby': groupby}) response = self.app.get(full_path, params=all_params, headers=headers, extra_environ=extra_environ, expect_errors=expect_errors, status=status) if not expect_errors: response = response.json return response ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727866891.087014 aodh-19.0.0/aodh/tests/functional/api/v2/0000775000175000017500000000000000000000000020056 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/functional/api/v2/__init__.py0000664000175000017500000000130400000000000022165 0ustar00zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from aodh.tests.functional import api class FunctionalTest(api.FunctionalTest): PATH_PREFIX = '/v2' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/functional/api/v2/policy.yaml-test0000664000175000017500000000053600000000000023222 0ustar00zuulzuul00000000000000# WARNING: Below rules are either deprecated rules # or extra rules in policy file, it is strongly # recommended to switch to new rules. "context_is_admin": "role:admin" "segregation": "rule:context_is_admin" "admin_or_owner": "rule:context_is_admin or project_id:%(project_id)s" "default": "rule:admin_or_owner" "telemetry:get_alarms": "role:admin" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/functional/api/v2/test_alarm_scenarios.py0000664000175000017500000017603000000000000024640 0ustar00zuulzuul00000000000000# # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests alarm operation.""" import copy import datetime import json as jsonlib import os from unittest import mock import fixtures from oslo_utils import uuidutils import webtest from aodh.api import app from aodh import messaging from aodh.storage import models from aodh.tests import constants from aodh.tests.functional.api import v2 RULE_KEY = 'gnocchi_aggregation_by_metrics_threshold_rule' def default_alarms(auth_headers): return [models.Alarm(name='name1', type='gnocchi_aggregation_by_metrics_threshold', enabled=True, alarm_id='a', description='a', state='insufficient data', state_reason='Not evaluated', severity='critical', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, ok_actions=[], insufficient_data_actions=[], alarm_actions=[], repeat_actions=True, user_id=auth_headers['X-User-Id'], project_id=auth_headers['X-Project-Id'], time_constraints=[dict(name='testcons', start='0 11 * * *', duration=300)], rule=dict(comparison_operator='gt', threshold=2.0, aggregation_method='mean', evaluation_periods=60, granularity=1, metrics=[ '41869681-5776-46d6-91ed-cccc43b6e4e3', 'a1fb80f4-c242-4f57-87c6-68f47521059e' ]) ), models.Alarm(name='name2', type='gnocchi_aggregation_by_metrics_threshold', enabled=True, alarm_id='b', description='b', state='insufficient data', state_reason='Not evaluated', severity='critical', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, ok_actions=[], insufficient_data_actions=[], alarm_actions=[], repeat_actions=False, user_id=auth_headers['X-User-Id'], project_id=auth_headers['X-Project-Id'], time_constraints=[], rule=dict(comparison_operator='gt', threshold=4.0, aggregation_method='mean', evaluation_periods=60, granularity=1, metrics=[ '41869681-5776-46d6-91ed-cccc43b6e4e3', 'a1fb80f4-c242-4f57-87c6-68f47521059e' ]) ), models.Alarm(name='name3', type='gnocchi_aggregation_by_metrics_threshold', enabled=True, alarm_id='c', description='c', state='insufficient data', state_reason='Not evaluated', severity='moderate', 
state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, ok_actions=[], insufficient_data_actions=[], alarm_actions=[], repeat_actions=False, user_id=auth_headers['X-User-Id'], project_id=auth_headers['X-Project-Id'], time_constraints=[], rule=dict(comparison_operator='gt', threshold=3.0, aggregation_method='mean', evaluation_periods=60, granularity=1, metrics=[ '95f3c171-5605-4021-87ed-eede77101268', 'bf588a78-56c7-4ba4-be46-d71e5002e030', ]) )] class TestAlarmsBase(v2.FunctionalTest): def setUp(self): super(TestAlarmsBase, self).setUp() self.project_id = uuidutils.generate_uuid() self.user_id = uuidutils.generate_uuid() self.auth_headers = {'X-User-Id': self.user_id, 'X-Project-Id': self.project_id} c = mock.Mock() c.capabilities.list.return_value = {'aggregation_methods': [ 'count', 'mean', 'max', 'min', 'first', 'last', 'std']} self.useFixture(fixtures.MockPatch( 'aodh.api.controllers.v2.alarm_rules.gnocchi.client.Client', return_value=c )) def _verify_alarm(self, json, alarm, expected_name=None): if expected_name and alarm.name != expected_name: self.fail("Alarm not found") for key in json: if key.endswith('_rule'): storage_key = 'rule' else: storage_key = key self.assertEqual(json[key], getattr(alarm, storage_key)) def _get_alarm(self, id, auth_headers=None): headers = auth_headers or self.auth_headers url_path = "/alarms" if headers.get('X-Roles') == 'admin': url_path = '/alarms?q.field=all_projects&q.op=eq&q.value=true' data = self.get_json(url_path, headers=headers) match = [a for a in data if a['alarm_id'] == id] self.assertEqual(1, len(match), 'alarm %s not found' % id) return match[0] def _update_alarm(self, id, updated_data, auth_headers=None): data = self._get_alarm(id, auth_headers) data.update(updated_data) self.put_json('/alarms/%s' % id, params=data, headers=auth_headers or self.auth_headers) def _delete_alarm(self, id, auth_headers=None): self.delete('/alarms/%s' % id, headers=auth_headers or self.auth_headers, status=204) class TestAlarms(TestAlarmsBase): def setUp(self): super(TestAlarms, self).setUp() for alarm in default_alarms(self.auth_headers): self.alarm_conn.create_alarm(alarm) def test_list_alarms_all_projects_by_admin(self): auth_headers = copy.copy(self.auth_headers) auth_headers['X-Roles'] = 'admin' alarms = self.get_json( '/alarms', headers=auth_headers, q=[{'field': 'all_projects', 'op': 'eq', 'value': 'true'}] ) self.assertEqual(3, len(alarms)) def test_get_alarm_project_filter_normal_user(self): project = self.auth_headers['X-Project-Id'] def _test(field): alarms = self.get_json('/alarms', headers=self.auth_headers, q=[{'field': field, 'op': 'eq', 'value': project}]) self.assertEqual(3, len(alarms)) _test('project_id') def test_get_alarm_other_project_normal_user(self): def _test(field): response = self.get_json('/alarms', q=[{'field': field, 'op': 'eq', 'value': 'other-project'}], expect_errors=True, status=401, headers=self.auth_headers) faultstring = 'Not Authorized to access' self.assertIn(faultstring, response.json['error_message']['faultstring']) _test('project_id') def test_get_alarm_forbiden(self): pf = os.path.abspath('aodh/tests/functional/api/v2/policy.yaml-test') self.CONF.set_override('policy_file', pf, group='oslo_policy') self.CONF.set_override('auth_mode', None, group='api') self.app = webtest.TestApp(app.load_app(self.CONF)) response = self.get_json('/alarms', expect_errors=True, status=403, headers=self.auth_headers) faultstring = 'RBAC Authorization Failed' self.assertEqual(403, response.status_code) 
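        # The 403 above comes from the test-only policy.yaml-test loaded
        # a few lines earlier: it restricts "telemetry:get_alarms" to
        # role:admin, and self.auth_headers carries no admin role, so the
        # RBAC check rejects the request before any alarms are listed.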
self.assertEqual(faultstring, response.json['error_message']['faultstring']) def test_post_alarm_noauth(self): json = { 'enabled': False, 'name': 'added_alarm', 'state': 'ok', 'type': 'gnocchi_aggregation_by_metrics_threshold', 'severity': 'low', 'ok_actions': ['http://something/ok'], 'alarm_actions': ['http://something/alarm'], 'insufficient_data_actions': ['http://something/no'], 'repeat_actions': True, RULE_KEY: { 'metrics': ['41869681-5776-46d6-91ed-cccc43b6e4e3', 'a1fb80f4-c242-4f57-87c6-68f47521059e'], 'comparison_operator': 'le', 'aggregation_method': 'count', 'threshold': 50, 'evaluation_periods': '3', 'granularity': '180', } } self.post_json('/alarms', params=json, status=201) alarms = list(self.alarm_conn.get_alarms(enabled=False)) self.assertEqual(1, len(alarms)) # to check to BoundedInt type conversion json[RULE_KEY]['evaluation_periods'] = 3 json[RULE_KEY]['granularity'] = 180 if alarms[0].name == 'added_alarm': for key in json: if key.endswith('_rule'): storage_key = 'rule' else: storage_key = key self.assertEqual(getattr(alarms[0], storage_key), json[key]) else: self.fail("Alarm not found") @staticmethod def _alarm_representation_owned_by(identifiers): json = { 'name': 'added_alarm', 'enabled': False, 'type': 'gnocchi_aggregation_by_metrics_threshold', 'ok_actions': ['http://something/ok'], RULE_KEY: { 'metrics': ['41869681-5776-46d6-91ed-cccc43b6e4e3', 'a1fb80f4-c242-4f57-87c6-68f47521059e'], 'comparison_operator': 'le', 'aggregation_method': 'count', 'threshold': 50, 'evaluation_periods': 3, 'granularity': 180, } } for aspect, id in identifiers.items(): json['%s_id' % aspect] = id return json def _do_test_post_alarm_as_nonadmin_on_behalf_of_another(self, identifiers): """Test posting an alarm. Test that posting an alarm as non-admin on behalf of another user/project fails with an explicit 401 instead of reverting to the requestor's identity. """ json = self._alarm_representation_owned_by(identifiers) headers = {} headers.update(self.auth_headers) headers['X-Roles'] = 'demo' resp = self.post_json('/alarms', params=json, status=401, headers=headers) aspect = 'user' if 'user' in identifiers else 'project' params = dict(aspect=aspect, id=identifiers[aspect]) self.assertEqual("Not Authorized to access %(aspect)s %(id)s" % params, resp.json['error_message']['faultstring']) def test_post_alarm_as_nonadmin_on_behalf_of_another_user(self): identifiers = dict(user='auseridthatisnotmine') self._do_test_post_alarm_as_nonadmin_on_behalf_of_another(identifiers) def test_post_alarm_as_nonadmin_on_behalf_of_another_project(self): identifiers = dict(project='aprojectidthatisnotmine') self._do_test_post_alarm_as_nonadmin_on_behalf_of_another(identifiers) def test_post_alarm_as_nonadmin_on_behalf_of_another_creds(self): identifiers = dict(user='auseridthatisnotmine', project='aprojectidthatisnotmine') self._do_test_post_alarm_as_nonadmin_on_behalf_of_another(identifiers) def _do_test_post_alarm_as_nonadmin_on_behalf_of_self(self, identifiers): """Test posting an alarm. Test posting an alarm as non-admin on behalf of own user/project creates alarm associated with the requestor's identity. 
""" json = self._alarm_representation_owned_by(identifiers) headers = {} headers.update(self.auth_headers) headers['X-Roles'] = 'demo' self.post_json('/alarms', params=json, status=201, headers=headers) alarms = list(self.alarm_conn.get_alarms(enabled=False)) self.assertEqual(1, len(alarms)) self.assertEqual(alarms[0].user_id, self.auth_headers['X-User-Id']) self.assertEqual(alarms[0].project_id, self.auth_headers['X-Project-Id']) def test_post_alarm_as_nonadmin_on_behalf_of_own_user(self): identifiers = dict(user=self.auth_headers['X-User-Id']) self._do_test_post_alarm_as_nonadmin_on_behalf_of_self(identifiers) def test_post_alarm_as_nonadmin_on_behalf_of_own_project(self): identifiers = dict(project=self.auth_headers['X-Project-Id']) self._do_test_post_alarm_as_nonadmin_on_behalf_of_self(identifiers) def test_post_alarm_as_nonadmin_on_behalf_of_own_creds(self): identifiers = dict(user=self.auth_headers['X-User-Id'], project=self.auth_headers['X-Project-Id']) self._do_test_post_alarm_as_nonadmin_on_behalf_of_self(identifiers) def test_post_alarm_with_mismatch_between_type_and_rule(self): """Test the creation of an combination alarm with threshold rule.""" json = { 'enabled': False, 'name': 'added_alarm', 'state': 'ok', 'type': 'gnocchi_resources_threshold', 'ok_actions': ['http://something/ok'], 'alarm_actions': ['http://something/alarm'], 'insufficient_data_actions': ['http://something/no'], 'repeat_actions': True, RULE_KEY: { 'metrics': ['41869681-5776-46d6-91ed-cccc43b6e4e3', 'a1fb80f4-c242-4f57-87c6-68f47521059e'], 'comparison_operator': 'le', 'aggregation_method': 'count', 'threshold': 50, 'evaluation_periods': '3', 'granularity': '180', } } resp = self.post_json('/alarms', params=json, expect_errors=True, status=400, headers=self.auth_headers) self.assertEqual( "gnocchi_resources_threshold_rule must " "be set for gnocchi_resources_threshold type alarm", resp.json['error_message']['faultstring']) def test_post_alarm_normal_user_set_log_actions(self): body = { 'name': 'log_alarm_actions', 'type': 'gnocchi_aggregation_by_metrics_threshold', RULE_KEY: { 'metrics': ['41869681-5776-46d6-91ed-cccc43b6e4e3', 'a1fb80f4-c242-4f57-87c6-68f47521059e'], 'comparison_operator': 'le', 'aggregation_method': 'count', 'threshold': 50, 'evaluation_periods': '3', 'granularity': '180', }, 'alarm_actions': ['log://'] } resp = self.post_json('/alarms', params=body, expect_errors=True, headers=self.auth_headers) self.assertEqual(401, resp.status_code) expected_msg = ("You are not authorized to create action: log://") self.assertEqual(expected_msg, resp.json['error_message']['faultstring']) def test_post_alarm_normal_user_set_test_actions(self): body = { 'name': 'test_alarm_actions', 'type': 'gnocchi_aggregation_by_metrics_threshold', RULE_KEY: { 'metrics': ['41869681-5776-46d6-91ed-cccc43b6e4e3', 'a1fb80f4-c242-4f57-87c6-68f47521059e'], 'comparison_operator': 'le', 'aggregation_method': 'count', 'threshold': 50, 'evaluation_periods': '3', 'granularity': '180', }, 'alarm_actions': ['test://'] } resp = self.post_json('/alarms', params=body, expect_errors=True, headers=self.auth_headers) self.assertEqual(401, resp.status_code) expected_msg = ("You are not authorized to create action: test://") self.assertEqual(expected_msg, resp.json['error_message']['faultstring']) def test_post_alarm_admin_user_set_log_test_actions(self): body = { 'name': 'admin_alarm_actions', 'type': 'gnocchi_aggregation_by_metrics_threshold', RULE_KEY: { 'metrics': ['41869681-5776-46d6-91ed-cccc43b6e4e3', 
'a1fb80f4-c242-4f57-87c6-68f47521059e'], 'comparison_operator': 'le', 'aggregation_method': 'count', 'threshold': 50, 'evaluation_periods': '3', 'granularity': '180', }, 'alarm_actions': ['test://', 'log://'] } headers = self.auth_headers headers['X-Roles'] = 'admin' self.post_json('/alarms', params=body, status=201, headers=headers) alarms = list(self.alarm_conn.get_alarms(name='admin_alarm_actions')) self.assertEqual(1, len(alarms)) self.assertEqual(['test://', 'log://'], alarms[0].alarm_actions) def test_post_alarm_trust(self): json = { 'name': 'added_alarm_defaults', 'type': 'gnocchi_aggregation_by_metrics_threshold', 'ok_actions': ['trust+http://my.server:1234/foo'], RULE_KEY: { 'metrics': ['41869681-5776-46d6-91ed-cccc43b6e4e3', 'a1fb80f4-c242-4f57-87c6-68f47521059e'], 'aggregation_method': 'mean', 'threshold': 300.0 } } auth = mock.Mock() trust_client = mock.Mock() with mock.patch('aodh.keystone_client.get_client') as client: mock_session = mock.Mock() mock_session.get_user_id.return_value = 'my_user' client.return_value = mock.Mock(session=mock_session) with mock.patch('keystoneclient.v3.client.Client') as sub_client: sub_client.return_value = trust_client trust_client.trusts.create.return_value = mock.Mock(id='5678') self.post_json('/alarms', params=json, status=201, headers=self.auth_headers, extra_environ={'keystone.token_auth': auth}) trust_client.trusts.create.assert_called_once_with( trustor_user=self.auth_headers['X-User-Id'], trustee_user='my_user', project=self.auth_headers['X-Project-Id'], impersonation=True, role_names=[]) alarms = list(self.alarm_conn.get_alarms()) for alarm in alarms: if alarm.name == 'added_alarm_defaults': self.assertEqual( ['trust+http://5678:delete@my.server:1234/foo'], alarm.ok_actions) break else: self.fail("Alarm not found") data = self._get_alarm(alarm.alarm_id) self.assertEqual( ['trust+http://my.server:1234/foo'], data['ok_actions']) with mock.patch('aodh.keystone_client.get_client') as client: client.return_value = mock.Mock( auth_ref=mock.Mock(user_id='my_user')) with mock.patch('keystoneclient.v3.client.Client') as sub_client: sub_client.return_value = trust_client self.delete('/alarms/%s' % alarm.alarm_id, headers=self.auth_headers, status=204, extra_environ={'keystone.token_auth': auth}) trust_client.trusts.delete.assert_called_once_with('5678') def test_put_alarm_as_admin(self): json = { 'user_id': 'myuserid', 'project_id': 'myprojectid', 'enabled': False, 'name': 'name_put', 'state': 'ok', 'type': 'gnocchi_aggregation_by_metrics_threshold', 'severity': 'critical', 'ok_actions': ['http://something/ok'], 'alarm_actions': ['http://something/alarm'], 'insufficient_data_actions': ['http://something/no'], 'repeat_actions': True, RULE_KEY: { 'metrics': ['41869681-5776-46d6-91ed-cccc43b6e4e3', 'a1fb80f4-c242-4f57-87c6-68f47521059e'], 'comparison_operator': 'le', 'aggregation_method': 'count', 'threshold': 50, 'evaluation_periods': 3, 'granularity': 180, } } headers = {} headers.update(self.auth_headers) headers['X-Roles'] = 'admin' data = self.get_json('/alarms', headers=headers, q=[{'field': 'name', 'value': 'name1', }]) self.assertEqual(1, len(data)) alarm_id = data[0]['alarm_id'] self.put_json('/alarms/%s' % alarm_id, params=json, headers=headers) alarm = list(self.alarm_conn.get_alarms(alarm_id=alarm_id, enabled=False))[0] self.assertEqual('myuserid', alarm.user_id) self.assertEqual('myprojectid', alarm.project_id) self._verify_alarm(json, alarm) def test_put_alarm_trust(self): data = self._get_alarm('a') data.update({'ok_actions': 
['trust+http://something/ok']}) trust_client = mock.Mock() with mock.patch('aodh.keystone_client.get_client') as client: client.return_value = mock.Mock( auth_ref=mock.Mock(user_id='my_user')) with mock.patch('keystoneclient.v3.client.Client') as sub_client: sub_client.return_value = trust_client trust_client.trusts.create.return_value = mock.Mock(id='5678') self.put_json('/alarms/%s' % data['alarm_id'], params=data, headers=self.auth_headers) for alarm in list(self.alarm_conn.get_alarms()): if alarm.alarm_id == data['alarm_id']: self.assertEqual( ['trust+http://5678:delete@something/ok'], alarm.ok_actions) break data = self._get_alarm('a') self.assertEqual( ['trust+http://something/ok'], data['ok_actions']) data.update({'ok_actions': ['http://no-trust-something/ok']}) with mock.patch('aodh.keystone_client.get_client') as client: client.return_value = mock.Mock( auth_ref=mock.Mock(user_id='my_user')) with mock.patch('keystoneclient.v3.client.Client') as sub_client: sub_client.return_value = trust_client self.put_json('/alarms/%s' % data['alarm_id'], params=data, headers=self.auth_headers) trust_client.trusts.delete.assert_called_once_with('5678') data = self._get_alarm('a') self.assertEqual( ['http://no-trust-something/ok'], data['ok_actions']) def test_alarms_sends_notification(self): # Hit the AlarmsController ... json = { 'name': 'sent_notification', 'type': 'gnocchi_aggregation_by_metrics_threshold', 'severity': 'low', RULE_KEY: { 'metrics': ['41869681-5776-46d6-91ed-cccc43b6e4e3', 'a1fb80f4-c242-4f57-87c6-68f47521059e'], 'comparison_operator': 'gt', 'threshold': 2.0, 'aggregation_method': 'mean', } } with mock.patch.object(messaging, 'get_notifier') as get_notifier: notifier = get_notifier.return_value self.post_json('/alarms', params=json, headers=self.auth_headers) get_notifier.assert_called_once_with(mock.ANY, publisher_id='aodh.api') calls = notifier.info.call_args_list self.assertEqual(1, len(calls)) args, _ = calls[0] context, event_type, payload = args self.assertEqual('alarm.creation', event_type) self.assertEqual('sent_notification', payload['detail']['name']) self.assertEqual(['41869681-5776-46d6-91ed-cccc43b6e4e3', 'a1fb80f4-c242-4f57-87c6-68f47521059e'], payload['detail']['rule']['metrics']) self.assertTrue(set(['alarm_id', 'detail', 'event_id', 'on_behalf_of', 'project_id', 'timestamp', 'type', 'user_id']).issubset(payload.keys())) def test_alarm_sends_notification(self): with mock.patch.object(messaging, 'get_notifier') as get_notifier: notifier = get_notifier.return_value self._update_alarm('a', dict(name='new_name')) get_notifier.assert_called_once_with(mock.ANY, publisher_id='aodh.api') calls = notifier.info.call_args_list self.assertEqual(1, len(calls)) args, _ = calls[0] context, event_type, payload = args self.assertEqual('alarm.rule_change', event_type) self.assertEqual('new_name', payload['detail']['name']) self.assertTrue(set(['alarm_id', 'detail', 'event_id', 'on_behalf_of', 'project_id', 'timestamp', 'type', 'user_id']).issubset(payload.keys())) def test_delete_alarm_sends_notification(self): with mock.patch.object(messaging, 'get_notifier') as get_notifier: notifier = get_notifier.return_value self._delete_alarm(default_alarms(self.auth_headers)[1].alarm_id) get_notifier.assert_called_once_with(mock.ANY, publisher_id='aodh.api') calls = notifier.info.call_args_list self.assertEqual(1, len(calls)) args, _ = calls[0] context, event_type, payload = args self.assertEqual('alarm.deletion', event_type) self.assertEqual('insufficient data', 
payload['detail']['state']) self.assertTrue(set(['alarm_id', 'detail', 'event_id', 'on_behalf_of', 'project_id', 'timestamp', 'type', 'severity', 'user_id']).issubset(payload.keys())) class TestAlarmsHistory(TestAlarmsBase): def setUp(self): super(TestAlarmsHistory, self).setUp() alarm = models.Alarm( name='name1', type='gnocchi_aggregation_by_metrics_threshold', enabled=True, alarm_id='a', description='a', state='insufficient data', state_reason='insufficient data', severity='critical', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, ok_actions=[], insufficient_data_actions=[], alarm_actions=[], repeat_actions=True, user_id=self.auth_headers['X-User-Id'], project_id=self.auth_headers['X-Project-Id'], time_constraints=[dict(name='testcons', start='0 11 * * *', duration=300)], rule=dict(comparison_operator='gt', threshold=2.0, aggregation_method='mean', evaluation_periods=60, granularity=1, metrics=['41869681-5776-46d6-91ed-cccc43b6e4e3', 'a1fb80f4-c242-4f57-87c6-68f47521059e'])) self.alarm_conn.create_alarm(alarm) def _get_alarm_history(self, alarm_id, auth_headers=None, query=None, expect_errors=False, status=200): url = '/alarms/%s/history' % alarm_id if query: url += '?q.op=%(op)s&q.value=%(value)s&q.field=%(field)s' % query resp = self.get_json(url, headers=auth_headers or self.auth_headers, expect_errors=expect_errors) if expect_errors: self.assertEqual(status, resp.status_code) return resp def _assert_is_subset(self, expected, actual): for k, v in expected.items(): current = actual.get(k) if k == 'detail' and isinstance(v, dict): current = jsonlib.loads(current) self.assertEqual(v, current, 'mismatched field: %s' % k) self.assertIsNotNone(actual['event_id']) def _assert_in_json(self, expected, actual): actual = jsonlib.dumps(jsonlib.loads(actual), sort_keys=True) for k, v in expected.items(): fragment = jsonlib.dumps({k: v}, sort_keys=True)[1:-1] self.assertIn(fragment, actual, '%s not in %s' % (fragment, actual)) def test_record_alarm_history_config(self): self.CONF.set_override('record_history', False) history = self._get_alarm_history('a') self.assertEqual([], history) self._update_alarm('a', dict(name='renamed')) history = self._get_alarm_history('a') self.assertEqual([], history) self.CONF.set_override('record_history', True) self._update_alarm('a', dict(name='foobar')) history = self._get_alarm_history('a') self.assertEqual(1, len(history)) def test_record_alarm_history_statistic(self): alarm = self._get_alarm('a') history = self._get_alarm_history('a') self.assertEqual([], history) self.assertEqual('mean', alarm[RULE_KEY]['aggregation_method']) rule = alarm[RULE_KEY].copy() rule['aggregation_method'] = 'min' data = dict(gnocchi_aggregation_by_metrics_threshold_rule=rule) self._update_alarm('a', data) new_alarm = self._get_alarm('a') history = self._get_alarm_history('a') self.assertEqual(1, len(history)) self.assertEqual("min", jsonlib.loads(history[0]['detail']) ['rule']["aggregation_method"]) self.assertEqual('min', new_alarm[RULE_KEY]['aggregation_method']) def test_get_recorded_alarm_history_state_transition_on_behalf_of(self): # credentials for new non-admin user, on who's behalf the alarm # is created member_user = uuidutils.generate_uuid() member_project = uuidutils.generate_uuid() member_auth = {'X-Roles': 'member', 'X-User-Id': member_user, 'X-Project-Id': member_project} new_alarm = { 'name': 'new_alarm', 'type': 'gnocchi_aggregation_by_metrics_threshold', 'state': 'ok', RULE_KEY: { 'metrics': ['41869681-5776-46d6-91ed-cccc43b6e4e3', 
'a1fb80f4-c242-4f57-87c6-68f47521059e'], 'comparison_operator': 'le', 'aggregation_method': 'max', 'threshold': 42.0, 'evaluation_periods': 1, 'granularity': 60 } } self.post_json('/alarms', params=new_alarm, status=201, headers=member_auth) alarm = self.get_json('/alarms', headers=member_auth)[0] # effect a state transition as a new administrative user admin_user = uuidutils.generate_uuid() admin_project = uuidutils.generate_uuid() admin_auth = {'X-Roles': 'admin', 'X-User-Id': admin_user, 'X-Project-Id': admin_project} data = dict(state='alarm') self._update_alarm(alarm['alarm_id'], data, auth_headers=admin_auth) new_alarm['rule'] = new_alarm[RULE_KEY] del new_alarm[RULE_KEY] # ensure that both the creation event and state transition # are visible to the non-admin alarm owner and admin user alike for auth in [member_auth, admin_auth]: history = self._get_alarm_history(alarm['alarm_id'], auth_headers=auth) self.assertEqual(2, len(history), 'hist: %s' % history) self._assert_is_subset(dict(alarm_id=alarm['alarm_id'], detail={"state": "alarm", "state_reason": "Manually set via API"}, on_behalf_of=alarm['project_id'], project_id=admin_project, type='rule change', user_id=admin_user), history[0]) self._assert_is_subset(dict(alarm_id=alarm['alarm_id'], on_behalf_of=alarm['project_id'], project_id=member_project, type='creation', user_id=member_user), history[1]) self._assert_in_json(new_alarm, history[1]['detail']) # ensure on_behalf_of cannot be constrained in an API call query = dict(field='on_behalf_of', op='eq', value=alarm['project_id']) self._get_alarm_history(alarm['alarm_id'], auth_headers=auth, query=query, expect_errors=True, status=400) def test_delete_alarm_history_after_deletion(self): self._update_alarm('a', dict(name='renamed')) history = self._get_alarm_history('a') self.assertEqual(1, len(history)) self.delete('/alarms/%s' % 'a', headers=self.auth_headers, status=204) self._get_alarm_history('a', expect_errors=True, status=404) def test_get_alarm_history_ordered_by_recentness(self): for i in range(10): self._update_alarm('a', dict(name='%s' % i)) history = self._get_alarm_history('a') self.assertEqual(10, len(history), 'hist: %s' % history) self._assert_is_subset(dict(alarm_id='a', type='rule change'), history[0]) for i in range(1, 11): detail = '{"name": "%s"}' % (10 - i) self._assert_is_subset(dict(alarm_id='a', detail=detail, type='rule change'), history[i - 1]) def test_get_alarm_history_constrained_by_timestamp(self): alarm = self._get_alarm('a') self._update_alarm('a', dict(name='renamed')) after = datetime.datetime.utcnow().isoformat() query = dict(field='timestamp', op='gt', value=after) history = self._get_alarm_history('a', query=query) self.assertEqual(0, len(history)) query['op'] = 'le' history = self._get_alarm_history('a', query=query) self.assertEqual(1, len(history)) detail = '{"name": "renamed"}' self._assert_is_subset(dict(alarm_id=alarm['alarm_id'], detail=detail, on_behalf_of=alarm['project_id'], project_id=alarm['project_id'], type='rule change', user_id=alarm['user_id']), history[0]) def test_get_alarm_history_constrained_by_type(self): alarm = self._get_alarm('a') self._update_alarm('a', dict(name='renamed2')) query = dict(field='type', op='eq', value='rule change') history = self._get_alarm_history('a', query=query) self.assertEqual(1, len(history)) detail = '{"name": "renamed2"}' self._assert_is_subset(dict(alarm_id=alarm['alarm_id'], detail=detail, on_behalf_of=alarm['project_id'], project_id=alarm['project_id'], type='rule change', 
user_id=alarm['user_id']), history[0]) def test_get_alarm_history_constrained_by_alarm_id_failed(self): query = dict(field='alarm_id', op='eq', value='a') resp = self._get_alarm_history('a', query=query, expect_errors=True, status=400) msg = ('Unknown argument: "alarm_id": unrecognized' " field in query: [], valid keys: ['project', " "'search_offset', 'severity', 'timestamp'," " 'type', 'user']") msg = msg.format(key='alarm_id', value='a') self.assertEqual(msg, resp.json['error_message']['faultstring']) def test_get_alarm_history_constrained_by_not_supported_rule(self): query = dict(field='abcd', op='eq', value='abcd') resp = self._get_alarm_history('a', query=query, expect_errors=True, status=400) msg = ('Unknown argument: "abcd": unrecognized' " field in query: [], valid keys: ['project', " "'search_offset', 'severity', 'timestamp'," " 'type', 'user']") msg = msg.format(key='abcd', value='abcd') self.assertEqual(msg, resp.json['error_message']['faultstring']) def test_get_alarm_history_constrained_by_severity(self): self._update_alarm('a', dict(severity='low')) query = dict(field='severity', op='eq', value='low') history = self._get_alarm_history('a', query=query) self.assertEqual(1, len(history)) self.assertEqual(jsonlib.dumps({'severity': 'low'}), history[0]['detail']) class TestAlarmsQuotas(TestAlarmsBase): def setUp(self): super(TestAlarmsQuotas, self).setUp() self.alarm = { 'name': 'alarm', 'type': 'gnocchi_aggregation_by_metrics_threshold', 'user_id': self.user_id, 'project_id': self.project_id, RULE_KEY: { 'metrics': ['41869681-5776-46d6-91ed-cccc43b6e4e3', 'a1fb80f4-c242-4f57-87c6-68f47521059e'], 'comparison_operator': 'le', 'aggregation_method': 'max', 'threshold': 42.0, 'granularity': 60, 'evaluation_periods': 1, } } def _create_alarm(self, alarm=None): if not alarm: alarm = self.alarm resp = self.post_json('/alarms', params=alarm, headers=self.auth_headers, status=201) return resp def _test_alarm_quota(self): """Failed on the second creation.""" resp = self._create_alarm() alarms = self.get_json('/alarms', headers=self.auth_headers) self.assertEqual(1, len(alarms)) alarm = copy.copy(self.alarm) alarm['name'] = 'another_user_alarm' resp = self.post_json('/alarms', params=alarm, expect_errors=True, headers=self.auth_headers, status=403) faultstring = 'Alarm quota exceeded for user' self.assertIn(faultstring, resp.json['error_message']['faultstring']) alarms = self.get_json('/alarms', headers=self.auth_headers) self.assertEqual(1, len(alarms)) def test_alarms_quotas(self): self.CONF.set_override('user_alarm_quota', 1, 'api') self.CONF.set_override('project_alarm_quota', 1, 'api') self._test_alarm_quota() def test_project_alarms_quotas(self): self.CONF.set_override('project_alarm_quota', 1, 'api') self._test_alarm_quota() def test_user_alarms_quotas(self): self.CONF.set_override('user_alarm_quota', 1, 'api') self._test_alarm_quota() def test_larger_limit_project_alarms_quotas(self): self.CONF.set_override('user_alarm_quota', 1, 'api') self.CONF.set_override('project_alarm_quota', 2, 'api') self._test_alarm_quota() def test_larger_limit_user_alarms_quotas(self): self.CONF.set_override('user_alarm_quota', 2, 'api') self.CONF.set_override('project_alarm_quota', 1, 'api') self._test_alarm_quota() def test_larger_limit_user_alarm_quotas_multitenant_user(self): self.CONF.set_override('user_alarm_quota', 2, 'api') self.CONF.set_override('project_alarm_quota', 1, 'api') def _test(field, value): query = [{ 'field': field, 'op': 'eq', 'value': value }] alarms = self.get_json('/alarms', 
q=query, headers=self.auth_headers) self.assertEqual(1, len(alarms)) alarm = { 'name': 'alarm', 'type': 'gnocchi_aggregation_by_metrics_threshold', 'user_id': self.auth_headers['X-User-Id'], 'project_id': self.auth_headers['X-Project-Id'], RULE_KEY: { 'metrics': ['41869681-5776-46d6-91ed-cccc43b6e4e3', 'a1fb80f4-c242-4f57-87c6-68f47521059e'], 'comparison_operator': 'le', 'aggregation_method': 'max', 'threshold': 42.0, 'granularity': 60, 'evaluation_periods': 1, } } resp = self.post_json('/alarms', params=alarm, headers=self.auth_headers) self.assertEqual(201, resp.status_code) _test('project_id', self.auth_headers['X-Project-Id']) self.auth_headers['X-Project-Id'] = uuidutils.generate_uuid() alarm['name'] = 'another_user_alarm' alarm['project_id'] = self.auth_headers['X-Project-Id'] resp = self.post_json('/alarms', params=alarm, headers=self.auth_headers) self.assertEqual(201, resp.status_code) _test('project_id', self.auth_headers['X-Project-Id']) self.auth_headers["X-roles"] = "admin" alarms = self.get_json('/alarms', headers=self.auth_headers) self.assertEqual(1, len(alarms)) def test_overquota_by_quota_api(self): auth_headers = copy.copy(self.auth_headers) auth_headers['X-Roles'] = 'admin' # Update project quota. self.post_json( '/quotas', { "project_id": self.project_id, "quotas": [ { "resource": "alarms", "limit": 1 } ] }, headers=auth_headers, status=201 ) self._test_alarm_quota() # Update project quota back self.post_json( '/quotas', { "project_id": self.project_id, "quotas": [ { "resource": "alarms", "limit": -1 } ] }, headers=auth_headers, status=201 ) def test_overquota_by_user_quota_config(self): self.CONF.set_override('user_alarm_quota', 1, 'api') auth_headers = copy.copy(self.auth_headers) auth_headers['X-Roles'] = 'admin' # Update project quota. 
self.post_json( '/quotas', { "project_id": self.project_id, "quotas": [ { "resource": "alarms", "limit": 2 } ] }, headers=auth_headers, status=201 ) self._test_alarm_quota() # Update project quota back self.post_json( '/quotas', { "project_id": self.project_id, "quotas": [ { "resource": "alarms", "limit": -1 } ] }, headers=auth_headers, status=201 ) class TestAlarmsRuleThreshold(TestAlarmsBase): def test_post_threshold_rule_defaults(self): to_check = { 'name': 'added_alarm_defaults', 'state': 'insufficient data', 'description': ('gnocchi_aggregation_by_metrics_threshold ' 'alarm rule'), 'type': 'gnocchi_aggregation_by_metrics_threshold', RULE_KEY: { 'metrics': ['41869681-5776-46d6-91ed-cccc43b6e4e3', 'a1fb80f4-c242-4f57-87c6-68f47521059e'], 'threshold': 300.0, 'comparison_operator': 'eq', 'aggregation_method': 'mean', 'evaluation_periods': 1, 'granularity': 60, } } json = { 'name': 'added_alarm_defaults', 'type': 'gnocchi_aggregation_by_metrics_threshold', RULE_KEY: { 'metrics': ['41869681-5776-46d6-91ed-cccc43b6e4e3', 'a1fb80f4-c242-4f57-87c6-68f47521059e'], 'aggregation_method': 'mean', 'threshold': 300.0 } } self.post_json('/alarms', params=json, status=201, headers=self.auth_headers) alarms = list(self.alarm_conn.get_alarms()) self.assertEqual(1, len(alarms)) for alarm in alarms: if alarm.name == 'added_alarm_defaults': for key in to_check: if key.endswith('_rule'): storage_key = 'rule' else: storage_key = key self.assertEqual(to_check[key], getattr(alarm, storage_key)) break else: self.fail("Alarm not found") class TestAlarmsRuleGnocchi(TestAlarmsBase): def setUp(self): super(TestAlarmsRuleGnocchi, self).setUp() for alarm in [ models.Alarm(name='name1', type='gnocchi_resources_threshold', enabled=True, alarm_id='e', description='e', state='insufficient data', state_reason='Not evaluated', severity='critical', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, ok_actions=[], insufficient_data_actions=[], alarm_actions=[], repeat_actions=True, user_id=self.auth_headers['X-User-Id'], project_id=self.auth_headers['X-Project-Id'], time_constraints=[], rule=dict(comparison_operator='gt', threshold=2.0, aggregation_method='mean', granularity=60, evaluation_periods=1, metric='meter.test', resource_type='instance', resource_id=( '6841c175-d7c4-4bc2-bc7a-1c7832271b8f'), ) ), models.Alarm(name='name2', type='gnocchi_aggregation_by_metrics_threshold', enabled=True, alarm_id='f', description='f', state='insufficient data', state_reason='Not evaluated', severity='critical', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, ok_actions=[], insufficient_data_actions=[], alarm_actions=[], repeat_actions=True, user_id=self.auth_headers['X-User-Id'], project_id=self.auth_headers['X-Project-Id'], time_constraints=[], rule=dict(comparison_operator='gt', threshold=2.0, aggregation_method='mean', evaluation_periods=1, granularity=60, metrics=[ '41869681-5776-46d6-91ed-cccc43b6e4e3', 'a1fb80f4-c242-4f57-87c6-68f47521059e'] ), ), models.Alarm(name='name3', type='gnocchi_aggregation_by_resources_threshold', enabled=True, alarm_id='g', description='f', state='insufficient data', state_reason='Not evaluated', severity='critical', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, ok_actions=[], insufficient_data_actions=[], alarm_actions=[], repeat_actions=True, user_id=self.auth_headers['X-User-Id'], project_id=self.auth_headers['X-Project-Id'], time_constraints=[], rule=dict(comparison_operator='gt', threshold=2.0, aggregation_method='mean', 
granularity=60, evaluation_periods=1, metric='meter.test', resource_type='instance', query='{"=": {"server_group": ' '"my_autoscaling_group"}}') ), ]: self.alarm_conn.create_alarm(alarm) def test_list_alarms(self): data = self.get_json('/alarms', headers=self.auth_headers) self.assertEqual(3, len(data)) self.assertEqual(set(['name1', 'name2', 'name3']), set(r['name'] for r in data)) self.assertEqual(set(['meter.test']), set(r['gnocchi_resources_threshold_rule']['metric'] for r in data if 'gnocchi_resources_threshold_rule' in r)) def test_post_gnocchi_metrics_alarm_cached(self): # NOTE(gordc): cache is a decorator and therefore, gets mocked across # entire scenario. ideally we should test both scenario but tough. # assume cache will return aggregation_method == ['count'] always. json = { 'enabled': False, 'name': 'name_post', 'state': 'ok', 'type': 'gnocchi_aggregation_by_metrics_threshold', 'severity': 'critical', 'ok_actions': ['http://something/ok'], 'alarm_actions': ['http://something/alarm'], 'insufficient_data_actions': ['http://something/no'], 'repeat_actions': True, RULE_KEY: { 'metrics': ['b3d9d8ab-05e8-439f-89ad-5e978dd2a5eb', '009d4faf-c275-46f0-8f2d-670b15bac2b0'], 'comparison_operator': 'le', 'aggregation_method': 'count', 'threshold': 50, 'evaluation_periods': 3, 'granularity': 180, } } with mock.patch('aodh.api.controllers.v2.alarm_rules.' 'gnocchi.client') as clientlib: c = clientlib.Client.return_value c.capabilities.list.return_value = { 'aggregation_methods': ['count']} self.post_json('/alarms', params=json, headers=self.auth_headers) with mock.patch('aodh.api.controllers.v2.alarm_rules.' 'gnocchi.client') as clientlib: self.post_json('/alarms', params=json, headers=self.auth_headers) self.assertFalse(clientlib.called) @mock.patch('aodh.keystone_client.get_client') def test_post_gnocchi_aggregation_alarm_project_constraint(self, get_client): json = { 'enabled': False, 'name': 'project_constraint', 'state': 'ok', 'type': 'gnocchi_aggregation_by_resources_threshold', 'severity': 'critical', 'ok_actions': ['http://something/ok'], 'alarm_actions': ['http://something/alarm'], 'insufficient_data_actions': ['http://something/no'], 'repeat_actions': True, 'gnocchi_aggregation_by_resources_threshold_rule': { 'metric': 'ameter', 'comparison_operator': 'le', 'aggregation_method': 'count', 'threshold': 50, 'evaluation_periods': 3, 'granularity': 180, 'resource_type': 'instance', 'query': '{"=": {"server_group": "my_autoscaling_group"}}', } } expected_query = {"and": [ {"or": [ {"=": {"created_by_project_id": self.auth_headers['X-Project-Id']}}, {"and": [ {"=": {"created_by_project_id": ""}}, {"=": {"project_id": self.auth_headers['X-Project-Id']}} ]}, ]}, {"=": {"server_group": "my_autoscaling_group"}}, ]} ks_client = mock.Mock() ks_client.domains.list.return_value = [mock.Mock( id='', name='Default')] ks_client.projects.find.return_value = mock.Mock(id='') get_client.return_value = ks_client with mock.patch('aodh.api.controllers.v2.alarm_rules.' 
'gnocchi.client') as clientlib: c = clientlib.Client.return_value c.capabilities.list.return_value = { 'aggregation_methods': ['count']} self.post_json('/alarms', params=json, headers=self.auth_headers) self.assertEqual([mock.call( operations=[ 'aggregate', 'count', ['metric', 'ameter', 'count'] ], needed_overlap=0, start="-1 day", stop="now", search=expected_query, resource_type="instance")], c.aggregates.fetch.mock_calls), alarms = list(self.alarm_conn.get_alarms(enabled=False)) self.assertEqual(1, len(alarms)) json['gnocchi_aggregation_by_resources_threshold_rule']['query'] = ( jsonlib.dumps(expected_query)) self._verify_alarm(json, alarms[0]) class TestAlarmsCompositeRule(TestAlarmsBase): def setUp(self): super(TestAlarmsCompositeRule, self).setUp() self.sub_rule1 = { "type": "gnocchi_aggregation_by_metrics_threshold", "metrics": ['41869681-5776-46d6-91ed-cccc43b6e4e3', 'a1fb80f4-c242-4f57-87c6-68f47521059e'], "evaluation_periods": 5, "threshold": 0.8, "aggregation_method": "mean", "granularity": 60, "comparison_operator": "gt" } self.sub_rule2 = { "type": "gnocchi_aggregation_by_metrics_threshold", "metrics": ['41869681-5776-46d6-91ed-cccc43b6e4e3', 'a1fb80f4-c242-4f57-87c6-68f47521059e'], "evaluation_periods": 4, "threshold": 200, "aggregation_method": "max", "granularity": 60, "comparison_operator": "gt" } self.sub_rule3 = { "type": "gnocchi_aggregation_by_metrics_threshold", "metrics": ['41869681-5776-46d6-91ed-cccc43b6e4e3', 'a1fb80f4-c242-4f57-87c6-68f47521059e'], "evaluation_periods": 3, "threshold": 1000, "aggregation_method": "mean", "granularity": 60, "comparison_operator": "gt" } self.rule = { "or": [self.sub_rule1, { "and": [self.sub_rule2, self.sub_rule3] }]} def test_post_with_composite_rule(self): json = { "type": "composite", "name": "composite_alarm", "composite_rule": self.rule, "repeat_actions": False } self.post_json('/alarms', params=json, status=201, headers=self.auth_headers) alarms = list(self.alarm_conn.get_alarms()) self.assertEqual(1, len(alarms)) self.assertEqual(self.rule, alarms[0].rule) def test_post_with_sub_rule_with_wrong_type(self): self.sub_rule1['type'] = 'non-type' json = { "type": "composite", "name": "composite_alarm", "composite_rule": self.rule, "repeat_actions": False } response = self.post_json('/alarms', params=json, status=400, expect_errors=True, headers=self.auth_headers) err = "Unsupported sub-rule type" faultstring = response.json['error_message']['faultstring'] self.assertIn(err, faultstring) def test_post_with_sub_rule_with_only_required_params(self): sub_rulea = { "metrics": ['41869681-5776-46d6-91ed-cccc43b6e4e3', 'a1fb80f4-c242-4f57-87c6-68f47521059e'], "threshold": 0.8, "aggregation_method": "mean", "type": "gnocchi_aggregation_by_metrics_threshold"} sub_ruleb = { "metrics": ['41869681-5776-46d6-91ed-cccc43b6e4e3', 'a1fb80f4-c242-4f57-87c6-68f47521059e'], "threshold": 200, "aggregation_method": "mean", "type": "gnocchi_aggregation_by_metrics_threshold"} json = { "type": "composite", "name": "composite_alarm", "composite_rule": {"and": [sub_rulea, sub_ruleb]}, "repeat_actions": False } self.post_json('/alarms', params=json, status=201, headers=self.auth_headers) alarms = list(self.alarm_conn.get_alarms()) self.assertEqual(1, len(alarms)) def test_post_with_sub_rule_with_invalid_params(self): self.sub_rule1['threshold'] = False json = { "type": "composite", "name": "composite_alarm", "composite_rule": self.rule, "repeat_actions": False } response = self.post_json('/alarms', params=json, status=400, expect_errors=True, 
headers=self.auth_headers) faultstring = ("Invalid input for field/attribute threshold. " "Value: 'False'. Wrong type. Expected '%s', got '%s'" % (type(1.0), type(True))) self.assertEqual(faultstring, response.json['error_message']['faultstring']) class TestPaginationQuery(TestAlarmsBase): def setUp(self): super(TestPaginationQuery, self).setUp() for alarm in default_alarms(self.auth_headers): self.alarm_conn.create_alarm(alarm) def test_sort_by_severity_with_its_value(self): if self.engine != "mysql": self.skipTest("This is only implemented for MySQL") data = self.get_json('/alarms?sort=severity:asc', headers=self.auth_headers) severities = [a['severity'] for a in data] self.assertEqual(['moderate', 'critical', 'critical'], severities) data = self.get_json('/alarms?sort=severity:desc', headers=self.auth_headers) severities = [a['severity'] for a in data] self.assertEqual(['critical', 'critical', 'moderate'], severities) def test_pagination_query_history_data(self): for i in range(10): self._update_alarm('a', dict(name='%s' % i)) url = '/alarms/a/history?sort=event_id:desc&sort=timestamp:desc' data = self.get_json(url, headers=self.auth_headers) sorted_data = sorted(data, key=lambda d: (d['event_id'], d['timestamp']), reverse=True) self.assertEqual(sorted_data, data) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/functional/api/v2/test_app.py0000664000175000017500000001060500000000000022251 0ustar00zuulzuul00000000000000# # Copyright 2013 IBM Corp. # Copyright 2013 Julien Danjou # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
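# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream test suite): the
# TestAlarmsCompositeRule cases above post composite alarms whose rule nests
# gnocchi_aggregation_by_metrics_threshold sub-rules under "or"/"and"
# operators.  The dictionaries below mirror the request bodies built in those
# tests; the metric UUIDs and threshold values are placeholders only.
# ---------------------------------------------------------------------------
_example_sub_rule = {
    "type": "gnocchi_aggregation_by_metrics_threshold",
    "metrics": ["<metric-uuid-1>", "<metric-uuid-2>"],  # placeholder UUIDs
    "threshold": 0.8,
    "aggregation_method": "mean",
    # evaluation_periods, granularity and comparison_operator may be omitted,
    # as the "only required params" test above demonstrates.
}
_example_composite_alarm = {
    "type": "composite",
    "name": "composite_alarm",
    "composite_rule": {"or": [_example_sub_rule,
                              {"and": [_example_sub_rule, _example_sub_rule]}]},
    "repeat_actions": False,
}
# The tests POST such a body to /alarms and expect HTTP 201; an unknown
# sub-rule type or a non-numeric threshold is rejected with HTTP 400.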
"""Test basic aodh-api app """ import json from unittest import mock import wsme from aodh import i18n from aodh.tests.functional.api import v2 class TestApiMiddleware(v2.FunctionalTest): no_lang_translated_error = 'No lang translated error' en_US_translated_error = 'en-US translated error' def _fake_translate(self, message, user_locale): if user_locale is None: return self.no_lang_translated_error else: return self.en_US_translated_error def test_json_parsable_error_middleware_translation_400(self): # Ensure translated messages get placed properly into json faults with mock.patch.object(i18n, 'translate', side_effect=self._fake_translate): response = self.post_json('/alarms', params={ 'name': 'foobar', 'type': 'gnocchi_resources_threshold'}, expect_errors=True, headers={"Accept": "application/json"}) self.assertEqual(400, response.status_int) self.assertEqual("application/json", response.content_type) self.assertTrue(response.json['error_message']) self.assertEqual(self.no_lang_translated_error, response.json['error_message']['faultstring']) def test_xml_parsable_error_middleware_translation_400(self): # Ensure translated messages get placed properly into xml faults with mock.patch.object(i18n, 'translate', side_effect=self._fake_translate): response = self.post_json('/alarms', params={ 'name': 'foobar', 'type': 'gnocchi_resources_threshold'}, expect_errors=True, headers={"Accept": "application/xml,*/*"}) self.assertEqual(400, response.status_int) self.assertEqual("application/xml", response.content_type) self.assertEqual('error_message', response.xml.tag) fault = response.xml.findall('./error/faultstring') for fault_string in fault: self.assertEqual(self.no_lang_translated_error, fault_string.text) def test_best_match_language(self): # Ensure that we are actually invoking language negotiation with mock.patch.object(i18n, 'translate', side_effect=self._fake_translate): response = self.post_json('/alarms', params={ 'name': 'foobar', 'type': 'gnocchi_resources_threshold'}, expect_errors=True, headers={"Accept": "application/xml,*/*", "Accept-Language": "en-US"}) self.assertEqual(400, response.status_int) self.assertEqual("application/xml", response.content_type) self.assertEqual('error_message', response.xml.tag) fault = response.xml.findall('./error/faultstring') for fault_string in fault: self.assertEqual(self.en_US_translated_error, fault_string.text) def test_translated_then_untranslated_error(self): resp = self.get_json('/alarms/alarm-id-3', expect_errors=True) self.assertEqual(404, resp.status_code) body = resp.body.decode('utf-8') self.assertEqual("Alarm alarm-id-3 not found", json.loads(body)['error_message'] ['faultstring']) with mock.patch('aodh.api.controllers.' 'v2.base.AlarmNotFound') as CustomErrorClass: CustomErrorClass.return_value = wsme.exc.ClientSideError( "untranslated_error", status_code=404) resp = self.get_json('/alarms/alarm-id-5', expect_errors=True) self.assertEqual(404, resp.status_code) body = resp.body.decode('utf-8') self.assertEqual("untranslated_error", json.loads(body)['error_message'] ['faultstring']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/functional/api/v2/test_complex_query.py0000664000175000017500000002273200000000000024371 0ustar00zuulzuul00000000000000# # Copyright Ericsson AB 2013. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test the methods related to complex query.""" import datetime from unittest import mock import fixtures import jsonschema from oslotest import base import wsme from aodh.api.controllers.v2 import query from aodh.storage import models as alarm_models class FakeComplexQuery(query.ValidatedComplexQuery): def __init__(self, db_model, additional_name_mapping=None, metadata=False): super(FakeComplexQuery, self).__init__(query=None, db_model=db_model, additional_name_mapping=( additional_name_mapping or {}), metadata_allowed=metadata) class TestComplexQuery(base.BaseTestCase): def setUp(self): super(TestComplexQuery, self).setUp() self.useFixture(fixtures.MonkeyPatch( 'pecan.response', mock.MagicMock())) self.query = FakeComplexQuery(alarm_models.Alarm) self.query_alarmchange = FakeComplexQuery( alarm_models.AlarmChange) def test_replace_isotime_utc(self): filter_expr = {"=": {"timestamp": "2013-12-05T19:38:29Z"}} self.query._replace_isotime_with_datetime(filter_expr) self.assertEqual(datetime.datetime(2013, 12, 5, 19, 38, 29), filter_expr["="]["timestamp"]) def test_replace_isotime_timezone_removed(self): filter_expr = {"=": {"timestamp": "2013-12-05T20:38:29+01:00"}} self.query._replace_isotime_with_datetime(filter_expr) self.assertEqual(datetime.datetime(2013, 12, 5, 20, 38, 29), filter_expr["="]["timestamp"]) def test_replace_isotime_wrong_syntax(self): filter_expr = {"=": {"timestamp": "not a valid isotime string"}} self.assertRaises(wsme.exc.ClientSideError, self.query._replace_isotime_with_datetime, filter_expr) def test_replace_isotime_in_complex_filter(self): filter_expr = {"and": [{"=": {"timestamp": "2013-12-05T19:38:29Z"}}, {"=": {"timestamp": "2013-12-06T19:38:29Z"}}]} self.query._replace_isotime_with_datetime(filter_expr) self.assertEqual(datetime.datetime(2013, 12, 5, 19, 38, 29), filter_expr["and"][0]["="]["timestamp"]) self.assertEqual(datetime.datetime(2013, 12, 6, 19, 38, 29), filter_expr["and"][1]["="]["timestamp"]) def test_replace_isotime_in_complex_filter_with_unbalanced_tree(self): subfilter = {"and": [{"=": {"project_id": 42}}, {"=": {"timestamp": "2013-12-06T19:38:29Z"}}]} filter_expr = {"or": [{"=": {"timestamp": "2013-12-05T19:38:29Z"}}, subfilter]} self.query._replace_isotime_with_datetime(filter_expr) self.assertEqual(datetime.datetime(2013, 12, 5, 19, 38, 29), filter_expr["or"][0]["="]["timestamp"]) self.assertEqual(datetime.datetime(2013, 12, 6, 19, 38, 29), filter_expr["or"][1]["and"][1]["="]["timestamp"]) def test_convert_operator_to_lower_case(self): filter_expr = {"AND": [{"=": {"project_id": 42}}, {"=": {"project_id": 44}}]} self.query._convert_operator_to_lower_case(filter_expr) self.assertEqual("and", list(filter_expr.keys())[0]) filter_expr = {"Or": [{"=": {"project_id": 43}}, {"anD": [{"=": {"project_id": 44}}, {"=": {"project_id": 42}}]}]} self.query._convert_operator_to_lower_case(filter_expr) self.assertEqual("or", list(filter_expr.keys())[0]) self.assertEqual("and", list(filter_expr["or"][1].keys())[0]) def test_invalid_filter_misstyped_field_name_samples(self): filter = {"=": {"project_id11": 42}} self.assertRaises(jsonschema.ValidationError, 
self.query._validate_filter, filter) def test_invalid_filter_misstyped_field_name_alarms(self): filter = {"=": {"enabbled": True}} self.assertRaises(jsonschema.ValidationError, self.query._validate_filter, filter) def test_invalid_filter_misstyped_field_name_alarmchange(self): filter = {"=": {"tpe": "rule change"}} self.assertRaises(jsonschema.ValidationError, self.query_alarmchange._validate_filter, filter) def test_invalid_complex_filter_wrong_field_names(self): filter = {"and": [{"=": {"non_existing_field": 42}}, {"=": {"project_id": 42}}]} self.assertRaises(jsonschema.ValidationError, self.query._validate_filter, filter) filter = {"and": [{"=": {"project_id": 42}}, {"=": {"non_existing_field": 42}}]} self.assertRaises(jsonschema.ValidationError, self.query._validate_filter, filter) filter = {"and": [{"=": {"project_id11": 42}}, {"=": {"project_id": 42}}]} self.assertRaises(jsonschema.ValidationError, self.query_alarmchange._validate_filter, filter) filter = {"or": [{"=": {"non_existing_field": 42}}, {"and": [{"=": {"project_id": 44}}, {"=": {"project_id": 42}}]}]} self.assertRaises(jsonschema.ValidationError, self.query._validate_filter, filter) filter = {"or": [{"=": {"project_id": 43}}, {"and": [{"=": {"project_id": 44}}, {"=": {"non_existing_field": 42}}]}]} self.assertRaises(jsonschema.ValidationError, self.query._validate_filter, filter) def test_convert_orderby(self): orderby = [] self.query._convert_orderby_to_lower_case(orderby) self.assertEqual([], orderby) orderby = [{"project_id": "DESC"}] self.query._convert_orderby_to_lower_case(orderby) self.assertEqual([{"project_id": "desc"}], orderby) orderby = [{"project_id": "ASC"}, {"resource_id": "DESC"}] self.query._convert_orderby_to_lower_case(orderby) self.assertEqual([{"project_id": "asc"}, {"resource_id": "desc"}], orderby) def test_validate_orderby_empty_direction(self): orderby = [{"project_id": ""}] self.assertRaises(jsonschema.ValidationError, self.query._validate_orderby, orderby) orderby = [{"project_id": "asc"}, {"resource_id": ""}] self.assertRaises(jsonschema.ValidationError, self.query._validate_orderby, orderby) def test_validate_orderby_wrong_order_string(self): orderby = [{"project_id": "not a valid order"}] self.assertRaises(jsonschema.ValidationError, self.query._validate_orderby, orderby) def test_validate_orderby_wrong_multiple_item_order_string(self): orderby = [{"project_id": "not a valid order"}, {"resource_id": "ASC"}] self.assertRaises(jsonschema.ValidationError, self.query._validate_orderby, orderby) def test_validate_orderby_empty_field_name(self): orderby = [{"": "ASC"}] self.assertRaises(jsonschema.ValidationError, self.query._validate_orderby, orderby) orderby = [{"project_id": "asc"}, {"": "desc"}] self.assertRaises(jsonschema.ValidationError, self.query._validate_orderby, orderby) def test_validate_orderby_wrong_field_name(self): orderby = [{"project_id11": "ASC"}] self.assertRaises(jsonschema.ValidationError, self.query._validate_orderby, orderby) def test_validate_orderby_wrong_field_name_multiple_item_orderby(self): orderby = [{"project_id": "asc"}, {"resource_id11": "ASC"}] self.assertRaises(jsonschema.ValidationError, self.query._validate_orderby, orderby) def test_validate_orderby_metadata_is_not_allowed(self): orderby = [{"metadata.display_name": "asc"}] self.assertRaises(jsonschema.ValidationError, self.query._validate_orderby, orderby) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 
aodh-19.0.0/aodh/tests/functional/api/v2/test_complex_query_scenarios.py0000664000175000017500000003535500000000000026444 0ustar00zuulzuul00000000000000# # Copyright Ericsson AB 2013. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests complex queries for alarms """ import datetime from oslo_utils import timeutils from aodh.storage import models from aodh.tests.functional.api import v2 as tests_api admin_header = {"X-Roles": "admin", "X-Project-Id": "project-id1"} non_admin_header = {"X-Roles": "Member", "X-Project-Id": "project-id1"} RULE_TYPE = 'gnocchi_aggregation_by_metrics_threshold' class TestQueryAlarmsController(tests_api.FunctionalTest): def setUp(self): super(TestQueryAlarmsController, self).setUp() self.alarm_url = '/query/alarms' for state in ['ok', 'alarm', 'insufficient data']: for date in [datetime.datetime(2013, 1, 1), datetime.datetime(2013, 2, 2)]: for id in [1, 2]: alarm_id = "-".join([state, date.isoformat(), str(id)]) project_id = "project-id%d" % id alarm = models.Alarm(name=alarm_id, type=RULE_TYPE, enabled=True, alarm_id=alarm_id, description='a', state=state, state_reason="state_reason", state_timestamp=date, timestamp=date, ok_actions=[], insufficient_data_actions=[], alarm_actions=[], repeat_actions=True, user_id="user-id%d" % id, project_id=project_id, time_constraints=[], rule=dict(comparison_operator='gt', threshold=2.0, aggregation_method='mean', evaluation_periods=60, granularity=1, metrics=[]), severity='critical') self.alarm_conn.create_alarm(alarm) def test_query_all(self): data = self.post_json(self.alarm_url, headers=admin_header, params={}) self.assertEqual(12, len(data.json)) def test_filter_with_isotime_timestamp(self): date_time = datetime.datetime(2013, 1, 1) isotime = date_time.isoformat() data = self.post_json(self.alarm_url, headers=admin_header, params={"filter": '{">": {"timestamp": "' + isotime + '"}}'}) self.assertEqual(6, len(data.json)) for alarm in data.json: result_time = timeutils.parse_isotime(alarm['timestamp']) result_time = result_time.replace(tzinfo=None) self.assertGreater(result_time, date_time) def test_filter_with_isotime_state_timestamp(self): date_time = datetime.datetime(2013, 1, 1) isotime = date_time.isoformat() data = self.post_json(self.alarm_url, headers=admin_header, params={"filter": '{">": {"state_timestamp": "' + isotime + '"}}'}) self.assertEqual(6, len(data.json)) for alarm in data.json: result_time = timeutils.parse_isotime(alarm['state_timestamp']) result_time = result_time.replace(tzinfo=None) self.assertGreater(result_time, date_time) def test_non_admin_tenant_sees_only_its_own_project(self): data = self.post_json(self.alarm_url, params={}, headers=non_admin_header) for alarm in data.json: self.assertEqual("project-id1", alarm['project_id']) def test_non_admin_tenant_cannot_query_others_project(self): data = self.post_json(self.alarm_url, params={"filter": '{"=": {"project_id": "project-id2"}}'}, expect_errors=True, headers=non_admin_header) self.assertEqual(401, data.status_int) self.assertIn(b"Not 
Authorized to access project project-id2", data.body) def test_non_admin_tenant_can_explicitly_filter_for_own_project(self): data = self.post_json(self.alarm_url, params={"filter": '{"=": {"project_id": "project-id1"}}'}, headers=non_admin_header) for alarm in data.json: self.assertEqual("project-id1", alarm['project_id']) def test_admin_tenant_sees_every_project(self): data = self.post_json(self.alarm_url, params={}, headers=admin_header) self.assertEqual(12, len(data.json)) for alarm in data.json: self.assertIn(alarm['project_id'], (["project-id1", "project-id2"])) def test_admin_tenant_can_query_any_project(self): data = self.post_json(self.alarm_url, params={"filter": '{"=": {"project_id": "project-id2"}}'}, headers=admin_header) self.assertEqual(6, len(data.json)) for alarm in data.json: self.assertIn(alarm['project_id'], set(["project-id2"])) def test_query_with_field_project(self): data = self.post_json(self.alarm_url, headers=admin_header, params={"filter": '{"=": {"project": "project-id2"}}'}) self.assertEqual(6, len(data.json)) for alarm_item in data.json: self.assertIn(alarm_item['project_id'], set(["project-id2"])) def test_query_with_field_user_in_orderby(self): data = self.post_json(self.alarm_url, headers=admin_header, params={"filter": '{"=": {"state": "alarm"}}', "orderby": '[{"user": "DESC"}]'}) self.assertEqual(4, len(data.json)) self.assertEqual(["user-id2", "user-id2", "user-id1", "user-id1"], [s["user_id"] for s in data.json]) def test_query_with_filter_orderby_and_limit(self): orderby = '[{"state_timestamp": "DESC"}]' data = self.post_json(self.alarm_url, headers=admin_header, params={"filter": '{"=": {"state": "alarm"}}', "orderby": orderby, "limit": 3}) self.assertEqual(3, len(data.json)) self.assertEqual(["2013-02-02T00:00:00", "2013-02-02T00:00:00", "2013-01-01T00:00:00"], [a["state_timestamp"] for a in data.json]) for alarm in data.json: self.assertEqual("alarm", alarm["state"]) def test_query_with_orderby_severity(self): if self.engine != "mysql": self.skipTest("This is only implemented for MySQL") orderby = '[{"severity": "ASC"}]' data = self.post_json(self.alarm_url, headers=admin_header, params={"orderby": orderby}) alarms = list(data.json) severities = [a['severity'] for a in alarms] severity_choices = ['low', 'moderate', 'critical'] sorted_severities = sorted(severities, key=severity_choices.index) self.assertEqual(sorted_severities, severities) orderby = '[{"severity": "DESC"}]' data = self.post_json(self.alarm_url, headers=admin_header, params={"orderby": orderby}) alarms = list(data.json) severities = [a['severity'] for a in alarms] sorted_severities = sorted(severities, key=severity_choices.index, reverse=True) self.assertEqual(sorted_severities, severities) def test_limit_should_be_positive(self): data = self.post_json(self.alarm_url, headers=admin_header, params={"limit": 0}, expect_errors=True) self.assertEqual(400, data.status_int) self.assertIn(b"Limit should be positive", data.body) class TestQueryAlarmsHistoryController(tests_api.FunctionalTest): def setUp(self): super(TestQueryAlarmsHistoryController, self).setUp() self.url = '/query/alarms/history' for id in [1, 2]: for type in ["creation", "state transition"]: for date in [datetime.datetime(2013, 1, 1), datetime.datetime(2013, 2, 2)]: event_id = "-".join([str(id), type, date.isoformat()]) alarm_change = {"event_id": event_id, "alarm_id": "alarm-id%d" % id, "type": type, "detail": "", "user_id": "user-id%d" % id, "project_id": "project-id%d" % id, "on_behalf_of": "project-id%d" % id, 
"timestamp": date} self.alarm_conn.record_alarm_change(alarm_change) def test_query_all(self): data = self.post_json(self.url, headers=admin_header, params={}) self.assertEqual(8, len(data.json)) def test_filter_with_isotime(self): date_time = datetime.datetime(2013, 1, 1) isotime = date_time.isoformat() data = self.post_json(self.url, headers=admin_header, params={"filter": '{">": {"timestamp":"' + isotime + '"}}'}) self.assertEqual(4, len(data.json)) for history in data.json: result_time = timeutils.parse_isotime(history['timestamp']) result_time = result_time.replace(tzinfo=None) self.assertGreater(result_time, date_time) def test_non_admin_tenant_sees_only_its_own_project(self): data = self.post_json(self.url, params={}, headers=non_admin_header) for history in data.json: self.assertEqual("project-id1", history['on_behalf_of']) def test_non_admin_tenant_cannot_query_others_project(self): data = self.post_json(self.url, params={"filter": '{"=": {"on_behalf_of":' + ' "project-id2"}}'}, expect_errors=True, headers=non_admin_header) self.assertEqual(401, data.status_int) self.assertIn(b"Not Authorized to access project project-id2", data.body) def test_non_admin_tenant_can_explicitly_filter_for_own_project(self): data = self.post_json(self.url, params={"filter": '{"=": {"on_behalf_of":' + ' "project-id1"}}'}, headers=non_admin_header) for history in data.json: self.assertEqual("project-id1", history['on_behalf_of']) def test_admin_tenant_sees_every_project(self): data = self.post_json(self.url, params={}, headers=admin_header) self.assertEqual(8, len(data.json)) for history in data.json: self.assertIn(history['on_behalf_of'], (["project-id1", "project-id2"])) def test_query_with_filter_for_project_orderby_with_user(self): data = self.post_json(self.url, headers=admin_header, params={"filter": '{"=": {"project": "project-id1"}}', "orderby": '[{"user": "DESC"}]', "limit": 3}) self.assertEqual(3, len(data.json)) self.assertEqual(["user-id1", "user-id1", "user-id1"], [h["user_id"] for h in data.json]) for history in data.json: self.assertEqual("project-id1", history['project_id']) def test_query_with_filter_orderby_and_limit(self): data = self.post_json(self.url, headers=admin_header, params={"filter": '{"=": {"type": "creation"}}', "orderby": '[{"timestamp": "DESC"}]', "limit": 3}) self.assertEqual(3, len(data.json)) self.assertEqual(["2013-02-02T00:00:00", "2013-02-02T00:00:00", "2013-01-01T00:00:00"], [h["timestamp"] for h in data.json]) for history in data.json: self.assertEqual("creation", history['type']) def test_limit_should_be_positive(self): data = self.post_json(self.url, params={"limit": 0}, headers=admin_header, expect_errors=True) self.assertEqual(400, data.status_int) self.assertIn(b"Limit should be positive", data.body) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/functional/api/v2/test_quotas.py0000664000175000017500000001645700000000000023020 0ustar00zuulzuul00000000000000# Copyright 2020 Catalyst Cloud LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. import copy from oslo_utils import uuidutils from aodh.tests.functional.api import v2 class TestQuotas(v2.FunctionalTest): @classmethod def setUpClass(cls): super(TestQuotas, cls).setUpClass() cls.project = uuidutils.generate_uuid() cls.user = uuidutils.generate_uuid() cls.auth_headers = {'X-User-Id': cls.user, 'X-Project-Id': cls.project} cls.other_project = uuidutils.generate_uuid() def test_get_quotas_by_user(self): resp = self.get_json('/quotas', headers=self.auth_headers, status=200) self.assertEqual(self.project, resp.get('project_id')) self.assertGreater(len(resp.get('quotas', [])), 0) def test_get_project_quotas_by_user(self): resp = self.get_json('/quotas?project_id=%s' % self.project, headers=self.auth_headers, status=200) self.assertEqual(self.project, resp.get('project_id')) self.assertGreater(len(resp.get('quotas', [])), 0) def test_get_other_project_quotas_by_user_failed(self): self.get_json( '/quotas?project_id=%s' % self.other_project, headers=self.auth_headers, expect_errors=True, status=401 ) def test_get_project_quotas_by_admin(self): auth_headers = copy.copy(self.auth_headers) auth_headers['X-Roles'] = 'admin' resp = self.get_json('/quotas?project_id=%s' % self.other_project, headers=auth_headers, status=200) self.assertEqual(self.other_project, resp.get('project_id')) self.assertGreater(len(resp.get('quotas', [])), 0) def test_post_quotas_by_admin(self): auth_headers = copy.copy(self.auth_headers) auth_headers['X-Roles'] = 'admin' resp = self.post_json( '/quotas', { "project_id": self.other_project, "quotas": [ { "resource": "alarms", "limit": 30 } ] }, headers=auth_headers, status=201 ) resp_json = resp.json self.assertEqual(self.other_project, resp_json.get('project_id')) self.assert_single_item(resp_json.get('quotas', []), resource='alarms', limit=30) def test_post_quotas_by_user_failed(self): self.post_json( '/quotas', { "project_id": self.other_project, "quotas": [ { "resource": "alarms", "limit": 20 } ] }, headers=self.auth_headers, expect_errors=True, status=403 ) def test_post_quotas_no_limit_failed(self): auth_headers = copy.copy(self.auth_headers) auth_headers['X-Roles'] = 'admin' resp = self.post_json( '/quotas', { "project_id": self.project, "quotas": [ { "resource": "alarms" } ] }, headers=auth_headers, expect_errors=True, status=400 ) self.assertIn('Mandatory field missing', resp.json['error_message']['faultstring']) def test_post_quotas_no_resource_failed(self): auth_headers = copy.copy(self.auth_headers) auth_headers['X-Roles'] = 'admin' resp = self.post_json( '/quotas', { "project_id": self.project, "quotas": [ { "limit": 1 } ] }, headers=auth_headers, expect_errors=True, status=400 ) self.assertIn('Mandatory field missing', resp.json['error_message']['faultstring']) def test_post_quotas_wrong_limit_failed(self): auth_headers = copy.copy(self.auth_headers) auth_headers['X-Roles'] = 'admin' resp = self.post_json( '/quotas', { "project_id": self.project, "quotas": [ { "resource": "alarms", "limit": -5 } ] }, headers=auth_headers, expect_errors=True, status=400 ) self.assertIn('Value should be greater or equal to -1', resp.json['error_message']['faultstring']) def test_post_quotas_unsupported_resource_failed(self): auth_headers = copy.copy(self.auth_headers) auth_headers['X-Roles'] = 'admin' resp = self.post_json( '/quotas', { "project_id": self.project, "quotas": [ { "resource": "other_resource", "limit": 1 } ] }, headers=auth_headers, 
expect_errors=True, status=400 ) self.assertIn('Value should be one of', resp.json['error_message']['faultstring']) def test_delete_project_quota_by_admin(self): auth_headers = copy.copy(self.auth_headers) auth_headers['X-Roles'] = 'admin' self.post_json( '/quotas', { "project_id": self.other_project, "quotas": [ { "resource": "alarms", "limit": 30 } ] }, headers=auth_headers, status=201 ) resp = self.get_json('/quotas?project_id=%s' % self.other_project, headers=auth_headers, status=200) self.assert_single_item(resp['quotas'], resource='alarms', limit=30) self.delete('/quotas/%s' % self.other_project, headers=auth_headers, status=204) resp = self.get_json('/quotas?project_id=%s' % self.other_project, headers=auth_headers, status=200) self.assert_multiple_items(resp['quotas'], 0, resource='alarms', limit=30) def test_delete_project_quota_by_user_failed(self): self.delete('/quotas/%s' % self.other_project, headers=self.auth_headers, expect_errors=True, status=403) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/functional/db.py0000664000175000017500000000745300000000000017726 0ustar00zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 eNovance # Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
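# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream test suite): TestQuotas above
# drives the /quotas endpoint: an admin posts per-project resource limits,
# reads them back with GET /quotas?project_id=<id>, and removes the override
# with DELETE /quotas/<id>.  The body below mirrors the requests used in
# those tests; the project id is a placeholder, not a real value.
# ---------------------------------------------------------------------------
_example_quota_update = {
    "project_id": "<project-uuid>",  # placeholder project id
    "quotas": [
        {"resource": "alarms", "limit": 30},
    ],
}
# Posting this body as a non-admin fails with HTTP 403, omitting "resource"
# or "limit" fails with HTTP 400, and the tests undo the override by posting
# the same body again with "limit": -1.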
"""Base classes for API tests.""" import os import fixtures from oslo_config import fixture as fixture_config from oslo_utils import uuidutils from urllib import parse as urlparse from aodh import service from aodh import storage from aodh.tests import base as test_base class SQLManager(fixtures.Fixture): def __init__(self, conf): self.conf = conf db_name = 'aodh_%s' % uuidutils.generate_uuid(dashed=False) import sqlalchemy self._engine = sqlalchemy.create_engine(conf.database.connection) self._conn = self._engine.connect() self._create_db(self._conn, db_name) self._conn.close() self._engine.dispose() parsed = list(urlparse.urlparse(conf.database.connection)) # NOTE(jd) We need to set an host otherwise urlunparse() will not # construct a proper URL if parsed[1] == '': parsed[1] = 'localhost' parsed[2] = '/' + db_name self.url = urlparse.urlunparse(parsed) class PgSQLManager(SQLManager): @staticmethod def _create_db(conn, db_name): conn.connection.set_isolation_level(0) conn.exec_driver_sql( 'CREATE DATABASE %s WITH TEMPLATE template0;' % db_name ) conn.connection.set_isolation_level(1) class MySQLManager(SQLManager): @staticmethod def _create_db(conn, db_name): conn.exec_driver_sql('CREATE DATABASE %s;' % db_name) class SQLiteManager(fixtures.Fixture): def __init__(self, conf): self.url = "sqlite://" class TestBase(test_base.BaseTestCase, metaclass=test_base.SkipNotImplementedMeta): DRIVER_MANAGERS = { 'mysql': MySQLManager, 'postgresql': PgSQLManager, 'sqlite': SQLiteManager, } def setUp(self): super(TestBase, self).setUp() db_url = os.environ.get( 'AODH_TEST_STORAGE_URL', 'sqlite://', ).replace( "mysql://", "mysql+pymysql://", ) engine = urlparse.urlparse(db_url).scheme # In case some drivers have additional specification, for example: # PyMySQL will have scheme mysql+pymysql. self.engine = engine.split('+')[0] conf = service.prepare_service(argv=[], config_files=[]) self.CONF = self.useFixture(fixture_config.Config(conf)).conf self.CONF.set_override('connection', db_url, group="database") manager = self.DRIVER_MANAGERS.get(self.engine) if not manager: self.skipTest("missing driver manager: %s" % self.engine) self.db_manager = manager(self.CONF) self.useFixture(self.db_manager) self.CONF.set_override('connection', self.db_manager.url, group="database") self.alarm_conn = storage.get_connection_from_config(self.CONF) self.alarm_conn.upgrade() self.useFixture(fixtures.MockPatch( 'aodh.storage.get_connection_from_config', side_effect=self._get_connection)) def tearDown(self): self.alarm_conn.clear() self.alarm_conn = None super(TestBase, self).tearDown() def _get_connection(self, conf): return self.alarm_conn ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727866891.087014 aodh-19.0.0/aodh/tests/functional/hooks/0000775000175000017500000000000000000000000020101 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/functional/hooks/post_test_hook.sh0000775000175000017500000000130200000000000023500 0ustar00zuulzuul00000000000000#!/bin/bash -xe #FIXME(sileht): remove me when dsvm gate job is removed # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This script is executed inside post_test_hook function in devstack gate. set -e exit 0 ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727866891.087014 aodh-19.0.0/aodh/tests/functional/storage/0000775000175000017500000000000000000000000020422 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/functional/storage/__init__.py0000664000175000017500000000000000000000000022521 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727866891.087014 aodh-19.0.0/aodh/tests/functional/storage/sqlalchemy/0000775000175000017500000000000000000000000022564 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/functional/storage/sqlalchemy/__init__.py0000664000175000017500000000000000000000000024663 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/functional/storage/sqlalchemy/test_migrations.py0000664000175000017500000000257100000000000026356 0ustar00zuulzuul00000000000000# Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from unittest import mock from oslo_db.sqlalchemy import enginefacade from oslo_db.sqlalchemy import test_migrations from aodh.storage.sqlalchemy import models from aodh.tests import base from aodh.tests.functional import db as tests_db class ABCSkip(base.SkipNotImplementedMeta, abc.ABCMeta): pass class ModelsMigrationsSync(tests_db.TestBase, test_migrations.ModelsMigrationsSync, metaclass=ABCSkip): def setUp(self): super(ModelsMigrationsSync, self).setUp() self.db = mock.Mock() @staticmethod def get_metadata(): return models.Base.metadata def get_engine(self): return enginefacade.writer.get_engine() def db_sync(self, engine): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/functional/storage/test_get_connection.py0000664000175000017500000000626500000000000025042 0ustar00zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for aodh/storage/ """ from unittest import mock from oslo_config import fixture as fixture_config from oslotest import base from aodh import service from aodh import storage from aodh.storage import impl_log class EngineTest(base.BaseTestCase): def setUp(self): super(EngineTest, self).setUp() conf = service.prepare_service(argv=[], config_files=[]) self.CONF = self.useFixture(fixture_config.Config(conf)).conf def test_get_connection(self): self.CONF.set_override('connection', 'log://localhost', group='database') engine = storage.get_connection_from_config(self.CONF) self.assertIsInstance(engine, impl_log.Connection) def test_get_connection_no_such_engine(self): self.CONF.set_override('connection', 'no-such-engine://localhost', group='database') self.CONF.set_override('max_retries', 0, 'database') try: storage.get_connection_from_config(self.CONF) except RuntimeError as err: self.assertIn('no-such-engine', str(err)) class ConnectionRetryTest(base.BaseTestCase): def setUp(self): super(ConnectionRetryTest, self).setUp() conf = service.prepare_service(argv=[], config_files=[]) self.CONF = self.useFixture(fixture_config.Config(conf)).conf def test_retries(self): max_retries = 5 with mock.patch.object( storage.impl_log.Connection, '__init__') as log_init: class ConnectionError(Exception): pass def x(conf): raise ConnectionError log_init.side_effect = x self.CONF.set_override("connection", "log://", "database") self.CONF.set_override("retry_interval", 0.00001, "database") self.CONF.set_override("max_retries", max_retries, "database") self.assertRaises(ConnectionError, storage.get_connection_from_config, self.CONF) self.assertEqual(max_retries, log_init.call_count) class ConnectionConfigTest(base.BaseTestCase): def setUp(self): super(ConnectionConfigTest, self).setUp() conf = service.prepare_service(argv=[], config_files=[]) self.CONF = self.useFixture(fixture_config.Config(conf)).conf def test_only_default_url(self): self.CONF.set_override("connection", "log://", group="database") conn = storage.get_connection_from_config(self.CONF) self.assertIsInstance(conn, impl_log.Connection) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/functional/storage/test_impl_log.py0000664000175000017500000000146300000000000023641 0ustar00zuulzuul00000000000000# Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
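# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream module below): EngineTest and
# ConnectionRetryTest above resolve the alarm storage driver from the
# [database]/connection URL and retry the connection according to the
# max_retries and retry_interval options.  A minimal standalone use of the
# same calls, kept inert behind a __main__ guard, could look like this; the
# "log://" URL selects the no-op logging backend, so no real database is
# needed.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from aodh import service as _service
    from aodh import storage as _storage

    _conf = _service.prepare_service(argv=[], config_files=[])
    _conf.set_override('connection', 'log://localhost', group='database')
    _conf.set_override('max_retries', 1, group='database')
    # For this URL the call returns an aodh.storage.impl_log.Connection,
    # exactly as EngineTest.test_get_connection asserts.
    print(_storage.get_connection_from_config(_conf))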
from oslo_config import cfg from oslotest import base from aodh.storage import impl_log class ConnectionTest(base.BaseTestCase): @staticmethod def test_get_connection(): impl_log.Connection(cfg.CONF) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/functional/storage/test_impl_sqlalchemy.py0000664000175000017500000000252700000000000025224 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for aodh/storage/impl_sqlalchemy.py .. note:: In order to run the tests against real SQL server set the environment variable aodh_TEST_SQL_URL to point to a SQL server before running the tests. """ from aodh.storage import impl_sqlalchemy as impl_sqla_alarm from aodh.tests import base as test_base class CapabilitiesTest(test_base.BaseTestCase): def test_alarm_capabilities(self): expected_capabilities = { 'alarms': {'query': {'simple': True, 'complex': True}, 'history': {'query': {'simple': True, 'complex': True}}}, } actual_capabilities = impl_sqla_alarm.Connection.get_capabilities() self.assertEqual(expected_capabilities, actual_capabilities) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/functional/storage/test_storage_scenarios.py0000664000175000017500000005406600000000000025560 0ustar00zuulzuul00000000000000# # Copyright 2013 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
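# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream module below): the
# CapabilitiesTest case above pins down the feature matrix advertised by the
# SQLAlchemy driver, namely that both simple and complex queries are
# supported for alarms and for alarm history.  The classmethod can be called
# without setting up a database, exactly as the test does.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from aodh.storage import impl_sqlalchemy

    # Expected shape, taken from the assertion in CapabilitiesTest:
    # {'alarms': {'query': {'simple': True, 'complex': True},
    #             'history': {'query': {'simple': True, 'complex': True}}}}
    print(impl_sqlalchemy.Connection.get_capabilities())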
""" Base classes for DB backend implementation test """ import datetime from unittest import mock from oslo_utils import timeutils from aodh import storage from aodh.storage import models as alarm_models from aodh.tests import constants from aodh.tests.functional import db as tests_db ALARM_TYPE = 'gnocchi_aggregation_by_metrics_threshold' METRIC_IDS = ['41869681-5776-46d6-91ed-cccc43b6e4e3', 'a1fb80f4-c242-4f57-87c6-68f47521059e'] class DBTestBase(tests_db.TestBase): @staticmethod def create_side_effect(method, exception_type, test_exception): def side_effect(*args, **kwargs): if test_exception.pop(): raise exception_type else: return method(*args, **kwargs) return side_effect def setUp(self): super(DBTestBase, self).setUp() patcher = mock.patch.object(timeutils, 'utcnow') self.addCleanup(patcher.stop) self.mock_utcnow = patcher.start() self.mock_utcnow.return_value = datetime.datetime(2015, 7, 2, 10, 39) class AlarmTestBase(DBTestBase): def add_some_alarms(self): alarms = [alarm_models.Alarm(alarm_id='r3d', enabled=True, type=ALARM_TYPE, name='red-alert', description='my red-alert', timestamp=datetime.datetime(2015, 7, 2, 10, 25), user_id='me', project_id='and-da-boys', state="insufficient data", state_reason="insufficient data", state_timestamp=constants.MIN_DATETIME, ok_actions=[], alarm_actions=['http://nowhere/alarms'], insufficient_data_actions=[], repeat_actions=False, time_constraints=[dict(name='testcons', start='0 11 * * *', duration=300)], rule=dict(comparison_operator='eq', threshold=36, aggregation_method='count', evaluation_periods=1, granularity=60, metrics=METRIC_IDS), severity='low' ), alarm_models.Alarm(alarm_id='0r4ng3', enabled=True, type=ALARM_TYPE, name='orange-alert', description='a orange', timestamp=datetime.datetime(2015, 7, 2, 10, 40), user_id='me', project_id='and-da-boys', state="insufficient data", state_reason="insufficient data", state_timestamp=constants.MIN_DATETIME, ok_actions=[], alarm_actions=['http://nowhere/alarms'], insufficient_data_actions=[], repeat_actions=False, time_constraints=[], rule=dict(comparison_operator='gt', threshold=75, aggregation_method='avg', evaluation_periods=1, granularity=60, metrics=METRIC_IDS), severity='low' ), alarm_models.Alarm(alarm_id='y3ll0w', enabled=False, type=ALARM_TYPE, name='yellow-alert', description='yellow', timestamp=datetime.datetime(2015, 7, 2, 10, 10), user_id='me', project_id='and-da-boys', state="insufficient data", state_reason="insufficient data", state_timestamp=constants.MIN_DATETIME, ok_actions=[], alarm_actions=['http://nowhere/alarms'], insufficient_data_actions=[], repeat_actions=False, time_constraints=[], rule=dict(comparison_operator='lt', threshold=10, aggregation_method='min', evaluation_periods=1, granularity=60, metrics=METRIC_IDS), severity='low' )] for a in alarms: self.alarm_conn.create_alarm(a) class AlarmTest(AlarmTestBase): def test_empty(self): alarms = list(self.alarm_conn.get_alarms()) self.assertEqual([], alarms) def test_list(self): self.add_some_alarms() alarms = list(self.alarm_conn.get_alarms()) self.assertEqual(3, len(alarms)) def test_list_ordered_by_timestamp(self): self.add_some_alarms() alarms = list(self.alarm_conn.get_alarms()) self.assertEqual(len(alarms), 3) alarm_l = [a.timestamp for a in alarms] alarm_l_ordered = [datetime.datetime(2015, 7, 2, 10, 40), datetime.datetime(2015, 7, 2, 10, 25), datetime.datetime(2015, 7, 2, 10, 10)] self.assertEqual(alarm_l_ordered, alarm_l) def test_list_enabled(self): self.add_some_alarms() alarms = 
list(self.alarm_conn.get_alarms(enabled=True)) self.assertEqual(2, len(alarms)) def test_list_disabled(self): self.add_some_alarms() alarms = list(self.alarm_conn.get_alarms(enabled=False)) self.assertEqual(1, len(alarms)) def test_list_by_type(self): self.add_some_alarms() alarms = list(self.alarm_conn.get_alarms(type=ALARM_TYPE)) self.assertEqual(3, len(alarms)) def test_list_excluded_by_name(self): self.add_some_alarms() alarms = list(self.alarm_conn.get_alarms(name={'ne': 'yellow-alert'})) self.assertEqual(2, len(alarms)) alarm_names = sorted([a.name for a in alarms]) self.assertEqual(['orange-alert', 'red-alert'], alarm_names) def test_add(self): self.add_some_alarms() alarms = list(self.alarm_conn.get_alarms()) self.assertEqual(3, len(alarms)) metrics = sorted([a.rule['metrics'] for a in alarms]) self.assertEqual([METRIC_IDS, METRIC_IDS, METRIC_IDS], metrics) def test_update(self): self.add_some_alarms() metrics = ['6841c175-d7c4-4bc2-bc7a-1c7832271b8f', 'bc1efaa5-93b4-4518-8337-18519917c15a'] orange = list(self.alarm_conn.get_alarms(name='orange-alert'))[0] orange.enabled = False orange.state = alarm_models.Alarm.ALARM_INSUFFICIENT_DATA orange.rule['metrics'] = metrics updated = self.alarm_conn.update_alarm(orange) self.assertFalse(updated.enabled) self.assertEqual(alarm_models.Alarm.ALARM_INSUFFICIENT_DATA, updated.state) self.assertEqual(metrics, updated.rule['metrics']) def test_update_llu(self): llu = alarm_models.Alarm(alarm_id='llu', enabled=True, type=ALARM_TYPE, name='llu', description='llu', timestamp=constants.MIN_DATETIME, user_id='bla', project_id='ffo', state="insufficient data", state_reason="insufficient data", state_timestamp=constants.MIN_DATETIME, ok_actions=[], alarm_actions=[], insufficient_data_actions=[], repeat_actions=False, time_constraints=[], rule=dict(comparison_operator='lt', threshold=34, aggregation_method='max', evaluation_periods=1, granularity=60, metrics=METRIC_IDS) ) updated = self.alarm_conn.create_alarm(llu) updated.state = alarm_models.Alarm.ALARM_OK updated.description = ':)' self.alarm_conn.update_alarm(updated) all = list(self.alarm_conn.get_alarms()) self.assertEqual(1, len(all)) def test_update_deleted_alarm_failed(self): self.add_some_alarms() alarm1 = list(self.alarm_conn.get_alarms(name='orange-alert'))[0] self.alarm_conn.delete_alarm(alarm1.alarm_id) survivors = list(self.alarm_conn.get_alarms()) self.assertEqual(2, len(survivors)) alarm1.state = alarm_models.Alarm.ALARM_ALARM self.assertRaises(storage.AlarmNotFound, self.alarm_conn.update_alarm, alarm1) survivors = list(self.alarm_conn.get_alarms()) self.assertEqual(2, len(survivors)) def test_delete(self): self.add_some_alarms() victim = list(self.alarm_conn.get_alarms(name='orange-alert'))[0] self.alarm_conn.delete_alarm(victim.alarm_id) survivors = list(self.alarm_conn.get_alarms()) self.assertEqual(2, len(survivors)) for s in survivors: self.assertNotEqual(victim.name, s.name) class AlarmHistoryTest(AlarmTestBase): def setUp(self): super(AlarmTestBase, self).setUp() self.add_some_alarms() self.prepare_alarm_history() def prepare_alarm_history(self): alarms = list(self.alarm_conn.get_alarms()) for alarm in alarms: i = alarms.index(alarm) alarm_change = { "event_id": "3e11800c-a3ca-4991-b34b-d97efb6047d%s" % i, "alarm_id": alarm.alarm_id, "type": alarm_models.AlarmChange.CREATION, "detail": "detail %s" % alarm.name, "user_id": alarm.user_id, "project_id": alarm.project_id, "on_behalf_of": alarm.project_id, "timestamp": datetime.datetime(2014, 4, 7, 7, 30 + i) } 
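        # Each change below is recorded with a distinct timestamp (7:30, 7:31,
        # 7:32), so the TTL-based clearing tests in this class can expire
        # none, some or all of the history depending on the mocked "now".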
self.alarm_conn.record_alarm_change(alarm_change=alarm_change) def _clear_alarm_history(self, utcnow, ttl, count): self.mock_utcnow.return_value = utcnow self.alarm_conn.clear_expired_alarm_history_data(ttl, 100) history = list(self.alarm_conn.query_alarm_history()) self.assertEqual(count, len(history)) def test_clear_alarm_history_no_data_to_remove(self): utcnow = datetime.datetime(2013, 4, 7, 7, 30) self._clear_alarm_history(utcnow, 1, 3) def test_clear_some_alarm_history(self): utcnow = datetime.datetime(2014, 4, 7, 7, 35) self._clear_alarm_history(utcnow, 3 * 60, 1) def test_clear_all_alarm_history(self): utcnow = datetime.datetime(2014, 4, 7, 7, 45) self._clear_alarm_history(utcnow, 3 * 60, 0) def test_delete_history_when_delete_alarm(self): alarms = list(self.alarm_conn.get_alarms()) self.assertEqual(3, len(alarms)) history = list(self.alarm_conn.query_alarm_history()) self.assertEqual(3, len(history)) for alarm in alarms: self.alarm_conn.delete_alarm(alarm.alarm_id) self.assertEqual(3, len(alarms)) history = list(self.alarm_conn.query_alarm_history()) self.assertEqual(0, len(history)) def test_record_severity_when_alarm_change(self): alarm = list(self.alarm_conn.get_alarms(name='orange-alert'))[0] severity = "low" alarm_change = { "event_id": "3d22800c-a3ca-4991-b34b-d97efb6047d9", "alarm_id": alarm.alarm_id, "type": alarm_models.AlarmChange.STATE_TRANSITION, "detail": "detail %s" % alarm.name, "user_id": alarm.user_id, "project_id": alarm.project_id, "on_behalf_of": alarm.project_id, "severity": severity, "timestamp": datetime.datetime(2014, 4, 7, 7, 34) } self.alarm_conn.record_alarm_change(alarm_change=alarm_change) filter_expr = {"=": {"severity": "low"}} history = list(self.alarm_conn.query_alarm_history( filter_expr=filter_expr)) self.assertEqual(1, len(history)) self.assertEqual("low", history[0].severity) class ComplexAlarmQueryTest(AlarmTestBase): def test_no_filter(self): self.add_some_alarms() result = list(self.alarm_conn.query_alarms()) self.assertEqual(3, len(result)) def test_no_filter_with_limit(self): self.add_some_alarms() result = list(self.alarm_conn.query_alarms(limit=2)) self.assertEqual(2, len(result)) def test_filter(self): self.add_some_alarms() filter_expr = {"and": [{"or": [{"=": {"name": "yellow-alert"}}, {"=": {"name": "red-alert"}}]}, {"=": {"enabled": True}}]} result = list(self.alarm_conn.query_alarms(filter_expr=filter_expr)) self.assertEqual(1, len(result)) for a in result: self.assertIn(a.name, set(["yellow-alert", "red-alert"])) self.assertTrue(a.enabled) def test_filter_with_regexp(self): self.add_some_alarms() filter_expr = {"and": [{"or": [{"=": {"name": "yellow-alert"}}, {"=": {"name": "red-alert"}}]}, {"=~": {"description": "yel.*"}}]} result = list(self.alarm_conn.query_alarms(filter_expr=filter_expr)) self.assertEqual(1, len(result)) for a in result: self.assertEqual("yellow", a.description) def test_filter_for_alarm_id(self): self.add_some_alarms() filter_expr = {"=": {"alarm_id": "0r4ng3"}} result = list(self.alarm_conn.query_alarms(filter_expr=filter_expr)) self.assertEqual(1, len(result)) for a in result: self.assertEqual("0r4ng3", a.alarm_id) def test_filter_and_orderby(self): self.add_some_alarms() result = list(self.alarm_conn.query_alarms(filter_expr=( {"=": {"enabled": True}}), orderby=[{"name": "asc"}])) self.assertEqual(2, len(result)) self.assertEqual(["orange-alert", "red-alert"], [a.name for a in result]) for a in result: self.assertTrue(a.enabled) class ComplexAlarmHistoryQueryTest(AlarmTestBase): def setUp(self): 
super(DBTestBase, self).setUp() self.filter_expr = {"and": [{"or": [{"=": {"type": "rule change"}}, {"=": {"type": "state transition"}}]}, {"=": {"alarm_id": "0r4ng3"}}]} self.add_some_alarms() self.prepare_alarm_history() def prepare_alarm_history(self): alarms = list(self.alarm_conn.get_alarms()) name_index = { 'red-alert': 0, 'orange-alert': 1, 'yellow-alert': 2 } for alarm in alarms: i = name_index[alarm.name] alarm_change = dict(event_id=( "16fd2706-8baf-433b-82eb-8c7fada847c%s" % i), alarm_id=alarm.alarm_id, type=alarm_models.AlarmChange.CREATION, detail="detail %s" % alarm.name, user_id=alarm.user_id, project_id=alarm.project_id, on_behalf_of=alarm.project_id, timestamp=datetime.datetime(2012, 9, 24, 7 + i, 30 + i)) self.alarm_conn.record_alarm_change(alarm_change=alarm_change) alarm_change2 = dict(event_id=( "16fd2706-8baf-433b-82eb-8c7fada847d%s" % i), alarm_id=alarm.alarm_id, type=alarm_models.AlarmChange.RULE_CHANGE, detail="detail %s" % i, user_id=alarm.user_id, project_id=alarm.project_id, on_behalf_of=alarm.project_id, timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i)) self.alarm_conn.record_alarm_change(alarm_change=alarm_change2) alarm_change3 = dict( event_id="16fd2706-8baf-433b-82eb-8c7fada847e%s" % i, alarm_id=alarm.alarm_id, type=alarm_models.AlarmChange.STATE_TRANSITION, detail="detail %s" % (i + 1), user_id=alarm.user_id, project_id=alarm.project_id, on_behalf_of=alarm.project_id, timestamp=datetime.datetime(2012, 9, 26, 10 + i, 30 + i) ) if alarm.name == "red-alert": alarm_change3['on_behalf_of'] = 'and-da-girls' self.alarm_conn.record_alarm_change(alarm_change=alarm_change3) def test_alarm_history_with_no_filter(self): history = list(self.alarm_conn.query_alarm_history()) self.assertEqual(9, len(history)) def test_alarm_history_with_no_filter_and_limit(self): history = list(self.alarm_conn.query_alarm_history(limit=3)) self.assertEqual(3, len(history)) def test_alarm_history_with_filter(self): history = list( self.alarm_conn.query_alarm_history(filter_expr=self.filter_expr)) self.assertEqual(2, len(history)) def test_alarm_history_with_regexp(self): filter_expr = {"and": [{"=~": {"type": "(rule)|(state)"}}, {"=": {"alarm_id": "0r4ng3"}}]} history = list( self.alarm_conn.query_alarm_history(filter_expr=filter_expr)) self.assertEqual(2, len(history)) def test_alarm_history_with_filter_and_orderby(self): history = list( self.alarm_conn.query_alarm_history(filter_expr=self.filter_expr, orderby=[{"timestamp": "asc"}])) self.assertEqual([alarm_models.AlarmChange.RULE_CHANGE, alarm_models.AlarmChange.STATE_TRANSITION], [h.type for h in history]) def test_alarm_history_with_filter_and_orderby_and_limit(self): history = list( self.alarm_conn.query_alarm_history(filter_expr=self.filter_expr, orderby=[{"timestamp": "asc"}], limit=1)) self.assertEqual(alarm_models.AlarmChange.RULE_CHANGE, history[0].type) def test_alarm_history_with_on_behalf_of_filter(self): filter_expr = {"=": {"on_behalf_of": "and-da-girls"}} history = list(self.alarm_conn.query_alarm_history( filter_expr=filter_expr)) self.assertEqual(1, len(history)) self.assertEqual("16fd2706-8baf-433b-82eb-8c7fada847e0", history[0].event_id) def test_alarm_history_with_alarm_id_as_filter(self): filter_expr = {"=": {"alarm_id": "r3d"}} history = list(self.alarm_conn.query_alarm_history( filter_expr=filter_expr, orderby=[{"timestamp": "asc"}])) self.assertEqual(3, len(history)) self.assertEqual([alarm_models.AlarmChange.CREATION, alarm_models.AlarmChange.RULE_CHANGE, alarm_models.AlarmChange.STATE_TRANSITION], 
[h.type for h in history]) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0910146 aodh-19.0.0/aodh/tests/functional_live/0000775000175000017500000000000000000000000017775 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/functional_live/__init__.py0000664000175000017500000000000000000000000022074 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0910146 aodh-19.0.0/aodh/tests/functional_live/gabbi/0000775000175000017500000000000000000000000021041 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/functional_live/gabbi/__init__.py0000664000175000017500000000000000000000000023140 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0910146 aodh-19.0.0/aodh/tests/functional_live/gabbi/gabbits-live/0000775000175000017500000000000000000000000023411 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/functional_live/gabbi/gabbits-live/alarms.yaml0000664000175000017500000000774600000000000025572 0ustar00zuulzuul00000000000000defaults: request_headers: x-auth-token: $ENVIRON['AODH_SERVICE_TOKEN'] x-roles: $ENVIRON['AODH_SERVICE_ROLES'] tests: - name: list alarms none desc: Lists alarms, none yet exist GET: /v2/alarms response_strings: - "[]" - name: try to PUT an alarm desc: what does PUT do PUT: /v2/alarms request_headers: content-type: application/json data: name: added_alarm_defaults2 type: gnocchi_resources_threshold gnocchi_resources_threshold_rule: metric: ameter resource_id: random_id resource_type: instance aggregation_method: max threshold: 300.0 status: 405 response_headers: allow: GET, POST - name: createAlarm desc: Creates an alarm. POST: /v2/alarms request_headers: content-type: application/json data: name: added_alarm_defaults type: gnocchi_resources_threshold gnocchi_resources_threshold_rule: metric: ameter resource_id: random_id resource_type: instance aggregation_method: max threshold: 300.0 status: 201 response_headers: location: /$SCHEME://$NETLOC/v2/alarms/ content-type: application/json response_json_paths: $.severity: low $.gnocchi_resources_threshold_rule.threshold: 300.0 $.gnocchi_resources_threshold_rule.comparison_operator: eq - name: showAlarm desc: Shows information for a specified alarm. GET: /v2/alarms/$RESPONSE['$.alarm_id'] response_json_paths: $.severity: low $.alarm_id: $RESPONSE['$.alarm_id'] $.gnocchi_resources_threshold_rule.threshold: 300.0 $.gnocchi_resources_threshold_rule.comparison_operator: eq response_headers: content-type: application/json - name: updateAlarm desc: Updates a specified alarm. PUT: /v2/alarms/$RESPONSE['$.alarm_id'] request_headers: content-type: application/json data: name: added_alarm_defaults severity: moderate type: gnocchi_resources_threshold gnocchi_resources_threshold_rule: metric: ameter resource_id: random_id resource_type: instance aggregation_method: max threshold: 200.0 # TODO(chdent): why do we have a response, why not status: 204? 
# status: 204 response_json_paths: $.gnocchi_resources_threshold_rule.threshold: 200.0 $.severity: moderate $.state: insufficient data - name: showAlarmHistory desc: Assembles the history for a specified alarm. GET: /v2/alarms/$RESPONSE['$.alarm_id']/history?q.field=type&q.op=eq&q.value=rule%20change response_json_paths: $[0].type: rule change - name: updateAlarmState desc: Sets the state of a specified alarm. PUT: /v2/alarms/$RESPONSE['$[0].alarm_id']/state request_headers: content-type: application/json data: '"alarm"' # TODO(chdent): really? Of what possible use is this? response_json_paths: $: alarm # Get a list of alarms so we can extract an id for the next test - name: list alarms desc: Lists alarms, only one GET: /v2/alarms response_json_paths: $[0].name: added_alarm_defaults - name: showAlarmState desc: Gets the state of a specified alarm. GET: /v2/alarms/$RESPONSE['$[0].alarm_id']/state response_headers: content-type: application/json response_json_paths: $: alarm - name: deleteAlarm desc: Deletes a specified alarm. DELETE: /v2/alarms/$HISTORY['list alarms'].$RESPONSE['$[0].alarm_id'] status: 204 - name: list alarms none end desc: Lists alarms, none now exist GET: /v2/alarms response_strings: - "[]" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/functional_live/gabbi/test_gabbi_live.py0000664000175000017500000000350200000000000024535 0ustar00zuulzuul00000000000000# # Copyright 2015 Red Hat. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ A test module to exercise the Gnocchi API with gabbi. This is designed to run against a real running web server (started by devstack). """ import os from gabbi import driver from urllib import parse as urlparse TESTS_DIR = 'gabbits-live' def load_tests(loader, tests, pattern): """Provide a TestSuite to the discovery process.""" aodh_url = os.getenv('AODH_URL') if aodh_url: parsed_url = urlparse.urlsplit(aodh_url) prefix = parsed_url.path.rstrip('/') # turn it into a prefix # NOTE(chdent): gabbi requires a port be passed or it will # default to 8001, so we must dance a little dance to get # the right ports. Probably gabbi needs to change. 
# https://github.com/cdent/gabbi/issues/50 port = 443 if parsed_url.scheme == 'https' else 80 if parsed_url.port: port = parsed_url.port test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR) return driver.build_tests(test_dir, loader, host=parsed_url.hostname, port=port, prefix=prefix) elif os.getenv('GABBI_LIVE_FAIL_IF_NO_TEST'): raise RuntimeError('AODH_URL is not set') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0910146 aodh-19.0.0/aodh/tests/unit/0000775000175000017500000000000000000000000015573 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/unit/__init__.py0000664000175000017500000000000000000000000017672 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0910146 aodh-19.0.0/aodh/tests/unit/cmd/0000775000175000017500000000000000000000000016336 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/unit/cmd/__init__.py0000664000175000017500000000000000000000000020435 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/unit/cmd/test_status.py0000664000175000017500000000234600000000000021277 0ustar00zuulzuul00000000000000# Copyright (c) 2018 NEC, Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_upgradecheck.upgradecheck import Code from aodh.cmd import status from aodh.tests import base class TestUpgradeChecks(base.BaseTestCase): def setUp(self): super(TestUpgradeChecks, self).setUp() self.cmd = status.Checks() cfg.CONF(args=[], project='aodh') def test_checks(self): for name, func in self.cmd._upgrade_checks: if isinstance(func, tuple): func_name, kwargs = func result = func_name(self, **kwargs) else: result = func(self) self.assertEqual(Code.SUCCESS, result.code) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0950153 aodh-19.0.0/aodh/tests/unit/evaluator/0000775000175000017500000000000000000000000017575 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/unit/evaluator/__init__.py0000664000175000017500000000000000000000000021674 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/unit/evaluator/base.py0000664000175000017500000000377100000000000021071 0ustar00zuulzuul00000000000000# # Copyright 2013 eNovance # Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from oslo_config import fixture from oslotest import base from aodh import service class TestEvaluatorBase(base.BaseTestCase): def setUp(self): super(TestEvaluatorBase, self).setUp() conf = service.prepare_service(argv=[], config_files=[]) self.conf = self.useFixture(fixture.Config(conf)).conf self.evaluator = self.EVALUATOR(self.conf) self.notifier = mock.MagicMock() self.evaluator.notifier = self.notifier self.storage_conn = mock.MagicMock() self.evaluator.storage_conn = self.storage_conn self.evaluator._ks_client = mock.Mock(user_id='fake_user_id', project_id='fake_project_id', auth_token='fake_token') self.prepare_alarms() def prepare_alarms(self): self.alarms = [] def _evaluate_all_alarms(self): for alarm in self.alarms: self.evaluator.evaluate(alarm) def _set_all_alarms(self, state): for alarm in self.alarms: alarm.state = state def _assert_all_alarms(self, state): for alarm in self.alarms: self.assertEqual(state, alarm.state) def assertDictContains(self, parent, child): """Checks whether child dict is a subset of parent.""" self.assertEqual(parent, dict(parent, **child)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/unit/evaluator/test_base.py0000664000175000017500000001441300000000000022123 0ustar00zuulzuul00000000000000# # Copyright 2013 IBM Corp # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
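# Usage sketch for the TestEvaluatorBase harness defined in base.py above.
# The class and module names in this example are illustrative assumptions;
# the pattern itself mirrors the concrete gnocchi and composite evaluator
# tests elsewhere in this package: a subclass points EVALUATOR at the
# evaluator under test and overrides prepare_alarms(), and setUp() wires
# mocked notifier, storage and keystone client objects onto that evaluator.
#
#   class MyThresholdEvaluatorTest(base.TestEvaluatorBase):
#       EVALUATOR = my_evaluator.MyThresholdEvaluator
#
#       def prepare_alarms(self):
#           self.alarms = [models.Alarm(...)]  # alarm fixtures to evaluate
#
#       def test_trip(self):
#           self._set_all_alarms('ok')
#           self._evaluate_all_alarms()
#           self._assert_all_alarms('alarm')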
import datetime from unittest import mock from oslo_utils import timeutils from oslotest import base from aodh import evaluator from aodh import queue class TestEvaluatorBaseClass(base.BaseTestCase): def setUp(self): super(TestEvaluatorBaseClass, self).setUp() self.called = False def _notify(self, alarm, previous, reason, details): self.called = True raise Exception('Boom!') @mock.patch.object(queue, 'AlarmNotifier') def test_base_refresh(self, notifier): notifier.notify = self._notify class EvaluatorSub(evaluator.Evaluator): def evaluate(self, alarm): pass ev = EvaluatorSub(mock.MagicMock()) ev.notifier = notifier ev.storage_conn = mock.MagicMock() ev._record_change = mock.MagicMock() ev._refresh(mock.MagicMock(), mock.MagicMock(), mock.MagicMock(), mock.MagicMock()) ev.storage_conn.update_alarm.assert_called_once_with(mock.ANY) ev._record_change.assert_called_once_with(mock.ANY, mock.ANY) self.assertTrue(self.called) @mock.patch.object(timeutils, 'utcnow') def test_base_time_constraints(self, mock_utcnow): alarm = mock.MagicMock() alarm.time_constraints = [ {'name': 'test', 'description': 'test', 'start': '0 11 * * *', # daily at 11:00 'duration': 10800, # 3 hours 'timezone': ''}, {'name': 'test2', 'description': 'test', 'start': '0 23 * * *', # daily at 23:00 'duration': 10800, # 3 hours 'timezone': ''}, ] cls = evaluator.Evaluator mock_utcnow.return_value = datetime.datetime(2014, 1, 1, 12, 0, 0) self.assertTrue(cls.within_time_constraint(alarm)) mock_utcnow.return_value = datetime.datetime(2014, 1, 2, 1, 0, 0) self.assertTrue(cls.within_time_constraint(alarm)) mock_utcnow.return_value = datetime.datetime(2014, 1, 2, 5, 0, 0) self.assertFalse(cls.within_time_constraint(alarm)) @mock.patch.object(timeutils, 'utcnow') def test_base_time_constraints_by_month(self, mock_utcnow): alarm = mock.MagicMock() alarm.time_constraints = [ {'name': 'test', 'description': 'test', 'start': '0 11 31 1,3,5,7,8,10,12 *', # every 31st at 11:00 'duration': 10800, # 3 hours 'timezone': ''}, ] cls = evaluator.Evaluator mock_utcnow.return_value = datetime.datetime(2015, 3, 31, 11, 30, 0) self.assertTrue(cls.within_time_constraint(alarm)) @mock.patch.object(timeutils, 'utcnow') def test_base_time_constraints_complex(self, mock_utcnow): alarm = mock.MagicMock() alarm.time_constraints = [ {'name': 'test', 'description': 'test', # Every consecutive 2 minutes (from the 3rd to the 57th) past # every consecutive 2 hours (between 3:00 and 12:59) on every day. 
'start': '3-57/2 3-12/2 * * *', 'duration': 30, 'timezone': ''} ] cls = evaluator.Evaluator # test minutes inside mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 3, 0) self.assertTrue(cls.within_time_constraint(alarm)) mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 31, 0) self.assertTrue(cls.within_time_constraint(alarm)) mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 57, 0) self.assertTrue(cls.within_time_constraint(alarm)) # test minutes outside mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 2, 0) self.assertFalse(cls.within_time_constraint(alarm)) mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 4, 0) self.assertFalse(cls.within_time_constraint(alarm)) mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 58, 0) self.assertFalse(cls.within_time_constraint(alarm)) # test hours inside mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 31, 0) self.assertTrue(cls.within_time_constraint(alarm)) mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 5, 31, 0) self.assertTrue(cls.within_time_constraint(alarm)) mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 11, 31, 0) self.assertTrue(cls.within_time_constraint(alarm)) # test hours outside mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 1, 31, 0) self.assertFalse(cls.within_time_constraint(alarm)) mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 4, 31, 0) self.assertFalse(cls.within_time_constraint(alarm)) mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 12, 31, 0) self.assertFalse(cls.within_time_constraint(alarm)) @mock.patch.object(timeutils, 'utcnow') def test_base_time_constraints_timezone(self, mock_utcnow): alarm = mock.MagicMock() cls = evaluator.Evaluator mock_utcnow.return_value = datetime.datetime(2014, 1, 1, 11, 0, 0) alarm.time_constraints = [ {'name': 'test', 'description': 'test', 'start': '0 11 * * *', # daily at 11:00 'duration': 10800, # 3 hours 'timezone': 'Europe/Ljubljana'} ] self.assertTrue(cls.within_time_constraint(alarm)) alarm.time_constraints = [ {'name': 'test2', 'description': 'test2', 'start': '0 11 * * *', # daily at 11:00 'duration': 10800, # 3 hours 'timezone': 'US/Eastern'} ] self.assertFalse(cls.within_time_constraint(alarm)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/unit/evaluator/test_composite.py0000664000175000017500000005430300000000000023215 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for aodh/evaluator/composite.py """ from unittest import mock import fixtures import os from oslo_utils import timeutils from oslo_utils import uuidutils from aodh import evaluator from aodh.evaluator import composite from aodh.storage import models from aodh.tests import constants from aodh.tests.unit.evaluator import base # NOTE(mmagr): Overriding PrometheusEvaluator setting to avoid # complains during init. 
os.environ['PROMETHEUS_HOST'] = '127.0.0.1' os.environ['PROMETHEUS_PORT'] = '666' class BaseCompositeEvaluate(base.TestEvaluatorBase): EVALUATOR = composite.CompositeEvaluator def setUp(self): self.client = self.useFixture(fixtures.MockPatch( 'aodh.evaluator.gnocchi.client' )).mock.Client.return_value super(BaseCompositeEvaluate, self).setUp() @staticmethod def _get_gnocchi_stats(granularity, values, aggregated=False): now = timeutils.utcnow_ts() if aggregated: return { 'measures': { 'aggregated': [[str(now - len(values) * granularity), granularity, value] for value in values] } } return [[str(now - len(values) * granularity), granularity, value] for value in values] @staticmethod def _reason(new_state, user_expression, causative_rules=(), transition=True): root_cause_rules = {} for index, rule in causative_rules: name = 'rule%s' % index root_cause_rules.update({name: rule}) description = {evaluator.ALARM: 'outside their threshold.', evaluator.OK: 'inside their threshold.', evaluator.UNKNOWN: 'state evaluated to unknown.'} params = {'state': new_state, 'expression': user_expression, 'rules': ', '.join(sorted(root_cause_rules.keys())), 'description': description[new_state]} reason_data = { 'type': 'composite', 'composition_form': user_expression} reason_data.update(causative_rules=root_cause_rules) if transition: reason = ('Composite rule alarm with composition form: ' '%(expression)s transition to %(state)s, due to ' 'rules: %(rules)s %(description)s' % params) else: reason = ('Composite rule alarm with composition form: ' '%(expression)s remaining as %(state)s, due to ' 'rules: %(rules)s %(description)s' % params) return reason, reason_data class CompositeTest(BaseCompositeEvaluate): sub_rule1 = { "type": "gnocchi_aggregation_by_metrics_threshold", "metrics": ['41869681-5776-46d6-91ed-cccc43b6e4e3', 'a1fb80f4-c242-4f57-87c6-68f47521059e'], "evaluation_periods": 5, "threshold": 0.8, "aggregation_method": "mean", "granularity": 60, "exclude_outliers": False, "comparison_operator": "gt" } sub_rule2 = { "type": "gnocchi_aggregation_by_metrics_threshold", "metrics": ['41869681-5776-46d6-91ed-cccc43b6e4e3', 'a1fb80f4-c242-4f57-87c6-68f47521059e'], "evaluation_periods": 4, "threshold": 200, "aggregation_method": "max", "granularity": 60, "exclude_outliers": False, "comparison_operator": "gt" } sub_rule3 = { "type": "gnocchi_aggregation_by_metrics_threshold", "metrics": ['41869681-5776-46d6-91ed-cccc43b6e4e3', 'a1fb80f4-c242-4f57-87c6-68f47521059e'], "evaluation_periods": 3, "threshold": 1000, "aggregation_method": "mean", "granularity": 60, "exclude_outliers": False, "comparison_operator": "gt" } sub_rule4 = { "type": "gnocchi_resources_threshold", 'comparison_operator': 'gt', 'threshold': 80.0, 'evaluation_periods': 5, 'aggregation_method': 'mean', 'granularity': 60, 'metric': 'cpu_util', 'resource_type': 'instance', 'resource_id': 'my_instance', } sub_rule5 = { "type": "gnocchi_aggregation_by_metrics_threshold", 'comparison_operator': 'le', 'threshold': 10.0, 'evaluation_periods': 4, 'aggregation_method': 'max', 'granularity': 300, 'metrics': ['0bb1604d-1193-4c0a-b4b8-74b170e35e83', '9ddc209f-42f8-41e1-b8f1-8804f59c4053'] } sub_rule6 = { "type": "gnocchi_aggregation_by_resources_threshold", 'comparison_operator': 'gt', 'threshold': 80.0, 'evaluation_periods': 6, 'aggregation_method': 'mean', 'granularity': 50, 'metric': 'cpu_util', 'resource_type': 'instance', 'query': '{"=": {"server_group": "my_autoscaling_group"}}' } def prepare_alarms(self): self.alarms = [ 
models.Alarm(name='alarm_threshold_nest', description='alarm with sub rules nested combined', type='composite', enabled=True, user_id='fake_user', project_id='fake_project', alarm_id=uuidutils.generate_uuid(), state='insufficient data', state_reason='insufficient data', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, insufficient_data_actions=[], ok_actions=[], alarm_actions=[], repeat_actions=False, time_constraints=[], rule={ "or": [self.sub_rule1, {"and": [self.sub_rule2, self.sub_rule3] }] }, severity='critical'), models.Alarm(name='alarm_threshold_or', description='alarm on one of sub rules triggered', type='composite', enabled=True, user_id='fake_user', project_id='fake_project', state='insufficient data', state_reason='insufficient data', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, insufficient_data_actions=[], ok_actions=[], alarm_actions=[], repeat_actions=False, alarm_id=uuidutils.generate_uuid(), time_constraints=[], rule={ "or": [self.sub_rule1, self.sub_rule2, self.sub_rule3] }, severity='critical' ), models.Alarm(name='alarm_threshold_and', description='alarm on all the sub rules triggered', type='composite', enabled=True, user_id='fake_user', project_id='fake_project', state='insufficient data', state_reason='insufficient data', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, insufficient_data_actions=[], ok_actions=[], alarm_actions=[], repeat_actions=False, alarm_id=uuidutils.generate_uuid(), time_constraints=[], rule={ "and": [self.sub_rule1, self.sub_rule2, self.sub_rule3] }, severity='critical' ), models.Alarm(name='alarm_multi_type_rules', description='alarm with threshold and gnocchi rules', type='composite', enabled=True, user_id='fake_user', project_id='fake_project', alarm_id=uuidutils.generate_uuid(), state='insufficient data', state_reason='insufficient data', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, insufficient_data_actions=[], ok_actions=[], alarm_actions=[], repeat_actions=False, time_constraints=[], rule={ "and": [self.sub_rule2, self.sub_rule3, {'or': [self.sub_rule1, self.sub_rule4, self.sub_rule5, self.sub_rule6]}] }, severity='critical' ), ] def test_simple_insufficient(self): self._set_all_alarms('ok') self.client.aggregates.fetch.return_value = [] self.client.metric.get_measures.return_value = [] self._evaluate_all_alarms() self._assert_all_alarms('insufficient data') expected = [mock.call(alarm) for alarm in self.alarms] update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual(expected, update_calls) expected = [mock.call(self.alarms[0], 'ok', *self._reason( 'insufficient data', '(rule1 or (rule2 and rule3))', ((1, self.sub_rule1), (2, self.sub_rule2), (3, self.sub_rule3)))), mock.call(self.alarms[1], 'ok', *self._reason( 'insufficient data', '(rule1 or rule2 or rule3)', ((1, self.sub_rule1), (2, self.sub_rule2), (3, self.sub_rule3)))), mock.call(self.alarms[2], 'ok', *self._reason( 'insufficient data', '(rule1 and rule2 and rule3)', ((1, self.sub_rule1), (2, self.sub_rule2), (3, self.sub_rule3)))), mock.call( self.alarms[3], 'ok', *self._reason( 'insufficient data', '(rule1 and rule2 and (rule3 or rule4 or rule5 ' 'or rule6))', ((1, self.sub_rule2), (2, self.sub_rule3), (3, self.sub_rule1), (4, self.sub_rule4), (5, self.sub_rule5), (6, self.sub_rule6))))] self.assertEqual(expected, self.notifier.notify.call_args_list) def test_alarm_full_trip_with_multi_type_rules(self): alarm = self.alarms[3] alarm.state = 'ok' # 
following results of sub-rules evaluation to trigger # final "alarm" state: # self.sub_rule2: alarm # self.sub_rule3: alarm # self.sub_rule1: ok # self.sub_rule4: ok # self.sub_rule5: ok # self.sub_rule6: alarm maxs = self._get_gnocchi_stats( 60, [self.sub_rule2['threshold'] + v for v in range(1, 5)], aggregated=True) avgs1 = self._get_gnocchi_stats( 60, [self.sub_rule3['threshold'] + v for v in range(1, 4)]) avgs2 = self._get_gnocchi_stats( 60, [self.sub_rule1['threshold'] - v for v in range(1, 6)], aggregated=True) gavgs1 = self._get_gnocchi_stats( 60, [self.sub_rule4['threshold'] - v for v in range(1, 6)], aggregated=True) gmaxs = self._get_gnocchi_stats( 300, [self.sub_rule5['threshold'] + v for v in range(1, 5)], aggregated=True) gavgs2 = self._get_gnocchi_stats( 50, [self.sub_rule6['threshold'] + v for v in range(1, 7)], aggregated=True) self.client.metric.get_measures.side_effect = [gavgs1] self.client.aggregates.fetch.side_effect = [maxs, avgs1, avgs2, gmaxs, gavgs2] self.evaluator.evaluate(alarm) self.assertEqual(1, self.client.metric.get_measures.call_count) self.assertEqual(5, self.client.aggregates.fetch.call_count) self.assertEqual('alarm', alarm.state) expected = mock.call( alarm, 'ok', *self._reason( 'alarm', '(rule1 and rule2 and (rule3 or rule4 or rule5 or rule6))', ((1, self.sub_rule2), (2, self.sub_rule3), (6, self.sub_rule6)))) self.assertEqual(expected, self.notifier.notify.call_args) def test_alarm_with_short_circuit_logic(self): alarm = self.alarms[1] # self.sub_rule1: alarm avgs = self._get_gnocchi_stats( 60, [self.sub_rule1['threshold'] + v for v in range(1, 6)], aggregated=True) self.client.aggregates.fetch.side_effect = [avgs] self.evaluator.evaluate(alarm) self.assertEqual('alarm', alarm.state) self.assertEqual(1, self.client.aggregates.fetch.call_count) expected = mock.call(self.alarms[1], 'insufficient data', *self._reason( 'alarm', '(rule1 or rule2 or rule3)', ((1, self.sub_rule1),))) self.assertEqual(expected, self.notifier.notify.call_args) def test_ok_with_short_circuit_logic(self): alarm = self.alarms[2] # self.sub_rule1: ok avgs = self._get_gnocchi_stats( 60, [self.sub_rule1['threshold'] - v for v in range(1, 6)], aggregated=True) self.client.aggregates.fetch.side_effect = [avgs] self.evaluator.evaluate(alarm) self.assertEqual('ok', alarm.state) self.assertEqual(1, self.client.aggregates.fetch.call_count) expected = mock.call(self.alarms[2], 'insufficient data', *self._reason( 'ok', '(rule1 and rule2 and rule3)', ((1, self.sub_rule1),))) self.assertEqual(expected, self.notifier.notify.call_args) def test_unknown_state_with_sub_rules_trending_state(self): alarm = self.alarms[0] maxs = self._get_gnocchi_stats( 60, [self.sub_rule2['threshold'] + v for v in range(-1, 4)], aggregated=True) avgs = self._get_gnocchi_stats( 60, [self.sub_rule3['threshold'] + v for v in range(-1, 3)], aggregated=True) avgs2 = self._get_gnocchi_stats( 60, [self.sub_rule1['threshold'] - v for v in range(1, 6)], aggregated=True) self.client.aggregates.fetch.side_effect = [avgs2, maxs, avgs] self.evaluator.evaluate(alarm) self.assertEqual('alarm', alarm.state) expected = mock.call(self.alarms[0], 'insufficient data', *self._reason( 'alarm', '(rule1 or (rule2 and rule3))', ((2, self.sub_rule2), (3, self.sub_rule3)))) self.assertEqual(expected, self.notifier.notify.call_args) def test_known_state_with_sub_rules_trending_state(self): alarm = self.alarms[0] alarm.repeat_actions = True alarm.state = 'ok' maxs = self._get_gnocchi_stats( 60, [self.sub_rule2['threshold'] + v for v in 
range(-1, 4)], aggregated=True) avgs = self._get_gnocchi_stats( 60, [self.sub_rule3['threshold'] + v for v in range(-1, 3)], aggregated=True) avgs2 = self._get_gnocchi_stats( 60, [self.sub_rule1['threshold'] - v for v in range(1, 6)], aggregated=True) self.client.aggregates.fetch.side_effect = [avgs2, maxs, avgs] self.evaluator.evaluate(alarm) self.assertEqual('ok', alarm.state) expected = mock.call(self.alarms[0], 'ok', *self._reason( 'ok', '(rule1 or (rule2 and rule3))', ((1, self.sub_rule1), (2, self.sub_rule2), (3, self.sub_rule3)), False)) self.assertEqual(expected, self.notifier.notify.call_args) def test_known_state_with_sub_rules_trending_state_and_not_repeat(self): alarm = self.alarms[2] alarm.state = 'ok' maxs = self._get_gnocchi_stats( 60, [self.sub_rule2['threshold'] + v for v in range(-1, 4)], aggregated=True) avgs = self._get_gnocchi_stats( 60, [self.sub_rule3['threshold'] + v for v in range(-1, 3)], aggregated=True) avgs2 = self._get_gnocchi_stats( 60, [self.sub_rule1['threshold'] - v for v in range(1, 6)], aggregated=True) self.client.aggregates.fetch.side_effect = [avgs2, maxs, avgs] self.evaluator.evaluate(alarm) self.assertEqual('ok', alarm.state) self.assertEqual([], self.notifier.notify.mock_calls) class OtherCompositeTest(BaseCompositeEvaluate): sub_rule1 = { 'evaluation_periods': 3, 'metric': 'radosgw.objects.containers', 'resource_id': 'alarm-resource-1', 'aggregation_method': 'mean', 'granularity': 60, 'threshold': 5.0, 'type': 'gnocchi_resources_threshold', 'comparison_operator': 'ge', 'resource_type': 'ceph_account' } sub_rule2 = { 'evaluation_periods': 3, 'metric': 'radosgw.objects.containers', 'resource_id': 'alarm-resource-2', 'aggregation_method': 'mean', 'granularity': 60, 'threshold': 5.0, 'type': 'gnocchi_resources_threshold', 'comparison_operator': 'ge', 'resource_type': 'ceph_account' } def prepare_alarms(self): self.alarms = [ models.Alarm(name='composite-GRT-OR-GRT', description='composite alarm converted', type='composite', enabled=True, user_id='fake_user', project_id='fake_project', state='insufficient data', state_reason='insufficient data', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, insufficient_data_actions=['log://'], ok_actions=['log://'], alarm_actions=['log://'], repeat_actions=False, alarm_id=uuidutils.generate_uuid(), time_constraints=[], rule={ "or": [self.sub_rule1, self.sub_rule2] }, severity='critical' ), ] def test_simple_ok(self): self._set_all_alarms('alarm') gavgs1 = [['2016-11-24T10:00:00+00:00', 3600.0, 3.0], ['2016-11-24T10:00:00+00:00', 900.0, 3.0], ['2016-11-24T10:00:00+00:00', 300.0, 3.0], ['2016-11-24T10:01:00+00:00', 60.0, 2.0], ['2016-11-24T10:02:00+00:00', 60.0, 3.0], ['2016-11-24T10:03:00+00:00', 60.0, 4.0], ['2016-11-24T10:04:00+00:00', 60.0, 5.0]] gavgs2 = [['2016-11-24T10:00:00+00:00', 3600.0, 3.0], ['2016-11-24T10:00:00+00:00', 900.0, 3.0], ['2016-11-24T10:00:00+00:00', 300.0, 3.0], ['2016-11-24T10:01:00+00:00', 60.0, 2.0], ['2016-11-24T10:02:00+00:00', 60.0, 3.0], ['2016-11-24T10:03:00+00:00', 60.0, 4.0], ['2016-11-24T10:04:00+00:00', 60.0, 5.0]] self.client.metric.get_measures.side_effect = [gavgs1, gavgs2] self._evaluate_all_alarms() self._assert_all_alarms('ok') expected = [mock.call(alarm) for alarm in self.alarms] update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual(expected, update_calls) expected = [mock.call(self.alarms[0], 'alarm', *self._reason('ok', '(rule1 or rule2)', ((1, self.sub_rule1), (2, self.sub_rule2))))] self.assertEqual(expected, 
self.notifier.notify.call_args_list) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/unit/evaluator/test_event.py0000664000175000017500000004427200000000000022340 0ustar00zuulzuul00000000000000# # Copyright 2015 NEC Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import datetime import json from unittest import mock from oslo_utils import timeutils from oslo_utils import uuidutils from aodh import evaluator from aodh.evaluator import event as event_evaluator from aodh.storage import models from aodh.tests import constants from aodh.tests.unit.evaluator import base class TestEventAlarmEvaluate(base.TestEvaluatorBase): EVALUATOR = event_evaluator.EventAlarmEvaluator @staticmethod def _alarm(**kwargs): alarm_id = kwargs.get('id') or uuidutils.generate_uuid() return models.Alarm(name=kwargs.get('name', alarm_id), type='event', enabled=True, alarm_id=alarm_id, description='desc', state=kwargs.get('state', 'insufficient data'), state_reason='reason', severity='critical', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, ok_actions=[], insufficient_data_actions=[], alarm_actions=[], repeat_actions=kwargs.get('repeat', False), user_id='user', project_id=kwargs.get('project', ''), time_constraints=[], rule=dict(event_type=kwargs.get('event_type', '*'), query=kwargs.get('query', []))) @staticmethod def _event(**kwargs): return {'message_id': kwargs.get('id') or uuidutils.generate_uuid(), 'event_type': kwargs.get('event_type', 'type0'), 'traits': kwargs.get('traits', [])} def _setup_alarm_storage(self, alarms): self._stored_alarms = {a.alarm_id: copy.deepcopy(a) for a in alarms} self._update_history = [] def get_alarms(**kwargs): return (a for a in self._stored_alarms.values()) def update_alarm(alarm): self._stored_alarms[alarm.alarm_id] = copy.deepcopy(alarm) self._update_history.append(dict(alarm_id=alarm.alarm_id, state=alarm.state)) self.storage_conn.get_alarms.side_effect = get_alarms self.storage_conn.update_alarm.side_effect = update_alarm def _setup_alarm_notifier(self): self._notification_history = [] def notify(alarm, previous, reason, data): self._notification_history.append(dict(alarm_id=alarm.alarm_id, state=alarm.state, previous=previous, reason=reason, data=data)) self.notifier.notify.side_effect = notify def _do_test_event_alarm(self, alarms, events, expect_db_queries=None, expect_alarm_states=None, expect_alarm_updates=None, expect_notifications=None): self._setup_alarm_storage(alarms) self._setup_alarm_notifier() self.evaluator.evaluate_events(events) if expect_db_queries is not None: expected = [mock.call(enabled=True, type='event', project_id=p) for p in expect_db_queries] self.assertEqual(expected, self.storage_conn.get_alarms.call_args_list) if expect_alarm_states is not None: for alarm_id, state in expect_alarm_states.items(): self.assertEqual(state, self._stored_alarms[alarm_id].state) if expect_alarm_updates is not None: 
self.assertEqual(len(expect_alarm_updates), len(self._update_history)) for alarm, h in zip(expect_alarm_updates, self._update_history): expected = dict(alarm_id=alarm.alarm_id, state=evaluator.ALARM) self.assertEqual(expected, h) if expect_notifications is not None: self.assertEqual(len(expect_notifications), len(self._notification_history)) for n, h in zip(expect_notifications, self._notification_history): alarm = n['alarm'] event = n['event'] previous = n.get('previous', evaluator.UNKNOWN) reason = ('Event hits the ' 'query .') % { 'e': event['message_id'], 'type': event['event_type'], 'query': json.dumps(alarm.rule['query'], sort_keys=True)} data = {'type': 'event', 'event': event} expected = dict(alarm_id=alarm.alarm_id, state=evaluator.ALARM, previous=previous, reason=reason, data=data) self.assertEqual(expected, h) def test_fire_alarm_in_the_same_project_id(self): alarm = self._alarm(project='project1') event = self._event(traits=[['project_id', 1, 'project1']]) self._do_test_event_alarm( [alarm], [event], expect_db_queries=['project1'], expect_alarm_states={alarm.alarm_id: evaluator.ALARM}, expect_alarm_updates=[alarm], expect_notifications=[dict(alarm=alarm, event=event)]) def test_fire_alarm_in_the_same_tenant_id(self): alarm = self._alarm(project='project1') event = self._event(traits=[['tenant_id', 1, 'project1']]) self._do_test_event_alarm( [alarm], [event], expect_db_queries=['project1'], expect_alarm_states={alarm.alarm_id: evaluator.ALARM}, expect_alarm_updates=[alarm], expect_notifications=[dict(alarm=alarm, event=event)]) def test_fire_alarm_in_project_none(self): alarm = self._alarm(project='') event = self._event() self._do_test_event_alarm( [alarm], [event], expect_db_queries=[''], expect_alarm_states={alarm.alarm_id: evaluator.ALARM}, expect_alarm_updates=[alarm], expect_notifications=[dict(alarm=alarm, event=event)]) def test_continue_following_evaluation_after_exception(self): alarms = [ self._alarm(id=1), self._alarm(id=2), ] event = self._event() original = self.evaluator._fire_alarm with mock.patch.object(event_evaluator.EventAlarmEvaluator, '_fire_alarm') as _fire_alarm: def _side_effect(*args, **kwargs): _fire_alarm.side_effect = original return Exception('boom') _fire_alarm.side_effect = _side_effect self._do_test_event_alarm( alarms, [event], expect_alarm_states={alarms[0].alarm_id: evaluator.UNKNOWN, alarms[1].alarm_id: evaluator.ALARM}, expect_alarm_updates=[alarms[1]], expect_notifications=[dict(alarm=alarms[1], event=event)]) def test_skip_event_missing_event_type(self): alarm = self._alarm() event = {'message_id': uuidutils.generate_uuid(), 'traits': []} self._do_test_event_alarm( [alarm], [event], expect_alarm_states={alarm.alarm_id: evaluator.UNKNOWN}, expect_alarm_updates=[], expect_notifications=[]) def test_skip_event_missing_message_id(self): alarm = self._alarm() event = {'event_type': 'type1', 'traits': []} self._do_test_event_alarm( [alarm], [event], expect_alarm_states={alarm.alarm_id: evaluator.UNKNOWN}, expect_alarm_updates=[], expect_notifications=[]) def test_continue_alarming_when_repeat_actions_enabled(self): alarm = self._alarm(repeat=True, state=evaluator.ALARM) event = self._event() self._do_test_event_alarm( [alarm], [event], expect_alarm_states={alarm.alarm_id: evaluator.ALARM}, expect_alarm_updates=[alarm], expect_notifications=[dict(alarm=alarm, event=event, previous=evaluator.ALARM)]) def test_do_not_continue_alarming_when_repeat_actions_disabled(self): alarm = self._alarm(repeat=False, state=evaluator.ALARM) event = self._event() 
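        # With repeat_actions disabled and the alarm already in the 'alarm'
        # state, re-evaluating a matching event is expected to leave storage
        # untouched and to send no further notification.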
self._do_test_event_alarm( [alarm], [event], expect_alarm_states={alarm.alarm_id: evaluator.ALARM}, expect_alarm_updates=[], expect_notifications=[]) def test_skip_uninterested_event_type(self): alarm = self._alarm(event_type='compute.instance.exists') event = self._event(event_type='compute.instance.update') self._do_test_event_alarm( [alarm], [event], expect_alarm_states={alarm.alarm_id: evaluator.UNKNOWN}, expect_alarm_updates=[], expect_notifications=[]) def test_fire_alarm_event_type_pattern_matched(self): alarm = self._alarm(event_type='compute.instance.*') event = self._event(event_type='compute.instance.update') self._do_test_event_alarm( [alarm], [event], expect_alarm_states={alarm.alarm_id: evaluator.ALARM}, expect_alarm_updates=[alarm], expect_notifications=[dict(alarm=alarm, event=event)]) def test_skip_event_type_pattern_unmatched(self): alarm = self._alarm(event_type='compute.instance.*') event = self._event(event_type='dummy.compute.instance') self._do_test_event_alarm( [alarm], [event], expect_alarm_states={alarm.alarm_id: evaluator.UNKNOWN}, expect_alarm_updates=[], expect_notifications=[]) def test_fire_alarm_query_matched_string(self): alarm = self._alarm(query=[dict(field="traits.state", value="stopped", op="eq")]) event = self._event(traits=[['state', 1, 'stopped']]) self._do_test_event_alarm( [alarm], [event], expect_alarm_states={alarm.alarm_id: evaluator.ALARM}, expect_alarm_updates=[alarm], expect_notifications=[dict(alarm=alarm, event=event)]) def test_skip_query_unmatched_string(self): alarm = self._alarm(query=[dict(field="traits.state", value="stopped", op="eq")]) event = self._event(traits=[['state', 1, 'active']]) self._do_test_event_alarm( [alarm], [event], expect_alarm_states={alarm.alarm_id: evaluator.UNKNOWN}, expect_alarm_updates=[], expect_notifications=[]) def test_fire_alarm_query_matched_integer(self): alarm = self._alarm(query=[dict(field="traits.instance_type_id", type="integer", value="5", op="eq")]) event = self._event(traits=[['instance_type_id', 2, 5]]) self._do_test_event_alarm( [alarm], [event], expect_alarm_states={alarm.alarm_id: evaluator.ALARM}, expect_alarm_updates=[alarm], expect_notifications=[dict(alarm=alarm, event=event)]) def test_skip_query_unmatched_integer(self): alarm = self._alarm(query=[dict(field="traits.instance_type_id", type="integer", value="5", op="eq")]) event = self._event(traits=[['instance_type_id', 2, 6]]) self._do_test_event_alarm( [alarm], [event], expect_alarm_states={alarm.alarm_id: evaluator.UNKNOWN}, expect_alarm_updates=[], expect_notifications=[]) def test_fire_alarm_query_matched_float(self): alarm = self._alarm(query=[dict(field="traits.io_read_kbs", type="float", value="123.456", op="eq")]) event = self._event(traits=[['io_read_kbs', 3, 123.456]]) self._do_test_event_alarm( [alarm], [event], expect_alarm_states={alarm.alarm_id: evaluator.ALARM}, expect_alarm_updates=[alarm], expect_notifications=[dict(alarm=alarm, event=event)]) def test_skip_query_unmatched_float(self): alarm = self._alarm(query=[dict(field="traits.io_read_kbs", type="float", value="123.456", op="eq")]) event = self._event(traits=[['io_read_kbs', 3, 456.123]]) self._do_test_event_alarm( [alarm], [event], expect_alarm_states={alarm.alarm_id: evaluator.UNKNOWN}, expect_alarm_updates=[], expect_notifications=[]) def test_fire_alarm_query_matched_datetime(self): alarm = self._alarm(query=[dict(field="traits.created_at", type="datetime", value="2015-09-01T18:52:27.214309", op="eq")]) event = self._event(traits=[['created_at', 4, 
'2015-09-01T18:52:27.214309']]) self._do_test_event_alarm( [alarm], [event], expect_alarm_states={alarm.alarm_id: evaluator.ALARM}, expect_alarm_updates=[alarm], expect_notifications=[dict(alarm=alarm, event=event)]) def test_skip_query_unmatched_datetime(self): alarm = self._alarm(query=[dict(field="traits.created_at", type="datetime", value="2015-09-01T18:52:27.214309", op="eq")]) event = self._event(traits=[['created_at', 4, '2015-09-02T18:52:27.214309']]) self._do_test_event_alarm( [alarm], [event], expect_alarm_states={alarm.alarm_id: evaluator.UNKNOWN}, expect_alarm_updates=[], expect_notifications=[]) def test_skip_alarm_due_to_uncompareable_trait(self): alarm = self._alarm(query=[dict(field="traits.created_at", type="datetime", value="2015-09-01T18:52:27.214309", op="eq")]) event = self._event(traits=[['created_at', 3, 123.456]]) self._do_test_event_alarm( [alarm], [event], expect_alarm_states={alarm.alarm_id: evaluator.UNKNOWN}, expect_alarm_updates=[], expect_notifications=[]) def test_event_alarm_cache_hit(self): alarm = self._alarm(project='project2', event_type='none') events = [ self._event(traits=[['project_id', 1, 'project2']]), self._event(traits=[['project_id', 1, 'project2']]), ] self._do_test_event_alarm([alarm], events, expect_db_queries=['project2']) def test_event_alarm_cache_updated_after_fired(self): alarm = self._alarm(project='project2', event_type='type1', repeat=False) events = [ self._event(event_type='type1', traits=[['project_id', 1, 'project2']]), self._event(event_type='type1', traits=[['project_id', 1, 'project2']]), ] self._do_test_event_alarm( [alarm], events, expect_db_queries=['project2'], expect_alarm_states={alarm.alarm_id: evaluator.ALARM}, expect_alarm_updates=[alarm], expect_notifications=[dict(alarm=alarm, event=events[0])]) def test_event_alarm_caching_disabled(self): alarm = self._alarm(project='project2', event_type='none') events = [ self._event(traits=[['project_id', 1, 'project2']]), self._event(traits=[['project_id', 1, 'project2']]), ] self.evaluator.conf.event_alarm_cache_ttl = 0 self._do_test_event_alarm([alarm], events, expect_db_queries=['project2', 'project2']) @mock.patch.object(timeutils, 'utcnow') def test_event_alarm_cache_expired(self, mock_utcnow): alarm = self._alarm(project='project2', event_type='none') events = [ self._event(traits=[['project_id', 1, 'project2']]), self._event(traits=[['project_id', 1, 'project2']]), ] mock_utcnow.side_effect = [ datetime.datetime(2015, 1, 1, 0, 0, 0), datetime.datetime(2015, 1, 1, 1, 0, 0), datetime.datetime(2015, 1, 1, 1, 1, 0), ] self._do_test_event_alarm([alarm], events, expect_db_queries=['project2', 'project2']) def test_event_alarm_cache_miss(self): events = [ self._event(traits=[['project_id', 1, 'project2']]), self._event(traits=[['project_id', 1, 'project3']]), ] self._do_test_event_alarm([], events, expect_db_queries=['project2', 'project3']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/unit/evaluator/test_gnocchi.py0000664000175000017500000007126700000000000022635 0ustar00zuulzuul00000000000000# # Copyright 2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import datetime import fixtures import json from unittest import mock from gnocchiclient import exceptions from oslo_utils import timeutils from oslo_utils import uuidutils try: import zoneinfo except ImportError: # zoneinfo is available in Python >= 3.9 import pytz zoneinfo = None from aodh.evaluator import gnocchi from aodh import messaging from aodh.storage import models from aodh.tests import constants from aodh.tests.unit.evaluator import base class TestGnocchiEvaluatorBase(base.TestEvaluatorBase): def setUp(self): self.client = self.useFixture(fixtures.MockPatch( 'aodh.evaluator.gnocchi.client' )).mock.Client.return_value self.prepared_alarms = [ models.Alarm(name='instance_running_hot', description='instance_running_hot', type='gnocchi_resources_threshold', enabled=True, user_id='foobar', project_id='snafu', alarm_id=uuidutils.generate_uuid(), state='insufficient data', state_reason='insufficient data', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, insufficient_data_actions=[], ok_actions=[], alarm_actions=[], repeat_actions=False, time_constraints=[], severity='low', rule=dict( comparison_operator='gt', threshold=80.0, evaluation_periods=5, aggregation_method='mean', granularity=60, metric='cpu_util', resource_type='instance', resource_id='my_instance') ), models.Alarm(name='group_running_idle', description='group_running_idle', type='gnocchi_aggregation_by_metrics_threshold', enabled=True, user_id='foobar', project_id='snafu', state='insufficient data', state_reason='insufficient data', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, insufficient_data_actions=[], ok_actions=[], alarm_actions=[], repeat_actions=False, alarm_id=uuidutils.generate_uuid(), time_constraints=[], rule=dict( comparison_operator='le', threshold=10.0, evaluation_periods=4, aggregation_method='max', granularity=300, metrics=['0bb1604d-1193-4c0a-b4b8-74b170e35e83', '9ddc209f-42f8-41e1-b8f1-8804f59c4053']), ), models.Alarm(name='instance_not_running', description='instance_running_hot', type='gnocchi_aggregation_by_resources_threshold', enabled=True, user_id='foobar', project_id='snafu', alarm_id=uuidutils.generate_uuid(), state='insufficient data', state_reason='insufficient data', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, insufficient_data_actions=[], ok_actions=[], alarm_actions=[], repeat_actions=False, time_constraints=[], rule=dict( comparison_operator='gt', threshold=80.0, evaluation_periods=6, aggregation_method='rate:mean', granularity=50, metric='cpu', resource_type='instance', query='{"=": {"server_group": ' '"my_autoscaling_group"}}') ), ] super(TestGnocchiEvaluatorBase, self).setUp() @staticmethod def _get_stats(granularity, values, aggregated=False): now = timeutils.utcnow_ts() if aggregated: return { 'measures': { 'aggregated': [[str(now - len(values) * granularity), granularity, value] for value in values] } } return [[str(now - len(values) * granularity), granularity, value] for value in values] @staticmethod def _reason_data(disposition, count, most_recent): return {'type': 'threshold', 
'disposition': disposition, 'count': count, 'most_recent': most_recent} def _set_all_rules(self, field, value): for alarm in self.alarms: alarm.rule[field] = value def _test_retry_transient(self): self._evaluate_all_alarms() self._assert_all_alarms('insufficient data') self._evaluate_all_alarms() self._assert_all_alarms('ok') def _test_simple_insufficient(self): self._set_all_alarms('ok') self._evaluate_all_alarms() self._assert_all_alarms('insufficient data') expected = [mock.call(alarm) for alarm in self.alarms] update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual(expected, update_calls) expected = [mock.call( alarm, 'ok', ('No datapoint for granularity %s' % alarm.rule['granularity']), self._reason_data('unknown', alarm.rule['evaluation_periods'], None)) for alarm in self.alarms] self.assertEqual(expected, self.notifier.notify.call_args_list) class TestGnocchiResourceThresholdEvaluate(TestGnocchiEvaluatorBase): EVALUATOR = gnocchi.GnocchiResourceThresholdEvaluator def prepare_alarms(self): self.alarms = self.prepared_alarms[0:1] def test_retry_transient_api_failure(self): means = self._get_stats(60, [self.alarms[0].rule['threshold'] - v for v in range(5)]) self.client.metric.get_measures.side_effect = [ exceptions.ClientException(501, "error2"), means] self._test_retry_transient() def test_simple_insufficient(self): self.client.metric.get_measures.return_value = [] self._test_simple_insufficient() @mock.patch.object(timeutils, 'utcnow') def test_simple_alarm_trip(self, utcnow): utcnow.return_value = datetime.datetime(2015, 1, 26, 12, 57, 0, 0) self._set_all_alarms('ok') avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v for v in range(1, 6)]) self.client.metric.get_measures.side_effect = [avgs] self._evaluate_all_alarms() start_alarm = "2015-01-26T12:51:00" end = "2015-01-26T12:57:00" self.assertEqual( [mock.call.get_measures(aggregation='mean', metric='cpu_util', granularity=60, resource_id='my_instance', start=start_alarm, stop=end)], self.client.metric.mock_calls) reason = ('Transition to alarm due to 5 samples outside threshold,' ' most recent: %s' % avgs[-1][2]) reason_data = self._reason_data('outside', 5, avgs[-1][2]) expected = mock.call(self.alarms[0], 'ok', reason, reason_data) self.assertEqual(expected, self.notifier.notify.call_args) def test_simple_alarm_clear(self): self._set_all_alarms('alarm') avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] - v for v in range(5)]) self.client.metric.get_measures.side_effect = [avgs] self._evaluate_all_alarms() self._assert_all_alarms('ok') expected = [mock.call(alarm) for alarm in self.alarms] update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual(expected, update_calls) reason = ('Transition to ok due to 5 samples inside' ' threshold, most recent: %s' % avgs[-1][2]) reason_data = self._reason_data('inside', 5, avgs[-1][2]) expected = mock.call(self.alarms[0], 'alarm', reason, reason_data) self.assertEqual(expected, self.notifier.notify.call_args) def _construct_payloads(self): payloads = [] reasons = ["Transition to alarm due to 5 samples outside threshold, " "most recent: 85.0", "Transition to alarm due to 4 samples outside threshold, " "most recent: 7.0"] for alarm in self.alarms: num = self.alarms.index(alarm) type = models.AlarmChange.STATE_TRANSITION detail = json.dumps({'state': alarm.state, 'transition_reason': reasons[num]}) on_behalf_of = alarm.project_id severity = alarm.severity payload = dict( event_id='fake_event_id_%s' % num, 
alarm_id=alarm.alarm_id, type=type, detail=detail, user_id='fake_user_id', project_id='fake_project_id', on_behalf_of=on_behalf_of, timestamp=datetime.datetime(2015, 7, 26, 3, 33, 21, 876795), severity=severity) payloads.append(payload) return payloads @mock.patch.object(uuidutils, 'generate_uuid') @mock.patch.object(timeutils, 'utcnow') @mock.patch.object(messaging, 'get_notifier') def test_alarm_change_record(self, get_notifier, utcnow, mock_uuid): # the context.RequestContext() method need to generate uuid, # so we need to provide 'fake_uuid_0' and 'fake_uuid_1' for that. mock_uuid.side_effect = ['fake_event_id_0', 'fake_event_id_1'] change_notifier = mock.MagicMock() get_notifier.return_value = change_notifier utcnow.return_value = datetime.datetime(2015, 7, 26, 3, 33, 21, 876795) self._set_all_alarms('ok') avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v for v in range(1, 6)]) self.client.metric.get_measures.side_effect = [avgs] self._evaluate_all_alarms() self._assert_all_alarms('alarm') expected = [mock.call(alarm) for alarm in self.alarms] update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual(expected, update_calls) payloads = self._construct_payloads() expected_payloads = [mock.call(p) for p in payloads] change_records = \ self.storage_conn.record_alarm_change.call_args_list self.assertEqual(expected_payloads, change_records) notify_calls = change_notifier.info.call_args_list notification = "alarm.state_transition" expected_payloads = [mock.call(mock.ANY, notification, p) for p in payloads] self.assertEqual(expected_payloads, notify_calls) def test_equivocal_from_known_state_ok(self): self._set_all_alarms('ok') avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v for v in range(5)]) self.client.metric.get_measures.side_effect = [avgs] self._evaluate_all_alarms() self._assert_all_alarms('ok') self.assertEqual([], self.storage_conn.update_alarm.call_args_list) self.assertEqual([], self.notifier.notify.call_args_list) def test_state_change_and_repeat_actions(self): self._set_all_alarms('ok') self.alarms[0].repeat_actions = True avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v for v in range(1, 6)]) self.client.metric.get_measures.side_effect = [avgs] self._evaluate_all_alarms() self._assert_all_alarms('alarm') expected = [mock.call(alarm) for alarm in self.alarms] update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual(expected, update_calls) reason = ('Transition to alarm due to 5 samples outside ' 'threshold, most recent: %s' % avgs[-1][2]) reason_data = self._reason_data('outside', 5, avgs[-1][2]) expected = mock.call(self.alarms[0], 'ok', reason, reason_data) self.assertEqual(expected, self.notifier.notify.call_args) def test_equivocal_from_unknown(self): self._set_all_alarms('insufficient data') avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v for v in range(1, 6)]) self.client.metric.get_measures.side_effect = [avgs] self._evaluate_all_alarms() self._assert_all_alarms('alarm') expected = [mock.call(alarm) for alarm in self.alarms] update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual(expected, update_calls) reason = ('Transition to alarm due to 5 samples outside' ' threshold, most recent: %s' % avgs[-1][2]) reason_data = self._reason_data('outside', 5, avgs[-1][2]) expected = mock.call(self.alarms[0], 'insufficient data', reason, reason_data) self.assertEqual(expected, self.notifier.notify.call_args) @mock.patch.object(timeutils, 'utcnow') def 
test_no_state_change_outside_time_constraint(self, mock_utcnow): self._set_all_alarms('ok') self.alarms[0].time_constraints = [ {'name': 'test', 'description': 'test', 'start': '0 11 * * *', # daily at 11:00 'duration': 10800, # 3 hours 'timezone': 'Europe/Ljubljana'} ] if zoneinfo: tzinfo = zoneinfo.ZoneInfo('Europe/Ljubljana') else: tzinfo = pytz.timezone('Europe/Ljubljana') dt = datetime.datetime(2014, 1, 1, 15, 0, 0, tzinfo=tzinfo) mock_utcnow.return_value = dt.astimezone(datetime.timezone.utc) self.client.metric.get_measures.return_value = [] self._evaluate_all_alarms() self._assert_all_alarms('ok') update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual([], update_calls, "Alarm should not change state if the current " " time is outside its time constraint.") self.assertEqual([], self.notifier.notify.call_args_list) @mock.patch.object(timeutils, 'utcnow') def test_state_change_inside_time_constraint(self, mock_utcnow): self._set_all_alarms('ok') self.alarms[0].time_constraints = [ {'name': 'test', 'description': 'test', 'start': '0 11 * * *', # daily at 11:00 'duration': 10800, # 3 hours 'timezone': 'Europe/Ljubljana'} ] if zoneinfo: tzinfo = zoneinfo.ZoneInfo('Europe/Ljubljana') else: tzinfo = pytz.timezone('Europe/Ljubljana') dt = datetime.datetime(2014, 1, 1, 12, 0, 0, tzinfo=tzinfo) mock_utcnow.return_value = dt.astimezone(datetime.timezone.utc) self.client.metric.get_measures.return_value = [] self._evaluate_all_alarms() self._assert_all_alarms('insufficient data') expected = [mock.call(alarm) for alarm in self.alarms] update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual(expected, update_calls, "Alarm should change state if the current " "time is inside its time constraint.") expected = [mock.call( alarm, 'ok', 'No datapoint for granularity 60', self._reason_data('unknown', alarm.rule['evaluation_periods'], None)) for alarm in self.alarms] self.assertEqual(expected, self.notifier.notify.call_args_list) @mock.patch.object(timeutils, 'utcnow') def test_lag_configuration(self, mock_utcnow): mock_utcnow.return_value = datetime.datetime(2012, 7, 2, 10, 45) self.client.metric.get_measures.return_value = [] self._set_all_alarms('ok') self._evaluate_all_alarms() self._set_all_alarms('ok') self.conf.set_override("additional_ingestion_lag", 42) self._evaluate_all_alarms() self.assertEqual([ mock.call(aggregation='mean', granularity=60, metric='cpu_util', resource_id='my_instance', start='2012-07-02T10:39:00', stop='2012-07-02T10:45:00'), mock.call(aggregation='mean', granularity=60, metric='cpu_util', resource_id='my_instance', start='2012-07-02T10:38:18', stop='2012-07-02T10:45:00') ], self.client.metric.get_measures.mock_calls) @mock.patch.object(timeutils, 'utcnow') def test_evaluation_keep_alarm_attributes_constant(self, utcnow): utcnow.return_value = datetime.datetime(2015, 7, 26, 3, 33, 21, 876795) self._set_all_alarms('ok') original_alarms = copy.deepcopy(self.alarms) avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v for v in range(1, 6)]) self.client.metric.get_measures.side_effect = [avgs] self._evaluate_all_alarms() self._assert_all_alarms('alarm') primitive_alarms = [a.as_dict() for a in self.alarms] for alarm in original_alarms: alarm.state = 'alarm' alarm.state_reason = mock.ANY primitive_original_alarms = [a.as_dict() for a in original_alarms] self.assertEqual(primitive_original_alarms, primitive_alarms) class TestGnocchiAggregationMetricsThresholdEvaluate(TestGnocchiEvaluatorBase): EVALUATOR = 
gnocchi.GnocchiAggregationMetricsThresholdEvaluator def prepare_alarms(self): self.alarms = self.prepared_alarms[1:2] def test_retry_transient_api_failure(self): maxs = self._get_stats( 300, [self.alarms[0].rule['threshold'] + v for v in range(4)], aggregated=True ) self.client.aggregates.fetch.side_effect = [Exception('boom'), maxs] self._test_retry_transient() def test_simple_insufficient(self): self.client.aggregates.fetch.return_value = [] self._test_simple_insufficient() @mock.patch.object(timeutils, 'utcnow') def test_simple_alarm_trip(self, utcnow): utcnow.return_value = datetime.datetime(2015, 1, 26, 12, 57, 0, 0) self._set_all_alarms('ok') maxs = self._get_stats( 300, [self.alarms[0].rule['threshold'] - v for v in range(4)], aggregated=True ) self.client.aggregates.fetch.side_effect = [maxs] self._evaluate_all_alarms() start_alarm = "2015-01-26T12:32:00" end = "2015-01-26T12:57:00" self.assertEqual( [mock.call.fetch( operations=[ 'aggregate', 'max', ['metric', '0bb1604d-1193-4c0a-b4b8-74b170e35e83', 'max'], # noqa ['metric', '9ddc209f-42f8-41e1-b8f1-8804f59c4053', 'max'], # noqa ], granularity=300, needed_overlap=0, start=start_alarm, stop=end)], self.client.aggregates.mock_calls) self._assert_all_alarms('alarm') expected = [mock.call(alarm) for alarm in self.alarms] update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual(expected, update_calls) maxs = maxs['measures']['aggregated'] reason = ('Transition to alarm due to 4 samples outside ' 'threshold, most recent: %s' % maxs[-1][2]) reason_data = self._reason_data('outside', 4, maxs[-1][2]) expected = mock.call(self.alarms[0], 'ok', reason, reason_data) self.assertEqual(expected, self.notifier.notify.call_args) def test_simple_alarm_clear(self): self._set_all_alarms('alarm') maxs = self._get_stats(300, [self.alarms[0].rule['threshold'] + v for v in range(1, 5)], aggregated=True) self.client.aggregates.fetch.side_effect = [maxs] self._evaluate_all_alarms() self._assert_all_alarms('ok') expected = [mock.call(alarm) for alarm in self.alarms] update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual(expected, update_calls) maxs = maxs['measures']['aggregated'] reason = ('Transition to ok due to 4 samples inside ' 'threshold, most recent: %s' % maxs[-1][2]) reason_data = self._reason_data('inside', 4, maxs[-1][2]) expected = mock.call(self.alarms[0], 'alarm', reason, reason_data) self.assertEqual(expected, self.notifier.notify.call_args) def test_equivocal_from_known_state_ok(self): self._set_all_alarms('ok') maxs = self._get_stats( 300, [self.alarms[0].rule['threshold'] - v for v in range(-1, 3)], aggregated=True ) self.client.aggregates.fetch.side_effect = [maxs] self._evaluate_all_alarms() self._assert_all_alarms('ok') self.assertEqual( [], self.storage_conn.update_alarm.call_args_list) self.assertEqual([], self.notifier.notify.call_args_list) def test_equivocal_ok_to_alarm(self): self._set_all_alarms('ok') # NOTE(sileht): we add one useless point (81.0) that will break # the test if the evaluator doesn't remove it. 
maxs = self._get_stats( 300, [self.alarms[0].rule['threshold'] - v for v in range(-1, 5)], aggregated=True ) self.client.aggregates.fetch.side_effect = [maxs] self._evaluate_all_alarms() self._assert_all_alarms('alarm') def test_equivocal_from_known_state_and_repeat_actions(self): self._set_all_alarms('ok') self.alarms[0].repeat_actions = True maxs = self._get_stats( 300, [self.alarms[0].rule['threshold'] - v for v in range(-1, 3)], aggregated=True ) self.client.aggregates.fetch.side_effect = [maxs] self._evaluate_all_alarms() self._assert_all_alarms('ok') self.assertEqual([], self.storage_conn.update_alarm.call_args_list) reason = ('Remaining as ok due to 1 samples inside' ' threshold, most recent: 8.0') reason_datas = self._reason_data('inside', 1, 8.0) expected = [mock.call(self.alarms[0], 'ok', reason, reason_datas)] self.assertEqual(expected, self.notifier.notify.call_args_list) def test_unequivocal_from_known_state_and_repeat_actions(self): self._set_all_alarms('alarm') self.alarms[0].repeat_actions = True maxs = self._get_stats( 300, [self.alarms[0].rule['threshold'] - v for v in range(4)], aggregated=True ) self.client.aggregates.fetch.side_effect = [maxs] self._evaluate_all_alarms() self._assert_all_alarms('alarm') self.assertEqual([], self.storage_conn.update_alarm.call_args_list) reason = ('Remaining as alarm due to 4 samples outside' ' threshold, most recent: 7.0') reason_datas = self._reason_data('outside', 4, 7.0) expected = [mock.call(self.alarms[0], 'alarm', reason, reason_datas)] self.assertEqual(expected, self.notifier.notify.call_args_list) class TestGnocchiAggregationResourcesThresholdEvaluate( TestGnocchiEvaluatorBase): EVALUATOR = gnocchi.GnocchiAggregationResourcesThresholdEvaluator def prepare_alarms(self): self.alarms = self.prepared_alarms[2:3] def test_retry_transient_api_failure(self): avgs2 = self._get_stats(50, [self.alarms[0].rule['threshold'] - v for v in range(6)], aggregated=True) self.client.aggregates.fetch.side_effect = [ exceptions.ClientException(500, "error"), avgs2] self._test_retry_transient() def test_simple_insufficient(self): self.client.aggregates.fetch.return_value = [] self._test_simple_insufficient() @mock.patch.object(timeutils, 'utcnow') def test_simple_alarm_trip(self, utcnow): utcnow.return_value = datetime.datetime(2015, 1, 26, 12, 57, 0, 0) self._set_all_alarms('ok') avgs = self._get_stats(50, [self.alarms[0].rule['threshold'] + v for v in range(1, 7)], aggregated=True) self.client.aggregates.fetch.side_effect = [avgs] self._evaluate_all_alarms() start_alarm = "2015-01-26T12:51:10" end = "2015-01-26T12:57:00" self.assertEqual( [mock.call.fetch( operations=[ 'aggregate', 'rate:mean', ['metric', 'cpu', 'mean'], ], granularity=50, search={"=": {"server_group": "my_autoscaling_group"}}, resource_type='instance', start=start_alarm, stop=end, needed_overlap=0)], self.client.aggregates.mock_calls) self._assert_all_alarms('alarm') expected = [mock.call(alarm) for alarm in self.alarms] update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual(expected, update_calls) avgs = avgs['measures']['aggregated'] reason = ('Transition to alarm due to 6 samples outside ' 'threshold, most recent: %s' % avgs[-1][2]) reason_data = self._reason_data('outside', 6, avgs[-1][2]) expected = mock.call(self.alarms[0], 'ok', reason, reason_data) self.assertEqual(expected, self.notifier.notify.call_args) def test_simple_alarm_clear(self): self._set_all_alarms('alarm') avgs = self._get_stats(50, [self.alarms[0].rule['threshold'] - v for v in 
range(6)], aggregated=True) self.client.aggregates.fetch.side_effect = [avgs] self._evaluate_all_alarms() self._assert_all_alarms('ok') expected = [mock.call(alarm) for alarm in self.alarms] update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual(expected, update_calls) avgs = avgs['measures']['aggregated'] reason = ('Transition to ok due to 6 samples inside ' 'threshold, most recent: %s' % avgs[-1][2]) reason_data = self._reason_data('inside', 6, avgs[-1][2]) expected = mock.call(self.alarms[0], 'alarm', reason, reason_data) self.assertEqual(expected, self.notifier.notify.call_args) def test_equivocal_from_known_state_ok(self): self._set_all_alarms('ok') avgs = self._get_stats(50, [self.alarms[0].rule['threshold'] + v for v in range(6)], aggregated=True) self.client.aggregates.fetch.side_effect = [avgs] self._evaluate_all_alarms() self._assert_all_alarms('ok') self.assertEqual( [], self.storage_conn.update_alarm.call_args_list) self.assertEqual([], self.notifier.notify.call_args_list) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/unit/evaluator/test_loadbalancer.py0000664000175000017500000001304700000000000023622 0ustar00zuulzuul00000000000000# Copyright 2019 Catalyst Cloud Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
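# ---------------------------------------------------------------------
# Illustrative sketch (an assumption for this document, not aodh code):
# the tests below exercise LoadBalancerMemberHealthEvaluator, which asks
# Octavia for pool members and alarms when an enabled member reports an
# error status. A minimal, self-contained version of that decision rule
# could look like the following; classify_member_health() and its inputs
# are hypothetical names used purely for illustration.
# ---------------------------------------------------------------------
def classify_member_health(members, error_status='ERROR'):
    """Return 'alarm' if any enabled member is in error, 'ok' if all
    enabled members are healthy, and 'insufficient data' when nothing
    can be judged (e.g. an empty member list)."""
    enabled = [m for m in members if m.get('admin_state_up')]
    if not enabled:
        return 'insufficient data'
    if any(m.get('operating_status') == error_status for m in enabled):
        return 'alarm'
    return 'ok'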
import datetime from unittest import mock from oslo_utils import timeutils from oslo_utils import uuidutils from aodh import evaluator from aodh.evaluator import loadbalancer from aodh.storage import models from aodh.tests import constants from aodh.tests.unit.evaluator import base @mock.patch('octaviaclient.api.v2.octavia.OctaviaAPI') @mock.patch('aodh.keystone_client.get_session') class TestLoadBalancerMemberHealthEvaluator(base.TestEvaluatorBase): EVALUATOR = loadbalancer.LoadBalancerMemberHealthEvaluator def test_evaluate(self, mock_session, mock_octavia): alarm = models.Alarm( name='lb_member_alarm', description='lb_member_alarm', type=loadbalancer.ALARM_TYPE, enabled=True, user_id=uuidutils.generate_uuid(), project_id=uuidutils.generate_uuid(dashed=False), alarm_id=uuidutils.generate_uuid(), state='insufficient data', state_reason='insufficient data', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, insufficient_data_actions=[], ok_actions=[], alarm_actions=[], repeat_actions=False, time_constraints=[], severity='low', rule=dict( pool_id=uuidutils.generate_uuid(), stack_id=uuidutils.generate_uuid(), autoscaling_group_id=uuidutils.generate_uuid(), ) ) mock_client = mock.MagicMock() mock_octavia.return_value = mock_client created_at = timeutils.utcnow() - datetime.timedelta(days=1) mock_client.member_list.return_value = { 'members': [ { 'created_at': created_at.isoformat(), 'admin_state_up': True, 'operating_status': 'ERROR', } ] } self.evaluator.evaluate(alarm) self.assertEqual(evaluator.ALARM, alarm.state) def test_evaluate_octavia_error(self, mock_session, mock_octavia): class Response(object): def __init__(self, status_code, content): self.status_code = status_code self.content = content alarm = models.Alarm( name='lb_member_alarm', description='lb_member_alarm', type=loadbalancer.ALARM_TYPE, enabled=True, user_id=uuidutils.generate_uuid(), project_id=uuidutils.generate_uuid(dashed=False), alarm_id=uuidutils.generate_uuid(), state='insufficient data', state_reason='insufficient data', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, insufficient_data_actions=[], ok_actions=[], alarm_actions=[], repeat_actions=False, time_constraints=[], severity='low', rule=dict( pool_id=uuidutils.generate_uuid(), stack_id=uuidutils.generate_uuid(), autoscaling_group_id=uuidutils.generate_uuid(), ) ) mock_client = mock.MagicMock() mock_octavia.return_value = mock_client msg = 'Pool NotFound' mock_client.member_list.return_value = Response(404, msg) self.evaluator.evaluate(alarm) self.assertEqual(evaluator.UNKNOWN, alarm.state) self.assertEqual(msg, alarm.state_reason) def test_evaluate_alarm_to_ok(self, mock_session, mock_octavia): alarm = models.Alarm( name='lb_member_alarm', description='lb_member_alarm', type=loadbalancer.ALARM_TYPE, enabled=True, user_id=uuidutils.generate_uuid(), project_id=uuidutils.generate_uuid(dashed=False), alarm_id=uuidutils.generate_uuid(), state=evaluator.ALARM, state_reason='alarm', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, insufficient_data_actions=[], ok_actions=[], alarm_actions=[], repeat_actions=False, time_constraints=[], severity='low', rule=dict( pool_id=uuidutils.generate_uuid(), stack_id=uuidutils.generate_uuid(), autoscaling_group_id=uuidutils.generate_uuid(), ) ) mock_client = mock.MagicMock() mock_octavia.return_value = mock_client created_at = timeutils.utcnow() - datetime.timedelta(days=1) mock_client.member_list.return_value = { 'members': [ { 'created_at': 
created_at.isoformat(), 'admin_state_up': True, 'operating_status': 'ACTIVE', } ] } self.evaluator.evaluate(alarm) self.assertEqual(evaluator.OK, alarm.state) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0950153 aodh-19.0.0/aodh/tests/unit/notifier/0000775000175000017500000000000000000000000017412 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/unit/notifier/__init__.py0000664000175000017500000000000000000000000021511 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/unit/notifier/base.py0000664000175000017500000000165100000000000020701 0ustar00zuulzuul00000000000000# Copyright 2019 Catalyst Cloud Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import fixture from oslotest import base from aodh import service class TestNotifierBase(base.BaseTestCase): def setUp(self): super(TestNotifierBase, self).setUp() conf = service.prepare_service(argv=[], config_files=[]) self.conf = self.useFixture(fixture.Config(conf)).conf ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/unit/notifier/test_heat.py0000664000175000017500000000607000000000000021747 0ustar00zuulzuul00000000000000# Copyright 2019 Catalyst Cloud Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
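# ---------------------------------------------------------------------
# Illustrative sketch (an assumption, not part of aodh): the tests below
# drive TrustHeatAlarmNotifier with action URLs of the form
# trust+autohealer://<trust_id>:<verb>@. A tiny standard-library helper
# that pulls the trust id and verb out of such a URL might look like
# this; parse_autohealer_action() is a hypothetical name shown only to
# make the URL layout explicit.
# ---------------------------------------------------------------------
from urllib import parse as _parse


def parse_autohealer_action(action_url):
    """Return (trust_id, verb) from a trust+autohealer:// action URL."""
    split = _parse.urlsplit(action_url)
    # urlsplit exposes the <trust_id>:<verb>@ netloc through the
    # username/password accessors.
    return split.username, split.password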
from unittest import mock from oslo_utils import netutils from aodh.notifier import heat as heat_notifier from aodh.tests.unit.notifier import base class TestTrustHeatAlarmNotifier(base.TestNotifierBase): @mock.patch("aodh.keystone_client.get_heat_client_from_trust") def test_notify(self, mock_heatclient): action = netutils.urlsplit("trust+autohealer://fake_trust_id:delete@") alarm_id = "fake_alarm_id" alarm_name = "fake_alarm_name" severity = "low" previous = "ok" current = "alarm" reason = "no good reason" reason_data = { "stack_id": "fake_stack_id", "asg_id": "fake_asg_id", "unhealthy_members": [ {"id": "3bd8bc5a-7632-11e9-84cd-00224d6b7bc1"} ] } class FakeResource(object): def __init__(self, resource_name): self.parent_resource = resource_name mock_client = mock_heatclient.return_value mock_client.resources.list.return_value = [ FakeResource("fake_resource_name") ] notifier = heat_notifier.TrustHeatAlarmNotifier(self.conf) notifier.notify(action, alarm_id, alarm_name, severity, previous, current, reason, reason_data) mock_heatclient.assert_called_once_with(self.conf, "fake_trust_id") mock_client.resources.mark_unhealthy.assert_called_once_with( "fake_asg_id", "fake_resource_name", True, "unhealthy load balancer member" ) mock_client.stacks.update.assert_called_once_with( "fake_stack_id", existing=True ) @mock.patch("aodh.keystone_client.get_heat_client_from_trust") def test_notify_stack_id_missing(self, mock_heatclient): action = netutils.urlsplit("trust+autohealer://fake_trust_id:delete@") alarm_id = "fake_alarm_id" alarm_name = "fake_alarm_name" severity = "low" previous = "ok" current = "alarm" reason = "no good reason" reason_data = { "asg_id": "fake_asg_id", "unhealthy_members": [ {"tags": ["3bd8bc5a-7632-11e9-84cd-00224d6b7bc1"]} ] } notifier = heat_notifier.TrustHeatAlarmNotifier(self.conf) notifier.notify(action, alarm_id, alarm_name, severity, previous, current, reason, reason_data) self.assertFalse(mock_heatclient.called) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/unit/test_api_v2_capabilities.py0000664000175000017500000000441700000000000023103 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslotest import base from aodh.api.controllers.v2 import capabilities class TestCapabilities(base.BaseTestCase): def test_recursive_keypairs(self): data = {'a': 'A', 'b': 'B', 'nested': {'a': 'A', 'b': 'B'}} pairs = list(capabilities._recursive_keypairs(data)) self.assertEqual([('a', 'A'), ('b', 'B'), ('nested:a', 'A'), ('nested:b', 'B')], pairs) def test_recursive_keypairs_with_separator(self): data = {'a': 'A', 'b': 'B', 'nested': {'a': 'A', 'b': 'B', }, } separator = '.' 
pairs = list(capabilities._recursive_keypairs(data, separator)) self.assertEqual([('a', 'A'), ('b', 'B'), ('nested.a', 'A'), ('nested.b', 'B')], pairs) def test_recursive_keypairs_with_list_of_dict(self): small = 1 big = 1 << 64 expected = [('a', 'A'), ('b', 'B'), ('nested:list', [{small: 99, big: 42}])] data = {'a': 'A', 'b': 'B', 'nested': {'list': [{small: 99, big: 42}]}} pairs = list(capabilities._recursive_keypairs(data)) self.assertEqual(len(expected), len(pairs)) for k, v in pairs: # the keys 1 and 1<<64 cause a hash collision on 64bit platforms if k == 'nested:list': self.assertIn(v, [[{small: 99, big: 42}], [{big: 42, small: 99}]]) else: self.assertIn((k, v), expected) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/unit/test_bin.py0000664000175000017500000001020100000000000017746 0ustar00zuulzuul00000000000000# # Copyright 2012-2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import subprocess from oslo_utils import fileutils from aodh.tests import base class BinTestCase(base.BaseTestCase): def setUp(self): super(BinTestCase, self).setUp() content = ("[database]\n" "connection=log://localhost\n") content = content.encode('utf-8') self.tempfile = fileutils.write_to_tempfile(content=content, prefix='aodh', suffix='.conf') def tearDown(self): super(BinTestCase, self).tearDown() os.remove(self.tempfile) def test_dbsync_run(self): subp = subprocess.Popen(['aodh-dbsync', "--config-file=%s" % self.tempfile]) self.assertEqual(0, subp.wait()) def test_run_expirer_ttl_disabled(self): subp = subprocess.Popen(['aodh-expirer', '-d', "--config-file=%s" % self.tempfile], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, __ = subp.communicate() self.assertEqual(0, subp.poll()) self.assertIn(b"Nothing to clean, database alarm history " b"time to live is disabled", out) def test_run_expirer_ttl_enabled(self): content = ("[database]\n" "alarm_history_time_to_live=1\n" "alarm_histories_delete_batch_size=10\n" "connection=log://localhost\n") content = content.encode('utf-8') self.tempfile = fileutils.write_to_tempfile(content=content, prefix='aodh', suffix='.conf') subp = subprocess.Popen(['aodh-expirer', '-d', "--config-file=%s" % self.tempfile], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, __ = subp.communicate() self.assertEqual( 0, subp.poll(), f'Failed with stdout:\n{out.decode()}', ) msg = "Dropping alarm history 10 data with TTL 1" msg = msg.encode('utf-8') self.assertIn(msg, out) class BinEvaluatorTestCase(base.BaseTestCase): def setUp(self): super(BinEvaluatorTestCase, self).setUp() content = ("[database]\n" "connection=log://localhost\n") content = content.encode('utf-8') self.tempfile = fileutils.write_to_tempfile(content=content, prefix='aodh', suffix='.conf') self.subp = None def tearDown(self): super(BinEvaluatorTestCase, self).tearDown() if self.subp: self.subp.kill() os.remove(self.tempfile) def test_starting_evaluator(self): self.subp = subprocess.Popen(['aodh-evaluator', 
"--config-file=%s" % self.tempfile], stderr=subprocess.PIPE) self.assertIsNone(self.subp.poll()) class BinNotifierTestCase(BinEvaluatorTestCase): def test_starting_notifier(self): self.subp = subprocess.Popen(['aodh-notifier', "--config-file=%s" % self.tempfile], stderr=subprocess.PIPE) self.assertIsNone(self.subp.poll()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/unit/test_coordination.py0000664000175000017500000002342300000000000021700 0ustar00zuulzuul00000000000000# # Copyright 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from oslo_config import fixture as fixture_config import tooz.coordination from aodh import coordination from aodh import service from aodh.tests import base class MockToozCoordinator(object): def __init__(self, member_id, shared_storage): self._member_id = member_id self._groups = shared_storage self.is_started = False def start(self): self.is_started = True def stop(self): pass def heartbeat(self): pass def create_group(self, group_id): if group_id in self._groups: return MockAsyncError( tooz.coordination.GroupAlreadyExist(group_id)) self._groups[group_id] = {} return MockAsyncResult(None) def join_group(self, group_id, capabilities=b''): if group_id not in self._groups: return MockAsyncError( tooz.coordination.GroupNotCreated(group_id)) if self._member_id in self._groups[group_id]: return MockAsyncError( tooz.coordination.MemberAlreadyExist(group_id, self._member_id)) self._groups[group_id][self._member_id] = { "capabilities": capabilities, } return MockAsyncResult(None) def leave_group(self, group_id): return MockAsyncResult(None) def get_members(self, group_id): if group_id not in self._groups: return MockAsyncError( tooz.coordination.GroupNotCreated(group_id)) return MockAsyncResult(self._groups[group_id]) class MockToozCoordExceptionRaiser(MockToozCoordinator): def start(self): raise tooz.coordination.ToozError('error') def heartbeat(self): raise tooz.coordination.ToozError('error') def join_group(self, group_id, capabilities=b''): raise tooz.coordination.ToozError('error') def get_members(self, group_id): raise tooz.coordination.ToozError('error') class MockAsyncResult(tooz.coordination.CoordAsyncResult): def __init__(self, result): self.result = result def get(self, timeout=0): return self.result @staticmethod def done(): return True class MockAsyncError(tooz.coordination.CoordAsyncResult): def __init__(self, error): self.error = error def get(self, timeout=0): raise self.error @staticmethod def done(): return True class TestHashRing(base.BaseTestCase): def test_hash_ring(self): num_nodes = 10 num_keys = 1000 nodes = [str(x) for x in range(num_nodes)] hr = coordination.HashRing(nodes) buckets = [0] * num_nodes assignments = [-1] * num_keys for k in range(num_keys): n = int(hr.get_node(str(k))) self.assertLessEqual(0, n) self.assertLessEqual(n, num_nodes) buckets[n] += 1 assignments[k] = n # at least something in each bucket 
self.assertTrue(all((c > 0 for c in buckets))) # approximately even distribution diff = max(buckets) - min(buckets) self.assertLess(diff, 0.3 * (num_keys / num_nodes)) # consistency num_nodes += 1 nodes.append(str(num_nodes + 1)) hr = coordination.HashRing(nodes) for k in range(num_keys): n = int(hr.get_node(str(k))) assignments[k] -= n reassigned = len([c for c in assignments if c != 0]) self.assertLess(reassigned, num_keys / num_nodes) class TestPartitioning(base.BaseTestCase): def setUp(self): super(TestPartitioning, self).setUp() conf = service.prepare_service(argv=[], config_files=[]) self.CONF = self.useFixture(fixture_config.Config(conf)).conf self.shared_storage = {} def _get_new_started_coordinator(self, shared_storage, agent_id=None, coordinator_cls=None): coordinator_cls = coordinator_cls or MockToozCoordinator self.CONF.set_override('backend_url', 'xxx://yyy', group='coordination') with mock.patch('tooz.coordination.get_coordinator', lambda _, member_id: coordinator_cls(member_id, shared_storage)): pc = coordination.PartitionCoordinator(self.CONF, agent_id) pc.start() return pc def _usage_simulation(self, *agents_kwargs): partition_coordinators = [] for kwargs in agents_kwargs: partition_coordinator = self._get_new_started_coordinator( self.shared_storage, kwargs['agent_id'], kwargs.get( 'coordinator_cls')) partition_coordinator.join_group(kwargs['group_id']) partition_coordinators.append(partition_coordinator) for i, kwargs in enumerate(agents_kwargs): all_resources = kwargs.get('all_resources', []) expected_resources = kwargs.get('expected_resources', []) actual_resources = partition_coordinators[i].extract_my_subset( kwargs['group_id'], all_resources) self.assertEqual(expected_resources, actual_resources) def test_single_group(self): agents = [dict(agent_id='agent1', group_id='group'), dict(agent_id='agent2', group_id='group')] self._usage_simulation(*agents) self.assertEqual(['group'], sorted(self.shared_storage.keys())) self.assertEqual(['agent1', 'agent2'], sorted(self.shared_storage['group'].keys())) def test_multiple_groups(self): agents = [dict(agent_id='agent1', group_id='group1'), dict(agent_id='agent2', group_id='group2')] self._usage_simulation(*agents) self.assertEqual(['group1', 'group2'], sorted(self.shared_storage.keys())) def test_member_id(self): agent = 'agent'.encode('ascii') coord = self._get_new_started_coordinator({}, agent) self.assertEqual(agent, coord._my_id) def test_partitioning(self): all_resources = ['resource_%s' % i for i in range(1000)] agents = ['agent_%s' % i for i in range(10)] expected_resources = [list() for _ in range(len(agents))] hr = coordination.HashRing(agents) for r in all_resources: key = agents.index(hr.get_node(r)) expected_resources[key].append(r) agents_kwargs = [] for i, agent in enumerate(agents): agents_kwargs.append(dict(agent_id=agent, group_id='group', all_resources=all_resources, expected_resources=expected_resources[i])) self._usage_simulation(*agents_kwargs) @mock.patch.object(coordination.LOG, 'exception') def test_coordination_backend_offline(self, mocked_exception): agents = [dict(agent_id='agent1', group_id='group', all_resources=['res1', 'res2'], expected_resources=[], coordinator_cls=MockToozCoordExceptionRaiser)] self._usage_simulation(*agents) called = [mock.call('Error connecting to coordination backend.'), mock.call('Error getting group membership info from ' 'coordination backend.')] self.assertEqual(called, mocked_exception.call_args_list) @mock.patch.object(coordination.LOG, 'exception') 
@mock.patch.object(coordination.LOG, 'info') def test_reconnect(self, mock_info, mocked_exception): coord = self._get_new_started_coordinator({}, 'a', MockToozCoordExceptionRaiser) with mock.patch('tooz.coordination.get_coordinator', return_value=MockToozCoordExceptionRaiser('a', {})): coord.heartbeat() called = [mock.call('Error connecting to coordination backend.'), mock.call('Error connecting to coordination backend.'), mock.call('Error sending a heartbeat to coordination ' 'backend.')] self.assertEqual(called, mocked_exception.call_args_list) with mock.patch('tooz.coordination.get_coordinator', return_value=MockToozCoordinator('a', {})): coord.heartbeat() mock_info.assert_called_with('Coordination backend started ' 'successfully.') def test_group_id_none(self): coord = self._get_new_started_coordinator({}, 'a') self.assertTrue(coord._coordinator.is_started) with mock.patch.object(coord._coordinator, 'join_group') as mocked: coord.join_group(None) self.assertEqual(0, mocked.call_count) with mock.patch.object(coord._coordinator, 'leave_group') as mocked: coord.leave_group(None) self.assertEqual(0, mocked.call_count) def test_stop(self): coord = self._get_new_started_coordinator({}, 'a') self.assertTrue(coord._coordinator.is_started) coord.join_group("123") coord.stop() self.assertIsEmpty(coord._groups) self.assertIsNone(coord._coordinator) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/unit/test_evaluator.py0000664000175000017500000002375300000000000021220 0ustar00zuulzuul00000000000000# # Copyright 2013 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for aodh.evaluator.AlarmEvaluationService. 
""" import fixtures import time from unittest import mock from observabilityclient import prometheus_client from oslo_config import fixture as fixture_config from stevedore import extension from aodh import evaluator from aodh import service from aodh.evaluator import prometheus from aodh.tests import base as tests_base class TestAlarmEvaluationService(tests_base.BaseTestCase): def setUp(self): super(TestAlarmEvaluationService, self).setUp() conf = service.prepare_service(argv=[], config_files=[]) self.CONF = self.useFixture(fixture_config.Config(conf)).conf self.CONF.set_override('workers', 1, 'evaluator') self.setup_messaging(self.CONF) self.threshold_eval = mock.MagicMock() self._fake_conn = mock.Mock() self._fake_conn.get_alarms.return_value = [] self._fake_pc = mock.Mock() self._fake_em = extension.ExtensionManager.make_test_instance( [ extension.Extension( 'gnocchi_aggregation_by_metrics_threshold', None, None, self.threshold_eval), ] ) self.useFixture(fixtures.MockPatch( 'stevedore.extension.ExtensionManager', return_value=self._fake_em )) self.useFixture(fixtures.MockPatch( 'aodh.coordination.PartitionCoordinator', return_value=self._fake_pc )) self.useFixture(fixtures.MockPatch( 'aodh.storage.get_connection_from_config', return_value=self._fake_conn )) def _do_test_start(self, test_interval=120, coordination_heartbeat_interval=1.0, coordination_active=False): self.CONF.set_override('evaluation_interval', test_interval, group='evaluator') self.CONF.set_override('heartbeat_interval', coordination_heartbeat_interval, group='coordination') self._fake_pc.is_active.return_value = coordination_active svc = evaluator.AlarmEvaluationService(0, self.CONF) self.addCleanup(svc.terminate) svc.terminate() svc.partition_coordinator.start.assert_called_once_with() svc.partition_coordinator.join_group.assert_called_once_with( svc.PARTITIONING_GROUP_NAME) def test_start_singleton(self): self._do_test_start(coordination_active=False) def test_start_coordinated(self): self._do_test_start(coordination_active=True) def test_start_coordinated_high_hb_interval(self): self._do_test_start(coordination_active=True, test_interval=10, coordination_heartbeat_interval=5) def test_evaluation_cycle(self): alarm = mock.Mock(type='gnocchi_aggregation_by_metrics_threshold', alarm_id="alarm_id1") self._fake_pc.extract_my_subset.return_value = ["alarm_id1"] self._fake_pc.is_active.side_effect = [False, False, True, True] self._fake_conn.get_alarms.return_value = [alarm] self.threshold_eval.evaluate.side_effect = [Exception('Boom!'), None] svc = evaluator.AlarmEvaluationService(0, self.CONF) self.addCleanup(svc.terminate) time.sleep(1) target = svc.partition_coordinator.extract_my_subset target.assert_called_once_with(svc.PARTITIONING_GROUP_NAME, ["alarm_id1"]) self.threshold_eval.evaluate.assert_called_once_with(alarm) def test_evaluation_cycle_with_bad_alarm(self): alarms = [ mock.Mock(type='gnocchi_aggregation_by_metrics_threshold', name='bad', alarm_id='a'), mock.Mock(type='gnocchi_aggregation_by_metrics_threshold', name='good', alarm_id='b'), ] self.threshold_eval.evaluate.side_effect = [Exception('Boom!'), None] self._fake_pc.is_active.side_effect = [False, False, True, True, True] self._fake_pc.extract_my_subset.return_value = ['a', 'b'] self._fake_conn.get_alarms.return_value = alarms svc = evaluator.AlarmEvaluationService(0, self.CONF) self.addCleanup(svc.terminate) time.sleep(1) self.assertEqual([mock.call(alarms[0]), mock.call(alarms[1])], self.threshold_eval.evaluate.call_args_list) def 
test_unknown_extension_skipped(self): alarms = [ mock.Mock(type='not_existing_type', alarm_id='a'), mock.Mock(type='gnocchi_aggregation_by_metrics_threshold', alarm_id='b') ] self._fake_pc.is_active.return_value = False self._fake_pc.extract_my_subset.return_value = ['a', 'b'] self._fake_conn.get_alarms.return_value = alarms svc = evaluator.AlarmEvaluationService(0, self.CONF) self.addCleanup(svc.terminate) time.sleep(1) self.threshold_eval.evaluate.assert_called_once_with(alarms[1]) def test_check_alarm_query_constraints(self): self._fake_conn.get_alarms.return_value = [] self._fake_pc.extract_my_subset.return_value = [] self._fake_pc.is_active.return_value = False svc = evaluator.AlarmEvaluationService(0, self.CONF) self.addCleanup(svc.terminate) time.sleep(1) child = {'enabled': True, 'type': {'ne': 'event'}} self.assertDictContains(svc.storage_conn.get_alarms.call_args[1], child) def test_evaluation_cycle_no_coordination(self): alarm = mock.Mock(type='gnocchi_aggregation_by_metrics_threshold', alarm_id="alarm_id1") self._fake_pc.is_active.return_value = False self._fake_conn.get_alarms.return_value = [alarm] self._fake_conn.conditional_update.return_value = True svc = evaluator.AlarmEvaluationService(0, self.CONF) self.addCleanup(svc.terminate) time.sleep(1) target = svc.partition_coordinator.extract_my_subset self.assertEqual(0, target.call_count) self.threshold_eval.evaluate.assert_called_once_with(alarm) def test_evaluation_cycle_no_coordination_alarm_modified(self): alarm = mock.Mock(type='gnocchi_aggregation_by_metrics_threshold', alarm_id="alarm_id1") self._fake_pc.is_active.return_value = False self._fake_conn.get_alarms.return_value = [alarm] self._fake_conn.conditional_update.return_value = False svc = evaluator.AlarmEvaluationService(0, self.CONF) self.addCleanup(svc.terminate) time.sleep(1) target = svc.partition_coordinator.extract_my_subset self.assertEqual(0, target.call_count) self.assertEqual(0, self.threshold_eval.evaluate.call_count) class TestPrometheusEvaluator(tests_base.BaseTestCase): def setUp(self): super(TestPrometheusEvaluator, self).setUp() conf = service.prepare_service(argv=[], config_files=[]) self.CONF = self.useFixture(fixture_config.Config(conf)).conf def test_rule_evaluation(self): metric_list = [ prometheus_client.PrometheusMetric({'metric': 'mtr', 'value': (0, 10)}), prometheus_client.PrometheusMetric({'metric': 'mtr', 'value': (1, 15)}), prometheus_client.PrometheusMetric({'metric': 'mtr', 'value': (2, 20)}), prometheus_client.PrometheusMetric({'metric': 'mtr', 'value': (3, 25)}), prometheus_client.PrometheusMetric({'metric': 'mtr', 'value': (4, 30)}), prometheus_client.PrometheusMetric({'metric': 'mtr', 'value': (5, 15)}), ] with mock.patch.object(prometheus.PrometheusEvaluator, '_set_obsclient', return_value=None): # mock Prometheus client ev = prometheus.PrometheusEvaluator(self.CONF) ev._get_metric_data = mock.Mock(return_value=metric_list) # test transfer to alarm state state, trend, stats, outside, reason = ev.evaluate_rule( {'query': 'mtr', 'threshold': 9, 'comparison_operator': 'gt'}) self.assertEqual('alarm', state) self.assertEqual(6, outside) # test transfer to ok state state, trend, stats, outside, reason = ev.evaluate_rule( {'query': 'mtr', 'threshold': 31, 'comparison_operator': 'gt'}) self.assertEqual('ok', state) self.assertEqual(0, outside) # test trending to alarm state state, trend, stats, outside, reason = ev.evaluate_rule( {'query': 'mtr', 'threshold': 14, 'comparison_operator': 'gt'}) self.assertEqual('alarm', trend) 
self.assertEqual(5, outside) # test trending to ok state state, trend, stats, outside, reason = ev.evaluate_rule( {'query': 'mtr', 'threshold': 20, 'comparison_operator': 'gt'}) self.assertEqual('ok', trend) self.assertEqual(2, outside) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/unit/test_event.py0000664000175000017500000000457300000000000020336 0ustar00zuulzuul00000000000000# # Copyright 2015 NEC Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time from unittest import mock from oslo_config import fixture as fixture_config import oslo_messaging from aodh import event from aodh import service from aodh.tests import base as tests_base class TestEventAlarmEvaluationService(tests_base.BaseTestCase): def setUp(self): super(TestEventAlarmEvaluationService, self).setUp() conf = service.prepare_service(argv=[], config_files=[]) self.CONF = self.useFixture(fixture_config.Config(conf)).conf self.CONF.set_override("batch_size", 2, 'listener') self.setup_messaging(self.CONF) @mock.patch('aodh.storage.get_connection_from_config', mock.MagicMock()) @mock.patch('aodh.event.EventAlarmEndpoint.sample') def test_batch_event_listener(self, mocked): msg_notifier = oslo_messaging.Notifier( self.transport, topics=['alarm.all'], driver='messaging', publisher_id='test-publisher') received_events = [] mocked.side_effect = lambda msg: received_events.append(msg) event1 = {'event_type': 'compute.instance.update', 'traits': ['foo', 'bar'], 'message_id': '20d03d17-4aba-4900-a179-dba1281a3451', 'generated': '2016-04-23T06:50:21.622739'} event2 = {'event_type': 'compute.instance.update', 'traits': ['foo', 'bar'], 'message_id': '20d03d17-4aba-4900-a179-dba1281a3452', 'generated': '2016-04-23T06:50:23.622739'} msg_notifier.sample({}, 'event', event1) msg_notifier.sample({}, 'event', event2) svc = event.EventAlarmEvaluationService(0, self.CONF) self.addCleanup(svc.terminate) time.sleep(1) self.assertEqual(1, len(received_events)) self.assertEqual(2, len(received_events[0])) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/unit/test_messaging.py0000664000175000017500000000516500000000000021170 0ustar00zuulzuul00000000000000# Copyright (C) 2014 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
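# ---------------------------------------------------------------------
# Illustrative sketch (an assumption, not the aodh implementation): the
# tests below assert that aodh.messaging.get_transport() reuses one
# transport per URL when caching is enabled and builds a fresh one when
# cache=False. The behaviour being asserted is essentially the memoization
# below; _get_cached_transport() and its `factory` argument are
# hypothetical stand-ins for the real oslo.messaging plumbing.
# ---------------------------------------------------------------------
_TRANSPORTS = {}


def _get_cached_transport(url, factory, cache=True):
    """Return factory(url), memoized per URL when cache=True."""
    if not cache:
        return factory(url)
    if url not in _TRANSPORTS:
        _TRANSPORTS[url] = factory(url)
    return _TRANSPORTS[url]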
from oslo_config import fixture as fixture_config import oslo_messaging.conffixture from oslotest import base from aodh import messaging class MessagingTests(base.BaseTestCase): def setUp(self): super(MessagingTests, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf self.useFixture(oslo_messaging.conffixture.ConfFixture(self.CONF)) def test_get_transport_invalid_url(self): self.assertRaises(oslo_messaging.InvalidTransportURL, messaging.get_transport, self.CONF, "notvalid!") def test_get_transport_url_caching(self): t1 = messaging.get_transport(self.CONF, 'fake://') t2 = messaging.get_transport(self.CONF, 'fake://') self.assertEqual(t1, t2) def test_get_transport_default_url_caching(self): t1 = messaging.get_transport(self.CONF, ) t2 = messaging.get_transport(self.CONF, ) self.assertEqual(t1, t2) def test_get_transport_default_url_no_caching(self): t1 = messaging.get_transport(self.CONF, cache=False) t2 = messaging.get_transport(self.CONF, cache=False) self.assertNotEqual(t1, t2) def test_get_transport_url_no_caching(self): t1 = messaging.get_transport(self.CONF, 'fake://', cache=False) t2 = messaging.get_transport(self.CONF, 'fake://', cache=False) self.assertNotEqual(t1, t2) def test_get_transport_default_url_caching_mix(self): t1 = messaging.get_transport(self.CONF, ) t2 = messaging.get_transport(self.CONF, cache=False) self.assertNotEqual(t1, t2) def test_get_transport_url_caching_mix(self): t1 = messaging.get_transport(self.CONF, 'fake://') t2 = messaging.get_transport(self.CONF, 'fake://', cache=False) self.assertNotEqual(t1, t2) def test_get_transport_optional(self): self.CONF.set_override('transport_url', 'non-url') self.assertIsNone(messaging.get_transport(self.CONF, optional=True, cache=False)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/unit/test_notifier.py0000664000175000017500000005105300000000000021027 0ustar00zuulzuul00000000000000# # Copyright 2013-2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
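# ---------------------------------------------------------------------
# Illustrative sketch (an assumption, not aodh code): the REST notifier
# tests below check that an alarm transition is POSTed as a JSON body
# matching DATA_JSON. A minimal builder for that body, using only the
# fields the tests assert on, could look like this; build_alarm_body()
# is a hypothetical helper shown only to make the payload shape explicit.
# ---------------------------------------------------------------------
import json as _json


def build_alarm_body(alarm_id, alarm_name, severity, previous, current,
                     reason, reason_data):
    """Serialize the alarm-transition payload the tests expect."""
    return _json.dumps({
        'current': current,
        'alarm_id': alarm_id,
        'alarm_name': alarm_name,
        'severity': severity,
        'reason': reason,
        'reason_data': reason_data,
        'previous': previous,
    })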
import fixtures import json import time from unittest import mock from oslo_config import cfg from oslo_config import fixture as fixture_config import oslo_messaging import requests from urllib import parse as urlparse from aodh import keystone_client from aodh import notifier from aodh import service from aodh.tests import base as tests_base DATA_JSON = json.loads( '{"current": "ALARM", "alarm_id": "foobar", "alarm_name": "testalarm",' ' "severity": "critical", "reason": "what ?",' ' "reason_data": {"test": "test"}, "previous": "OK"}' ) NOTIFICATION = dict(alarm_id='foobar', alarm_name='testalarm', severity='critical', condition=dict(threshold=42), reason='what ?', reason_data={'test': 'test'}, previous='OK', current='ALARM') class TestAlarmNotifierService(tests_base.BaseTestCase): def setUp(self): super(TestAlarmNotifierService, self).setUp() conf = service.prepare_service(argv=[], config_files=[]) self.CONF = self.useFixture(fixture_config.Config(conf)).conf self.setup_messaging(self.CONF) def test_init_host_queue(self): self.service = notifier.AlarmNotifierService(0, self.CONF) self.service.terminate() class TestKeystoneClient(tests_base.BaseTestCase): def setUp(self): super(TestKeystoneClient, self).setUp() self.config = fixture_config.Config( service.prepare_service(argv=[], config_files=[])) self.config.setUp() self.config.register_opts([ cfg.StrOpt('user_domain_id', default=""), cfg.StrOpt('user_domain_name', default=""), cfg.StrOpt('username', default="username"), cfg.StrOpt('password', default="password"), cfg.StrOpt('auth_url', default="testdomain") ], "service_credentials") def test_get_trusted_client_domain_id(self): self.config.config( **{'group': "service_credentials", 'user_domain_id': "uuid-domain"}) client = keystone_client.get_trusted_client( self.config.conf, "testing") self.assertEqual(client.session.auth._user_domain_id, "uuid-domain") self.assertEqual(client.session.auth._user_domain_name, '') def test_get_trusted_client_domain_name(self): self.config.config( **{'group': "service_credentials", 'user_domain_name': "testdomain"}) client = keystone_client.get_trusted_client( self.config.conf, "testing") self.assertEqual(client.session.auth._user_domain_name, "testdomain") self.assertEqual(client.session.auth._user_domain_id, '') def test_get_trusted_client_domain(self): self.config.config(**{'group': "service_credentials", 'user_domain_name': "testdomain", 'user_domain_id': "uuid-gen", }) client = keystone_client.get_trusted_client(self.config.conf, "testing") self.assertEqual(client.session.auth._user_domain_name, "testdomain") self.assertEqual(client.session.auth._user_domain_id, "uuid-gen") class TestAlarmNotifier(tests_base.BaseTestCase): def setUp(self): super(TestAlarmNotifier, self).setUp() conf = service.prepare_service(argv=[], config_files=[]) self.CONF = self.useFixture(fixture_config.Config(conf)).conf self.setup_messaging(self.CONF) self._msg_notifier = oslo_messaging.Notifier( self.transport, topics=['alarming'], driver='messaging', publisher_id='testpublisher') self.zaqar = FakeZaqarClient(self) self.useFixture(fixtures.MockPatch( 'aodh.notifier.zaqar.ZaqarAlarmNotifier.get_zaqar_client', return_value=self.zaqar)) self.service = notifier.AlarmNotifierService(0, self.CONF) self.addCleanup(self.service.terminate) def test_notify_alarm(self): data = { 'actions': ['test://'], 'alarm_id': 'foobar', 'alarm_name': 'testalarm', 'severity': 'critical', 'previous': 'OK', 'current': 'ALARM', 'reason': 'Everything is on fire', 'reason_data': {'fire': 'everywhere'} } 
self._msg_notifier.sample({}, 'alarm.update', data) time.sleep(1) notifications = self.service.notifiers['test'].obj.notifications self.assertEqual(1, len(notifications)) self.assertEqual((urlparse.urlsplit(data['actions'][0]), data['alarm_id'], data['alarm_name'], data['severity'], data['previous'], data['current'], data['reason'], data['reason_data']), notifications[0]) @mock.patch('aodh.notifier.LOG.debug') def test_notify_alarm_with_batch_listener(self, logger): data1 = { 'actions': ['test://'], 'alarm_id': 'foobar', 'alarm_name': 'testalarm', 'severity': 'critical', 'previous': 'OK', 'current': 'ALARM', 'reason': 'Everything is on fire', 'reason_data': {'fire': 'everywhere'} } data2 = { 'actions': ['test://'], 'alarm_id': 'foobar2', 'alarm_name': 'testalarm2', 'severity': 'low', 'previous': 'ALARM', 'current': 'OK', 'reason': 'Everything is fine', 'reason_data': {'fine': 'fine'} } self.service.terminate() self.CONF.set_override("batch_size", 2, 'notifier') # Init a new service with new configuration self.svc = notifier.AlarmNotifierService(0, self.CONF) self.addCleanup(self.svc.terminate) self._msg_notifier.sample({}, 'alarm.update', data1) self._msg_notifier.sample({}, 'alarm.update', data2) time.sleep(1) notifications = self.svc.notifiers['test'].obj.notifications self.assertEqual(2, len(notifications)) self.assertEqual((urlparse.urlsplit(data1['actions'][0]), data1['alarm_id'], data1['alarm_name'], data1['severity'], data1['previous'], data1['current'], data1['reason'], data1['reason_data']), notifications[0]) self.assertEqual((urlparse.urlsplit(data2['actions'][0]), data2['alarm_id'], data2['alarm_name'], data2['severity'], data2['previous'], data2['current'], data2['reason'], data2['reason_data']), notifications[1]) self.assertEqual(mock.call('Received %s messages in batch.', 2), logger.call_args_list[0]) @staticmethod def _notification(action): notification = {} notification.update(NOTIFICATION) notification['actions'] = [action] return notification @mock.patch('aodh.notifier.rest.LOG') def test_notify_alarm_rest_action_ok(self, m_log): action = 'http://host/action' with mock.patch.object(requests.Session, 'post') as poster: self._msg_notifier.sample({}, 'alarm.update', self._notification(action)) time.sleep(1) poster.assert_called_with(action, data=mock.ANY, headers=mock.ANY) args, kwargs = poster.call_args self.assertEqual( { 'x-openstack-request-id': kwargs['headers']['x-openstack-request-id'], 'content-type': 'application/json' }, kwargs['headers']) self.assertEqual(DATA_JSON, json.loads(kwargs['data'])) self.assertEqual(2, len(m_log.info.call_args_list)) expected = mock.call('Notifying alarm <%(id)s> gets response: ' '%(status_code)s %(reason)s.', mock.ANY) self.assertEqual(expected, m_log.info.call_args_list[1]) def test_notify_alarm_rest_action_with_ssl_client_cert(self): action = 'https://host/action' certificate = "/etc/ssl/cert/whatever.pem" self.CONF.set_override("rest_notifier_certificate_file", certificate) with mock.patch.object(requests.Session, 'post') as poster: self._msg_notifier.sample({}, 'alarm.update', self._notification(action)) time.sleep(1) poster.assert_called_with(action, data=mock.ANY, headers=mock.ANY, cert=certificate, verify=True) args, kwargs = poster.call_args self.assertEqual( { 'x-openstack-request-id': kwargs['headers']['x-openstack-request-id'], 'content-type': 'application/json' }, kwargs['headers']) self.assertEqual(DATA_JSON, json.loads(kwargs['data'])) def test_notify_alarm_rest_action_with_ssl_client_cert_and_key(self): action = 
'https://host/action' certificate = "/etc/ssl/cert/whatever.pem" key = "/etc/ssl/cert/whatever.key" self.CONF.set_override("rest_notifier_certificate_file", certificate) self.CONF.set_override("rest_notifier_certificate_key", key) with mock.patch.object(requests.Session, 'post') as poster: self._msg_notifier.sample({}, 'alarm.update', self._notification(action)) time.sleep(1) poster.assert_called_with(action, data=mock.ANY, headers=mock.ANY, cert=(certificate, key), verify=True) args, kwargs = poster.call_args self.assertEqual( { 'x-openstack-request-id': kwargs['headers']['x-openstack-request-id'], 'content-type': 'application/json'}, kwargs['headers']) self.assertEqual(DATA_JSON, json.loads(kwargs['data'])) def test_notify_alarm_rest_action_with_ssl_verify_disable_by_cfg(self): action = 'https://host/action' self.CONF.set_override("rest_notifier_ssl_verify", False) with mock.patch.object(requests.Session, 'post') as poster: self._msg_notifier.sample({}, 'alarm.update', self._notification(action)) time.sleep(1) poster.assert_called_with(action, data=mock.ANY, headers=mock.ANY, verify=False) args, kwargs = poster.call_args self.assertEqual( { 'x-openstack-request-id': kwargs['headers']['x-openstack-request-id'], 'content-type': 'application/json' }, kwargs['headers']) self.assertEqual(DATA_JSON, json.loads(kwargs['data'])) def test_notify_alarm_rest_action_with_ssl_server_verify_enable(self): action = 'https://host/action' ca_bundle = "/path/to/custom_cert.pem" self.CONF.set_override("rest_notifier_ca_bundle_certificate_path", ca_bundle) with mock.patch.object(requests.Session, 'post') as poster: self._msg_notifier.sample({}, 'alarm.update', self._notification(action)) time.sleep(1) poster.assert_called_with(action, data=mock.ANY, headers=mock.ANY, verify=ca_bundle) args, kwargs = poster.call_args self.assertEqual(DATA_JSON, json.loads(kwargs['data'])) def test_notify_alarm_rest_action_with_ssl_verify_disable(self): action = 'https://host/action?aodh-alarm-ssl-verify=0' with mock.patch.object(requests.Session, 'post') as poster: self._msg_notifier.sample({}, 'alarm.update', self._notification(action)) time.sleep(1) poster.assert_called_with(action, data=mock.ANY, headers=mock.ANY, verify=False) args, kwargs = poster.call_args self.assertEqual( { 'x-openstack-request-id': kwargs['headers']['x-openstack-request-id'], 'content-type': 'application/json' }, kwargs['headers']) self.assertEqual(DATA_JSON, json.loads(kwargs['data'])) def test_notify_alarm_rest_action_with_ssl_verify_enable_by_user(self): action = 'https://host/action?aodh-alarm-ssl-verify=1' self.CONF.set_override("rest_notifier_ssl_verify", False) with mock.patch.object(requests.Session, 'post') as poster: self._msg_notifier.sample({}, 'alarm.update', self._notification(action)) time.sleep(1) poster.assert_called_with(action, data=mock.ANY, headers=mock.ANY, verify=True) args, kwargs = poster.call_args self.assertEqual( { 'x-openstack-request-id': kwargs['headers']['x-openstack-request-id'], 'content-type': 'application/json' }, kwargs['headers']) self.assertEqual(DATA_JSON, json.loads(kwargs['data'])) @staticmethod def _fake_urlsplit(*args, **kwargs): raise Exception("Evil urlsplit!") def test_notify_alarm_invalid_url(self): with mock.patch('oslo_utils.netutils.urlsplit', self._fake_urlsplit): LOG = mock.MagicMock() with mock.patch('aodh.notifier.LOG', LOG): self._msg_notifier.sample( {}, 'alarm.update', { 'actions': ['no-such-action-i-am-sure'], 'alarm_id': 'foobar', 'condition': {'threshold': 42}, }) time.sleep(1) 
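            # netutils.urlsplit is patched to raise, so the notifier is expected
            # to catch the failure and log an error rather than let the
            # exception escape and kill the service.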
self.assertTrue(LOG.error.called) def test_notify_alarm_invalid_action(self): LOG = mock.MagicMock() with mock.patch('aodh.notifier.LOG', LOG): self._msg_notifier.sample( {}, 'alarm.update', { 'actions': ['no-such-action-i-am-sure://'], 'alarm_id': 'foobar', 'condition': {'threshold': 42}, }) time.sleep(1) self.assertTrue(LOG.error.called) def test_notify_alarm_trust_action(self): action = 'trust+http://trust-1234@host/action' url = 'http://host/action' client = mock.MagicMock() client.session.auth.get_access.return_value.auth_token = 'token_1234' self.useFixture( fixtures.MockPatch('aodh.keystone_client.get_trusted_client', lambda *args: client)) with mock.patch.object(requests.Session, 'post') as poster: self._msg_notifier.sample({}, 'alarm.update', self._notification(action)) time.sleep(1) poster.assert_called_with( url, data=mock.ANY, headers=mock.ANY) args, kwargs = poster.call_args self.assertEqual( { 'X-Auth-Token': 'token_1234', 'x-openstack-request-id': kwargs['headers']['x-openstack-request-id'], 'content-type': 'application/json' }, kwargs['headers']) self.assertEqual(DATA_JSON, json.loads(kwargs['data'])) def test_zaqar_notifier_action(self): with mock.patch.object(notifier.zaqar.ZaqarAlarmNotifier, '_get_client_conf') as get_conf: action = ('zaqar://?topic=critical' '&subscriber=http://example.com/data' '&subscriber=mailto:foo@example.com&ttl=7200') self._msg_notifier.sample({}, 'alarm.update', self._notification(action)) time.sleep(1) get_conf.assert_called() self.assertEqual(self.zaqar, self.service.notifiers['zaqar'].obj._zclient) self.assertEqual(2, self.zaqar.subscriptions) self.assertEqual(1, self.zaqar.posts) def test_presigned_zaqar_notifier_action(self): action = ('zaqar://?' 'subscriber=http://example.com/data&ttl=7200' '&signature=mysignature&expires=2016-06-29T01:49:56' '&paths=/v2/queues/beijing/messages' '&methods=GET,PATCH,POST,PUT&queue_name=foobar-critical' '&project_id=my_project_id') self._msg_notifier.sample({}, 'alarm.update', self._notification(action)) time.sleep(1) self.assertEqual(1, self.zaqar.subscriptions) self.assertEqual(1, self.zaqar.posts) def test_trust_zaqar_notifier_action(self): client = mock.MagicMock() client.session.auth.get_access.return_value.auth_token = 'token_1234' self.useFixture( fixtures.MockPatch('aodh.keystone_client.get_trusted_client', lambda *args: client)) action = 'trust+zaqar://trust-1234:delete@?queue_name=foobar-critical' self._msg_notifier.sample({}, 'alarm.update', self._notification(action)) time.sleep(1) self.assertEqual(0, self.zaqar.subscriptions) self.assertEqual(1, self.zaqar.posts) class FakeZaqarClient(object): def __init__(self, testcase): self.testcase = testcase self.subscriptions = 0 self.posts = 0 def queue(self, queue_name, **kwargs): self.testcase.assertEqual('foobar-critical', queue_name) self.testcase.assertEqual({}, kwargs) return FakeZaqarQueue(self) def subscription(self, queue_name, **kwargs): self.testcase.assertEqual('foobar-critical', queue_name) subscribers = ['http://example.com/data', 'mailto:foo@example.com'] self.testcase.assertIn(kwargs['subscriber'], subscribers) self.testcase.assertEqual(7200, kwargs['ttl']) self.subscriptions += 1 class FakeZaqarQueue(object): def __init__(self, client): self.client = client self.testcase = client.testcase def post(self, message): expected_message = {'body': {'alarm_name': 'testalarm', 'reason_data': {'test': 'test'}, 'current': 'ALARM', 'alarm_id': 'foobar', 'reason': 'what ?', 'severity': 'critical', 'previous': 'OK'}} 
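        # Only the alarm payload fields are expected in the queued message body;
        # the zaqar action URL and its query parameters are not part of the post.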
self.testcase.assertEqual(expected_message, message) self.client.posts += 1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/unit/test_query.py0000664000175000017500000003243600000000000020361 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation. # All Rights Reserved. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test the methods related to query.""" import datetime from unittest import mock import fixtures from oslo_utils import timeutils from oslotest import base import wsme from aodh.api.controllers.v2 import base as v2_base from aodh.api.controllers.v2 import utils from aodh import storage from aodh.storage import base as alarm_storage_base from aodh.tests import base as tests_base class TestQuery(base.BaseTestCase): def setUp(self): super(TestQuery, self).setUp() self.useFixture(fixtures.MonkeyPatch( 'pecan.response', mock.MagicMock())) def test_get_value_with_integer(self): query = v2_base.Query(field='metadata.size', op='eq', value='123', type='integer') expected = 123 self.assertEqual(expected, query.get_value()) def test_get_value_with_float(self): query = v2_base.Query(field='metadata.size', op='eq', value='123.456', type='float') expected = 123.456 self.assertEqual(expected, query.get_value()) def test_get_value_with_boolean(self): query = v2_base.Query(field='metadata.is_public', op='eq', value='True', type='boolean') expected = True self.assertEqual(expected, query.get_value()) def test_get_value_with_string(self): query = v2_base.Query(field='metadata.name', op='eq', value='linux', type='string') expected = 'linux' self.assertEqual(expected, query.get_value()) def test_get_value_with_datetime(self): query = v2_base.Query(field='metadata.date', op='eq', value='2014-01-01T05:00:00', type='datetime') self.assertIsInstance(query.get_value(), datetime.datetime) self.assertIsNone(query.get_value().tzinfo) def test_get_value_with_integer_without_type(self): query = v2_base.Query(field='metadata.size', op='eq', value='123') expected = 123 self.assertEqual(expected, query.get_value()) def test_get_value_with_float_without_type(self): query = v2_base.Query(field='metadata.size', op='eq', value='123.456') expected = 123.456 self.assertEqual(expected, query.get_value()) def test_get_value_with_boolean_without_type(self): query = v2_base.Query(field='metadata.is_public', op='eq', value='True') expected = True self.assertEqual(expected, query.get_value()) def test_get_value_with_string_without_type(self): query = v2_base.Query(field='metadata.name', op='eq', value='linux') expected = 'linux' self.assertEqual(expected, query.get_value()) def test_get_value_with_bad_type(self): query = v2_base.Query(field='metadata.size', op='eq', value='123.456', type='blob') self.assertRaises(wsme.exc.ClientSideError, query.get_value) def test_get_value_with_bad_value(self): query = v2_base.Query(field='metadata.size', op='eq', value='fake', type='integer') 
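        # 'fake' cannot be cast to the declared integer type, so get_value()
        # surfaces the conversion failure as a ClientSideError.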
self.assertRaises(wsme.exc.ClientSideError, query.get_value) def test_get_value_integer_expression_without_type(self): # bug 1221736 query = v2_base.Query(field='should_be_a_string', op='eq', value='WWW-Layer-4a80714f') expected = 'WWW-Layer-4a80714f' self.assertEqual(expected, query.get_value()) def test_get_value_boolean_expression_without_type(self): # bug 1221736 query = v2_base.Query(field='should_be_a_string', op='eq', value='True or False') expected = 'True or False' self.assertEqual(expected, query.get_value()) def test_get_value_with_syntax_error(self): # bug 1221736 value = 'WWW-Layer-4a80714f-0232-4580-aa5e-81494d1a4147-uolhh25p5xxm' query = v2_base.Query(field='group_id', op='eq', value=value) expected = value self.assertEqual(expected, query.get_value()) def test_get_value_with_syntax_error_colons(self): # bug 1221736 value = 'Ref::StackId' query = v2_base.Query(field='field_name', op='eq', value=value) expected = value self.assertEqual(expected, query.get_value()) class TestQueryToKwArgs(tests_base.BaseTestCase): def setUp(self): super(TestQueryToKwArgs, self).setUp() self.useFixture(fixtures.MockPatchObject( utils, 'sanitize_query', side_effect=lambda x, y, **z: x)) self.useFixture(fixtures.MockPatchObject( utils, '_verify_query_segregation', side_effect=lambda x, **z: x)) def test_sample_filter_single(self): q = [v2_base.Query(field='user_id', op='eq', value='uid')] kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__) self.assertIn('user', kwargs) self.assertEqual(1, len(kwargs)) self.assertEqual('uid', kwargs['user']) def test_sample_filter_multi(self): q = [v2_base.Query(field='user_id', op='eq', value='uid'), v2_base.Query(field='project_id', op='eq', value='pid'), v2_base.Query(field='resource_id', op='eq', value='rid'), v2_base.Query(field='source', op='eq', value='source_name'), v2_base.Query(field='meter', op='eq', value='meter_name')] kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__) self.assertEqual(5, len(kwargs)) self.assertEqual('uid', kwargs['user']) self.assertEqual('pid', kwargs['project']) self.assertEqual('rid', kwargs['resource']) self.assertEqual('source_name', kwargs['source']) self.assertEqual('meter_name', kwargs['meter']) def test_sample_filter_timestamp(self): ts_start = timeutils.utcnow() ts_end = ts_start + datetime.timedelta(minutes=5) q = [v2_base.Query(field='timestamp', op='lt', value=str(ts_end)), v2_base.Query(field='timestamp', op='gt', value=str(ts_start))] kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__) self.assertEqual(4, len(kwargs)) self.assertTimestampEqual(kwargs['start_timestamp'], ts_start) self.assertTimestampEqual(kwargs['end_timestamp'], ts_end) self.assertEqual('gt', kwargs['start_timestamp_op']) self.assertEqual('lt', kwargs['end_timestamp_op']) def test_sample_filter_non_equality_on_metadata(self): queries = [v2_base.Query(field='resource_metadata.image_id', op='gt', value='image', type='string'), v2_base.Query(field='metadata.ramdisk_id', op='le', value='ramdisk', type='string')] with mock.patch('pecan.request') as request: request.headers.return_value = {'X-ProjectId': 'foobar'} self.assertRaises( wsme.exc.InvalidInput, utils.query_to_kwargs, queries, storage.SampleFilter.__init__) def test_sample_filter_invalid_field(self): q = [v2_base.Query(field='invalid', op='eq', value='20')] self.assertRaises( wsme.exc.UnknownArgument, utils.query_to_kwargs, q, storage.SampleFilter.__init__) def test_sample_filter_invalid_op(self): q = [v2_base.Query(field='user_id', op='lt', value='20')] 
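        # Only equality is accepted when filtering on user_id, so the 'lt'
        # operator must be rejected as invalid input.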
self.assertRaises( wsme.exc.InvalidInput, utils.query_to_kwargs, q, storage.SampleFilter.__init__) def test_sample_filter_timestamp_invalid_op(self): ts_start = timeutils.utcnow() q = [v2_base.Query(field='timestamp', op='eq', value=str(ts_start))] self.assertRaises( wsme.exc.InvalidInput, utils.query_to_kwargs, q, storage.SampleFilter.__init__) def test_sample_filter_exclude_internal(self): queries = [v2_base.Query(field=f, op='eq', value='fake', type='string') for f in ['y', 'on_behalf_of', 'x']] with mock.patch('pecan.request') as request: request.headers.return_value = {'X-ProjectId': 'foobar'} self.assertRaises(wsme.exc.ClientSideError, utils.query_to_kwargs, queries, storage.SampleFilter.__init__, internal_keys=['on_behalf_of']) def test_sample_filter_self_always_excluded(self): queries = [v2_base.Query(field='user_id', op='eq', value='20')] with mock.patch('pecan.request') as request: request.headers.return_value = {'X-ProjectId': 'foobar'} kwargs = utils.query_to_kwargs(queries, storage.SampleFilter.__init__) self.assertNotIn('self', kwargs) def test_sample_filter_translation(self): queries = [v2_base.Query(field=f, op='eq', value='fake_%s' % f, type='string') for f in ['user_id', 'project_id', 'resource_id']] with mock.patch('pecan.request') as request: request.headers.return_value = {'X-ProjectId': 'foobar'} kwargs = utils.query_to_kwargs(queries, storage.SampleFilter.__init__) for o in ['user', 'project', 'resource']: self.assertEqual('fake_%s_id' % o, kwargs.get(o)) def test_timestamp_validation(self): q = [v2_base.Query(field='timestamp', op='le', value='123')] exc = self.assertRaises( wsme.exc.InvalidInput, utils.query_to_kwargs, q, storage.SampleFilter.__init__) expected_exc = wsme.exc.InvalidInput('timestamp', '123', 'invalid timestamp format') self.assertEqual(str(expected_exc), str(exc)) def test_get_alarm_changes_filter_valid_fields(self): q = [v2_base.Query(field='abc', op='eq', value='abc')] exc = self.assertRaises( wsme.exc.UnknownArgument, utils.query_to_kwargs, q, alarm_storage_base.Connection.get_alarm_changes) valid_keys = ['alarm_id', 'on_behalf_of', 'project', 'search_offset', 'severity', 'timestamp', 'type', 'user'] msg = ("unrecognized field in query: %s, " "valid keys: %s") % (q, valid_keys) expected_exc = wsme.exc.UnknownArgument('abc', msg) self.assertEqual(str(expected_exc), str(exc)) def test_get_alarms_filter_valid_fields(self): q = [v2_base.Query(field='abc', op='eq', value='abc')] self.assertRaises( wsme.exc.UnknownArgument, utils.query_to_kwargs, q, alarm_storage_base.Connection.get_alarms ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/tests/unit/test_wsme_custom_type.py0000664000175000017500000000213600000000000022614 0ustar00zuulzuul00000000000000# # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslotest import base import wsme from aodh.api.controllers.v2 import base as v2_base class TestWsmeCustomType(base.BaseTestCase): def test_advenum_default(self): class dummybase(wsme.types.Base): ae = v2_base.AdvEnum("name", str, "one", "other", default="other") obj = dummybase() self.assertEqual("other", obj.ae) obj = dummybase(ae="one") self.assertEqual("one", obj.ae) self.assertRaises(wsme.exc.InvalidInput, dummybase, ae="not exists") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/utils.py0000664000175000017500000000125000000000000015162 0ustar00zuulzuul00000000000000# Copyright 2019 - Nokia Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import inspect def get_func_valid_keys(func): return inspect.getfullargspec(func)[0] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/aodh/version.py0000664000175000017500000000120300000000000015505 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pbr.version version_info = pbr.version.VersionInfo('aodh') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0670109 aodh-19.0.0/aodh.egg-info/0000775000175000017500000000000000000000000015144 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866890.0 aodh-19.0.0/aodh.egg-info/PKG-INFO0000664000175000017500000000364100000000000016245 0ustar00zuulzuul00000000000000Metadata-Version: 2.1 Name: aodh Version: 19.0.0 Summary: OpenStack Telemetry Alarming Home-page: https://docs.openstack.org/aodh/latest/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org License: UNKNOWN Description: aodh ==== Aodh is the alarming service for OpenStack. ------------- Documentation ------------- Documentation for the project can be found at: https://docs.openstack.org/aodh/latest/ Release notes can be read online at: https://docs.openstack.org/aodh/latest/contributor/releasenotes/index.html Code Repository --------------- - Server: https://opendev.org/openstack/aodh/ Bug Tracking ------------ Bugs and feature requests are tracked on Launchpad at: https://bugs.launchpad.net/aodh/ IRC --- IRC Channel: #openstack-telemetry on `OFTC`_. .. 
_OFTC: https://oftc.net/ Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Topic :: System :: Monitoring Requires-Python: >=3.8 Provides-Extra: mysql Provides-Extra: postgresql Provides-Extra: test Provides-Extra: zaqar ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866891.0 aodh-19.0.0/aodh.egg-info/SOURCES.txt0000664000175000017500000002600500000000000017033 0ustar00zuulzuul00000000000000.coveragerc .mailmap .stestr.conf .zuul.yaml AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE MAINTAINERS README.rst bindep.txt requirements.txt setup.cfg setup.py test-requirements.txt tox.ini aodh/__init__.py aodh/coordination.py aodh/event.py aodh/i18n.py aodh/keystone_client.py aodh/messaging.py aodh/opts.py aodh/profiler.py aodh/queue.py aodh/service.py aodh/utils.py aodh/version.py aodh.egg-info/PKG-INFO aodh.egg-info/SOURCES.txt aodh.egg-info/dependency_links.txt aodh.egg-info/entry_points.txt aodh.egg-info/not-zip-safe aodh.egg-info/pbr.json aodh.egg-info/requires.txt aodh.egg-info/top_level.txt aodh/api/__init__.py aodh/api/api-paste.ini aodh/api/app.py aodh/api/app.wsgi aodh/api/hooks.py aodh/api/middleware.py aodh/api/policies.py aodh/api/rbac.py aodh/api/controllers/__init__.py aodh/api/controllers/root.py aodh/api/controllers/v2/__init__.py aodh/api/controllers/v2/alarms.py aodh/api/controllers/v2/base.py aodh/api/controllers/v2/capabilities.py aodh/api/controllers/v2/query.py aodh/api/controllers/v2/quotas.py aodh/api/controllers/v2/root.py aodh/api/controllers/v2/utils.py aodh/api/controllers/v2/alarm_rules/__init__.py aodh/api/controllers/v2/alarm_rules/composite.py aodh/api/controllers/v2/alarm_rules/event.py aodh/api/controllers/v2/alarm_rules/gnocchi.py aodh/api/controllers/v2/alarm_rules/loadbalancer.py aodh/api/controllers/v2/alarm_rules/prometheus.py aodh/cmd/__init__.py aodh/cmd/alarm.py aodh/cmd/aodh-config-generator.conf aodh/cmd/aodh-policy-generator.conf aodh/cmd/status.py aodh/cmd/storage.py aodh/conf/__init__.py aodh/conf/defaults.py aodh/evaluator/__init__.py aodh/evaluator/composite.py aodh/evaluator/event.py aodh/evaluator/gnocchi.py aodh/evaluator/loadbalancer.py aodh/evaluator/prometheus.py aodh/evaluator/threshold.py aodh/evaluator/utils.py aodh/locale/de/LC_MESSAGES/aodh.po aodh/locale/en_GB/LC_MESSAGES/aodh.po aodh/locale/es/LC_MESSAGES/aodh.po aodh/locale/fr/LC_MESSAGES/aodh.po aodh/locale/it/LC_MESSAGES/aodh.po aodh/locale/ja/LC_MESSAGES/aodh.po aodh/locale/ko_KR/LC_MESSAGES/aodh.po aodh/locale/pt/LC_MESSAGES/aodh.po aodh/locale/pt_BR/LC_MESSAGES/aodh.po aodh/locale/ru/LC_MESSAGES/aodh.po aodh/locale/zh_CN/LC_MESSAGES/aodh.po aodh/locale/zh_TW/LC_MESSAGES/aodh.po aodh/notifier/__init__.py aodh/notifier/heat.py aodh/notifier/log.py aodh/notifier/rest.py aodh/notifier/test.py aodh/notifier/trust.py aodh/notifier/zaqar.py aodh/storage/__init__.py 
aodh/storage/base.py aodh/storage/impl_log.py aodh/storage/impl_sqlalchemy.py aodh/storage/models.py aodh/storage/sqlalchemy/__init__.py aodh/storage/sqlalchemy/models.py aodh/storage/sqlalchemy/utils.py aodh/storage/sqlalchemy/alembic/alembic.ini aodh/storage/sqlalchemy/alembic/env.py aodh/storage/sqlalchemy/alembic/script.py.mako aodh/storage/sqlalchemy/alembic/versions/006_add_evaluate_timestamp_to_alarm.py aodh/storage/sqlalchemy/alembic/versions/007_add_quota_table.py aodh/storage/sqlalchemy/alembic/versions/12fe8fac9fe4_initial_base.py aodh/storage/sqlalchemy/alembic/versions/367aadf5485f_precisetimestamp_to_datetime.py aodh/storage/sqlalchemy/alembic/versions/6ae0d05d9451_add_reason_column.py aodh/storage/sqlalchemy/alembic/versions/bb07adac380_add_severity_to_alarm_history.py aodh/storage/sqlalchemy/alembic/versions/f8c31b1ffe11_add_index_for_enabled_and_type.py aodh/tests/__init__.py aodh/tests/base.py aodh/tests/constants.py aodh/tests/functional/__init__.py aodh/tests/functional/db.py aodh/tests/functional/api/__init__.py aodh/tests/functional/api/v2/__init__.py aodh/tests/functional/api/v2/policy.yaml-test aodh/tests/functional/api/v2/test_alarm_scenarios.py aodh/tests/functional/api/v2/test_app.py aodh/tests/functional/api/v2/test_complex_query.py aodh/tests/functional/api/v2/test_complex_query_scenarios.py aodh/tests/functional/api/v2/test_quotas.py aodh/tests/functional/hooks/post_test_hook.sh aodh/tests/functional/storage/__init__.py aodh/tests/functional/storage/test_get_connection.py aodh/tests/functional/storage/test_impl_log.py aodh/tests/functional/storage/test_impl_sqlalchemy.py aodh/tests/functional/storage/test_storage_scenarios.py aodh/tests/functional/storage/sqlalchemy/__init__.py aodh/tests/functional/storage/sqlalchemy/test_migrations.py aodh/tests/functional_live/__init__.py aodh/tests/functional_live/gabbi/__init__.py aodh/tests/functional_live/gabbi/test_gabbi_live.py aodh/tests/functional_live/gabbi/gabbits-live/alarms.yaml aodh/tests/unit/__init__.py aodh/tests/unit/test_api_v2_capabilities.py aodh/tests/unit/test_bin.py aodh/tests/unit/test_coordination.py aodh/tests/unit/test_evaluator.py aodh/tests/unit/test_event.py aodh/tests/unit/test_messaging.py aodh/tests/unit/test_notifier.py aodh/tests/unit/test_query.py aodh/tests/unit/test_wsme_custom_type.py aodh/tests/unit/cmd/__init__.py aodh/tests/unit/cmd/test_status.py aodh/tests/unit/evaluator/__init__.py aodh/tests/unit/evaluator/base.py aodh/tests/unit/evaluator/test_base.py aodh/tests/unit/evaluator/test_composite.py aodh/tests/unit/evaluator/test_event.py aodh/tests/unit/evaluator/test_gnocchi.py aodh/tests/unit/evaluator/test_loadbalancer.py aodh/tests/unit/notifier/__init__.py aodh/tests/unit/notifier/base.py aodh/tests/unit/notifier/test_heat.py devstack/README.rst devstack/apache-aodh.template devstack/plugin.sh devstack/settings doc/Makefile doc/requirements.txt doc/source/conf.py doc/source/glossary.rst doc/source/index.rst doc/source/_static/.placeholder doc/source/admin/index.rst doc/source/admin/resource-quota.rst doc/source/admin/telemetry-alarms.rst doc/source/cli/aodh-status.rst doc/source/cli/index.rst doc/source/configuration/aodh-config-file.rst doc/source/configuration/aodh-config-options.rst doc/source/configuration/index.rst doc/source/configuration/policy.rst doc/source/configuration/sample-policy-yaml.rst doc/source/contributor/architecture.rst doc/source/contributor/contributing.rst doc/source/contributor/event-alarm.rst doc/source/contributor/gmr.rst 
doc/source/contributor/index.rst doc/source/contributor/testing.rst doc/source/contributor/install/development.rst doc/source/contributor/install/index.rst doc/source/contributor/install/manual.rst doc/source/contributor/install/mod_wsgi.rst doc/source/contributor/install/uwsgi.rst doc/source/contributor/releasenotes/index.rst doc/source/contributor/webapi/index.rst doc/source/contributor/webapi/v2.rst doc/source/install/configure-common.rst doc/source/install/get_started.rst doc/source/install/index.rst doc/source/install/install-obs.rst doc/source/install/install-rdo.rst doc/source/install/install-ubuntu.rst doc/source/install/next-steps.rst doc/source/install/prereq-common.rst doc/source/install/verify.rst rally-jobs/README.rst rally-jobs/ceilometer.yaml rally-jobs/extra/README.rst rally-jobs/extra/fake.img rally-jobs/plugins/README.rst rally-jobs/plugins/plugin_sample.py releasenotes/notes/.placeholder releasenotes/notes/Add-state-reason-to-the-API-7bc5a9465466db2b.yaml releasenotes/notes/add-a-data-migration-tool-daa14b0cb5d4cc62.yaml releasenotes/notes/add-upgrade-check-framework-ab35e6eb65504bc3.yaml releasenotes/notes/auto-healing-notifier-794b64de776811e9.yaml releasenotes/notes/bug-1929178-46493335946174a5.yaml releasenotes/notes/bug1540395-reason-string-0aad56966007d0e3.yaml releasenotes/notes/composite-alarm-1b1ca9ea0e8f55c8.yaml releasenotes/notes/deprecate-combination-alarms-7ff26b73b61a0e59.yaml releasenotes/notes/deprecate-json-formatted-policy-file-fgb26387a9bdb3b9.yaml releasenotes/notes/deprecate-nosql-backends-13079883eec7e8e5.yaml releasenotes/notes/deprecate-threshold-alarm-d89da351d4f6f50f.yaml releasenotes/notes/deprecate-unused-http_timeout-74fd60a4c26afd88.yaml releasenotes/notes/drop-py-2-7-54a9be4bfb8e9172.yaml releasenotes/notes/drop-python-3-6-and-3-7-89f2b7300c0166ca.yaml releasenotes/notes/enable-aodh-service-multi-processes-67ed9a0b7fac69aa.yaml releasenotes/notes/event-listener-batch-support-04e6ff159ef34d8c.yaml releasenotes/notes/fix-ceilometerclient-init-8bc7a6742937c3e2.yaml releasenotes/notes/fix-combination-alarms-8097adf08b837a50.yaml releasenotes/notes/fix-empty-statistics-3852da99b1c0b297.yaml releasenotes/notes/fix-gnocchi-aggregation-eval-7c2c1c67bdf2d11c.yaml releasenotes/notes/fix-rbac-50825144e0897d7d.yaml releasenotes/notes/fix-ssl-request-8107616b6a85a217.yaml releasenotes/notes/gmr-3dd0a582af010bd4.yaml releasenotes/notes/gnocchi-capability-cache-75d011e77b8ecc72.yaml releasenotes/notes/gnocchi-client-a62ca5a0c717807e.yaml releasenotes/notes/gnocchi-external-resource-owner-3fad253d30746b0d.yaml releasenotes/notes/healthcheck-560700b72ae68e18.yaml releasenotes/notes/heartbeat_interval-d46e0f5efbd56264.yaml releasenotes/notes/ingestion-lag-2317725887287fbc.yaml releasenotes/notes/keystone-v3-support-ffc0f804dbe9d7e9.yaml releasenotes/notes/load-api-paste-ini-from-config-dirs-69480861a9633df4.yaml releasenotes/notes/loadbalancer-evaluator-85732c5e5f6e11e9.yaml releasenotes/notes/migrate-evaluation_interval-c65ba5cbe5fabb35.yaml releasenotes/notes/mysql-precise-datetime-e374c77e6707985e.yaml releasenotes/notes/notifier-batch-listener-01796e2cb06344dd.yaml releasenotes/notes/partition-coordinator-improvement-ff1c257f69f120ac.yaml releasenotes/notes/pecan-debug-removed-7c7a528a1aea98bf.yaml releasenotes/notes/policy-defaults-refresh-95b565bee059f611.yaml releasenotes/notes/policy-in-code-79edd9282f1e4603.yaml releasenotes/notes/queue-communication-1b884feab4078dde.yaml releasenotes/notes/remove-alarm-name-unique-constraint-4fb0b14f3ad46f0b.yaml 
releasenotes/notes/remove-check_watchers-df14cecc258a3510.yaml releasenotes/notes/remove-combination-alarms-a1a53655f3f7d1d1.yaml releasenotes/notes/remove-eventlet-18ada1cff213af5e.yaml releasenotes/notes/remove-no-sql-drivers-21dfdbd750751340.yaml releasenotes/notes/remove-threshold-alarm-a7901991d2da09f2.yaml releasenotes/notes/support-batch-delete-events-32496f15b1169887.yaml releasenotes/notes/support-combination-to-composite-conversion-3e688a6b7d01a57e.yaml releasenotes/notes/ussuri-support-builtin-active-active-aodh-evaluator-a935577e17a211ea.yaml releasenotes/notes/ussuri-support-query-all-projects-alarms-by-admin-3ecccf2217d711ea.yaml releasenotes/notes/ussuri-support-quota-api-92f2fd0643d311ea.yaml releasenotes/source/2023.1.rst releasenotes/source/2023.2.rst releasenotes/source/2024.1.rst releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/liberty.rst releasenotes/source/mitaka.rst releasenotes/source/newton.rst releasenotes/source/ocata.rst releasenotes/source/pike.rst releasenotes/source/queens.rst releasenotes/source/rocky.rst releasenotes/source/stein.rst releasenotes/source/train.rst releasenotes/source/unreleased.rst releasenotes/source/ussuri.rst releasenotes/source/victoria.rst releasenotes/source/wallaby.rst releasenotes/source/xena.rst releasenotes/source/yoga.rst releasenotes/source/zed.rst releasenotes/source/_static/.placeholder releasenotes/source/locale/de/LC_MESSAGES/releasenotes.po releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po releasenotes/source/locale/ja/LC_MESSAGES/releasenotes.po releasenotes/source/locale/ko_KR/LC_MESSAGES/releasenotes.po releasenotes/source/locale/pt_BR/LC_MESSAGES/releasenotes.po././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866890.0 aodh-19.0.0/aodh.egg-info/dependency_links.txt0000664000175000017500000000000100000000000021212 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866890.0 aodh-19.0.0/aodh.egg-info/entry_points.txt0000664000175000017500000000503300000000000020443 0ustar00zuulzuul00000000000000[aodh.alarm.rule] composite = aodh.api.controllers.v2.alarm_rules.composite:composite_rule event = aodh.api.controllers.v2.alarm_rules.event:AlarmEventRule gnocchi_aggregation_by_metrics_threshold = aodh.api.controllers.v2.alarm_rules.gnocchi:AggregationMetricsByIdLookupRule gnocchi_aggregation_by_resources_threshold = aodh.api.controllers.v2.alarm_rules.gnocchi:AggregationMetricByResourcesLookupRule gnocchi_resources_threshold = aodh.api.controllers.v2.alarm_rules.gnocchi:MetricOfResourceRule loadbalancer_member_health = aodh.api.controllers.v2.alarm_rules.loadbalancer:LoadBalancerMemberHealthRule prometheus = aodh.api.controllers.v2.alarm_rules.prometheus:PrometheusRule [aodh.evaluator] composite = aodh.evaluator.composite:CompositeEvaluator gnocchi_aggregation_by_metrics_threshold = aodh.evaluator.gnocchi:GnocchiAggregationMetricsThresholdEvaluator gnocchi_aggregation_by_resources_threshold = aodh.evaluator.gnocchi:GnocchiAggregationResourcesThresholdEvaluator gnocchi_resources_threshold = aodh.evaluator.gnocchi:GnocchiResourceThresholdEvaluator loadbalancer_member_health = aodh.evaluator.loadbalancer:LoadBalancerMemberHealthEvaluator prometheus = aodh.evaluator.prometheus:PrometheusEvaluator [aodh.notifier] http = aodh.notifier.rest:RestAlarmNotifier https = aodh.notifier.rest:RestAlarmNotifier log = 
aodh.notifier.log:LogAlarmNotifier test = aodh.notifier.test:TestAlarmNotifier trust+heat = aodh.notifier.heat:TrustHeatAlarmNotifier trust+http = aodh.notifier.trust:TrustRestAlarmNotifier trust+https = aodh.notifier.trust:TrustRestAlarmNotifier trust+zaqar = aodh.notifier.zaqar:TrustZaqarAlarmNotifier zaqar = aodh.notifier.zaqar:ZaqarAlarmNotifier [aodh.storage] log = aodh.storage.impl_log:Connection mysql = aodh.storage.impl_sqlalchemy:Connection mysql+pymysql = aodh.storage.impl_sqlalchemy:Connection postgresql = aodh.storage.impl_sqlalchemy:Connection sqlite = aodh.storage.impl_sqlalchemy:Connection [console_scripts] aodh-config-generator = aodh.cmd:config_generator aodh-dbsync = aodh.cmd.storage:dbsync aodh-evaluator = aodh.cmd.alarm:evaluator aodh-expirer = aodh.cmd.storage:expirer aodh-listener = aodh.cmd.alarm:listener aodh-notifier = aodh.cmd.alarm:notifier aodh-status = aodh.cmd.status:main [oslo.config.opts] aodh = aodh.opts:list_opts aodh-auth = aodh.opts:list_keystoneauth_opts [oslo.config.opts.defaults] aodh = aodh.conf.defaults:set_lib_defaults [oslo.policy.enforcer] aodh = aodh.api.policies:get_enforcer [oslo.policy.policies] aodh = aodh.api.policies:list_rules [wsgi_scripts] aodh-api = aodh.api.app:build_wsgi_app ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866890.0 aodh-19.0.0/aodh.egg-info/not-zip-safe0000664000175000017500000000000100000000000017372 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866890.0 aodh-19.0.0/aodh.egg-info/pbr.json0000664000175000017500000000005700000000000016624 0ustar00zuulzuul00000000000000{"git_version": "946cbc19", "is_release": true}././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866890.0 aodh-19.0.0/aodh.egg-info/requires.txt0000664000175000017500000000202300000000000017541 0ustar00zuulzuul00000000000000PasteDeploy>=1.5.0 SQLAlchemy>=1.4.1 WSME>=0.12.1 WebOb>=1.2.3 cachetools>=1.1.6 cotyledon>=1.7.3 croniter>=0.3.4 futurist>=0.11.0 gnocchiclient>=6.0.0 jsonschema>=3.2.0 keystoneauth1>=2.1 keystonemiddleware>=5.1.0 lxml>=2.3 oslo.config>=6.8.0 oslo.db>=11.0.0 oslo.i18n>=1.5.0 oslo.log>=4.3.0 oslo.messaging>=5.2.0 oslo.middleware>=3.22.0 oslo.policy>=3.11.0 oslo.reports>=1.18.0 oslo.upgradecheck>=1.3.0 oslo.utils>=4.7.0 pbr>=2.0.0 pecan>=0.8.0 python-dateutil>=2.8.2 python-heatclient>=1.17.0 python-keystoneclient>=1.6.0 python-observabilityclient>=0.0.4 python-octaviaclient>=1.8.0 requests>=2.5.2 stevedore>=1.5.0 tenacity>=3.2.1 tooz>=1.28.0 voluptuous>=0.8.10 [:(python_version<"3.9")] pytz>=2013.6 [:(python_version>="3.9")] tzdata>=2022.4 [mysql] PyMySQL>=0.6.2 SQLAlchemy-Utils>=0.41.0 alembic>=0.7.2 [postgresql] SQLAlchemy-Utils>=0.41.0 alembic>=0.7.2 psycopg2>=2.9.6 [test] SQLAlchemy-Utils>=0.39.0 WebTest>=3.0.0 coverage>=3.6 fixtures>=1.3.1 gabbi>=1.30.0 oslotest>=2.15.0 stestr>=2.0.0 [zaqar] python-zaqarclient>=1.2.0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866890.0 aodh-19.0.0/aodh.egg-info/top_level.txt0000664000175000017500000000000500000000000017671 0ustar00zuulzuul00000000000000aodh ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/bindep.txt0000664000175000017500000000050300000000000014537 0ustar00zuulzuul00000000000000postgresql postgresql-client [platform:dpkg] libpq-dev [platform:dpkg] postgresql-devel [platform:rpm] 
postgresql-server [platform:rpm] mysql-server [!platform:debian] mariadb-server [platform:debian] mysql-client [platform:dpkg !platform:debian] mysql [platform:rpm] gettext [platform:dpkg] python37 [platform:rpm py37] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0950153 aodh-19.0.0/devstack/0000775000175000017500000000000000000000000014343 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/devstack/README.rst0000664000175000017500000000142700000000000016036 0ustar00zuulzuul00000000000000========================= Enabling Aodh in DevStack ========================= 1. Download DevStack:: git clone https://opendev.org/openstack/devstack.git cd devstack 2. Add this repo as an external repository in ``local.conf`` file:: [[local|localrc]] enable_plugin aodh https://opendev.org/openstack/aodh To use stable branches, make sure devstack is on that branch, and specify the branch name to enable_plugin, for example:: enable_plugin aodh https://opendev.org/openstack/aodh stable/mitaka There are some options, such as AODH_BACKEND, defined in ``aodh/devstack/settings``, they can be used to configure the installation of Aodh. If you don't want to use their default value, you can set a new one in ``local.conf``. 3. Run ``stack.sh``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/devstack/apache-aodh.template0000664000175000017500000000074500000000000020240 0ustar00zuulzuul00000000000000Listen %PORT% WSGIDaemonProcess aodh-api processes=%APIWORKERS% threads=10 user=%USER% display-name=%{GROUP} %VIRTUALENV% WSGIProcessGroup aodh-api WSGIScriptAlias / %WSGIAPP% WSGIApplicationGroup %{GLOBAL} = 2.4> ErrorLogFormat "%{cu}t %M" ErrorLog /var/log/%APACHE_NAME%/aodh.log CustomLog /var/log/%APACHE_NAME%/aodh_access.log combined WSGISocketPrefix /var/run/%APACHE_NAME% ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/devstack/plugin.sh0000664000175000017500000002606600000000000016207 0ustar00zuulzuul00000000000000# Install and start **Aodh** service in devstack # # To enable Aodh in devstack add an entry to local.conf that # looks like # # [[local|localrc]] # enable_plugin aodh https://opendev.org/openstack/aodh # # By default all aodh services are started (see # devstack/settings). # # AODH_BACKEND: Database backend (e.g. 'mysql') # AODH_COORDINATION_URL: URL for group membership service provided by tooz. # Support potential entry-points console scripts in VENV or not if [[ ${USE_VENV} = True ]]; then PROJECT_VENV["aodh"]=${AODH_DIR}.venv AODH_BIN_DIR=${PROJECT_VENV["aodh"]}/bin else AODH_BIN_DIR=$(get_python_exec_prefix) fi if [ -z "$AODH_DEPLOY" ]; then # Default AODH_DEPLOY=simple # Fallback to common wsgi devstack configuration if [ "$ENABLE_HTTPD_MOD_WSGI_SERVICES" == "True" ]; then AODH_DEPLOY=mod_wsgi # Deprecated config elif [ -n "$AODH_USE_MOD_WSGI" ] ; then echo_summary "AODH_USE_MOD_WSGI is deprecated, use AODH_DEPLOY instead" if [ "$AODH_USE_MOD_WSGI" == True ]; then AODH_DEPLOY=mod_wsgi fi fi fi # Test if any Aodh services are enabled # is_aodh_enabled function is_aodh_enabled { [[ ,${ENABLED_SERVICES} =~ ,"aodh-" ]] && return 0 return 1 } function aodh_service_url { echo "$AODH_SERVICE_PROTOCOL://$AODH_SERVICE_HOST:$AODH_SERVICE_PORT" } # _install_redis() - Install the redis server and python lib. 
function _aodh_install_redis { if is_ubuntu; then install_package redis-server restart_service redis-server else # This will fail (correctly) where a redis package is unavailable install_package redis restart_service redis fi pip_install_gr redis } # Configure mod_wsgi function _aodh_config_apache_wsgi { sudo mkdir -p $AODH_WSGI_DIR local aodh_apache_conf=$(apache_site_config_for aodh) local venv_path="" # Copy proxy vhost and wsgi file sudo cp $AODH_DIR/aodh/api/app.wsgi $AODH_WSGI_DIR/app if [[ ${USE_VENV} = True ]]; then venv_path="python-path=${PROJECT_VENV["aodh"]}/lib/$(python_version)/site-packages" fi sudo cp $AODH_DIR/devstack/apache-aodh.template $aodh_apache_conf sudo sed -e " s|%PORT%|$AODH_SERVICE_PORT|g; s|%APACHE_NAME%|$APACHE_NAME|g; s|%WSGIAPP%|$AODH_WSGI_DIR/app|g; s|%USER%|$STACK_USER|g; s|%APIWORKERS%|$API_WORKERS|g; s|%VIRTUALENV%|$venv_path|g " -i $aodh_apache_conf } # Install required services for coordination function _aodh_prepare_coordination { if echo $AODH_COORDINATION_URL | grep -q '^memcached:'; then install_package memcached elif echo $AODH_COORDINATION_URL | grep -q '^redis:'; then _aodh_install_redis fi } # Create aodh related accounts in Keystone function _aodh_create_accounts { if is_service_enabled aodh-api; then create_service_user "aodh" "admin" local aodh_service=$(get_or_create_service "aodh" \ "alarming" "OpenStack Alarming Service") get_or_create_endpoint $aodh_service \ "$REGION_NAME" \ "$(aodh_service_url)" \ "$(aodh_service_url)" \ "$(aodh_service_url)" fi } # Activities to do before aodh has been installed. function preinstall_aodh { # Needed to build psycopg2 if is_ubuntu; then install_package libpq-dev else install_package postgresql-devel fi } # Remove WSGI files, disable and remove Apache vhost file function _aodh_cleanup_apache_wsgi { sudo rm -f $AODH_WSGI_DIR/* sudo rm -f $(apache_site_config_for aodh) } # cleanup_aodh() - Remove residual data files, anything left over # from previous runs that a clean run would need to clean up function cleanup_aodh { if [ "$AODH_DEPLOY" == "mod_wsgi" ]; then _aodh_cleanup_apache_wsgi fi } # Set configuration for storage backend. 
function _aodh_configure_storage_backend { if [ "$AODH_BACKEND" = 'mysql' ] || [ "$AODH_BACKEND" = 'postgresql' ] ; then iniset $AODH_CONF database connection $(database_connection_url aodh) else die $LINENO "Unable to configure unknown AODH_BACKEND $AODH_BACKEND" fi } # Configure Aodh function configure_aodh { iniset_rpc_backend aodh $AODH_CONF iniset $AODH_CONF oslo_messaging_notifications topics "$AODH_NOTIFICATION_TOPICS" iniset $AODH_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL" if [[ -n "$AODH_COORDINATION_URL" ]]; then iniset $AODH_CONF coordination backend_url $AODH_COORDINATION_URL fi # Set up logging if [ "$SYSLOG" != "False" ]; then iniset $AODH_CONF DEFAULT use_syslog "True" fi # Format logging if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ] && [ "$AODH_DEPLOY" != "mod_wsgi" ]; then setup_colorized_logging $AODH_CONF DEFAULT fi # The alarm evaluator needs these options to call gnocchi/ceilometer APIs iniset $AODH_CONF service_credentials auth_type password iniset $AODH_CONF service_credentials username aodh iniset $AODH_CONF service_credentials user_domain_id default iniset $AODH_CONF service_credentials project_domain_id default iniset $AODH_CONF service_credentials password $SERVICE_PASSWORD iniset $AODH_CONF service_credentials project_name $SERVICE_PROJECT_NAME iniset $AODH_CONF service_credentials region_name $REGION_NAME iniset $AODH_CONF service_credentials auth_url $KEYSTONE_SERVICE_URI configure_auth_token_middleware $AODH_CONF aodh $AODH_AUTH_CACHE_DIR # Configured storage _aodh_configure_storage_backend # NOTE: This must come after database configuration as those can # call cleanup_aodh which will wipe the WSGI config. if [ "$AODH_DEPLOY" == "mod_wsgi" ]; then _aodh_config_apache_wsgi elif [ "$AODH_DEPLOY" == "uwsgi" ]; then # iniset creates these files when it's called if they don't exist. AODH_UWSGI_FILE=$AODH_CONF_DIR/aodh-uwsgi.ini rm -f "$AODH_UWSGI_FILE" iniset "$AODH_UWSGI_FILE" uwsgi http $AODH_SERVICE_HOST:$AODH_SERVICE_PORT iniset "$AODH_UWSGI_FILE" uwsgi wsgi-file "$AODH_DIR/aodh/api/app.wsgi" # This is running standalone iniset "$AODH_UWSGI_FILE" uwsgi master true # Set die-on-term & exit-on-reload so that uwsgi shuts down iniset "$AODH_UWSGI_FILE" uwsgi die-on-term true iniset "$AODH_UWSGI_FILE" uwsgi exit-on-reload true iniset "$AODH_UWSGI_FILE" uwsgi threads 10 iniset "$AODH_UWSGI_FILE" uwsgi processes $API_WORKERS iniset "$AODH_UWSGI_FILE" uwsgi enable-threads true iniset "$AODH_UWSGI_FILE" uwsgi plugins python iniset "$AODH_UWSGI_FILE" uwsgi lazy-apps true # uwsgi recommends this to prevent thundering herd on accept. iniset "$AODH_UWSGI_FILE" uwsgi thunder-lock true # Override the default size for headers from the 4k default. iniset "$AODH_UWSGI_FILE" uwsgi buffer-size 65535 # Make sure the client doesn't try to re-use the connection. iniset "$AODH_UWSGI_FILE" uwsgi add-header "Connection: close" fi } # init_aodh() - Initialize etc. function init_aodh { # Get aodh keystone settings in place _aodh_create_accounts # Create cache dir sudo install -d -o $STACK_USER $AODH_AUTH_CACHE_DIR rm -f $AODH_AUTH_CACHE_DIR/* if is_service_enabled mysql postgresql; then if [ "$AODH_BACKEND" = 'mysql' ] || [ "$AODH_BACKEND" = 'postgresql' ] ; then recreate_database aodh $AODH_BIN_DIR/aodh-dbsync fi fi } # Install Aodh. # The storage and coordination backends are installed here because the # virtualenv context is active at this point and python drivers need to be # installed. 
The context is not active during preinstall (when it would # otherwise makes sense to do the backend services). function install_aodh { _aodh_prepare_coordination install_aodhclient pip_install -e "$AODH_DIR"[test,$AODH_BACKEND] sudo install -d -o $STACK_USER -m 755 $AODH_CONF_DIR if [ "$AODH_DEPLOY" == "mod_wsgi" ]; then install_apache_wsgi elif [ "$AODH_DEPLOY" == "uwsgi" ]; then pip_install uwsgi fi } # install_aodhclient() - Collect source and prepare function install_aodhclient { if use_library_from_git "python-aodhclient"; then git_clone_by_name "python-aodhclient" setup_dev_lib "python-aodhclient" else pip_install_gr aodhclient fi aodh complete | sudo tee /etc/bash_completion.d/aodh.bash_completion > /dev/null } # start_aodh() - Start running processes, including screen function start_aodh { if [[ "$AODH_DEPLOY" == "mod_wsgi" ]]; then enable_apache_site aodh restart_apache_server elif [ "$AODH_DEPLOY" == "uwsgi" ]; then run_process aodh-api "$AODH_BIN_DIR/uwsgi $AODH_UWSGI_FILE" else run_process aodh-api "$AODH_BIN_DIR/aodh-api -p $AODH_SERVICE_PORT" fi # Only die on API if it was actually intended to be turned on if is_service_enabled aodh-api; then echo "Waiting for aodh-api to start..." if ! wait_for_service $SERVICE_TIMEOUT $(aodh_service_url)/v2/; then die $LINENO "aodh-api did not start" fi fi run_process aodh-notifier "$AODH_BIN_DIR/aodh-notifier --config-file $AODH_CONF" run_process aodh-evaluator "$AODH_BIN_DIR/aodh-evaluator --config-file $AODH_CONF" run_process aodh-listener "$AODH_BIN_DIR/aodh-listener --config-file $AODH_CONF" } # configure_tempest_for_aodh() # NOTE (gmann): Configure all the Tempest setting for Aodh service in # this function. function configure_tempest_for_aodh { if is_service_enabled tempest; then iniset $TEMPEST_CONFIG service_available aodh True fi } # stop_aodh() - Stop running processes function stop_aodh { if [ "$AODH_DEPLOY" == "mod_wsgi" ]; then disable_apache_site aodh restart_apache_server fi # Kill the aodh screen windows for serv in aodh-api aodh-notifier aodh-evaluator aodh-listener; do stop_process $serv done } # This is the main for plugin.sh if is_service_enabled aodh; then if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then # Set up other services echo_summary "Configuring system services for Aodh" preinstall_aodh elif [[ "$1" == "stack" && "$2" == "install" ]]; then echo_summary "Installing Aodh" # Use stack_install_service here to account for virtualenv stack_install_service aodh elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then echo_summary "Configuring Aodh" configure_aodh elif [[ "$1" == "stack" && "$2" == "extra" ]]; then echo_summary "Initializing Aodh" # Tidy base for aodh init_aodh # Start the services start_aodh elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then echo_summary "Configuring Tempest for Aodh" configure_tempest_for_aodh fi if [[ "$1" == "unstack" ]]; then echo_summary "Shutting Down Aodh" stop_aodh fi if [[ "$1" == "clean" ]]; then echo_summary "Cleaning Aodh" cleanup_aodh fi fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/devstack/settings0000664000175000017500000000237200000000000016132 0ustar00zuulzuul00000000000000# turn on all the aodh services by default # API service enable_service aodh-api # Alarming enable_service aodh-notifier aodh-evaluator # Listener for Event Alarming enable_service aodh-listener # Default directories AODH_DIR=$DEST/aodh AODH_CONF_DIR=/etc/aodh AODH_CONF=$AODH_CONF_DIR/aodh.conf 
AODH_AUTH_CACHE_DIR=${AODH_AUTH_CACHE_DIR:-/var/cache/aodh} AODH_WSGI_DIR=${AODH_WSGI_DIR:-/var/www/aodh} # Set up database backend AODH_BACKEND=${AODH_BACKEND:-mysql} # Aodh connection info. AODH_SERVICE_PROTOCOL=http AODH_SERVICE_HOST=$SERVICE_HOST AODH_SERVICE_PORT=${AODH_SERVICE_PORT:-8042} # AODH_DEPLOY defines how Aodh is deployed, allowed values: # - mod_wsgi : Run Aodh under Apache HTTPd mod_wsgi # - simple : Run aodh-api # - uwsgi : Run Aodh under uwsgi # - : Fallback to AODH_USE_MOD_WSGI or ENABLE_HTTPD_MOD_WSGI_SERVICES AODH_DEPLOY=${AODH_DEPLOY} AODH_NOTIFICATION_TOPICS=${AODH_NOTIFICATION_TOPICS:-notifications} AODH_COORDINATION_URL=${AODH_COORDINATION_URL:-} # Set up default directories for client GITDIR["python-aodhclient"]=$DEST/python-aodhclient GITREPO["python-aodhclient"]=$GIT_BASE/openstack/python-aodhclient.git # Get rid of this before done. # Tell emacs to use shell-script-mode ## Local variables: ## mode: shell-script ## End: ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.0950153 aodh-19.0.0/doc/0000775000175000017500000000000000000000000013304 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/doc/Makefile0000664000175000017500000001361300000000000014750 0ustar00zuulzuul00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = build # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" @echo " wadl to build a WADL file for api.openstack.org" clean: -rm -rf $(BUILDDIR)/* html: check-dependencies $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." .PHONY: check-dependencies check-dependencies: @python -c 'import sphinxcontrib.autohttp.flask' >/dev/null 2>&1 || (echo "ERROR: Missing Sphinx dependencies. 
Run: pip install sphinxcontrib-httpdomain" && exit 1) wadl: $(SPHINXBUILD) -b docbook $(ALLSPHINXOPTS) $(BUILDDIR)/wadl @echo @echo "Build finished. The WADL pages are in $(BUILDDIR)/wadl." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Ceilometer.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Ceilometer.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/Ceilometer" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Ceilometer" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." 
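# Example usage (illustrative): the targets above assume that sphinx-build is
# on PATH and that the documentation dependencies from doc/requirements.txt
# are installed, e.g.:
#   pip install -r requirements.txt   # run from within the doc/ directory
#   make html                         # HTML output is written to $(BUILDDIR)/html
#   make linkcheck                    # broken links are reported in $(BUILDDIR)/linkcheck/output.txt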
doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/doc/requirements.txt0000664000175000017500000000022400000000000016566 0ustar00zuulzuul00000000000000openstackdocstheme>=2.2.1 # Apache-2.0 reno>=3.1.0 # Apache-2.0 sphinx>=2.1.1 # BSD sphinxcontrib-httpdomain>=1.8.1 sphinxcontrib-pecanwsme>=0.10.0 ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727866891.099016 aodh-19.0.0/doc/source/0000775000175000017500000000000000000000000014604 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727866891.099016 aodh-19.0.0/doc/source/_static/0000775000175000017500000000000000000000000016232 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/doc/source/_static/.placeholder0000664000175000017500000000000000000000000020503 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727866891.099016 aodh-19.0.0/doc/source/admin/0000775000175000017500000000000000000000000015674 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/doc/source/admin/index.rst0000664000175000017500000000034400000000000017536 0ustar00zuulzuul00000000000000==================== Administration Guide ==================== This guide contains information that will help you understand how to deploy, operate, and upgrade Aodh .. toctree:: telemetry-alarms.rst resource-quota.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/doc/source/admin/resource-quota.rst0000664000175000017500000000414700000000000021412 0ustar00zuulzuul00000000000000.. Copyright (c) 2020 Catalyst Cloud Limited Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ========================= Resource Quota Management ========================= The amount of resources(e.g. alarms) that could be created by each OpenStack project is controlled by quota. The default resource quota for each project is set in Aodh config file as follows unless changed by the cloud administrator via Quota API. .. code-block:: ini [api] user_alarm_quota = -1 project_alarm_quota = -1 alarm_max_actions = -1 user_alarm_quota The default alarm quota for an openstack user, default is unlimited. Sometimes the alarm creation request satisfies the project quota but fails the user quota. project_alarm_quota The default alarm quota for an openstack project, default is unlimited. The cloud administrator can change project quota using Quota API, see examples below. 
alarm_max_actions The maximum number of alarm actions could be created per alarm, default is unlimited. Quota API --------- Aodh Quota API is aiming for multi-tenancy support. By default, only the admin user is able to change the resource quota for projects as defined by the default policy rule 'telemetry:update_quotas'. User alarm quota and alarm action quota are not supported in Quota API. An HTTP request example using ``httpie`` command: .. code-block:: console cat <`_ Log actions These are a lightweight alternative to webhooks, whereby the state transition is simply logged by the ``alarm-notifier``, and are intended primarily for testing purposes by admin users. If none of the above actions satisfy your requirement, you can implement your own alarm actions according to the current suppported actions in ``aodh/notifier`` folder. Using alarms ~~~~~~~~~~~~ Alarm creation -------------- Threshold based alarm ````````````````````` An example of creating a Gnocchi threshold-oriented alarm, based on an upper bound on the CPU utilization for a particular instance: .. code-block:: console $ aodh alarm create \ --name cpu_hi \ --type gnocchi_resources_threshold \ --description 'instance running hot' \ --metric cpu_util \ --threshold 70.0 \ --comparison-operator gt \ --aggregation-method mean \ --granularity 600 \ --evaluation-periods 3 \ --alarm-action 'log://' \ --resource-id INSTANCE_ID \ --resource-type instance This creates an alarm that will fire when the average CPU utilization for an individual instance exceeds 70% for three consecutive 10 minute periods. The notification in this case is simply a log message, though it could alternatively be a webhook URL. .. note:: Alarm names must be unique for the alarms associated with an individual project. Administrator can limit the maximum resulting actions for three different states, and the ability for a normal user to create ``log://`` and ``test://`` notifiers is disabled. This prevents unintentional consumption of disk and memory resources by the Telemetry service. The sliding time window over which the alarm is evaluated is 30 minutes in this example. This window is not clamped to wall-clock time boundaries, rather it's anchored on the current time for each evaluation cycle, and continually creeps forward as each evaluation cycle rolls around (by default, this occurs every minute). .. note:: The alarm granularity must match the granularities of the metric configured in Gnocchi. Otherwise the alarm will tend to flit in and out of the ``insufficient data`` state due to the mismatch between the actual frequency of datapoints in the metering store and the statistics queries used to compare against the alarm threshold. If a shorter alarm period is needed, then the corresponding interval should be adjusted in the ``pipeline.yaml`` file. Other notable alarm attributes that may be set on creation, or via a subsequent update, include: state The initial alarm state (defaults to ``insufficient data``). description A free-text description of the alarm (defaults to a synopsis of the alarm rule). enabled True if evaluation and actioning is to be enabled for this alarm (defaults to ``True``). repeat-actions True if actions should be repeatedly notified while the alarm remains in the target state (defaults to ``False``). ok-action An action to invoke when the alarm state transitions to ``ok``. insufficient-data-action An action to invoke when the alarm state transitions to ``insufficient data``. 
time-constraint Used to restrict evaluation of the alarm to certain times of the day or days of the week (expressed as ``cron`` expression with an optional timezone). Composite alarm ``````````````` An example of creating a composite alarm, based on the composite of two basic rules: .. code-block:: console $ aodh alarm create \ --name meta \ --type composite \ --composite-rule '{"or": [{"threshold": 0.8, "metric": "cpu_util", \ "type": "gnocchi_resources_threshold", "resource_id": INSTANCE_ID1, \ "resource_type": "instance", "aggregation_method": "last"}, \ {"threshold": 0.8, "metric": "cpu_util", \ "type": "gnocchi_resources_threshold", "resource_id": INSTANCE_ID2, \ "resource_type": "instance", "aggregation_method": "last"}]}' \ --alarm-action 'http://example.org/notify' This creates an alarm that will fire when either of two basic rules meets the condition. The notification in this case is a webhook call. Any number of basic rules can be composed into a composite rule this way, using either ``and`` or ``or``. Additionally, composite rules can contain nested conditions: .. note:: Observe the *underscore in* ``resource_id`` & ``resource_type`` in composite rule as opposed to ``--resource-id`` & ``--resource-type`` CLI arguments. .. code-block:: console $ aodh alarm create \ --name meta \ --type composite \ --composite-rule '{"or": [ALARM_1, {"and": [ALARM_2, ALARM_3]}]}' \ --alarm-action 'http://example.org/notify' Event based alarm ````````````````` An example of creating a event alarm based on power state of instance: .. code-block:: console $ aodh alarm create \ --type event \ --name instance_off \ --description 'Instance powered OFF' \ --event-type "compute.instance.power_off.*" \ --enable True \ --query "traits.instance_id=string::INSTANCE_ID" \ --alarm-action 'log://' \ --ok-action 'log://' \ --insufficient-data-action 'log://' Valid list of ``event-type`` and ``traits`` can be found in ``event_definitions.yaml`` file . ``--query`` may also contain mix of traits for example to create alarm when instance is powered on but went into error state: .. code-block:: console $ aodh alarm create \ --type event \ --name instance_on_but_in_err_state \ --description 'Instance powered ON but in error state' \ --event-type "compute.instance.power_on.*" \ --enable True \ --query "traits.instance_id=string::INSTANCE_ID;traits.state=string::error" \ --alarm-action 'log://' \ --ok-action 'log://' \ --insufficient-data-action 'log://' Sample output of alarm type **event**: .. code-block:: console +---------------------------+---------------------------------------------------------------+ | Field | Value | +---------------------------+---------------------------------------------------------------+ | alarm_actions | ['log://'] | | alarm_id | 15c0da26-524d-40ad-8fba-3e55ee0ddc91 | | description | Instance powered ON but in error state | | enabled | True | | event_type | compute.instance.power_on.* | | insufficient_data_actions | ['log://'] | | name | instance_on_state_err | | ok_actions | ['log://'] | | project_id | 9ee200732f4c4d10a6530bac746f1b6e | | query | traits.instance_id = bb912729-fa51-443b-bac6-bf4c795f081d AND | | | traits.state = error | | repeat_actions | False | | severity | low | | state | insufficient data | | state_timestamp | 2017-07-15T02:28:31.114455 | | time_constraints | [] | | timestamp | 2017-07-15T02:28:31.114455 | | type | event | | user_id | 89b4e48bcbdb4816add7800502bd5122 | +---------------------------+---------------------------------------------------------------+ .. 
note:: To enable event alarms please refer `Configuration `_ Alarm retrieval --------------- You can display all your alarms via (some attributes are omitted for brevity): .. code-block:: console $ aodh alarm list +----------+-----------+--------+-------------------+----------+---------+ | alarm_id | type | name | state | severity | enabled | +----------+-----------+--------+-------------------+----------+---------+ | ALARM_ID | threshold | cpu_hi | insufficient data | low | True | +----------+-----------+--------+-------------------+----------+---------+ In this case, the state is reported as ``insufficient data`` which could indicate that: * meters have not yet been gathered about this instance over the evaluation window into the recent past (for example a brand-new instance) * *or*, that the identified instance is not visible to the user/project owning the alarm * *or*, simply that an alarm evaluation cycle hasn't kicked off since the alarm was created (by default, alarms are evaluated once per minute). .. note:: The visibility of alarms depends on the role and project associated with the user issuing the query: * admin users see *all* alarms, regardless of the owner * non-admin users see only the alarms associated with their project (as per the normal project segregation in OpenStack) Alarm update ------------ Once the state of the alarm has settled down, we might decide that we set that bar too low with 70%, in which case the threshold (or most any other alarm attribute) can be updated thusly: .. code-block:: console $ aodh alarm update ALARM_ID --threshold 75 The change will take effect from the next evaluation cycle, which by default occurs every minute. Most alarm attributes can be changed in this way, but there is also a convenient short-cut for getting and setting the alarm state: .. code-block:: console $ openstack alarm state get ALARM_ID $ openstack alarm state set --state ok ALARM_ID Over time the state of the alarm may change often, especially if the threshold is chosen to be close to the trending value of the statistic. You can follow the history of an alarm over its lifecycle via the audit API: .. 
code-block:: console $ aodh alarm-history show ALARM_ID +-----------+------------------+---------------------------------------------------+----------+ | timestamp | type | detail | event_id | +-----------+------------------+---------------------------------------------------+----------+ | TIME_3 | rule change | {"rule": {"evaluation_periods": 3, "metric": | EVENT_ID | | | | "cpu_util", "resource_id": RESOURCE_ID, | | | | | "aggregation_method": "mean", "granularity":600, | | | | | "threshold": 75.0, "comparison_operator": "gt" | | | | | "resource_type": "instance"}} | | | TIME_2 | state transition | {"transition_reason": "Transition to alarm due 3 | EVENT_ID | | | | samples outside threshold, most recent: | | | | | 81.4108514719", "state": "alarm"} | | | TIME_1 | state transition | {"transition_reason": "Transition to ok due to 1 | EVENT_ID | | | | samples inside threshold, most recent: | | | | | 67.952938019089", "state": "ok"} | | | TIME_0 | creation | {"alarm_actions": ["log://"], "user_id": USER_ID, | EVENT_ID | | | | "name": "cup_hi", "state": "insufficient data", | | | | | "timestamp": TIME_0, "description": "instance | | | | | running hot", "enabled": true, "state_timestamp": | | | | | TIME_0, "rule": {"evaluation_periods": 3, | | | | | "metric": "cpu_util", "resource_id": RESOURCE_ID, | | | | | "aggregation_method": "mean", "granularity": 600, | | | | | "resource_type": "instance"}, "alarm_id": | | | | | ALARM_ID, "time_constraints": [], | | | | | "insufficient_data_actions": [], | | | | | "repeat_actions": false, "ok_actions": [], | | | | | "project_id": PROJECT_ID, "type": | | | | | "gnocchi_resources_threshold", "severity": "low"} | | +-----------+------------------+---------------------------------------------------+----------+ Alarm deletion -------------- An alarm that is no longer required can be disabled so that it is no longer actively evaluated: .. code-block:: console $ aodh alarm update --enabled False ALARM_ID or even deleted permanently (an irreversible step): .. code-block:: console $ aodh alarm delete ALARM_ID Debug alarms ------------ A good place to start is to add ``--debug`` flag when creating or updating an alarm. For example: .. code-block:: console $ aodh --debug alarm create Look for the state to transition when event is triggered in ``/var/log/aodh/listener.log`` file. For example, the below logs shows the transition state of alarm with id ``85a2942f-a2ec-4310-baea-d58f9db98654`` triggered by event id ``abe437a3-b75b-40b4-a3cb-26022a919f5e`` .. code-block:: console 2017-07-15 07:03:20.149 2866 INFO aodh.evaluator [-] alarm 85a2942f-a2ec-4310-baea-d58f9db98654 transitioning to alarm because Event hits the query . The below entry in ``/var/log/aodh/notifier.log`` also confirms that event id ``abe437a3-b75b-40b4-a3cb-26022a919f5e`` hits the query matching instance id ``bb912729-fa51-443b-bac6-bf4c795f081d`` .. code-block:: console 2017-07-15 07:03:24.071 2863 INFO aodh.notifier.log [-] Notifying alarm instance_off 85a2942f-a2ec-4310-baea-d58f9db98654 of low priority from insufficient data to alarm with action log: because Event hits the query ``aodh alarm-history`` as mentioned earlier will also display the transition: .. 
code-block:: console $ aodh alarm-history show 85a2942f-a2ec-4310-baea-d58f9db98654 +----------------------------+------------------+--------------------------------------------------------------------------------------------------------------------------+--------------------------------------+ | timestamp | type | detail | event_id | +----------------------------+------------------+--------------------------------------------------------------------------------------------------------------------------+--------------------------------------+ | 2017-07-15T01:33:20.390623 | state transition | {"transition_reason": "Event hits | c5ca92ae-584b-4da6-a12c-b7a00dd39fef | | | | the query .", "state": "alarm"} | | | 2017-07-15T01:31:14.516188 | creation | {"alarm_actions": ["log://"], "user_id": "89b4e48bcbdb4816add7800502bd5122", "name": "instance_off", "state": | fb31f4c2-e357-44c3-9b6a-bd2aaaa4ae68 | | | | "insufficient data", "timestamp": "2017-07-15T01:31:14.516188", "description": "event_instance_power_off", "enabled": | | | | | true, "state_timestamp": "2017-07-15T01:31:14.516188", "rule": {"query": [{"field": "traits.instance_id", "type": | | | | | "string", "value": "bb912729-fa51-443b-bac6-bf4c795f081d", "op": "eq"}], "event_type": "compute.instance.power_off.*"}, | | | | | "alarm_id": "85a2942f-a2ec-4310-baea-d58f9db98654", "time_constraints": [], "insufficient_data_actions": ["log://"], | | | | | "repeat_actions": false, "ok_actions": ["log://"], "project_id": "9ee200732f4c4d10a6530bac746f1b6e", "type": "event", | | | | | "severity": "low"} | | +----------------------------+------------------+--------------------------------------------------------------------------------------------------------------------------+--------------------------------------+ ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727866891.099016 aodh-19.0.0/doc/source/cli/0000775000175000017500000000000000000000000015353 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/doc/source/cli/aodh-status.rst0000664000175000017500000000357600000000000020354 0ustar00zuulzuul00000000000000=========== aodh-status =========== -------------------------------------- CLI interface for Aodh status commands -------------------------------------- Synopsis ======== :: aodh-status [] Description =========== :program:`aodh-status` is a tool that provides routines for checking the status of a Aodh deployment. Options ======= The standard pattern for executing a :program:`aodh-status` command is:: aodh-status [] Run without arguments to see a list of available command categories:: aodh-status Categories are: * ``upgrade`` Detailed descriptions are below: You can also run with a category argument such as ``upgrade`` to see a list of all commands in that category:: aodh-status upgrade These sections describe the available categories and arguments for :program:`aodh-status`. Upgrade ~~~~~~~ .. _aodh-status-checks: ``aodh-status upgrade check`` Performs a release-specific readiness check before restarting services with new code. For example, missing or changed configuration options, incompatible object states, or other conditions that could lead to failures while upgrading. **Return Codes** .. list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - All upgrade readiness checks passed successfully and there is nothing to do. 
* - 1 - At least one check encountered an issue and requires further investigation. This is considered a warning but the upgrade may be OK. * - 2 - There was an upgrade status check failure that needs to be investigated. This should be considered something that stops an upgrade. * - 255 - An unexpected error occurred. **History of Checks** **8.0.0 (Stein)** * Sample check to be filled in with checks as they are added in Stein. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/doc/source/cli/index.rst0000664000175000017500000000030200000000000017207 0ustar00zuulzuul00000000000000====================== Aodh CLI Documentation ====================== In this section you will find information on Aodh’s command line interface. .. toctree:: :maxdepth: 1 aodh-status ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/doc/source/conf.py0000664000175000017500000002306600000000000016112 0ustar00zuulzuul00000000000000# # Aodh documentation build configuration file, created by # sphinx-quickstart on Thu Oct 27 11:38:59 2011. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os BASE_DIR = os.path.dirname(os.path.abspath(__file__)) ROOT = os.path.abspath(os.path.join(BASE_DIR, "..", "..")) sys.path.insert(0, ROOT) sys.path.insert(0, BASE_DIR) # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ---------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. # They can be extensions coming with Sphinx (named 'sphinx.ext.*') # or your custom ones. extensions = [ 'openstackdocstheme', 'sphinx.ext.autodoc', 'wsmeext.sphinxext', 'sphinx.ext.coverage', 'sphinx.ext.viewcode', 'sphinxcontrib.pecanwsme.rest', 'stevedore.sphinxext', 'oslo_config.sphinxconfiggen', 'oslo_config.sphinxext', 'oslo_policy.sphinxpolicygen', 'oslo_policy.sphinxext', 'sphinxcontrib.httpdomain', ] config_generator_config_file = os.path.join(ROOT, 'aodh/cmd/aodh-config-generator.conf') sample_config_basename = '_static/aodh' policy_generator_config_file = ( '../../aodh/cmd/aodh-policy-generator.conf' ) sample_policy_basename = '_static/aodh' wsme_protocols = ['restjson', 'restxml'] todo_include_todos = True # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. copyright = '2012-2015, OpenStack Foundation' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. 
#today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['**/#*', '**~', '**/#*#', '**/*alembic*'] # The reST default role (used for this markup: `text`) # to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] primary_domain = 'py' nitpicky = False # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme_path = ['.'] html_theme = 'openstackdocs' # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [openstackdocstheme.get_html_theme_path()] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # openstackdocstheme options openstackdocs_repo_name = 'openstack/aodh' openstackdocs_pdf_link = True openstackdocs_bug_project = 'aodh' openstackdocs_bug_tag = '' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. 
htmlhelp_basename = 'Aodhdoc' # -- Options for LaTeX output ------------------------------------------------- # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'doc-aodh.tex', 'Aodh Documentation', 'OpenStack Foundation', 'manual'), ] latex_elements = { 'makeindex': '', 'printindex': '', 'preamble': r'\setcounter{tocdepth}{3}', 'maxlistdepth': '10', } # Disable usage of xindy https://bugzilla.redhat.com/show_bug.cgi?id=1643664 latex_use_xindy = False # Disable smartquotes, they don't work in latex smartquotes_excludes = {'builders': ['latex']} latex_domain_indices = False # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output ------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'aodh', 'Aodh Documentation', ['OpenStack'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ----------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'Aodh', 'Aodh Documentation', 'OpenStack', 'Aodh', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # -- Options for Epub output -------------------------------------------------- # Bibliographic Dublin Core info. epub_title = 'Aodh' epub_author = 'OpenStack' epub_publisher = 'OpenStack' epub_copyright = '2012-2015, OpenStack' # The language of the text. It defaults to the language option # or en if the language is not set. #epub_language = '' # The scheme of the identifier. Typical schemes are ISBN or URL. #epub_scheme = '' # The unique identifier of the text. This can be an ISBN number # or the project homepage. #epub_identifier = '' # A unique identification for the text. #epub_uid = '' # A tuple containing the cover image and cover page html template filenames. #epub_cover = () # HTML files that should be inserted before the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_pre_files = [] # HTML files shat should be inserted after the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_post_files = [] # A list of files that should not be packed into the epub file. #epub_exclude_files = [] # The depth of the table of contents in toc.ncx. #epub_tocdepth = 3 # Allow duplicate toc entries. 
#epub_tocdup = True # NOTE(dhellmann): pbr used to set this option but now that we are # using Sphinx>=1.6.2 it does not so we have to set it ourselves. suppress_warnings = [ 'app.add_directive', 'app.add_role', 'app.add_generic_role', 'app.add_node', 'image.nonlocal_uri', ] ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727866891.099016 aodh-19.0.0/doc/source/configuration/0000775000175000017500000000000000000000000017453 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/doc/source/configuration/aodh-config-file.rst0000664000175000017500000000116300000000000023301 0ustar00zuulzuul00000000000000.. _aodh-config-file: Aodh Sample Configuration File ============================== Configure Aodh by editing /etc/aodh/aodh.conf. No config file is provided with the source code, it will be created during the installation. In case where no configuration file was installed, one can be easily created by running:: aodh-config-generator .. only:: html The following is a sample Aodh configuration for adaptation and use. It is auto-generated from Aodh when this documentation is built, and can also be viewed in `file form <../_static/aodh.conf.sample>`_. .. literalinclude:: ../_static/aodh.conf.sample ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/doc/source/configuration/aodh-config-options.rst0000664000175000017500000000015000000000000024050 0ustar00zuulzuul00000000000000Aodh Configuration Options ========================== .. show-options:: :split-namespaces: aodh ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/doc/source/configuration/index.rst0000664000175000017500000000026300000000000021315 0ustar00zuulzuul00000000000000.. _configuring: =================== Configuration Guide =================== .. toctree:: aodh-config-file.rst aodh-config-options.rst policy sample-policy-yaml ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/doc/source/configuration/policy.rst0000664000175000017500000000121400000000000021502 0ustar00zuulzuul00000000000000===================================== Aodh Sample Policy Configuration File ===================================== .. warning:: JSON formatted policy file is deprecated since Aodh 12.0.0 (Wallaby). This `oslopolicy-convert-json-to-yaml`__ tool will migrate your existing JSON-formatted policy file to YAML in a backward-compatible way. .. __: https://docs.openstack.org/oslo.policy/latest/cli/oslopolicy-convert-json-to-yaml.html The following is an overview of all available policies in Aodh. For a sample configuration file, refer to :doc:`sample-policy-yaml`. .. show-policy:: :config-file: ../../aodh/cmd/aodh-policy-generator.conf ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/doc/source/configuration/sample-policy-yaml.rst0000664000175000017500000000100400000000000023716 0ustar00zuulzuul00000000000000=========== policy.yaml =========== .. warning:: JSON formatted policy file is deprecated since Aodh 12.0.0 (Wallaby). This `oslopolicy-convert-json-to-yaml`__ tool will migrate your existing JSON-formatted policy file to YAML in a backward-compatible way. .. 
__: https://docs.openstack.org/oslo.policy/latest/cli/oslopolicy-convert-json-to-yaml.html Use the ``policy.yaml`` file to define additional access controls that will be applied to Aodh: .. literalinclude:: ../_static/aodh.policy.yaml.sample ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.1030166 aodh-19.0.0/doc/source/contributor/0000775000175000017500000000000000000000000017156 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/doc/source/contributor/architecture.rst0000664000175000017500000000355000000000000022375 0ustar00zuulzuul00000000000000.. _architecture: =================== System Architecture =================== High-Level Architecture ======================= Each of Aodh's services is designed to scale horizontally. Additional workers and nodes can be added depending on the expected load. Aodh provides daemons to evaluate and notify based on defined alarming rules. Evaluating the data =================== Alarming Service ---------------- The alarming component of Aodh, first delivered in the Ceilometer service during the Havana development cycle and then split out into this independent project in the Liberty development cycle, allows you to set alarms based on threshold evaluation for a collection of samples or a dedicated event. An alarm can be set on a single meter, or on a combination. For example, you may want to trigger an alarm when the memory consumption reaches 70% on a given instance if the instance has been up for more than 10 minutes. To set up an alarm, you will call :ref:`Aodh's API server ` specifying the alarm conditions and an action to take. Of course, if you are not an administrator of the cloud itself, you can only set alarms on meters for your own components. There can be multiple forms of actions, but only a few have been implemented so far: 1. :term:`HTTP callback`: you provide a URL to be called whenever the alarm has been set off. The payload of the request contains all the details of why the alarm was triggered. 2. :term:`log`: mostly useful for debugging, stores alarms in a log file. 3. :term:`zaqar`: sends a notification to the messaging service via the Zaqar API. Alarm Rules =========== .. list-plugins:: aodh.alarm.rule :detailed: Alarm Evaluators ================ .. list-plugins:: aodh.evaluator :detailed: Alarm Notifiers =============== .. list-plugins:: aodh.notifier :detailed: Alarm Storage ============= .. list-plugins:: aodh.storage :detailed: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/doc/source/contributor/contributing.rst0000664000175000017500000000230700000000000022421 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _contributing: ==================== Contributing to Aodh ==================== Aodh follows the same workflow as other OpenStack projects. To start contributing to Aodh, please follow the workflow found here_. ..
_here: https://wiki.openstack.org/wiki/Gerrit_Workflow Project Hosting Details ======================= :Bug tracker: https://bugs.launchpad.net/aodh :Mailing list: http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-dev (prefix subjects with ``[Aodh]`` for faster responses) :Code Hosting: https://opendev.org/openstack/aodh/ :Code Review: https://review.opendev.org/#/q/status:open+project:openstack/aodh,n,z ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/doc/source/contributor/event-alarm.rst0000664000175000017500000000621500000000000022127 0ustar00zuulzuul00000000000000.. Copyright 2014 Huawei Technologies Co., Ltd. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =========== Event alarm =========== Aodh allows users to define alarms which can be evaluated based on events passed from other OpenStack services. The events can be emitted when resources from other OpenStack services have been updated, created or deleted, such as 'compute.instance.reboot.end' or 'scheduler.select_destinations.end'. When creating an alarm of the "event" type, an event_type can be specified to identify the type of event that will trigger the alarm. The event_type field supports fuzzy matching with wildcards. Additionally, users can also specify query conditions to filter the specific events used to trigger the alarm. This feature was implemented with the proposal event-alarm_. .. _event-alarm: https://blueprints.launchpad.net/ceilometer/+spec/event-alarm-evaluator Usage ===== When creating an alarm of the "event" type, an "event_rule" needs to be specified, which includes an "event_type" field and a "query" field. The "event_type" field allows users to specify the event type used to match incoming events when evaluating the alarm, and the "query" field includes a list of query conditions used to filter the specific events when evaluating the alarm. The following is an example of an event alarm rule:: "event_rule": { "event_type": "compute.instance.update", "query" : [ { "field" : "traits.instance_id", "type" : "string", "value" : "153462d0-a9b8-4b5b-8175-9e4b05e9b856", "op" : "eq", }, { "field" : "traits.state", "type" : "string", "value" : "error", "op" : "eq", }, ] } Configuration ============= To enable this functionality, configure Ceilometer to publish events to the queue that the aodh-listener service listens on. The *event_alarm_topic* config option of Aodh identifies which messaging topic the aodh-listener listens on; the default value is "alarm.all". On the Ceilometer side, a publisher of notifier type needs to be configured in the event pipeline config file (``event_pipeline.yaml`` by default), and the notifier should use the same messaging topic as the *event_alarm_topic* option.
For an example:: --- sources: - name: event_source events: - "*" sinks: - event_sink sinks: - name: event_sink transformers: publishers: - notifier:// - notifier://?topic=alarm.all ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/doc/source/contributor/gmr.rst0000664000175000017500000000576000000000000020505 0ustar00zuulzuul00000000000000.. Copyright (c) 2021 OpenStack Foundation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Guru Meditation Reports ======================= Aodh contains a mechanism whereby developers and system administrators can generate a report about the state of a running Aodh executable. This report is called a *Guru Meditation Report* (*GMR* for short). Generating a GMR ---------------- A *GMR* can be generated by sending the *USR1* signal to any Aodh process with support (see below). The *GMR* will then be outputted standard error for that particular process. For example, suppose that ``aodh-listener`` has process id ``8675``, and was run with ``2>/var/log/aodh/aodh-listener.log``. Then, ``kill -USR1 8675`` will trigger the Guru Meditation report to be printed to ``/var/log/aodh/aodh-listener.log``. Structure of a GMR ------------------ The *GMR* is designed to be extensible; any particular executable may add its own sections. However, the base *GMR* consists of several sections: Package Shows information about the package to which this process belongs, including version information Threads Shows stack traces and thread ids for each of the threads within this process Green Threads Shows stack traces for each of the green threads within this process (green threads don't have thread ids) Configuration Lists all the configuration options currently accessible via the CONF object for the current process Adding Support for GMRs to New Executables ------------------------------------------ Adding support for a *GMR* to a given executable is fairly easy. First import the module (currently residing in oslo-incubator), as well as the Aodh version module: .. code-block:: python from oslo_reports import guru_meditation_report as gmr from aodh import version Then, register any additional sections (optional): .. code-block:: python TextGuruMeditation.register_section('Some Special Section', some_section_generator) Finally (under main), before running the "main loop" of the executable (usually ``service.server(server)`` or something similar), register the *GMR* hook: .. code-block:: python TextGuruMeditation.setup_autorun(version) Extending the GMR ----------------- As mentioned above, additional sections can be added to the GMR for a particular executable. For more information, see the inline documentation about oslo.reports: `oslo.reports `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/doc/source/contributor/index.rst0000664000175000017500000000243500000000000021023 0ustar00zuulzuul00000000000000.. 
Copyright 2012 Nicolas Barcet for Canonical Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================= Contributor Guide ================= In the Contributions Guide, you will find documented policies for developing with Aodh. This includes the processes we use for bugs, contributor onboarding, core reviewer memberships, and other procedural items. Overview ======== .. toctree:: :maxdepth: 2 architecture webapi/index Developer Documentation ======================= .. toctree:: :maxdepth: 2 install/index testing contributing event-alarm gmr Appendix ======== .. toctree:: :maxdepth: 1 releasenotes/index .. update index Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.1030166 aodh-19.0.0/doc/source/contributor/install/0000775000175000017500000000000000000000000020624 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/doc/source/contributor/install/development.rst0000664000175000017500000000317300000000000023704 0ustar00zuulzuul00000000000000.. Copyright 2012 Nicolas Barcet for Canonical 2013 New Dream Network, LLC (DreamHost) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================== Installing development sandbox ============================== Configuring devstack ==================== .. index:: double: installing; devstack 1. Download devstack_. 2. Create a ``local.conf`` file as input to devstack. .. note:: ``local.conf`` replaces the former configuration file called ``localrc``. If you used localrc before, remove it to switch to using the new file. For further information see the `devstack configuration `_. 3. The aodh services are not enabled by default, so they must be enabled in ``local.conf`` before running ``stack.sh``. This example ``local.conf`` file shows all of the settings required for aodh:: [[local|localrc]] # Enable the aodh alarming services enable_plugin aodh https://opendev.org/openstack/aodh master .. _devstack: https://docs.openstack.org/devstack/latest/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/doc/source/contributor/install/index.rst0000664000175000017500000000141200000000000022463 0ustar00zuulzuul00000000000000.. Copyright 2013 New Dream Network, LLC (DreamHost) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _install: =============== Installing Aodh =============== .. toctree:: :maxdepth: 2 development manual mod_wsgi uwsgi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/doc/source/contributor/install/manual.rst0000664000175000017500000000272600000000000022642 0ustar00zuulzuul00000000000000.. Copyright 2012 Nicolas Barcet for Canonical 2013 New Dream Network, LLC (DreamHost) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _installing_manually: =================== Installing Manually =================== Installing the API Server ========================= There are two recommended ways to start api server: 1. Starting API server through mod_wsgi_; 2. Starting API server through: uwsgi_. Not recommended, for testing purpose, we can also start api server by aodh-api binary:: aodh-api --port 8042 -- --config-file /etc/aodh/aodh.conf Database configuration ====================== You can use any SQLAlchemy-supported DB such as `PostgreSQL` or `MySQL`. To use MySQL as the storage backend, change the 'database' section in aodh.conf as follows:: [database] connection = mysql+pymysql://username:password@host/aodh?charset=utf8 .. _mod_wsgi: ../install/mod_wsgi.html .. _uwsgi: ../install/uwsgi.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/doc/source/contributor/install/mod_wsgi.rst0000664000175000017500000000332300000000000023167 0ustar00zuulzuul00000000000000.. Copyright 2013 New Dream Network, LLC (DreamHost) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================== Installing the API behind mod_wsgi ================================== Aodh comes with a WSGI application file named `aodh/api/app.wsgi` for configuring the API service to run behind Apache with ``mod_wsgi``. This file is installed with the rest of the Aodh application code, and should not need to be modified. 
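For orientation, ``mod_wsgi`` simply imports the module pointed to by ``WSGIScriptAlias`` and calls its module-level ``application`` callable for every request; the toy example below shows the general shape of such a file (it is illustrative only, not Aodh's actual entry point, so keep using the shipped ``aodh/api/app.wsgi``)::

    # Illustrative sketch of a generic WSGI file -- Aodh already ships
    # aodh/api/app.wsgi, which should be used unmodified.
    def application(environ, start_response):
        # mod_wsgi calls this callable once per request.
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'hello from a WSGI application']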
You can then configure Apache with something like this::

    Listen 8042

    <VirtualHost *>
        WSGIDaemonProcess aodh-api processes=2 threads=10 user=SOMEUSER display-name=%{GROUP}
        WSGIProcessGroup aodh-api
        WSGIScriptAlias / /usr/lib/python2.7/dist-packages/aodh/api/app
        WSGIApplicationGroup %{GLOBAL}
        <IfVersion >= 2.4>
            ErrorLogFormat "%{cu}t %M"
        </IfVersion>
        ErrorLog /var/log/httpd/aodh_error.log
        CustomLog /var/log/httpd/aodh_access.log combined
    </VirtualHost>

    WSGISocketPrefix /var/run/httpd

Modify the ``WSGIDaemonProcess`` directive to set the ``user`` and ``group`` values to an appropriate user on your server. In many installations ``aodh`` will be correct. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/doc/source/contributor/install/uwsgi.rst0000664000175000017500000000342000000000000022513 0ustar00zuulzuul00000000000000============================= Installing the API with uwsgi ============================= Aodh comes with a few example files for configuring the API service to run with ``uwsgi``. app.wsgi ======== The file ``aodh/api/app.wsgi`` sets up the V2 API WSGI application. The file is installed with the rest of the Aodh application code, and should not need to be modified. Example of uwsgi configuration file =================================== Create an aodh-uwsgi.ini file::

    [uwsgi]
    http = 0.0.0.0:8042
    wsgi-file = /aodh/api/app.wsgi
    plugins = python
    # This is running standalone
    master = true
    # Set die-on-term & exit-on-reload so that uwsgi shuts down
    exit-on-reload = true
    die-on-term = true
    # uwsgi recommends this to prevent thundering herd on accept.
    thunder-lock = true
    # Override the default 4k size for headers (mainly for the keystone token)
    buffer-size = 65535
    enable-threads = true
    # Set the number of threads, usually to the value returned by the nproc command
    threads = 8
    # Make sure the client doesn't try to re-use the connection.
    add-header = Connection: close
    # Set uid and gid to an appropriate user on your server. In many
    # installations ``aodh`` will be correct.
    uid = aodh
    gid = aodh

Then start the uwsgi server:: uwsgi ./aodh-uwsgi.ini Or start in background with:: uwsgi -d ./aodh-uwsgi.ini Configuring with uwsgi-plugin-python on Debian/Ubuntu ===================================================== Install the Python plugin for uwsgi:: apt-get install uwsgi-plugin-python Run the server:: uwsgi_python --master --die-on-term --logto /var/log/aodh/aodh-api.log \ --http-socket :8042 --wsgi-file /usr/share/aodh-common/app.wsgi ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.1030166 aodh-19.0.0/doc/source/contributor/releasenotes/0000775000175000017500000000000000000000000021647 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/doc/source/contributor/releasenotes/index.rst0000664000175000017500000000175700000000000023516 0ustar00zuulzuul00000000000000.. Copyright (c) 2016 Hewlett Packard Enterprise Development Company, L.P. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License. ============= Release Notes ============= .. toctree:: :hidden: * `Liberty`_ Since the Mitaka development cycle, release notes are hosted at: `Aodh Release Notes`_ .. _Liberty: https://wiki.openstack.org/wiki/ReleaseNotes/Liberty#OpenStack_Telemetry_.28Ceilometer.29 .. _Aodh Release Notes: https://docs.openstack.org/releasenotes/aodh/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/doc/source/contributor/testing.rst0000664000175000017500000000506000000000000021366 0ustar00zuulzuul00000000000000.. Copyright 2012 New Dream Network, LLC (DreamHost) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================= Running the Tests ================= Aodh includes an extensive set of automated unit tests which are run through tox_. 1. Install ``tox``:: $ sudo pip install tox 2. On Ubuntu, install the ``libmysqlclient-dev`` package:: $ sudo apt-get install libmysqlclient-dev For Fedora 20 there is no ``libmysqlclient-dev`` package, so you’ll need to install ``mariadb-devel.x86_64`` (or ``mariadb-devel.i386``) instead:: $ sudo yum install mariadb-devel.x86_64 3. Run the unit and code-style tests:: $ cd /opt/stack/aodh $ tox -e py27,pep8 As tox is a wrapper around testr, it also accepts the same flags as testr. See the `testr documentation`_ for details about these additional flags. Use a double hyphen to pass options to testr. For example, to run only tests under tests/functional/api/v2:: $ tox -e py27 -- functional.api.v2 To debug tests (i.e. break into the pdb debugger), you can use the ``debug`` tox environment. Here's an example, passing the name of a test since you'll normally only want to run the test that hits your breakpoint:: $ tox -e debug aodh.tests.unit.test_bin For reference, the ``debug`` tox environment implements the instructions here: https://wiki.openstack.org/wiki/Testr#Debugging_.28pdb.29_Tests .. _testr documentation: https://testrepository.readthedocs.org/en/latest/MANUAL.html 4. There is a growing suite of tests which use a tool called `gabbi`_ to test and validate the behavior of the Aodh API. These tests are run when using the usual ``functional`` tox target but, if desired, they can be run by themselves:: $ tox -e gabbi The YAML files used to drive the gabbi tests can be found in ``aodh/tests/functional/gabbi/gabbits``. If you are adding to or adjusting the API you should consider adding tests here. .. _gabbi: https://gabbi.readthedocs.io/en/latest/ .. seealso:: * tox_ ..
_tox: https://tox.readthedocs.io/en/latest/ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.1030166 aodh-19.0.0/doc/source/contributor/webapi/0000775000175000017500000000000000000000000020425 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/doc/source/contributor/webapi/index.rst0000664000175000017500000000241300000000000022266 0ustar00zuulzuul00000000000000======= Web API ======= .. toctree:: :maxdepth: 2 v2 You can get API version list via request to endpoint root path. For example:: curl -H "X-AUTH-TOKEN: fa2ec18631f94039a5b9a8b4fe8f56ad" http://127.0.0.1:8042 Sample response:: { "versions": { "values": [ { "id": "v2", "links": [ { "href": "http://127.0.0.1:8042/v2", "rel": "self" }, { "href": "https://docs.openstack.org/", "rel": "describedby", "type": "text/html" } ], "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.telemetry-v2+json" }, { "base": "application/xml", "type": "application/vnd.openstack.telemetry-v2+xml" } ], "status": "stable", "updated": "2013-02-13T00:00:00Z" } ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/doc/source/contributor/webapi/v2.rst0000664000175000017500000000647600000000000021523 0ustar00zuulzuul00000000000000========== V2 Web API ========== Capabilities ============ The Capabilities API allows you to directly discover which functions from the V2 API functionality, including the selectable aggregate functions, are supported by the currently configured storage driver. A capabilities query returns a flattened dictionary of properties with associated boolean values - a 'False' or absent value means that the corresponding feature is not available in the backend. .. rest-controller:: aodh.api.controllers.v2.capabilities:CapabilitiesController :webprefix: /v2/capabilities .. autotype:: aodh.api.controllers.v2.capabilities.Capabilities :members: .. _alarms-api: Alarms ====== .. rest-controller:: aodh.api.controllers.v2.alarms:AlarmsController :webprefix: /v2/alarms .. rest-controller:: aodh.api.controllers.v2.alarms:AlarmController :webprefix: /v2/alarms .. autotype:: aodh.api.controllers.v2.alarms.Alarm :members: .. autotype:: aodh.api.controllers.v2.alarm_rules.gnocchi.MetricOfResourceRule :members: .. autotype:: aodh.api.controllers.v2.alarm_rules.gnocchi.AggregationMetricByResourcesLookupRule :members: .. autotype:: aodh.api.controllers.v2.alarm_rules.gnocchi.AggregationMetricsByIdLookupRule :members: .. autotype:: aodh.api.controllers.v2.alarms.AlarmTimeConstraint :members: .. autotype:: aodh.api.controllers.v2.alarms.AlarmChange :members: Filtering Queries ================= The filter expressions of the query feature operate on the fields of *Alarm* and *AlarmChange*. The following comparison operators are supported: *=*, *!=*, *<*, *<=*, *>*, *>=* and *in*; and the following logical operators can be used: *and* *or* and *not*. The field names are validated against the database models. Complex Query supports defining the list of orderby expressions in the form of [{"field_name": "asc"}, {"field_name2": "desc"}, ...]. The number of the returned items can be bounded using the *limit* option. The *filter*, *orderby* and *limit* are all optional fields in a query. .. rest-controller:: aodh.api.controllers.v2.query:QueryAlarmsController :webprefix: /v2/query/alarms .. 
rest-controller:: aodh.api.controllers.v2.query:QueryAlarmHistoryController :webprefix: /v2/query/alarms/history .. autotype:: aodh.api.controllers.v2.query.ComplexQuery :members: Composite rule Alarm ==================== The *composite* type alarm allows users to specify a composite rule to define an alarm with multiple triggering conditions, using a combination of *and* and *or* relations. A composite rule is composed of multiple threshold rules or gnocchi rules. A sample composite alarm request form is as follows:: { "name": "test_composite", "type": "composite", "composite_rule": { "and": [THRESHOLD_RULE1, THRESHOLD_RULE2, { 'or': [THRESHOLD_RULE3, GNOCCHI_RULE1, GNOCCHI_RULE2, GNOCCHI_RULE3] }] } } A sub-rule in composite_rule is same as a threshold_rule in threshold alarm or a gnocchi_rule in gnocchi alarm. Additionally it has a mandatory *type* field to specify the rule type, like in the following sample:: { "threshold": 0.8, "meters": [ "f6857d3f-bde6-441a-aa1d-e98fa4ea543f", "ea1491ca-5309-4b5a-9f05-34409c6e8b6c" ], "type": "gnocchi_resources_threshold" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/doc/source/glossary.rst0000664000175000017500000000307200000000000017203 0ustar00zuulzuul00000000000000.. Copyright 2012 New Dream Network (DreamHost) Copyright 2013 eNovance Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ======== Glossary ======== .. glossary:: alarm An action triggered whenever a meter reaches a certain threshold. API server HTTP REST API service for Aodh. HTTP callback HTTP callback is used for calling a predefined URL, whenever an alarm has been set off. The payload of the request contains all the details of why the alarm was triggered. log Logging is one of the alarm actions that is useful mostly for debugging, it stores the alarms in a log file. zaqar According to `Zaqar Developer Documentation`_: Zaqar is a multi-tenant cloud messaging and notification service for web and mobile developers. project The OpenStack tenant or project. resource The OpenStack entity being metered (e.g. instance, volume, image, etc). user An OpenStack user. .. _Zaqar Developer Documentation: https://docs.openstack.org/zaqar/latest/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/doc/source/index.rst0000664000175000017500000000211600000000000016445 0ustar00zuulzuul00000000000000.. Copyright 2010 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
================================ Welcome to Aodh's documentation! ================================ The Alarming service (aodh) project provides a service that enables the ability to trigger actions based on defined rules against metric or event data collected by Ceilometer or Gnocchi. .. toctree:: :maxdepth: 2 install/index contributor/index admin/index configuration/index cli/index .. toctree:: :maxdepth: 1 glossary ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.1030166 aodh-19.0.0/doc/source/install/0000775000175000017500000000000000000000000016252 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/doc/source/install/configure-common.rst0000664000175000017500000000432300000000000022255 0ustar00zuulzuul000000000000002. Edit the ``/etc/aodh/aodh.conf`` file and complete the following actions: * In the ``[database]`` section, configure database access: .. code-block:: ini [database] ... connection = mysql+pymysql://aodh:AODH_DBPASS@controller/aodh Replace ``AODH_DBPASS`` with the password you chose for the Telemetry Alarming module database. You must escape special characters such as ``:``, ``/``, ``+``, and ``@`` in the connection string in accordance with `RFC2396 `_. * In the ``[DEFAULT]`` section, configure ``RabbitMQ`` message queue access: .. code-block:: ini [DEFAULT] ... transport_url = rabbit://openstack:RABBIT_PASS@controller Replace ``RABBIT_PASS`` with the password you chose for the ``openstack`` account in ``RabbitMQ``. * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections, configure Identity service access: .. code-block:: ini [DEFAULT] ... auth_strategy = keystone [keystone_authtoken] ... www_authenticate_uri = http://controller:5000 auth_url = http://controller:5000 memcached_servers = controller:11211 auth_type = password project_domain_id = default user_domain_id = default project_name = service username = aodh password = AODH_PASS Replace ``AODH_PASS`` with the password you chose for the ``aodh`` user in the Identity service. * In the ``[service_credentials]`` section, configure service credentials: .. code-block:: ini [service_credentials] ... auth_type = password auth_url = http://controller:5000/v3 project_domain_id = default user_domain_id = default project_name = service username = aodh password = AODH_PASS interface = internalURL region_name = RegionOne Replace ``AODH_PASS`` with the password you chose for the ``aodh`` user in the Identity service. .. todo: Workaround for https://bugs.launchpad.net/ubuntu/+source/aodh/+bug/1513599. 3. In order to initialize the database please run the ``aodh-dbsync`` script. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/doc/source/install/get_started.rst0000664000175000017500000000222100000000000021306 0ustar00zuulzuul00000000000000=================================== Telemetry Alarming service overview =================================== The Telemetry Alarming services trigger alarms when the collected metering or event data break the defined rules. The Telemetry Alarming service consists of the following components: An API server (``aodh-api``) Runs on one or more central management servers to provide access to the alarm information stored in the data store. 
An alarm evaluator (``aodh-evaluator``) Runs on one or more central management servers to determine when alarms fire due to the associated statistic trend crossing a threshold over a sliding time window. A notification listener (``aodh-listener``) Runs on a central management server and determines when to fire alarms. The alarms are generated based on defined rules against events, which are captured by the Telemetry Data Collection service's notification agents. An alarm notifier (``aodh-notifier``) Runs on one or more central management servers to allow alarms to be set based on the threshold evaluation for a collection of samples. These services communicate by using the OpenStack messaging bus. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/doc/source/install/index.rst0000664000175000017500000000052500000000000020115 0ustar00zuulzuul00000000000000================== Installation Guide ================== .. toctree:: get_started.rst install-obs.rst install-rdo.rst install-ubuntu.rst next-steps.rst .. verify.rst This chapter assumes a working setup of OpenStack following the `OpenStack Installation Tutorials and Guides `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/doc/source/install/install-obs.rst0000664000175000017500000000314000000000000021231 0ustar00zuulzuul00000000000000.. _install-obs: Install and configure for openSUSE and SUSE Linux Enterprise ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Telemetry Alarming service, code-named aodh, on the controller node. This section assumes that you already have a working OpenStack environment with at least the following components installed: Compute, Image Service, Identity. .. include:: prereq-common.rst Install and configure components -------------------------------- .. note:: Default configuration files vary by distribution. You might need to add these sections and options rather than modifying existing sections and options. Also, an ellipsis (...) in the configuration snippets indicates potential default configuration options that you should retain. 1. Install the packages: .. code-block:: console # zypper install openstack-aodh-api \ openstack-aodh-evaluator openstack-aodh-notifier \ openstack-aodh-listener openstack-aodh-expirer \ python-aodhclient .. include:: configure-common.rst Finalize installation --------------------- #. Start the Telemetry Alarming services and configure them to start when the system boots: .. code-block:: console # systemctl enable openstack-aodh-api.service \ openstack-aodh-evaluator.service \ openstack-aodh-notifier.service \ openstack-aodh-listener.service # systemctl start openstack-aodh-api.service \ openstack-aodh-evaluator.service \ openstack-aodh-notifier.service \ openstack-aodh-listener.service ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/doc/source/install/install-rdo.rst0000664000175000017500000000313700000000000021240 0ustar00zuulzuul00000000000000.. _install-rdo: Install and configure for Red Hat Enterprise Linux and CentOS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Telemetry Alarming service, code-named aodh, on the controller node. 
This section assumes that you already have a working OpenStack environment with at least the following components installed: Compute, Image Service, Identity. .. include:: prereq-common.rst Install and configure components -------------------------------- .. note:: Default configuration files vary by distribution. You might need to add these sections and options rather than modifying existing sections and options. Also, an ellipsis (...) in the configuration snippets indicates potential default configuration options that you should retain. 1. Install the packages: .. code-block:: console # yum install openstack-aodh-api \ openstack-aodh-evaluator openstack-aodh-notifier \ openstack-aodh-listener openstack-aodh-expirer \ python-aodhclient .. include:: configure-common.rst Finalize installation --------------------- #. Start the Telemetry Alarming services and configure them to start when the system boots: .. code-block:: console # systemctl enable openstack-aodh-api.service \ openstack-aodh-evaluator.service \ openstack-aodh-notifier.service \ openstack-aodh-listener.service # systemctl start openstack-aodh-api.service \ openstack-aodh-evaluator.service \ openstack-aodh-notifier.service \ openstack-aodh-listener.service ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/doc/source/install/install-ubuntu.rst0000664000175000017500000000233000000000000021770 0ustar00zuulzuul00000000000000.. _install-ubuntu: Install and configure for Ubuntu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Telemetry Alarming service, code-named aodh, on the controller node. This section assumes that you already have a working OpenStack environment with at least the following components installed: Compute, Image Service, Identity. .. include:: prereq-common.rst Install and configure components -------------------------------- .. note:: Default configuration files vary by distribution. You might need to add these sections and options rather than modifying existing sections and options. Also, an ellipsis (...) in the configuration snippets indicates potential default configuration options that you should retain. 1. Install the packages: .. code-block:: console # apt-get install aodh-api aodh-evaluator aodh-notifier \ aodh-listener aodh-expirer python-aodhclient .. include:: configure-common.rst Finalize installation --------------------- #. Restart the Alarming services: .. code-block:: console # service aodh-api restart # service aodh-evaluator restart # service aodh-notifier restart # service aodh-listener restart ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/doc/source/install/next-steps.rst0000664000175000017500000000034300000000000021116 0ustar00zuulzuul00000000000000.. _next-steps: Next steps ~~~~~~~~~~ Your OpenStack environment now includes the aodh service. To add additional services, see the `OpenStack Installation Tutorials and Guides `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/doc/source/install/prereq-common.rst0000664000175000017500000001262400000000000021575 0ustar00zuulzuul00000000000000Prerequisites ------------- Before you install and configure the Telemetry service, you must create a database, service credentials, and API endpoints. #. 
To create the database, complete these steps: * Use the database access client to connect to the database server as the ``root`` user: .. code-block:: console $ mysql -u root -p * Create the ``aodh`` database: .. code-block:: console CREATE DATABASE aodh; * Grant proper access to the ``aodh`` database: .. code-block:: console GRANT ALL PRIVILEGES ON aodh.* TO 'aodh'@'localhost' \ IDENTIFIED BY 'AODH_DBPASS'; GRANT ALL PRIVILEGES ON aodh.* TO 'aodh'@'%' \ IDENTIFIED BY 'AODH_DBPASS'; Replace ``AODH_DBPASS`` with a suitable password. * Exit the database access client. #. Source the ``admin`` credentials to gain access to admin-only CLI commands: .. code-block:: console $ . admin-openrc #. To create the service credentials, complete these steps: * Create the ``aodh`` user: .. code-block:: console $ openstack user create --domain default \ --password-prompt aodh User Password: Repeat User Password: +---------------------+----------------------------------+ | Field | Value | +---------------------+----------------------------------+ | domain_id | default | | enabled | True | | id | b7657c9ea07a4556aef5d34cf70713a3 | | name | aodh | | options | {} | | password_expires_at | None | +---------------------+----------------------------------+ * Add the ``admin`` role to the ``aodh`` user: .. code-block:: console $ openstack role add --project service --user aodh admin .. note:: This command provides no output. * Create the ``aodh`` service entity: .. code-block:: console $ openstack service create --name aodh \ --description "Telemetry" alarming +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | Telemetry | | enabled | True | | id | 3405453b14da441ebb258edfeba96d83 | | name | aodh | | type | alarming | +-------------+----------------------------------+ #. Create the Alarming service API endpoints: .. 
code-block:: console $ openstack endpoint create --region RegionOne \ alarming public http://controller:8042 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | 340be3625e9b4239a6415d034e98aace | | interface | public | | region | RegionOne | | region_id | RegionOne | | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 | | service_name | aodh | | service_type | alarming | | url | http://controller:8042 | +--------------+----------------------------------+ $ openstack endpoint create --region RegionOne \ alarming internal http://controller:8042 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | 340be3625e9b4239a6415d034e98aace | | interface | internal | | region | RegionOne | | region_id | RegionOne | | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 | | service_name | aodh | | service_type | alarming | | url | http://controller:8042 | +--------------+----------------------------------+ $ openstack endpoint create --region RegionOne \ alarming admin http://controller:8042 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | 340be3625e9b4239a6415d034e98aace | | interface | admin | | region | RegionOne | | region_id | RegionOne | | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 | | service_name | aodh | | service_type | alarming | | url | http://controller:8042 | +--------------+----------------------------------+ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/doc/source/install/verify.rst0000664000175000017500000000016300000000000020310 0ustar00zuulzuul00000000000000:orphan: .. _verify: Verify operation ~~~~~~~~~~~~~~~~ Verify operation of the Telemetry Alarming service. TBD ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.1070173 aodh-19.0.0/rally-jobs/0000775000175000017500000000000000000000000014615 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/rally-jobs/README.rst0000664000175000017500000000157400000000000016313 0ustar00zuulzuul00000000000000Rally job related files ======================= This directory contains rally tasks and plugins that are run by OpenStack CI. Structure --------- * plugins - directory where you can add rally plugins. Almost everything in Rally is a plugin. Benchmark context, Benchmark scenario, SLA checks, Generic cleanup resources, .... * extra - all files from this directory will be copy pasted to gates, so you are able to use absolute paths in rally tasks. 
Files will be located in ~/.rally/extra/* * aodh is a task that is run in gates against aodh Useful links ------------ * More about Rally: https://rally.readthedocs.org/en/latest/ * How to add rally-gates: https://rally.readthedocs.io/en/latest/quick_start/gates.html * About plugins: https://rally.readthedocs.io/en/latest/plugins/index.html * Plugin samples: https://github.com/openstack/rally/tree/master/samples/plugins ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/rally-jobs/ceilometer.yaml0000664000175000017500000001177600000000000017645 0ustar00zuulzuul00000000000000--- CeilometerAlarms.create_alarm: - args: meter_name: "ram_util" threshold: 10.0 type: "threshold" statistic: "avg" alarm_actions: ["http://localhost:8776/alarm"] ok_actions: ["http://localhost:8776/ok"] insufficient_data_actions: ["http://localhost:8776/notok"] runner: type: "constant" times: 10 concurrency: 10 context: users: tenants: 1 users_per_tenant: 1 sla: max_failure_percent: 0 CeilometerAlarms.create_and_delete_alarm: - args: meter_name: "ram_util" threshold: 10.0 type: "threshold" statistic: "avg" alarm_actions: ["http://localhost:8776/alarm"] ok_actions: ["http://localhost:8776/ok"] insufficient_data_actions: ["http://localhost:8776/notok"] runner: type: "constant" times: 10 concurrency: 10 context: users: tenants: 1 users_per_tenant: 1 sla: max_failure_percent: 0 CeilometerAlarms.create_and_list_alarm: - args: meter_name: "ram_util" threshold: 10.0 type: "threshold" statistic: "avg" alarm_actions: ["http://localhost:8776/alarm"] ok_actions: ["http://localhost:8776/ok"] insufficient_data_actions: ["http://localhost:8776/notok"] runner: type: "constant" times: 10 concurrency: 10 context: users: tenants: 1 users_per_tenant: 1 sla: max_failure_percent: 0 CeilometerAlarms.create_and_update_alarm: - args: meter_name: "ram_util" threshold: 10.0 type: "threshold" statistic: "avg" alarm_actions: ["http://localhost:8776/alarm"] ok_actions: ["http://localhost:8776/ok"] insufficient_data_actions: ["http://localhost:8776/notok"] runner: type: "constant" times: 10 concurrency: 10 context: users: tenants: 1 users_per_tenant: 1 sla: max_failure_percent: 0 CeilometerAlarms.list_alarms: - runner: type: "constant" times: 10 concurrency: 10 context: users: tenants: 1 users_per_tenant: 1 sla: max_failure_percent: 0 CeilometerMeters.list_meters: - runner: type: "constant" times: 10 concurrency: 10 context: users: tenants: 1 users_per_tenant: 1 sla: max_failure_percent: 0 CeilometerResource.list_resources: - runner: type: "constant" times: 10 concurrency: 10 context: users: tenants: 1 users_per_tenant: 1 sla: max_failure_percent: 0 CeilometerStats.create_meter_and_get_stats: - args: user_id: "user-id" resource_id: "resource-id" counter_volume: 1.0 counter_unit: "" counter_type: "cumulative" runner: type: "constant" times: 20 concurrency: 10 context: users: tenants: 1 users_per_tenant: 1 sla: max_failure_percent: 0 CeilometerQueries.create_and_query_alarms: - args: filter: {"and": [{"!=": {"state": "dummy_state"}},{"=": {"type": "threshold"}}]} orderby: !!null limit: 10 meter_name: "ram_util" threshold: 10.0 type: "threshold" statistic: "avg" alarm_actions: ["http://localhost:8776/alarm"] ok_actions: ["http://localhost:8776/ok"] insufficient_data_actions: ["http://localhost:8776/notok"] runner: type: "constant" times: 20 concurrency: 10 context: users: tenants: 1 users_per_tenant: 1 sla: max_failure_percent: 0 CeilometerQueries.create_and_query_alarm_history: - 
args: orderby: !!null limit: !!null meter_name: "ram_util" threshold: 10.0 type: "threshold" statistic: "avg" alarm_actions: ["http://localhost:8776/alarm"] ok_actions: ["http://localhost:8776/ok"] insufficient_data_actions: ["http://localhost:8776/notok"] runner: type: "constant" times: 20 concurrency: 10 context: users: tenants: 1 users_per_tenant: 1 sla: max_failure_percent: 0 CeilometerQueries.create_and_query_samples: - args: filter: {"=": {"counter_unit": "instance"}} orderby: !!null limit: 10 counter_name: "cpu_util" counter_type: "gauge" counter_unit: "instance" counter_volume: "1.0" resource_id: "resource_id" runner: type: "constant" times: 20 concurrency: 10 context: users: tenants: 1 users_per_tenant: 1 sla: max_failure_percent: 0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.1070173 aodh-19.0.0/rally-jobs/extra/0000775000175000017500000000000000000000000015740 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/rally-jobs/extra/README.rst0000664000175000017500000000025500000000000017431 0ustar00zuulzuul00000000000000Extra files =========== All files from this directory will be copy pasted to gates, so you are able to use absolute path in rally tasks. Files will be in ~/.rally/extra/* ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/rally-jobs/extra/fake.img0000664000175000017500000000000000000000000017332 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.1070173 aodh-19.0.0/rally-jobs/plugins/0000775000175000017500000000000000000000000016276 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/rally-jobs/plugins/README.rst0000664000175000017500000000060600000000000017767 0ustar00zuulzuul00000000000000Rally plugins ============= All *.py modules from this directory will be auto-loaded by Rally and all plugins will be discoverable. There is no need of any extra configuration and there is no difference between writing them here and in rally code base. Note that it is better to push all interesting and useful benchmarks to Rally code base, this simplifies administration for Operators. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/rally-jobs/plugins/plugin_sample.py0000664000175000017500000000167400000000000021517 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Sample of plugin for Aodh. 
For more Aodh related benchmarks take a look here: github.com/openstack/rally/blob/master/rally/benchmark/scenarios/aodh/ About plugins: https://rally.readthedocs.io/en/latest/plugins/index.html Rally concepts https://wiki.openstack.org/wiki/Rally/Concepts """ from rally.benchmark.scenarios import base class AodhPlugin(base.Scenario): pass ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727866891.055009 aodh-19.0.0/releasenotes/0000775000175000017500000000000000000000000015230 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727866891.119019 aodh-19.0.0/releasenotes/notes/0000775000175000017500000000000000000000000016360 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/.placeholder0000664000175000017500000000000000000000000020631 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/Add-state-reason-to-the-API-7bc5a9465466db2b.yaml0000664000175000017500000000017300000000000026366 0ustar00zuulzuul00000000000000--- features: - | The reason of the state change is now part of the API as "state_reason" field of the alarm object. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/add-a-data-migration-tool-daa14b0cb5d4cc62.yaml0000664000175000017500000000045400000000000026500 0ustar00zuulzuul00000000000000--- upgrade: - > Add a tool for migrating alarm and alarm history data from NoSQL storage to SQL storage. The migration tool has been tested OK in devstack environment, but users need to be cautious with this, because the data migration between storage backends is a bit dangerous. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/add-upgrade-check-framework-ab35e6eb65504bc3.yaml0000664000175000017500000000071100000000000026754 0ustar00zuulzuul00000000000000--- prelude: > Added new tool ``aodh-status upgrade check``. features: - | New framework for ``aodh-status upgrade check`` command is added. This framework allows adding various checks which can be run before a Aodh upgrade to ensure if the upgrade can be performed safely. upgrade: - | Operator can now use new CLI tool ``aodh-status upgrade check`` to check if Aodh deployment can be safely upgraded from N-1 to N release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/auto-healing-notifier-794b64de776811e9.yaml0000664000175000017500000000022300000000000025535 0ustar00zuulzuul00000000000000features: - Added a new notifier(``trust+heat``) that works together with ``loadbalancer_member_health`` evaluator for auto-healing purpose. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/bug-1929178-46493335946174a5.yaml0000664000175000017500000000020300000000000022525 0ustar00zuulzuul00000000000000--- deprecations: - | The ``[coordination] check_watchers`` parameter has been deprecated since it has been ineffective. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/bug1540395-reason-string-0aad56966007d0e3.yaml0000664000175000017500000000027700000000000025521 0ustar00zuulzuul00000000000000--- fixes: - > [`bug 1540395 `_] Fix reason string to properly handle transitions when one sample is outside of defined threshold. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/composite-alarm-1b1ca9ea0e8f55c8.yaml0000664000175000017500000000045100000000000024714 0ustar00zuulzuul00000000000000--- features: - > Add a new composite type alarm, which allow users specifying a composite rule to define an alarm with multiple triggering conditions, using a combination of *and*, *or* relationships. The composite rule is composed of multiple threshold rules or gnocchi rules. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/deprecate-combination-alarms-7ff26b73b61a0e59.yaml0000664000175000017500000000054400000000000027201 0ustar00zuulzuul00000000000000--- deprecations: - The combination alarms are officially deprecated and disabled by default. Set api.enable_combination_alarms to True to enable them. Existing alarms will still be evaluated, but access to them via the API is linked to whether that configuration option is turned on or off. It's advised to use composite alarms instead. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/deprecate-json-formatted-policy-file-fgb26387a9bdb3b9.yaml0000664000175000017500000000176000000000000030736 0ustar00zuulzuul00000000000000--- upgrade: - | The default value of ``[oslo_policy] policy_file`` config option has been changed from ``policy.json`` to ``policy.yaml``. Operators who are utilizing customized or previously generated static policy JSON files (which are not needed by default), should generate new policy files or convert them in YAML format. Use the `oslopolicy-convert-json-to-yaml `_ tool to convert a JSON to YAML formatted policy file in backward compatible way. deprecations: - | Use of JSON policy files was deprecated by the ``oslo.policy`` library during the Victoria development cycle. As a result, this deprecation is being noted in the Wallaby cycle with an anticipated future removal of support by ``oslo.policy``. As such operators will need to convert to YAML policy files. Please see the upgrade notes for details on migration of any custom policy files. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/deprecate-nosql-backends-13079883eec7e8e5.yaml0000664000175000017500000000044700000000000026266 0ustar00zuulzuul00000000000000--- deprecations: - > Drop support for NoSQL backends in Aodh. SQL is a prefectly sufficient backend for handling the scope of alarms. To maximise available resources, NoSQL backends are deprecated so developers do not need to worry about adding features to multiple backends. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/deprecate-threshold-alarm-d89da351d4f6f50f.yaml0000664000175000017500000000063100000000000026571 0ustar00zuulzuul00000000000000--- deprecations: - | Ceilometer's API is deprecated in Ocata. 
Therefore, threshold alarms are now deprecated as well. Threshold rules will be removed when Ceilometer's API is also removed. Similar functionality is provided through Gnocchi alarm rules: ``gnocchi_resources_threshold``, ``gnocchi_aggregation_by_metrics_threshold``, or ``gnocchi_aggregation_by_resources_threshold``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/deprecate-unused-http_timeout-74fd60a4c26afd88.yaml0000664000175000017500000000022500000000000027526 0ustar00zuulzuul00000000000000--- deprecations: - | The ``[DEFAULT] http_timeout`` parameter has been deprecated. This parameter has been unused thus has had no effect. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/drop-py-2-7-54a9be4bfb8e9172.yaml0000664000175000017500000000030100000000000023440 0ustar00zuulzuul00000000000000--- upgrade: - | Python 2.7 support has been dropped. Last release of Aodh to support py2.7 is OpenStack Train. The minimum version of Python now supported by Aodh is Python 3.6. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/drop-python-3-6-and-3-7-89f2b7300c0166ca.yaml0000664000175000017500000000020100000000000025215 0ustar00zuulzuul00000000000000--- upgrade: - | Python 3.6 & 3.7 support has been dropped. The minimum version of Python now supported is Python 3.8. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/enable-aodh-service-multi-processes-67ed9a0b7fac69aa.yaml0000664000175000017500000000042100000000000030633 0ustar00zuulzuul00000000000000--- features: - Enable aodh services, including aodh-evaluator, aodh-listener and aodh-notifier to run in multiple worker mode. New options are introduced corresponsively as [evaluator]workers, [listener]workers and [notifier]workers. They all default to 1. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/event-listener-batch-support-04e6ff159ef34d8c.yaml0000664000175000017500000000073700000000000027320 0ustar00zuulzuul00000000000000--- features: - > Add support for batch processing of messages from queue. This will allow the aodh-listener to grab multiple event messages per thread to enable more efficient processing. upgrade: - > batch_size and batch_timeout configuration options are added to [listener] section of configuration. The batch_size controls the number of messages to grab before processing. Similarly, the batch_timeout defines the wait time before processing. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/fix-ceilometerclient-init-8bc7a6742937c3e2.yaml0000664000175000017500000000023400000000000026464 0ustar00zuulzuul00000000000000--- fixes: - > [`bug 1518447 `_] Fix to ensure ceilometerclient is properly initialised on startup. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/fix-combination-alarms-8097adf08b837a50.yaml0000664000175000017500000000030000000000000025742 0ustar00zuulzuul00000000000000--- fixes: - > [`bug 1511252 `_] Fix an issue with combination alarms where it fails to evaluate all issues in the chain of alarms. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/fix-empty-statistics-3852da99b1c0b297.yaml0000664000175000017500000000025200000000000025515 0ustar00zuulzuul00000000000000--- fixes: - > [`bug 1539069 `_] Fix to handle scenario where no valid statistics exist for specified period. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/fix-gnocchi-aggregation-eval-7c2c1c67bdf2d11c.yaml0000664000175000017500000000025300000000000027220 0ustar00zuulzuul00000000000000--- fixes: - > [`bug 1513738 `_] Fix an issue where alarms using Gnocchi aggregations are not being evaluated. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/fix-rbac-50825144e0897d7d.yaml0000664000175000017500000000101200000000000022737 0ustar00zuulzuul00000000000000--- upgrade: - > A new default policy.json is provided to properly handle RBAC control. Existing policy.json files may not grant the appropriate access. security: - > Patch was added to address inconsistent RBAC policy handling. Certain rules may not have been given appropriate access. fixes: - > [`bug 1504495 `_] Patch was added to address inconsistent RBAC policy handling. Certain rules may not have been given appropriate access. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/fix-ssl-request-8107616b6a85a217.yaml0000664000175000017500000000077200000000000024320 0ustar00zuulzuul00000000000000--- fixes: - > [`bug 1582131 `_] Fix an issue with adding CA_BUNDLE certificate parth as value of "verify" parameter in SSL requests. features: - > A new option “rest_notifier_ca_bundle_certificate_path” has been added in the configuration file, set None as default value. If this option is present and SSL is used for alarm action the certificate path provided will be used as value of verify parameter in action request. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/gmr-3dd0a582af010bd4.yaml0000664000175000017500000000025600000000000022306 0ustar00zuulzuul00000000000000--- features: - | Aodh now supports generation of Guru Meditation Reports using oslo.reports library. Each service prints a report output when it receives SIGUSR1. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/gnocchi-capability-cache-75d011e77b8ecc72.yaml0000664000175000017500000000055000000000000026256 0ustar00zuulzuul00000000000000--- other: - | Gnocchi aggregation capabilities are now cached to minimise redundant calls to Gnocchi when validating aggregation methods. The cache is stored in-memory for an hour. If additional aggregations are added to Gnocchi, they will not be proprogated to Aodh's API service for at most an hour or unless the service is restarted. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/gnocchi-client-a62ca5a0c717807e.yaml0000664000175000017500000000026700000000000024345 0ustar00zuulzuul00000000000000--- features: - > Gnocchi dispatcher now uses client rather than direct http requests upgrade: - > gnocchiclient library is now a requirement if using ceilometer+gnocchi. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/gnocchi-external-resource-owner-3fad253d30746b0d.yaml0000664000175000017500000000102300000000000027652 0ustar00zuulzuul00000000000000--- fixes: - | When an unprivileged user want to access to Gnocchi resources created by Ceilometer, that doesn't work because the filter scope the Gnocchi query to resource owner to the user. To fix we introduce a new configuration option "gnocchi_external_project_owner" set by default to "service". The new filter now allow two kind of Gnocchi resources: * owned by the user project * owned by "gnocchi_external_project_owner" and the original project_id of the resource is the user project. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/healthcheck-560700b72ae68e18.yaml0000664000175000017500000000032500000000000023563 0ustar00zuulzuul00000000000000--- features: - A healthcheck endpoint is provided by default at /healthcheck. It leverages oslo_middleware healthcheck middleware. It allows to retrieve information about the health of the API service. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/heartbeat_interval-d46e0f5efbd56264.yaml0000664000175000017500000000037000000000000025412 0ustar00zuulzuul00000000000000--- deprecations: - | The ``[coordination] heartbeat`` parameter has been renamed to the ``[coordination] heartbeat_interval``. The old ``[coordination] heartbeat`` parameter is deprecated and will be removed in a future release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/ingestion-lag-2317725887287fbc.yaml0000664000175000017500000000037200000000000024021 0ustar00zuulzuul00000000000000--- features: - Allow to extends the alarm evaluation windows to compensate the reporting/ingestion lag. An new option is introduced additional_ingestion_lag defaulted to 0. It represents the number of seconds of the window extension. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/keystone-v3-support-ffc0f804dbe9d7e9.yaml0000664000175000017500000000010300000000000025612 0ustar00zuulzuul00000000000000--- features: - > Add support for Keystone v3 authentication ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/load-api-paste-ini-from-config-dirs-69480861a9633df4.yaml0000664000175000017500000000054200000000000030000 0ustar00zuulzuul00000000000000--- features: - | Now the ``aodh-api`` service look for the paste config file (a.k.a. ``api-paste.ini`` from configruation directories like ``/etc/aodh``. If the file is not found in the configuration directories, it uses the default file. 
To use only a specific file, use a full file path for the ``[api] paste_confing`` parameter ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/loadbalancer-evaluator-85732c5e5f6e11e9.yaml0000664000175000017500000000031200000000000026020 0ustar00zuulzuul00000000000000features: - Added a new evaluator for the alarms of type ``loadbalancer_member_health`` which evaluates the alarm by checking the operating status of the members in a given load balancer pool.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/migrate-evaluation_interval-c65ba5cbe5fabb35.yaml0000664000175000017500000000035200000000000027452 0ustar00zuulzuul00000000000000--- deprecations: - | The ``[DEFAULT] evaluation_interval`` parameter has been migrated to the ``[evaluator]`` section. The old parameter is still kept for backword compatibility but will be removed in a future release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/mysql-precise-datetime-e374c77e6707985e.yaml0000664000175000017500000000022400000000000025736 0ustar00zuulzuul00000000000000--- other: - Aodh now leverages microseconds timestamps available since MySQL 5.6.4, meaning it is now the minimum required version of MySQL. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/notifier-batch-listener-01796e2cb06344dd.yaml0000664000175000017500000000073100000000000026117 0ustar00zuulzuul00000000000000--- features: - > Add support for batch processing of messages from queue. This will allow the aodh-notifier to grab multiple messages per thread to enable more efficient processing. upgrade: - > batch_size and batch_timeout configuration options are added to [notifier] section of configuration. The batch_size controls the number of messages to grab before processing. Similarly, the batch_timeout defines the wait time before processing. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/partition-coordinator-improvement-ff1c257f69f120ac.yaml0000664000175000017500000000035000000000000030434 0ustar00zuulzuul00000000000000--- fixes: - > [`bug 1575530 `_] Patch was added to fix and improve the partition coordinator, make sure the input tasks can be correctly distributed to partition members. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/pecan-debug-removed-7c7a528a1aea98bf.yaml0000664000175000017500000000007600000000000025440 0ustar00zuulzuul00000000000000--- upgrade: - The api.pecan_debug option has been removed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/policy-defaults-refresh-95b565bee059f611.yaml0000664000175000017500000000034500000000000026153 0ustar00zuulzuul00000000000000--- features: - | Aodh policies have been modified to isolate the system and project level APIs policy. Because of this change, system users will not be allowed to perform any operations on project level resources. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/policy-in-code-79edd9282f1e4603.yaml0000664000175000017500000000115600000000000024232 0ustar00zuulzuul00000000000000--- features: - | Aodh now support policy in code, which means if users didn't modify any of policy rules, they can leave policy file (in `json` or `yaml` format) empty or not deploy it at all. Because from now, Aodh keeps all default policies under `aodh/api/policies` module. Users can still modify/generate `policy.yaml` file which will override policy rules in code if those rules show in `policy.yaml` file. other: - | Default `policy.json` file is now removed as Aodh now generate the default policies from code. Please be aware that when using that file in your environment. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/queue-communication-1b884feab4078dde.yaml0000664000175000017500000000121500000000000025614 0ustar00zuulzuul00000000000000--- features: - > Support for queue based communication between alarm evaluator service and alarm notifier services was added. Original implementation involved using RPC but there is significant overhead involved with using RPC. Work queues provided required functionality with better performance. upgrade: - > Queue based communication is the new default IPC protocol. RPC can still be used by choosing rpc as ipc_protocol option. Only one protocol can be run at any given time. deprecations: - > Because queues provide the equivalent functionality. RPC support is deprecated and will be removed after Mitaka. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/remove-alarm-name-unique-constraint-4fb0b14f3ad46f0b.yaml0000664000175000017500000000011700000000000030576 0ustar00zuulzuul00000000000000--- other: - Alarm name unique constraint for each project has been removed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/remove-check_watchers-df14cecc258a3510.yaml0000664000175000017500000000013100000000000025772 0ustar00zuulzuul00000000000000--- upgrade: - | The ``[coordination] check_watchers`` parameter has been removed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/remove-combination-alarms-a1a53655f3f7d1d1.yaml0000664000175000017500000000012300000000000026522 0ustar00zuulzuul00000000000000--- deprecations: - The deprecated combination alarms support have been removed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/remove-eventlet-18ada1cff213af5e.yaml0000664000175000017500000000012100000000000025002 0ustar00zuulzuul00000000000000--- features: - > Remove eventlet from Aodh in favour of threaded approach ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/remove-no-sql-drivers-21dfdbd750751340.yaml0000664000175000017500000000010700000000000025546 0ustar00zuulzuul00000000000000--- upgrade: - All the deprecated non-SQL drivers have been removed. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/remove-threshold-alarm-a7901991d2da09f2.yaml0000664000175000017500000000012400000000000025760 0ustar00zuulzuul00000000000000--- deprecations: - | The deprecated 'threshold' alarm type has been removed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/support-batch-delete-events-32496f15b1169887.yaml0000664000175000017500000000043600000000000026537 0ustar00zuulzuul00000000000000--- features: - | A new ``alarm_histories_delete_bacth_size`` option has been added to limit a number of alarm histories deleted from the database by aodh-expirer in a single iteration. This parameter is useful when there are a lot of alarm histories in the database. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/support-combination-to-composite-conversion-3e688a6b7d01a57e.yaml0000664000175000017500000000034600000000000032310 0ustar00zuulzuul00000000000000--- upgrade: - > Add a tool for converting combination alarms to composite alarms, since we have deprecated the combination alarm support and recommend to use composite alarm to perform multiple conditions alarming. ././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=aodh-19.0.0/releasenotes/notes/ussuri-support-builtin-active-active-aodh-evaluator-a935577e17a211ea.yaml 22 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/ussuri-support-builtin-active-active-aodh-evaluator-a935577e17a211ea.0000664000175000017500000000027700000000000032664 0ustar00zuulzuul00000000000000--- features: - | The admin user can fetch alarms of all the projects, e.g. ``curl -X GET "${aodh_prefix}/v2/alarms?q.field=all_projects&q.op=eq&q.value=true" X-Auth-Token:$token`` ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=aodh-19.0.0/releasenotes/notes/ussuri-support-query-all-projects-alarms-by-admin-3ecccf2217d711ea.yaml 22 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/ussuri-support-query-all-projects-alarms-by-admin-3ecccf2217d711ea.ya0000664000175000017500000000036400000000000033040 0ustar00zuulzuul00000000000000--- features: - Support to deploy aodh-evaluator in active/active mode by leveraging database non-locking mechanism. With this feature, there could be multiple aodh-evaluator processes running without dependency of etcd or zookeeper. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/notes/ussuri-support-quota-api-92f2fd0643d311ea.yaml0000664000175000017500000000053000000000000026412 0ustar00zuulzuul00000000000000--- features: - Aodh Quota API is aiming for multi-tenancy support. By default, only the admin user is able to change or delete the resource quota for projects as defined by the default policy rule 'telemetry:update_quotas' and 'telemetry:delete_quotas'. 
User alarm quota and alarm action quota are not supported in Quota API.././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.1230197 aodh-19.0.0/releasenotes/source/0000775000175000017500000000000000000000000016530 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/source/2023.1.rst0000664000175000017500000000020200000000000020001 0ustar00zuulzuul00000000000000=========================== 2023.1 Series Release Notes =========================== .. release-notes:: :branch: stable/2023.1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/source/2023.2.rst0000664000175000017500000000020200000000000020002 0ustar00zuulzuul00000000000000=========================== 2023.2 Series Release Notes =========================== .. release-notes:: :branch: stable/2023.2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/source/2024.1.rst0000664000175000017500000000020200000000000020002 0ustar00zuulzuul00000000000000=========================== 2024.1 Series Release Notes =========================== .. release-notes:: :branch: stable/2024.1 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.1230197 aodh-19.0.0/releasenotes/source/_static/0000775000175000017500000000000000000000000020156 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/source/_static/.placeholder0000664000175000017500000000000000000000000022427 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/source/conf.py0000664000175000017500000002041000000000000020024 0ustar00zuulzuul00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # Aodh Release Notes documentation build configuration file, created by # sphinx-quickstart on Tue Nov 3 17:40:50 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. 
They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'openstackdocstheme', 'reno.sphinxext', ] # Add any paths that contain templates here, relative to this directory. # templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. copyright = '2015, Aodh Developers' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [openstackdocstheme.get_html_theme_path()] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # openstackdocstheme options openstackdocs_repo_name = 'openstack/aodh' openstackdocs_bug_project = 'aodh' openstackdocs_bug_tag = '' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. 
# html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'AodhReleaseNotesdoc' # -- Options for LaTeX output --------------------------------------------- # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'AodhReleaseNotes.tex', 'Aodh Release Notes Documentation', 'Aodh Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'aodhreleasenotes', 'Aodh Release Notes Documentation', ['Aodh Developers'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'AodhReleaseNotes', 'Aodh Release Notes Documentation', 'Aodh Developers', 'AodhReleaseNotes', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. 
# texinfo_no_detailmenu = False # -- Options for Internationalization output ------------------------------ locale_dirs = ['locale/'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/source/index.rst0000664000175000017500000000042600000000000020373 0ustar00zuulzuul00000000000000==================== Aodh Release Notes ==================== .. toctree:: :maxdepth: 1 unreleased 2024.1 2023.2 2023.1 zed yoga xena wallaby victoria ussuri train stein rocky queens pike ocata newton mitaka liberty ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/source/liberty.rst0000664000175000017500000000022200000000000020730 0ustar00zuulzuul00000000000000============================== Liberty Series Release Notes ============================== .. release-notes:: :branch: origin/stable/liberty ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727866891.055009 aodh-19.0.0/releasenotes/source/locale/0000775000175000017500000000000000000000000017767 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727866891.055009 aodh-19.0.0/releasenotes/source/locale/de/0000775000175000017500000000000000000000000020357 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.1230197 aodh-19.0.0/releasenotes/source/locale/de/LC_MESSAGES/0000775000175000017500000000000000000000000022144 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/source/locale/de/LC_MESSAGES/releasenotes.po0000664000175000017500000003734200000000000025206 0ustar00zuulzuul00000000000000# Robert Simai , 2016. #zanata # Robert Simai , 2017. #zanata msgid "" msgstr "" "Project-Id-Version: aodh\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2019-01-23 11:09+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2017-03-09 10:41+0000\n" "Last-Translator: Robert Simai \n" "Language-Team: German\n" "Language: de\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" msgid "1.1.1" msgstr "1.1.1" msgid "1.1.3" msgstr "1.1.3" msgid "2.0.0" msgstr "2.0.0" msgid "2.0.1" msgstr "2.0.1" msgid "2.0.2" msgstr "2.0.2" msgid "3.0.0" msgstr "3.0.0" msgid "4.0.0" msgstr "4.0.0" msgid "" "A healthcheck endpoint is provided by default at /healthcheck. It leverages " "oslo_middleware healthcheck middleware. It allows to retrieve information " "about the health of the API service." msgstr "" "Ein Endpunkt für die Gesundheitsprüfung wird standardmäßig als /healthcheck " "bereitgestellt. Dies unterstützt oslo_middleware Gesundheitsprüfungs-" "Middleware. Es erlaubt den Abruf von Informationen über die Gesundheit des " "API-Dienstes." msgid "" "A new default policy.json is provided to properly handle RBAC control. " "Existing policy.json files may not grant the appropriate access." msgstr "" "Eine neue Standard-policy.json Datei wird ausgeliefert, um die RBAC-" "Kontrolle richtig zu verarbeiten. Die vorhandene policy.json Dateien " "erlauben möglicherweise nicht den notwendigen Zugriff." 
msgid "" "A new option “rest_notifier_ca_bundle_certificate_path” has been added in " "the configuration file, set None as default value. If this option is present " "and SSL is used for alarm action the certificate path provided will be used " "as value of verify parameter in action request." msgstr "" "Eine neue Option “rest_notifier_ca_bundle_certificate_path” wurde der " "Konfigurationsdatei hinzugefügt, mit Keine als Standardwert. Wenn die Option " "vorhanden ist und SSL für Alarmaktionen verwendet wird, so wird der " "Zertifikatpfad als Wert verwendet, um Parameter in Aktionsanfragen zu " "überprüfen." msgid "" "Add a new composite type alarm, which allow users specifying a composite " "rule to define an alarm with multiple triggering conditions, using a " "combination of *and*, *or* relationships. The composite rule is composed of " "multiple threshold rules or gnocchi rules." msgstr "" "Ein neuer Composite Alarmtyp wurde hinzugefügt, welcher dem Benutzer " "erlaubt, zusammengesetzte Regeln mit multiplen Auslösebedingungen zu " "erstellen. Dazu werden *and* und *or* Beziehungen verwendet. Die Composite-" "Regel besteht aus multiplen Schwellwertregeln oder Gnocchi-Regeln." msgid "" "Add a tool for converting combination alarms to composite alarms, since we " "have deprecated the combination alarm support and recommend to use composite " "alarm to perform multiple conditions alarming." msgstr "" "Werkzeug hinzugefügt, um Kombinationsalarme in Composite-Alarme zu " "konvertieren, da die Kombinationsalarmunterstützung abgekündigt wurde. Es " "wird empfohlen, zur Alarmierung durch multiple Bedingungen Composite-Alarme " "zu verwenden." msgid "" "Add a tool for migrating alarm and alarm history data from NoSQL storage to " "SQL storage. The migration tool has been tested OK in devstack environment, " "but users need to be cautious with this, because the data migration between " "storage backends is a bit dangerous." msgstr "" "Werkzeug hinzugefügt zum migrieren von Alarm- und Alarmverlaufsdaten aus " "NoSQL in SQL. Das Migrationswerkzeug wurde erfolgreich in der " "Devstackumgebung gestestet, aber Benutzer sollten dennoch vorsichtig " "vorgehen. Die Datenmigration zwischen Storage-Backends ist gefährlich." msgid "Add support for Keystone v3 authentication" msgstr "Unterstützung für Keystone v3 Authentifizierung hinzugefügt" msgid "" "Add support for batch processing of messages from queue. This will allow the " "aodh-listener to grab multiple event messages per thread to enable more " "efficient processing." msgstr "" "Unterstützung zur Stapelverarbeitung von Nachrichten in der Warteschlange " "hinzugefügt. Dies erlaubt dem aodh-listener mehrere Nachrichten pro Thread " "aufzunehmen und somit eine effizientere Bearbeitung." msgid "" "Add support for batch processing of messages from queue. This will allow the " "aodh-notifier to grab multiple messages per thread to enable more efficient " "processing." msgstr "" "Unterstützung zur Stapelverarbeitung von Nachrichten in der Warteschlange " "hinzugefügt. Dies erlaubt dem aodh-notifier mehrere Nachrichten pro Thread " "aufzunehmen und somit eine effizientere Bearbeitung." msgid "Alarm name unique constraint for each project has been removed." msgstr "" "Notwendigkeit eines einmaligen Alarmnamens für jedes Projekt wurde entfernt." msgid "All the deprecated non-SQL drivers have been removed." msgstr "Alle veralteten nicht-SQL Treiber wurden entfernt." 
msgid "" "Allow to extends the alarm evaluation windows to compensate the reporting/" "ingestion lag. An new option is introduced additional_ingestion_lag " "defaulted to 0. It represents the number of seconds of the window extension." msgstr "" "Es ist erlaubt, das Alarmevaluierungsfenster zu erweitern, um die " "Verzögerung zwischen Report und Aufnahme zu kompensieren. Eine neue Option " "additional_ingestion_lag wurde eingeführt, mit Standard 0. Sie bezeichnet " "die Anzahl an Sekunden der Fenstererweiterung." msgid "Aodh Release Notes" msgstr "Aodh Releasenotes" msgid "" "Aodh now leverages microseconds timestamps available since MySQL 5.6.4, " "meaning it is now the minimum required version of MySQL." msgstr "" "Aodh verwendet jetzt Mikrosekunden Zeitstempel, die seit MySQL 5.6.4 " "verfügbar sind. Dies ist somit die minimale erforderliche MySQL Version." msgid "" "Because queues provide the equivalent functionality. RPC support is " "deprecated and will be removed after Mitaka." msgstr "" "RPC Unterstützung ist veraltet und wird nach Mitaka entfernt, da " "Warteschlangen (Queues) die gleiche Funktionalität bieten." msgid "Bug Fixes" msgstr "Fehlerkorrekturen" msgid "Current Series Release Notes" msgstr "Aktuelle Serie Releasenotes" msgid "Deprecation Notes" msgstr "Ablaufwarnungen" msgid "" "Drop support for NoSQL backends in Aodh. SQL is a prefectly sufficient " "backend for handling the scope of alarms. To maximise available resources, " "NoSQL backends are deprecated so developers do not need to worry about " "adding features to multiple backends." msgstr "" "Unterstützung für NoSQL Backends in Aodh entfällt. SQL ist ein vollkommen " "ausreichendes Backend um Alarme zu handhaben. Zum maximieren vorhandener " "Ressourcen werden NoSQL Backends als veraltet aussortiert, so dass sich " "Entwickler keine Gedanken beim hinzufügen neuer Funktionen für multiple " "Backends machen müssen." msgid "" "Enable aodh services, including aodh-evaluator, aodh-listener and aodh-" "notifier to run in multiple worker mode. New options are introduced " "corresponsively as [evaluator]workers, [listener]workers and " "[notifier]workers. They all default to 1." msgstr "" "Aodh Diesnte aktiviert, einschließlich aodh-evaluator, aodh-listener und " "aodh-notifier, damit diese in multiplen Worker-Modi laufen. Neue Optionen " "wurden hinzugefügt, für [evaluator]Worker, [listener]Worker und " "[notifier]Workers. Standardmäßig sind sie auf 1 gesetzt." msgid "" "Gnocchi aggregation capabilities are now cached to minimise redundant calls " "to Gnocchi when validating aggregation methods. The cache is stored in-" "memory for an hour. If additional aggregations are added to Gnocchi, they " "will not be proprogated to Aodh's API service for at most an hour or unless " "the service is restarted." msgstr "" "Die Kumulierungsfähigkeiten in Gnocchi haben jetzt einen Cache, so dass " "redundante Aufrufe an Gnocchi bei der Prüfung von Kumulierungsmethoden " "minimiert werden. Der Cache wird für eine Stunde im Speicher gehalten. Zu " "Gnocchi hinzugefügte Kumulierungen werden für maximal eine Stunde nicht an " "Aodhs API-Dienst übergeben, außer der Dienst wird neu gestartet." 
msgid "Gnocchi dispatcher now uses client rather than direct http requests" msgstr "" "Der Gnocchi Dispatcher verwendet den Klienten und nicht mehr direkte HTTP-" "Anfragen" msgid "Liberty Series Release Notes" msgstr "Liberty Serie Releasenotes" msgid "Mitaka Series Release Notes" msgstr "Mitaka Serie Releasenotes" msgid "New Features" msgstr "Neue Funktionen" msgid "Newton Series Release Notes" msgstr "Newton Serie Releasenotes" msgid "Ocata Series Release Notes" msgstr "Ocata Serie Releasenotes" msgid "Other Notes" msgstr "Andere Notizen" msgid "" "Patch was added to address inconsistent RBAC policy handling. Certain rules " "may not have been given appropriate access." msgstr "" "Ein Patch wurde hinzugefügt, um die inkonsistente Behandlung von RBAC-" "Richtlinien zu korrigieren. Bestimmte Regeln konnten nicht den vorgesehenen " "Zugriff erhalten." msgid "" "Queue based communication is the new default IPC protocol. RPC can still be " "used by choosing rpc as ipc_protocol option. Only one protocol can be run at " "any given time." msgstr "" "Warteschlangenbasierte Kommunikation ist das neue Standard-IPC-Protokoll. " "RPC kann immer noch verwendet werden, in dem RPC als ipc_protocol Option " "ausgewählt wird. Es kann immer nur ein Protokoll abgearbeitet werden." msgid "Remove eventlet from Aodh in favour of threaded approach" msgstr "Eventlet aus Aodh entfernt, der Thread-Ansatz wird bevorzugt" msgid "Security Issues" msgstr "Sicherheitsrelevante Probleme" msgid "Start using reno to manage release notes." msgstr "Reno wird für die Verwaltung der Releasenotes verwendet." msgid "" "Support for queue based communication between alarm evaluator service and " "alarm notifier services was added. Original implementation involved using " "RPC but there is significant overhead involved with using RPC. Work queues " "provided required functionality with better performance." msgstr "" "Unterstützung für warteschleifenbasierte Kommunikation zwischen dem " "Alarmevaluierungsdienst und dem Alarmbenachrichtigungsdienst wurde " "hinzugefügt. Die Originalimplementierung verwendete RPC, was aber einen " "bedeutenden Mehraufwand bedeutet. Arbeitswarteschlangen stellen die " "notwendige Funktionalität bereit und bieten bessere Performanz." msgid "The api.pecan_debug option has been removed." msgstr "Die api.pecan_debug Option wurde entfernt." msgid "" "The combination alarms are officially deprecated and disabled by default. " "Set api.enable_combination_alarms to True to enable them. Existing alarms " "will still be evaluated, but access to them via the API is linked to whether " "that configuration option is turned on or off. It's advised to use composite " "alarms instead." msgstr "" "Kombinationsalarme gelten offiziell als veraltet und sind per Standard " "deaktiviert. Setzen Sie api.enable_combination_alarms auf Wahr, um sie zu " "aktivieren. Bereits vorhandene Alarme werden weiterhin evaluiert, aber die " "Bearbeitung über die API hängt von der Konfigurationsoption ab. Es wird " "empfohlen, Composite Alarme zu verwenden." msgid "The deprecated combination alarms support have been removed." msgstr "Die veralteten Kombinationsalarme wurden entfernt." msgid "Upgrade Notes" msgstr "Aktualisierungsnotizen" msgid "" "[`bug 1504495 `_] Patch " "was added to address inconsistent RBAC policy handling. Certain rules may " "not have been given appropriate access." msgstr "" "[`bug 1504495 `_] Ein " "Patch wurde hinzugefügt, um die inkonsistente RBAC-Richtlinienbehandlung zu " "korrigieren. 
Einige Regeln erhielten nicht den notwendigen Zugriff." msgid "" "[`bug 1511252 `_] Fix an issue " "with combination alarms where it fails to evaluate all issues in the chain " "of alarms." msgstr "" "[`bug 1511252 `_] Problem " "beseitigt bei der Kombination von Alarmen, wobei nicht alle Ereignisse in " "der Alarmkette beachtet wurden." msgid "" "[`bug 1513738 `_] Fix an issue " "where alarms using Gnocchi aggregations are not being evaluated." msgstr "" "[`bug 1513738 `_] Problem " "beseitigt, bei dem Alarme, die Gnocchi aggregations verwenden, nicht " "evaluiert wurden." msgid "" "[`bug 1518447 `_] Fix to " "ensure ceilometerclient is properly initialised on startup." msgstr "" "[`bug 1518447 `_] " "Fehlerkorrektur um sicherzustellen, dass ceilometerclient beim starten " "richtig initialisiert wird." msgid "" "[`bug 1539069 `_] Fix to " "handle scenario where no valid statistics exist for specified period." msgstr "" "[`bug 1539069 `_] " "Fehlerkorrektur für die Handhabung von Szenarien, in denen keine gültigen " "Statistiken für den angegeben Zeitraum existieren." msgid "" "[`bug 1540395 `_] Fix reason " "string to properly handle transitions when one sample is outside of defined " "threshold." msgstr "" "[`bug 1540395 `_] " "Fehlerkorrektur für die Begründungszeichenkette, um Übergänge zwischen " "innerhalb und außerhalb von Schwellwerten richtig zu behandeln." msgid "" "[`bug 1575530 `_] Patch was " "added to fix and improve the partition coordinator, make sure the input " "tasks can be correctly distributed to partition members." msgstr "" "[`bug 1575530 `_] Ein Patch " "wurde hinzugefügt, um den Partition Coordinator zu verbessern und um " "sicherzustellen, dass eingegebene Aufgaben richtig an Partitionsmitglieder " "verteilt werden." msgid "" "[`bug 1582131 `_] Fix an issue " "with adding CA_BUNDLE certificate parth as value of \"verify\" parameter in " "SSL requests." msgstr "" "[`bug 1582131 `_] Problem " "beseitigt beim hinzufügen des CA_BUNDLE Zertifikatpfades als Wert für den " "\"verify\" Parameter in SSL Anfragen." msgid "" "batch_size and batch_timeout configuration options are added to [listener] " "section of configuration. The batch_size controls the number of messages to " "grab before processing. Similarly, the batch_timeout defines the wait time " "before processing." msgstr "" "batch_size und batch_timeout Konfigurationsoptionen wurden dem [listener]-" "Abschnitt der Konfiguration hinzugefügt. batch_size bestimmt die Anzahl der " "Nachrichten, die vor dem Start der Bearbeitung aufgenommen werden. In " "gleicher Weise bestimmt batch_timeout die Wartezeit, bevor die Bearbeitung " "beginnt." msgid "" "batch_size and batch_timeout configuration options are added to [notifier] " "section of configuration. The batch_size controls the number of messages to " "grab before processing. Similarly, the batch_timeout defines the wait time " "before processing." msgstr "" "batch_size und batch_timeout Konfigurationsoptionen wurden dem [notifier]-" "Abschnitt der Konfiguration hinzugefügt. batch_size bestimmt die Anzahl der " "Nachrichten, die vor dem Start der Bearbeitung aufgenommen werden. In " "gleicher Weise bestimmt batch_timeout die Wartezeit, bevor die Bearbeitung " "beginnt." msgid "gnocchiclient library is now a requirement if using ceilometer+gnocchi." msgstr "" "Die gnocchiclient Bibliothek ist jetzt notwendig, wenn ceilometer und " "gnocchi verwendet werden." 
././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727866891.055009 aodh-19.0.0/releasenotes/source/locale/en_GB/0000775000175000017500000000000000000000000020741 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.1230197 aodh-19.0.0/releasenotes/source/locale/en_GB/LC_MESSAGES/0000775000175000017500000000000000000000000022526 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po0000664000175000017500000007041300000000000025564 0ustar00zuulzuul00000000000000# Andi Chandler , 2016. #zanata # Andi Chandler , 2017. #zanata # Andi Chandler , 2018. #zanata # Andi Chandler , 2019. #zanata # Andi Chandler , 2020. #zanata # Andi Chandler , 2021. #zanata # Andi Chandler , 2022. #zanata # Andi Chandler , 2023. #zanata # Andi Chandler , 2024. #zanata msgid "" msgstr "" "Project-Id-Version: aodh\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2024-04-03 11:11+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2024-04-14 06:48+0000\n" "Last-Translator: Andi Chandler \n" "Language-Team: English (United Kingdom)\n" "Language: en_GB\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" msgid "1.1.1" msgstr "1.1.1" msgid "1.1.3" msgstr "1.1.3" msgid "10.0.0" msgstr "10.0.0" msgid "12.0.0" msgstr "12.0.0" msgid "13.0.0" msgstr "13.0.0" msgid "14.0.0" msgstr "14.0.0" msgid "15.0.0" msgstr "15.0.0" msgid "2.0.0" msgstr "2.0.0" msgid "2.0.1" msgstr "2.0.1" msgid "2.0.2" msgstr "2.0.2" msgid "2023.1 Series Release Notes" msgstr "2023.1 Series Release Notes" msgid "2023.2 Series Release Notes" msgstr "2023.2 Series Release Notes" msgid "2024.1 Series Release Notes" msgstr "2024.1 Series Release Notes" msgid "3.0.0" msgstr "3.0.0" msgid "3.0.3" msgstr "3.0.3" msgid "4.0.0" msgstr "4.0.0" msgid "4.0.1" msgstr "4.0.1" msgid "5.0.0" msgstr "5.0.0" msgid "6.0.0" msgstr "6.0.0" msgid "9.0.0" msgstr "9.0.0" msgid "" "A healthcheck endpoint is provided by default at /healthcheck. It leverages " "oslo_middleware healthcheck middleware. It allows to retrieve information " "about the health of the API service." msgstr "" "A healthcheck endpoint is provided by default at /healthcheck. It leverages " "oslo_middleware healthcheck middleware. It allows you to retrieve " "information about the health of the API service." msgid "" "A new ``alarm_histories_delete_bacth_size`` option has been added to limit a " "number of alarm histories deleted from the database by aodh-expirer in a " "single iteration. This parameter is useful when there are a lot of alarm " "histories in the database." msgstr "" "A new ``alarm_histories_delete_bacth_size`` option has been added to limit " "the number of alarm histories deleted from the database by aodh-expirer in a " "single iteration. This parameter is useful when there are a lot of alarm " "histories in the database." msgid "" "A new default policy.json is provided to properly handle RBAC control. " "Existing policy.json files may not grant the appropriate access." msgstr "" "A new default policy.json is provided to properly handle RBAC control. " "Existing policy.json files may not grant the appropriate access." 
msgid "" "A new option “rest_notifier_ca_bundle_certificate_path” has been added in " "the configuration file, set None as default value. If this option is present " "and SSL is used for alarm action the certificate path provided will be used " "as value of verify parameter in action request." msgstr "" "A new option “rest_notifier_ca_bundle_certificate_path” has been added in " "the configuration file, set None as default value. If this option is present " "and SSL is used for alarm action the certificate path provided will be used " "as value of verify parameter in action request." msgid "" "Add a new composite type alarm, which allow users specifying a composite " "rule to define an alarm with multiple triggering conditions, using a " "combination of *and*, *or* relationships. The composite rule is composed of " "multiple threshold rules or gnocchi rules." msgstr "" "Add a new composite type alarm, which allow users specifying a composite " "rule to define an alarm with multiple triggering conditions, using a " "combination of *and*, *or* relationships. The composite rule is composed of " "multiple threshold rules or Gnocchi rules." msgid "" "Add a tool for converting combination alarms to composite alarms, since we " "have deprecated the combination alarm support and recommend to use composite " "alarm to perform multiple conditions alarming." msgstr "" "Add a tool for converting combination alarms to composite alarms, since we " "have deprecated the combination alarm support and recommend to use composite " "alarm to perform multiple conditions alarming." msgid "" "Add a tool for migrating alarm and alarm history data from NoSQL storage to " "SQL storage. The migration tool has been tested OK in devstack environment, " "but users need to be cautious with this, because the data migration between " "storage backends is a bit dangerous." msgstr "" "Add a tool for migrating alarm and alarm history data from NoSQL storage to " "SQL storage. The migration tool has been tested OK in Devstack environment, " "but users need to be cautious with this, because the data migration between " "storage backends is a bit dangerous." msgid "Add support for Keystone v3 authentication" msgstr "Add support for Keystone v3 authentication" msgid "" "Add support for batch processing of messages from queue. This will allow the " "aodh-listener to grab multiple event messages per thread to enable more " "efficient processing." msgstr "" "Add support for batch processing of messages from queue. This will allow the " "aodh-listener to grab multiple event messages per thread to enable more " "efficient processing." msgid "" "Add support for batch processing of messages from queue. This will allow the " "aodh-notifier to grab multiple messages per thread to enable more efficient " "processing." msgstr "" "Add support for batch processing of messages from queue. This will allow the " "aodh-notifier to grab multiple messages per thread to enable more efficient " "processing." msgid "" "Added a new evaluator for the alarms of type ``loadbalancer_member_health`` " "which evaluates the alarm by checking the operating status of the members in " "a given load balancer pool." msgstr "" "Added a new evaluator for the alarms of type ``loadbalancer_member_health`` " "which evaluates the alarm by checking the operating status of the members in " "a given load balancer pool." msgid "" "Added a new notifier(``trust+heat``) that works together with " "``loadbalancer_member_health`` evaluator for auto-healing purpose." 
msgstr "" "Added a new notifier(``trust+heat``) that works together with " "``loadbalancer_member_health`` evaluator for auto-healing purpose." msgid "Added new tool ``aodh-status upgrade check``." msgstr "Added new tool ``aodh-status upgrade check``." msgid "Alarm name unique constraint for each project has been removed." msgstr "Alarm name unique constraint for each project has been removed." msgid "All the deprecated non-SQL drivers have been removed." msgstr "All the deprecated non-SQL drivers have been removed." msgid "" "Allow to extends the alarm evaluation windows to compensate the reporting/" "ingestion lag. An new option is introduced additional_ingestion_lag " "defaulted to 0. It represents the number of seconds of the window extension." msgstr "" "Allow you to extends the alarm evaluation windows to compensate the " "reporting/ingestion lag. An new option is introduced " "additional_ingestion_lag defaulted to 0. It represents the number of seconds " "of the window extension." msgid "" "Aodh Quota API is aiming for multi-tenancy support. By default, only the " "admin user is able to change or delete the resource quota for projects as " "defined by the default policy rule 'telemetry:update_quotas' and 'telemetry:" "delete_quotas'. User alarm quota and alarm action quota are not supported in " "Quota API." msgstr "" "Aodh Quota API is aiming for multi-tenancy support. By default, only the " "admin user is able to change or delete the resource quota for projects as " "defined by the default policy rule 'telemetry:update_quotas' and 'telemetry:" "delete_quotas'. User alarm quota and alarm action quota are not supported in " "Quota API." msgid "Aodh Release Notes" msgstr "Aodh Release Notes" msgid "" "Aodh now leverages microseconds timestamps available since MySQL 5.6.4, " "meaning it is now the minimum required version of MySQL." msgstr "" "Aodh now leverages microseconds timestamps available since MySQL 5.6.4, " "meaning it is now the minimum required version of MySQL." msgid "" "Aodh now support policy in code, which means if users didn't modify any of " "policy rules, they can leave policy file (in `json` or `yaml` format) empty " "or not deploy it at all. Because from now, Aodh keeps all default policies " "under `aodh/api/policies` module. Users can still modify/generate `policy." "yaml` file which will override policy rules in code if those rules show in " "`policy.yaml` file." msgstr "" "Aodh now support policy in code, which means if users didn't modify any of " "policy rules, they can leave policy file (in `json` or `yaml` format) empty " "or not deploy it at all. Because from now, Aodh keeps all default policies " "under `aodh/api/policies` module. Users can still modify/generate `policy." "yaml` file which will override policy rules in code if those rules show in " "`policy.yaml` file." msgid "" "Aodh now supports generation of Guru Meditation Reports using oslo.reports " "library. Each service prints a report output when it receives SIGUSR1." msgstr "" "Aodh now supports the generation of Guru Meditation Reports using oslo." "reports library. Each service prints a report output when it receives " "SIGUSR1." msgid "" "Because queues provide the equivalent functionality. RPC support is " "deprecated and will be removed after Mitaka." msgstr "" "Because queues provide the equivalent functionality. RPC support is " "deprecated and will be removed after Mitaka." msgid "Bug Fixes" msgstr "Bug Fixes" msgid "" "Ceilometer's API is deprecated in Ocata. 
Therefore, threshold alarms are now " "deprecated as well. Threshold rules will be removed when Ceilometer's API is " "also removed. Similar functionality is provided through Gnocchi alarm rules: " "``gnocchi_resources_threshold``, " "``gnocchi_aggregation_by_metrics_threshold``, or " "``gnocchi_aggregation_by_resources_threshold``." msgstr "" "Ceilometer's API is deprecated in Ocata. Therefore, threshold alarms are now " "deprecated as well. Threshold rules will be removed when Ceilometer's API is " "also removed. Similar functionality is provided through Gnocchi alarm rules: " "``gnocchi_resources_threshold``, " "``gnocchi_aggregation_by_metrics_threshold``, or " "``gnocchi_aggregation_by_resources_threshold``." msgid "Current Series Release Notes" msgstr "Current Series Release Notes" msgid "" "Default `policy.json` file is now removed as Aodh now generate the default " "policies from code. Please be aware that when using that file in your " "environment." msgstr "" "Default `policy.json` file is now removed as Aodh now generate the default " "policies from code. Please be aware that when using that file in your " "environment." msgid "Deprecation Notes" msgstr "Deprecation Notes" msgid "" "Drop support for NoSQL backends in Aodh. SQL is a prefectly sufficient " "backend for handling the scope of alarms. To maximise available resources, " "NoSQL backends are deprecated so developers do not need to worry about " "adding features to multiple backends." msgstr "" "Drop support for NoSQL backends in Aodh. SQL is a perfectly sufficient " "backend for handling the scope of alarms. To maximise available resources, " "NoSQL backends are deprecated so developers do not need to worry about " "adding features to multiple backends." msgid "" "Enable aodh services, including aodh-evaluator, aodh-listener and aodh-" "notifier to run in multiple worker mode. New options are introduced " "corresponsively as [evaluator]workers, [listener]workers and " "[notifier]workers. They all default to 1." msgstr "" "Enable aodh services, including aodh-evaluator, aodh-listener and aodh-" "notifier to run in multiple worker mode. New options are introduced " "correspondingly as [evaluator]workers, [listener]workers and " "[notifier]workers. They all default to 1." msgid "" "Gnocchi aggregation capabilities are now cached to minimise redundant calls " "to Gnocchi when validating aggregation methods. The cache is stored in-" "memory for an hour. If additional aggregations are added to Gnocchi, they " "will not be proprogated to Aodh's API service for at most an hour or unless " "the service is restarted." msgstr "" "Gnocchi aggregation capabilities are now cached to minimise redundant calls " "to Gnocchi when validating aggregation methods. The cache is stored in-" "memory for an hour. If additional aggregations are added to Gnocchi, they " "will not be proprogated to Aodh's API service for at most an hour or unless " "the service is restarted." msgid "Gnocchi dispatcher now uses client rather than direct http requests" msgstr "Gnocchi dispatcher now uses client rather than direct http requests" msgid "Liberty Series Release Notes" msgstr "Liberty Series Release Notes" msgid "Mitaka Series Release Notes" msgstr "Mitaka Series Release Notes" msgid "New Features" msgstr "New Features" msgid "" "New framework for ``aodh-status upgrade check`` command is added. This " "framework allows adding various checks which can be run before a Aodh " "upgrade to ensure if the upgrade can be performed safely." 
msgstr "" "New framework for ``aodh-status upgrade check`` command is added. This " "framework allows adding various checks which can be run before a Aodh " "upgrade to ensure if the upgrade can be performed safely." msgid "Newton Series Release Notes" msgstr "Newton Series Release Notes" msgid "" "Now the ``aodh-api`` service look for the paste config file (a.k.a. ``api-" "paste.ini`` from configruation directories like ``/etc/aodh``. If the file " "is not found in the configuration directories, it uses the default file. To " "use only a specific file, use a full file path for the ``[api] " "paste_confing`` parameter" msgstr "" "Now the ``aodh-api`` service look for the paste config file (a.k.a. ``api-" "paste.ini`` from configruation directories like ``/etc/aodh``. If the file " "is not found in the configuration directories, it uses the default file. To " "use only a specific file, use a full file path for the ``[api] " "paste_confing`` parameter" msgid "Ocata Series Release Notes" msgstr "Ocata Series Release Notes" msgid "" "Operator can now use new CLI tool ``aodh-status upgrade check`` to check if " "Aodh deployment can be safely upgraded from N-1 to N release." msgstr "" "Operator can now use new CLI tool ``aodh-status upgrade check`` to check if " "Aodh deployment can be safely upgraded from N-1 to N release." msgid "Other Notes" msgstr "Other Notes" msgid "" "Patch was added to address inconsistent RBAC policy handling. Certain rules " "may not have been given appropriate access." msgstr "" "Patch was added to address inconsistent RBAC policy handling. Certain rules " "may not have been given appropriate access." msgid "Pike Series Release Notes" msgstr "Pike Series Release Notes" msgid "Prelude" msgstr "Prelude" msgid "" "Python 2.7 support has been dropped. Last release of Aodh to support py2.7 " "is OpenStack Train. The minimum version of Python now supported by Aodh is " "Python 3.6." msgstr "" "Python 2.7 support has been dropped. Last release of Aodh to support py2.7 " "is OpenStack Train. The minimum version of Python now supported by Aodh is " "Python 3.6." msgid "" "Python 3.6 & 3.7 support has been dropped. The minimum version of Python now " "supported is Python 3.8." msgstr "" "Python 3.6 & 3.7 support has been dropped. The minimum version of Python now " "supported is Python 3.8." msgid "Queens Series Release Notes" msgstr "Queens Series Release Notes" msgid "" "Queue based communication is the new default IPC protocol. RPC can still be " "used by choosing rpc as ipc_protocol option. Only one protocol can be run at " "any given time." msgstr "" "Queue based communication is the new default IPC protocol. RPC can still be " "used by choosing rpc as ipc_protocol option. Only one protocol can be run at " "any given time." msgid "Remove eventlet from Aodh in favour of threaded approach" msgstr "Remove eventlet from Aodh in favour of threaded approach" msgid "Rocky Series Release Notes" msgstr "Rocky Series Release Notes" msgid "Security Issues" msgstr "Security Issues" msgid "Start using reno to manage release notes." msgstr "Start using reno to manage release notes." msgid "Stein Series Release Notes" msgstr "Stein Series Release Notes" msgid "" "Support for queue based communication between alarm evaluator service and " "alarm notifier services was added. Original implementation involved using " "RPC but there is significant overhead involved with using RPC. Work queues " "provided required functionality with better performance." 
msgstr "" "Support for queue based communication between alarm evaluator service and " "alarm notifier services was added. Original implementation involved using " "RPC but there is significant overhead involved with using RPC. Work queues " "provided required functionality with better performance." msgid "" "Support to deploy aodh-evaluator in active/active mode by leveraging " "database non-locking mechanism. With this feature, there could be multiple " "aodh-evaluator processes running without dependency of etcd or zookeeper." msgstr "" "Support to deploy aodh-evaluator in active/active mode by leveraging " "database non-locking mechanism. With this feature, there could be multiple " "aodh-evaluator processes running without dependency of etcd or ZooKeeper." msgid "" "The ``[DEFAULT] evaluation_interval`` parameter has been migrated to the " "``[evaluator]`` section. The old parameter is still kept for backword " "compatibility but will be removed in a future release." msgstr "" "The ``[DEFAULT] evaluation_interval`` parameter has been migrated to the " "``[evaluator]`` section. The old parameter is still kept for backwards " "compatibility but will be removed in a future release." msgid "" "The ``[DEFAULT] http_timeout`` parameter has been deprecated. This parameter " "has been unused thus has had no effect." msgstr "" "The ``[DEFAULT] http_timeout`` parameter has been deprecated. This parameter " "has been unused and thus has had no effect." msgid "" "The ``[coordination] check_watchers`` parameter has been deprecated since it " "has been ineffective." msgstr "" "The ``[coordination] check_watchers`` parameter has been deprecated since it " "has been ineffective." msgid "The ``[coordination] check_watchers`` parameter has been removed." msgstr "The ``[coordination] check_watchers`` parameter has been removed." msgid "" "The ``[coordination] heartbeat`` parameter has been renamed to the " "``[coordination] heartbeat_interval``. The old ``[coordination] heartbeat`` " "parameter is deprecated and will be removed in a future release." msgstr "" "The ``[coordination] heartbeat`` parameter has been renamed to the " "``[coordination] heartbeat_interval``. The old ``[coordination] heartbeat`` " "parameter is deprecated and will be removed in a future release." msgid "" "The admin user can fetch alarms of all the projects, e.g. ``curl -X GET " "\"${aodh_prefix}/v2/alarms?q.field=all_projects&q.op=eq&q.value=true\" X-" "Auth-Token:$token``" msgstr "" "The admin user can fetch alarms of all the projects, e.g. ``curl -X GET " "\"${aodh_prefix}/v2/alarms?q.field=all_projects&q.op=eq&q.value=true\" X-" "Auth-Token:$token``" msgid "The api.pecan_debug option has been removed." msgstr "The api.pecan_debug option has been removed." msgid "" "The combination alarms are officially deprecated and disabled by default. " "Set api.enable_combination_alarms to True to enable them. Existing alarms " "will still be evaluated, but access to them via the API is linked to whether " "that configuration option is turned on or off. It's advised to use composite " "alarms instead." msgstr "" "The combination alarms are officially deprecated and disabled by default. " "Set api.enable_combination_alarms to True to enable them. Existing alarms " "will still be evaluated, but access to them via the API is linked to whether " "that configuration option is turned on or off. It's advised to use composite " "alarms instead." 
msgid "" "The default value of ``[oslo_policy] policy_file`` config option has been " "changed from ``policy.json`` to ``policy.yaml``. Operators who are utilizing " "customized or previously generated static policy JSON files (which are not " "needed by default), should generate new policy files or convert them in YAML " "format. Use the `oslopolicy-convert-json-to-yaml `_ tool to " "convert a JSON to YAML formatted policy file in backward compatible way." msgstr "" "The default value of ``[oslo_policy] policy_file`` config option has been " "changed from ``policy.json`` to ``policy.yaml``. Operators who are utilizing " "customised or previously generated static policy JSON files (which are not " "needed by default), should generate new policy files or convert them in YAML " "format. Use the `oslopolicy-convert-json-to-yaml `_ tool to " "convert a JSON to YAML formatted policy file in backward compatible way." msgid "The deprecated 'threshold' alarm type has been removed." msgstr "The deprecated 'threshold' alarm type has been removed." msgid "The deprecated combination alarms support have been removed." msgstr "The deprecated combination alarms support have been removed." msgid "" "The reason of the state change is now part of the API as \"state_reason\" " "field of the alarm object." msgstr "" "The reason of the state change is now part of the API as \"state_reason\" " "field of the alarm object." msgid "Train Series Release Notes" msgstr "Train Series Release Notes" msgid "Upgrade Notes" msgstr "Upgrade Notes" msgid "" "Use of JSON policy files was deprecated by the ``oslo.policy`` library " "during the Victoria development cycle. As a result, this deprecation is " "being noted in the Wallaby cycle with an anticipated future removal of " "support by ``oslo.policy``. As such operators will need to convert to YAML " "policy files. Please see the upgrade notes for details on migration of any " "custom policy files." msgstr "" "Use of JSON policy files was deprecated by the ``oslo.policy`` library " "during the Victoria development cycle. As a result, this deprecation is " "being noted in the Wallaby cycle with an anticipated future removal of " "support by ``oslo.policy``. As such operators will need to convert to YAML " "policy files. Please see the upgrade notes for details on migration of any " "custom policy files." msgid "Ussuri Series Release Notes" msgstr "Ussuri Series Release Notes" msgid "Victoria Series Release Notes" msgstr "Victoria Series Release Notes" msgid "Wallaby Series Release Notes" msgstr "Wallaby Series Release Notes" msgid "" "When an unprivileged user want to access to Gnocchi resources created by " "Ceilometer, that doesn't work because the filter scope the Gnocchi query to " "resource owner to the user. To fix we introduce a new configuration option " "\"gnocchi_external_project_owner\" set by default to \"service\". The new " "filter now allow two kind of Gnocchi resources:" msgstr "" "When an unprivileged user want to access to Gnocchi resources created by " "Ceilometer, that doesn't work because the filter scope the Gnocchi query to " "resource owner to the user. To fix we introduce a new configuration option " "\"gnocchi_external_project_owner\" set by default to \"service\". 
The new " "filter now allow two kind of Gnocchi resources:" msgid "Xena Series Release Notes" msgstr "Xena Series Release Notes" msgid "Yoga Series Release Notes" msgstr "Yoga Series Release Notes" msgid "Zed Series Release Notes" msgstr "Zed Series Release Notes" msgid "" "[`bug 1504495 `_] Patch " "was added to address inconsistent RBAC policy handling. Certain rules may " "not have been given appropriate access." msgstr "" "[`bug 1504495 `_] Patch " "was added to address inconsistent RBAC policy handling. Certain rules may " "not have been given appropriate access." msgid "" "[`bug 1511252 `_] Fix an issue " "with combination alarms where it fails to evaluate all issues in the chain " "of alarms." msgstr "" "[`bug 1511252 `_] Fix an issue " "with combination alarms where it fails to evaluate all issues in the chain " "of alarms." msgid "" "[`bug 1513738 `_] Fix an issue " "where alarms using Gnocchi aggregations are not being evaluated." msgstr "" "[`bug 1513738 `_] Fix an issue " "where alarms using Gnocchi aggregations are not being evaluated." msgid "" "[`bug 1518447 `_] Fix to " "ensure ceilometerclient is properly initialised on startup." msgstr "" "[`bug 1518447 `_] Fix to " "ensure Ceilometer client is properly initialised on startup." msgid "" "[`bug 1539069 `_] Fix to " "handle scenario where no valid statistics exist for specified period." msgstr "" "[`bug 1539069 `_] Fix to " "handle scenario where no valid statistics exist for specified period." msgid "" "[`bug 1540395 `_] Fix reason " "string to properly handle transitions when one sample is outside of defined " "threshold." msgstr "" "[`bug 1540395 `_] Fix reason " "string to properly handle transitions when one sample is outside of defined " "threshold." msgid "" "[`bug 1575530 `_] Patch was " "added to fix and improve the partition coordinator, make sure the input " "tasks can be correctly distributed to partition members." msgstr "" "[`bug 1575530 `_] Patch was " "added to fix and improve the partition coordinator, make sure the input " "tasks can be correctly distributed to partition members." msgid "" "[`bug 1582131 `_] Fix an issue " "with adding CA_BUNDLE certificate parth as value of \"verify\" parameter in " "SSL requests." msgstr "" "[`bug 1582131 `_] Fix an issue " "with adding CA_BUNDLE certificate path as value of \"verify\" parameter in " "SSL requests." msgid "" "batch_size and batch_timeout configuration options are added to [listener] " "section of configuration. The batch_size controls the number of messages to " "grab before processing. Similarly, the batch_timeout defines the wait time " "before processing." msgstr "" "batch_size and batch_timeout configuration options are added to [listener] " "section of configuration. The batch_size controls the number of messages to " "grab before processing. Similarly, the batch_timeout defines the wait time " "before processing." msgid "" "batch_size and batch_timeout configuration options are added to [notifier] " "section of configuration. The batch_size controls the number of messages to " "grab before processing. Similarly, the batch_timeout defines the wait time " "before processing." msgstr "" "batch_size and batch_timeout configuration options are added to [notifier] " "section of configuration. The batch_size controls the number of messages to " "grab before processing. Similarly, the batch_timeout defines the wait time " "before processing." msgid "gnocchiclient library is now a requirement if using ceilometer+gnocchi." 
msgstr "" "gnocchiclient library is now a requirement if using ceilometer+gnocchi." msgid "" "owned by \"gnocchi_external_project_owner\" and the original project_id of " "the resource is the user project." msgstr "" "owned by \"gnocchi_external_project_owner\" and the original project_id of " "the resource is the user project." msgid "" "owned by \"gnocchi_external_project_owner\" and the orignal project_id of " "the resource is the user project." msgstr "" "owned by \"gnocchi_external_project_owner\" and the original project_id of " "the resource is the user project." msgid "owned by the user project" msgstr "owned by the user project" ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727866891.055009 aodh-19.0.0/releasenotes/source/locale/fr/0000775000175000017500000000000000000000000020376 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.1230197 aodh-19.0.0/releasenotes/source/locale/fr/LC_MESSAGES/0000775000175000017500000000000000000000000022163 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po0000664000175000017500000000254000000000000025215 0ustar00zuulzuul00000000000000# Gérald LONLAS , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: Aodh Release Notes 5.0.1\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2017-10-15 20:27+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-10-22 05:18+0000\n" "Last-Translator: Gérald LONLAS \n" "Language-Team: French\n" "Language: fr\n" "X-Generator: Zanata 3.9.6\n" "Plural-Forms: nplurals=2; plural=(n > 1)\n" msgid "1.1.1" msgstr "1.1.1" msgid "1.1.3" msgstr "1.1.3" msgid "2.0.0" msgstr "2.0.0" msgid "2.0.1" msgstr "2.0.1" msgid "2.0.2" msgstr "2.0.2" msgid "3.0.0" msgstr "3.0.0" msgid "Aodh Release Notes" msgstr "Note de release de Aodh" msgid "Bug Fixes" msgstr "Corrections de bugs" msgid "Current Series Release Notes" msgstr "Note de la release actuelle" msgid "Deprecation Notes" msgstr "Notes dépréciées " msgid "Liberty Series Release Notes" msgstr "Note de release pour Liberty" msgid "Mitaka Series Release Notes" msgstr "Note de release pour Mitaka" msgid "New Features" msgstr "Nouvelles fonctionnalités" msgid "Newton Series Release Notes" msgstr "Note de release pour Newton" msgid "Other Notes" msgstr "Autres notes" msgid "Security Issues" msgstr "Problèmes de sécurités" msgid "Upgrade Notes" msgstr "Notes de mises à jours" ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727866891.055009 aodh-19.0.0/releasenotes/source/locale/ja/0000775000175000017500000000000000000000000020361 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.1230197 aodh-19.0.0/releasenotes/source/locale/ja/LC_MESSAGES/0000775000175000017500000000000000000000000022146 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/source/locale/ja/LC_MESSAGES/releasenotes.po0000664000175000017500000004165300000000000025210 0ustar00zuulzuul00000000000000# Akihiro Motoki , 2016. #zanata # Shu Muto , 2017. #zanata # Takeru Ishibashi , 2020. 
#zanata msgid "" msgstr "" "Project-Id-Version: aodh\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2020-01-31 02:18+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2020-02-01 05:46+0000\n" "Last-Translator: Takeru Ishibashi \n" "Language-Team: Japanese\n" "Language: ja\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=1; plural=0\n" msgid "1.1.1" msgstr "1.1.1" msgid "2.0.0" msgstr "2.0.0" msgid "2.0.1" msgstr "2.0.1" msgid "2.0.2" msgstr "2.0.2" msgid "3.0.0" msgstr "3.0.0" msgid "3.0.3" msgstr "3.0.3" msgid "4.0.0" msgstr "4.0.0" msgid "" "A healthcheck endpoint is provided by default at /healthcheck. It leverages " "oslo_middleware healthcheck middleware. It allows to retrieve information " "about the health of the API service." msgstr "" "ヘルスチェックのエンドポイントは、デフォルトで /healthcheck に用意されていま" "す。 oslo_middleware の healthcheck ミドルウェアを活用しています。 API サービ" "スの正常性に関する情報を取得することができます。" msgid "" "A new default policy.json is provided to properly handle RBAC control. " "Existing policy.json files may not grant the appropriate access." msgstr "" "新しいデフォルトの policy.json が RBAC 制御を適切に処理するために提供されてい" "ます。既存の policy.json ファイルが適切なアクセスを許可していない可能性があり" "ます。" msgid "" "A new option “rest_notifier_ca_bundle_certificate_path” has been added in " "the configuration file, set None as default value. If this option is present " "and SSL is used for alarm action the certificate path provided will be used " "as value of verify parameter in action request." msgstr "" "新しいオプション \"rest_notifier_ca_bundle_certificate_path\" が設定ファイル" "に追加されました。デフォルト値として None が設定されています。このオプション" "が存在し、アラームアクションに SSL が使用されている場合、提供された証明書パス" "はアクション要求の検証パラメータの値として使用されます。" msgid "" "Add a new composite type alarm, which allow users specifying a composite " "rule to define an alarm with multiple triggering conditions, using a " "combination of *and*, *or* relationships. The composite rule is composed of " "multiple threshold rules or gnocchi rules." msgstr "" "新しい複合タイプのアラームを追加します。複合ルールを指定すると、 *and* と " "*or* の関係を組み合わせて複数のトリガー条件を持つアラームを定義できます。複合" "ルールは、複数のしきい値ルールや gnocchi ルールで構成されます。" msgid "" "Add a tool for converting combination alarms to composite alarms, since we " "have deprecated the combination alarm support and recommend to use composite " "alarm to perform multiple conditions alarming." msgstr "" "コンビネーションアラームのサポートを非推奨とし、複合アラームを使用して複数の" "状態のアラームを挙げることを推奨しているため、コンビネーションアラームを複合" "アラームに変換するためのツールを追加しました。" msgid "" "Add a tool for migrating alarm and alarm history data from NoSQL storage to " "SQL storage. The migration tool has been tested OK in devstack environment, " "but users need to be cautious with this, because the data migration between " "storage backends is a bit dangerous." msgstr "" "NoSQL ストレージから SQL ストレージにアラームおよびアラーム履歴データを移行す" "るためのツールを追加しました。移行ツールは devstack 環境で正常にテストされて" "いますが、ストレージバックエンド間のデータ移行は少し危険なので、ユーザーはこ" "れに注意する必要があります。" msgid "Add support for Keystone v3 authentication" msgstr "Keystone v3 認証のサポートを追加しました。" msgid "" "Add support for batch processing of messages from queue. This will allow the " "aodh-listener to grab multiple event messages per thread to enable more " "efficient processing." msgstr "" "キューからのメッセージのバッチ処理をサポートしました。これにより、 aodh-" "listener はスレッドごとに複数のメッセージを取得して、より効率的に処理します。" msgid "" "Add support for batch processing of messages from queue. This will allow the " "aodh-notifier to grab multiple messages per thread to enable more efficient " "processing." 
msgstr "" "キューからのメッセージのバッチ処理をサポートしました。これにより、 aodh-" "notifier はスレッドごとに複数のメッセージを取得して、より効率的に処理します。" msgid "Alarm name unique constraint for each project has been removed." msgstr "プロジェクト毎のアラーム名の一意成約を削除しました。" msgid "All the deprecated non-SQL drivers have been removed." msgstr "すべての非推奨の non-SQL ドライバーを削除しました。" msgid "" "Allow to extends the alarm evaluation windows to compensate the reporting/" "ingestion lag. An new option is introduced additional_ingestion_lag " "defaulted to 0. It represents the number of seconds of the window extension." msgstr "" "報告や取得の遅れを補うためにアラーム評価のウィンドウを拡張することができま" "す。新しいオプション additional_ingestion_lag が導入され、デフォルトは 0 で" "す。これは、ウィンドウの拡張を秒数で表します。" msgid "Aodh Release Notes" msgstr "Aodh リリースノート" msgid "" "Aodh now leverages microseconds timestamps available since MySQL 5.6.4, " "meaning it is now the minimum required version of MySQL." msgstr "" "Aodh は現在、MySQL 5.6.4 から利用可能なマイクロ秒のタイムスタンプを利用してい" "ます。つまり、それが MySQL の最低限必要なバージョンです。" msgid "" "Because queues provide the equivalent functionality. RPC support is " "deprecated and will be removed after Mitaka." msgstr "" "キューが同等の機能を提供するため、 RPC サポートは非推奨となり、 Mitaka の後に" "削除されます。" msgid "Bug Fixes" msgstr "バグ修正" msgid "Current Series Release Notes" msgstr "開発中バージョンのリリースノート" msgid "Deprecation Notes" msgstr "廃止予定の機能" msgid "" "Drop support for NoSQL backends in Aodh. SQL is a prefectly sufficient " "backend for handling the scope of alarms. To maximise available resources, " "NoSQL backends are deprecated so developers do not need to worry about " "adding features to multiple backends." msgstr "" "Aodh での NoSQL バックエンドのサポートを中止します。 SQL は、アラームのスコー" "プを処理するのに十分なバックエンドです。利用可能なリソースを最大限に活用する" "ため、 NoSQL バックエンドは非推奨としました。開発者は複数のバックエンドに機能" "を追加する必要はありません。" msgid "" "Enable aodh services, including aodh-evaluator, aodh-listener and aodh-" "notifier to run in multiple worker mode. New options are introduced " "corresponsively as [evaluator]workers, [listener]workers and " "[notifier]workers. They all default to 1." msgstr "" "aodh-evaluator 、 aodh-listener 、および aodh-notifier を含む aodh サービスを" "複数のワーカーモードで実行できるようにしました。新しいオプションは、 " "[evaluator] workers 、[listener] workers 、[notifier] workers として対応して" "導入されています。 それらのデフォルトはすべて 1 です。" msgid "" "Gnocchi aggregation capabilities are now cached to minimise redundant calls " "to Gnocchi when validating aggregation methods. The cache is stored in-" "memory for an hour. If additional aggregations are added to Gnocchi, they " "will not be proprogated to Aodh's API service for at most an hour or unless " "the service is restarted." msgstr "" "Gnocchi 集計機能は、集計メソッドの検証時に Gnocchi への冗長な呼び出しを最小限" "に抑えるようキャッシュされるようになりました。キャッシュはメモリに1時間格納" "されます。追加の集計が Gnocchi に追加された場合、最大1時間、またはサービスが" "再起動されない限り、Aodh の API サービスに伝播されることはありません。" msgid "Gnocchi dispatcher now uses client rather than direct http requests" msgstr "" "Gnocchi のディスパッチャーは、直接 http リクエストを使わず、クライアントを使" "用するようになりました。" msgid "Liberty Series Release Notes" msgstr "Liberty バージョンのリリースノート" msgid "Mitaka Series Release Notes" msgstr "Mitaka バージョンのリリースノート" msgid "New Features" msgstr "新機能" msgid "Newton Series Release Notes" msgstr "Newton バージョンのリリースノート" msgid "Ocata Series Release Notes" msgstr "Ocata バージョンのリリースノート" msgid "Other Notes" msgstr "その他の注意点" msgid "" "Patch was added to address inconsistent RBAC policy handling. Certain rules " "may not have been given appropriate access." msgstr "" "矛盾した RBAC ポリシー処理に対処するためのパッチが追加されました。特定のルー" "ルに適切なアクセス権が与えられていない可能性があります。" msgid "" "Queue based communication is the new default IPC protocol. 
RPC can still be " "used by choosing rpc as ipc_protocol option. Only one protocol can be run at " "any given time." msgstr "" "キューベースの通信は、新しいデフォルトの IPC プロトコルです。 RPC は " "ipc_protocol オプションとして rpc を選択することによって引き続き使用できま" "す。一度に実行できるプロトコルは1つのみです。" msgid "Remove eventlet from Aodh in favour of threaded approach" msgstr "Aodh からイベントレットを削除し、スレッド化するアプローチにします。" msgid "Security Issues" msgstr "セキュリティー上の問題" msgid "Start using reno to manage release notes." msgstr "リリースノートの管理に reno を使い始めました。" msgid "" "Support for queue based communication between alarm evaluator service and " "alarm notifier services was added. Original implementation involved using " "RPC but there is significant overhead involved with using RPC. Work queues " "provided required functionality with better performance." msgstr "" "アラーム評価サービスとアラーム通知サービスの間のキューベースの通信のサポート" "が追加されました。 元の実装では RPC を使用していましたが、 RPC の使用にはかな" "りのオーバーヘッドがあります。 作業キューは、より良いパフォーマンスで必要な機" "能を提供します。" msgid "The api.pecan_debug option has been removed." msgstr "api.pecan_debug オプションを削除しました。" msgid "" "The combination alarms are officially deprecated and disabled by default. " "Set api.enable_combination_alarms to True to enable them. Existing alarms " "will still be evaluated, but access to them via the API is linked to whether " "that configuration option is turned on or off. It's advised to use composite " "alarms instead." msgstr "" "コンビネーションアラームは公式に非推奨となり、デフォルトでは無効になっていま" "す。 有効にするには、 api.enable_combination_alarms を True に設定します。既" "存のアラームは引き続き評価されますが、 API 経由でのアクセスはその設定オプショ" "ンのオン/オフによります。代わりに複合アラームを使用することをお勧めします。" msgid "The deprecated combination alarms support have been removed." msgstr "非推奨となっていたコンビネーションアラームを削除しました。" msgid "Upgrade Notes" msgstr "アップグレード時の注意" msgid "" "[`bug 1504495 `_] Patch " "was added to address inconsistent RBAC policy handling. Certain rules may " "not have been given appropriate access." msgstr "" "[`bug 1504495 `_] 矛盾し" "た RBAC ポリシー処理に対処するためのパッチが追加されました。特定のルールに適" "切なアクセス権が与えられていない可能性があります。" msgid "" "[`bug 1511252 `_] Fix an issue " "with combination alarms where it fails to evaluate all issues in the chain " "of alarms." msgstr "" "[`bug 1511252 `_] コンビネー" "ションアラームのチェーン内のすべてのアラームを評価できない問題を修正しまし" "た。" msgid "" "[`bug 1513738 `_] Fix an issue " "where alarms using Gnocchi aggregations are not being evaluated." msgstr "" "[`bug 1513738 `_] Gnocchi の集" "計を使用したアラームが評価されない問題を修正しました。" msgid "" "[`bug 1518447 `_] Fix to " "ensure ceilometerclient is properly initialised on startup." msgstr "" "[`bug 1518447 `_] " "ceilometerclient が起動時に適切に初期化されるようにしました。" msgid "" "[`bug 1539069 `_] Fix to " "handle scenario where no valid statistics exist for specified period." msgstr "" "[`bug 1539069 `_] 指定した期間" "に有効な統計が存在しないシナリオの処理を修正しました。" msgid "" "[`bug 1540395 `_] Fix reason " "string to properly handle transitions when one sample is outside of defined " "threshold." msgstr "" "[`bug 1540395 `_] 定義した閾値" "の外側にある1つのサンプルが翻訳を適切に処理するよう、理由の文字列を修正しま" "した。" msgid "" "[`bug 1575530 `_] Patch was " "added to fix and improve the partition coordinator, make sure the input " "tasks can be correctly distributed to partition members." msgstr "" "[`bug 1575530 `_] パーティショ" "ンコーディネーターを修正し改善するためのパッチを追加しました。入力タスクが" "パーティションメンバーに正しく配布されるようにしてください。" msgid "" "[`bug 1582131 `_] Fix an issue " "with adding CA_BUNDLE certificate parth as value of \"verify\" parameter in " "SSL requests." 
msgstr "" "[`bug 1582131 `_] SSL リクエス" "トの \"verify\" パラメーターの値として CA_BUNDLE 証明書のパスを追加する際の問" "題を修正しました。" msgid "" "batch_size and batch_timeout configuration options are added to [listener] " "section of configuration. The batch_size controls the number of messages to " "grab before processing. Similarly, the batch_timeout defines the wait time " "before processing." msgstr "" "batch_size および batch_timeout 設定オプションを設定の [listener] セクション" "に追加しました。batch_size は、処理する前に取得するメッセージの数を制御しま" "す。同様に、batch_timeout は処理前の待機時間を定義します。" msgid "" "batch_size and batch_timeout configuration options are added to [notifier] " "section of configuration. The batch_size controls the number of messages to " "grab before processing. Similarly, the batch_timeout defines the wait time " "before processing." msgstr "" "batch_size および batch_timeout 設定オプションを設定の [notifier] セクション" "に追加しました。batch_size は、処理する前に取得するメッセージの数を制御しま" "す。同様に、batch_timeout は処理前の待機時間を定義します。" msgid "gnocchiclient library is now a requirement if using ceilometer+gnocchi." msgstr "" "ceilometer と gnocchi を使う場合、 gnocchiclient ライブラリーが必要になりまし" "た。" ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727866891.055009 aodh-19.0.0/releasenotes/source/locale/ko_KR/0000775000175000017500000000000000000000000020774 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.1230197 aodh-19.0.0/releasenotes/source/locale/ko_KR/LC_MESSAGES/0000775000175000017500000000000000000000000022561 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/source/locale/ko_KR/LC_MESSAGES/releasenotes.po0000664000175000017500000000426500000000000025621 0ustar00zuulzuul00000000000000# Ian Y. Choi , 2016. #zanata # SEOKJAE BARK , 2017. #zanata # Sungjin Kang , 2017. #zanata msgid "" msgstr "" "Project-Id-Version: Aodh Release Notes\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2018-02-28 14:53+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2017-07-06 01:12+0000\n" "Last-Translator: SEOKJAE BARK \n" "Language-Team: Korean (South Korea)\n" "Language: ko_KR\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=1; plural=0\n" msgid "1.1.1" msgstr "1.1.1" msgid "1.1.3" msgstr "1.1.3" msgid "2.0.0" msgstr "2.0.0" msgid "2.0.1" msgstr "2.0.1" msgid "2.0.2" msgstr "2.0.2" msgid "3.0.0" msgstr "3.0.0" msgid "4.0.0" msgstr "4.0.0" msgid "" "A healthcheck endpoint is provided by default at /healthcheck. It leverages " "oslo_middleware healthcheck middleware. It allows to retrieve information " "about the health of the API service." msgstr "" "기본으로 /healthcheck 로 상태 체크 endpoint를 제공합니다. oslo_midlleware " "healthcheck 미들웨어를 활용하여 API 서비스 상태에 대한 정보를 검색할 수 있습" "니다." 
msgid "Aodh Release Notes" msgstr "Aodh 릴리즈 노트" msgid "Bug Fixes" msgstr "버그 수정" msgid "Current Series Release Notes" msgstr "현재 시리즈에 대한 릴리즈 노트" msgid "Deprecation Notes" msgstr "사용하지 않는 기능" msgid "Liberty Series Release Notes" msgstr "Liberty 시리즈에 대한 릴리즈 노트" msgid "Mitaka Series Release Notes" msgstr "Mitaka 시리즈에 대한 릴리즈 노트" msgid "New Features" msgstr "새로운 기능" msgid "Newton Series Release Notes" msgstr "Newton 시리즈 릴리즈 노트" msgid "Ocata Series Release Notes" msgstr "Ocata 시리즈 릴리즈 노트" msgid "Other Notes" msgstr "기타 기능" msgid "Security Issues" msgstr "보안 이슈" msgid "Upgrade Notes" msgstr "업그레이드 노트" msgid "gnocchiclient library is now a requirement if using ceilometer+gnocchi." msgstr "" "ceilometer+gnocchi 를 사용할 경우, gnocchiclient 라이브러리가 필요합니다." ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727866891.055009 aodh-19.0.0/releasenotes/source/locale/pt_BR/0000775000175000017500000000000000000000000020775 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.1230197 aodh-19.0.0/releasenotes/source/locale/pt_BR/LC_MESSAGES/0000775000175000017500000000000000000000000022562 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/source/locale/pt_BR/LC_MESSAGES/releasenotes.po0000664000175000017500000000512600000000000025617 0ustar00zuulzuul00000000000000# Rodrigo Loures , 2018. #zanata msgid "" msgstr "" "Project-Id-Version: Aodh Release Notes\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2018-02-28 14:53+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2018-01-23 04:15+0000\n" "Last-Translator: Rodrigo Loures \n" "Language-Team: Portuguese (Brazil)\n" "Language: pt_BR\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" msgid "1.1.1" msgstr "1.1.1" msgid "1.1.3" msgstr "1.1.3" msgid "2.0.0" msgstr "2.0.0" msgid "2.0.1" msgstr "2.0.1" msgid "2.0.2" msgstr "2.0.2" msgid "3.0.0" msgstr "3.0.0" msgid "3.0.3" msgstr "3.0.3" msgid "4.0.0" msgstr "4.0.0" msgid "4.0.1" msgstr "4.0.1" msgid "5.0.0" msgstr "5.0.0" msgid "Add support for Keystone v3 authentication" msgstr "Adicionado o suporte para autenticação Keystone v3" msgid "All the deprecated non-SQL drivers have been removed." msgstr "Todos os drivers non-SQL obsoletos foram removidos." msgid "Aodh Release Notes" msgstr "Aodh - Notas de Versão" msgid "Bug Fixes" msgstr "Correção de erros" msgid "Current Series Release Notes" msgstr "Atual - Séries de Notas de Versão" msgid "Deprecation Notes" msgstr "Notas de obsolência" msgid "Liberty Series Release Notes" msgstr "Liberty - Série de Notas de Versão" msgid "Mitaka Series Release Notes" msgstr "Mitaka - Série de Notas de Versão" msgid "New Features" msgstr "Novos recursos" msgid "Newton Series Release Notes" msgstr "Newton - Série de Notas de Versão" msgid "Ocata Series Release Notes" msgstr "Ocata - Série de Notas de Versão" msgid "Other Notes" msgstr "Outras notas" msgid "Pike Series Release Notes" msgstr "Pike - Série de Notas de Versão" msgid "Security Issues" msgstr "Problemas de segurança" msgid "Start using reno to manage release notes." msgstr "Comece a usar o reno para gerenciar notas de versão." msgid "The api.pecan_debug option has been removed." msgstr "A opção api.pecan_debug foi removida." 
msgid "The deprecated 'threshold' alarm type has been removed." msgstr "O tipo de alarme 'threshold' obsoleto foi removido." msgid "The deprecated combination alarms support have been removed." msgstr "O suporte a alarmes de combinação obsoleto foi removido." msgid "" "The reason of the state change is now part of the API as \"state_reason\" " "field of the alarm object." msgstr "" "O motivo de mudança de estado agora é parte da API com a coluna " "\"state_reason\" do objeto de alarme." msgid "Upgrade Notes" msgstr "Notas de atualização" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/source/mitaka.rst0000664000175000017500000000021600000000000020527 0ustar00zuulzuul00000000000000============================= Mitaka Series Release Notes ============================= .. release-notes:: :branch: origin/stable/mitaka ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/source/newton.rst0000664000175000017500000001222200000000000020573 0ustar00zuulzuul00000000000000============================= Newton Series Release Notes ============================= 3.0.3 ===== Bug Fixes --------- .. releasenotes/notes/gnocchi-external-resource-owner-3fad253d30746b0d.yaml @ b'f87e0d05c4662c14c7a9f49a0a829cf9bf3edbdb' - When an unprivileged user want to access to Gnocchi resources created by Ceilometer, that doesn't work because the filter scope the Gnocchi query to resource owner to the user. To fix we introduce a new configuration option "gnocchi_external_project_owner" set by default to "service". The new filter now allow two kind of Gnocchi resources: * owned by the user project * owned by "gnocchi_external_project_owner" and the orignal project_id of the resource is the user project. 3.0.0 ===== New Features ------------ .. releasenotes/notes/enable-aodh-service-multi-processes-67ed9a0b7fac69aa.yaml @ b'bb7d87f0538d69c2db8f316891217733a2b5a443' - Enable aodh services, including aodh-evaluator, aodh-listener and aodh-notifier to run in multiple worker mode. New options are introduced corresponsively as [evaluator]workers, [listener]workers and [notifier]workers. They all default to 1. .. releasenotes/notes/event-listener-batch-support-04e6ff159ef34d8c.yaml @ b'56f24bdad16c21fe7daa4502844fa9e8a976a232' - Add support for batch processing of messages from queue. This will allow the aodh-listener to grab multiple event messages per thread to enable more efficient processing. .. releasenotes/notes/fix-ssl-request-8107616b6a85a217.yaml @ b'788403b0f18c8e68e01485f3c21f71f06eb57198' - A new option “rest_notifier_ca_bundle_certificate_path” has been added in the configuration file, set None as default value. If this option is present and SSL is used for alarm action the certificate path provided will be used as value of verify parameter in action request. .. releasenotes/notes/ingestion-lag-2317725887287fbc.yaml @ b'b3874c47f1051d37ed839f4f8fffda2c77641f28' - Allow to extends the alarm evaluation windows to compensate the reporting/ingestion lag. An new option is introduced additional_ingestion_lag defaulted to 0. It represents the number of seconds of the window extension. .. releasenotes/notes/notifier-batch-listener-01796e2cb06344dd.yaml @ b'520425faf80cf2e0fb86cab216440df5550171c8' - Add support for batch processing of messages from queue. This will allow the aodh-notifier to grab multiple messages per thread to enable more efficient processing. 
Upgrade Notes ------------- .. releasenotes/notes/add-a-data-migration-tool-daa14b0cb5d4cc62.yaml @ b'a096e57759c00b8f98499a36bf8a8854daa378ec' - Add a tool for migrating alarm and alarm history data from NoSQL storage to SQL storage. The migration tool has been tested OK in devstack environment, but users need to be cautious with this, because the data migration between storage backends is a bit dangerous. .. releasenotes/notes/event-listener-batch-support-04e6ff159ef34d8c.yaml @ b'56f24bdad16c21fe7daa4502844fa9e8a976a232' - batch_size and batch_timeout configuration options are added to [listener] section of configuration. The batch_size controls the number of messages to grab before processing. Similarly, the batch_timeout defines the wait time before processing. .. releasenotes/notes/notifier-batch-listener-01796e2cb06344dd.yaml @ b'520425faf80cf2e0fb86cab216440df5550171c8' - batch_size and batch_timeout configuration options are added to [notifier] section of configuration. The batch_size controls the number of messages to grab before processing. Similarly, the batch_timeout defines the wait time before processing. .. releasenotes/notes/support-combination-to-composite-conversion-3e688a6b7d01a57e.yaml @ b'050a7dcb344a5ee3ad0351f3a4c18e90078e782b' - Add a tool for converting combination alarms to composite alarms, since we have deprecated the combination alarm support and recommend to use composite alarm to perform multiple conditions alarming. Deprecation Notes ----------------- .. releasenotes/notes/deprecate-combination-alarms-7ff26b73b61a0e59.yaml @ b'20abf3b1fb0190aa7c777f01844d062682ea41e1' - The combination alarms are officially deprecated and disabled by default. Set api.enable_combination_alarms to True to enable them. Existing alarms will still be evaluated, but access to them via the API is linked to whether that configuration option is turned on or off. It's advised to use composite alarms instead. Bug Fixes --------- .. releasenotes/notes/fix-ssl-request-8107616b6a85a217.yaml @ b'788403b0f18c8e68e01485f3c21f71f06eb57198' - [`bug 1582131 `_] Fix an issue with adding CA_BUNDLE certificate parth as value of "verify" parameter in SSL requests. .. releasenotes/notes/partition-coordinator-improvement-ff1c257f69f120ac.yaml @ b'dd06bf9277774c56121be0b4878c8973f38e761d' - [`bug 1575530 `_] Patch was added to fix and improve the partition coordinator, make sure the input tasks can be correctly distributed to partition members. Other Notes ----------- .. releasenotes/notes/remove-alarm-name-unique-constraint-4fb0b14f3ad46f0b.yaml @ b'413f83d79530140280eacc3c25ba980fbcc3c1f9' - Alarm name unique constraint for each project has been removed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/source/ocata.rst0000664000175000017500000000021200000000000020344 0ustar00zuulzuul00000000000000============================ Ocata Series Release Notes ============================ .. release-notes:: :branch: origin/stable/ocata ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/source/pike.rst0000664000175000017500000000021700000000000020212 0ustar00zuulzuul00000000000000=================================== Pike Series Release Notes =================================== .. 
release-notes:: :branch: stable/pike ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/source/queens.rst0000664000175000017500000000022300000000000020557 0ustar00zuulzuul00000000000000=================================== Queens Series Release Notes =================================== .. release-notes:: :branch: stable/queens ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/source/rocky.rst0000664000175000017500000000022100000000000020404 0ustar00zuulzuul00000000000000=================================== Rocky Series Release Notes =================================== .. release-notes:: :branch: stable/rocky ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/source/stein.rst0000664000175000017500000000022100000000000020377 0ustar00zuulzuul00000000000000=================================== Stein Series Release Notes =================================== .. release-notes:: :branch: stable/stein ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/source/train.rst0000664000175000017500000000017600000000000020403 0ustar00zuulzuul00000000000000========================== Train Series Release Notes ========================== .. release-notes:: :branch: stable/train ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/source/unreleased.rst0000664000175000017500000000016100000000000021407 0ustar00zuulzuul00000000000000============================== Current Series Release Notes ============================== .. release-notes:: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/source/ussuri.rst0000664000175000017500000000020200000000000020606 0ustar00zuulzuul00000000000000=========================== Ussuri Series Release Notes =========================== .. release-notes:: :branch: stable/ussuri ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/source/victoria.rst0000664000175000017500000000022000000000000021074 0ustar00zuulzuul00000000000000============================= Victoria Series Release Notes ============================= .. release-notes:: :branch: unmaintained/victoria ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/source/wallaby.rst0000664000175000017500000000021400000000000020712 0ustar00zuulzuul00000000000000============================ Wallaby Series Release Notes ============================ .. release-notes:: :branch: unmaintained/wallaby ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/source/xena.rst0000664000175000017500000000020000000000000020205 0ustar00zuulzuul00000000000000========================= Xena Series Release Notes ========================= .. 
release-notes:: :branch: unmaintained/xena ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/source/yoga.rst0000664000175000017500000000020000000000000020211 0ustar00zuulzuul00000000000000========================= Yoga Series Release Notes ========================= .. release-notes:: :branch: unmaintained/yoga ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/releasenotes/source/zed.rst0000664000175000017500000000017400000000000020046 0ustar00zuulzuul00000000000000======================== Zed Series Release Notes ======================== .. release-notes:: :branch: unmaintained/zed ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/requirements.txt0000664000175000017500000000175200000000000016030 0ustar00zuulzuul00000000000000tenacity>=3.2.1 # Apache-2.0 croniter>=0.3.4 # MIT License futurist>=0.11.0 # Apache-2.0 jsonschema>=3.2.0 # MIT keystonemiddleware>=5.1.0 # Apache-2.0 gnocchiclient>=6.0.0 # Apache-2.0 lxml>=2.3 oslo.db>=11.0.0 # Apache-2.0 oslo.config>=6.8.0 # Apache-2.0 oslo.i18n>=1.5.0 # Apache-2.0 oslo.log>=4.3.0 # Apache-2.0 oslo.reports>=1.18.0 # Apache-2.0 oslo.policy>=3.11.0 # Apache-2.0 oslo.upgradecheck>=1.3.0 # Apache-2.0 PasteDeploy>=1.5.0 pbr>=2.0.0 # Apache-2.0 pecan>=0.8.0 oslo.messaging>=5.2.0 # Apache-2.0 oslo.middleware>=3.22.0 # Apache-2.0 oslo.utils>=4.7.0 # Apache-2.0 python-keystoneclient>=1.6.0 pytz>=2013.6;python_version<"3.9" # MIT requests>=2.5.2 stevedore>=1.5.0 # Apache-2.0 SQLAlchemy>=1.4.1 tooz>=1.28.0 # Apache-2.0 voluptuous>=0.8.10 WebOb>=1.2.3 WSME>=0.12.1 cachetools>=1.1.6 cotyledon>=1.7.3 keystoneauth1>=2.1 python-observabilityclient>=0.0.4 python-octaviaclient>=1.8.0 python-dateutil>=2.8.2 # BSD python-heatclient>=1.17.0 tzdata>=2022.4;python_version>="3.9" # MIT ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727866891.1270204 aodh-19.0.0/setup.cfg0000664000175000017500000000732700000000000014371 0ustar00zuulzuul00000000000000[metadata] name = aodh url = https://storyboard.openstack.org/#!/project/openstack/aodh summary = OpenStack Telemetry Alarming description_file = README.rst author = OpenStack author_email = openstack-discuss@lists.openstack.org home_page = https://docs.openstack.org/aodh/latest/ python_requires = >=3.8 classifier = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: Implementation :: CPython Programming Language :: Python :: 3 :: Only Programming Language :: Python :: 3 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.10 Programming Language :: Python :: 3.11 Topic :: System :: Monitoring [options] packages = aodh [extras] mysql = SQLAlchemy-Utils>=0.41.0 alembic>=0.7.2 PyMySQL>=0.6.2 # MIT License postgresql = SQLAlchemy-Utils>=0.41.0 alembic>=0.7.2 psycopg2>=2.9.6 zaqar = python-zaqarclient>=1.2.0 [entry_points] aodh.storage = log = aodh.storage.impl_log:Connection mysql = aodh.storage.impl_sqlalchemy:Connection mysql+pymysql = aodh.storage.impl_sqlalchemy:Connection postgresql = aodh.storage.impl_sqlalchemy:Connection sqlite = aodh.storage.impl_sqlalchemy:Connection aodh.alarm.rule = 
gnocchi_resources_threshold = aodh.api.controllers.v2.alarm_rules.gnocchi:MetricOfResourceRule gnocchi_aggregation_by_metrics_threshold = aodh.api.controllers.v2.alarm_rules.gnocchi:AggregationMetricsByIdLookupRule gnocchi_aggregation_by_resources_threshold = aodh.api.controllers.v2.alarm_rules.gnocchi:AggregationMetricByResourcesLookupRule event = aodh.api.controllers.v2.alarm_rules.event:AlarmEventRule composite = aodh.api.controllers.v2.alarm_rules.composite:composite_rule loadbalancer_member_health = aodh.api.controllers.v2.alarm_rules.loadbalancer:LoadBalancerMemberHealthRule prometheus = aodh.api.controllers.v2.alarm_rules.prometheus:PrometheusRule aodh.evaluator = gnocchi_resources_threshold = aodh.evaluator.gnocchi:GnocchiResourceThresholdEvaluator gnocchi_aggregation_by_metrics_threshold = aodh.evaluator.gnocchi:GnocchiAggregationMetricsThresholdEvaluator gnocchi_aggregation_by_resources_threshold = aodh.evaluator.gnocchi:GnocchiAggregationResourcesThresholdEvaluator composite = aodh.evaluator.composite:CompositeEvaluator loadbalancer_member_health = aodh.evaluator.loadbalancer:LoadBalancerMemberHealthEvaluator prometheus = aodh.evaluator.prometheus:PrometheusEvaluator aodh.notifier = log = aodh.notifier.log:LogAlarmNotifier test = aodh.notifier.test:TestAlarmNotifier http = aodh.notifier.rest:RestAlarmNotifier https = aodh.notifier.rest:RestAlarmNotifier trust+http = aodh.notifier.trust:TrustRestAlarmNotifier trust+https = aodh.notifier.trust:TrustRestAlarmNotifier zaqar = aodh.notifier.zaqar:ZaqarAlarmNotifier trust+zaqar = aodh.notifier.zaqar:TrustZaqarAlarmNotifier trust+heat = aodh.notifier.heat:TrustHeatAlarmNotifier wsgi_scripts = aodh-api = aodh.api.app:build_wsgi_app console_scripts = aodh-dbsync = aodh.cmd.storage:dbsync aodh-expirer = aodh.cmd.storage:expirer aodh-evaluator = aodh.cmd.alarm:evaluator aodh-notifier = aodh.cmd.alarm:notifier aodh-listener = aodh.cmd.alarm:listener aodh-config-generator = aodh.cmd:config_generator aodh-status = aodh.cmd.status:main oslo.config.opts = aodh = aodh.opts:list_opts aodh-auth = aodh.opts:list_keystoneauth_opts oslo.config.opts.defaults = aodh = aodh.conf.defaults:set_lib_defaults oslo.policy.policies = aodh = aodh.api.policies:list_rules oslo.policy.enforcer = aodh = aodh.api.policies:get_enforcer [egg_info] tag_build = tag_date = 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/setup.py0000664000175000017500000000126200000000000014252 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import setuptools setuptools.setup( setup_requires=['pbr'], pbr=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/test-requirements.txt0000664000175000017500000000026300000000000017001 0ustar00zuulzuul00000000000000stestr>=2.0.0 # Apache-2.0 oslotest>=2.15.0 # Apache-2.0 coverage>=3.6 fixtures>=1.3.1 SQLAlchemy-Utils>=0.39.0 gabbi>=1.30.0 # Apache-2.0 # Provides subunit-trace WebTest>=3.0.0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727866862.0 aodh-19.0.0/tox.ini0000664000175000017500000000671700000000000014065 0ustar00zuulzuul00000000000000[tox] minversion = 4.2.5 envlist = py3{,-mysql,-postgresql},functional,pep8 [testenv] usedevelop = False setenv = OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_TEST_TIMEOUT=600 AODH_TEST_DRIVERS=postgresql mysql mysql: AODH_TEST_DRIVERS=mysql postgresql: AODH_TEST_DRIVERS=postgresql AODH_TEST_DEPS=postgresql,mysql mysql: AODH_TEST_DEPS=mysql postgresql: AODH_TEST_DEPS=postgresql # TODO(stephenfin): Remove once we bump our upper-constraint to SQLAlchemy 2.0 SQLALCHEMY_WARN_20=1 deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} .[{env:AODH_TEST_DEPS}] -r{toxinidir}/test-requirements.txt -r{toxinidir}/requirements.txt passenv = OS_TEST_TIMEOUT OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_LOG_CAPTURE AODH_TEST_DRIVERS commands = stestr --test-path=./aodh/tests run aodh-config-generator allowlist_externals = bash stestr [testenv:cover] setenv = {[testenv]setenv} PYTHON=coverage run --source aodh --parallel-mode commands = coverage erase stestr run {posargs} stestr --test-path=./aodh/tests/functional run {posargs} coverage combine coverage html -d cover coverage xml -o cover/coverage.xml coverage report stestr --test-path=./aodh/tests run coverage report [testenv:pep8] deps = hacking>=6.1.0,<6.2.0 commands = flake8 [testenv:releasenotes] deps = {[testenv:docs]deps} commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html [testenv:docs] usedevelop = True deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/requirements.txt -r{toxinidir}/doc/requirements.txt allowlist_externals = rm commands = rm -rf doc/build/html sphinx-build -W --keep-going -b html doc/source doc/build/html setenv = PYTHONHASHSEED=0 [testenv:pdf-docs] usedevelop = {[testenv:docs]usedevelop} deps = {[testenv:docs]deps} allowlist_externals = make rm commands = rm -rf doc/build/pdf sphinx-build -W --keep-going -b latex doc/source doc/build/pdf make -C doc/build/pdf [testenv:venv] commands = {posargs} setenv = PYTHONHASHSEED=0 [testenv:debug] commands = bash -x oslo_debug_helper {posargs} [testenv:debug-mysql] deps = gnocchi[mysql, file] pifpaf[gnocchi]>=1.0.1 -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} .[mysql] -r{toxinidir}/test-requirements.txt setenv = OS_TEST_PATH=aodh/tests/functional/ commands = pifpaf -g AODH_TEST_STORAGE_URL run mysql -- oslo_debug_helper {posargs} [testenv:debug-pgsql] deps = gnocchi[postgresql, file] pifpaf[gnocchi]>=1.0.1 -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} .[postgresql] -r{toxinidir}/test-requirements.txt setenv = OS_TEST_PATH=aodh/tests/functional/ commands = pifpaf -g AODH_TEST_STORAGE_URL run postgresql -- oslo_debug_helper {posargs} [flake8] # W503 line break before binary operator # W504 line break 
after binary operator # E402 module level import not at top of file ignore = W503,W504,E402 exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build # [H106] Do not put vim configuration in source files. # [H203] Use assertIs(Not)None to check for None. # [H204] Use assert(Not)Equal to check for equality. # [H205] Use assert(Greater|Less)(Equal) for comparison. enable-extensions=H106,H203,H204,H205 show-source = True [hacking] import_exceptions = aodh.i18n