././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.8099413 ceilometer-24.1.0.dev59/0000775000175100017510000000000015033033521013764 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/.coveragerc0000664000175100017510000000014215033033467016113 0ustar00mylesmyles[run] branch = True source = ceilometer omit = ceilometer/tests/* [report] ignore_errors = True ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/.mailmap0000664000175100017510000000370615033033467015424 0ustar00mylesmyles# Format is: # # Adam Gandelman Alan Pevec Alexei Kornienko ChangBo Guo(gcb) Chang Bo Guo Chinmaya Bharadwaj chinmay Clark Boylan Doug Hellmann Fei Long Wang Fengqian Gao Fengqian Fengqian Gao Fengqian.Gao Gordon Chung gordon chung Gordon Chung Gordon Chung Gordon Chung gordon chung Ildiko Vancsa Ildiko John H. Tran John Tran Julien Danjou LiuSheng liu-sheng Mehdi Abaakouk Nejc Saje Nejc Saje Nicolas Barcet (nijaba) Pádraig Brady Rich Bowen Sandy Walsh Sascha Peilicke Sean Dague Shengjie Min shengjie-min Shuangtai Tian shuangtai Swann Croiset ZhiQiang Fan ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/.pre-commit-config.yaml0000664000175100017510000000230115033033467020252 0ustar00mylesmylesrepos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v5.0.0 hooks: - id: trailing-whitespace # Replaces or checks mixed line ending - id: mixed-line-ending args: ['--fix', 'lf'] exclude: '.*\.(svg)$' # Forbid files which have a UTF-8 byte-order marker - id: check-byte-order-marker # Checks that non-binary executables have a proper shebang - id: check-executables-have-shebangs # Check for files that contain merge conflict strings. 
- id: check-merge-conflict # Check for debugger imports and py37+ breakpoint() # calls in python source - id: debug-statements - id: check-yaml files: .*\.(yaml|yml)$ - repo: https://opendev.org/openstack/hacking rev: 7.0.0 hooks: - id: hacking additional_dependencies: [] - repo: https://github.com/PyCQA/doc8 rev: v1.1.2 hooks: - id: doc8 - repo: https://github.com/asottile/pyupgrade rev: v3.18.0 hooks: - id: pyupgrade args: [--py3-only] - repo: https://github.com/openstack/bashate rev: 2.1.1 hooks: - id: bashate args: ['-v', '-iE006'] exclude: '.tox/.*' ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/.stestr.conf0000664000175100017510000000010515033033467016242 0ustar00mylesmyles[DEFAULT] test_path=${OS_TEST_PATH:-ceilometer/tests/unit} top_dir=./././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/.zuul.yaml0000664000175100017510000000312215033033467015734 0ustar00mylesmyles- job: name: grenade-ceilometer parent: grenade voting: false required-projects: - opendev.org/openstack/grenade - opendev.org/openstack/ceilometer - name: gnocchixyz/gnocchi override-checkout: stable/4.6 vars: configure_swap_size: 8192 grenade_devstack_localrc: shared: CEILOMETER_BACKEND: gnocchi devstack_plugins: ceilometer: https://opendev.org/openstack/ceilometer devstack_services: ceilometer-acompute: true ceilometer-acentral: true ceilometer-aipmi: true ceilometer-anotification: true irrelevant-files: &ceilometer-irrelevant-files - ^\.gitreview$ - ^(test-|)requirements.txt$ - ^setup.cfg$ - ^doc/.*$ - ^.*\.rst$ - ^releasenotes/.*$ - ^ceilometer/locale/.*$ - ^ceilometer/tests/.*$ - project: queue: telemetry templates: - openstack-cover-jobs - openstack-python3-jobs - publish-openstack-docs-pti - periodic-stable-jobs - release-notes-jobs-python3 - check-requirements check: jobs: - grenade-ceilometer - telemetry-dsvm-integration: irrelevant-files: 
*ceilometer-irrelevant-files - telemetry-dsvm-integration-ipv6-only: irrelevant-files: *ceilometer-irrelevant-files gate: jobs: - grenade-ceilometer - telemetry-dsvm-integration: irrelevant-files: *ceilometer-irrelevant-files - telemetry-dsvm-integration-ipv6-only: irrelevant-files: *ceilometer-irrelevant-files ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922488.0 ceilometer-24.1.0.dev59/AUTHORS0000664000175100017510000004766315033033470015057 0ustar00mylesmyles1iuwei <1iu.wei81@zte.com.cn> Abhishek Chanda Abhishek Lekshmanan Abhishek Lekshmanan Adelina Tuvenie Ajaya Agrawal Akhil Hingane Ala Rezmerita Alessandro Pilotti Alex Holden Alexander Chadin Alexandra Settle Alexei Kornienko Alfredo Moralejo Amy Fong Ana Malagon Ananya Chatterjee Andrea Frittoli Andreas Jaeger Andreas Jaeger Andrew Hutchings Andrew Melton Angus Lees Angus Salkeld Ann Kamyshnikova Arnaud Morin Arnaud Morin Arnaud Morin Artem Vasilyev Artur Svechnikov Ashwin Agate Balazs Gibizer Bartosz Górski Ben Nemec Ben Nemec Boris Pavlovic Brad Pokorny Brant Knudson Brian Cline Brian Moss Brooklyn Chen Béla Vancsics Callum Dickinson Can ZHANG Cao Xuan Hoang Cedric Soulas Chad Lung Chandan Kumar Chandan Kumar ChangBo Guo(gcb) Chaozhe.Chen Charles Bitter Chen Hanxiao ChenZheng Chinmaya Bharadwaj Chmouel Boudjnah Chris Dent Chris Dent Chris Sibbitt Christian Berendt Christian Martinez Christian Schwede Christian Zunker Christophe Useinovic Chuck Short Clark Boylan Claudiu Belu Cyril Roelandt Cyril Roelandt Dai Dang Van Damian Van Vuuren Dan Florea Dan Prince Dan Travis Danek Duvall Daniel Russell Daniel Vincze Dao Cong Tien Darren Birkett Darren Hague Davanum Srinivas David Peraza David Rabel Dazhao Debo~ Dutta Deepthi V V Dina Belova Dirk Mueller Divya Dmitriy Rabotjagov Dmitriy Rabotyagov Dong Ma Dou Yuan Doug Hellmann Drew Thorstensen Edward Hope-Morley Edwin Zhai Elod Illes Emilien Macchi Emma Foley Emma Foley Endre Karlson Eoghan Glynn Eoghan Glynn Eric Berglund 
Eric Brown Erno Kuvaja Eyal Fabio Giannetti Fei Long Wang Feilong Wang Felix Walter Feng Xi Yan Fengqian Gao Flavio Percoco François Charlier François Rossigneux Frederic FAURE Gangyi Luo Gauvain Pocentek Gerard Garcia Ghanshyam Mann Gordon Chung Graham Binns Graham Hayes Guangyu Suo Gyorgy Szombathelyi Ha Van Tu Han Guangyu Hang Liu Hangdong Zhang Hanxi Hanxi Liu Hanxi_Liu Haomeng, Wang Harri Hämäläinen Hervé Beraud Hisashi Osanai Hoang Trung Hieu Hongbin Lu Huachao Mao Huan Xie Huang Rui Ian Wienand Ianeta Hutchinson Igor Degtiarov Ihar Hrachyshka Ildar Svetlov Ildiko Vancsa Ilya Sviridov Ilya Tyaptin Ionuț Arțăriși Ivan Anfimov Jake Liu James E. Blair James E. Blair James Page Jaromir Wysoglad Jaromír Wysoglad Jason Myers Jason Zhang Jay Lau Jay Pipes Jeffrey Zhang Jens Rosenboom Jeremy Stanley Ji-Wei Jiang Qin Jianghua Wang Jie Li Jim Rollenhagen Jimmy McCrory Joanna H. Huang Joe Gordon Joe H. Rahme John H. Tran John Herndon Jon Schlueter Jonte Watford JordanP Jorge Niedbalski Joseph Davis Joseph Richard Joshua Harlow JuPing Juan Antonio Osorio Robles Juan Larriba Julien Danjou June.King Justin SB KIYOHIRO ADACHI Kamil Rykowski Keith Byrne Ken Pepple Ken'ichi Ohmichi Ken'ichi Ohmichi Kennan Kennan Kevin McDonald Kevin_Zheng Kirill Bespalov Kishore Juigil Kobi Samoray Koert van der Veer Komei Shimamura Ladislav Smola Lan Qi song Lance Albertson Lars Kellogg-Stedman Laszlo Hegedus Leehom Li (feli5) Lena Novokshonova Lianhao Lu Lingxian Kong LinuxJedi LiuSheng Luigi Toscano Luis A. 
Garcia Luis Pigueiras Luo Gangyi Luong Anh Tuan Maho Koshiya Manik Bindlish Manuel Rodriguez Marios Andreou Mariusz Karpiarz Mark Goddard Mark McClain Mark McLoughlin Martin Geisler Martin Kletzander Martin Mágr Mathew Odden Mathieu Gagné Matt Riedemann Matt Wisch Matthias Runge Maxime Guyot Mehdi Abaakouk Mehdi Abaakouk Michael Krotscheck Michael Still Michał Jastrzębski Michel Nederlof Miguel Alex Cantu Miguel Grinberg Mike Spreitzer Milan Potdar Ming Shuang Xian Monsyne Dragon Monty Taylor Morgan Fainberg Nadya Privalova Nadya Shakhat Nam Nguyen Hoai Nejc Saje Ngo Quoc Cuong Nguyen Phuong An Nguyen Van Trung Nick Barcet Nicolas Barcet (nijaba) Nishant Kumar Noorul Islam K M Octavian Ciuhandu OpenStack Release Bot Pablo Iranzo Gómez PanFengyun PanFengyun Patrick East Paul Belanger Paul Bourke Pavlo Shchelokovskyy Pedro Henrique Peter Nordquist Peter Portante Petr Kovar Petr Kuběna Phil Neal Pierre Riteau Pierre Riteau Piyush Masrani Pradeep Kilambi Pradeep Kilambi Pradeep Kumar Singh Pradyumna Sampath Prudhvi Rao Shedimbi Pádraig Brady Qiaowei Ren Rabi Mishra Rafael Folco Rafael Rivero Rafael Weingärtner Rafal Szmigiel Rich Bowen Richard Devers Rikimaru Honjo Rob Raymond Robert Collins Robert Mizielski Rohit Jaiswal Romain Soufflet Roman Bogorodskiy Roman Podoliaka Rosario Di Somma Ruslan Aliev Russell Bryant Ryan Petrello Ryota MIBU SU, HAO-CHEN Saba Ahmed Sam Morrison Samta Samuel Merritt Sandy Walsh Sanja Nosan Sascha Peilicke Sean Dague Sean McGinnis Sean Mooney Sergey Lukjanov Sergey Vilgelm Seyeong Kim Shane Wang Shengjie Min Shilla Saebi Shuangtai Tian Shubham Chitranshi Simona Iuliana Toader Sofer Athlan-Guyot Srinivas Sakhamuri Stas Maksimov Stefano Zilli Stephen Balukoff Stephen Finucane Stephen Gran Steve Lewis Steve Martinelli Steven Berler Sumant Murke Sumit Jamgade SunAnChen Supreeth Shivanand Surya Prabhakar Svetlana Shturm Swami Reddy Swann Croiset Swapnil Kulkarni (coolsvap) Sylvain Afchain Szymon Wroblewski Takashi Kajinami Takashi Kajinami 
Takashi NATSUME Taketani Ryo Tatsuro Makita Terri Yu Theo Gindre Thierry Carrez Thomas Bechtold Thomas Goirand Thomas Graichen Thomas Herve Thomas Herve Thomas Maddox Tin Lam Tobias Urdin Tobias Urdin Tong Li Tony Breeds Trinh Nguyen Tuan Do Anh Ubuntu Victor Stinner Victor Stinner Victoria Martinez de la Cruz Vinay Kapalavai Vitalii Lebedynskyi Vitaly Gridnev Vladislav Kuzmin Vu Cong Tuan WenyanZhang Wenzhi Yu Witek Bedyk Witold Bedyk Wu Wenxiang Xia Linjuan XiaBing Yao Xiang Li XieYingYun Yadnesh Kulkarni Yadnesh Kulkarni Yaguang Tang Yaguang Tang Yandong Xuan Yanos Angelopoulos Yanyan Hu Yarko Tymciurak Yassine Lamgarchal Yathiraj Udupi You Yamagata Yuanbin.Chen Yunhong, Jiang Yurii Prokulevych Yuriy Zveryanskyy Yushiro FURUKAWA ZTE-SuZhengwei ZhaoBo Zhengwei Gao Zhi Kun Liu Zhi Yan Liu ZhiQiang Fan ZhongShengping Zhongyue Luo Zi Lian Ji aggaatul akhiljain23 alextricity25 ananya23d annegentle ansaba blue55 caoyuan cbitte000 ccrouch celik.esra chen-xing chenaidong1 chenxing daz dongwenjuan dsxyy eNovance emilienm fengchaoyang florent fujioka yuuichi gaofei gengchc2 gengjh ghanshyam ghanshyam gong yong sheng gord chung gugug guillaume pernot hanxi.liu hgangwx inspurericzhang jiaxi jimmygc jing.liuqing jinxingfang jizilian jlarriba jonnary joyce jwysogla kairat_kushaev kairoaraujo khushbuparakh kiwik-chenrui kpdev kuangcx leizhang lianghuifei lijian likui lipan liuqing liusheng liuwei liyi liyuenan lizheming ljhuang lqslan lrqrun ls1175 lvdongbing lvxianguo lzhijun melissaml mgirgisf minruigao mizeng nellysmitt nicodemus npraveen35 obutenko pangliye pedro pleimer prankul prankul mahajan qin.jiang replay rwe sanuptpm sh.huang shangxiaobj shengjie min sin songwenping srsakhamuri tanlin terriyu unknown vagrant venkatamahesh vivek.nandavanam vivek.nandavanam wangqi wangqiangbj wangzihao wbluo0907 wu.shiming xialinjuan xianbin xiangjun li xiangjun.li xiaozhuangqing xiexianbin xingzhou xqk xugang xuqiankun yanghuichan yanheven yuyafei zhang-jinnan zhang-shaoman zhang.lei 
zhangboye zhangdaolong zhangguoqing zhangshengping2012 zhangxuanyuan zhangyangyang zhangyanxian zhaolihui zhufl zhurong zjingbj ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/CONTRIBUTING.rst0000664000175100017510000000106515033033467016440 0ustar00mylesmylesIf you would like to contribute to the development of OpenStack, you must follow the steps documented at: https://docs.openstack.org/infra/manual/developers.html#development-workflow Once those steps have been completed, changes to OpenStack should be submitted for review via the Gerrit tool, following the workflow documented at: https://docs.openstack.org/infra/manual/developers.html#development-workflow Pull requests submitted through GitHub will be ignored. Bugs should be filed on Launchpad, not GitHub: https://bugs.launchpad.net/ceilometer ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922488.0 ceilometer-24.1.0.dev59/ChangeLog0000664000175100017510000053755115033033470015561 0ustar00mylesmylesCHANGES ======= * Threads to process pollsters * Fix a typo in release note * Do not use eval for user input * Fix incomplete mock of libvirtError * Ensure libvirt python binding is installed for tests * Refactor volume pollsters * Fix volume provider pool capacity metrics * Add bashate * Catch exceptions for interfaceStats in libvirt inspector * Remove Python 3.9 support * devstack: Remove unused gnocchi-related variables * Replace deprecated datetime.datetime.utcnow * Deprecate unused [DEFAULT] http\_timeout * Drop description about ZeroMQ * Remove support for Intel Node Manager * Remove old code for suse in DevStack plugin * Poll for evaluation metrics from aodh * Remove unused test method * Remove CentOS Stream 9 jobs * Remove installation guide for openSUSE/SLES * Run pyupgrade to clean up Python 2 syntaxes * Use pre-commit to run pep8 checks * add pyproject.toml to support pip 23.1 * Enable prom-exporter 
with tls * Drop remaining lower-constraints target * Drop redundant injection of VIRTUAL\_ENV * Fix missing requests-aws * Add pool capacity pollsters * Make the instance 'host' attribute optional * Update master for stable/2025.1 * Skip functional tests for .gitreview 24.0.0 ------ * Rename [polling] tenant\_name\_discovery * Remove remaining logic for gnocchi devstack plugin * Drop implicit test dependency on iso8601 * Add a map event trait plugin * Revert "Fix vm\_instance label from exporter Prometheus metrics" * Do not drop floating IPs/VPNs with unknown status * Add the disk.ephemeral.size and disk.root.size pollsters * Imported Translations from Zanata * Add storage\_policy attribute for Swift containers * Add the volume\_type\_id attribute for volumes * Drop unused pyOpenSSL * Drop unused gabbi * Dynamic pollsters: Append relative url\_path to URLs * Update default envlist * Remove metrics dependent on Intel CMT perf events * Remove stale metrics from prometheus exporter * Replace oslo\_utils.encodeutils.exception\_to\_unicode * Use constants to validate sample type * Fix gnocchi filter project for non-default domain * Add maximum fan rpm to hardware.ipmi.fan metrics * Remove cpu\_util meter * nova: Remove unused instance\_get\_all * Imported Translations from Zanata * Drop unnecessary OS\_CLOUD override * Remove unused test utilities * Remove VMWare vSphere support * Imported Translations from Zanata * Fix vm\_instance label from exporter Prometheus metrics * Introduce power.state metric * Fix outdated supported database backends * Remove OpenContrail support * Remove remaining reference to OpenDaylight * Drop deprecated cinderv2 option * devstack: Drop unused local variable * reno: Update master for unmaintained/2023.1 * Make all metrics be gauge type * grenade: Use systemd unit name to check service status * grenade: Fix detection of ceilometer-upgrade * Use default service group to launch ceilometer * Replace deprecated 
configure\_auth\_token\_middleware * Add parameter for disabled projects * Pass interface to keystone client * Fix the doc build * Remove 'x' bit from non-executable files * Remove Python 3.8 support * Prometheus: Refactor metric handling * Ensure supported metric type is given * Peometheus: Refactor label generation logic * Drop unused eventlet from test requirements * Revert "Disable GLOBAL\_VENV" * Fix Prom exporter resource\_name parsing * Add server\_group label to prometheus exporter * Skip functional jobs for locale files * Support file watcher to trigger GMR report * Adjust explanation of enable\_notifications * Get rid of pkg\_resources * Get rid of distutils * Replace deprecated constant\_time\_compare * Handle IPv6 address for prometheus\_listen\_addresses * Ceilometer to export Prometheus metrics * Imported Translations from Zanata * Add [DEFAULT] polling\_namespace to generated ceilometer.conf * Fix a missing space in parameter description * Drop unused import * Remove leftover for python 3.7 support * Update master for stable/2024.2 * Fix package name of gnocchiclient * Imported Translations from Zanata 23.0.0 ------ * Remove sahara support * Imported Translations from Zanata * Add heart beat report for polling agents * Enable instance metadata polling * Imported Translations from Zanata * Replace use of testtools.testcase.TestSkipped * Remove Windows OS support * Add more metadata to volume.size and volume.backup.size pollster * Fix exception when updating non-existing gnocchi resources * Remove metrics specific to OpenDaylight * Remove old excludes * reno: Update master for unmaintained/zed * Remove OpenDaylight support * Stop stack traces on Swift when receiving 403 * Imported Translations from Zanata * Imported Translations from Zanata * reno: Update master for unmaintained/xena * reno: Update master for unmaintained/wallaby * reno: Update master for unmaintained/victoria * Fix dynamic pollsters on compute nodes and coordination process * Update 
master for stable/2024.1 * Update regex to detect closed branch 22.0.0 ------ * Fix wrong stacklevel in deprecation warnings * Change default pipeline interval comment * Fix releasenotes build of yoga moved to unmaintained * Remove unused cache\_key\_mangler * Bump hacking * Document choices of instance\_discovery\_method by oslo.config interface * devstack: Use spaces instead of tabs * doc: Remove remaining reference to Xen hypervisor * Remove uml virt type support * libvirt: Fix unsupported 'parallels' virt type * Stop using deprecated zake coordination driver * Update python classifier in setup.cfg * Create new meter to poll power usage * Drop authtoken middleware options from ceilometer config file * Update supported python versions * doc: Drop DJANGO\_SETTINGS\_MODULE environment * Add opentelemetry publisher base on http * doc: Fix wrong unit of "current" type ipmi sensor * Deprecate OpenContrail support * Deprecate OpenDaylight support * Add 'user\_id' label in prometheus publisher * Add logs for the \`tenant\_name\_discovery=True\` flow * Add logs in the SampleEndpoint class * Fix \`TelemetryIpc\` when using \`tenant\_name\_discovery=False\` * Fix devstack plugin for CEILOMETER\_BACKEND=none * Remove monasca * Add sg-core backend support in devstack plugin * Fix python shebang * Fallback to oslo\_cache.dict caching backend * Fix ConnectionRefused in TCP publisher * notification: Dump loaded options during start up * Update master for stable/2023.2 21.0.0 ------ * Deprecate support for VMWare vSphere * Deprecate Windows OS support * Add file publisher entrypoint to publish events * Refactor TCP provider * Disable GLOBAL\_VENV * Imported Translations from Zanata * Make centos9s jobs non-voting * Include 'hardware.ipmi.fan' to resource type ipmi\_sensor * Imported Translations from Zanata * Imported Translations from Zanata * Make multiple attempts to obtain gnocchiclient * jsonpath-rw-ext uses operator.add for "+" and operator.sub for "-". 
operator.add can do string concatenation, but operator.sub cannot do string subtraction. so the original hack does not work, perhaps we should use sub to get an empty string * Add logs to help identify when a polling task finishes * Imported Translations from Zanata * Add 'check\_requirements' job to CI * Make TCP publisher log warning instead of failing * Add vanity names to notification samples * [coordination] backend\_url should be secret * Imported Translations from Zanata * Update master for stable/2023.1 * Add vanity names to telemetry polling notifications 20.0.0 ------ * Enable volume metrics by default * Remove SNMP metrics from default polling * Imported Translations from Zanata * Imported Translations from Zanata * Fix gnocchi install from git * Imported Translations from Zanata * Add TCP publisher * Make tox.ini tox 4.0 compatible * Imported Translations from Zanata * NoUniqueMatch: ClientException on Gnocchi publisher * Fix barbican credentials retrieval * Tests the sample name * Fix OutgoingBytesDeltaPollster sample name * Change oslo\_cache implementation * Improve logging for Gnocchi publisher * Imported Translations from Zanata * Post individual JSONs instead of the whole list * Update the installation guide for RHEL8/CentOS8 * Imported Translations from Zanata * Fix the handle of plain text outputs * Remove lingering queue declaration * Properly handle 'resource\_id' as None for Gnocchi publisher * Add support to namespaces on dynamic pollsters * Imported Translations from Zanata * Add extra metadata fields skip * Add support to host command dynamic pollster definitions * Add user/project names to polled samples * Switch to 2023.1 Python3 unit tests and generic template name * Update master for stable/zed 19.0.0 ------ * Imported Translations from Zanata * Fix Non-OpenStack dynamic pollster credentials handling * Exclude metrics from "service" project * Imported Translations from Zanata * Remove unnecessary logic for missing oslo.cache * Add 
response handlers to support different response types * [zuul] Make telemetry-dsvm-integration-centos-9s-fips vote again * Imported Translations from Zanata * Remove unicode prefixes * Replace abc.abstractproperty with property and abc.abstractmethod * Add debug to tox environment * Remove GenericHardwareDeclarativePollster * Replace deprecated iso8601\_from\_timestamp * Imported Translations from Zanata * Remove [coordination] check\_watchers * Rename a release note file * zuul: Declare queue at top level * Update python testing as per zed cycle testing runtime * Remove support for neutron-lbaas * Replace CentOS Stream 8 by 9 * Fix swift pollsters that use get\_account * Imported Translations from Zanata * Drop lower-constraints.txt and its testing * Debug log libvirt metadata version fails * Add Python3 zed unit tests * Update master for stable/yoga * Remove reference to the tenant attribute of RequestContext 18.0.0 ------ * OpenStack Dynamic pollsters metadata enrichment with other OpenStack API's data * Support two nova metadata versions in instance XML * Update python testing classifier * Remove the wrong release note file * Add missing oslo.cache parameters * Deprecate support for Neutron FWaaS * Fix ignored [notification] batch\_size * Deprecate support for Neutron LBaaS * Update requirements and lower\_constraints * Fix broken test\_discovery\_with\_libvirt\_error * Update compute.discovery to get nova domain meta * Deprecate GenericHardwareDeclarativePollster * Imported Translations from Zanata * Add the describe of uWSGI to run Gnocchi API * Imported Translations from Zanata * Add Python3 yoga unit tests * Update master for stable/xena 17.0.0 ------ * Add missing [oslo\_reports] options * Fix CA file for Swift pollster * Adding FIPS job * Do not install libvirt python bindings from pip * Moving Centos8s job to telemetry-tempest-plugin * Do not install libvirt-python on RHEL via pip * Replace deprecated oslo\_utils.timeutils.isotime * Moving IRC network 
reference to OFTC * Gnocchi: replace deprecated [storage] coordination\_url * Replace oslo\_utils.fnmatch with fnmatch * Notify and update snapshot metrics * Remove references to Ocata and Newton * Move bug tracking back to launchpad * Move IRC channel to OFTC * Changed minversion in tox to 3.18.0 * remove requires on monotonic * Deprecate unused [coordination] check\_watchers * Remove dependency to panko * vmware tests: Support different moref backend representations * Introduce \`timeout\` in dynamic pollsters sub-system * Remove Xen support * Ceilometer compute \`retry\_on\_disconnect\` using \`no-wait\` * Use tox constraints instead of upper constraints * setup.cfg: Replace dashes with underscores * Imported Translations from Zanata * Add Python3 xena unit tests * Update master for stable/wallaby * Cap tenacity < 7.0.0 * Imported Translations from Zanata 16.0.0 ------ * Deprecate support for Xen * Fix gnocchi create resource error when missing flavor * Using Iterable was deprecated in python 3.3 * Imported Translations from Zanata * Update requirements URLs in tox config * Replace six with python3 code style * Fix invalid argument formatting in the exception message * Imported Translations from Zanata * Fix lower-constraints job * Imported Translations from Zanata * Imported Translations from Zanata * Update bug location * Imported Translations from Zanata * Reuse the docs deps to benefit from constraints * Imported Translations from Zanata * Imported Translations from Zanata * Implement some new meters for vnic delta * Bump hacking min version to 3.0.1 * Imported Translations from Zanata * Revert "[goal] Migrate testing to ubuntu focal" * Drop py37 from tox * Imported Translations from Zanata * add py38 in tox.ini * Dep's should be restricted by upper-constraints * Imported Translations from Zanata * Add AZ resource metadata for volume size polling * Remove six.moves * Add Python3 wallaby unit tests * Update master for stable/victoria * Remove babel.cfg etc * 
[goal] Migrate testing to ubuntu focal * Remove install unnecessary packages * Only process 'resource\_id' if metric is configured for Gnocchi push 15.0.0 ------ * Fix handling of metadatas that are set to None or False * [goal] Migrate tox based testing to ubuntu focal * Statement about complex object handling in the Dynamic pollster documentation * Add logic for event\_update handling and set transfer event as event\_update * maybe use word is better * Run migration with NotFound exception as well * Run migration with NotFound exception as well * Fix failing unit tests for new msgpack version * Fix volume attachment event parsing * Dynamic pollster support paging systems that return only a path for the next page * Enable processing metadata with nested objects * Add support to the use of headers in the dynamic pollsters * Allow operations on Ceilometer dynamic pollster to reference the sample itself * Remove [ and ] around parameter values in http url * Imported Translations from Zanata * Adding exception handling when inspect\_disks * Remove glance-registry from docs * Fix dynamic pollster \`default\_discovery\` method call * Fix dynamic pollster samples indentation in the documentation * Trigger volume deletes only on volume deleted * Imported Translations from Zanata * Fix PDF build * Added missing Ceilometer configuration step for controller in docs * drop mock from lower-constraints * Refresh tox * Drop undefined doc config path * Stop to use the \_\_future\_\_ module * Port the grenade job to Zuul v3 * Enable OpenStack pollster to configure Ids(project, user, and resource) * Imported Translations from Zanata * Add volume.manage to metrics * Switch to newer openstackdocstheme and reno versions * Fix pygments style * Imported Translations from Zanata * Add py38 package metadata * Add Python3 victoria unit tests * Update master for stable/ussuri 14.0.0 ------ * Imported Translations from Zanata * Imported Translations from Zanata * Imported Translations from 
Zanata * Cleanup py27 support * Fix docs build error * Imported Translations from Zanata * Temporary failures should be treated as temporary * Update hacking for Python3 * Use unittest.mock instead of third party mock * Add support to linked samples responses * Multi metric dynamic pollsters (handling attribute values with list of objects) * Tell reno to ignore the kilo branch * Fix logging libvirt error on python 3 * Add note for loadbalancer resource type support * Update links to Gnocchi website * Remove mention of meter\_dispatchers * Fix mismatch print format in log message * Imported Translations from Zanata * Imported Translations from Zanata * Include project\_id in Prometheus publisher * Dynamic pollsters: enable operation on attributes * [ussuri][goal] Drop python 2.7 support and testing * Make grenade-dsvm-ceilometer run under python3 * Dynamic pollster system to support non-OpenStack APIs * Imported Translations from Zanata * Fix instantiation of manager.AgentManager on cmd.polling.create\_polling\_service * Add missing ws seperator between words * Add loadbalancer resource type * Add Monasca to supported measurement back ends * Trivial-change: Fix rst syntax * support events * Dynamic pollsters to support nested dictionary values * Dynamic pollsters to handle different response structures * Create dynamic pollster feature * publisher: Contribute the Monasca publisher * Imported Translations from Zanata * Fix samples with dots in sample name * Update master for stable/train 13.0.0.0rc1 ----------- * PDF documentation build * Set proper trait type * Type field missing in snmp data harware.cpu meters * Run 'telemetry-dsvm-integration-ipv6-only' job in gate * Only install monotonic on python2 * Fixing broken links * Get output for processutils.execute impi command * Unit field missing in snmp data * Windows: avoid passing conf objects to subprocesses * Cleanup option meter\_definitions\_cfg\_file * Cap sphinx for py2 to match global requirements * Add 
framework for ceilometer-status upgrade check * Imported Translations from Zanata * Update reno for stable/rocky * Change a URL to point to more accurate targets * Imported Translations from Zanata * Update Python 3 test runtimes for Train * metering data lost randomly * Fix error print format * Update the job's irrelevant files * Switch to oslo privsep * Add lower-constraints job * Update the .gitignore file * disk capacity is less than disk usage * Fix the wrong ipmitool command location file * Fix install\_command in tox.ini * Remove \_ceilometer\_check\_for\_storage check * Replace git.openstack.org URLs with opendev.org URLs * OpenDev Migration Patch * tempest: Allow to configure tempest config * Dropping the py35 testing * Remove the unused function \_ceilometer\_config\_apache\_wsgi * remove redundant line * Reduce the alarm\_threshold value to make autoscaling test stable * Remove telemetry-tox-py37 * making inspector object singleton again by assigning to the base pollster class * Handle volume.retype event from cinder * Replace openstack.org git:// URLs with https:// * Update master for stable/stein * Imported Translations from Zanata * Add availability\_zone attribute to gnocchi instance resources * Set instance\_type\_id in event traits to be a string * add python 3.7 unit test job 12.0.0 ------ * Add interfaceid and bridges parameters * [Trivial fix] Add missing ws seperator between words * Added metric backup.size to remove warning in ceilometer-agent-notification log * Only print polling.yaml file contents as DEBUG * Replace tripleo-scenario002-multinode with scenario002-standalone * modify the mail list address * Use kafka extras for oslo.messaging * Replace tripleo-scenario001-multinode with scenario001-standalone * Pass gnocchi endpoint into adapter endpoint\_override param * Update mailinglist from dev to discuss * Added support for python3 * Update min tox version to 2.0 * Fixes KeyError on volume create/delete * Added snapshot delete event * 
Add release note link in README * Fix the section of notify\_on\_state\_change to [notifications] * Replace deprecated snmp metrics * Skip oslo.messaging 9.0.0 release * Don't quote {posargs} in tox.ini * Volume discovery minimum 3.43 api version * Imported Translations from Zanata * opts: remove unused import * gnocchi: Don't fail if archive policy exists * Deprecate event subsystem * event: remove deprecated method * gnocchi: handle domain for filtering swift * Fix logic when selecting configuration file * agent: Remove unused parameter in load\_config * agent: remove unused config change detection code * agent: use any to filter source objects * Imported Translations from Zanata * Imported Translations from Zanata * Fix the default filter\_project name * Use openstack-tox-cover template * pipeline: remove confusing log message * notification: remove unused constant * notification: declare logging method as static * notification: do not store temporary transport * notification: remove unused pipeline listener * pipeline: simplfy classes * compute: remove deprecated disk meters * pipeline: remove transformer support * Follow the new PTI for document build * add python 3.6 unit test job * switch documentation job to new PTI * import zuul job settings from project-config * notification: remove workload partitioning * gnocchi: add ceilometer-high{,-rate} archive policies * gnocchi: use ceilometer-low as default archive policy * polling: remove deprecated option batch\_polled\_samples * notifications: remove deprecated meter\_definitions\_cfg\_file option * publisher: mark messaging topic options deprecated * Fix ceilometer polling process not able to coordinate issue * Imported Translations from Zanata 11.0.0 ------ * Migrate to stestr for running tests * Add option to file publisher to write json * Remove unused tox target * Add gating on py37 * udp: remove wrong and useless tests * Add support for Python 3.7 * publisher: allow to not inherit from 
ConfigPublisherBase * Remove deprecated gnocchi\_dispatcher option group * gnocchi: don't create metrics with resource * polling: remove useless base class * Imported Translations from Zanata * Remove dead link in doc file * objectstore/rgw: Add config option to support RGW implicit tenants * Fix broken link to ceph radosgw usage logging docs * fix tox python3 overrides * Wrong block format in rst file * del unused module * Ability to define batch size off polled samples * Adjust the controller installation position * fix typos * inspector: memory: use usable of memoryStats if available * snmp: ignore rfc1905.NoSuchInstance result * snmp: make oid value retrieval more solid * Replace Chinese punctuation with English punctuation * Remove restiction on allowable namespaces in polling * Imported Translations from Zanata * Imported Translations from Zanata * Deprecating transformers and pipeline partitioning * Unlist non existent storage.api.request * Stop gnocchi during unstack * publisher: add a Prometheus Pushgateway publisher * Imported Translations from Zanata * Remove liusheng and llu from the maintainers * supplement "zakar" and "https" description * fix errors about words spelling * Fix doc title format error * Modify the empty list ensure method * Imported Translations from Zanata * Imported Translations from Zanata * Imported Translations from Zanata * fix a typo in documentation * Imported Translations from Zanata * Imported Translations from Zanata * Imported Translations from Zanata * Don't use gnocchiclient during publisher init * gnocchi: configure archive policies on Ceilo side * cinder: link volume to image and instance * Add new attributes to instance * Imported Translations from Zanata * Imported Translations from Zanata * hyper-v: Converts all os-win exceptions * remove integration playbooks * fix meter formating * the previous patch was missing a 'continue' * Gracefully handle missing metadata in libvirt xml * add disk.device.\*.latency meters * 
remove ceilometerclient references * avoid verbose tracebacks on known errors * update ipmi docs * add ipmi sensor data to gnocchi * Fix glossary.rst * Remove deprecated rgw meters * Use msgpack instead of msgpack-python * Update reno for stable/queens 10.0.0 ------ * Imported Translations from Zanata * add hardware.disk.read|write.\* stats * add volume.provider.\* meters to docs * add note explaining gnocchi coordination\_url * capture cell name * Deprecate aggregated disk.\* metrics on instance * ignore compute.instance.update for metrics * utils: move hash\_of\_set where it's solely used * utils: move kill\_listeners to ceilometer.notification * Imported Translations from Zanata * set cache with attributes only * Cached should not be used when creating gnocchi resources * Zuul: Remove project name * Remove run-tests.sh * utils: move publisher-only utils functions in publisher * remove repeated host * Imported Translations from Zanata * Replace curly quotes with straight quotes * Delete not applicable definition * add volume provider resource types to gnocchi * support cinder capacity statistics * Remove use of unsupported TEMPEST\_SERVICES variable * Imported Translations from Zanata * Remove state\_description for trove * polling: iter randomly over sources and pollsters when polling * polling: simplify manager method * Remove shuffle\_time\_before\_polling\_task option * utils: remove unused decimal/dt conversion functions * devstack: fix gnocchi database setup * gate: move tripleo job to experimental * Remove bundled intree ceileometer tempest plugin * tempest: reuse zuul conf of the plugin * tempest: use new plugin * Remove the redundant conf.py file * ignore compute.instance.create.start for metrics * tempest: remove ceilometer-api alarm * Remove extra space between method parameters * change doc ceilometer bug tracker url * remove gnocchi ignore option * simplify gnocchi batch push setup * compute sample as dictionary once * fix gnocchi stats logging * use 
hashmap to quickly find matching resource def * cleanup measurements page * Add README.rst record more project message * Imported Translations from Zanata * Check required Gnocchi version is installed * Replace ujson with json * Fix maximum recursion depth exceeded bug when property referencing itself * Run all telemetry tests in integration jobs * update best practices * cleanup data-pipelines admin guide * cleanup data collection admin guide * cleanup admin-guide architecture * reorder admin and contributor topics * remove configuration from contributor guide * remove install section from contributor guide * partial clean up of contributor install info * cleanup contributor plugins details * Do not check iterable objects before for loop * Do not check keystone domains length in TenantDiscovery * Add doc8 to pep8 check for ceilometer project * Imported Translations from Zanata * Remove filter\_service\_activity option in doc * Minor update comment of devstack settings * Add user\_id for sample volume.snapshot.size * Remove useless trailing newlines * Add missing name traits * devstack: redis on opensuse needs to have default config * ensure pipeline\_listener set up * remove sample sorting * Update http publisher options in doc * clean up non-install parts of contributor docs * Update NotifierPublisher class doc * Fix incorrect yaml code block in pipeline definition * [doc] frequency of polling should be controlled via the polling configuration * remove kafka publisher * Fix typo in utils.py * split partitioning polling tests * drop base polling test separation * static resources not picked up * Move delayed out of utils * doc: remove useless Ceilometer service creation * Move utils.dict\_to\_keyval to opendaylight * minor update to gnocchi contributor docs * ignore api docs * cleanup collector references * Imported Translations from Zanata * remove ceilometerclient * Imported Translations from Zanata * Imported Translations from Zanata * remove unused pollster 
exception resources * simplify test data generation * remove duplicate polling.yaml setup * remove test\_manager\_exception\_persistency * minor polling cleaning * Allow requeues in when there is a failure in EventsNotificationEndpoint * Add cpu\_l3\_cache to polling yaml * simplify cache generation * Put configurations to appropriate part * polling: run polling tasks immediately on start * Set shuffle\_time\_before\_polling\_task to float and set a minimum * fix ceilometer-compute invoke libvirt exception error * Imported Translations from Zanata * always declare partitioning variables * Remove setting of version/release from releasenotes * treat warnings as errors in release notes build * place release notes inline * zuul: run TripleO jobs with new zuulv3 layout * Remove unused ceilometer.utils.update\_nested * Remove unused ceilometer.utils.EPOCH\_TIME * Remove unused ceilometer.utils.sanitize\_timestamp * utils: remove unused create\_periodic function * queues per manager rather than per pipeline * pluggable notification agent * remove redundant filter check * move pipeline out of \_\_init\_\_ * separate polling code * nearly pluggable notification agent * separate base manager from pipeline * set models as part of pipeline manager class * remove agent setup\_\* helper functions * move sample/event specifc pipeline models to own module * Change oslo.messaging prefetch default * Imported Translations from Zanata * stop double refreshing on start * update install docs * move listener targets to notification agent * rename sample handler * common notification endpoint * refresh agent if group membership changes * Revert "devstack: Set an optimal processing queue" * libvirt: share disk device listing * revise doc of "workload\_partitioning" and term "Notification Agents" * Make Doc links point to the latest branch * Fix bug for ceilometer polling generates an exception * Imported Translations from Zanata * Clarify that there is only one polling agent * Remove 
direct publisher since it is deprecated * Fix Format in contributor/plugins.rst * set ceilometer\_backend * remove kafka and keystonemiddleware reqs * Zuul: add file extension to playbook path * Clean up direct publisher * Replace jsonutils by ujson * Remove deprecated storage drivers * remove branch specific references * Move binary and notification tests in unit * Remove Ceilometer API * Imported Translations from Zanata * Rename magnum events * fix radosgw meter name * Remove the wrap for skip inspect rbd disk info * tests: fix Redis key length debugging output format * snmp: warn if snmp call timeout * Move oslo.cache to gnocchi flavor * Use generic user for both zuul v2 and v3 * fix gnocchi publisher * zuul: remove ElasticSearch tempest experimental job * Migrate to Zuul v3 * handle new tempest swift interface * Remove unused and unmaintained doc Makefile * Update tests to do not use deprecated test.services() * kill collector * Imported Translations from Zanata * pipeline: remove polling fallback support * event: move models out of storage * Replace the Gnocchi dispatcher by a publisher * Add Hanxi Liu as a Ceilometer maintainer * Fix a typo in the Installation Guide * Use tempest.clients.Manager as base clients class * capture aodh events * Move object storage container\_client to match tempest * agent: log pollster skipping to debug * Cleanup test-requirements * messaging-publisher: fix threadsafe of flush() * devstack: install ceilometer extra * remove unused ExchangeTopics * remove ironic exchange * Replace the usage of some aliases in tempest * Remove deprecated pollster-list option * re-add cpu\_util metric to measurement list * Remove class KafkaBrokerPublisher * Set default ceilometer storage backend to gnocchi * Add dipatcher correlated renos * Remove deprecated nova\_http\_log\_debug option * Remove deprecated compute.workload\_partitioning * Initialize hashrings as AgentManager object * Removes use of timeutils.set\_time\_override * Remove class 
HttpDispatcher * Add missing command of adding admin role to gnocchi * Remove class FileDispatcher * Fix wrong links in ceilometer * Remove class PublisherBase * Retry to upgrade Gnocchi if connection fails * Use gnocchiclient 4.0 exception types * Update description 'resource\_update\_interval' option * support new gnocchiclient interface * fix disk total\_time metrics * Remove deprecated \*\_control\_exchange * Imported Translations from Zanata * Update reno for stable/pike 9.0.0 ----- * update measurements * vmware:The cpu\_util value should be a decimal * keystone: pass region name to keystone client * Add disk total duration of reads/writes metric * Modify memory swap metric type * Fix a typo in ceilometer * Remove install-guide env which is not effective * fix cpu\_util precision is too precise * ensure timezone is included in timestamps * snmp: Fix discovery when total memory is missing * Fix share.size meter * vmware: ignore -1 values * stop converting to old message format * deprecated pollster-list * use gnocchi devstack script * [Trivialfix]Fix typos in ceilometer * Update and replace http with https for doc links in ceilometer * Deprecate kafka publisher * wrong document location * Update the documentation link for doc migration * Fix neutron lbaas v2 when no listeners are set * add configuration folder * Fix inspect\_vnics error for libvirt * generate conf on doc build * turn on warning-is-error for doc builds * add missing sphinx extension * High precision rate of change timedelta * Specify region when looking for radosgw admin URL * doc: move install guide to main doc dir * remove templates * doc: move old dev docs to contributor section * doc: initial index cleanup * Project\_id for SDN controller meters * Gnocchi support for SDN controller meters * Ceilometer meter support to collect network statistics * Fix some initializations * Docs: update the path of "meters.yaml" and its new feature * Docs: switch to openstackdocstheme * Remove the 
explanation of nonexistent parameter * Update URL home-page in documents according to document migration * Add memory swap metric * Move "test\_versions.py" to the directory of functional test * Remove interval reference to pipeline * api: remove unused code * tempest: Use primary user token * Update Documentation link in README * Add some unit test cases * Limit the default numbers of metrics polled * Don't check libvirt exception if libvirt is absent * Revert "deprecate archive policy of Gnocchi dispatcher" * Fixed ceilometer arithmetic transformer bug * explicitly note sanitize\_timestamp return condition * Remove old maintainers * Add a description of the parameters to function * Migrate telemetry-events * Migrate telemetry-best-practices * Migrate telemetry-data-retrieval * Migrate telemetry-data-collection * Migrate telemetry-data-pipelines * Migrate telemetry troubleshooting guide * migrate telemetry measurements * Fix typo for additional device support in intel node manger * Add some unit test cases * Add event definition for manila * Migrate telemetry system architecture * Fix UnicodeEncodeError error when log messge in poll\_and\_notify * use tempest manager * Fix some reST field lists in docstrings * expect oslo.messaging messages * Fix can not find VMwareAPISession when import oslo\_vmware * post\_test\_hook: stop sourcing gnocchi/devstack/settings * cap cpu\_util * ensure resources are hashable * Doc: guide for operating resource type * Add share create/delete/expand/shrink meters * fix service path for fedora * modify the description for GnocchiDispatcher * add instruction to set auth\_mode * Update meters definition reference * Added Nova Compute CPU metrics to gnocchi\_resources.yaml * devstack: install Gnocchi using pip * This remove annoying message repetition in logs * Fix html\_last\_updated\_fmt for Python3 * update link away from google docs * Replace assertRaisesRegexp with assertRaisesRegex * Trivial fix typos * change to reference 
gnocchi.xyz * Change default polling interval * Fix typo in docstring * XenAPI: use os-xenapi for XenAPI driver * Support loading multiple meter definition files * tempest: fix some future flake8 issue * tempest: remove deprecation warning * Deprecate olsotest.mockpatch in favor of native fixtures * Fix a typo * tempest: Allow to configure granularity * integration: always run all commands * Remove upper constraint on sqlalchemy * Adds a Zaqar publisher * tempest: tell tempest our scenario are long * integration: Fix report generation * tests: fix messaging driver * minor doc updates * Replace Ceilometer coordination layer by tooz partition system * Fix publisher doc link * gnocchi/notification: allow to configure the maximum number of requests in parallel * tempest: Don't hardcode the network name * Remove powervm from inspector doc * Cleanup post\_test\_hook.sh * tempest: use the configured flavor * tests: remove oslo.config fixture usage * tempest: rework gabbi setup * Remove log translations * Change the compute polling local cache to be mutex * Optimize the link address * Format the abstraction layer of inspector methods to avoid TypeError * fix install-guide gnocchi+keystone instruction * Correct the configuration of pipeline\_processing\_queues in devstack * compute: remove default duration value * compute disks: use the generic compute pollster * compute vnics: use the generic compute pollster * Add tool for migrating metric data from ceilometer's storage to gnocchi * Use vcpu.x.time and vcpu.x.wait values in libvirt inspector * tests: fix conf object creation * Adds 'disabled' to the possible states for a member in LBaaSv2 * deprecated support of pipeline.yaml for polling * Correct bad use response\_strings in live.yaml * Use HostAddressOpt for opts that accept IP and hostnames * deprecate archive policy of Gnocchi dispatcher * tempest: Fix exception handling * [install-guide] Add more links * tempest: remove confusing stack check step * fix gnocchi 
unprocessed measures debug * compute: Remove dead code * compute: create one pollster to rule them all * Bump gnocchiclient min version * Remove second tenacity in requirements * tests: stop hammering CPU while waiting for sample to wait * coordination: remove started check * coordination: remove group\_id check * coordination: stop checking for \_coordinator to be None * coordination: create coordinator at init time * coordination: make group\_id to never be None * Add sem-ver flag so pbr generates correct version * tests: simplify broken test * libvirt: rewrite the error handling * Fix the incorrect gnocchi command * fix blacklisting NovaLike resources * tests: remove unused mocked method * remove resource if not created * fix gnocchi\_resources mapping * cleanup unused devstack code * start notification agent after restarting apache * Swallow & report 404s from Swift (missing tenant) * Remove Rohit Jaiswal from maintainers * devstack: Set an optimal processing queue * tempest: remove broken tests * remove ceilometer-collector condition when configuring storage * make gnocchi independent of ceilometer-api * Switch to use stable data\_utils * make gnocchi posting more resilient * remove tooz safety catch * Bugfix: use transport\_url from [oslo\_messaging\_notifications] if present * Bump kafka-python and oslo.messaging * Make sure to get all tenants by scanning each available domain * add jitter to notification agent * Modify variable's usage in Log Messages * agent: only create partition coordinator if backend url provided * agent: start coordinator at run() and never stops * coordination: use tooz builtin heartbeat manager * coordination: use join\_group\_create() * coordination: simplify retry condition by using tenacity.TryAgain * coordination: stop tracking joined groups * coordination: use a conf object with registered options in tests * Remove pipeline periodic refresh feature * Switch to use stable data\_utils * Enable Basic and https certificate 
authentication for http publisher * tempest: skip legacy telemetry-api tests * Use more specific asserts in tests * Load pipeline config files from /etc/ceilometer/ firstly * Don't run ceilometer-upgrade on unconfigured db * use tooz hashring * Remove unused variable * prepare future tempest breakage * include gnocchi+keystone instructions in install guide * add configuration instructions from admin guide and dev docs * upgrade gnocchi via ceilometer * switch dispatcher references to publisher * remove legacy db stuff * gnocchi: remove archive policy setting for identity * Use bytes for coordination member * Using Panko as publisher than dispatcher if enabled * remove keystone\_authtoken from polling agent * support gnocchi timeout * Remove smoke tag for TelemetryNotificationAPITest * add missing instance meta * Trivial: remove extra spaces * Support extended declaring exchanges * Remove unused override * remove collector instructions * chill out on the number of items in toc * Revert "verify gnocchi connection before processing" * Fix reno title format * doc: update pipeline link * Ship YAML file to /usr/share * Deprecate event\_dispatchers and meter\_dispatchers options * Remove useless metric name * set OS\_AUTH\_TYPE in gate * Support i18n for api app * Correct the use of marker function * match generic cirros name * nova: track flavor name * Trivial-fix: use domain\_id instead of domain\_name * fix [service\_credentials] section location * fix gnocchi url links * drop kwapi pollster * Correct the doc link * remove PaaS event format * cleanup devstack cache initialisation * add note about batching+gnocchi * Switch to use test\_utils.call\_until\_true * gabbi: use history * Deprecate collector * Remove support for py34 * Use https instead of http for git.openstack.org * stop hardcode timeout in tempest tests * Update reno for stable/ocata 8.0.0 ----- * add polling.yaml docs * Do not use non-UUID resource ID in Aodh+Gnocchi tests * Use Tempest stable library 
interfaces * polling definition file * remove endpoint\_override * gnocchi: do not use removed encode\_resource\_id * update multi-publisher image * make connection pool configurable * make http publisher equivalent to dispatcher * add ceilometer-upgrade step to install guide * update verify to use gnocchi * drop api and storage references from install-guide * Switch to decorators.idempotent\_id * modernise gabbi tests * drop notes re mod\_wsgi * move and trim legacy db docs * show panko events for debug * devstack: make sure it's possible to deploy panko only * set project\_id for cinder polling * install-doc: Disable ceilometer-api service * check panko during integration test * set panko dispatcher if enabled * tripleo: Fix logging of tripleo discovery * agent: always print the extension name on failure * Fix same type of simple message error * Don't load many times the same publisher * pipeline: remove tests helper from runtime code * Trivial: add white space of error message * Add support of refereshing the resource info in local cache * Simplify code of test\_complex\_query * Trivial: remove white space of exception message * switch instance\_discovery to libvirt\_metadata * publisher: fix pipeline confusing reference * remove test\_hbase\_table\_utils * Use parameter skip\_op\_id in compute.virt.vmware.test\_vsphere\_operations * gnocchi: replace / by \_ in resource\_id * gnocchi: don't rely on gnocchi uuid5 * gnocchi: prepare removal of gnocchi uuid5 * gnocchi: move volume\_type to correct section * tempest: use tempest img * fix the gnocchi resource type upgrade * remove residual instance pollster * use domainListGetStats to get cputime * drop instance meter * Fix oslo.vmware change that added new keyword argument * Don't poll nova with compute agent * tempest: Allow to not run deprecated API tests * Remove events storage and API * Update custom install doc * correct volumes name source * add volume.backup.size to gnocchi * upgrade: fix gnocchi resource 
update * stop assuming ceph/swift share same endpoint * devstack: check for ceilometer+panko service * tempest: allow usage of scenario-img for autoscaling * Fix the gate failure because of several issues * Trival-Fix: replace "nova" with "ceilometer" in api-ref * Fix error module usage * add support to parse user metadata * [doc] Note lack of constraints is a choice * always create ceilometer user * glossary: remove collector and add publisher * Test suite for Aodh's gnocchi-resource-threshold alarm * simplify fail gnochi+ceilometer check * Fix publisher comment * integration: run gabbi first * integration: deleting the stack sometimes timeout * tempest: support keystone v3 for autoscaling * tests: generate reports when tempest fail * Make sure gnocchi is enabled before ceilometer * Added new instance metrics to gnocchi definition * Revert "Add hypervisor inspector sanity check" * gnocchi: Allow to set a different creds section * Allow to configure the triplo network name * Fix oslo.vmware lazy loading * test: remove unused variable * utils: remove unused function stringify\_timestamps * gnocchi: Add volume\_type attribute to volume * devstack: Don't enable api by default * add libxml2 required for lxml * fix http publisher test * filtered out the phynical nics when query vm nics with VMware API * Correct ceilometer reraising of exception * Fix typo in plugin.sh * Add old vm state for compute.instance.update * capture keystone authentication as metric * tools: stop using global conf * gnocchi: use batch create\_metrics=True * Fix the install guide bug * Update Ceilometer architecture docs * Remove useless mock * drop \`counters\` support from pipeline * cleanup unit test location * update architecture docs * Remove api index file * Bump minimal version of cotyledon * Make rabbitmq configuration much simpler * Reenable the tempest test about nova notification * use hyphen-less option name sql\_expire\_sample\_only * Translate info-level log messages for LOG.error * 
drop disable\_non\_metric\_meters option * cleanup gnocchiclient * Change gnocchi\_ext.NotFound to gnocchi\_ext.ResourceTypeNotFound * integration tests: fix generation of tests\_results.html * Use one log level * Add volume and floatingip event\_delete for gnocchi event\_dispatcher * more gnocchi mapping cleanup * cleanup gnocchi mapping * cleanup manual install * fix postgresql functional gate * Add a release note for HTTPProxyToWSGI middleware * Modify unit of disk requests rate * Handling KeyError error, make gnocchi event dispatcher work * Corrected debug msg in CPUL3CachePollster * doc cleanup * use aodhclient in integration test * Print ceilometer configuration on startup * register ceilometer options at runtime * utils: stop using global conf * intel\_node\_manager: stop using global conf * Remove useless singleton check * register oslo\_db options at runtime * register keystoneauth options at runtime * storage: stop using global conf * cmd.storage: stop using global conf * test\_storage\_scenarios: stop using global conf * raise InstanceShutOffException if vm power off when get\_vm\_moid use map of vm manage obj to reduce vmware api calls * Remove duplicated code * Remove useless code * drop image pollster * gnocchi: remove useless keepalive adapter * comment: remove unused comment * XenAPI: Not support two key network meters * Replace retrying with tenacity * Clean the glance v1 code in tempest plugin * Convert file publisher to be equivalent to file dispatcher * Add aliases for direct publisher * doc: fix the wsgi configuration file name * deprecate ceilometer api * make blacklist log more specific * extract 'instance\_id' as 'resource\_id' in event definitions * disable signing in direct publisher * Add pyOpenSSL to test-requirements * Trivial fix: fix a wrong config option type usage * Add support of metering volume related resources * coordination: remove unused kwarg * Fix bug for ceilometer polling generates an error * fix perf when libvirt is 
>=2.0.0 and <2.3.0 * Remove ceilometer tool show\_data.py since it is not usable * Bump hacking to 0.12 * Replace oslo\_utils.timeutils.isotime * inspectors: stop using global conf * meter.notifications: stop using global conf * collector: stop using global conf * pollsters: stop using global conf * Replace SimpleProducer with KafkaProducer * document: remove configuration README description * devstack: allow to configure not backend * Modify variable's using method in Log Messages * compute.util: stop using global conf * event.converter: stop using global conf * discovery: stop using global conf * event: stop using global conf * sample: stop using global conf * keystone\_client: stop using global conf * publisher: stop using global conf * Use method constant\_time\_compare from oslo.utils * Add more verbosity for gnocchi\_resources.yaml * Add package "pifpaf" license description * Change redirect status from 307 to 308 * Use set\_defaults method in oslo\_middleware to check CORS\_OPTS * change gnocchi cache enable condition * Correct wrong description of method enforce * Fix gate problem related to \_error\_checker() * Remove deprecated ceilometer-dbsync * gnocchi: don't show backtrace on connection failure * fix gate * Redact password from opendaylight client logging * Broken Link: setuptools entry point * enable caching * Replace 'vmware' with 'vsphere' * gnocchi: stronger configuration file parsing * Don't create useless threads * pipeline: stop using global conf * declarative: stop using global conf * coordination: stop using global conf * Remove buggy usage of global config * pipeline services: stop using global conf * Fix collector single worker to handle UDP package with many worker config * Add http\_proxy\_to\_wsgi to config-generator * Fixed cotyledon version requirement * Remove pecan\_debug option * Fix the issue that missing the app file * neutron\_client: stop using global conf * nova\_client: stop using global conf * messaging: stop using global 
config * collector: stop using global config * api: Remove global conf * tests: fix broken udp tests * tests: fix tests for functional tests without dsvm * fix perf when libvirt is >=2.0.0 and <2.3.0 * Add http\_proxy\_to\_wsgi to api-paste * make gnocchi event dispatcher work * Enable release notes translation * collector: do not set any dispatcher by default * Remove the unnecessary space * Add autoscaling scenario in tempest tests * Compute agent can poll tx and rx errors and drops * Batching of event & meter dispatching over HTTP * Stop adding ServiceAvailable group option * specific the title * Trivial - Changes rst markup * Alters rst markup to comply with OpenStack rst guidelines * Add prefix "$" for command examples * Use 'code-block' for pieces of code * Docstrings should not start with a space * collector: fix graceful shutdown when udp enabled * Updates rst markup to better align with OpenStack rst guidelines * Modify startup parameters of ceilometer-api in devstack script * Fix the "Gnocchi" link pages of custom.rst doc * Bad Link: stevedore * Clean the deprecated non-metric related code * Clean imports in code * Using assertIsNone() instead of assertIs(None) * Using assertIsNone() instead of assertIs(None) * Fix typo in a docstring in agent/manager.py * XenAPI: polling meters are always 0 * Fix UnicodeEncodeError in Ceilometer polling * gabbi: set panko\_is\_disabled to avoid relying on Keystone autodiscovery * tox: refactor targets * Fix problem when using wsgi script to start * Remove import objects and use import modules * Refactor Ceilometer resource API * Move oslo.db to hard requirements list * Remove left over from old ceilometer-api binary * gabbi: import pipeline\_cfg\_file option before using it * Update reno for stable/newton 7.0.0.0rc1 ---------- * agentbase: remove flaky test * add note regarding pipeline\_processing\_queues option * Refactor Ceilometer event API * Refactor Ceilometer alarm API * standardize release note page ordering * 
gnocchi: enable event\_dispatcher in devstack and doc * Use pbr wsgi\_scripts feature to build ceilometer-api * Change fnmatch.match method to fnmatch.fnmatch * tox: Remove useless env * Use deps extra for optional requirements * Don't require gnocchiclient * Initialize correctly collector * update docs to show Telemetry projects * Remove store\_events options * Remove sql-expire-samples-only as a CLI option * Update the compute node service\_credentials parameters * Corrected file mode settings * Add oslo.config cli opt to skip the confirm check * add new meters about some perf events * Set a correct number of threads for polling tasks * improve notification processing * Fix string interpolation in log * correct input params in get\_samples * refactor service to be less pipeline dependent * [api-ref] Correct response code * [api-ref] Remove temporary block in conf.py * XenAPI: correct polling on memory\_usage * gnocchi: Create resource-types on upgrades * Allow to skip metering/event database upgrade * Deprecate Kwapi pollsters * Rename ceilometer-dbsync in ceilometer-upgrade * make reload test more resilient 7.0.0.0b3 --------- * generalise instable API redirect assertion * Nit: Aligning the content * Config logABug feature for Ceilometer api-ref * Fix checking IP version when using IPv6 * remove needless brackets * The debug that network.incoming/outgoing.packets print is not right * Update readme file * [install] Create endpoint in one command * [install] Add a missing stash * Replace urllib.quote() with six.moves.urllib.parse.quote() * correct the meaning of direct publish transport * correct the mistake in install-guide document * Add a publish transport in Ceilometer architecture document * XenAPI: failed to poll cpu\_util * Cleanup imports in code * add url in setup.cfg * conversions: remove no used local variable * Add api-ref/build to .gitignore * Fix a warning when running \`tox -e api-ref\` * Gnocchi dispatcher fails on skipped metric * Get ready for 
os-api-ref sphinx theme change * [dev-docs] Changed location of a class * Limit Happybase to < 1.0.0 * Fix tempest.conf generation * Configuration of certificate verification for HTTP dispatcher * api: redirect to Panko if enabled * fix the %{} when string formatting * ValueError exception when SNMP returns NoSuchObject * Format error message in http.py * HTTP Dispatcher: Post event data as JSON & improve logging * dispatcher: deprecate event database dispatcher for Panko * dispatcher/database: simplify connection retrieving * dispatcher: split the database dispatcher * Switch to use Glance v2 in image pollsters * Fix the not found link for notification in glossary.rst * Added a link to the architecture guide * Removed redundant phrase to increase readability * Fixed small grammar error in overview.rst * Set the correct for image bytes\_sent event * Change keystoneclient to keystoneauth * tests: rewrite batching test in a less racy way * Remove an unused method of network pollsters' base class * Fixed a small grammar error configuration help doc * add memory bandwidth meter * Do not limit elasticsearch to 2.0 * tests: remove dead code with self.source\_resources * Remove Nadya Privalova from core reviewers * Fix wrong parameter reference in periodic decorator * Add missing %s in print message * test: Fix wrong override value of config option interface * Modify py3.4 to py3.5 in the py35-functional of tox * Put py34 first in the env order of tox * This adds migrated API reference files * consumes error notif. 
when event are disabled * publisher: make direct publisher generic * Revert "[install] Create endpoint in one command" * Register the gnocchi event dispatcher * Reduce code duplication * Specify host info when using the notifier publisher * Add Python 3.4 and 3.5 classifiers and targets * Retrieval of RBD device information issue * Install configuration files in etc * Added full support of snmp v3 usm model 7.0.0.0b2 --------- * base.Resource not define \_\_ne\_\_() built-in function * Check lbaas version if call is v2 specific * move out oslo.service * Remove unused AdvEnum class definition * fix l3\_cache definition * throw PollsterPermenantError exception for memory usage meter * events: fix operator check in event filter * Raise PollsterPermanentError if inspector is not implemented * Remove unused LOG * Fix get\_gnocchiclient to work with python3.5 * collector: use an intermediate proxy class for event dispatcher * Replace raw\_input with input to make PY3 compatible * pollsters: Remove eventlet timers * Imported Translations from Zanata * Fix the name as it was a typo * devstack: prefix \_drop\_database * start partition coordinator only when there is a group to join * split discover into different namespaces * devstack: disable workload\_partition for compute polling * Add a tool to clean the legacy alarm tables * devstack: do not wait for service to start * Config: no need to set default=None * sqlalchemy: do not run upgrade on fresh install * sqlalchemy: fix JSONEncodedDict implementation type * Add install-guide for ceilometer * gnocchi: use events to end Gnocchi resource * Don't generate hostname of host running build to config file * Fixing a trivial typo * Add 'task\_state' attribute to meter metadata * add support of batch recording metering data for mongodb * Switch to use glanceclient to get image * Add install description for Neutron * add l3 cache usage meter * doc: remove left over of docbookrestapi * tests: do not override auth version to v2 
6.1.0 ----- * dispacher/gnocchi: measures sent fix logging * Copy images\_client from tempest + Correct concurrency of gabbi 1.22.0 * Rename gabbits with \_ to have - instead * Correct concurrency of gabbi tests for gabbi 1.22.0 * Use "topics" instead of "topic" in Notifier initialization * Clean deprecated "rpc\_backend" in tests * Use trusts in Heat integration test * Remove configuration README in etc * Imported Translations from Zanata * Copy images\_client from tempest * Add Magnum events to definitions * [dispatcher/gnocchi] add unit for metric * Delete unused last\_dup variable * catch DriverLoadFailure for get\_transport optional * catch DriverLoadFailure for get\_transport optional * Bump to Nova v2.1 * Fix the py34 jenkins job * tempest: import ImagesClient as ImagesClientV2 * fix some typos in our doc, comments and releasenotes * enable swift pollsters poll data for specific region * enable swift pollsters poll data for specific region * Imported Translations from Zanata * tempest\_plugin: drop telemetry decorator * tempest\_plugin: drop telemetry decorator * Updated from global requirements * [Trivial] Update Neutron resource status list * doc: remove database alarm capability * replace deprecated heat command with OSC * Update to hacking 0.11.0 * Verify message's signature for every dispatcher * fix CI failure due to oslo.messaging 5.0.0 * remove log in tools/make\_test\_{event\_}data.py * fix CI failure due to oslo.messaging 5.0.0 * remove record\_metering\_data method from collector * tests: replace overtest by pifpaf * Add log hints for partition coordinator * fix opts.list\_opts for polling options * update help string for messaging\_urls * Drop timestamping in pollsters * Set the time point polling starts as timestamp of samples * Fix notification listeners usage * tox: only install hacking in pep8 target * Remove unused pylintrc * devstack: remove useless policy\_file setting * event: verify signature before recording events for all dispatchers * 
tests: stop services on tests teardown * Fix oslo\_service stop/start mechanism * remove floating\_ip\_get\_all in nova\_client * [Trivial] Refactor libvirt inspector connection & uri * Fix concurrency issue with snmp pollsters * Drop the executability of http.py * Updated from global requirements * remove deprecated auth type password-ceilometer-legacy * [Trivial] Update Neutron resource status list * [Trivial] Remove CEILOMETER\_API\_LOG\_DIR option for devstack * Update the default log levels * Clean some unused method in ceilometer/keystone\_client.py * remove invalid todo in storage functional test code * return 400 when invalid aggregation function is specified * Replace logging with oslo\_log * remove deprecated option database\_connection * move EventFilter to event storage namespace * remove MultipleResultsFound and NoResultFound exception * Remove useless file * remove todo for OS\_TEST\_PATH * add tempest to test-requirements.txt * Improve the docstring for Swift pollsters * add log decorator for neutron\_client public method * add debtcollector to requirements * Remove direct dependency on babel * Imported Translations from Zanata * Refactor floatingip pollster to use discovery * Fix notification listeners usage * notification: Remove eventlet timers * use static timestamps for api samples * refactor DefinitionException classes * collector: Don't use eventlet thread * fix openstack cli command in doc manual * Add release note link * switch to openstack cli instead of keystone cli * Updated from global requirements * libvirt: fix missing python-libvirt issue * Add status in Ceilometer VPN connection sample * document how to enable ceilometer stable branch in devstack * remove python-ceilometerclient from requirements * Imported Translations from Zanata * Updated from global requirements * Imported Translations from Zanata * Ignore the filter\_service\_activity option if gnocchi project not found * Fix Ceilometer tests config options * Updated from global 
requirements * Fix doc build if git is absent * Replace tempest-lib by os-testr * Add notes on moving to Gnocchi * delete verbose/redundant/deprecated text * replace fnmatch with oslo.utils.fnmatch * add ceilometer to gnocchi configuration notes * Updated from global requirements * Imported Translations from Zanata * remove complex capabilities for meter, resource and statistics * gnocchi: batch measurements * change keystone to openstack cli * re-org existing manually install notes * messaging: remove RequestContextSerializer * Remove unused context object in vpnaas test * Remove unused object from lbaas\_v2 test * Remove unused context object lbaas test * test: remove unused context object in FWaaS tests * Remove unused context objects in Glance tests * Remove unused context object in test * Remove a useless usage of oslo.context in meters API * Remove the deprecated DB2 driver * Update the Administrator Guide links * mongo: remove unused function * Updated from global requirements * Imported Translations from Zanata * drop magnetodb support * Simplify chained comparison * Enhancing Retry logic to Coordination when joining partitioning grp * publisher: clean out context usage * Disable ceilometer-aipmi by default for devstack * Remove useless context object usage * Imported Translations from Zanata * Imported Translations from Zanata 6.0.0 ----- * collector: never allow to lose data * 'ceilometer-polling' should fail with no valid pollsters * Imported Translations from Zanata * Fix typos in comments and config strings * Updated from global requirements 6.0.0.0rc2 ---------- * abort alarms URLs when Aodh is unavailable * abort alarms URLs when Aodh is unavailable * fix minor typo in test\_generic.py * Imported Translations from Zanata * Add the functional tests for getting events * collector: never allow to lose data * devstack Fix unprocess measure path * Imported Translations from Zanata * devstack: allow ceilometer-api and keystone to run on different hosts * 
Devstack: install coordination backend for compute agent * remove dns and trove from entry\_points * correct docstring in storage module * Imported Translations from Zanata * Remove gabbi tests that check content-location * Add http publisher * remove dns and trove from entry\_points * Imported Translations from Zanata * Imported Translations from Zanata * Update reno for stable/mitaka * Update .gitreview for stable/mitaka * Remove gabbi tests that check content-location 6.0.0.0rc1 ---------- * Imported Translations from Zanata * remove unused field 'triggers' defined in sample event\_pipeline.yaml * remove SERVICE\_TENANT\_NAME from devstack plugin * clean devstack plugin * add rc1 release notes * Use assertIn and assertNotIn * core status cleanup * tests: remove ceilometer-api bin test cases * gate: add missing sudo * change dns and trove notifications to declarative * Remove en\_GB translations * register the config generator default hook with the right name * Imported Translations from Zanata * Updated from global requirements * tempest: migrate api and scnario tests from tempest * mitaka-3 release notes * Adjust log levels for InstanceShutOffException * Fix event\_type creationg failure due to race condition * Imported Translations from Zanata * Ignoring cpu measurement when instance's state is SHUTOFF * Add validation for polling\_namespaces option * xenapi: support the session when xenserver is slave * Imported Translations from Zanata * gnocchi dispatch: Added new resource type support * remove wrong "#!/usr/bin/env python" header * Fixed corner cases of incorrect use of oslo.config * Updated from global requirements * timedelta plugin for meter definition process * Cast Int64 values to int, float in statistics * Cache getters for the decalarative definitions 6.0.0.0b3 --------- * [sahara] add events definitions regarding new notifications * Moved CORS middleware configuration into oslo-config-generator * Add the meter example file 
'lbaas-v2-meter-definitions.yaml' * Change default policy to allow create\_samples * Enable the Load Balancer v2 events * Remove unused pngmath Sphinx extension * Updated from global requirements * Fix a minor missing parameter issue * close services in test * Add an update interval to compute discovery * Docs: Configure meters/events dispatch separately * Fix the typo in the gnocchiclient exception * Updated from global requirements * Add gnocchi dispatcher opts to config * Change the SERVICE\_TENANT\_NAME to SERVICE\_PROJECT\_NAME * Hyper-V: replaces in-tree hyper-v utils usage with os\_win * Initial seed of hacking * Add /usr/local/{sbin,bin} to rootwrap exec\_dirs * Fix SDR file parsing for Intel Node Manager * Gnocchi: fix ResourcesDefinitionException for py3 * Change LOG.warn to LOG.warning * tests: fix unworking debug output * Adds timestamp option to Aggregation transformer * remove default=None for config options * Replace assertEqual(None, \*) with assertIsNone in tests * Trivial: Cleanup unused conf variables * Enable the Load Balancer v2 for the Ceilometer(Part Two) * Remove unused variable * Enable the Load Balancer v2 for the Ceilometer(Part One) * Fix footnote reference to Aodh in docs * Updated from global requirements * Set None explicitly to filter options * KEYSTONE\_CATALOG\_BACKEND is deprecated * Use overtest to setup functional backends * devstack: Fix Keystone v3 configuration typo * Imported Translations from Zanata * Handle malformed resource definitions gracefully * Update the home page * Skip duplicate meter definitions * set higher batching requirement * use retrying to attempt to rejoin group * network: remove deprecated option name * sample: remove deprecated option name * Fix wrong capitalization * rewriting history * Remove unused pytz requirement * devstack: use password with version discovery * fix tempest path * Updated from global requirements * raise coordination error if not registered * do not configure worker specific items 
in init * integration-gate: fix publicURL retrieval * rolling upgrades * fix locking in ceilometer * enable notification agent partitioning * better support notification coordination * remove useless notification listener helper * Lookup meter definition fields correctly * Enhances get\_meters to return unique meters * Imported Translations from Zanata * Updated from global requirements * Fix ceilometer floatingip pollster * Updated from global requirements * tempest: migrate base class for tests * tempest: add ceilometer tempest plugin * tempest: add telemetry client manager * tempest: migrate conf.py from tempest tree * tempest: copy telemetry client from tempest tree * Fix events rbac 6.0.0.0b2 --------- * Don't store events with Gnocchi * add additional mitaka-2 release notes * Corrects typo "a other" -> "another" * Updated from global requirements * add release notes for mitaka-2 * devstack: add support for Gnocchi backend * notification: Use oslo.messaging batch listener * Cleanup of Translations * Added CORS support to Ceilometer * Don't set keystonemiddleware cache * Set None explicitly to filter options * Add OSprofiler-specific events definitions * collector: Use oslo.messaging batch listener * Updated from global requirements * Changes aggregator transformer to allow retention\_time w/o size * Replace LOG.warn with LOG.warning * Updated from global requirements * wrong accumulative value of "network.services.lb.incoming.bytes" * Trivial: Remove vim header from source files * Trival: Remove unused logging import * Fix the typos in the source code * gnocchi: fix stack resource type * Misspelling in message * Clean pagination related methods of impl\_mongodb * Fix some typos in the snmp.py * remove local hacking check * [MongoDB] add indexes in event collection * Remove unused code in gnocchi dispatcher * remove unnecessary code * recheck cache after acquired gnocchi\_resource\_lock * collector: remove deprecated RPC code * fix case in function name * Catch 
the EndpointNotFound in keystoneauth1 than in keystoneclient * Log exception if stevedore fails to load module * Updated from global requirements * Revert "Revert "devstack config for dogpile cache"" * add per resource lock * verify gnocchi connection before processing * [refactor] remove redundant import of options * Added unit test cases for pysnmp 4.3 * Add keystoneauth1 in requirements * gnocchi: fix cache hash logic * gnocchi: use gnocchiclient instead of requests * show queue status on integration test * Updated from global requirements * using a consistent uuid as cache namespace * Duplicate information link for writing agent plugins * Use keystoneauth1 instead of manual setup * Do not mock the memcache interface for auth\_token * oslo.messaging option group/name change for notification topics * Correct the host field of instance metadata * fix the bug that gnocchi dispatcher can't process single sample * Replace stackforge with openstack * MAINTAINERS: remove outdated data 6.0.0.0b1 --------- * Remove version from setup.cfg * add initial release notes * fix functional gate * messaging: stop using RequestContextSerializer * Fix ceilometer-test-event.py script * Deduplicate the code about snmp meter loading * Updated from global requirements * Revert "devstack config for dogpile cache" * Revert "Workaround requests/urllib connection leaks" * add cpu.delta to gnocchi resources * simplify collector cache * Consistent publisher\_id for polling agent * build metric list on init * re-implement thread safe fnmatch * clean up integration test urls * tools: fix default resource metadata for instance * don't pass ceilometer options to oslo.db engine facade * Use str(name) instead of name.prettyPrint() * Reduce code duplication * remove config files when run clean.sh * fix some test case wrongly skipped for mysql backend * Add WebTest to test-requirements.txt * tests: remove testscenario usage for storage drivers * Remove eventlet usage * Remove alarming code * Clarify 
the doc about multiple notification\_topics usage * Reduced source code by extracting duplicated code * devstack config for dogpile cache * Updated from global requirements * Updated from global requirements * Fix an indent nit of enforce\_limit method * Move the content of ReleaseNotes to README.rst * use common cache * A dogpile cache of gnocchi resources * Updated from global requirements * install database when collector is enabled * Updated from global requirements * Updated from global requirements * add reno for release notes management * Updated from global requirements * Support to get hardware's cpu\_util from snmp * add rohit\_ to MAINTAINERS * gnocchi: set the default archive policy to None * Mv gabbi\_pipeline.yaml into test directories * Factorize yaml loading of declarative stuffs * Factorize field definition of declarative code * Wrong result is returned when call events getting API * tox: use pretty\_tox in most places * Updated from global requirements * avoid unnecessary inner join in get\_resources() for SQL backend * Add sql-expire-samples-only to option list * Updated from global requirements * configure Apache only when ceilometer-api is enabled * Imported Translations from Zanata * avoid using isolation level * unquote resource id to support slash in it * specify runtime environment for scripts * Using oslo-config-generator to instead of generate-config-file.sh * Use gnocchiclient for integration script * Enable signature verification for events * Correct the timestamp type when make test samples data * Updated from global requirements * avoid generate temporary table when query samples * Reject posting sample with direct=true if Gnocchi is enabled * make script under tools directory executable * Updated from global requirements * Added the README.rst in devstack folder * fix tools/make\_test\_event\_data.py * fix image\_ref attr in gnocchi resource * support mysql+pymysql in functional test * Updated from global requirements * Fix snmp 
pollster to not ignore valid meters * Block oslo.messaging 2.6.1 release * reset policy per test * Remove dependency on sphinxcontrib-docbookrestapi * gnocchi: remove possible ending / in URL * api: simplify root controller * api: simplify Pecan config * remove instance:FLAVOR related code and docs * Do collector setup and storage cleanup for all backends * change collector\_workers to [collector]workers * Enable POST samples API when gnocchi enabled * devstack: fix debug info for Gnocchi * Imported Translations from Zanata * Add Liberty release note link * Fix make\_test\_data.sh * Imported Translations from Zanata * Be explicit when copying files to /etc/ceilometer * Deprecate event trait plugin 'split' * Updated from global requirements * Clean some log messages when polling neutron resources * Simplify the validation of required fields of pipeline source * doc: service enablement not necessary when using Devstack plugin * Skip bad meter definitions instead of erroring out * Remove the unused network\_get\_all method * mark logging.info translation accordingly * logging cleanup * Updated from global requirements * Remove last vestiges of devstack from grenade plugin * Add missing ceilometerclient repo location 5.0.0 ----- * Imported Translations from Zanata * Fix for resource polling warnings * SQL: Fix event-list with multiple trait query filters * Fix the bug of "Error spelling of a word" * Imported Translations from Zanata * SQL: Fix event-list with multiple trait query filters * Fix a mistake in a test * Configure collector to only record meter or event * Rename list\_events tests to list\_samples tests * fix elasticsearch script reference * Fix the deprecation note in meter.yaml * Fix the deprecation note in meter.yaml * Remove deprecated archive policy map for Gnocchi * Remove enable\_notification.sh * Parametrize table\_prefix\_separator in hbase * Imported Translations from Zanata * fix typo in storage/impl\_sqlalchemy * devstack: install all 
configuration files from etc/ * dispatcher: remove deprecated CADF code in HTTP * mongodb: remove deprecated replica\_set support * Ensure the test data sample has correct signature * Open Mitaka development 5.0.0.0rc1 ---------- * gnocchi: Don't raise NotImplementedError * Add missing meter and exchange opts * Imported Translations from Zanata * Add test to cover history rule change * Workaround requests/urllib connection leaks * integration tests: additional debugging infos * Coordinator handles ToozError when joining group * Don't create neutron client at loadtime * Delete its corresponding history data when deleting an alarm * update event filter test to validate multiple trait args * Fix variable typos * Updated from global requirements * Change ignore-errors to ignore\_errors * Fix reconnecting to libvirt * remove batch processing requirement from arithmetic transformer * Cleanup empty dirs from tests * retain existing listeners on refresh * Override dispatcher option for test\_alarm\_redirect\_keystone * [ceilometer] Update links to Cloud Admin Guide * Adds support for dynamic event pipeline * Updated from global requirements * Imported Translations from Zanata * pollster/api now publish to sample queue * tox: generate config file on test run * tox: Allow to pass some OS\_\* variables * Refactor keystone handling in discovery manager * Use make\_sample\_from\_instance for net-pollster * apply limit constraint on storage base interface * gnocchi: add two new resources * Fixed tox -egenconfig Error * Add declarative meters to developer docs * add delta transfomer support * do not recreate main queue listeners on partitioning * Validate required fields in meter definition * deprecate cadf\_only http dispatcher * Fix the heavy time cost of event-list * Update API Doc to deprecate the alarming part * Deprecate config options of the old alarming functionality * update architecture documentation * Add attribute 'state' to meter metadata when source is polling * 
doc: update devstack usage * Remove useless base class * Split out image non-meters * Make the gabbi tox target work with modern tox * Avoid 500 errors when duplicating limit queries * Correct test\_list\_meters\_meter\_id to work with py3 * Updated from global requirements * Update event\_definitions for Cinder Image Cache * Update install docs * Use b64encode to replace of encodestring * Prevent ceilometer expirer from causing deadlocks * remove duplicate log exception message * Spelling mistake of comment in api/controllers/v2/query.py * Fix typos in gnocchi.py and converter.py * Updated from global requirements * Updated from global requirements * Add a py34-functional tox target * doc: update notification\_driver * polling: remove deprecated agents * Fix string in limit warning * Typo fixing * missed entrypoint for nova\_notifier removal * Imported Translations from Transifex * Fix links in README.rst * integration: Add debugging information * deprecate db2 nosql driver * devstack: add new option to support event-alarm * Sync devstack plugin with devstack:lib/ceilometer * Updated from global requirements * remove old nova\_notifier processing code 5.0.0.0b3 --------- * restrict admin event access * Migrate the old snmp pollsters to new declarative pollster * Support to load pollsters extensions at runtime * Added snmp declarative hardware pollster * Requeuing event with workload\_partitioning on publish failure * Event filtering for non-admin users * integration: fix typo * gnocchi: cleanup instance resource definition * Updated from global requirements * Adding pradk to MAINTAINERS * Adding liusheng to MAINTAINERS * Add index to metadata\_hash column of resource table * Incorrect Links are updated * Removing unused dependency: discover * Use new location of subunit2html * Change tox default targets for quick use * Fixed identity trust event types * gnocchi: quote the resource\_id in url * fix metadata for compute cpu notifications * support custom metadata * 
Move profiler meters to yaml * Control Events RBAC from policy.json * Events RBAC needs scoped token * make telemetry sample payloads dictionaries * Fix requeue process on event handling error * allow configurable pipeline partitioning * Keep the instance\_type meta from polling and notification consistent * Add user\_id,project\_id traits to audit events * Change json path's to start with $. for consistency * Add validation tests for arithmetic, string and prefix expressions * Fix description for "Inapt spelling of 'MongoDB'" * Create conf directory during devstack install phase * support custom timestamp * Add cpu meters to yaml * Fix description for "Incorrect spelling of a word" * integration: add some new tests * Fix disable\_non\_metric\_meters referencing * Update tests to reflect WSME 0.8 fixes * remove jsonpath-rw requirement * Do not use system config file for test * gnocchi: move to jsonpath\_rw\_ext * Updated from global requirements * Allow to run debug tox job for functional tests * Use jsonpath\_rw\_ext for meter/event definitions * preload jsonpath\_rw parsers * integration test: adjusts timeout * integration test: failfast * Updated from global requirements * Avoid recording whole instance info in log * Fix dependency for doc build * Mark record\_type in PaaS Event Format doc as optional * full multi-meter support * add flexible grouping key * Corrected test\_fallback\_meter\_path test case * Add hypervisor inspector sanity check * handle list payloads in notifications * xenapi: support the session to "unix://local" * Introduce Guru Meditation Reports into Ceilometer * Use start status of coodinator in tooz * Fixed event requeuing/ack on publisher failure * Implement consuming metrics from Magnum * Avoid from storing samples with empty or not numerical volumes * use union all when building trait query * Fixed spelling error, retreive -> retrieve * Use min and max on IntOpt option types * Update install docs with gnocchi dispatcher info * Make it 
possible to run postgresql functional job * Revert "Remove version from os\_auth\_url in service\_credentials" * Updated from global requirements * Use oslo\_config PortOpt support * integration: chown ceilometer directory properly * add mandatory limit value to complex query list * add test to validate jsonpath * Remove version from os\_auth\_url in service\_credentials * do not translate debug logs * Updated from global requirements * Grenade plugin using devstack plugin for ceilometer * remove alembic requirement * Convert instance, bandwidth and SwiftMiddleware meters * Change and move the workers options to corresponding service section * Drop the downgrade function of migration scripts * start rpc deprecation * support multiple-meter payloads * add poll history to avoid duplicate samples * Add Kilo release note reference * initialise opencontrail client in tests * Make ConnectionRetryTest more reliable * Correct thread handling in TranslationHook * Updated from global requirements * Correctly intialized olso config fixture for TestClientHTTPBasicAuth * Don't start up mongodb for unit test coverage * disable non-metric meter definitions * Cast Int64 values to float * Convert identity, sahara and volume to meters yaml * Enable entry points for new declarative meters * Fix for rgw still throwing errors * group pollsters by interval * Revert "Revert "remove instance: meter"" * api: fix alarm deletion and update * Fixes the kafka publisher * Sync devstack plugin with devstack:lib/ceilometer * integration: use the right user in gate * Imported Translations from Transifex * Initial separating unit and functional tests * Stop using openstack.common from keystoneclient * minimise scope of hmac mocking * Updated from global requirements * gnocchi: retry with a new token on 401 * Fix some gabbi tests * Improve comments in notification.py * mongo: fix last python3 bugs * postgres isolation level produces inconsistent reads * Masks messaging\_urls in logs during debug 
mode * Corrected unit of snmp based harware disk and memory meters * Provide base method for inspect\_memory\_resident * Fix Python 3 issue in opendaylight client * Fix more tests on Python 3 * Remove the compute inspector choice restriction * [MongoDB] Refactor indexes for meter and resources * tests: add an integration test * Fix WSGI replacement\_start\_response() on Python 3 * gnocchi: reduce the number of patch to gnocchi API * Make the partition coordinator log more readable * Drop out-of-time-sequence rate of change samples 5.0.0.0b2 --------- * [MongoDB] Use a aggregate pipeline in statistics * Instance Cache in Node Discovery Pollster * Instance Caching * Imported Translations from Transifex * fix gnocchi resources yaml * Import the api opt group in gabbi fixture * Add a batch\_polled\_samples configuration item * Remove redundant comma * storage: deprecates mongodb\_replica\_set option * Improves send\_test\_data tools * Replace isotime() with utcnow() and isoformat() * distributed coordinated notifications * Imported Translations from Transifex * Close and dispose test database setup connections * Updated from global requirements * api: Redirect request to aodh if available * api: return 410 if only Gnocchi is enabled * Fix broken IPMI agent * add mandatory limit value to meter list * add mandatory limit value to resource list * add mandatory limit value to event list * Move gnocchi resources definition in yaml file * Send a notification per sample, do not batch * Handles dns.domain.exists event in Ceilometer * Pollsters now send notifications without doing transforms * Imported Translations from Transifex * Switch to the oslo\_utils.fileutils * Updated from global requirements * Use choices for hypervisor\_inspector option * The product name Vsphere should be vSphere * Add necessary executable permission * Store and restore the xtrace option in devstack plugin * gnocchi: Remove useless resources patching * add Trove(DBaaS) events * Set 
conf.gnocchi\_dispatcher.url explicitly in tests * Declarative meters support * Stop the tests if backend hasn't started * Delay the start of the collector until after apache restart * Clean the re-implemented serializers in Ceilometer * monkey\_patch thread in tests * make notifier default event publisher * Fix gnocchi DispatcherTest tests * Sort metric data before grouping and processing * Namespace functions in devstack plugin * Added valid values of operator to response body * gnocchi: fixes the instance flavor type * gnocchi dispatcher: fix typo in stevedore endpoint * Imported Translations from Transifex * Tolerate alarm actions set to None * Make ceilometer work correctly when hosted with a SCRIPT\_NAME * Implementation of dynamically reloadable pipeline * fix log msg typo in api utils * Updated from global requirements * Add documentation about the usage of api-no-pipline * drop deprecated pipeline * Improve doc strings after changing method for index creation * set default limit to meter/sample queries * collector: fix test raising error * Remove test-requirements-py3.txt * remove unused event query * Create a devstack plugin for ceilometer * Add support for posting samples to notification-agent via API * restore long uuid data type * Revert "Add support for posting samples to notification-agent via API" * Update alarm history only if change in alarm property * test error log - catch dummy error * fix kafka tests from flooding logs * catch warnings from error tests * remove unused notifier * Add support for posting samples to notification-agent via API * Stop dropping deprecated tables while upgrade in mongodb and db2 * Add handler of sample creation notification * Remove the unused get\_targets method of plugin base * Replaces methods deprecated in pymongo3.0 * add oslo.service options * Restricts pipeline to have unique source names * drop use of oslo.db private attribute * Fix oslo.service configuration options building * Add fileutils to 
openstack-common.conf * disable non-metric meters 5.0.0.0b1 --------- * Remove unnecessary executable permission * Imported Translations from Transifex * Switch to oslo.service * Remove unnecessary wrapping of transformer ExtentionManager * Port test\_complex\_query to Python 3 * Fix expected error message on Python 3 * Fix usage of iterator/list on Python 3 * Replaces ensure\_index for create\_index * pip has its own download cache by default * For sake of future python3 encode FakeMemcache hashes * Make acl\_scenarios tests' keystonemiddleware cache work flexibly * Update version for Liberty * Gnocchi Dispatcher support in Ceilometer 5.0.0a0 ------- * Updated from global requirements * Fix alarm rest notifier logging to include severity * Remove useless execute bit on rst file * Fix unicode/bytes issues in API v2 tests * Fix script name in tox.ini for Elasticsearch * Fix the meter unit types to be consistent * tests: use policy\_file in group oslo\_policy * Fix publisher test\_udp on Python 3 * Fix Ceph object store tests on Python 3 * Port IPMI to Python 3 * Port middleware to Python 3 * [elasticsearch] default trait type to string * Updated from global requirements * Lower down the range for columns which are being used as uuid * Sync with latest oslo-incubator * Fix testing of agent manager with tooz * Remove deprecated Swift middleware * add DNS events * Handle database failures on api startup * Fix more tests on Python 3 * Switch to using pbr's autodoc capability * Remove old oslo.messaging aliases * Remove useless versioninfo and clean ceilometer.conf git exclusion * Register oslo\_log options before using them * Add running functional scripts for defined backend * Remove snapshot.update events as they are not sent * WSME version >=0.7 correctly returns a 405 * TraitText value restricted to max length 255 * Cause gabbi to skip on no storage sooner * Updated from global requirements * Move eventlet using commands into own directory * adjust alarm post ut 
code to adapt to upstream wsme * Disable rgw pollster when aws module not found * Fixes DiskInfoPollster AttributeError exception * remove useless log message * use oslo.log instead of oslo-incubator code * Port test\_inspector to Python 3 * Fix usage of dictionary methods on Python 3 * Imported Translations from Transifex * Add oslo.vmware to Python 3 test dependencies * Optionally create trust for alarm actions * Remove iso8601 dependency * Enable test\_swift\_middleware on Python 3 * Enable more tests on Python 3 * Skip hbase tests on Python 3 * Clear useless exclude from flake8 ignore in tox * Remove pagination code * Stop importing print\_function * Remove useless release script in tools * Remove useless dependency on posix\_ipc * Remove exceute bit on HTTP dispatcher * Remove oslo.messaging compat from Havana * Fixing event types pattern for Role Noti. handler * Mask database.event\_connection details in logs * Switch from MySQL-python to PyMySQL * Python 3: replace long with int * Python 3: Replace unicode with six.text\_type * Python 3: generalize the usage of the six module * Update Python 3 requirements * Python 3: set \_\_bool\_\_() method on Namespace * Python 3: encode to UTF-8 when needed * Python 3: sort tables by their full name * Python 3: replace sys.maxint with sys.maxsize * Initial commit for functional tests * Update a test to properly anticipate HTTP 405 for RestController * proposal to add Chris Dent to Ceilometer core * rebuild event model only for database writes * cleanup problem events logic in event db storage * fix incorrect docstring for dispatcher * Imported Translations from Transifex * api: record severity change in alarm history * VMware: verify vCenter server certificate * Add hardware memory buffer and cache metrics * Make interval optional in pipeline * Improve ceilometer-api install documentation * empty non-string values are returned as string traits * Trait\_\* models have incorrect type for key * small change to 
development.rst file * Drop use of 'oslo' namespace package * [unittests] Increase agent module unittests coverage * stop mocking os.path in test\_setup\_events\_default\_config * Remove py33 tox target * made change to mod\_wsgi.rst file * ensure collections created on upgrade * Fix raise error when run "tox -egenconfig" * Updated from global requirements * Fix None TypeError in neutron process notifications 2015.1.0 -------- * Have eventlet monkeypatch the time module * Have eventlet monkeypatch the time module * Add the function of deleting alarm history * Updated from global requirements * Fix valueerror when ceilometer-api start * Override gnocchi\_url configuration in test * Move ceilometer/cli.py to ceilometer/cmd/sample.py * Fix valueerror when ceilometer-api start * remove deprecated partitioned alarm service * use message id to generate hbase unique key * gnocchi: fix typo in the aggregation endpoint * Release Import of Translations from Transifex * Fix Copyright date in docs * Replace 'metrics' with 'meters' in option and doc * use message id to generate hbase unique key * update .gitreview for stable/kilo * gnocchi: fix typo in the aggregation endpoint * broadcast data to relevant queues only * Imported Translations from Transifex * fix combination alarm with operator == 'or' * Updated from global requirements 2015.1.0rc1 ----------- * proposal to add ZhiQiang Fan to Ceilometer core * Open Liberty development * Fix a samples xfail test that now succeeds * Cosmetic changes for system architecture docs * Fix a issue for kafka-publisher and refactor the test code * pymongo 3.0 breaks ci gate * use oslo.messaging dispatch filter * Further mock adjustments to deal with intermittent failure * Adds support for default rule in ceilometer policy.json * Updated from global requirements * limit alarm actions * Use oslo\_vmware instead of deprecated oslo.vmware * Remove 'samples:groupby' from the Capabilities list * Use old name of 'hardware.ipmi.node.temperature' 
* Revert "remove instance: meter" * Tweak authenticate event definition * Add project and domain ID to event definition for identity CRUD * Fix the event type for trusts * reset croniter to avoid cur time shift * Imported Translations from Transifex * Avoid a error when py27 and py-mysql tests run in sequence * Stop using PYTHONHASHSEED=0 in ceilometer tests * remove instance: meter * Added ipv6 support for udp publisher * Remove the unnecessary dependency to netaddr * Optimize the flow of getting pollster resources * support ability to skip message signing * Avoid conflict with existing gnocchi\_url conf value * Using oslo.db retry decorator for sample create * alarm: Use new gnocchi aggregation API * collector: enable the service to listen on IPv6 * minimise the use of hmac * Typo in pylintrc * Ceilometer retrieve all images by 'all-tenants' * fix incorrect key check in swift notifications * support disabling profiler and http meters * ensure collections created on upgrade * Fix common misspellings * Updated from global requirements * refuse to post sample which is not supported * Enable collector to requeue samples when enabled * drop deprecated novaclient.v1\_1 * exclude precise metaquery in query field 2015.1.0b3 ---------- * Imported Translations from Transifex * remove log message when process notification * Add gabbi tests for resources * Fix typos and format in docstrings in http dispatcher * add ability to dispatch events to http target * doc: fix class name * add ability to publish to multiple topics * make field and value attributes mandatory in API Query * Fix db2 upgrade in multi-thread run issue * Add memory.resident libvirt meter for Ceilometer * Update reference * Check the namespaces duplication for ceilometer-polling * Add gabbi tests to explore the Meter and MetersControllers * Imported Translations from Transifex * mysql doesn't understand intersect * order traits returned within events * add network, kv-store, and http events * Add support for 
additional identity events * Add a Kafka publisher as a Ceilometer publisher * Fix response POST /v2/meters/(meter\_name) to 201 status * Attempt to set user\_id for identity events * Switch to oslo.policy 0.3.0 * normalise timestamp in query * Add more power and thermal data * Updated from global requirements * Fix formatting error in licence * Added option to allow sample expiration more frequently * add option to store raw notification * use mongodb distinct * remove event\_types ordering assumption * Add gabbi tests to cover the SamplesController * api: fix alarm creation if time\_constraint is null * fix log message format in event.storage.impl\_sqlalchemy * Remove duplications from docco * Tidy up clean-samples.yaml * Fix a few typos in the docs * use default trait type in event list query * fix wrong string format in libvirt inspector * create a developer section and refactor * Do not default pecan\_debug to CONF.debug * Adding Gabbi Tests to Events API * fix config opts in objectstore.rgw * Updated from global requirements * support time to live on event database for sql backend * add an option to disable non-metric meters * add missing objectstore entry points * Initial gabbi testing for alarms * reorganise architecture page * Add ceph object storage meters * Use oslo\_config choices support * fix inline multiple assignment * alarming: add gnocchi alarm rules * Protect agent startup from import errors in plugins * Revert "Add ceph object storage meters" * api: move alarm rules into they directory * compress events notes * Destroy fixture database after each gabbi TestSuite * Fix unittests for supporting py-pgsql env * Adding links API and CLI query examples * correct column types in events * Be explicit about using /tmp for temporary datafiles * Patch for fixing hardware.memory.used metric * Add ceph object storage meters * [PostgreSQL] Fix regexp operator * Add clean\_exit for py-pgsql unit tests * modify events sql schema to reduce empty columns * Remove 
duplicated resource when pollster polling * check metering\_connection attribute by default * unicode error in event converter * cleanup measurements page * api: add missing combination\_rule field in sample * Fix test case of self-disabled pollster * update event architecture diagram * use configured max\_retries and retry\_interval for database connection * Updated from global requirements * Making utilization the default spelling * Add Disk Meters for ceilometer * correctly leave group when process is stopped * Updated from global requirements * enable oslo namespace check for ceilometer project * Add doc for version list API * Enabling self-disabled pollster * Use werkzeug to run the developement API server * Imported Translations from Transifex * switch to oslo\_serialization * move non-essential libs to test-requirements * Validate default values in config * fix the value of query\_spec.maxSample to advoid to be zero * clean up to use common service code * Add more sql test scenarios * [SQLalchemy] Add regex to complex queries * Fix duplication in sinks names * metering data ttl sql backend breaks resource metadata * Refactor unit test code for disk pollsters * start recording error notifications * Remove no\_resource hack for IPMI pollster * Add local node resource for IPMI pollsters * Use stevedore to load alarm rules api * [MongoDB] Add regex to complex queries * Imported Translations from Transifex * support time to live on event database for MongoDB 2015.1.0b2 ---------- * split api.controllers.v2 * add elasticsearch events db * use debug value for pecan\_debug default * Shuffle agents to send request * Updated from global requirements * Adds disk iops metrics implementation in Hyper-V Inspector * discovery: allow to discover all endpoints * Declarative HTTP testing for the Ceilometer API * add listener to pick up notification from ceilometermiddleware * Drop deprecated namespace for oslo.rootwrap * remove empty module tests.collector * Add disk latency 
metrics implementation in Hyper-V Inspector * add event listener to collector * add notifier publisher for events * enable event pipeline * Imported Translations from Transifex * deprecate swift middleware * sync oslo and bring in versionutils * Expose alarm severity in Alarm Model * Hyper-V: Adds memory metrics implementation * Remove mox from requirements * Fix IPMI unit test to cover different platforms * adjust import group order in db2 ut code * add event pipeline * remove unexistent module from doc/source/conf.py * Upgrade to hacking 0.10 * Remove the Nova notifier * Remove argparse from requirements * [MongoDB] Improves get\_meter\_statistics method * Fix docs repeating measuring units * [DB2 nosql] Create TIMESTAMP type index for 'timestamp' field * remove pytidylib and netifaces from tox.ini external dependency * Avoid unnecessary API dependency on tooz & ceilometerclient * Correct name of "ipmi" options group * Fix Opencontrail pollster according the API changes * enable tests.storage.test\_impl\_mongodb * Remove lockfile from requirements * Disable eventlet monkey-patching of DNS * Expose vm's metadata to metrics * Adding build folders & sorting gitignore * Disable proxy in unit test case of test\_bin * Add Event and Trait API to document * Refactor ipmi agent manager * Use alarm's evaluation periods in sufficient test * Use oslo\_config instead of deprecated oslo.config * Avoid executing ipmitool in IPMI unit test * Updated from global requirements * Add a direct to database publisher * Fixed MagnetoDB metrics title * Imported Translations from Transifex * Fix incorrect test case name in test\_net.py * Updated from global requirements * notification agent missing CONF option * switch to oslo\_i18n * Use right function to create extension list for agent test * Imported Translations from Transifex * Add an exchange for Zaqar in profiler notification plugin * Remove unused pecan configuration options * Updated from global requirements * Use oslo\_utils 
instead of deprecated oslo.utils * Match the meter names for network services * stop using private timeutils attribute * Update measurement docs for network services * Catch exception when evaluate single alarm * Return a meaningful value or raise an excpetion for libvirt * Imported Translations from Transifex * make transformers optional in pipeline * Added metering for magnetodb * Add release notes URL for Juno * Fix release notes URL for Icehouse * remove unnecessary str method when log messages * Revert "Remove Sphinx from py33 requirements" * untie pipeline manager from samples * reset listeners on agent refresh * Remove inspect\_instances method from virt * Optimize resource list query * Synchronize Python 3 requirements * Remove unnecessary import\_opt|group * Add test data generator via oslo messaging * Check to skip to poll and publish when no resource * Add oslo.concurrency module to tox --env genconfig * add glance events * add cinder events * Manual update from global requirements * Add cmd.polling.CLI\_OPTS to option list * Ignore ceilometer.conf * Switch to oslo.context library 2015.1.0b1 ---------- * Revert "Skip to poll and publish when no resources found" * Added missing measurements and corrected errors in doc * Remove Sphinx from py33 requirements * Clean up bin directory * Improve tools/make\_test\_data.sh correctness * ensure unique pipeline names * implement notification coordination * Make methods static where possible (except openstack.common) * Fix docs to suit merged compute/central agents concept * Drop anyjson * Move central agent code to the polling agent module * RBAC Support for Ceilometer API Implementation * [SQLalchemy] Add groupby ability resource\_metadata * Improve links in config docs * Make LBaaS total\_connections cumulative * remove useless looping in pipeline * Encompassing one source pollsters with common context * Modify tests to support ordering of wsme types * Make compute discovery pollster-based, not agent-level * Add 
docs about volume/snapshot measurements * Port to graduated library oslo.i18n * Retry to connect database when DB2 or mongodb is restarted * Updated from global requirements * Standardize timestamp fields of ceilometer API * Workflow documentation is now in infra-manual * Add alarm\_name field to alarm notification * Updated from global requirements * Rely on VM UUID to fetch metrics in libvirt * Imported Translations from Transifex * Initializing a longer resource id in DB2 nosql backend * Sync oslo-incubator code to latest * ensure unique list of consumers created * fix import oslo.concurrency issue * Add some rally scenarios * Do not print snmpd password in logs * Miniscule typo in metering\_connection help string * add http dispatcher * [MongoDB] Add groupby ability on resource\_metadata * [MongoDB] Fix bug with 'bad' chars in metadatas keys * Override retry\_interval in MongoAutoReconnectTest * Exclude tools/lintstack.head.py for pep8 check * Add encoding of rows and qualifiers in impl\_hbase * Database.max\_retries only override on sqlalchemy side * Support to capture network services notifications * Internal error with period overflow * Remove Python 2.6 classifier * Enable pep8 on ./tools directory * Imported Translations from Transifex * Fixes Hyper-V Inspector disk metrics cache issue * fix swift middleware parsing * Fix order of arguments in assertEqual * Updated from global requirements * Adapting pylint runner to the new message format * Validate AdvEnum & return an InvalidInput on error * add sahara and heat events * add keystone events to definitions * Add timeout to all http requests * [MongoDB] Refactor time to live feature * transform samples only when transformers exist * Updated from global requirements * Remove module not really used by Ceilometer * Switch to oslo.concurrency * Skip to poll and publish when no resources found * Change event type for identity trust notifications * Add mysql and postgresql in tox for debug env * Add new 
notifications types for volumes/snapshots * Add encoding to keys in compute\_signature * Tests for system and network aggregate pollsters * Add bandwidth to measurements * Fix wrong example of capabilities * Correct the mongodb\_replica\_set option's description * Alarms listing based on "timestamp" * Use 'pg\_ctl' utility to start and stop database * Correct alarm timestamp field in unittest code * Refactor kwapi unit test * Remove duplicated config doc * VMware: Enable VMware inspector to support any port * Clean event method difinition in meter storage base * Fix some nits or typos found by chance * Add Sample ReST API path in webapi document * Enable filter alarms by their type * Fix storage.hbase.util.prepare\_key() for 32-bits system * Add event storage for test\_hbase\_table\_utils * Add per device rate metrics for instances * Fix hacking rule H305 imports not grouped correctly * Add \_\_repr\_\_ method for sample.Sample * remove ordereddict requirement * Improve manual.rst file * Imported Translations from Transifex * Fix columns migrating for PostgreSQL * Updated from global requirements * Updated from global requirements * [MongoDB] Fix bug with reconnection to new master node * Updated from global requirements * support request-id * Update coverage job to references correct file * remove reference to model in migration * Use oslo\_debug\_helper and remove our own version * Allow collector service database connection retry * refresh ceilometer architecture documentation * Edits assert methods * Adds memory stats meter to libvirt inspector * Edits assert methods * Edits assert methods * Edits assert methods * Edits assert method * Imported Translations from Transifex * Imported Translations from Transifex * Updated from global requirements * add script to generate test event data * Handle poorly formed individual sensor readings * refactor hbase storage code * Avoid clobbering existing class definition * Hoist duplicated AlarmService initialization to 
super * Clarify deprecation comment to be accurate * Work toward Python 3.4 support and testing 2014.2 ------ * Fix recording failure for system pollster * sync and clean up oslo * Add missing notification options to the documentation * Add missing alarm options to the documentation * Add oslo.db to config generator * Add missed control exchange options to the documentation * Add coordination related options to the documentation * Add missing collector options to the documentation * switch to oslo-config-generator * Edit docs for docs.opentack.org/developer/ * Add oslo.db to config generator * Fix signature validation failure when using qpid message queue * clean capabilities * move db2 and mongo driver to event tree * move sql event driver to event tree * move hbase event driver to event tree * Sets default encoding for PostgreSQL testing * update database dispatcher to use events db * Add role assignment notifications for identity * add mailmap to avoid dup of authors * Add user\_metadata to network samples * Fix recording failure for system pollster 2014.2.rc2 ---------- * Manually updated translations * Updated from global requirements * Creates one database per sql test * Adds pylint check for critical error in new patches * Fix neutron client to catch 404 exceptions * Fix OrderedDict usage for Python 2.6 * Include a 'node' key and value in ipmi metadata * clean path in swift middleware * Implement redesigned separator in names of columns in HBase * [HBase] Add migration script for new row separate design * Imported Translations from Transifex * Include a 'node' key and value in ipmi metadata * Updated from global requirements * Run unit tests against PostgreSQL * create skeleton files for event storage backends * Imported Translations from Transifex * isolate event storage models * Fix neutron client to catch 404 exceptions * Run unit tests against MySQL * Updated from global requirements * Correct JSON-based query examples in documentation * Open Kilo 
development * Add cfg.CONF.import\_group for service\_credentials * Fix OrderedDict usage for Python 2.6 * clean path in swift middleware 2014.2.rc1 ---------- * Partition static resources defined in pipeline.yaml * Per-source separation of static resources & discovery * dbsync: Acknowledge 'metering\_connection' option * Fix bug in the documentation * Use oslo.msg retry API in rpc publisher * Describe API versions * Change compute agent recurring logs from INFO to DEBUG * Fix bug with wrong bool opt value interpolation * [HBase] Improves speed of unit tests on real HBase backend * Imported Translations from Transifex * Removed unused abc meta class * update references to auth\_token middleware * clean up swift middleware to avoid unicode errors * [HBase] Catch AlreadyExists error in Connection upgrade * Use None instead of mutables in method params default values * Updated from global requirements * Enable to get service types from configuration file * test db2 driver code * Docs: Add description of pipeline discovery section * Typo "possibilites" should be "possibilities" * Modified docs to update DevStack's config filename * Add an API configuration section to docs * Tune up mod\_wsgi settings in example configuration * Allow pecan debug middleware to be turned off * Provide \_\_repr\_\_ for SampleFilter * Eliminate unnecessary search for test cases * Switch to a custom NotImplementedError * minimise ceilometer memory usage * Partition swift pollster resources by tenant * Add IPMI pollster * Add IPMI support * Stop using intersphinx * Use central agent manager's keystone token in discoveries * Handle invalid JSON filters from the input gracefully * Sync jsonutils for namedtuple\_as\_object fix * ceilometer spamming syslog * Timestamp bounds need not be tight (per ceilometer 1288372) * Allow to pass dict from resource discovery * fix network discovery meters * switch to sqlalchemy core * Imported Translations from Transifex * Improve the timestamp validation of 
ceilometer API * Update docs with Sahara notifications configuration * Migrate the rest of the central agent pollsters to use discoveries * Add documentation for implemented identity meters * Fix tests with testtools>=0.9.39 * Document the standard for PaaS service notifications * Returns 401 when unauthorized project access occurs * Adding another set of hardware metrics * normalise resource data 2014.2.b3 --------- * warn against sorting requirements * Add validate alarm\_actions schema in alarm API * Fix help strings * Imported Translations from Transifex * Switch partitioned alarm evaluation to a hash-based approach * Central agent work-load partitioning * collector: Allows to requeue a sample * Typo fixed * Switch to oslo.serialization * Document pipeline publishers configuration * Alarm: Use stevedore to load the service class * Enhance compute diskio tests to handle multi instance * Adding comparison operators in query for event traits * XenAPI support: Update measurements documentation * update requirements * add documentation for setting up api pipeline * Permit usage of notifications for metering * XenAPI support: Disk rates * XenAPI support: Changes for networking metrics * XenAPI support: Memory Usage * XenAPI support: Changes for cpu\_util * XenAPI support: List the instances * Rebase hardware pollsters to use new inspector interface * Switch to use oslo.db * Remove oslo middleware * Adding quotas on alarms * Add an exchange for Trove in profiler notification plugin * Simplify chained comparisons * In-code comments should start with \`#\`, not with \`"""\` * Remove redundant parentheses * skip polls if service is not registered * re-add hashseed to avoid gate error * Switch to oslo.utils * Switch to oslotest * Handle sqlalchemy connection strings with drivers * Rewrite list creation as a list literal * Rewrite dictionary creation as a dictionary literal * Triple double-quoted strings should be used for docstrings * Add upgrading alarm storage in dbsync 
* Improving of configuration.rst * Fix typos in transformer docstrings * Update tox.ini pep8 config to ignore i18n functions * Added new hardware inspector interface * compute: fix wrong test assertion * sync olso-incubator code * VMware: Support secret host\_password option * refactor filter code in sql backend * Support for per disk volume measurements * Use a FakeRequest object to test middleware * Imported Translations from Transifex * Improve api\_paste\_config file searching * [Hbase] Add column for source filter in \_get\_meter\_samples * Issue one SQL statement per execute() call * Allow tests to run outside tox * [HBase] Refactor hbase.utils * Set page size when Glance API request is called * Adding init into tools folder * Enhancing the make\_test\_data script * correct DB2 installation supported features documentation * Avoid duplication of discovery for multi-sink sources * Improve performance of libvirt inspector requests * Documented Stevedore usage and source details * Add notifications for identity authenticate events * Add message translate module in vmware inspector * Handle Cinder attach and detach notifications * [HBase] Improve uniqueness for row in meter table * Doc enhancement for API service deployment with mod\_wsgi * Update documentation for new transformer * Add the arithmetic transformer endpoint to setup.cfg * Imported Translations from Transifex * Fix unit for vpn connection metric * Debug env for tox * Change spelling mistakes * Use auth\_token from keystonemiddleware * Fix dict and set order related issues in tests * Fix listener for update.start notifications * Sahara integration with Ceilometer * Add notifications for identity CRUD events * Extracting make\_resource\_metadata method * Fix make\_test\_data tools script * Add cumulative and gauge to aggregator transformer * Enable some tests against py33 * Remove --tmpdir from mktemp * Replace dict.iteritems() with six.iteritems(dict) * Replace iterator.next() with next(iterator) * 
Fix aggregator flush method * Automatic discovery of TripleO Overcloud hardware * Set python hash seed to 0 in tox.ini * Don't override the original notification message * Remove ConnectionProxy temporary class * Move sqlalchemy alarms driver code to alarm tree * basestring replaced with six.string\_types * Correct misspelled words 2014.2.b2 --------- * Add retry function for alarm REST notifier * Move hbase alarms driver code to alarm tree * Update measurement docs for FWaaS * Update measurement docs for VPNaaS * Follow up fixes to network services pollsters * Updated from global requirements * Implement consuming ipmi notifications from Ironic * Support for metering FWaaS * Adds Content-Type to alarm REST notifier * Multi meter arithmetic transformer * Remove redudent space in doc string * Use None instead of mutables in test method params defaults * Add support for metering VPNaaS * Use resource discovery for Network Services * Change of get\_events and get\_traits method in MongoDB and Hbase * Fix two out-dated links in doc * Move log alarms driver code to alarm tree * Separate the console scripts * clean up event model * improve expirer performance for sql backend * Move mongodb/db2 alarms driver code to alarm tree * Allow to have different DB for alarm and metering * Replace datetime of time\_constraints by aware object * Sync oslo log module and its dependencies * Use hmac.compare\_digest to compare signature * Add testcase for multiple discovery-driven sources * Fixes aggregator transformer timestamp and user input handling * Improves pipeline transformer documentation * Fix incorrect use of timestamp in test * Add keystone control exchange * Fix call to meter-list in measurements doc * Remove redundant parentheses * [Mongodb] Implement events on Mongodb and DB2 * Fix typos in code comments & docstrings * Make the error message of alarm-not-found clear * Fix SQL exception getting statitics with metaquery * Remove docutils pin * update default\_log\_levels 
set by ceilometer * Fix annoying typo in partition coordinator test * Transform sample\_cnt type to int * Remove useless sources.json * Fix H405 violations and re-enable gating * Fix H904 violations and re-enable gating * Fix H307 violations and re-enable gating * Fix the section name in CONTRIBUTING.rst * Added osprofiler notifications plugin * Improve a bit performance of Ceilometer * Revert "Align to openstack python package index mirror" * Fix aggregator \_get\_unique\_key method * Remove meter hardware.network.bandwidth.bytes * Fix F402 violations and re-enable gating * Fix E265 violations and re-enable gating * Fix E251 violations and re-enable gating * Fix E128 violations and re-enable gating * Fix E126,H104 violations and re-enable gating * Bump hacking to 0.9.x * Fixed various import issues exposed by unittest * use urlparse from six * clean up sample index * Fix HBase available capabilities list * Updated from global requirements * VMware:Update the ceilometer doc with VMware opts * Handle non-ascii character in meter name * Add log output of "x-openstack-request-id" from nova * Imported Translations from Transifex * fix StringIO errors in unit test * Fix hacking rule 302 and enable it * Imported Translations from Transifex * sync oslo code * Fixes ceilometer-compute service start failure * Reenables the testr per test timeout * Avoid reading real config files in unit test * Clean up oslo.middleware.{audit,notifier} * Use hacking from test-requirements * Splits hbase storage code base * Splits mongo storage code base * Separate alarm storage models from other models * Iterates swift response earlier to get the correct status * Fix messaging.get\_transport caching * Fix method mocked in a test * Don't keep a single global TRANSPORT object * Clean up .gitignore * Fix Sphinx directive name in session.py * Fix list of modules not included in auto-gen docs * Downgrade publisher logging to debug level again 2014.2.b1 --------- * remove default=None for config 
options * [HBase] get\_resource optimization * Fix incorrect trait initialization * Remove unused logging in tests * Revert "Fix the floatingip pollster" * Remove low-value logging from publication codepath * Fix LBaaS connection meter docs * Fix the meter type for LB Bytes * Adding alarm list filtering by state and meter * Adds caches for image and flavor in compute agent * [HBase] Implement events on HBase * Skipping central agent pollster when keystone not available * Respect $TMPDIR environment variable to run tests * Fixed unit test TestRealNotification * Update Measurement Docs for LBaaS * Metering LoadBalancer as a Service * Removes per test testr timeout * Change pipeline\_manager to instance attribute in hooks * Change using of limit argument in get\_sample * Refactor tests to remove direct access to test DBManagers * Fix notification for NotImplemented record\_events * Add missing explicit cfg option import * Fix ceilometer.alarm.notifier.trust import * Use TYPE\_GAUGE rather than TYPE\_CUMULATIVE * Update doc for sample config file issue * Corrects a flaw in the treatment of swift endpoints * use LOG instead of logger as name for the Logger object * Fix doc gate job false success * Improve performance of api requests with hbase scan * Add new 'storage': {'production\_ready': True} capability * Clean tox.ini * Remove (c) and remove unnecessary encoding lines * Fix testing gate due to new keystoneclient release * Ignore the generated file ceilometer.conf.sample * Update the copyright date in doc * Updated from global requirements * reconnect to mongodb on connection failure * refactor sql backend to improve write speed * Don't rely on oslomsg configuration options * replaced unicode() with six.text\_type() * Synced jsonutils from oslo-incubator * Fix the floatingip pollster * Fix project authorization check * Update testrepository configuration * Implemented metering for Cinder's snapshots * Use joins instead of subqueries for metadata filtering * Use None 
instead of mutables in method params defaults * Remove all mostly untranslated PO files * switch SplitResult to use six * Remove unused db code due to api v1 drop * Updated from global requirements * oslo.messaging context must be a dict * Drop deprecated api v1 * Fix network notifications of neutron bulk creation * mongo: remove \_id in inserted alarm changes * Clean up openstack-common.conf * Revert "oslo.messaging context must be a dict" * Correct class when stopping partitioned alarm eval svc * oslo.messaging context must be a dict * Corrections of spelling, rephrasing for clarity * Adapt failing tests for latest wsme version * Removed StorageEngine class and it's hierarchy * Correcting formatting and adding period in measurement doc * Initialize dispatcher manager in event endpoint * Replaced CONF object with url in storage engine creation * Synced jsonutils from oslo-incubator * Remove gettextutils.\_ imports where they are not used * Remove "# noqa" leftovers for gettextutils.\_ * transformer: Add aggregator transformer * Remove conversion debug message * Fix the return of statistic with getting no sample * Remove eventlet.sleep(0) in collector tests * Don't allow queries with 'IN' predicate with an empty sequence * Check if samples returned by get\_sample\_data are not None * Opencontrail network statistics driver * Add a alarm notification using trusts * Replace hard coded WSGI application creation * Describe storage backends in the collector installation guide * Made get\_capabilities a classmethod instead of object method * Disable reverse dns lookup * Consume notif. 
from multiple message bus * Use NotificationPlugin as an oslo.msg endpoint * Improve combination rule validation * Remove ceilometer.conf.sample * Use known protocol scheme in keystone tests * cleanup virt pollster code * Add encoding argument to deserialising udp packets in collector * Made get\_engine method module-private * Make entities (Resource, User, Project) able to store lists * Remove duplicate alarm from alarm\_ids * More accurate meter name and unit for host load averages * Replace oslo.rpc by oslo.messaging * Fix a response header bug in the error middleware * Remove unnecessary escape character in string format * Optimize checks to set image properties in metadata * fix statistics query in postgres * Removed useless code from \_\_init\_\_ method * Refactored fake connection URL classes * Replace assert statements with assert methods * Removes direct access of timeutils.override\_time * Disable specifying alarm itself in combination rule * Include instance state in metadata * Allowed nested resource metadata in POST'd samples * Sync oslo-incubator code * Updated from global requirements * Refactor the DB implementation of Capabilities API * Fix Jenkins translation jobs * Align to openstack python package index mirror * User a more accurate max\_delay for reconnects * Open Juno development 2014.1.rc1 ---------- * Imported Translations from Transifex * Add note on aggregate duplication to API docco * Use ConectionPool instead of one Connection in HBase * remove dump tables from previous migrations * De-dupe selectable aggregate list in statistics API * ensure dispatcher service is configured before rpc * improve performance of resource-list in sql * SSL errors thrown with Postgres on multi workers * Remove escape character in string format * Verify user/project ID for alarm created by non-admin user * enable a single worker by default * Fix ceilometer.conf.sample mismatch * Metadata in compute.instance.exists fix * Fix order of arguments in assertEquals 
* Documenting hypervisor support for nova meters * Ensure idempotency of cardinality reduction in mongo * VMware vSphere: Improve the accuracy of queried samples * Use swob instead of webob in swift unit tests * Disable oslo.messaging debug logs * Fix validation error for invalid field name in simple query * fix create\_or\_update logic to avoid rollbacks * Avoid swallowing AssertionError in test skipping logic * Fix hardware pollster to inspect multiple resources * spawn multiple workers in services * Install global lazy \_() * Fixes Hyper-V metrics units * Ensure intended indices on project\_id are created for mongo * Fix the type of the disk IO rate measurements * Change the sample\_type from tuple to string * Fix order of arguments in assertEquals * Ensure alarm rule conform to alarm type * insecure flag added to novaclient * Fixes duplicated names in alarm time constraints * Use the list when get information from libvirt * Eventlet monkeypatch must be done before anything * 028 migration script incorrectly skips over section * Fix bug in get\_capabilities behavior in DB drivers * Added documentation for selectable aggregates * Make sure use IPv6 sockets for ceilometer in IPv6 environment * VMware vSphere: Bug fixes * Ensure insecure config option propagated by alarm evaluator * Fix order of arguments in assertEquals * Fix order of arguments in assertEquals * Fix order of arguments in assertEquals * Rationalize get\_resources for mongodb * Ensure insecure config option propagated by alarm service * add host meters to doc * Add field translation to complex query from OldSample to Sample * Extend test case to cover old alarm style conversion * Updated doc with debug instructions * Refactored the way how testscenarios tests are run * Corrected the sample names in hardware pollsters * Prevent alarm\_id in query field of getting history * Make ceilometer work with sqla 0.9.x * Implements monitoring-network-from-opendaylight * Add user-supplied arguments in 
log\_handler * VMware vSphere support: Disk rates * Fix updating alarm can specify existing alarm name * Changes for networking metrics support for vSphere * VMware vSphere: Changes for cpu\_util * VMware vSphere support: Memory Usage * Fix broken statistics in sqlalchemy * Fixes Hyper-V Inspector network metrics values * Set storage engine for the trait\_type table * Enable monkeypatch for select module * Rename id to alarm\_id of Alarm in SqlAlchemy * Fix some spelling mistakes and a incorrect url * Skip central agent interval\_task when keystone fails 2014.1.b3 --------- * Ensure user metadata mapped for instance notifications * Per pipeline pluggable resource discovery * Wider selection of aggregates for sqlalchemy * Wider selection of aggregates for mongodb * Adds time constraints to alarms * Remove code duplication Part 3 * Decouple source and sink configuration for pipelines * Selectable aggregate support in mongodb * Selectable aggregation functions for statistics * Add simple capabilities API * Removed global state modification by api test * VMware vSphere support: Performance Mgr APIs * Fix typo * move databases to test requirements * Make recording and scanning data more determined * Implements "not" operator for complex query * Implements metadata query for complex query feature * Alarms support in HBase Part 2 * Alarm support in HBase Part 1 * Remove unused variable * Added hardware pollsters for the central agent * Added hardware agent's inspector and snmp implementation * Updated from global requirements * Pluggable resource discovery for agents * Remove code duplication Part 2 * Imported Translations from Transifex * remove audit logging on flush * Tolerate absent recorded\_at on older mongo/db2 samples * api: export recorded\_at in returned samples * Fix the way how metadata is stored in HBase * Set default log level of iso8601 to WARN * Sync latest config file generator from oslo-incubator * Fix typo on testing doc page * Remove code duplication * 
sample table contains redundant/duplicate data * rename meter table to sample * storage: store recording timestamp * Fixed spelling error in Ceilometer * Adds doc string to query validate functions in V2 API * Updated from global requirements * Remove code that works around a (now-resolved) bug in pecan * Fix missing source field content on /v2/samples API * Refactor timestamp existence validation in V2 API * Use the module units to refer bytes type * sync units.py from oslo to ceilometer * Add comments for \_build\_paginate\_query * Implements monitoring-network * Handle Heat notifications for stack CRUD * Alembic migrations not tested * Modify the discription of combination alarm * check domain state before inspecting nics/disks * Adds gettextutils module in converter * Keep py3.X compatibility for urllib.urlencode * Added missing import * Removed useless prints that pollute tests log * Implements in operator for complex query functionality * Implements field validation for complex query functionality * allow hacking to set dependencies * Implements complex query functionality for alarm history * Implements complex query functionality for alarms * Remove None for dict.get() * Replace assertEqual(None, \*) with assertIsNone in tests * Update notification\_driver * Switch over to oslosphinx * Fix some flaws in ceilometer docstrings * Rename Openstack to OpenStack * Remove start index 0 in range() * Updated from global requirements * Remove blank line in docstring * Use six.moves.urllib.parse instead of urlparse * Propogate cacert and insecure flags to glanceclient * Test case for creating an alarm without auth headers * Refactored run-tests script * Implements complex query functionality for samples * fix column name and alignment * Remove tox locale overrides * Updated from global requirements * Adds flavor\_id in the nova\_notifier * Improve help strings * service: re-enable eventlet just for sockets * Fixes invalid key in Neutron notifications * Replace 
BoundedInt with WSME's IntegerType * Replace 'Ceilometer' by 'Telemetry' in the generated doc * Doc: Add OldSample to v2.rst * Fixing some simple documentation typos * Updated from global requirements * Fix for a simple typo * Replace 'a alarm' by 'an alarm' * Move ceilometer-send-counter to a console script * sync oslo common code * Handle engine creation inside of Connection object * Adds additional details to alarm notifications * Fix formatting of compute-nova measurements table * Fix string-to-boolean casting in queries * nova notifier: disable tests + update sample conf * Update oslo * Refactored session access * Fix the py27 failure because of "ephemeral\_key\_uuid" error * Correct a misuse of RestController in the Event API * Fix docs on what an instance meter represents * Fix measurement docs to correctly represent Existence meters * samples: fix test case status code check * Replace non-ascii symbols in docs * Use swift master * Add table prefix for unit tests with hbase * Add documentation for pipeline configuration * Remove unnecessary code from alarm test * Updated from global requirements * Use stevedore's make\_test\_instance * use common code for migrations * Use explicit http error code for api v2 * Clean .gitignore * Remove unused db engine variable in api * Revert "Ensure we are not exhausting the sqlalchemy pool" * eventlet: stop monkey patching * Update dev docs to include notification-agent * Change meter\_id to meter\_name in generated docs * Correct spelling of logger for dispatcher.file * Fix some typos in architecture doc * Drop foreign key constraints of alarm in sqlalchemy * Re-enable lazy translation * Sync gettextutils from Oslo * Fix wrong doc string for meter type * Fix recursive\_keypairs output * Added abc.ABCMeta metaclass for abstract classes * Removes use of timeutils.set\_time\_override 2014.1.b2 --------- * tests: kill all started processes on exit * Exclude weak datapoints from alarm threshold evaluation * Move enable\_acl and
debug config to ceilometer.conf * Fix the Alarm documentation of Web API V2 * StringIO compatibility for python3 * Set the SQL Float precision * Convert alarm timestamp to PrecisionTimestamp * use six.move.xrange replace xrange * Exit expirer earlier if db-ttl is disabled * Added resources support in pollster's interface * Improve consistency of help strings * assertTrue(isinstance) replace by assertIsInstance * Return trait type from Event api * Add new rate-based disk and network pipelines * Name and unit mapping for rate\_of\_change transformer * Update oslo * Remove dependencies on pep8, pyflakes and flake8 * Implement the /v2/samples/ API * Fix to handle null threshold\_rule values * Use DEFAULT section for dispatcher in doc * Insertion in HBase should be fixed * Trivial typo * Update ceilometer.conf.sample * Fix use the fact that empty sequences are false * Remove unused imports * Replace mongo aggregation with plain ol' map-reduce * Remove redundant meter (name,type,unit) tuples from Resource model * Fix work of udp publisher * tests: pass /dev/null as config for mongod * requirements: drop netaddr * tests: allow to skip if no database URL * Fix to tackle instances without an image assigned * Check for pep8 E226 and E24 * Fixed spelling mistake * AlarmChange definition added to doc/source/webapi/v2.rst * 1st & last sample timestamps in Resource representation * Avoid false negatives on message signature comparison * cacert is not picked up correctly by alarm services * Change endpoint\_type parameter * Utilizes assertIsNone and assertIsNotNone * Add missing gettextutils import to ceilometer.storage.base * Remove redundant code in nova\_client.Client * Allow customized reseller\_prefix in Ceilometer middleware for Swift * Fix broken i18n support * Empty files should no longer contain copyright * Add Event API * Ensure we are not exhausting the sqlalchemy pool * Add new meters for swift * Sync config generator workaround from oslo * storage: factorize not 
implemented methods * Don't assume alarms are returned in insert order * Correct env variable in file oslo.config.generator.rc * Handle the metrics sent by nova notifier * Add a wadl target to the documentation * Sync config generator from oslo-incubator * Convert event timestamp to PrecisionTimestamp * Add metadata query validation limitation * Ensure the correct error message is displayed * Imported Translations from Transifex * Move sphinxcontrib-httpdomain to test-requirements * Ensure that the user/project exist on alarm update * api: raise ClientSideError rather than ValueError * Implement the /v2/sample API * service: fix service alive checking * Oslo sync to recover from db2 server disconnects * Event Storage Layer * config: specify a template for mktemp * test code should be excluded from test coverage summary * doc: remove note about Nova plugin framework * doc: fix formatting of alarm action types * Updated from global requirements * Add configuration-driven conversion to Events * add newly added constraints to expire clear\_expired\_metering\_data * fix unit * Add import for publisher\_rpc option * add more test cases to improve the test code coverage #5 * Create a shared queue for QPID topic consumers * Properly reconnect subscribing clients when QPID broker restarts * Don't need session.flush in context managed by session * sql migration error in 020\_add\_metadata\_tables 2014.1.b1 --------- * Remove rpc service from agent manager * Imported Translations from Transifex * organise requirements files * Add a Trait Type model and db table * No module named MySQLdb bug * Add a note about permissions to ceilometer logging directory * sync with oslo-incubator * Rename OpenStack Metering to OpenStack Telemetry * update docs to adjust for naming change * Add i18n warpping for all LOG messages * Imported Translations from Transifex * Removed unused method in compute agent manger * connection is not close in migration script * Fixed a bug in sql migration 
script 020 * Fixed nova notifier test * Added resources definition in the pipeline * Change metadata\_int's value field to type bigint * Avoid intermittent integrity error on alarm creation * Simplify the dispatcher method prototype * Use map\_method from stevedore 0.12 * Remove the collector submodule * Move dispatcher a level up * Split collector * Add a specialized Event Type model and db table * Remove old sqlalchemy-migrate workaround * Revert "Support building wheels (PEP-427)" * full pep8 compliance (part 2) * Selectively import RPC backend retry config * Fixes Hyper-V Inspector disk metrics bug * Imported Translations from Transifex * full pep8 compliance (part1) * Replace mox with mock in alarm,central,image tests * Stop ignoring H506 errors * Update hacking for real * Replace mox with mock in tests.collector * Replace mox with mock in publisher and pipeline * Replace mox with mock in novaclient and compute * Remove useless defined Exception in tests * Support building wheels (PEP-427) * Fixes Hyper-V Inspector cpu metrics bug * Replace mox with mock in tests.storage * Document user-defined metadata for swift samples * Replace mox with mock in energy and objectstore * Updated from global requirements * Replace mox with mock in tests.api.v2 * Refactor API error handling * make record\_metering\_data concurrency safe * Move tests into ceilometer module * Replace mox with mock in tests.api.v1 * Replace mox with mock in tests.api.v2.test\_compute * Corrected import order * Use better predicates from testtools instead of plain assert * Stop using openstack.common.exception * Replace mox with mock in tests.network * Replace mox with mocks in test\_inspector * Fix failing nova\_tests tests * Replace mox with mocks in tests.compute.pollsters * Add an insecure option for Keystone client * Sync log from oslo * Cleanup tests.publisher tests * mongodb, db2: do not print full URL in logs * Use wsme ClientSideError to handle unicode string * Use consistant cache key for 
swift pollster * Fix the developer documentation of the alarm API * Fix the default rpc policy value * Allow Events without traits to be returned * Replace tests.base part8 * Replace tests.base part7 * Replace tests.base part6 * Imported Translations from Transifex * Imported Translations from Transifex * Sync log\_handler from Oslo * Don't use sqlachemy Metadata as global var * enable sql metadata query * Replace tests.base part5 * Replace tests.base part4 * Imported Translations from Transifex * Updated from global requirements * Fix doc typo in volume meter description * Updated from global requirements * Add source to Resource API object * compute: virt: Fix Instance creation * Fix for get\_resources with postgresql * Updated from global requirements * Add tests when admin set alarm owner to its own * Replace tests.base part3 * Replace tests.base part2 * Replace tests.base part1 * Fix wrong using of Metadata in 15,16 migrations * api: update for WSME 0.5b6 compliance * Changes FakeMemcache to set token to expire on utcnow + 5 mins * Change test case get\_alarm\_history\_on\_create * Change alarm\_history.detail to text type * Add support for keystoneclient 0.4.0 * Ceilometer has no such project-list subcommand * Avoid leaking admin-ness into combination alarms * Updated from global requirements * Avoid leaking admin-ness into threshold-oriented alarms * Update Oslo * Set python-six minimum version * Ensure combination alarms can be evaluated * Ensure combination alarm evaluator can be loaded * Apply six for metaclass * add more test cases to improve the test code coverage #6 * Update python-ceilometerclient lower bound to 1.0.6 * Imported Translations from Transifex * add more test cases to improve the test code coverage #4 2013.2.rc1 ---------- * db2 does not allow None as a key for user\_id in user collection * Start Icehouse development * Imported Translations from Transifex * Disable lazy translation * Add notifications for alarm changes * Updated from 
global requirements * api: allow alarm creation for others project by admins * assertEquals is deprecated, use assertEqual * Imported Translations from Transifex * update alarm service setup in dev doc * Add bug number of some wsme issue * api: remove useless comments * issue an error log when cannot import libvirt * add coverage config file to control module coverage report * tests: fix rounding issue in timestamp comparison * api: return 404 if a alarm is not found * remove locals() for stringformat * add more test cases to improve the test code coverage #3 * Remove extraneous vim configuration comments * Return 401 when action is not authorized * api: return 404 if a resource is not found * keystone client changes in AuthProtocol made our test cases failing * Don't load into alarms evaluators disabled alarms * Remove MANIFEST.in * Allow to get a disabled alarm * Add example with return values in API v2 docs * Avoid imposing alembic 6.0 requirement on all distros * tests: fix places check for timestamp equality * Don't publish samples if resource\_id in missing * Require oslo.config 1.2.0 final * Don't send unuseful rpc alarm notification * service: check that timestamps are almost equals * Test the response body when deleting a alarm * Change resource.resource\_metadata to text type * Adding region name to service credentials * Fail tests early if mongod is not found * add more test cases to improve the test code coverage #2 * add more test cases to improve the test code coverage #1 * Imported Translations from Transifex * Replace OpenStack LLC with OpenStack Foundation * Use built-in print() instead of print statement * Simple alarm partitioning protocol based on AMQP fanout RPC * Handle manually mandatory field * Provide new API endpoint for alarm state * Implement the combination evaluator * Add alarm combination API * Notify with string representation of alarm reason * Convert BoundedInt value from json into int * Fix for timestamp precision in SQLAlchemy * 
Add source field to Meter model * Refactor threshold evaluator * Alarm API update * Update requirements * WSME 0.5b5 breaking unit tests * Fix failed downgrade in migrations * refactor db2 get\_meter\_statistics method to support mongodb and db2 * tests: import pipeline config * Fix a tiny mistake in api doc * collector-udp: use dispatcher rather than storage * Imported Translations from Transifex * Drop sitepackages=False from tox.ini * Update sphinxcontrib-pecanwsme to 0.3 * Architecture enhancements * Force MySQL to use InnoDB/utf8 * Update alembic requirement to 0.6.0 version * Correctly output the sample content in the file publisher * Pecan assuming meter names are extensions * Handle inst not found exceptions in pollsters * Catch exceptions from nova client in poll\_and\_publish * doc: fix storage backend features status * Add timestamp filtering cases in storage tests * Imported Translations from Transifex * Use global openstack requirements * Add group by statistics examples in API v2 docs * Add docstrings to some methods * add tests for \_query\_to\_kwargs func * validate counter\_type when posting samples * Include auth\_token middleware in sample config * Update config generator * run-tests: fix MongoDB start wait * Imported Translations from Transifex * Fix handling of bad paths in Swift middleware * Drop the \*.create.start notification for Neutron * Make the Swift-related doc more explicit * Fix to return latest resource metadata * Update the high level architecture * Alarm history storage implementation for sqlalchemy * Improve libvirt vnic parsing with missing mac! * Handle missing libvirt vnic targets! 
* Make type guessing for query args more robust * add MAINTAINERS file * nova\_notifier: fix tests * Update openstack.common.policy from oslo-incubator * Clean-ups related to alarm history patches * Improved MongoClient pooling to avoid out of connections error * Disable the pymongo pooling feature for tests * Fix wrong migrations * Fixed nova notifier unit test * Add group by statistics in API v2 * Update to tox 1.6 and setup.py develop * Add query support to alarm history API * Reject duplicate events * Fixes a bug in Kwapi pollster * alarm api: rename counter\_name to meter\_name * Fixes service startup issue on Windows * Handle volume.resize.\* notifications * Network: process metering reports from Neutron * Alarm history storage implementation for mongodb * Fix migration with fkeys * Fixes two typos in this measurements.rst * Add a fake UUID to Meter on API level * Append /usr/sbin:/sbin to the path for searching mongodb * Plug alarm history logic into the API * Added upper version boundry for six * db2 distinct call results are different from mongodb call * Sync rpc from oslo-incubator * Imported Translations from Transifex * Add pagination parameter to the database backends of storage * Base Alarm history persistence model * Fix empty metadata issue of instance * alarm: generate alarm\_id in API * Import middleware from Oslo * Imported Translations from Transifex * Adds group by statistics for MongoDB driver * Fix wrong UniqueConstraint name * Adds else and TODO in statistics storage tests * Imported Translations from Transifex * Extra indexes cleanup * API FunctionalTest class lacks doc strings * install manual last few sections format needs to be fixed * api: update v1 for Flask >= 0.10 * Use system locale when Accept-Language header is not provided * Adds Hyper-V compute inspector * missing resource in middleware notification * Support for wildcard in pipeline * Refactored storage tests to use testscenarios * doc: replace GitHub by git.openstack.org * 
api: allow usage of resource\_metadata in query * Remove useless doc/requirements * Fixes non-string metadata query issue * rpc: reduce sleep time * Move sqlachemy tests only in test\_impl\_sqlachemy * Raise Error when pagination/groupby is missing * Raise Error when pagination support is missing * Use timeutils.utcnow in alarm threshold evaluation * db2 support * plugin: remove is\_enabled * Doc: improve doc about Nova measurements * Storing events via dispatchers * Imported Translations from Transifex * ceilometer-agent-compute did not catch exception for disk error * Change counter to sample in network tests * Change counter to sample in objectstore tests * Remove no more used code in test\_notifier * Change counter to sample vocable in cm.transformer * Change counter to sample vocable in cm.publisher * Change counter to sample vocable in cm.image * Change counter to sample vocable in cm.compute * Change counter to sample vocable in cm.energy * Use samples vocable in cm.publisher.test * Change counter to sample vocable in volume tests * Change counter to sample vocable in api tests * Add the source=None to from\_notification * Make RPCPublisher flush method threadsafe * Enhance delayed message translation when \_ is imported * Remove use\_greenlets argument to MongoClient * Enable concurrency on nova notifier tests * Imported Translations from Transifex * Close database connection for alembic env * Fix typo in 17738166b91 migration * Don't call publisher without sample * message\_id is not allowed to be submitted via api * Api V2 post sample refactoring * Add SQLAlchemy implementation of groupby * Fixes failed notification when deleting instance * Reinitialize pipeline manager for service restart * Sync gettextutils from oslo-incubator * Doc: clearly state that one can filter on metadata * Add HTTP request/reply samples * Use new olso fixture in CM tests * Imported Translations from Transifex * Bump hacking to 0.7.0 * Fix the dict type metadata missing issue * 
Raise error when period with negative value * Imported Translations from Transifex * Import missing gettext \_ * Remove 'counter' occurences in pipeline * Remove the mongo auth warning during tests * Change the error message of resource listing in mongodb * Change test\_post\_alarm case in test\_alarm\_scenarios * Skeletal alarm history API * Reorg alarms controller to facilitate history API * Fix Jenkins failed due to missing \_ * Fix nova test\_notifier wrt new notifier API * Remove counter occurences from documentation * Updated from global requirements * Fixes dict metadata query issue of HBase * s/alarm/alarm\_id/ in alarm notification * Remove unused abstract class definitions * Removed unused self.counters in storage test class * Initial alarming documentation * Include previous state in alarm notification * Consume notification from the default queue * Change meter.resource\_metadata column type * Remove MongoDB TTL support for MongoDB < 2.2 * Add first and last sample timestamp * Use MongoDB aggregate to get resources list * Fix resources/meters pagination test * Handle more Nova and Neutron events * Add support for API message localization * Add the alarm id to the rest notifier body * fix alarm notifier tests * Sync gettextutils from oslo * Fix generating coverage on MacOSX * Use the new nova Instance class * Return message\_id in POSTed samples * rpc: remove source argument from message conversion * Remove source as a publisher argument * Add repeat\_actions to alarm * Rename get\_counters to get\_samples * Add pagination support for MongoDB * Doc: measurements: add doc on Cinder/Swift config * Update nova\_client.py * objectstore: trivial cleanup in \_Base * Add support for CA authentication in Keystone * add unit attribute to statistics * Fix notify method signature on LogAlarmNotifier * Fix transformer's LOG TypeError * Update openstack.common * Fixes Hbase metadata query return wrong result * Fix Hacking 0.6 warnings * Make middleware.py Python 2.6 
compatible * Call alembic migrations after sqlalchemy-migrate * Rename ceilometer.counter to ceilometer.sample * Added separate MongoDB database for each test * Relax OpenStack upper capping of client versions * Refactored MongoDB connection pool to use weakrefs * Centralized backends tests scenarios in one place * Added tests to verify that local time is correctly handled * Refactored impl\_mongodb to use full connection url * calling distinct on \_id field against a collection is slow * Use configured endpoint\_type everywhere * Allow use of local conductor * Update nova configuration doc to use notify\_on\_state\_change * doc: how to inject user-defined data * Add documentation on nova user defined metadata * Refactored API V2 tests to use testscenarios * Refactored API V1 tests to use testscenarios * alarm: Per user setting to disable ssl verify * alarm: Global setting to disable ssl verification * Imported Translations from Transifex * Implementation of the alarm RPCAlarmNotifier * Always init cfg.CONF before running a test * Sets storage\_conn in CollectorService * Remove replace/preserve logic from rate of change transformer * storage: remove per-driver options * hbase: do not register table\_prefix as a global option * mongodb: do not set replica\_set as a global option * Change nose to testr in the documentation * Fixed timestamp creation in MongoDB mapreduce * Ensure url is a string for requests.post * Implement a https:// in REST alarm notification * Implement dot in matching\_metadata key for mongodb * trailing slash in url causes 404 error * Fix missing foreign keys * Add cleanup migration for indexes * Sync models with migrations * Avoid dropping cpu\_util for multiple instances * doc: /statistics fields are not queryable (you cannot filter on them) * fix resource\_metadata failure missing image data * Standardize on X-Project-Id over X-Tenant-Id * Default to ctx user/project ID in sample POST API * Multiple dispatcher enablement * storage: fix 
clear/upgrade order * Lose weight for Ceilometer log in verbose mode * publisher.rpc: queing policies * Remove useless mongodb connection pool comment * Add index for db.meter by descending timestamp * doc: add a bunch of functional examples for the API * api: build the storage connection once and for all * Fix the argument of UnknownArgument exception * make publisher procedure call configurable * Disable mongod prealloc, wait for it to start * Added alembic migrations * Allow to enable time to live on metering sample * Implement a basic REST alarm notification * Imported Translations from Transifex * Ensure correct return code of run-tests.sh * File based publisher * Unset OS\_xx variable before generate configuration * Use run-tests.sh for tox coverage tests * Emit cpu\_util from transformer instead of pollster * Allow simpler scale exprs in transformer.conversions * Use a real MongoDB instance to run unit tests * Allow to specify the endpoint type to use * Rename README.md to README.rst * Use correct hostname to get instances * Provide CPU number as additional metadata * Remove get\_counter\_names from the pollster plugins * Sync SQLAlchemy models with migrations * Transformer to measure rate of change * Make sure plugins are named after their meters * Break up the swift pollsters * Split up the glance pollsters * Make visual coding style consistent * Separate power and energy pollsters * Break up compute pollsters * Implement a basic alarm notification service * Optionally store Events in Collector * Fix issue with pip installing oslo.config-1.2.0 * Transformer to convert between units * publisher.rpc: make per counter topic optional * ceilometer tests need to be enabled/cleaned * Also accept timeout parameter in FakeMemCache * Fix MongoDB backward compat wrt units * Use oslo.sphinx and remove local copy of doc theme * Reference setuptools and not distribute * enable v2 api hbase tests * Register all interesting events * Unify Counter generation from 
notifications * doc: enhance v2 examples * Update glossary * Imported Translations from Transifex * Imported Translations from Transifex * Filter query op:gt does not work as expected * sqlalchemy: fix performance issue on get\_meters() * enable v2 api sqlalchemy tests * Update compute vnic pollster to use cache * Update compute CPU pollster to use cache * Update compute disk I/O pollster to use cache * update Quantum references to Neutron * Update swift pollster to use cache * Update kwapi pollster to use cache * Update floating-ip pollster to use cache * Update glance pollster to use cache * Add pollster data cache * Fix flake8 errors * Update Oslo * Enable Ceilometer to support mongodb replication set * Fix return error when resource can't be found * Simple service for singleton threshold eval * Basic alarm threshold evaluation logic * add metadata to nova\_client results * Bring in oslo-common rpc ack() changes * Pin the keystone client version * Fix auth logic for PUT /v2/alarms * Imported Translations from Transifex * Change period type in alarms API to int * mongodb: fix limit value not being an integer * Check that the config file sample is always up to date * api: enable v2 tests on SQLAlchemy & HBase * Remove useless periodic\_interval option * doc: be more explicit about network counters * Capture instance metadata in reserved namespace * Imported Translations from Transifex * pep8: enable E125 checks * pep8: enable F403 checks * pep8: enable H302 checks * pep8: enable H304 checks * pep8: enable H401 * pep8: enable H402 checks * Rename the MeterPublisher to RPCPublisher * Replace publisher name by URL * Enable pep8 H403 checks * Activate H404 checks * Ceilometer may generate wrong format swift url in some situations * Code cleanup * Update Oslo * Use Flake8 gating for bin/ceilometer-\* * Update requirements to fix devstack installation * Update to the latest stevedore * Start gating on H703 * Remove disabled\_notification\_listeners option * Remove 
disabled\_compute\_pollsters option * Remove disabled\_central\_pollsters option * Longer string columns for Trait and UniqueNames * Fix nova notifier tests * pipeline: switch publisher loading model to driver * Enforce reverse time-order for sample return * Remove explicit distribute depend * Use Python 3.x compatible octal literals * Improve Python 3.x compatibility * Fix requirements * Corrected path for test requirements in docs * Fix some typo in documentation * Add instance\_scheduled in entry points * fix session connection * Remove useless imports, reenable F401 checks * service: run common initialization stuff * Use console scripts for ceilometer-api * Use console scripts for ceilometer-dbsync * Use console scripts for ceilometer-agent-compute * Use console scripts for ceilometer-agent-central * agent-central: use CONF.import\_opt rather than import * Move os\_\* options into a group * Use console scripts for ceilometer-collector * sqlalchemy: migration error when running db-sync * session flushing error * api: add limit parameters to meters * python3: Introduce py33 to tox.ini * Start to use Hacking * Session does not use ceilometer.conf's database\_connection * Add support for limiting the number of samples returned * Imported Translations from Transifex * Add support policy to installation instructions * sql: fix 003 downgrade * service: remove useless PeriodicService class * Fix nova notifier tests * Explicitly set downloadcache in tox.ini * Imported Translations from Transifex 2013.2.b1 --------- * Switch to sphinxcontrib-pecanwsme for API docs * Update oslo, use new configuration generator * doc: fix hyphens instead of underscores for 'os\*' conf options * Allow specifying a listen IP * Log configuration values on API startup * Don't use pecan to configure logging * Mark sensitive config options as secret * Imported Translations from Transifex * ImagePollster record duplicate counter during one poll * Rename requires files to standard names * Add an 
UDP publisher and receiver * hbase metaquery support * Imported Translations from Transifex * Fix and update extract\_opts group extraction * Fix the sample name of 'resource\_metadata' * Added missing source variable in storage drivers * Add Event methods to db api * vnics: don't presume existence of filterref/filter * force the test path to a str (sometimes is unicode) * Make sure that v2 api tests have the policy file configured * Imported Translations from Transifex * setup.cfg misses swift filter * Add a counter for instance scheduling * Move recursive\_keypairs into utils * Replace nose with testr * Use fixtures in the tests * fix compute units in measurement doc * Allow suppression of v1 API * Restore default interval * Change from unittest to testtools * remove unused tests/skip module * Imported Translations from Transifex * Get all tests to use tests.base.TestCase * Allow just a bit longer to wait for the server to startup * Document keystone\_authtoken section * Restore test dependency on Ming * Set the default pipline config file for tests * Imported Translations from Transifex * Fix cross-document references * Fix config setting references in API tests * Restrict pep8 & co to pep8 target * Fix meter\_publisher in setup.cfg * Use flake8 instead of pep8 * Imported Translations from Transifex * Use sqlalchemy session code from oslo * Switch to pbr * fix the broken ceilometer.conf.sample link * Add a direct Ceilometer notifier * Do the same auth checks in the v2 API as in the v1 API * Add the sqlalchemy implementation of the alarms collection * Allow posting samples via the rest API (v2) * Updated the ceilometer.conf.sample * Don't use trivial alarm\_id's like "1" in the test cases * Fix the nova notifier tests after a nova rename * Document HBase configuration * alarm: fix MongoDB alarm id * Use jsonutils instead of json in test/api.py * Connect the Alarm API to the db * Add the mongo implementation of alarms collection * Move meter signature computing 
into meter\_publish * Update WSME dependency * Imported Translations from Transifex * Add Alarm DB API and models * Imported Translations from Transifex * Remove "extras" again * add links to return values from API methods * Modify limitation on request version * Doc improvements * Rename EventFilter to SampleFilter * Fixes AttributeError of FloatingIPPollster * Add just the most minimal alarm API * Update oslo before bringing in exceptions * Enumerate the meter type in the API Meter class * Remove "extras" as it is not used * Adds examples of CLI and API queries to the V2 documentation * Measurements documentation update * update the ceilometer.conf.sample * Set hbase table\_prefix default to None * glance/cinder/quantum counter units are not accurate/consistent * Add some recommendations about database * Pin SQLAlchemy to 0.7.x * Ceilometer configuration.rst file not using right param names for logging * Fix require\_map\_reduce mim import * Extend swift middleware to collect number of requests * instances: fix counter unit * Remove Folsom support * transformer, publisher: move down base plugin classes * pipeline, publisher, transformer: reorganize code * Fix tests after nova changes * Update to the lastest loopingcall from oslo * Imported Translations from Transifex * update devstack instructions for cinder * Update openstack.common * Reformat openstack-common.conf * storage: move nose out of global imports * storage: get rid of get\_event\_interval * Remove gettext.install from ceilometer/\_\_init\_\_.py * Prepare for future i18n use of \_() in nova notifier * Update part of openstack.common * Convert storage drivers to return models * Adpated to nova's gettext changes * add v2 query examples * storage: remove get\_volume\_sum and get\_volume\_max * api: run tests against HBase too * api: run sum unit tests against SQL backend too * Split and fix live db tests * Remove impl\_test * api: run max\_resource\_volume test on SQL backend * Refactor DB tests * fix 
volume tests to utilize VOLUME\_DELETE notification * Open havana development, bump to 2013.2 2013.1 ------ * Change the column counter\_volume to Float * tests: disable Ming test if Ming unavailable * Imported Translations from Transifex * enable arguments in tox * api: run max\_volume tests on SQL backend too * api: run list\_sources tests on SQL and Mongo backend * api: run list\_resources test against SQL * api: handle case where metadata is None * Fix statistics period computing with start/end time * Allow publishing arbitrary headers via the "storage.objects.\*.bytes" counter * Updated the description of get\_counters routine * enable xml error message response * Swift pollster silently return no counter if keystone endpoint is not present * Try to get rid of the "events" & "raw events" naming in the code * Switch to python-keystoneclient 0.2.3 * include a copy of the ASL 2.0 * add keystone configuration instructions to manual install docs * Update openstack.common * remove unused dependencies * Set the default\_log\_levels to include keystoneclient * Switch to final 1.1.0 oslo.config release * Add deprecation warnings for V1 API * Raise stevedore requirement to 0.7 * Fixed the blocking unittest issues * Fix a pep/hacking error in a swift import * Add sample configuration files for mod\_wsgi * Add a tox target for building documentation * Use a non-standard port for the test server * Ensure the statistics are sorted * Start both v1 and v2 api from one daemon * Handle missing units values in mongodb data * Imported Translations from Transifex * Make HACKING compliant * Update manual installation instructions * Fix oslo.config and unittest * Return something sane from the log impl * Fix an invalid test in the storage test suite * Add the etc directory to the sdist manifest * api: run compute duration by resource on SQL backend * api: run list\_projects tests against SQL backend too * api: run list users test against SQL backend too * api: run list meters tests 
against SQL backend too * Kwapi pollster silently return no probre if keystone endpoint is not present * HBase storage driver, initial version * Exclude tests directory from installation * Ensure missing period is treated consistently * Exclude tests when installing ceilometer * Run some APIv1 tests on different backends * Remove old configuration metering\_storage\_engine * Set where=tests * Decouple the nova notifier from ceilometer code * send-counter: fix & test * Remove nose wrapper script * Fix count type in MongoDB * Make sure that the period is returned as an int as the api expects an int * Imported Translations from Transifex * Remove compat cfg wrapper * compute: fix unknown flavor handling * Allow empty dict as metaquery param for sqlalchemy * Add glossary definitions for additional terms * Support different publisher interval * Fix message envelope keys * Revert recent rpc wire format changes * Document the rules for units * Fix a bug in compute manager test case * plugin: don't use @staticmethod with abc * Support list/tuple as meter message value * Imported Translations from Transifex * Update common to get new kombu serialization code * Disable notifier tests * pipeline: manager publish multiple counters * Imported Translations from Transifex * Use oslo-config-2013.1b3 * mongodb: make count an integer explicitely * tests: allow to run API tests on live db * Update to latest oslo-version * Imported Translations from Transifex * Add directive to MANIFEST.in to include all the html files * Use join\_consumer\_pool() for notifications * Update openstack.common * Add period support in storage drivers and API * Update openstack/common tree * storage: fix mongo live tests * swift: configure RPC service correctly * Fix tox python version for Folsom * api: use delta\_seconds() * transformer: add acculumator transformer * Import service when cfg.CONF.os\_\* is used * pipeline: flush after publishing call * plugin: format docstring as rst * Use Mongo finalize 
to compute avg and duration * Code cleanup, remove useless import * api: fix a test * compute: fix notifications test * Move counter\_source definition * Allow to publish several counters in a row * Fixed resource api in v2-api * Update meter publish with pipeline framework * Use the same Keystone client instance for pollster * pipeline: fix format error in logging * More robust mocking of nova conductor * Mock more conductor API methods to unblock tests * Update pollsters to return counter list * Update V2 API documentation * Added hacking.py support to pep8 portion of tox * setup: fix typo in package data * Fix formatting issue with v1 API parameters * Multiple publisher pipeline framework * Remove setuptools\_git from setup\_requires * Removed unused param for get\_counters() * Use WSME 0.5b1 * Factorize agent code * Fixed the TemplateNotFound error in v1 api * Ceilometer-api is crashing due to pecan module missing * Clean class variable in compute manager test case * Update nova notifier test after nova change * Fix documentation formatting issues * Simplify ceilometer-api and checks Keystone middleware parsing * Fix nova conf compute\_manager unavailable * Rename run\_tests.sh to wrap\_nosetests.sh * Update openstack.common * Corrected get\_raw\_event() in sqlalchemy * Higher level test for db backends * Remove useless imports * Flatten the v2 API * Update v2 API for WSME code reorg * Update WebOb version specification * Remove the ImageSizePollster * Add Kwapi pollster (energy monitoring) * Fixes a minor documentation typo * Peg the version of Ming used in tests * Update pep8 to 1.3.3 * Remove leftover useless import * Enhance policy test for init() * Provide the meters unit's in /meters * Fix keystoneclient auth\_token middleware changes * policy: fix policy\_file finding * Remove the \_initialize\_config\_options * Add pyflakes * Make the v2 API date query parameters consistent * Fix test blocking issue and pin docutils version * Apply the official 
OpenStack stylesheets and templates to the Doc build * Fixed erroneous source filter in SQLAlchemy * Fix warnings in the documentation build * Handle finish and revert resize notifications * Add support for Folsom version of Swift * Implement user-api * Add support for Swift incoming/outgoing trafic metering * Pass a dict configuration file to auth\_keystone * Import only once in nova\_notifier * Fix MySQL charset error * Use default configuration file to make test data * Fix Glance control exchange * Move back api-v1 to the main api * Fix WSME arguments handling change * Remove useless gettext call in sql engine * Ground work for transifex-ify ceilometer * Add instance\_type information to NetPollster * Fix dbsync API change * Fix image\_id in instance resource metadata * Instantiate inspector in compute manager * remove direct nova db access from ceilometer * Make debugging the wsme app a bit easier * Implements database upgrade as storage engine independent * Fix the v1 api importing of acl * Add the ability to filter on metadata * Virt inspector directly layered over hypervisor API * Move meter.py into collector directory * Change mysql schema from latin1 to utf8 * Change default os-username to 'ceilometer' * Restore some metadata to the events and resources * Update documentation URL * Add sql db option to devstack for ceilometer * Remove debug print in V2 API * Start updating documentation for V2 API * Implement V2 API with Pecan and WSME * Move v1 API files into a subdirectory * Add test storage driver * Implement /meters to make discovery "nicer" from the client * Fix sqlalchemy for show\_data and v1 web api * Implement object store metering * Make Impl of mongodb and sqlalchemy consistent * add migration migrate.cfg file to the python package * Fixes to enable the jenkins doc job to work * Lower the minimum required version of anyjson * Fix blocking test for nova notifier * network: remove left-over useless nova import * tools: set novaclient minimum 
version * libvirt: fix Folsom compatibility * Lower pymongo dependency * Remove rickshaw subproject * Remove unused rpc import * Adapted to nova's compute\_driver moving * doc: fix cpu counter unit * tools: use tarballs rather than git for Folsom tests * Used auth\_token middleware from keystoneclient * Remove cinderclient dependency * Fix latest nova changes * api: replace minified files by complete version * Add Folsom tests to tox * Handle nova.flags removal * Provide default configuration file * Fix mysql\_engine option type * Remove nova.flags usage * api: add support for timestamp in \_list\_resources() * api: add timestamp interval support in \_list\_events() * tests: simplify api list\_resources * Update openstack.common(except policy) * Adopted the oslo's rpc.Service change * Use libvirt num\_cpu for CPU utilization calculation * Remove obsolete reference to instance.vcpus * Change references of /etc/ceilometer-{agent,collector}.conf to /etc/ceilometer/ceilometer.conf * Determine instance cores from public flavors API * Determine flavor type from the public nova API * Add comment about folsom compatibility change * Add keystone requirement for doc build * Avoid TypeError when loading libvirt.LibvirtDriver * Don't re-import flags and do parse\_args instead of flags.FLAGS() * doc: rename stackforge to openstack * Fix pymongo requirements * Update .gitreview for openstack * Update use of nova config to work with folsom * compute: remove get\_disks work-around * Use openstack versioning * Fix documentation build * document utc naive timestamp * Remove database access from agent pollsters * Fix merge error in central/manager.py * Fix nova config parsing * pollster trap error due to zero floating ip * Use the service.py in openstack-common * Allow no configured sources, provide a default file * Add service.py from openstack-common * Update common (except policy) * nova fake libvirt library breaking tests * Move db access out into a seperate file * Remove invalid 
fixme comments * Add new cpu\_util meter recording CPU utilization % * Fix TypeError from old-style publish\_counter calls * Fix auth middleware configuration * pin sqlalchemy to 0.7.x but not specifically 0.7.8 * add mongo index names * set tox to ignore global packages * Provide a way to disable some plugins * Use stevedore to load all plugins * implement get\_volume\_max for sqlalchemy * Add basic text/html renderer * network: floating IP account in Quantum * add unit test for CPUPollster * Clean up context usage * Add dependencies on clients used by pollsters * add ceilometer-send-counter * Update openstack.common.cfg * Fix tests broken by API change with Counter class * api: add source detail retrieval * Set source at publish time * Instance pollster emits instance. meter * timestamp columns in sqlalchemy not timezone aware * Remove obsolete/incorrect install instructions * network: emit router meter * Fix sqlalchemy performance problem * Added a working release-bugs.py script to tools/ * Change default API port * sqlalchemy record\_meter merge objs not string * Use glance public API as opposed to registry API * Add OpenStack trove classifier for PyPI * bump version number to 0.2 0.1 --- * Nova libvirt release note * Update metadata for PyPI registration * tox: add missing venv * Fixes a couple typos * Counter renaming * Set correct timestamp on floatingip counter * Fix API change in make\_test\_data.py * Fix Nova URL in doc * Some more doc fixes * Ignore instances in the ERROR state * Use the right version number in documentation * doc: fix network.\*.\* resource id * image: handle glance delete notifications * image: handle glance upload notifications * image: add update event, fix ImageServe owner * network: fix create/update counter type & doc * Assorted doc fixes * add max/sum project volume and fix tests * Add general options * compute.libvirt: split read/write counters * API: add Keystone ACL and policy support * Add documentation for configuration 
options * network: do not emit counter on exists event, fix resource id * Move net function in class method and fix instance id * Prime counter table * Fix the configuration for the nova notifier * Initialize the control\_exchange setting * Set version 0.1 * Make the instance counters use the same type * Restore manual install documentation * add quantum release note * Add release notes to docs * Update readme and create release notes * Remove duration field in Counter * Add counter for number of packets per vif * Move instance counter into its own pollster * Add a request counter for instance I/O * Rename instance disk I/O counter * Rename instances network counters * Use constant rather than string from counter type * Update the architecture diagram * Increase default polling interval * Fix compute agent publishing call * network: listen for Quantum exists event * Correct requirements filename * Fix notification subscription logic * Fix quantum notification subscriptions * Split meter publishing from the global config obj * network: add counter for actions * network: listen for Quantum notifications * Rename absolute to gauge * Fix typo in control exchanges help texts * Rework RPC notification mechanism * Update packaging files * Update URL list * Update openstack.common * Add volume/sum API endpoint for resource meters * Add resource volume/max api call * Fix dependency on anyjson * Listen for volume.delete.start instead of end * implement sqlalchemy dbengine backend * Add a notification handler for image downloads * Allow glance pollster tests to run * Create tox env definition for using a live db * Picking up dependencies from pip-requires file * Specify a new queue in manager * Rework RPC connection * Stop using nova's rpc module * Add configuration script to turn on notifications * Pep8 fixes, implement pep8 check on tests subdir * Use standard CLI options & env vars for creds * compute: remove get\_metadata\_from\_event() * Listen for volume notifications * 
Add pollster for Glance * Fix Nova notifier test case * Fix nova flag parsing * Add nova\_notifier notification driver for nova * Split instance polling code * Use stevedore to load storage engine drivers * Implement duration calculation API * Create tool for generating test meter data * Update openstack-common code to latest * Add bin/ceilometer-api for convenience * Add local copy of architecture diagram * Add timestamp parameters to the API docs * Check for doc build dependency before building * Pollster for network internal traffic (n1,n2) * Fix PEP8 issues * Add archicture diagram to documentation * added mongodb auth * Change timestamp management for resources * Log the instance causing the error when a pollster fails * Document how to install with devstack * Remove test skipping logic * Remove dependency on nova test modules * Add date range parameters to resource API * Add setuptools-git support * Add separate notification handler for instance flavor * Change instance meter type * Split the existing notification handlers up * Remove redundancy in the API * Separate the tox coverage test setup from py27 * Do not require user or project argument for event query * Add pymongo dependency for readthedocs.org build * Update openstack.common * Add API documentation * Be explicit about test dir * Add list projects API * Sort list of users and projects returned from queries * Add project arg to event and resource queries * Fix "meter" literal in event list API * collector exception on record\_metering\_data * Add API endpoint for listing raw event data * Change compute pollster API to work on one instance at a time * Create "central" agent * Skeleton for API server * fix use of source value in mongdb driver * Add {root,ephemeral}\_disk\_size counters * Implements vcpus counter * Fix nova configuration loading * Implements memory counter * Fix and document counter types * Check compute driver using new flag * Add openstack.common.{context,notifier,log} and update 
.rpc * Update review server link * Add link to roadmap * Add indexes to MongoDB driver * extend developer documentation * Reset the correct nova dependency URL * Switch .gitreview to use OpenStack gerrit * Add MongoDB engine * Convert timestamps to datetime objects before storing * Reduce complexity of storage engine API * Remove usage of nova.log * Documentation edits: * fix typo in instance properties list * Add Sphinx wrapper around existing docs * Configure nova.flags as well as openstack.common.cfg * First draft of plugin/agent documentation. Fixes bug 1018311 * Essex: update Nova to 2012.1.1, add python-novaclient * Split service preparation, periodic interval configurable * Use the same instance metadata everywhere * Emit meter event for instance "exists" * Start defining DB engine API * Fallback on nova.rpc for Essex * Add instance metadata from notification events * Combined fix to get past broken state of repo * Add more metadata to instance counter * Register storage options on import * Add Essex tests * log more than ceilometer * Remove event\_type field from meter messages * fix message signatures for nested dicts * Remove nova.flags usage * Copy openstack.common.cfg * check message signatures in the collector * Sketch out a plugin system for saving metering data * refactor meter event publishing code * Add and use ceilometer own log module * add counter type field * Use timestamp instead of datetime when creating Counter * Use new flag API * Fix a PEP8 error * Make the stand-alone test script mimic tox * Remove unneeded eventlet test requirement * Add listeners for other instance-related events * Add tox configuration * Use openstack.common.cfg for ceilometer options * Publish and receive metering messages * Add floating IP pollster * Fix tests based on DB by importing nova.tests * make the pollsters in the agent plugins * Build ceilometer-agent and ceilometer-collector * Add plugin support to the notification portion of the collector daemon * Add CPU 
time fetching * Add an example function for converting a nova notification to a counter * add a tool for recording notifications and replaying them * Add an exception handler to deal with errors that occur when the info in nova is out of sync with reality (as on my currently broken system). Also adds a nova prefix to the logger for now so messages from this module make it into the log file * Periodically fetch for disk io stats * Use nova.service, add a manager class * Change license to Apache 2.0 * Add setup.py * Import ceilometer-nova-compute * Ignore pyc files * Add link to blueprint * Add .gitreview file * initial commit ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/HACKING.rst0000664000175100017510000000205715033033467015577 0ustar00mylesmylesCeilometer Style Commandments ============================= - Step 1: Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/ - Step 2: Read on Ceilometer Specific Commandments -------------------------------- - [C301] LOG.warn() is not allowed. Use LOG.warning() - [C302] Deprecated library function os.popen() Creating Unit Tests ------------------- For every new feature, unit tests should be created that both test and (implicitly) document the usage of said feature. If submitting a patch for a bug that had no unit test, a new passing unit test should be added. If a submitted bug fix does have a unit test, be sure to add a new one that fails without the patch and passes with the patch. All unittest classes must ultimately inherit from testtools.TestCase. All setUp and tearDown methods must upcall using the super() method. tearDown methods should be avoided and addCleanup calls should be preferred. Never manually create tempfiles. Always use the tempfile fixtures from the fixture library to ensure that they are cleaned up. 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/LICENSE0000664000175100017510000002363715033033467015015 0ustar00mylesmyles Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/MAINTAINERS0000664000175100017510000000064015033033467015472 0ustar00mylesmyles= Generalist Code Reviewers = The current members of ceilometer-core are listed here: https://launchpad.net/~ceilometer-drivers/+members#active This group can +2 and approve patches in Ceilometer. However, they may choose to seek feedback from the appropriate specialist maintainer before approving a patch if it is in any way controversial or risky. = IRC handles of maintainers = gordc jd__ lhx pradk sileht ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.8099413 ceilometer-24.1.0.dev59/PKG-INFO0000644000175100017510000000706415033033521015066 0ustar00mylesmylesMetadata-Version: 2.2 Name: ceilometer Version: 24.1.0.dev59 Summary: OpenStack Telemetry Home-page: https://docs.openstack.org/ceilometer/latest/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3.12 Classifier: Topic :: System :: Monitoring Requires-Python: >=3.10 License-File: LICENSE Requires-Dist: xmltodict>=0.13.0 Requires-Dist: cachetools>=2.1.0 Requires-Dist: cotyledon>=1.3.0 Requires-Dist: futurist>=1.8.0 Requires-Dist: jsonpath-rw-ext>=1.1.3 Requires-Dist: lxml>=4.5.1 Requires-Dist: msgpack>=0.5.2 Requires-Dist: 
oslo.concurrency>=3.29.0 Requires-Dist: oslo.config>=8.6.0 Requires-Dist: oslo.i18n>=3.15.3 Requires-Dist: oslo.log>=3.36.0 Requires-Dist: oslo.reports>=1.18.0 Requires-Dist: oslo.rootwrap>=2.0.0 Requires-Dist: pbr>=2.0.0 Requires-Dist: oslo.messaging>=10.3.0 Requires-Dist: oslo.upgradecheck>=0.1.1 Requires-Dist: oslo.utils>=4.7.0 Requires-Dist: oslo.privsep>=1.32.0 Requires-Dist: python-glanceclient>=2.8.0 Requires-Dist: python-keystoneclient>=3.18.0 Requires-Dist: keystoneauth1>=3.18.0 Requires-Dist: python-neutronclient>=6.7.0 Requires-Dist: python-novaclient>=9.1.0 Requires-Dist: python-swiftclient>=3.2.0 Requires-Dist: python-cinderclient>=3.3.0 Requires-Dist: PyYAML>=5.1 Requires-Dist: requests>=2.25.1 Requires-Dist: stevedore>=1.20.0 Requires-Dist: tenacity>=6.3.1 Requires-Dist: tooz>=1.47.0 Requires-Dist: oslo.cache>=1.26.0 Requires-Dist: gnocchiclient>=7.0.0 Requires-Dist: python-zaqarclient>=1.3.0 Requires-Dist: prometheus_client>=0.20.0 Requires-Dist: requests-aws>=0.1.4 Requires-Dist: aodhclient>=3.8.0 Dynamic: author Dynamic: author-email Dynamic: classifier Dynamic: description Dynamic: home-page Dynamic: requires-dist Dynamic: requires-python Dynamic: summary ========== Ceilometer ========== -------- Overview -------- Ceilometer is a data collection service that collects event and metering data by monitoring notifications sent from OpenStack services. It publishes collected data to various targets including data stores and message queues. Ceilometer is distributed under the terms of the Apache License, Version 2.0. The full terms and conditions of this license are detailed in the LICENSE file. 
------------- Documentation ------------- Release notes are available at https://releases.openstack.org/teams/telemetry.html Developer documentation is available at https://docs.openstack.org/ceilometer/latest/ Launchpad Projects ------------------ - Server: https://launchpad.net/ceilometer Code Repository --------------- - Server: https://github.com/openstack/ceilometer Bug Tracking ------------ - Bugs: https://bugs.launchpad.net/ceilometer/ Release Notes ------------- - Server: https://docs.openstack.org/releasenotes/ceilometer/ IRC --- IRC Channel: #openstack-telemetry on `OFTC`_. Mailinglist ----------- Project use http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss as the mailinglist. Please use tag ``[Ceilometer]`` in the subject for new threads. .. _OFTC: https://oftc.net/ ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/README.rst0000664000175100017510000000236415033033467015471 0ustar00mylesmyles========== Ceilometer ========== -------- Overview -------- Ceilometer is a data collection service that collects event and metering data by monitoring notifications sent from OpenStack services. It publishes collected data to various targets including data stores and message queues. Ceilometer is distributed under the terms of the Apache License, Version 2.0. The full terms and conditions of this license are detailed in the LICENSE file. 
------------- Documentation ------------- Release notes are available at https://releases.openstack.org/teams/telemetry.html Developer documentation is available at https://docs.openstack.org/ceilometer/latest/ Launchpad Projects ------------------ - Server: https://launchpad.net/ceilometer Code Repository --------------- - Server: https://github.com/openstack/ceilometer Bug Tracking ------------ - Bugs: https://bugs.launchpad.net/ceilometer/ Release Notes ------------- - Server: https://docs.openstack.org/releasenotes/ceilometer/ IRC --- IRC Channel: #openstack-telemetry on `OFTC`_. Mailinglist ----------- Project use http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss as the mailinglist. Please use tag ``[Ceilometer]`` in the subject for new threads. .. _OFTC: https://oftc.net/ ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922512.0 ceilometer-24.1.0.dev59/RELEASENOTES.rst0000664000175100017510000025253115033033520016416 0ustar00mylesmyles========== ceilometer ========== .. _ceilometer_24.0.0-37: 24.0.0-37 ========= .. _ceilometer_24.0.0-37_New Features: New Features ------------ .. releasenotes/notes/add-aodh-metrics-afbe9b780fd137d6.yaml @ b'cd0db8b76661507dd99d7bdbbdc1bbed03fd802d' - Ceilometer is now able to poll the /metrics endpoint in Aodh to get evaluation results metrics. .. releasenotes/notes/add-map-trait-plugin-0d969f5cc7b18175.yaml @ b'd1ba90b3c3599ff0a1e26837f43e137d0c13e108' - A ``map`` event trait plugin has been added. This allows notification meter attributes to be created by mapping one set of values from an attribute to another set of values defined in the meter definition. Additional options are also available for controlling how to handle edge cases, such as unknown values and case sensitivity. .. 
releasenotes/notes/add-pool-size-metrics-cdecb979135bba85.yaml @ b'3922db4f3d4f0df586a89894363148a6ee84b28e' - Added the following meters to the central agent to capture these metrics for each storage pool by API. - `volume.provider.pool.capacity.total` - `volume.provider.pool.capacity.free` - `volume.provider.pool.capacity.provisioned` - `volume.provider.pool.capacity.virtual_free` - `volume.provider.pool.capacity.allocated` .. releasenotes/notes/enable-promethus-exporter-tls-76e78d4f4a52c6c4.yaml @ b'e769a80b6ca896bacadefa66bbc374a9be3b39f7' - Enhanced the Prometheus exporter to support TLS for exposing metrics securely. .. releasenotes/notes/remove-service-type-volume-v2-08c81098dc7c0922.yaml @ b'bbc5436b5e5b787a155c8c999cfe4b192ac0edd7' - The deprecated ``[service_types] cinderv2`` option has been removed. Use the ``[service_types] cinder`` option instead. .. releasenotes/notes/threeads-process-pollsters-cbd22cca6f2effc4.yaml @ b'492974dd0b4c5666defe2398e95d80aa325e3d0c' - Introduce ``threads_to_process_pollsters`` to enable operators to define the number of pollsters that can be executed in parallel inside a polling task. .. _ceilometer_24.0.0-37_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/make-instance-host-optional-972fa14405c1e2f6.yaml @ b'703ada2c056235e15d843cb9cd99e75b7a362c6d' - The ``instance`` resource type has been updated to make the ``host`` resource attribute optional. This allows the hypervisor a compute instance is running on to be withheld from Gnocchi's resource metadata, which may be required for security reasons e.g. for public clouds. .. releasenotes/notes/publish-network-resources-with-invalid-state-6693c6fa1fefa097.yaml @ b'7040e8e4e3fe6443e2e3be18bc00b27e856a31f4' - The ``ip.floating`` and ``network.services.vpn`` pollsters now publish samples for all found floating IPs and VPNs, even if they are known to have an unknown state, when they would previously be dropped. 
The volume of samples for such floating IPs and VPNs will be set to ``-1``. This improves visibility of floating IPs and VPNs with unknown states, allowing them to be monitored via samples and the Gnocchi metrics, making it easier to discover such resources for troubleshooting. It also moves some of the "business logic" for downstream rating/billing services such as CloudKitty out of Ceilometer itself. .. releasenotes/notes/publish-network-resources-with-invalid-state-6693c6fa1fefa097.yaml @ b'7040e8e4e3fe6443e2e3be18bc00b27e856a31f4' - The ``network.services.vpn`` now publishes samples for VPNs with status ``ERROR``, when they would previously be dropped. The sample volume for VPNs in ``ERROR`` state is ``7``. .. releasenotes/notes/remove-intel-node-manager-0889de66dede9ab0.yaml @ b'4fa2e23e4a17a1bc3f56cd9c985f17de29dc2e83' - Support for Intel Node Manager was removed. .. releasenotes/notes/remove-py39-8c39f81f856bee9f.yaml @ b'a203292c5414c365a15992ae893ee7333a90766f' - Support for Python 3.9 has been removed. Now Python 3.10 is the minimum version supported. .. _ceilometer_24.0.0-37_Deprecation Notes: Deprecation Notes ----------------- .. releasenotes/notes/deprecate-http_timeout-ce98003e4949f9d9.yaml @ b'e3950a55c2e45ad4261637f3471f1785903d4a71' - The ``[DEFAULT] http_timeout`` option has been deprecated because it is unused. .. _ceilometer_24.0.0-37_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/bug-2113768-a2db3a59c8e13558.yaml @ b'70a56ba670157bf72d1513b8c177c9294071e2eb' - Fixed `bug #2113768 `__ where the Libvirt inspector did not catch exceptions thrown when calling interfaceStats function on a domain. .. releasenotes/notes/fix-volume-provider-pool-capacity-metrics-7b8b0de29a513cea.yaml @ b'0e16c1dd91b6affef63e153dfee75d54d775849d' - [`bug 2113903 `_] Fix volume provider pool capacity metrics for ceph backend. .. _ceilometer_24.0.0: 24.0.0 ====== .. _ceilometer_24.0.0_New Features: New Features ------------ .. 
releasenotes/notes/add-disk-size-pollsters-6b819d067f9cf736.yaml @ b'36b40ed7deec6fcf81f8a9bfeead03bdb48362a8' - The ``disk.ephemeral.size`` meter is now published as a compute pollster, in addition to the existing notification meter. .. releasenotes/notes/add-disk-size-pollsters-6b819d067f9cf736.yaml @ b'36b40ed7deec6fcf81f8a9bfeead03bdb48362a8' - The ``disk.root.size`` meter is now published as a compute pollster, in addition to the existing notification meter. .. releasenotes/notes/add-parameter-for-disabled-projects-381da4543fff071d.yaml @ b'2624d7ca2faa3c98722cbd388cf25d9bf2eb78a3' - The ``[polling] ignore_disabled_projects`` option has been added. This option allows polling agent to only parse enabled projects, to reduce procese time in case many projects are disabled. .. releasenotes/notes/add-power-state-metric-cdfbb3098b50a704.yaml @ b'dab9630588e5bf334449a3794844c686e3acb734' - Added the new power.state metric from virDomainState. .. releasenotes/notes/add-swift-storage_policy-attribute-322fbb5716c5bb10.yaml @ b'56ce75d897e5e8261eec79d3552b95a60ed8663c' - The ``storage_policy`` resource metadata attribute has been added to the ``swift.containers.objects`` and ``swift.containers.objects.size`` meters, populated from already performed Swift account ``GET`` requests. This functionality requires using a new version of Swift that adds the ``storage_policy`` attribute when listing containers in an account. Ceilometer is backwards compatible with Swift versions that do not provide this functionality, but ``storage_policy`` will be set to ``None`` in samples and Gnocchi resources. .. releasenotes/notes/add-swift-storage_policy-attribute-322fbb5716c5bb10.yaml @ b'56ce75d897e5e8261eec79d3552b95a60ed8663c' - An optional ``storage_policy`` attribute has been added to the ``swift_account`` Gnocchi resource type, to store the storage policy for Swift containers in Gnocchi. For Swift accounts, ``storage_policy`` will be set to ``None``. .. 
releasenotes/notes/add-volume_type_id-attr-f29af86534907941.yaml @ b'ce3ab93cb88ccc751d2efbb5681e8f4c4704539f' - Added the ``volume_type_id`` attribute to ``volume.size`` notification samples, which stores the ID for the volume type of the given volume. .. releasenotes/notes/add-volume_type_id-attr-f29af86534907941.yaml @ b'ce3ab93cb88ccc751d2efbb5681e8f4c4704539f' - Added the ``volume_type_id`` attribute to ``volume`` resources in Gnocchi, which stores the ID for the volume type of the given volume. .. _ceilometer_24.0.0_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/add-swift-storage_policy-attribute-322fbb5716c5bb10.yaml @ b'56ce75d897e5e8261eec79d3552b95a60ed8663c' - To publish the ``storage_policy`` attribute for Swift containers, ``gnocchi_resources.yaml`` will need to be updated to the latest version. Swift in the target OpenStack cloud will also need upgrading to add support for providing the storage policy when listing containers. .. releasenotes/notes/add-volume_type_id-attr-f29af86534907941.yaml @ b'ce3ab93cb88ccc751d2efbb5681e8f4c4704539f' - ``meters.yaml`` has been updated with changes to the ``volume.size`` notification meter. If you override this file in your deployment, it needs to be updated. .. releasenotes/notes/add-volume_type_id-attr-f29af86534907941.yaml @ b'ce3ab93cb88ccc751d2efbb5681e8f4c4704539f' - ``gnocchi_resources.yaml`` has been updated with changes to the ``volume`` resource type. If you override this file in your deployment, it needs to be updated. .. releasenotes/notes/dynamic-pollster-url-joins-6cdb01c4015976f7.yaml @ b'0468126182e176e922db1c637e64a04c56fc4040' - When using dynamic pollsters to query OpenStack APIs, if the endpoint URL returned by Keystone does not have a trailing slash and ``url_path`` is a relative path, the ``url_path`` configured in the dynamic pollster would replace sections of the endpoint URL instead of being appended to the end of the URL. 
This behaviour has now been changed so that ``url_path`` values that do not start with a ``/`` are always appended to the end of endpoint URLs. This change may require existing dynamic pollsters that rely on this behaviour to be changed, but this allows dynamic pollsters to be added for OpenStack services that append the active project ID to the API endpoint URL (e.g. Trove). .. releasenotes/notes/remove-intel-cmt-perf-meters-15d0fe72b2804f48.yaml @ b'9fe1d19de22de0dcecf2bfed086b75a3a8a706a0' - The following meters were removed. Nova removed support for Intel CMT perf events in 22.0.0, and these meters can no longer be measured since then. - ``cpu_l3_cache_usage`` - ``memory_bandwidth_local`` - ``memory_bandwidth_total`` .. releasenotes/notes/remove-opencontrail-88656a9354179299.yaml @ b'f4749c7251826970d6c6a1f0abda1aef7247834a' - Support for Open Contrail has been removed. Because no SDN is supported after the removal, the mechanism to pull metrics from SDN is also removed. .. releasenotes/notes/remove-py38-80670bdcfd4dd135.yaml @ b'ebcaee9b6c1e8f3a8a7251ec1729506607321180' - Python 3.8 support was dropped. The minimum version of Python now supported is Python 3.9. .. releasenotes/notes/remove-vsphere-support-411c97b66bdcd264.yaml @ b'e37e2f3ff7789f09b91d06979214913f56d92471' - Support for VMware vSphere has been removed. .. _ceilometer_24.0.0_Deprecation Notes: Deprecation Notes ----------------- .. releasenotes/notes/remove-vsphere-support-411c97b66bdcd264.yaml @ b'e37e2f3ff7789f09b91d06979214913f56d92471' - The ``[DEFAULT] hypervisor_inspector`` option has been deprecated, because libvirt is the only supported hypervisor currently. The option will be removed in a future release. .. releasenotes/notes/rename-tenant_name_discovery-1675a236bb51176b.yaml @ b'dd9ff99dd167aa858be78a1a89bf89d9ec5e9fd4' - The ``[polling] tenant_name_discovery`` option has been deprecated in favor of the new ``[polling] identity_name_discovery`` option. .. 
_ceilometer_5.0.3: 5.0.3 ===== .. _ceilometer_5.0.3_Critical Issues: Critical Issues --------------- .. releasenotes/notes/fix-agent-coordination-a7103a78fecaec24.yaml @ b'67e47cda8e7e0d2649fef334a6e0db2826d5fbd1' - [`bug 1533787 `_] Fix an issue where agents are not properly getting registered to group when multiple notification agents are deployed. This can result in bad transformation as the agents are not coordinated. It is still recommended to set heartbeat_timeout_threshold = 0 in [oslo_messaging_rabbit] section when deploying multiple agents. .. _ceilometer_5.0.3_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/handle-malformed-resource-definitions-ad4f69f898ced34d.yaml @ b'99a56e707a1bc9049b167b303501cceb7d37e65e' - [`bug 1542189 `_] Handle malformed resource definitions in gnocchi_resources.yaml gracefully. Currently we raise an exception once we hit a bad resource and skip the rest. Instead the patch skips the bad resource and proceeds with rest of the definitions. .. releasenotes/notes/lookup-meter-def-vol-correctly-0122ae429275f2a6.yaml @ b'3a1fcaf712a7cdcffafe2ae33bc4c2508bab8a49' - [`bug 1536699 `_] Patch to fix volume field lookup in meter definition file. In case the field is missing in the definition, it raises a keyerror and aborts. Instead we should skip the missing field meter and continue with the rest of the definitions. .. releasenotes/notes/skip-duplicate-meter-def-0420164f6a95c50c.yaml @ b'4a4df73759cd78105ceb115ac0f4ec0980285d80' - [`bug 1536498 `_] Patch to fix duplicate meter definitions causing duplicate samples. If a duplicate is found, log a warning and skip the meter definition. Note that the first occurance of a meter will be used and any following duplicates will be skipped from processing. .. _ceilometer_5.0.2: 5.0.2 ===== .. _ceilometer_5.0.2_Critical Issues: Critical Issues --------------- .. 
releasenotes/notes/thread-safe-matching-4a635fc4965c5d4c.yaml @ b'3b35c4087519981ba7dd062eec988e9ee5ddf076' - [`bug 1519767 `_] fnmatch functionality in python <= 2.7.9 is not threadsafe. this issue and its potential race conditions are now patched. .. _ceilometer_5.0.2_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/aggregator-transformer-timeout-e0f42b6c96aa7ada.yaml @ b'3b35c4087519981ba7dd062eec988e9ee5ddf076' - [`bug 1531626 `_] Ensure aggregator transformer timeout is honoured if size is not provided. .. _ceilometer_5.0.1: 5.0.1 ===== .. _ceilometer_5.0.1_Other Notes: Other Notes ----------- .. releasenotes/notes/start-using-reno-9ffb7d0035846b4b.yaml @ b'949c343d12bbb27881ce11b11a2d0bd983fd6622' - Start using reno to manage release notes. .. _ceilometer_6.0.0: 6.0.0 ===== .. _ceilometer_6.0.0_New Features: New Features ------------ .. releasenotes/notes/batch-messaging-d126cc525879d58e.yaml @ b'c5895d2c6efc6676679e6973c06b85c0c3a10585' - Add support for batch processing of messages from queue. This will allow the collector and notification agent to grab multiple messages per thread to enable more efficient processing. .. releasenotes/notes/compute-discovery-interval-d19f7c9036a8c186.yaml @ b'e6fa0a84d1f7a326881f3587718f1df743b8585f' - To minimise load on Nova API, an additional configuration option was added to control discovery interval vs metric polling interval. If resource_update_interval option is configured in compute section, the compute agent will discover new instances based on defined interval. The agent will continue to poll the discovered instances at the interval defined by pipeline. .. releasenotes/notes/configurable-data-collector-e247aadbffb85243.yaml @ b'f24ea44401b8945c9cb8a34b2aedebba3c040691' - [`bug 1480333 `_] Support ability to configure collector to capture events or meters mutally exclusively, rather than capturing both always. .. 
releasenotes/notes/cors-support-70c33ba1f6825a7b.yaml @ b'c5895d2c6efc6676679e6973c06b85c0c3a10585' - Support for CORS is added. More information can be found [`here `_] .. releasenotes/notes/gnocchi-cache-1d8025dfc954f281.yaml @ b'f24ea44401b8945c9cb8a34b2aedebba3c040691' - Support resource caching in Gnocchi dispatcher to improve write performance to avoid additional queries. .. releasenotes/notes/gnocchi-client-42cd992075ee53ab.yaml @ b'1689e7053f4e7587a2b836035cdfa4fda56667fc' - Gnocchi dispatcher now uses client rather than direct http requests .. releasenotes/notes/gnocchi-host-metrics-829bcb965d8f2533.yaml @ b'e6fa0a84d1f7a326881f3587718f1df743b8585f' - [`bug 1518338 `_] Add support for storing SNMP metrics in Gnocchi.This functionality requires Gnocchi v2.1.0 to be installed. .. releasenotes/notes/keystone-v3-fab1e257c5672965.yaml @ b'1689e7053f4e7587a2b836035cdfa4fda56667fc' - Add support for Keystone v3 authentication .. releasenotes/notes/remove-alarms-4df3cdb4f1fb5faa.yaml @ b'f24ea44401b8945c9cb8a34b2aedebba3c040691' - Ceilometer alarms code is now fully removed from code base. Equivalent functionality is handled by Aodh. .. releasenotes/notes/remove-cadf-http-f8449ced3d2a29d4.yaml @ b'1689e7053f4e7587a2b836035cdfa4fda56667fc' - Support for CADF-only payload in HTTP dispatcher is dropped as audit middleware in pyCADF was dropped in Kilo cycle. .. releasenotes/notes/remove-eventlet-6738321434b60c78.yaml @ b'f24ea44401b8945c9cb8a34b2aedebba3c040691' - Remove eventlet from Ceilometer in favour of threaded approach .. releasenotes/notes/remove-rpc-collector-d0d0a354140fd107.yaml @ b'1689e7053f4e7587a2b836035cdfa4fda56667fc' - RPC collector support is dropped. The queue-based notifier publisher and collector was added as the recommended alternative as of Icehouse cycle. .. 
releasenotes/notes/support-lbaasv2-polling-c830dd49bcf25f64.yaml @ b'e6fa0a84d1f7a326881f3587718f1df743b8585f' - Support for polling Neutron's LBaaS v2 API was added as v1 API in Neutron is deprecated. The same metrics are available between v1 and v2. .. releasenotes/notes/support-snmp-cpu-util-5c1c7afb713c1acd.yaml @ b'f24ea44401b8945c9cb8a34b2aedebba3c040691' - [`bug 1513731 `_] Add support for hardware cpu_util in snmp.yaml .. releasenotes/notes/support-unique-meter-query-221c6e0c1dc1b726.yaml @ b'e6fa0a84d1f7a326881f3587718f1df743b8585f' - [`bug 1506959 `_] Add support to query unique set of meter names rather than meters associated with each resource. The list is available by adding unique=True option to request. .. _ceilometer_6.0.0_Known Issues: Known Issues ------------ .. releasenotes/notes/support-lbaasv2-polling-c830dd49bcf25f64.yaml @ b'e6fa0a84d1f7a326881f3587718f1df743b8585f' - Neutron API is not designed to be polled against. When polling against Neutron is enabled, Ceilometer's polling agents may generage a significant load against the Neutron API. It is recommended that a dedicated API be enabled for polling while Neutron's API is improved to handle polling. .. _ceilometer_6.0.0_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/always-requeue-7a2df9243987ab67.yaml @ b'244439979fd28ecb0c76d132f0be784c988b54c8' - The options `requeue_event_on_dispatcher_error' and `requeue_sample_on_dispatcher_error' have been enabled and removed. .. releasenotes/notes/batch-messaging-d126cc525879d58e.yaml @ b'c5895d2c6efc6676679e6973c06b85c0c3a10585' - batch_size and batch_timeout configuration options are added to both [notification] and [collector] sections of configuration. The batch_size controls the number of messages to grab before processing. Similarly, the batch_timeout defines the wait time before processing. .. 
releasenotes/notes/cors-support-70c33ba1f6825a7b.yaml @ b'c5895d2c6efc6676679e6973c06b85c0c3a10585' - The api-paste.ini file can be modified to include or exclude the CORs middleware. Additional configurations can be made to middleware as well. .. releasenotes/notes/gnocchi-client-42cd992075ee53ab.yaml @ b'1689e7053f4e7587a2b836035cdfa4fda56667fc' - gnocchiclient library is now a requirement if using ceilometer+gnocchi. .. releasenotes/notes/gnocchi-orchestration-3497c689268df0d1.yaml @ b'1689e7053f4e7587a2b836035cdfa4fda56667fc' - gnocchi_resources.yaml in Ceilometer should be updated. .. releasenotes/notes/improve-events-rbac-support-f216bd7f34b02032.yaml @ b'e6fa0a84d1f7a326881f3587718f1df743b8585f' - To utilize the new policy support. The policy.json file should be updated accordingly. The pre-existing policy.json file will continue to function as it does if policy changes are not required. .. releasenotes/notes/index-events-mongodb-63cb04200b03a093.yaml @ b'1689e7053f4e7587a2b836035cdfa4fda56667fc' - Run db-sync to add new indices. .. releasenotes/notes/remove-cadf-http-f8449ced3d2a29d4.yaml @ b'1689e7053f4e7587a2b836035cdfa4fda56667fc' - audit middleware in keystonemiddleware library should be used for similar support. .. releasenotes/notes/remove-rpc-collector-d0d0a354140fd107.yaml @ b'1689e7053f4e7587a2b836035cdfa4fda56667fc' - Pipeline.yaml files for agents should be updated to notifier:// or udp:// publishers. The rpc:// publisher is no longer supported. .. releasenotes/notes/support-lbaasv2-polling-c830dd49bcf25f64.yaml @ b'e6fa0a84d1f7a326881f3587718f1df743b8585f' - By default, Ceilometer will poll the v2 API. To poll legacy v1 API, add neutron_lbaas_version=v1 option to configuration file. .. _ceilometer_6.0.0_Critical Issues: Critical Issues --------------- .. 
releasenotes/notes/always-requeue-7a2df9243987ab67.yaml @ b'244439979fd28ecb0c76d132f0be784c988b54c8' - The previous configuration options default for `requeue_sample_on_dispatcher_error' and `requeue_event_on_dispatcher_error' allowed to lose data very easily: if the dispatcher failed to send data to the backend (e.g. Gnocchi is down), then the dispatcher raised and the data were lost forever. This was completely unacceptable, and nobody should be able to configure Ceilometer in that way." .. releasenotes/notes/fix-agent-coordination-a7103a78fecaec24.yaml @ b'e84a10882a9b682ff41c84e8bf4ee2497e7e7a31' - [`bug 1533787 `_] Fix an issue where agents are not properly getting registered to group when multiple notification agents are deployed. This can result in bad transformation as the agents are not coordinated. It is still recommended to set heartbeat_timeout_threshold = 0 in [oslo_messaging_rabbit] section when deploying multiple agents. .. releasenotes/notes/thread-safe-matching-4a635fc4965c5d4c.yaml @ b'f24ea44401b8945c9cb8a34b2aedebba3c040691' - [`bug 1519767 `_] fnmatch functionality in python <= 2.7.9 is not threadsafe. this issue and its potential race conditions are now patched. .. _ceilometer_6.0.0_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/aggregator-transformer-timeout-e0f42b6c96aa7ada.yaml @ b'1689e7053f4e7587a2b836035cdfa4fda56667fc' - [`bug 1531626 `_] Ensure aggregator transformer timeout is honoured if size is not provided. .. releasenotes/notes/cache-json-parsers-888307f3b6b498a2.yaml @ b'e6fa0a84d1f7a326881f3587718f1df743b8585f' - [`bug 1550436 `_] Cache json parsers when building parsing logic to handle event and meter definitions. This will improve agent startup and setup time. .. releasenotes/notes/event-type-race-c295baf7f1661eab.yaml @ b'0e3ae8a667d9b9d6e19a7515854eb1703fc05013' - [`bug 1254800 `_] Add better support to catch race conditions when creating event_types .. 
releasenotes/notes/fix-aggregation-transformer-9472aea189fa8f65.yaml @ b'e6fa0a84d1f7a326881f3587718f1df743b8585f' - [`bug 1539163 `_] Add ability to define whether to use first or last timestamps when aggregating samples. This will allow more flexibility when chaining transformers. .. releasenotes/notes/fix-floatingip-pollster-f5172060c626b19e.yaml @ b'1f9f4e1072a5e5037b93734bafcc65e4211eb19f' - [`bug 1536338 `_] Patch was added to fix the broken floatingip pollster that polled data from nova api, but since the nova api filtered the data by tenant, ceilometer was not getting any data back. The fix changes the pollster to use the neutron api instead to get the floating ip info. .. releasenotes/notes/fix-network-lb-bytes-sample-5dec2c6f3a8ae174.yaml @ b'1689e7053f4e7587a2b836035cdfa4fda56667fc' - [`bug 1530793 `_] network.services.lb.incoming.bytes meter was previous set to incorrect type. It should be a gauge meter. .. releasenotes/notes/gnocchi-cache-b9ad4d85a1da8d3f.yaml @ b'1689e7053f4e7587a2b836035cdfa4fda56667fc' - [`bug 255569 `_] Fix caching support in Gnocchi dispatcher. Added better locking support to enable smoother cache access. .. releasenotes/notes/gnocchi-orchestration-3497c689268df0d1.yaml @ b'1689e7053f4e7587a2b836035cdfa4fda56667fc' - Fix samples from Heat to map to correct Gnocchi resource type .. releasenotes/notes/gnocchi-udp-collector-00415e6674b5cc0f.yaml @ b'1689e7053f4e7587a2b836035cdfa4fda56667fc' - [`bug 1523124 `_] Fix gnocchi dispatcher to support UDP collector .. releasenotes/notes/handle-malformed-resource-definitions-ad4f69f898ced34d.yaml @ b'02b1e1399bf885d03113a1cc125b1f97ed5540b9' - [`bug 1542189 `_] Handle malformed resource definitions in gnocchi_resources.yaml gracefully. Currently we raise an exception once we hit a bad resource and skip the rest. Instead the patch skips the bad resource and proceeds with rest of the definitions. .. 
releasenotes/notes/improve-events-rbac-support-f216bd7f34b02032.yaml @ b'e6fa0a84d1f7a326881f3587718f1df743b8585f' - [`bug 1504495 `_] Configure ceilometer to handle policy.json rules when possible. .. releasenotes/notes/index-events-mongodb-63cb04200b03a093.yaml @ b'1689e7053f4e7587a2b836035cdfa4fda56667fc' - [`bug 1526793 `_] Additional indices were added to better support querying of event data. .. releasenotes/notes/lookup-meter-def-vol-correctly-0122ae429275f2a6.yaml @ b'903a0a527cb240cfd9462b7f56d3463db7128993' - [`bug 1536699 `_] Patch to fix volume field lookup in meter definition file. In case the field is missing in the definition, it raises a keyerror and aborts. Instead we should skip the missing field meter and continue with the rest of the definitions. .. releasenotes/notes/mongodb-handle-large-numbers-7c235598ca700f2d.yaml @ b'e6fa0a84d1f7a326881f3587718f1df743b8585f' - [`bug 1532661 `_] Fix statistics query failures due to large numbers stored in MongoDB. Data from MongoDB is returned as Int64 for big numbers when int and float types are expected. The data is cast to appropriate type to handle large data. .. releasenotes/notes/skip-duplicate-meter-def-0420164f6a95c50c.yaml @ b'0c6f11cf88bf1a13a723879de46ec616678d2e0b' - [`bug 1536498 `_] Patch to fix duplicate meter definitions causing duplicate samples. If a duplicate is found, log a warning and skip the meter definition. Note that the first occurance of a meter will be used and any following duplicates will be skipped from processing. .. releasenotes/notes/sql-query-optimisation-ebb2233f7a9b5d06.yaml @ b'f24ea44401b8945c9cb8a34b2aedebba3c040691' - [`bug 1506738 `_] [`bug 1509677 `_] Optimise SQL backend queries to minimise query load .. releasenotes/notes/support-None-query-45abaae45f08eda4.yaml @ b'e6fa0a84d1f7a326881f3587718f1df743b8585f' - [`bug 1388680 `_] Suppose ability to query for None value when using SQL backend. .. _ceilometer_6.0.0_Other Notes: Other Notes ----------- .. 
releasenotes/notes/configurable-data-collector-e247aadbffb85243.yaml @ b'f24ea44401b8945c9cb8a34b2aedebba3c040691' - Configure individual dispatchers by specifying meter_dispatchers and event_dispatchers in configuration file. .. releasenotes/notes/gnocchi-cache-1d8025dfc954f281.yaml @ b'f24ea44401b8945c9cb8a34b2aedebba3c040691' - A dogpile.cache supported backend is required to enable cache. Additional configuration `options `_ are also required. .. _ceilometer_7.0.5: 7.0.5 ===== .. _ceilometer_7.0.5_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/refresh-legacy-cache-e4dbbd3e2eeca70b.yaml @ b'66dd8ab65e2d9352de86e47056dea0b701e21a15' - A local cache is used when polling instance metrics to minimise calls Nova API. A new option is added `resource_cache_expiry` to configure a time to live for cache before it expires. This resolves issue where migrated instances are not removed from cache. This is only relevant when `instance_discovery_method` is set to `naive`. It is recommended to use `libvirt_metadata` if possible. .. _ceilometer_7.0.1: 7.0.1 ===== .. _ceilometer_7.0.1_New Features: New Features ------------ .. releasenotes/notes/http_proxy_to_wsgi_enabled-616fa123809e1600.yaml @ b'032032642ad49e01d706f19f51d672fcff403442' - Ceilometer sets up the HTTPProxyToWSGI middleware in front of Ceilometer. The purpose of this middleware is to set up the request URL correctly in case there is a proxy (for instance, a loadbalancer such as HAProxy) in front of Ceilometer. So, for instance, when TLS connections are being terminated in the proxy, and one tries to get the versions from the / resource of Ceilometer, one will notice that the protocol is incorrect; It will show 'http' instead of 'https'. So this middleware handles such cases. Thus helping Keystone discovery work correctly. The HTTPProxyToWSGI is off by default and needs to be enabled via a configuration value. .. _ceilometer_7.0.0: 7.0.0 ===== .. _ceilometer_7.0.0_Prelude: Prelude ------- .. 
releasenotes/notes/rename-ceilometer-dbsync-eb7a1fa503085528.yaml @ b'18c181f0b3ce07a0cd552a9060dd09a95cc26078' Ceilometer backends are no more only databases but also REST API like Gnocchi. So ceilometer-dbsync binary name doesn't make a lot of sense and have been renamed ceilometer-upgrade. The new binary handles database schema upgrade like ceilometer-dbsync does, but it also handle any changes needed in configured ceilometer backends like Gnocchi. .. _ceilometer_7.0.0_New Features: New Features ------------ .. releasenotes/notes/add-magnum-event-4c75ed0bb268d19c.yaml @ b'cf3f7c992e0d29e06a7bff6c1df2f0144418d80f' - Added support for magnum bay CRUD events, event_type is 'magnum.bay.*'. .. releasenotes/notes/http-dispatcher-verify-ssl-551d639f37849c6f.yaml @ b'2fca7ebd7c6a4d29c8a320fffd035ed9814e8293' - In the [dispatcher_http] section of ceilometer.conf, verify_ssl can be set to True to use system-installed certificates (default value) or False to ignore certificate verification (use in development only!). verify_ssl can also be set to the location of a certificate file e.g. /some/path/cert.crt (use for self-signed certs) or to a directory of certificates. The value is passed as the 'verify' option to the underlying requests method, which is documented at http://docs.python-requests.org/en/master/user/advanced/#ssl-cert-verification .. releasenotes/notes/memory-bandwidth-meter-f86cf01178573671.yaml @ b'ed7b6dbc952e49ca69de9a94a01398b106aece4b' - Add two new meters, including memory.bandwidth.total and memory.bandwidth.local, to get memory bandwidth statistics based on Intel CMT feature. .. releasenotes/notes/perf-events-meter-b06c2a915c33bfaf.yaml @ b'aaedbbe0eb02ad1f86395a5a490495b64ce26777' - Add four new meters, including perf.cpu.cycles for the number of cpu cycles one instruction needs, perf.instructions for the count of instructions, perf.cache_references for the count of cache hits and cache_misses for the count of caches misses. .. 
releasenotes/notes/support-meter-batch-recording-mongo-6c2bdf4fbb9764eb.yaml @ b'a2a04e5d234ba358c25d541f31f8ca1a61bfd5d8' - Add support of batch recording metering data to mongodb backend, since the pymongo support *insert_many* interface which can be used to batch record items, in "big-data" scenarios, this change can improve the performance of metering data recording. .. releasenotes/notes/use-glance-v2-in-image-pollsters-137a315577d5dc4c.yaml @ b'f8933f4abda4ecfc07ee41f84fd5fd8f6667e95a' - Since the Glance v1 APIs won't be maintained any more, this change add the support of glance v2 in images pollsters. .. _ceilometer_7.0.0_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/always-requeue-7a2df9243987ab67.yaml @ b'40684dafae76eab77b66bb1da7e143a3d7e2c9c8' - The options `requeue_event_on_dispatcher_error' and `requeue_sample_on_dispatcher_error' have been enabled and removed. .. releasenotes/notes/single-thread-pipelines-f9e6ac4b062747fe.yaml @ b'5750fddf288c749cacfc825753928f66e755758d' - Batching is enabled by default now when coordinated workers are enabled. Depending on load, it is recommended to scale out the number of `pipeline_processing_queues` to improve distribution. `batch_size` should also be configured accordingly. .. releasenotes/notes/use-glance-v2-in-image-pollsters-137a315577d5dc4c.yaml @ b'f8933f4abda4ecfc07ee41f84fd5fd8f6667e95a' - The option `glance_page_size' has been removed because it's not actually needed. .. _ceilometer_7.0.0_Deprecation Notes: Deprecation Notes ----------------- .. releasenotes/notes/deprecated_database_event_dispatcher_panko-607d558c86a90f17.yaml @ b'3685dcf417543db0bb708b347e996d88385c8c5b' - The event database dispatcher is now deprecated. It has been moved to a new project, alongside the Ceilometer API for /v2/events, called Panko. .. 
releasenotes/notes/kwapi_deprecated-c92b9e72c78365f0.yaml @ b'2bb81d41f1c5086b68b1290362c72966c1e33702' - The Kwapi pollsters are deprecated and will be removed in the next major version of Ceilometer. .. releasenotes/notes/rename-ceilometer-dbsync-eb7a1fa503085528.yaml @ b'18c181f0b3ce07a0cd552a9060dd09a95cc26078' - For backward compatibility reason we temporary keep ceilometer-dbsync, at least for one major version to ensure deployer have time update their tooling. .. _ceilometer_7.0.0_Critical Issues: Critical Issues --------------- .. releasenotes/notes/always-requeue-7a2df9243987ab67.yaml @ b'40684dafae76eab77b66bb1da7e143a3d7e2c9c8' - The previous configuration options default for `requeue_sample_on_dispatcher_error' and `requeue_event_on_dispatcher_error' allowed to lose data very easily: if the dispatcher failed to send data to the backend (e.g. Gnocchi is down), then the dispatcher raised and the data were lost forever. This was completely unacceptable, and nobody should be able to configure Ceilometer in that way." .. _ceilometer_7.0.0_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/add-db-legacy-clean-tool-7b3e3714f414c448.yaml @ b'800034dc0bbb9502893dedd9bcde7c170780c375' - [`bug 1578128 `_] Add a tool that allow users to drop the legacy alarm and alarm_history tables. .. releasenotes/notes/add-full-snmpv3-usm-support-ab540c902fa89b9d.yaml @ b'dc254e2f78a4bb42b0df6556df8347c7137ab5b2' - [`bug 1597618 `_] Add the full support of snmp v3 user security model. .. releasenotes/notes/single-thread-pipelines-f9e6ac4b062747fe.yaml @ b'5750fddf288c749cacfc825753928f66e755758d' - Fix to improve handling messages in environments heavily backed up. Previously, notification handlers greedily grabbed messages from queues which could cause ordering issues. A fix was applied to sequentially process messages in a single thread to prevent ordering issues. .. 
releasenotes/notes/unify-timestamp-of-polled-data-fbfcff43cd2d04bc.yaml @ b'8dd821a03dcff45258251bebfd2beb86c07d94f7' - [`bug 1491509 `_] Patch to unify timestamp in samples polled by pollsters. Set the time point polling starts as timestamp of samples, and drop timestamping in pollsters. .. _ceilometer_8.0.0: 8.0.0 ===== .. _ceilometer_8.0.0_Prelude: Prelude ------- .. releasenotes/notes/drop-image-meter-9c9b6cebd546dae7.yaml @ b'4d8bc1095627574919d2d699e82f5522698cde5e' In an effort to minimise the noise, Ceilometer will no longer produce meters which have no measurable data associated with it. Image meter only captures state information which is already captured in events and other meters. .. releasenotes/notes/drop-instance-meter-1b657717b21a0f55.yaml @ b'a91253a8104b3f56a847ec64dcbe1720b06f20e9' Samples are required to measure some aspect of a resource. Samples not measuring anything will be dropped. .. _ceilometer_8.0.0_New Features: New Features ------------ .. releasenotes/notes/http-dispatcher-batching-4e17fce46a196b07.yaml @ b'54e4cc77e08800d2e9b9f8d2c67290118beae6ac' - In the [dispatcher_http] section of ceilometer.conf, batch_mode can be set to True to activate sending meters and events in batches, or False (default value) to send each meter and event with a fresh HTTP call. .. releasenotes/notes/http_proxy_to_wsgi_enabled-616fa123809e1600.yaml @ b'752af208e0e42264b3140b0fd686c6490259c134' - Ceilometer sets up the HTTPProxyToWSGI middleware in front of Ceilometer. The purpose of this middleware is to set up the request URL correctly in case there is a proxy (for instance, a loadbalancer such as HAProxy) in front of Ceilometer. So, for instance, when TLS connections are being terminated in the proxy, and one tries to get the versions from the / resource of Ceilometer, one will notice that the protocol is incorrect; It will show 'http' instead of 'https'. So this middleware handles such cases. Thus helping Keystone discovery work correctly. 
The HTTPProxyToWSGI is off by default and needs to be enabled via a configuration value. .. releasenotes/notes/less-nova-polling-ac56687da3f8b1a3.yaml @ b'b692d3a6a61fbbe4c3db8c14a7ae5423559b5144' - The Ceilometer compute agent can now retrieve some instance metadata from the metadata libvirt API instead of polling the Nova API. Since Mitaka, Nova fills this metadata with some information about the instance. To enable this feature you should set [compute]/instance_discovery_method = libvirt_metadata in the configuration file. The only downside of this method is that user_metadata (and some other instance attributes) are no longer part of the samples created by the agent. But when Gnocchi is used as backend, this is not an issue since Gnocchi doesn't store resource metadata aside of the measurements. And the missing informations are still retrieved through the Nova notifications and will fully update the resource information in Gnocchi. .. releasenotes/notes/support-cinder-volume-snapshot-backup-metering-d0a93b86bd53e803.yaml @ b'b7344dd6c8232214b19c396772e544c929627468' - Add support of metering the size of cinder volume/snapshot/backup. Like other meters, these are useful for billing system. .. _ceilometer_8.0.0_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/deprecate-http-dispatcher-dbbaacee8182b550.yaml @ b'e2e74892daa79388accb6df4d4642a4632332911' - Configuration values can passed in via the querystring of publisher in pipeline. For example, rather than setting target, timeout, verify_ssl, and batch_mode under [dispatcher_http] section of conf, you can specify http:///?verify_ssl=True&batch=True&timeout=10. Use `raw_only=1` if only the raw details of event are required. .. releasenotes/notes/drop-image-meter-9c9b6cebd546dae7.yaml @ b'4d8bc1095627574919d2d699e82f5522698cde5e' - Any existing commands utilising `image` meter should be switched to `image.size` meter which will provide equivalent functionality .. 
releasenotes/notes/drop-instance-meter-1b657717b21a0f55.yaml @ b'a91253a8104b3f56a847ec64dcbe1720b06f20e9' - The `instance` meter no longer will be generated. For equivalent functionality, perform the exact same query on any compute meter such as `cpu`, `disk.read.requests`, `memory.usage`, `network.incoming.bytes`, etc... .. releasenotes/notes/instance-discovery-new-default-7f9b451a515dddf4.yaml @ b'13aeba410b0195a90d1c359d59c5bfb0820aae9b' - Ceilometer legacy backends and Ceilometer API are now deprecated. Polling all nova instances from compute agent is no more required with Gnocchi. So we switch the [compute]instance_discovery_method to libvirt_metadata. To switch back to the old deprecated behavior you can set it back to 'naive'. .. releasenotes/notes/less-nova-polling-ac56687da3f8b1a3.yaml @ b'b692d3a6a61fbbe4c3db8c14a7ae5423559b5144' - If you are using Gnocchi as backend it's strongly recommended to switch [compute]/instance_discovery_method to libvirt_metadata. This will reduce the load on the Nova API especially if you have many compute nodes. .. releasenotes/notes/pecan-debug-removed-dc737efbf911bde7.yaml @ b'3e95cc12fd24b088ffbe028145fb39f590f3e734' - The api.pecan_debug option has been removed. .. releasenotes/notes/polling-definition-efffb92e3810e571.yaml @ b'89995280927d495042f342e8a0a520fa4775c515' - Pipeline processing in polling agents was removed in Liberty cycle. A new polling specific definition file is created to handle polling functionality and pipeline definition file is now reserved exclusively for transformations and routing. The polling.yaml file follows the same syntax as the pipeline.yaml but only handles polling attributes such as interval, discovery, resources, meter matching. It is configured by setting cfg_file under the polling section.If no polling definition file is found, it will fallback to reuse pipeline_cfg_file. .. _ceilometer_8.0.0_Deprecation Notes: Deprecation Notes ----------------- .. 
releasenotes/notes/ceilometer-api-deprecate-862bfaa54e80fa01.yaml @ b'6616a714009a80c7484fa2292c2331868617cb9c' - Ceilometer API is deprecated. Use the APIs from Aodh (alarms), Gnocchi (metrics), and/or Panko (events). .. releasenotes/notes/deprecate-file-dispatcher-2aff376db7609136.yaml @ b'0aaa1603d4d77d1465b6039e556f68d4425122e4' - With collector service being deprecated, we now have to address the duplication between dispatchers and publishers. The file dispatcher is now marked as deprecated. Use the file publisher to push samples into a file. .. releasenotes/notes/deprecate-http-dispatcher-dbbaacee8182b550.yaml @ b'e2e74892daa79388accb6df4d4642a4632332911' - As the collector service is being deprecated, the duplication of publishers and dispatchers is being addressed. The http dispatcher is now marked as deprecated and the recommended path is to use http publisher. .. releasenotes/notes/drop-image-meter-9c9b6cebd546dae7.yaml @ b'4d8bc1095627574919d2d699e82f5522698cde5e' - The `image` meter is dropped in favour of `image.size` meter. .. releasenotes/notes/drop-instance-meter-1b657717b21a0f55.yaml @ b'a91253a8104b3f56a847ec64dcbe1720b06f20e9' - The `instance` meter no longer will be generated. .. releasenotes/notes/less-nova-polling-ac56687da3f8b1a3.yaml @ b'b692d3a6a61fbbe4c3db8c14a7ae5423559b5144' - The [compute]/workload_partitioning = True is deprecated in favor of [compute]/instance_discovery_method = workload_partitioning .. _ceilometer_8.0.0_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/refresh-legacy-cache-e4dbbd3e2eeca70b.yaml @ b'3ae997e3d6c9648292969366d25362297474b815' - A local cache is used when polling instance metrics to minimise calls Nova API. A new option is added `resource_cache_expiry` to configure a time to live for cache before it expires. This resolves issue where migrated instances are not removed from cache. This is only relevant when `instance_discovery_method` is set to `naive`. 
It is recommended to use `libvirt_metadata` if possible. .. _ceilometer_8.0.0_Other Notes: Other Notes ----------- .. releasenotes/notes/ceilometer-event-api-removed-49c57835e307b997.yaml @ b'8d23f431ab0bd638edbf2197e56bea68d7b06a21' - The Events API (exposed at /v2/events) which was deprecated has been removed. The Panko project is now responsible for providing this API and can be installed separately. .. releasenotes/notes/remove-ceilometer-dbsync-53aa1b529f194f15.yaml @ b'779673534fefc39633aed18bb020885bd6020e06' - The deprecated ceilometer-dbsync has been removed. Use ceilometer-upgrade instead. .. _ceilometer_9.0.0: 9.0.0 ===== .. _ceilometer_9.0.0_Prelude: Prelude ------- .. releasenotes/notes/network-statistics-from-opendaylight-787df77484d8d751.yaml @ b'ae0716c6d061b8784afeedf8823ebc94717a9aeb' Network Statistics From OpenDaylight. .. _ceilometer_9.0.0_New Features: New Features ------------ .. releasenotes/notes/add-memory-swap-metric-f1633962ab2cf0f6.yaml @ b'f8c243f448dbb02307ccadc96d9e585091c62060' - Add memory swap metric for VM, including 'memory.swap.in' and 'memory.swap.out'. .. releasenotes/notes/deprecate-ceilometer-collector-b793b91cd28b9e7f.yaml @ b'11191a4612e424c02a5d90a1337141c26f79c098' - Because of deprecating the collector, the default publishers in pipeline.yaml and event_pipeline.yaml are now changed using database instead of notifier. .. releasenotes/notes/deprecate-kafka-publisher-17b4f221758e15da.yaml @ b'af23b6eeafcb7adc76f60bfcb04aee699c975e31' - Ceilometer supports generic notifier to publish data and allow user to customize parameters such as topic, transport driver and priority. The publisher configuration in pipeline.yaml can be notifer://[notifier_ip]:[notifier_port]?topic=[topic]&driver=driver&max_retry=100 Not only rabbit driver, but also other driver like kafka can be used. .. 
releasenotes/notes/http-publisher-authentication-6371c5a9aa8d4c03.yaml @ b'191748a403e1e38d6cf643d210a7fd9de3a7fc11' - In the 'publishers' section of a meter/event pipeline definition, https:// can now be used in addition to http://. Furthermore, either Basic or client-certificate authentication can be used (obviously, client cert only makes sense in the https case). For Basic authentication, use the form http://username:password@hostname/. For client certificate authentication pass the client certificate's path (and the key file path, if the key is not in the certificate file) using the parameters 'clientcert' and 'clientkey', e.g. https://hostname/path?clientcert=/path/to/cert&clientkey=/path/to/key. Any parameters or credentials used for http(s) publishers are removed from the URL before the actual HTTP request is made. .. releasenotes/notes/network-statistics-from-opendaylight-787df77484d8d751.yaml @ b'ae0716c6d061b8784afeedf8823ebc94717a9aeb' - Add a ceilometer driver to collect network statistics information using REST APIs exposed by network-statistics module in OpenDaylight. .. releasenotes/notes/network-statistics-from-opendaylight-787df77484d8d751.yaml @ b'ae0716c6d061b8784afeedf8823ebc94717a9aeb' - Add support for network statistics meters with gnocchi .. releasenotes/notes/parallel_requests_option-a3f901b6001e26e4.yaml @ b'c84c113c0a402216c6a6f09ed4622a8163e4aaeb' - A new option named `max_parallel_requests` is available to control the maximum number of parallel requests that can be executed by the agents. This option also replaces the `poolsize` option of the HTTP publisher. .. releasenotes/notes/scan-domains-for-tenants-8f8c9edcb74cc173.yaml @ b'ff5822d2b4be480e3c35ea2a700bf7e643f205aa' - The tenant (project) discovery code in the polling agent now scans for tenants in all available domains. .. 
releasenotes/notes/support-multiple-meter-definition-files-e3ce1fa73ef2e1de.yaml @ b'f05939d742233b44240036e1151a07bb71b4159d' - Support loading multiple meter definition files and allow users to add their own meter definitions into several files according to different types of metrics under the directory of /etc/ceilometer/meters.d. .. releasenotes/notes/zaqar-publisher-f7efa030b71731f4.yaml @ b'c1c56d6aaeacd1e81b6d74b7ef9c2eeae22cefbc' - Add a new publisher for pushing samples or events to a Zaqar queue. .. _ceilometer_9.0.0_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/add-tool-for-migrating-data-to-gnocchi-cea8d4db68ce03d0.yaml @ b'c15d8bc6330de63317089ca614f86667cc40ed7a' - Add a tool for migrating metrics data from Ceilometer's native storage to Gnocchi. Since we have deprecated Ceilometer API and the Gnocchi will be the recommended metrics data storage backend. .. releasenotes/notes/tooz-coordination-system-d1054b9d1a5ddf32.yaml @ b'27604abd461d7dbf8098c7cc794dfcc2686c4527' - Ceilometer now leverages the latest distribution mechanism provided by the tooz library. Therefore the options `coordination.retry_backoff` and `coordination.max_retry_interval` do not exist anymore. .. _ceilometer_9.0.0_Deprecation Notes: Deprecation Notes ----------------- .. releasenotes/notes/deprecate-ceilometer-collector-b793b91cd28b9e7f.yaml @ b'11191a4612e424c02a5d90a1337141c26f79c098' - Collector is no longer supported in this release. The collector introduces lags in pushing data to backend. To optimize the architecture, Ceilometer push data through dispatchers using publishers in notification agent directly. .. releasenotes/notes/deprecate-http-control-exchanges-026a8de6819841f8.yaml @ b'5beedc81e1e569059686b64fb632cffdd86985b5' - Allow users to add additional exchanges in ceilometer.conf instead of hardcoding exchanges. Now original http_control_exchanges is being deprecated and renamed notification_control_exchanges. 
Besides, the new option is integrated with other exchanges in default EXCHANGE_OPTS to make it available to extend additional exchanges. .. releasenotes/notes/deprecate-kafka-publisher-17b4f221758e15da.yaml @ b'af23b6eeafcb7adc76f60bfcb04aee699c975e31' - Kafka publisher is deprecated to use generic notifier instead. .. releasenotes/notes/deprecate-pollster-list-ccf22b0dea44f043.yaml @ b'32a7c4dfbd9ad794a3c5d300c4996c6586fc0626' - Deprecating support for enabling pollsters via command line. Meter and pollster enablement should be configured via polling.yaml file. .. releasenotes/notes/drop-kwapi-b687bc476186d01b.yaml @ b'47ae182b4dbada1beb4e1b9017fad102d9549aec' - Previously deprecated kwapi meters are now removed. .. releasenotes/notes/polling-deprecation-4d5b83180893c053.yaml @ b'1dd80664a8b219ddb1092be5413a313073e2785b' - Usage of pipeline.yaml for polling configuration is now deprecated. The dedicated polling.yaml should be used instead. .. releasenotes/notes/remove-refresh-pipeline-618af089c5435db7.yaml @ b'1dbd307a3a6ab45a797d4f88e342b2f537e3bb6f' - The pipeline dynamic refresh code has been removed. Ceilometer relies on the cotyledon library for a few releases which provides reload functionality by sending the SIGHUP signal to the process. This achieves the same feature while making sure the reload is explicit once the file is correctly and entirely written to the disk, avoiding the failing load of half-written files. .. _ceilometer_9.0.0_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/use-notification-transport-url-489f3d31dc66c4d2.yaml @ b'379f10fc739358b85b031761fad7d9cd2658af77' - The transport_url defined in [oslo_messaging_notifications] was never used, which contradicts the oslo_messaging documentation. This is now fixed. .. _ceilometer_9.0.0_Other Notes: Other Notes ----------- .. 
releasenotes/notes/ship-yaml-files-33aa5852bedba7f0.yaml @ b'd9c11bb0abf15de1487a287bdcdf42c6ffb2d94d' - Ship YAML files to ceilometer/pipeline/data/ make it convenient to update all the files on upgrade. Users can copy yaml files from /usr/share/ceilometer and customise their own files located in /etc/ceilometer/. .. _ceilometer_10.0.1-14: 10.0.1-14 ========= .. _ceilometer_10.0.1-14_New Features: New Features ------------ .. releasenotes/notes/instance-record-launched-created-deleted-d7f44df3bbcf0790.yaml @ b'6582e8e4e20c511acab9e16cfd3c0f8e569f705a' - `launched_at`/`created_at`/`deleted_at` of Nova instances are now tracked. .. _ceilometer_10.0.1: 10.0.1 ====== .. _ceilometer_10.0.1_New Features: New Features ------------ .. releasenotes/notes/add-disk-latency-metrics-9e5c05108a78c3d9.yaml @ b'f4b58ae01e8ddfc515e6f14a0d19d726370f4870' - Add `disk.device.read.latency` and `disk.device.write.latency` meters to capture total time used by read or write operations. .. _ceilometer_10.0.0: 10.0.0 ====== .. _ceilometer_10.0.0_New Features: New Features ------------ .. releasenotes/notes/cinder-capacity-samples-de94dcfed5540b6c.yaml @ b'b10076d03c63595bb5829149868b1fc5e525f216' - Add support to capture volume capacity usage details from cinder. This data is extracted from notifications sent by Cinder starting in Ocata. .. releasenotes/notes/manager-based-ipc-queues-85e3bf59ffdfb0ac.yaml @ b'911b973d7056b8bbbffef1f4d36bd662173a4f91' - Workload partitioning of notification agent is now split into queues based on pipeline type (sample, event, etc...) rather than per individual pipeline. This will save some memory usage specifically for pipeline definitions with many source/sink combinations. .. releasenotes/notes/selective-pipeline-notification-47e8a390b1c7dcc4.yaml @ b'60d9b87a808c75c1cd111a7fc788f58e07b34e99' - The notification-agent can now be configured to either build meters or events. 
By default, the notification agent will continue to load both pipelines and build both data models. To selectively enable a pipeline, configure the `pipelines` option under the `[notification]` section. Addition pipelines can be created following the format used by existing pipelines. .. releasenotes/notes/snmp-diskio-samples-fc4b5ed5f19c096c.yaml @ b'9f7878eed04bfa0f45542d6e005e77f6d89e5b87' - Add hardware.disk.read.* and hardware.disk.write.* metrics to capture diskio details. .. _ceilometer_10.0.0_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/ceilometer-api-removal-6bd44d3eab05e593.yaml @ b'd881dd52289d453b9f9d94c7c32c0672a70a8064' - The deprecated Ceilometer API has been removed. .. releasenotes/notes/drop-collector-4c207b35d67b2977.yaml @ b'fad69e9603e20c076175733374993fd31df49a4c' - The collector service is removed. From Ocata, it's possible to edit the pipeline.yaml and event_pipeline.yaml files and modify the publisher to provide the same functionality as collector dispatcher. You may change publisher to 'gnocchi', 'http', 'panko', or any combination of available publishers listed in documentation. .. releasenotes/notes/fix-radosgw-name-6de6899ddcd7e06d.yaml @ b'fdc54487922cd27feecf316f334a641f2a567321' - Use `radosgw.*` to enable/disable radosgw meters explicitly rather than `rgw.*` .. releasenotes/notes/manager-based-ipc-queues-85e3bf59ffdfb0ac.yaml @ b'911b973d7056b8bbbffef1f4d36bd662173a4f91' - If workload partitioning of the notification agent is enabled, the notification agent should not run alongside pre-Queens agents. Doing so may result in missed samples when leveraging transformations. To upgrade without loss of data, set `notification_control_exchanges` option to empty so only existing `ceilometer-pipe-*` queues are processed. Once cleared, reset `notification_control_exchanges` option and launch the new notification agent(s). If `workload_partitioning` is not enabled, no special steps are required. .. 
releasenotes/notes/pipeline-fallback-polling-3d962a0fff49ccdd.yaml @ b'bb573177cf352992425a6d5801ed7440ad8d158b' - The deprecated support of configure polling in the `pipeline.yaml` file has been removed. Ceilometer now only uses the `polling.yaml` file for polling configuration. .. releasenotes/notes/remove-compute-workload-partitioning-option-26538bc1e80500e3.yaml @ b'60f11cb7af5aba061429f4cb86a7ca4a175aadb2' - The deprecated `compute.workload_partitioning` option has been removed in favor of `compute.instance_discovery_method`. .. releasenotes/notes/remove-direct-publisher-5785ee7edd16c4d9.yaml @ b'ffc87c0b4c5896632d18aaa36fdc7f31d9d71e99' - Remove direct publisher and use the explicit publisher instead. .. releasenotes/notes/remove-exchange-control-options-75ecd49423639068.yaml @ b'508fad109daa3bd5011dad9d9ccc2e8baccfc3df' - The deprecated control exchange options have been removed. .. releasenotes/notes/remove-file-dispatcher-56ba1066c20d314a.yaml @ b'1cb713f3e238fa1d534e8ca9f5baae4c54af0609' - The deprecated file dispatcher has been removed. .. releasenotes/notes/remove-gnocchi-dispatcher-dd588252976c2abb.yaml @ b'83ffaffcb2cee6a0b19601bc7cefd863685601e2' - The Gnocchi dispatcher has been removed and replaced by a native Gnocchi publisher. The configuration options from the `[dispatcher_gnocchi]` has been removed and should be passed via the URL in `pipeline.yaml`. The service authentication override can be done by adding specific credentials to a `[gnocchi]` section instead. .. releasenotes/notes/remove-http-dispatcher-1afdce1d1dc3158d.yaml @ b'1cb713f3e238fa1d534e8ca9f5baae4c54af0609' - The deprecated http dispatcher has been removed. .. releasenotes/notes/remove-kafka-broker-publisher-7026b370cfc831db.yaml @ b'20023730174bda005b3fe6978ad2e98efe060b75' - The deprecated kafka publisher has been removed, use NotifierPublisher instead. .. 
releasenotes/notes/remove-nova-http-log-option-64e97a511e58da5d.yaml @ b'd563e1348b2baa58dee772b37d0cdd08d67adb8f' - The deprecated `nova_http_log_debug` option has been removed. .. releasenotes/notes/remove-pollster-list-bda30d747fb87c9e.yaml @ b'32c129aabd05633ee8a2dbe5b91c01d2ff910882' - The deprecated `pollster-list` option has been removed. .. _ceilometer_10.0.0_Deprecation Notes: Deprecation Notes ----------------- .. releasenotes/notes/deprecate-aggregated-disk-metrics-54a395c05e74d685.yaml @ b'1e673a64b9a90880cb83895a2b54f91de5b85b4d' - disk.* aggregated metrics for instance are deprecated, in favor of the per disk metrics (disk.device.*). Now, it's up to the backend to provide such aggregation feature. Gnocchi already provides this. .. releasenotes/notes/fix-radosgw-name-6de6899ddcd7e06d.yaml @ b'fdc54487922cd27feecf316f334a641f2a567321' - Previously, to enable/disable radosgw.* meters, you must define entry_point name rather than meter name. This is corrected so you do not need to be aware of entry_point naming. Use `radosgw.*` to enable/disable radosgw meters explicitly rather than `rgw.*`. `rgw.*` support is deprecated and will be removed in Rocky. .. releasenotes/notes/remove-shuffle_time_before_polling_task-option-05a4d225236c64b1.yaml @ b'faac031a9b6893963375674f031e28a8c486c2a8' - The `shuffle_time_before_polling_task` option has been removed. This option never worked in the way it was originally intended to. .. _ceilometer_10.0.0_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/fix-radosgw-name-6de6899ddcd7e06d.yaml @ b'fdc54487922cd27feecf316f334a641f2a567321' - Fix ability to enable/disable radosgw.* meters explicitly .. _ceilometer_11.1.0-6: 11.1.0-6 ======== .. _ceilometer_11.1.0-6_New Features: New Features ------------ .. releasenotes/notes/add-availability_zone-gnocchi-instance-15170e4966a89d63.yaml @ b'0d3d55af704d3f0689805f2b03c5397ce07667aa' - Add availability_zone attribute to gnocchi instance resources. 
Populates this attribute by consuming instance.create.end events. .. _ceilometer_11.1.0-6_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/add-availability_zone-gnocchi-instance-15170e4966a89d63.yaml @ b'0d3d55af704d3f0689805f2b03c5397ce07667aa' - To take advantage of this new feature you will need to update your gnocchi_resources.yaml file. See the example file for an example. You will need to ensure all required attributes of an instance are specified in the event_attributes. .. _ceilometer_11.1.0: 11.1.0 ====== .. _ceilometer_11.1.0_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/add-loadbalancer-resource-type-a73c29594b72f012.yaml @ b'f56407d7953fe635bb703da543f2807a8d32369f' - [`bug 1848286 `_] Enable load balancer metrics by adding the loadbalancer resource type, allowing Gnocchi to capture measurement data for Octavia load balancers. .. _ceilometer_11.0.0: 11.0.0 ====== .. _ceilometer_11.0.0_New Features: New Features ------------ .. releasenotes/notes/add-disk-latency-metrics-9e5c05108a78c3d9.yaml @ b'8fdd19e78a2053285569cda05cdc4875b716190c' - Add `disk.device.read.latency` and `disk.device.write.latency` meters to capture total time used by read or write operations. .. releasenotes/notes/instance-record-launched-created-deleted-d7f44df3bbcf0790.yaml @ b'36414e1cebe3a43d962f8d2adfe7cc34742e9057' - `launched_at`/`created_at`/`deleted_at` of Nova instances are now tracked. .. releasenotes/notes/polling-batch-size-7fe11925df8d1221.yaml @ b'2dc21a5f05ee670292a8a7f97952d3942c32f5cf' - Add support for configuring the size of samples the poller will send in each batch. .. releasenotes/notes/prometheus-bcb201cfe46d5778.yaml @ b'2b8052052d861b856b3522a8d7f857735793f01b' - A new publisher has been added to push data to Prometheus Pushgateway. .. releasenotes/notes/save-rate-in-gnocchi-66244262bc4b7842.yaml @ b'e906bcda82918aff000ab76f067a2dc49660d0b4' - Archive policies can now be configured per metrics in gnocchi_resources.yaml. 
A default list of archive policies is now created by Ceilometer. They are called "ceilometer-low-rate" for all IOs metrics and "ceilometer-low" for others. .. releasenotes/notes/use-usable-metric-if-available-970ee58e8fdeece6.yaml @ b'2dee485da7a6f2cdf96525fabc18a8c27c8be570' - use memory usable metric from libvirt memoryStats if available. .. _ceilometer_11.0.0_Known Issues: Known Issues ------------ .. releasenotes/notes/gnocchi-no-metric-by-default-b643e09f5ffef2c4.yaml @ b'826ba35c6eb9900bb0a557f6e4f06f7d1b9bd394' - Ceilometer created metrics that could never get measures depending on the polling configuration. Metrics are now created only if Ceilometer gets at least a measure for them. .. _ceilometer_11.0.0_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/add-ipmi-sensor-data-gnocchi-70573728499abe86.yaml @ b'663c523328690dfcc30c1ad986ba57e566bd194c' - `ceilometer-upgrade` must be run to build IPMI sensor resource in Gnocchi. .. releasenotes/notes/polling-batch-size-7fe11925df8d1221.yaml @ b'2dc21a5f05ee670292a8a7f97952d3942c32f5cf' - batch_size option added to [polling] section of configuration. Use batch_size=0 to disable batching of samples. .. releasenotes/notes/remove-gnocchi-dispatcher-options-4f4ba2a155c1a766.yaml @ b'5efc0281faab2f17acab8d384beaf3c87b087e58' - The deprecated `dispatcher_gnocchi` option group has been removed. .. releasenotes/notes/removed-rgw-ae3d80c2eafc9319.yaml @ b'dd1b7abf329755c8377862328f770e0b7974f5c2' - Deprecated `rgw.*` meters have been removed. Use `radosgw.*` instead. .. releasenotes/notes/save-rate-in-gnocchi-66244262bc4b7842.yaml @ b'e906bcda82918aff000ab76f067a2dc49660d0b4' - Ceilometer now creates it own archive policies in Gnocchi and use them to create metrics in Gnocchi. Old metrics kept their current archive policies and will not be updated with ceilometer-upgrade. Only newly created metrics will be impacted. 
Archive policy can still be overridden with the publisher url (e.g: gnocchi://archive_policy=high). .. _ceilometer_11.0.0_Deprecation Notes: Deprecation Notes ----------------- .. releasenotes/notes/polling-batch-size-7fe11925df8d1221.yaml @ b'2dc21a5f05ee670292a8a7f97952d3942c32f5cf' - The option batch_polled_samples in the [DEFAULT] section is deprecated. Use batch_size option in [polling] to configure and/or disable batching. .. releasenotes/notes/save-rate-in-gnocchi-66244262bc4b7842.yaml @ b'e906bcda82918aff000ab76f067a2dc49660d0b4' - cpu_util and \*.rate meters are deprecated and will be removed in future release in favor of the Gnocchi rate calculation equivalent. .. releasenotes/notes/transformer-ed4b1ea7d1752576.yaml @ b'1dcbd607df0696101b40f77d7721489679ebe0ba' - Usage of transformers in Ceilometer pipelines is deprecated. Transformers in Ceilometer have never computed samples correctly when you have multiple workers. This functionality can be done by the storage backend easily without all issues that Ceilometer has. For example, the rating is already computed in Gnocchi today. .. releasenotes/notes/transformer-ed4b1ea7d1752576.yaml @ b'1dcbd607df0696101b40f77d7721489679ebe0ba' - Pipeline Partitioning is also deprecated. This was only useful to workaround of some issues that tranformers has. .. _ceilometer_11.0.0_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/add-ipmi-sensor-data-gnocchi-70573728499abe86.yaml @ b'663c523328690dfcc30c1ad986ba57e566bd194c' - Ceilometer previously did not create IPMI sensor data from IPMI agent or Ironic in Gnocchi. This data is now pushed to Gnocchi. .. _ceilometer_12.1.0: 12.1.0 ====== .. _ceilometer_12.1.0_Prelude: Prelude ------- .. releasenotes/notes/add-upgrade-check-framework-d78858c54cb85f91.yaml @ b'2e395e05c3073155d6bdb3f8dba1745c1801921f' Added new tool ``ceilometer-status upgrade check``. .. _ceilometer_12.1.0_New Features: New Features ------------ .. 
releasenotes/notes/add-upgrade-check-framework-d78858c54cb85f91.yaml @ b'2e395e05c3073155d6bdb3f8dba1745c1801921f' - New framework for ``ceilometer-status upgrade check`` command is added. This framework allows adding various checks which can be run before a Ceilometer upgrade to ensure if the upgrade can be performed safely. .. _ceilometer_12.1.0_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/add-upgrade-check-framework-d78858c54cb85f91.yaml @ b'2e395e05c3073155d6bdb3f8dba1745c1801921f' - Operator can now use new CLI tool ``ceilometer-status upgrade check`` to check if Ceilometer deployment can be safely upgraded from N-1 to N release. .. _ceilometer_12.1.0_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/add-loadbalancer-resource-type-a73c29594b72f012.yaml @ b'b591fe11d7df4d2eda3ef5da854035b8ef780ebf' - [`bug 1848286 `_] Enable load balancer metrics by adding the loadbalancer resource type, allowing Gnocchi to capture measurement data for Octavia load balancers. .. _ceilometer_12.0.0: 12.0.0 ====== .. _ceilometer_12.0.0_New Features: New Features ------------ .. releasenotes/notes/add-json-output-to-file-publisher-786380cb7e21b56b.yaml @ b'063af43744bb0c29ce866bba6d3c40e3508a201f' - Add new json output option for the existing file publisher. .. releasenotes/notes/snmp-cpu-util-055cd7704056c1ce.yaml @ b'8fd68396af595a2ba009591c9e53a1b55557b311' - new metrics are available for snmp polling hardware.cpu.user, hardware.cpu.nice, hardware.cpu.system, hardware.cpu.idle, hardware.cpu.wait, hardware.cpu.kernel, hardware.cpu.interrupt. They replace deprecated hardware.cpu.util and hardware.system_stats.cpu.idle. .. _ceilometer_12.0.0_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/remove-batch_polled_samples-b40241c8aad3667d.yaml @ b'413a9a9446a42f196e8ea8fe6f0575426b08205e' - Remove deprecated option `batch_polled_samples`. .. 
releasenotes/notes/remove-compute-disk-meters-264e686622886ff0.yaml @ b'f7b1218b8e26a0f9a5924a15b029a961413cc40a' - The deprecated `disk.*` meters have been removed. Use the `disk.device.*` meters instead. .. releasenotes/notes/remove-compute-rate-deprecated-meters-201893c6b686b04a.yaml @ b'f7b1218b8e26a0f9a5924a15b029a961413cc40a' - The deprecated meter for compute where removed: - disk.read.requests.rate - disk.write.requests.rate - disk.read.bytes.rate - disk.write.bytes.rate - disk.device.read.requests.rate - disk.device.write.requests.rate - disk.device.read.bytes.rate - disk.device.write.bytes.rate .. releasenotes/notes/remove-meter-definitions-cfg-file-d57c726d563d805f.yaml @ b'4106079180bde2e989511d1781fb7ea319098310' - The deprecated `meter_definitions_cfg_file` option has been removed. .. releasenotes/notes/remove-notification-workload-partitioning-2cef114fb2478e39.yaml @ b'9d90ce8d37c0020077e4429f41c1ea937c1b3c1e' - The deprecated workload partitioning for notification agent has been removed. .. releasenotes/notes/remove-publisher-topic-options-7a40787a3998921d.yaml @ b'56063f3262726daf5906baf90a981f9d88eaa9ed' - The notifier publisher options `metering_topic` and `event_topic` are deprecated and will be removed. Use the `topic` query parameter in the notifier publisher URL instead. .. releasenotes/notes/remove-transformers-14e00a789dedd76b.yaml @ b'9db5c6c9bfc66018aeb78c4a262e1bfa9b326798' - The support for transformers has been removed from the pipeline. .. _ceilometer_12.0.0_Deprecation Notes: Deprecation Notes ----------------- .. releasenotes/notes/deprecate-events-6561f4059fa25c02.yaml @ b'8a0245a5b3e1357d35ad6653be37ca01176577e4' - The Ceilometer event subsystem and pipeline is now deprecated and will be removed in a future release. .. releasenotes/notes/snmp-cpu-util-055cd7704056c1ce.yaml @ b'8fd68396af595a2ba009591c9e53a1b55557b311' - metrics hardware.cpu.util and hardware.system_stats.cpu.idle are now deprecated. 
Other hardware.cpu.* metrics should be used instead. .. _ceilometer_13.1.0: 13.1.0 ====== .. _ceilometer_13.1.0_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/add-loadbalancer-resource-type-a73c29594b72f012.yaml @ b'1ae5dcbb32a54ce9edf5396e114aabb527fdc853' - [`bug 1848286 `_] Enable load balancer metrics by adding the loadbalancer resource type, allowing Gnocchi to capture measurement data for Octavia load balancers. .. _ceilometer_13.0.0: 13.0.0 ====== .. _ceilometer_13.0.0_Prelude: Prelude ------- .. releasenotes/notes/add-upgrade-check-framework-d78858c54cb85f91.yaml @ b'53321c1a7240967a5076f055e60092786f66b2a0' Added new tool ``ceilometer-status upgrade check``. .. _ceilometer_13.0.0_New Features: New Features ------------ .. releasenotes/notes/add-availability_zone-gnocchi-instance-15170e4966a89d63.yaml @ b'21a810fec6bbf19584cad7aa5aff59ecea826907' - Add availability_zone attribute to gnocchi instance resources. Populates this attribute by consuming instance.create.end events. .. releasenotes/notes/add-upgrade-check-framework-d78858c54cb85f91.yaml @ b'53321c1a7240967a5076f055e60092786f66b2a0' - New framework for ``ceilometer-status upgrade check`` command is added. This framework allows adding various checks which can be run before a Ceilometer upgrade to ensure if the upgrade can be performed safely. .. _ceilometer_13.0.0_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/add-availability_zone-gnocchi-instance-15170e4966a89d63.yaml @ b'21a810fec6bbf19584cad7aa5aff59ecea826907' - To take advantage of this new feature you will need to update your gnocchi_resources.yaml file. See the example file for an example. You will need to ensure all required attributes of an instance are specified in the event_attributes. .. 
releasenotes/notes/add-upgrade-check-framework-d78858c54cb85f91.yaml @ b'53321c1a7240967a5076f055e60092786f66b2a0' - Operator can now use new CLI tool ``ceilometer-status upgrade check`` to check if Ceilometer deployment can be safely upgraded from N-1 to N release. .. releasenotes/notes/remove-meter-definitions-cfg-file-config-476596fc86c36a81.yaml @ b'1f8ec3facda98a395975c330a7e197228fd047f1' - Remove deprecated option meter_definitions_cfg_file, use meter_definitions_dirs to configure meter notification file. .. releasenotes/notes/switch-to-oslo-privsep-b58f20a279f31bc0.yaml @ b'bd0d5a8a27b29455e19ad062f44dd1ffb8af1abf' - The following commands are no longer required to be listed in your rootwrap configuration: ipmitool. .. _ceilometer_13.0.0_Security Issues: Security Issues --------------- .. releasenotes/notes/switch-to-oslo-privsep-b58f20a279f31bc0.yaml @ b'bd0d5a8a27b29455e19ad062f44dd1ffb8af1abf' - Privsep transitions. Ceilometer is transitioning from using the older style rootwrap privilege escalation path to the new style Oslo privsep path. This should improve performance and security of Ceilometer in the long term. .. releasenotes/notes/switch-to-oslo-privsep-b58f20a279f31bc0.yaml @ b'bd0d5a8a27b29455e19ad062f44dd1ffb8af1abf' - Privsep daemons are now started by Ceilometer when required. These daemons can be started via rootwrap if required. rootwrap configs therefore need to be updated to include new privsep daemon invocations. .. _ceilometer_14.1.0-4: 14.1.0-4 ======== .. _ceilometer_14.1.0-4_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/fix-1940660-5226988f2e7ae1bd.yaml @ b'fae674ad4afe92ff4bb30322edde65e0dfa9d191' - [`bug 1940660 `_] Fixes an issue with the Swift pollster where the ``[service_credentials] cafile`` option was not used. This could prevent communication with TLS-enabled Swift APIs. .. _ceilometer_14.0.0: 14.0.0 ====== .. _ceilometer_14.0.0_New Features: New Features ------------ .. 
releasenotes/notes/dynamic-pollster-system-6b45c8c973201b2b.yaml @ b'7bff46921e6a5f9c8ecae97aa3756d8c570f23c8' - Add dynamic pollster system. The dynamic pollster system enables operators to gather new metrics on the fly (without needing to code pollsters). .. releasenotes/notes/dynamic-pollster-system-for-non-openstack-apis-4e06694f223f34f3.yaml @ b'7cba277d798c07410b9b41bef945b83e8c4a16e5' - Add the support for non-OpenStack APIs in the dynamic pollster system. This extension enables operators to create pollster on the fly to handle metrics from systems such as the RadosGW API. .. releasenotes/notes/include-monasca-publisher-1f47dde52af50feb.yaml @ b'126350c0ae609c5d35d54556883da2476e81e30e' - Include a publisher for the Monasca API. A ``monasca://`` pipeline sink will send data to a Monasca instance, using credentials configured in ceilometer.conf. This functionality was previously available in the Ceilosca project (https://github.com/openstack/monasca-ceilometer). .. _ceilometer_14.0.0_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/drop-py-2-7-87352d5763131c13.yaml @ b'c6395782172007cc868b1cfd0f11c8baa3567eb4' - Python 2.7 support has been dropped. Last release of ceilometer to support py2.7 is OpenStack Train. The minimum version of Python now supported by ceilometer is Python 3.6. .. _ceilometer_14.0.0_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/add-loadbalancer-resource-type-a73c29594b72f012.yaml @ b'1617aa3099bfa5f7b38c11c51b23d2b9cf9b5c2b' - [`bug 1848286 `_] Enable load balancer metrics by adding the loadbalancer resource type, allowing Gnocchi to capture measurement data for Octavia load balancers. .. _ceilometer_15.1.0: 15.1.0 ====== .. _ceilometer_15.1.0_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/fix-1940660-5226988f2e7ae1bd.yaml @ b'3f73d8baeec85703ac307830772d3b3372fbe10f' - [`bug 1940660 `_] Fixes an issue with the Swift pollster where the ``[service_credentials] cafile`` option was not used. 
This could prevent communication with TLS-enabled Swift APIs. .. _ceilometer_16.0.1-12: 16.0.1-12 ========= .. _ceilometer_16.0.1-12_New Features: New Features ------------ .. releasenotes/notes/add-tenant-name-discovery-668260bb4b2b0e8c.yaml @ b'f7aab07bc57becb60970ba29430f3c42a9d5444e' - Identify user and projects names with the help of their UUIDs in the polled samples. If they are identified, set "project_name" and "user_name" fields in the sample to the corresponding values. .. _ceilometer_16.0.1: 16.0.1 ====== .. _ceilometer_16.0.1_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/fix-1940660-5226988f2e7ae1bd.yaml @ b'8fd515773bc98309dc9c9a5b1f1363f2f98a85f8' - [`bug 1940660 `_] Fixes an issue with the Swift pollster where the ``[service_credentials] cafile`` option was not used. This could prevent communication with TLS-enabled Swift APIs. .. _ceilometer_16.0.0: 16.0.0 ====== .. _ceilometer_16.0.0_New Features: New Features ------------ .. releasenotes/notes/cinder-volume-size-poller-availability_zone-2d20a7527e2341b9.yaml @ b'77b516acac5c7130177e5de5a720cf41e62f3b8b' - The resource metadata for the Cinder volume size poller now includes the availability zone field. .. _ceilometer_16.0.0_Deprecation Notes: Deprecation Notes ----------------- .. releasenotes/notes/deprecate-xen-support-27600e2bf7be548c.yaml @ b'fd0a561bea956f1b62f6ca5a27e762cb76ad9a90' - Support for XenServer/Xen Cloud Platform has been deprecated and will be removed in a future release. .. _ceilometer_17.0.2-6: 17.0.2-6 ======== .. _ceilometer_17.0.2-6_New Features: New Features ------------ .. releasenotes/notes/add-tenant-name-discovery-668260bb4b2b0e8c.yaml @ b'a43afad30f9fa96417f95cc25580d687b8acdf2e' - Identify user and projects names with the help of their UUIDs in the polled samples. If they are identified, set "project_name" and "user_name" fields in the sample to the corresponding values. .. _ceilometer_17.0.0: 17.0.0 ====== .. 
_ceilometer_17.0.0_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/remove-xen-support-7cb932b7bc621269.yaml @ b'99bdd78d950977b11effba08cf3a76e5b87bf9b4' - Support for XenServer/Xen Cloud Platform has been removed. .. _ceilometer_17.0.0_Deprecation Notes: Deprecation Notes ----------------- .. releasenotes/notes/bug-1929178-a8243526ce2311f7.yaml @ b'46ad2786ae48f41a97a014fd140f022ee3ee3c1c' - The ``[coordination] check_watchers`` parameter has been deprecated since it has been ineffective. .. _ceilometer_17.0.0_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/fix-1940660-5226988f2e7ae1bd.yaml @ b'ed404c5f66e874779d58d3ac81f28ae22c55cf09' - [`bug 1940660 `_] Fixes an issue with the Swift pollster where the ``[service_credentials] cafile`` option was not used. This could prevent communication with TLS-enabled Swift APIs. .. _ceilometer_18.1.0: 18.1.0 ====== .. _ceilometer_18.1.0_New Features: New Features ------------ .. releasenotes/notes/add-tenant-name-discovery-668260bb4b2b0e8c.yaml @ b'c6f00257d4586eebfcaf4a208824f5be3a63c3f4' - Identify user and projects names with the help of their UUIDs in the polled samples. If they are identified, set "project_name" and "user_name" fields in the sample to the corresponding values. .. _ceilometer_18.0.0: 18.0.0 ====== .. _ceilometer_18.0.0_New Features: New Features ------------ .. releasenotes/notes/openstack-dynamic-pollsters-metadata-enrichment-703cf5914cf0c578.yaml @ b'fbb4b6d264c9d24e3f85f891ef765507be1f899a' - OpenStack Dynamic pollsters metadata enrichment with other OpenStack API's data. .. _ceilometer_18.0.0_Deprecation Notes: Deprecation Notes ----------------- .. releasenotes/notes/deprecate-generic-hardware-declarative-pollstar-dfa418bf6a5e0459.yaml @ b'd10f6ca32fd9dfe5100cb54531adb4bba866edf8' - ``GenericHardwareDeclarativePollster`` has been deprecated and will be removed in a future release. 
This pollster was designed to be used in TripleO deployment to gather hardware metrics from overcloud nodes but Telemetry services are no longer deployed in undercloud in current TripleO. .. releasenotes/notes/deprecate-generic-hardware-declarative-pollstar-dfa418bf6a5e0459.yaml @ b'd10f6ca32fd9dfe5100cb54531adb4bba866edf8' - The ``NodesDiscoveryTripleO`` discovery plugin has been deprecated and will be removed in a future release. This plugin is designed for TripleO deployment but no longer used since Telemetry services were removed from undercloud. .. releasenotes/notes/deprecate-neutron-fwaas-e985afe956240c08.yaml @ b'dc5dd89152a1b2167e60a2c14c9df1b9cf027d87' - Support for Neutron FWaaS has been officially deprecated. The feature has been useless since the Neutron FWaaS project was retired. .. releasenotes/notes/deprecate-neutron-lbaas-5a36406cbe44bbe3.yaml @ b'8917c73964eb764c3c4beb65d3713b36938181dd' - Support for Neutron LBaaS has been officially deprecated. The feature has been useless since the Neutron LBaaS project was retired. .. _ceilometer_18.0.0_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/fix-notification-batch-9bb42cbdf817e7f9.yaml @ b'ec5b90e98f605626ec766e3f8b823044e34ddc2d' - The ``[notification] batch_size`` parameter now takes effect to enable batch processing of notifications. The ``[notification] batch_timeout`` parameter has been restored at the same time to determine how much and how long notifications are kept. .. _ceilometer_19.1.0: 19.1.0 ====== .. _ceilometer_19.1.0_New Features: New Features ------------ .. releasenotes/notes/add-tenant-name-discovery-668260bb4b2b0e8c.yaml @ b'1da0e14bef969c7ccff57910f1b4408234d50f5c' - Identify user and projects names with the help of their UUIDs in the polled samples. If they are identified, set "project_name" and "user_name" fields in the sample to the corresponding values. .. _ceilometer_19.1.0_Bug Fixes: Bug Fixes --------- .. 
releasenotes/notes/bug-2007108-dba7163b245ad8fd.yaml @ b'23308bfa7f0816eb53b843111f2054b6868bee2d' - [`bug 2007108 `_] The retired metrics dependent on SNMP have been removed from the default ``polling.yaml``. .. _ceilometer_19.0.0: 19.0.0 ====== .. _ceilometer_19.0.0_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/drop-python-3-6-and-3-7-f67097fa6894da52.yaml @ b'bf263b11181f4e44850e991282766b3bdf4f41e1' - Python 3.6 & 3.7 support has been dropped. The minimum version of Python now supported is Python 3.8. .. releasenotes/notes/remove-check_watchers-a7c955703b6d9f57.yaml @ b'3400ad134b80512fd3a06ff8760e80b0abeaafb6' - The ``[coordination] check_watchers`` parameter has been removed. .. releasenotes/notes/remove-generic-hardware-declarative-pollster-e05c614f273ab149.yaml @ b'a28cef7036edc2ecb0f60b5d27a97735482e7f98' - ``GenericHardwareDeclarativePollster`` has been removed. Because of this removal all metrics gathered by SNMP daemon have been removed as well. .. releasenotes/notes/remove-generic-hardware-declarative-pollster-e05c614f273ab149.yaml @ b'a28cef7036edc2ecb0f60b5d27a97735482e7f98' - The ``NodesDiscoveryTripleO`` discovery plugin has been removed. .. releasenotes/notes/remove-neutron-lbaas-d3d4a5327f6a167a.yaml @ b'318c54648c2c85d4f4f5425c5ffc5e5f3dda86f2' - Support for neutron-lbaas resources has been removed. .. _ceilometer_20.0.0: 20.0.0 ====== .. _ceilometer_20.0.0_New Features: New Features ------------ .. releasenotes/notes/add-tenant-name-discovery-668260bb4b2b0e8c.yaml @ b'79454d6b22787627ae6239aa7b2707101ba30212' - Identify user and projects names with the help of their UUIDs in the polled samples. If they are identified, set "project_name" and "user_name" fields in the sample to the corresponding values. .. _ceilometer_20.0.0_Bug Fixes: Bug Fixes --------- .. 
releasenotes/notes/bug-2007108-dba7163b245ad8fd.yaml @ b'0118742bd396b1783a1e3691a21d3d375bcaa9a1' - [`bug 2007108 `_] The retired metrics dependent on SNMP have been removed from the default ``polling.yaml``. .. _ceilometer_21.0.0: 21.0.0 ====== .. _ceilometer_21.0.0_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/volume-metrics-01ddde0180bc21cb.yaml @ b'abec9b3eaa5aed11b73090c58c1c77db60aead02' - The default ``polling.yaml`` file has been updated and now it enables meters related to cinder by default. .. _ceilometer_21.0.0_Deprecation Notes: Deprecation Notes ----------------- .. releasenotes/notes/deprecate-vmware-ae49e07e40e74577.yaml @ b'297089a622c25795f97d647d54c6240ab0e12b1c' - Support for VMWare vSphere has been deprecated, because the vmwareapi virt driver in nova has been marked experimental and may be removed in a future release. .. releasenotes/notes/deprecate-windows-support-d784b975ce878864.yaml @ b'7660339b4a4d578f2615fd6c27e8c9627ab29d37' - Support for running Ceilometer in Windows operating systems has been deprecated because of retirement of the Winstackers project. Because of this, Hyper-V inspector is also deprecated. .. _ceilometer_22.0.0: 22.0.0 ====== .. _ceilometer_22.0.0_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/remove-monasca-d5ceda231839d43d.yaml @ b'd2e247cf385f28a31fa965a8c13b48a9ae63cc5f' - Remove integration with the inactive Monasca project .. _ceilometer_22.0.0_Deprecation Notes: Deprecation Notes ----------------- .. releasenotes/notes/deprecate-contrail-256177299deb6926.yaml @ b'36e47d74328d0aff9ba20f3fe40dbaf36cef3cc8' - Support for OpenContrail, which is currently known as Tungsten Fabric, has been deprecated and will be removed in a future release. .. releasenotes/notes/deprecate-odl-07e3f59165612566.yaml @ b'a92d77b1823f7d9789d07faca785299637cabaa6' - Support for OpenDaylight has been deprecated and will be removed in a future release. .. _ceilometer_23.0.0: 23.0.0 ====== .. 
_ceilometer_23.0.0_New Features: New Features ------------ .. releasenotes/notes/add-volume-pollster-metadata-d7b435fed9aac0aa.yaml @ b'21f44848269e57b3f0fc03511db89cd8f3b6b672' - Add volume.volume_type_id and backup.is_incremental metadata for cinder pollsters. Also user_id information is now included for backups with the generated samples. .. _ceilometer_23.0.0_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/add-volume-pollster-metadata-d7b435fed9aac0aa.yaml @ b'21f44848269e57b3f0fc03511db89cd8f3b6b672' - The cinder api microversion has been increased from Pike to Wallaby version (3.64) for volume/snapshot/backup related pollsters. These might not work until the cinder API has been upgraded up to this microversion. .. releasenotes/notes/remove-opendaylight-c3839bbe9aa2a227.yaml @ b'c2de6a104a1c6e85701bfc1b6cafc43ec89998f2' - Support for OpenDaylight has been removed. .. releasenotes/notes/remove-sahara-9254593d4fb137b9.yaml @ b'8036d4913e8aaeb4f18607d9d9cfbf836c77b020' - Default value of the ``[notification] notification_control_exchanges`` option has been updated and ``sahara`` is no longer included by default. .. releasenotes/notes/remove-sahara-9254593d4fb137b9.yaml @ b'8036d4913e8aaeb4f18607d9d9cfbf836c77b020' - The default event definiton has been updated and no longer includes events for sahara. .. releasenotes/notes/remove-uml-e86feeabdd16c628.yaml @ b'c9d71d92283789496fce1e713e06fb05889839f5' - The ``[DEFAULT] virt_type`` option no longer supports ``uml``. UML support by nova was removed in nova 23.3.0 release. .. releasenotes/notes/remove-windows-support-0d280cc7c7fffc61.yaml @ b'3b8adafbb05ff0ba114cdd95066ea790a98a51c6' - Support for running ceilometer in Windows operating systems has been removed. Because of the removal, Hyper-V inspector has also been removed. .. _ceilometer_23.0.0_Bug Fixes: Bug Fixes --------- .. 
releasenotes/notes/parallels-virt_type-ee29c4802fdf5c8e.yaml @ b'6d3afd83faf76b44b8c620034846cf9c59cbc75a' - The ``[DEFAULT] virt_type`` option now supports ``parallels``. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/bindep.txt0000664000175100017510000000041615033033467016000 0ustar00mylesmyleslibxml2-dev [platform:dpkg test] libxslt-devel [platform:rpm test] libxslt1-dev [platform:dpkg test] build-essential [platform:dpkg] libffi-dev [platform:dpkg] gettext [platform:dpkg] libvirt-dev [platform:dpkg test] libvirt-devel [platform:rpm test] pkg-config [test] ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7919414 ceilometer-24.1.0.dev59/ceilometer/0000775000175100017510000000000015033033521016114 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/__init__.py0000664000175100017510000000117015033033467020235 0ustar00mylesmyles# Copyright 2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class NotImplementedError(NotImplementedError): pass ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/agent.py0000664000175100017510000000734515033033467017606 0ustar00mylesmyles# # Copyright 2013 Intel Corp. 
# Copyright 2014 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import fnmatch import os from oslo_log import log import yaml LOG = log.getLogger(__name__) class ConfigException(Exception): def __init__(self, cfg_type, message, cfg): self.cfg_type = cfg_type self.msg = message self.cfg = cfg def __str__(self): return '{} {}: {}'.format(self.cfg_type, self.cfg, self.msg) class SourceException(Exception): def __init__(self, message, cfg): self.msg = message self.cfg = cfg def __str__(self): return 'Source definition invalid: {} ({})'.format(self.msg, self.cfg) class ConfigManagerBase: """Base class for managing configuration file refresh""" def __init__(self, conf): self.conf = conf def load_config(self, cfg_file): """Load a configuration file and set its refresh values.""" if os.path.exists(cfg_file): cfg_loc = cfg_file else: cfg_loc = self.conf.find_file(cfg_file) if not cfg_loc: LOG.debug("No pipeline definitions configuration file found! 
" "Using default config.") cfg_loc = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'pipeline', 'data', cfg_file) with open(cfg_loc) as fap: conf = yaml.safe_load(fap) LOG.debug("Config file: %s", conf) return conf class Source: """Represents a generic source""" def __init__(self, cfg): self.cfg = cfg try: self.name = cfg['name'] except KeyError as err: raise SourceException( "Required field %s not specified" % err.args[0], cfg) def __str__(self): return self.name def check_source_filtering(self, data, d_type): """Source data rules checking - At least one meaningful datapoint exist - Included type and excluded type can't co-exist on the same pipeline - Included type meter and wildcard can't co-exist at same pipeline """ if not data: raise SourceException('No %s specified' % d_type, self.cfg) if (any(x for x in data if x[0] not in '!*') and any(x for x in data if x[0] == '!')): raise SourceException( 'Both included and excluded %s specified' % d_type, self.cfg) if '*' in data and any(x for x in data if x[0] not in '!*'): raise SourceException( 'Included %s specified with wildcard' % d_type, self.cfg) @staticmethod def is_supported(dataset, data_name): # Support wildcard like storage.* and !disk.* # Start with negation, we consider that the order is deny, allow if any(fnmatch.fnmatch(data_name, datapoint[1:]) for datapoint in dataset if datapoint[0] == '!'): return False if any(fnmatch.fnmatch(data_name, datapoint) for datapoint in dataset if datapoint[0] != '!'): return True # if we only have negation, we suppose the default is allow return all(datapoint.startswith('!') for datapoint in dataset) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7919414 ceilometer-24.1.0.dev59/ceilometer/alarm/0000775000175100017510000000000015033033521017210 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 
ceilometer-24.1.0.dev59/ceilometer/alarm/__init__.py0000664000175100017510000000000015033033467021320 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/alarm/aodh.py0000664000175100017510000000405515033033467020512 0ustar00mylesmyles# # Copyright 2025 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Common code for working with alarm metrics """ from ceilometer.polling import plugin_base from ceilometer import sample DEFAULT_GROUP = "service_credentials" class _Base(plugin_base.PollsterBase): @property def default_discovery(self): return 'alarm' class EvaluationResultPollster(_Base): @staticmethod def get_evaluation_results_metrics(metrics): evaluation_metrics = [] if "evaluation_results" in metrics: for metric in metrics["evaluation_results"]: for state, count in metric["state_counters"].items(): evaluation_metrics.append({ "name": "evaluation_result", "state": state, "count": count, "project_id": metric['project_id'], "alarm_id": metric['alarm_id'] }) return evaluation_metrics def get_samples(self, manager, cache, resources): metrics = self.get_evaluation_results_metrics(resources[0]) for metric in metrics: yield sample.Sample( name='alarm.' 
+ metric['name'], type=sample.TYPE_GAUGE, volume=int(metric['count']), unit='evaluation_result_count', user_id=None, project_id=metric['project_id'], resource_id=metric['alarm_id'], resource_metadata={"alarm_state": metric['state']}, ) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/alarm/discovery.py0000664000175100017510000000256115033033467021606 0ustar00mylesmyles# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from aodhclient import client as aodh_client from oslo_config import cfg from ceilometer import keystone_client from ceilometer.polling import plugin_base SERVICE_OPTS = [ cfg.StrOpt('aodh', default='alarming', help='Aodh service type.'), ] class AlarmDiscovery(plugin_base.DiscoveryBase): def __init__(self, conf): super().__init__(conf) creds = conf.service_credentials self.aodh_client = aodh_client.Client( version='2', session=keystone_client.get_session(conf), region_name=creds.region_name, interface=creds.interface, service_type=conf.service_types.aodh) def discover(self, manager, param=None): """Discover resources to monitor.""" return [self.aodh_client.metrics.get(all_projects=True)] ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/cache_utils.py0000664000175100017510000000560015033033467020763 0ustar00mylesmyles# # Copyright 2022 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the 
"License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Simple wrapper for oslo_cache.""" from keystoneauth1 import exceptions as ka_exceptions from oslo_cache import core as cache from oslo_cache import exception from oslo_log import log from ceilometer import keystone_client # Default cache expiration period CACHE_DURATION = 600 LOG = log.getLogger(__name__) class CacheClient: def __init__(self, region, conf): self.region = region self.conf = conf def get(self, key): value = self.region.get(key) if value == cache.NO_VALUE: return None return value def set(self, key, value): return self.region.set(key, value) def delete(self, key): return self.region.delete(key) def resolve_uuid_from_cache(self, attr, uuid): resource_name = self.get(uuid) if resource_name: return resource_name else: # Retrieve project and user names from Keystone only # if ceilometer doesn't have a caching backend resource_name = self._resolve_uuid_from_keystone(attr, uuid) self.set(uuid, resource_name) return resource_name def _resolve_uuid_from_keystone(self, attr, uuid): try: return getattr( keystone_client.get_client(self.conf), attr ).get(uuid).name except AttributeError as e: LOG.warning("Found '%s' while resolving uuid %s to name", e, uuid) except ka_exceptions.NotFound as e: LOG.warning(e.message) def get_client(conf): cache.configure(conf) if conf.cache.enabled: region = get_cache_region(conf) if region: return CacheClient(region, conf) else: # configure oslo_cache.dict backend if # no caching backend is configured region = get_dict_cache_region() return 
CacheClient(region, conf) def get_dict_cache_region(): region = cache.create_region() region.configure('oslo_cache.dict', expiration_time=CACHE_DURATION) return region def get_cache_region(conf): # configure caching region using params from config try: region = cache.create_region() cache.configure_cache_region(conf, region) return region except exception.ConfigurationError as e: LOG.error("failed to configure oslo_cache: %s", str(e)) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7919414 ceilometer-24.1.0.dev59/ceilometer/cmd/0000775000175100017510000000000015033033521016657 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/cmd/__init__.py0000664000175100017510000000000015033033467020767 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/cmd/agent_notification.py0000664000175100017510000000203315033033467023104 0ustar00mylesmyles# # Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import cotyledon from cotyledon import oslo_config_glue from oslo_log import log from ceilometer import notification from ceilometer import service LOG = log.getLogger(__name__) def main(): conf = service.prepare_service() conf.log_opt_values(LOG, log.DEBUG) sm = cotyledon.ServiceManager() sm.add(notification.NotificationService, workers=conf.notification.workers, args=(conf,)) oslo_config_glue.setup(sm, conf) sm.run() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/cmd/polling.py0000664000175100017510000000701415033033467020710 0ustar00mylesmyles# # Copyright 2014-2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import multiprocessing import shlex import cotyledon from cotyledon import oslo_config_glue from oslo_config import cfg from oslo_log import log from oslo_privsep import priv_context from ceilometer.polling import manager from ceilometer import service from ceilometer import utils LOG = log.getLogger(__name__) class MultiChoicesOpt(cfg.Opt): def __init__(self, name, choices=None, **kwargs): super().__init__( name, type=DeduplicatedCfgList(choices), **kwargs) self.choices = choices def _get_argparse_kwargs(self, group, **kwargs): """Extends the base argparse keyword dict for multi choices options.""" kwargs = super()._get_argparse_kwargs(group) kwargs['nargs'] = '+' choices = kwargs.get('choices', self.choices) if choices: kwargs['choices'] = choices return kwargs class DeduplicatedCfgList(cfg.types.List): def __init__(self, choices=None, **kwargs): super().__init__(**kwargs) self.choices = choices or [] def __call__(self, *args, **kwargs): result = super().__call__(*args, **kwargs) result_set = set(result) if len(result) != len(result_set): LOG.warning("Duplicated values: %s found in CLI options, " "auto de-duplicated", result) result = list(result_set) if self.choices and not (result_set <= set(self.choices)): raise Exception('Valid values are %s, but found %s' % (self.choices, result)) return result CLI_OPTS = [ MultiChoicesOpt('polling-namespaces', default=['compute', 'central'], dest='polling_namespaces', help='Polling namespace(s) to be used while ' 'resource polling') ] def _prepare_config(): conf = cfg.ConfigOpts() conf.register_cli_opts(CLI_OPTS) service.prepare_service(conf=conf) return conf def create_polling_service(worker_id, conf=None, queue=None): if conf is None: conf = _prepare_config() conf.log_opt_values(LOG, log.DEBUG) return manager.AgentManager(worker_id, conf, conf.polling_namespaces, queue) def create_heartbeat_service(worker_id, conf, queue=None): if conf is None: conf = _prepare_config() conf.log_opt_values(LOG, log.DEBUG) return 
manager.AgentHeartBeatManager(worker_id, conf, conf.polling_namespaces, queue) def main(): sm = cotyledon.ServiceManager() conf = _prepare_config() priv_context.init(root_helper=shlex.split(utils._get_root_helper())) oslo_config_glue.setup(sm, conf) if conf.polling.heartbeat_socket_dir is not None: queue = multiprocessing.Queue() sm.add(create_heartbeat_service, args=(conf, queue)) else: queue = None sm.add(create_polling_service, args=(conf, queue)) sm.run() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/cmd/sample.py0000664000175100017510000000604515033033467020530 0ustar00mylesmyles# # Copyright 2012-2014 Julien Danjou # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Command line tool for creating meter for Ceilometer. 
""" import ast import logging import sys from oslo_config import cfg from oslo_utils import timeutils from ceilometer.pipeline import sample as sample_pipe from ceilometer import sample from ceilometer import service def send_sample(): conf = cfg.ConfigOpts() conf.register_cli_opts([ cfg.StrOpt('sample-name', short='n', help='Meter name.', required=True), cfg.StrOpt('sample-type', short='y', help='Meter type.', default=sample.TYPE_GAUGE, choices=sample.TYPES), cfg.StrOpt('sample-unit', short='U', help='Meter unit.'), cfg.IntOpt('sample-volume', short='l', help='Meter volume value.', default=1), cfg.StrOpt('sample-resource', short='r', help='Meter resource id.', required=True), cfg.StrOpt('sample-user', short='u', help='Meter user id.'), cfg.StrOpt('sample-project', short='p', help='Meter project id.'), cfg.StrOpt('sample-timestamp', short='i', help='Meter timestamp.', default=timeutils.utcnow().isoformat()), cfg.StrOpt('sample-metadata', short='m', help='Meter metadata.'), ]) service.prepare_service(conf=conf) # Set up logging to use the console console = logging.StreamHandler(sys.stderr) console.setLevel(logging.DEBUG) formatter = logging.Formatter('%(message)s') console.setFormatter(formatter) root_logger = logging.getLogger('') root_logger.addHandler(console) root_logger.setLevel(logging.DEBUG) pipeline_manager = sample_pipe.SamplePipelineManager(conf) with pipeline_manager.publisher() as p: p([sample.Sample( name=conf.sample_name, type=conf.sample_type, unit=conf.sample_unit, volume=conf.sample_volume, user_id=conf.sample_user, project_id=conf.sample_project, resource_id=conf.sample_resource, timestamp=conf.sample_timestamp, resource_metadata=conf.sample_metadata and ast.literal_eval( conf.sample_metadata))]) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/cmd/status.py0000664000175100017510000000300615033033467020564 0ustar00mylesmyles# Copyright (c) 2018 NEC, Corp. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from oslo_config import cfg from oslo_upgradecheck import upgradecheck from ceilometer.i18n import _ CONF = cfg.CONF class Checks(upgradecheck.UpgradeCommands): """Contains upgrade checks Various upgrade checks should be added as separate methods in this class and added to _upgrade_checks tuple. """ def _sample_check(self): """This is sample check added to test the upgrade check framework It needs to be removed after adding any real upgrade check """ return upgradecheck.Result(upgradecheck.Code.SUCCESS, 'Sample detail') _upgrade_checks = ( # Sample check added for now. # Whereas in future real checks must be added here in tuple (_('Sample Check'), _sample_check), ) def main(): return upgradecheck.main( CONF, project='ceilometer', upgrade_command=Checks()) if __name__ == '__main__': sys.exit(main()) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/cmd/storage.py0000664000175100017510000000357315033033467020716 0ustar00mylesmyles# # Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log import tenacity from ceilometer import service LOG = log.getLogger(__name__) def upgrade(): conf = cfg.ConfigOpts() conf.register_cli_opts([ cfg.BoolOpt('skip-gnocchi-resource-types', help='Skip gnocchi resource-types upgrade.', default=False), cfg.IntOpt('retry', min=0, help='Number of times to retry on failure. ' 'Default is to retry forever.'), ]) service.prepare_service(conf=conf) if conf.skip_gnocchi_resource_types: LOG.info("Skipping Gnocchi resource types upgrade") else: LOG.debug("Upgrading Gnocchi resource types") from ceilometer import gnocchi_client from gnocchiclient import exceptions if conf.retry is None: stop = tenacity.stop_never else: stop = tenacity.stop_after_attempt(conf.retry) tenacity.Retrying( stop=stop, retry=tenacity.retry_if_exception_type(( exceptions.ConnectionFailure, exceptions.UnknownConnectionError, exceptions.ConnectionTimeout, exceptions.SSLError, )) )(gnocchi_client.upgrade_resource_types, conf) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7919414 ceilometer-24.1.0.dev59/ceilometer/compute/0000775000175100017510000000000015033033521017570 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/compute/__init__.py0000664000175100017510000000000015033033467021700 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 
ceilometer-24.1.0.dev59/ceilometer/compute/discovery.py0000664000175100017510000002603015033033467022163 0ustar00mylesmyles# # Copyright 2014 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import hashlib from lxml import etree import operator import threading import cachetools from novaclient import exceptions from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils from ceilometer.compute.virt.libvirt import utils as libvirt_utils from ceilometer import nova_client from ceilometer.polling import plugin_base OPTS = [ cfg.StrOpt('instance_discovery_method', default='libvirt_metadata', choices=[('naive', 'poll nova to get all instances'), ('workload_partitioning', 'poll nova to get instances of the compute'), ('libvirt_metadata', 'get instances from libvirt metadata but without ' 'instance metadata (recommended)')], help="Ceilometer offers many methods to discover the instance " "running on a compute node"), cfg.IntOpt('resource_update_interval', default=0, min=0, help="New instances will be discovered periodically based" " on this option (in seconds). By default, " "the agent discovers instances according to pipeline " "polling interval. If option is greater than 0, " "the instance list to poll will be updated based " "on this option's interval. Measurements relating " "to the instances will match intervals " "defined in pipeline. 
This option is only used " "for agent polling to Nova API, so it will work only " "when 'instance_discovery_method' is set to 'naive'."), cfg.IntOpt('resource_cache_expiry', default=3600, min=0, help="The expiry to totally refresh the instances resource " "cache, since the instance may be migrated to another " "host, we need to clean the legacy instances info in " "local cache by totally refreshing the local cache. " "The minimum should be the value of the config option " "of resource_update_interval. This option is only used " "for agent polling to Nova API, so it will work only " "when 'instance_discovery_method' is set to 'naive'.") ] LOG = log.getLogger(__name__) class NovaLikeServer: def __init__(self, **kwargs): self.id = kwargs.pop('id') for k, v in kwargs.items(): setattr(self, k, v) def __repr__(self): return '' % getattr(self, 'name', 'unknown-name') def __eq__(self, other): return self.id == other.id class InstanceDiscovery(plugin_base.DiscoveryBase): method = None def __init__(self, conf): super().__init__(conf) if not self.method: self.method = conf.compute.instance_discovery_method self.nova_cli = nova_client.Client(conf) self.expiration_time = conf.compute.resource_update_interval self.cache_expiry = conf.compute.resource_cache_expiry if self.method == "libvirt_metadata": # 4096 instances on a compute should be enough :) self._server_cache = cachetools.LRUCache(4096) else: self.lock = threading.Lock() self.instances = {} self.last_run = None self.last_cache_expire = None @property def connection(self): return libvirt_utils.refresh_libvirt_connection(self.conf, self) def discover(self, manager, param=None): """Discover resources to monitor.""" if self.method != "libvirt_metadata": return self.discover_nova_polling(manager, param=None) else: return self.discover_libvirt_polling(manager, param=None) @staticmethod def _safe_find_int(xml, path): elem = xml.find("./%s" % path) if elem is not None: return int(elem.text) return 0 
@cachetools.cachedmethod(operator.attrgetter('_server_cache')) def get_server(self, uuid): try: return self.nova_cli.nova_client.servers.get(uuid) except exceptions.NotFound: return None @libvirt_utils.retry_on_disconnect def discover_libvirt_polling(self, manager, param=None): instances = [] for domain in self.connection.listAllDomains(): xml_string = libvirt_utils.instance_metadata(domain) if xml_string is None: continue full_xml = etree.fromstring(domain.XMLDesc()) os_type_xml = full_xml.find("./os/type") metadata_xml = etree.fromstring(xml_string) # TODO(sileht, jwysogla): We don't have the flavor ID # and server metadata here. We currently poll nova to get # the flavor ID, but storing the # flavor_id doesn't have any sense because the flavor description # can change over the time, we should store the detail of the # flavor. this is why nova doesn't put the id in the libvirt # metadata. I think matadata field could be eventually added to # the libvirt matadata created by nova. try: flavor_xml = metadata_xml.find( "./flavor") user_id = metadata_xml.find( "./owner/user").attrib["uuid"] project_id = metadata_xml.find( "./owner/project").attrib["uuid"] instance_name = metadata_xml.find( "./name").text instance_arch = os_type_xml.attrib["arch"] server = self.get_server(domain.UUIDString()) flavor_id = (server.flavor["id"] if server is not None else flavor_xml.attrib["name"]) flavor = { "id": flavor_id, "name": flavor_xml.attrib["name"], "vcpus": self._safe_find_int(flavor_xml, "vcpus"), "ram": self._safe_find_int(flavor_xml, "memory"), "disk": self._safe_find_int(flavor_xml, "disk"), "ephemeral": self._safe_find_int(flavor_xml, "ephemeral"), "swap": self._safe_find_int(flavor_xml, "swap"), } # The image description is partial, but Gnocchi only care about # the id, so we are fine image_xml = metadata_xml.find("./root[@type='image']") image = ({'id': image_xml.attrib['uuid']} if image_xml is not None else None) metadata = server.metadata if server is not None else {} 
except AttributeError: LOG.error( "Fail to get domain uuid %s metadata: " "metadata was missing expected attributes", domain.UUIDString()) continue dom_state = domain.state()[0] vm_state = libvirt_utils.LIBVIRT_POWER_STATE.get(dom_state) status = libvirt_utils.LIBVIRT_STATUS.get(dom_state) # From: # https://github.com/openstack/nova/blob/852f40fd0c6e9d8878212ff3120556668023f1c4/nova/api/openstack/compute/views/servers.py#L214-L220 host_id = hashlib.sha224( (project_id + self.conf.host).encode('utf-8')).hexdigest() instance_data = { "id": domain.UUIDString(), "name": instance_name, "flavor": flavor, "image": image, "os_type": os_type_xml.text, "architecture": instance_arch, "OS-EXT-SRV-ATTR:instance_name": domain.name(), "OS-EXT-SRV-ATTR:host": self.conf.host, "OS-EXT-STS:vm_state": vm_state, "tenant_id": project_id, "user_id": user_id, "hostId": host_id, "status": status, # NOTE(sileht): Other fields that Ceilometer tracks # where we can't get the value here, but their are # retrieved by notification "metadata": metadata, # "OS-EXT-STS:task_state" # 'reservation_id', # 'OS-EXT-AZ:availability_zone', # 'kernel_id', # 'ramdisk_id', # some image detail } LOG.debug("instance data: %s", instance_data) instances.append(NovaLikeServer(**instance_data)) return instances def discover_nova_polling(self, manager, param=None): secs_from_last_update = 0 utc_now = timeutils.utcnow(True) secs_from_last_expire = 0 if self.last_run: secs_from_last_update = timeutils.delta_seconds( self.last_run, utc_now) if self.last_cache_expire: secs_from_last_expire = timeutils.delta_seconds( self.last_cache_expire, utc_now) instances = [] # NOTE(ityaptin) we update make a nova request only if # it's a first discovery or resources expired with self.lock: if (not self.last_run or secs_from_last_update >= self.expiration_time): try: if (secs_from_last_expire < self.cache_expiry and self.last_run): since = self.last_run.isoformat() else: since = None self.instances.clear() self.last_cache_expire = 
utc_now instances = self.nova_cli.instance_get_all_by_host( self.conf.host, since) self.last_run = utc_now except Exception: # NOTE(zqfan): instance_get_all_by_host is wrapped and will # log exception when there is any error. It is no need to # raise it again and print one more time. return [] for instance in instances: if getattr(instance, 'OS-EXT-STS:vm_state', None) in [ 'deleted', 'error']: self.instances.pop(instance.id, None) else: self.instances[instance.id] = instance return self.instances.values() @property def group_id(self): return self.conf.host ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7929416 ceilometer-24.1.0.dev59/ceilometer/compute/pollsters/0000775000175100017510000000000015033033521021617 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/compute/pollsters/__init__.py0000664000175100017510000001763315033033467023753 0ustar00mylesmyles# Copyright 2014 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import collections from time import monotonic as now from oslo_log import log from oslo_utils import timeutils import ceilometer from ceilometer.compute.pollsters import util from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.polling import plugin_base from ceilometer import sample LOG = log.getLogger(__name__) class NoVolumeException(Exception): pass class GenericComputePollster(plugin_base.PollsterBase): """This class aims to cache instance statistics data First polled pollsters that inherit of this will retrieve and cache stats of an instance, then other pollsters will just build the samples without queyring the backend anymore. """ sample_name = None sample_unit = '' sample_type = sample.TYPE_GAUGE sample_stats_key = None inspector_method = None def setup_environment(self): super().setup_environment() self.inspector = GenericComputePollster._get_inspector(self.conf) @staticmethod def aggregate_method(stats): # Don't aggregate anything by default return stats @staticmethod def _get_inspector(conf): # FIXME(sileht): This doesn't looks threadsafe... try: inspector = GenericComputePollster._inspector except AttributeError: inspector = virt_inspector.get_hypervisor_inspector(conf) GenericComputePollster._inspector = inspector return inspector @property def default_discovery(self): return 'local_instances' def _record_poll_time(self): """Method records current time as the poll time. 
:return: time in seconds since the last poll time was recorded """ current_time = timeutils.utcnow() duration = None if hasattr(self, '_last_poll_time'): duration = timeutils.delta_seconds(self._last_poll_time, current_time) self._last_poll_time = current_time return duration @staticmethod def get_additional_metadata(instance, stats): pass @staticmethod def get_resource_id(instance, stats): return instance.id def _inspect_cached(self, cache, instance, duration): cache.setdefault(self.inspector_method, {}) if instance.id not in cache[self.inspector_method]: result = getattr(self.inspector, self.inspector_method)( instance, duration) polled_time = now() # Ensure we don't cache an iterator if isinstance(result, collections.abc.Iterable): result = list(result) else: result = [result] cache[self.inspector_method][instance.id] = (polled_time, result) return cache[self.inspector_method][instance.id] def _stats_to_sample(self, instance, stats, polled_time): volume = getattr(stats, self.sample_stats_key) LOG.debug("%(instance_id)s/%(name)s volume: " "%(volume)s" % { 'name': self.sample_name, 'instance_id': instance.id, 'volume': (volume if volume is not None else 'Unavailable')}) if volume is None: raise NoVolumeException() return util.make_sample_from_instance( self.conf, instance, name=self.sample_name, unit=self.sample_unit, type=self.sample_type, resource_id=self.get_resource_id(instance, stats), volume=volume, additional_metadata=self.get_additional_metadata( instance, stats), monotonic_time=polled_time, ) def get_samples(self, manager, cache, resources): self._inspection_duration = self._record_poll_time() for instance in resources: try: polled_time, result = self._inspect_cached( cache, instance, self._inspection_duration) if not result: continue for stats in self.aggregate_method(result): yield self._stats_to_sample(instance, stats, polled_time) except NoVolumeException: # FIXME(sileht): This should be a removed... 
but I will # not change the test logic for now LOG.warning("%(name)s statistic in not available for " "instance %(instance_id)s" % {'name': self.sample_name, 'instance_id': instance.id}) except virt_inspector.InstanceNotFoundException as err: # Instance was deleted while getting samples. Ignore it. LOG.debug('Exception while getting samples %s', err) except virt_inspector.InstanceShutOffException as e: LOG.debug('Instance %(instance_id)s was shut off while ' 'getting sample of %(name)s: %(exc)s', {'instance_id': instance.id, 'name': self.sample_name, 'exc': e}) except virt_inspector.NoDataException as e: LOG.warning('Cannot inspect data of %(pollster)s for ' '%(instance_id)s, non-fatal reason: %(exc)s', {'pollster': self.__class__.__name__, 'instance_id': instance.id, 'exc': e}) except ceilometer.NotImplementedError: # Selected inspector does not implement this pollster. LOG.debug('%(inspector)s does not provide data for ' '%(pollster)s', {'inspector': self.inspector.__class__.__name__, 'pollster': self.__class__.__name__}) raise plugin_base.PollsterPermanentError(resources) except Exception as err: LOG.error( 'Could not get %(name)s events for %(id)s: %(e)s', { 'name': self.sample_name, 'id': instance.id, 'e': err}, exc_info=True) class InstanceMetadataPollster(plugin_base.PollsterBase): """A base class for implementing a pollster using instance metadata. This metadata is originally supplied by Nova, but if instance_discovery_method is set to libvirt_metadata, metadata is fetched from the local libvirt socket, just like with the standard compute pollsters. 
""" sample_name = None sample_unit = '' sample_type = sample.TYPE_GAUGE @property def default_discovery(self): return 'local_instances' def get_resource_id(self, instance): return instance.id def get_volume(self, instance): raise ceilometer.NotImplementedError def get_additional_metadata(self, instance): return {} def get_samples(self, manager, cache, resources): for instance in resources: yield util.make_sample_from_instance( self.conf, instance, name=self.sample_name, unit=self.sample_unit, type=self.sample_type, resource_id=self.get_resource_id(instance), volume=self.get_volume(instance), additional_metadata=self.get_additional_metadata(instance), monotonic_time=now(), ) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/compute/pollsters/disk.py0000664000175100017510000000652115033033467023140 0ustar00mylesmyles# # Copyright 2012 eNovance # Copyright 2012 Red Hat, Inc # Copyright 2014 Cisco Systems, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from ceilometer.compute import pollsters from ceilometer import sample class PerDeviceDiskPollster(pollsters.GenericComputePollster): inspector_method = "inspect_disks" @staticmethod def get_resource_id(instance, stats): return "{}-{}".format(instance.id, stats.device) @staticmethod def get_additional_metadata(instance, stats): return {'disk_name': stats.device} class PerDeviceReadRequestsPollster(PerDeviceDiskPollster): sample_name = 'disk.device.read.requests' sample_unit = 'request' sample_type = sample.TYPE_CUMULATIVE sample_stats_key = 'read_requests' class PerDeviceReadBytesPollster(PerDeviceDiskPollster): sample_name = 'disk.device.read.bytes' sample_unit = 'B' sample_type = sample.TYPE_CUMULATIVE sample_stats_key = 'read_bytes' class PerDeviceWriteRequestsPollster(PerDeviceDiskPollster): sample_name = 'disk.device.write.requests' sample_unit = 'request' sample_type = sample.TYPE_CUMULATIVE sample_stats_key = 'write_requests' class PerDeviceWriteBytesPollster(PerDeviceDiskPollster): sample_name = 'disk.device.write.bytes' sample_unit = 'B' sample_type = sample.TYPE_CUMULATIVE sample_stats_key = 'write_bytes' class PerDeviceCapacityPollster(PerDeviceDiskPollster): inspector_method = 'inspect_disk_info' sample_name = 'disk.device.capacity' sample_unit = 'B' sample_stats_key = 'capacity' class PerDeviceAllocationPollster(PerDeviceDiskPollster): inspector_method = 'inspect_disk_info' sample_name = 'disk.device.allocation' sample_unit = 'B' sample_stats_key = 'allocation' class PerDevicePhysicalPollster(PerDeviceDiskPollster): inspector_method = 'inspect_disk_info' sample_name = 'disk.device.usage' sample_unit = 'B' sample_stats_key = 'physical' class PerDeviceDiskReadLatencyPollster(PerDeviceDiskPollster): sample_name = 'disk.device.read.latency' sample_type = sample.TYPE_CUMULATIVE sample_unit = 'ns' sample_stats_key = 'rd_total_times' class PerDeviceDiskWriteLatencyPollster(PerDeviceDiskPollster): sample_name = 'disk.device.write.latency' sample_type = 
sample.TYPE_CUMULATIVE sample_unit = 'ns' sample_stats_key = 'wr_total_times' class EphemeralSizePollster(pollsters.InstanceMetadataPollster): sample_name = 'disk.ephemeral.size' sample_unit = 'GB' def get_volume(self, instance): return int(instance.flavor['ephemeral']) class RootSizePollster(pollsters.InstanceMetadataPollster): sample_name = 'disk.root.size' sample_unit = 'GB' def get_volume(self, instance): return (int(instance.flavor['disk']) - int(instance.flavor['ephemeral'])) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/compute/pollsters/instance_stats.py0000664000175100017510000000460515033033467025231 0ustar00mylesmyles# # Copyright 2012 eNovance # Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from ceilometer.compute import pollsters from ceilometer import sample class InstanceStatsPollster(pollsters.GenericComputePollster): inspector_method = 'inspect_instance' class PowerStatePollster(InstanceStatsPollster): sample_name = 'power.state' sample_stats_key = 'power_state' class CPUPollster(InstanceStatsPollster): sample_name = 'cpu' sample_unit = 'ns' sample_stats_key = 'cpu_time' sample_type = sample.TYPE_CUMULATIVE @staticmethod def get_additional_metadata(instance, c_data): return {'cpu_number': c_data.cpu_number} class MemoryUsagePollster(InstanceStatsPollster): sample_name = 'memory.usage' sample_unit = 'MB' sample_stats_key = 'memory_usage' class MemoryResidentPollster(InstanceStatsPollster): sample_name = 'memory.resident' sample_unit = 'MB' sample_stats_key = 'memory_resident' class MemorySwapInPollster(InstanceStatsPollster): sample_name = 'memory.swap.in' sample_unit = 'MB' sample_stats_key = 'memory_swap_in' sample_type = sample.TYPE_CUMULATIVE class MemorySwapOutPollster(InstanceStatsPollster): sample_name = 'memory.swap.out' sample_unit = 'MB' sample_stats_key = 'memory_swap_out' sample_type = sample.TYPE_CUMULATIVE class PerfCPUCyclesPollster(InstanceStatsPollster): sample_name = 'perf.cpu.cycles' sample_stats_key = 'cpu_cycles' class PerfInstructionsPollster(InstanceStatsPollster): sample_name = 'perf.instructions' sample_stats_key = 'instructions' class PerfCacheReferencesPollster(InstanceStatsPollster): sample_name = 'perf.cache.references' sample_stats_key = 'cache_references' class PerfCacheMissesPollster(InstanceStatsPollster): sample_name = 'perf.cache.misses' sample_stats_key = 'cache_misses' ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/compute/pollsters/net.py0000664000175100017510000000746715033033467023006 0ustar00mylesmyles# # Copyright 2012 eNovance # Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); 
you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.compute import pollsters from ceilometer.compute.pollsters import util from ceilometer import sample class NetworkPollster(pollsters.GenericComputePollster): inspector_method = "inspect_vnics" @staticmethod def get_additional_metadata(instance, stats): additional_stats = {k: getattr(stats, k) for k in ["name", "mac", "fref", "parameters"]} if stats.fref is not None: additional_stats['vnic_name'] = stats.fref else: additional_stats['vnic_name'] = stats.name return additional_stats @staticmethod def get_resource_id(instance, stats): if stats.fref is not None: return stats.fref else: instance_name = util.instance_name(instance) return "{}-{}-{}".format(instance_name, instance.id, stats.name) class IncomingBytesPollster(NetworkPollster): sample_name = 'network.incoming.bytes' sample_type = sample.TYPE_CUMULATIVE sample_unit = 'B' sample_stats_key = 'rx_bytes' class IncomingPacketsPollster(NetworkPollster): sample_name = 'network.incoming.packets' sample_type = sample.TYPE_CUMULATIVE sample_unit = 'packet' sample_stats_key = 'rx_packets' class OutgoingBytesPollster(NetworkPollster): sample_name = 'network.outgoing.bytes' sample_type = sample.TYPE_CUMULATIVE sample_unit = 'B' sample_stats_key = 'tx_bytes' class OutgoingPacketsPollster(NetworkPollster): sample_name = 'network.outgoing.packets' sample_type = sample.TYPE_CUMULATIVE sample_unit = 'packet' sample_stats_key = 'tx_packets' class IncomingBytesRatePollster(NetworkPollster): inspector_method = "inspect_vnic_rates" sample_name = 
'network.incoming.bytes.rate' sample_unit = 'B/s' sample_stats_key = 'rx_bytes_rate' class OutgoingBytesRatePollster(NetworkPollster): inspector_method = "inspect_vnic_rates" sample_name = 'network.outgoing.bytes.rate' sample_unit = 'B/s' sample_stats_key = 'tx_bytes_rate' class IncomingDropPollster(NetworkPollster): sample_name = 'network.incoming.packets.drop' sample_type = sample.TYPE_CUMULATIVE sample_unit = 'packet' sample_stats_key = 'rx_drop' class OutgoingDropPollster(NetworkPollster): sample_name = 'network.outgoing.packets.drop' sample_type = sample.TYPE_CUMULATIVE sample_unit = 'packet' sample_stats_key = 'tx_drop' class IncomingErrorsPollster(NetworkPollster): sample_name = 'network.incoming.packets.error' sample_type = sample.TYPE_CUMULATIVE sample_unit = 'packet' sample_stats_key = 'rx_errors' class OutgoingErrorsPollster(NetworkPollster): sample_name = 'network.outgoing.packets.error' sample_type = sample.TYPE_CUMULATIVE sample_unit = 'packet' sample_stats_key = 'tx_errors' class IncomingBytesDeltaPollster(NetworkPollster): sample_name = 'network.incoming.bytes.delta' sample_type = sample.TYPE_DELTA sample_unit = 'B' sample_stats_key = 'rx_bytes_delta' class OutgoingBytesDeltaPollster(NetworkPollster): sample_name = 'network.outgoing.bytes.delta' sample_type = sample.TYPE_DELTA sample_unit = 'B' sample_stats_key = 'tx_bytes_delta' ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/compute/pollsters/util.py0000664000175100017510000000670015033033467023162 0ustar00mylesmyles# # Copyright 2012 eNovance # Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer import sample INSTANCE_PROPERTIES = [ # Identity properties 'reservation_id', # Type properties 'architecture', 'OS-EXT-AZ:availability_zone', 'kernel_id', 'os_type', 'ramdisk_id', ] def _get_metadata_from_object(conf, instance): """Return a metadata dictionary for the instance.""" instance_type = instance.flavor['name'] if instance.flavor else None metadata = { 'display_name': instance.name, 'name': getattr(instance, 'OS-EXT-SRV-ATTR:instance_name', ''), 'instance_id': instance.id, 'instance_type': instance_type, 'host': instance.hostId, 'instance_host': getattr(instance, 'OS-EXT-SRV-ATTR:host', ''), 'flavor': instance.flavor, 'status': instance.status.lower(), 'state': getattr(instance, 'OS-EXT-STS:vm_state', ''), 'task_state': getattr(instance, 'OS-EXT-STS:task_state', ''), } # Image properties if instance.image: metadata['image'] = instance.image metadata['image_ref'] = instance.image['id'] # Images that come through the conductor API in the nova notifier # plugin will not have links. 
if instance.image.get('links'): metadata['image_ref_url'] = instance.image['links'][0]['href'] else: metadata['image_ref_url'] = None else: metadata['image'] = None metadata['image_ref'] = None metadata['image_ref_url'] = None for name in INSTANCE_PROPERTIES: if hasattr(instance, name): metadata[name] = getattr(instance, name) metadata['vcpus'] = instance.flavor['vcpus'] metadata['memory_mb'] = instance.flavor['ram'] metadata['disk_gb'] = instance.flavor['disk'] metadata['ephemeral_gb'] = instance.flavor['ephemeral'] metadata['root_gb'] = (int(metadata['disk_gb']) - int(metadata['ephemeral_gb'])) return sample.add_reserved_user_metadata(conf, instance.metadata, metadata) def make_sample_from_instance(conf, instance, name, type, unit, volume, resource_id=None, additional_metadata=None, monotonic_time=None): additional_metadata = additional_metadata or {} resource_metadata = _get_metadata_from_object(conf, instance) resource_metadata.update(additional_metadata) return sample.Sample( name=name, type=type, unit=unit, volume=volume, user_id=instance.user_id, project_id=instance.tenant_id, resource_id=resource_id or instance.id, resource_metadata=resource_metadata, monotonic_time=monotonic_time, ) def instance_name(instance): """Shortcut to get instance name.""" return getattr(instance, 'OS-EXT-SRV-ATTR:instance_name', None) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7929416 ceilometer-24.1.0.dev59/ceilometer/compute/virt/0000775000175100017510000000000015033033521020554 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/compute/virt/__init__.py0000664000175100017510000000000015033033467022664 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/compute/virt/inspector.py0000664000175100017510000002042315033033467023146 
0ustar00mylesmyles# # Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Inspector abstraction for read-only access to hypervisors.""" import collections from oslo_config import cfg from oslo_log import log from stevedore import driver import ceilometer OPTS = [ cfg.StrOpt('hypervisor_inspector', default='libvirt', choices=['libvirt'], deprecated_for_removal=True, deprecated_reason='libvirt is the only supported hypervisor', help='Inspector to use for inspecting the hypervisor layer.') ] LOG = log.getLogger(__name__) # Named tuple representing instance statistics class InstanceStats: fields = [ 'power_state', # the power state of the domain 'cpu_number', # number: number of CPUs 'cpu_time', # time: cumulative CPU time 'memory_usage', # usage: Amount of memory used 'memory_resident', # 'memory_swap_in', # memory swap in 'memory_swap_out', # memory swap out 'cpu_cycles', # cpu_cycles: the number of cpu cycles one # instruction needs 'instructions', # instructions: the count of instructions 'cache_references', # cache_references: the count of cache hits 'cache_misses', # cache_misses: the count of caches misses ] def __init__(self, **kwargs): for k in self.fields: setattr(self, k, kwargs.pop(k, None)) if kwargs: raise AttributeError( "'InstanceStats' object has no attributes '%s'" % kwargs) # Named tuple representing vNIC statistics. 
# # name: the name of the vNIC # mac: the MAC address # fref: the filter ref # parameters: miscellaneous parameters # rx_bytes: number of received bytes # rx_packets: number of received packets # tx_bytes: number of transmitted bytes # tx_packets: number of transmitted packets # InterfaceStats = collections.namedtuple('InterfaceStats', ['name', 'mac', 'fref', 'parameters', 'rx_bytes', 'tx_bytes', 'rx_packets', 'tx_packets', 'rx_drop', 'tx_drop', 'rx_errors', 'tx_errors', 'rx_bytes_delta', 'tx_bytes_delta']) # Named tuple representing vNIC rate statistics. # # name: the name of the vNIC # mac: the MAC address # fref: the filter ref # parameters: miscellaneous parameters # rx_bytes_rate: rate of received bytes # tx_bytes_rate: rate of transmitted bytes # InterfaceRateStats = collections.namedtuple('InterfaceRateStats', ['name', 'mac', 'fref', 'parameters', 'rx_bytes_rate', 'tx_bytes_rate']) # Named tuple representing disk statistics. # # read_bytes: number of bytes read # read_requests: number of read operations # write_bytes: number of bytes written # write_requests: number of write operations # errors: number of errors # DiskStats = collections.namedtuple('DiskStats', ['device', 'read_bytes', 'read_requests', 'write_bytes', 'write_requests', 'errors', 'wr_total_times', 'rd_total_times']) # Named tuple representing disk rate statistics. # # read_bytes_rate: number of bytes read per second # read_requests_rate: number of read operations per second # write_bytes_rate: number of bytes written per second # write_requests_rate: number of write operations per second # DiskRateStats = collections.namedtuple('DiskRateStats', ['device', 'read_bytes_rate', 'read_requests_rate', 'write_bytes_rate', 'write_requests_rate']) # Named tuple representing disk Information. 
# # capacity: capacity of the disk # allocation: allocation of the disk # physical: usage of the disk DiskInfo = collections.namedtuple('DiskInfo', ['device', 'capacity', 'allocation', 'physical']) # Exception types # class InspectorException(Exception): def __init__(self, message=None): super().__init__(message) class InstanceNotFoundException(InspectorException): pass class InstanceShutOffException(InspectorException): pass class NoDataException(InspectorException): pass # Main virt inspector abstraction layering over the hypervisor API. # class Inspector: def __init__(self, conf): self.conf = conf def inspect_instance(self, instance, duration): """Inspect the CPU statistics for an instance. :param instance: the target instance :param duration: the last 'n' seconds, over which the value should be inspected :return: the instance stats """ raise ceilometer.NotImplementedError def inspect_vnics(self, instance, duration): """Inspect the vNIC statistics for an instance. :param instance: the target instance :param duration: the last 'n' seconds, over which the value should be inspected :return: for each vNIC, the number of bytes & packets received and transmitted """ raise ceilometer.NotImplementedError def inspect_vnic_rates(self, instance, duration): """Inspect the vNIC rate statistics for an instance. :param instance: the target instance :param duration: the last 'n' seconds, over which the value should be inspected :return: for each vNIC, the rate of bytes & packets received and transmitted """ raise ceilometer.NotImplementedError def inspect_disks(self, instance, duration): """Inspect the disk statistics for an instance. 
:param instance: the target instance :param duration: the last 'n' seconds, over which the value should be inspected :return: for each disk, the number of bytes & operations read and written, and the error count """ raise ceilometer.NotImplementedError def inspect_disk_rates(self, instance, duration): """Inspect the disk statistics as rates for an instance. :param instance: the target instance :param duration: the last 'n' seconds, over which the value should be inspected :return: for each disk, the number of bytes & operations read and written per second, with the error count """ raise ceilometer.NotImplementedError def inspect_disk_info(self, instance, duration): """Inspect the disk information for an instance. :param instance: the target instance :param duration: the last 'n' seconds, over which the value should be inspected :return: for each disk , capacity , allocation and usage """ raise ceilometer.NotImplementedError def get_hypervisor_inspector(conf): try: namespace = 'ceilometer.compute.virt' mgr = driver.DriverManager(namespace, conf.hypervisor_inspector, invoke_on_load=True, invoke_args=(conf, )) return mgr.driver except ImportError as e: LOG.error("Unable to load the hypervisor inspector: %s" % e) return Inspector(conf) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7929416 ceilometer-24.1.0.dev59/ceilometer/compute/virt/libvirt/0000775000175100017510000000000015033033521022227 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/compute/virt/libvirt/__init__.py0000664000175100017510000000000015033033467024337 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/compute/virt/libvirt/inspector.py0000664000175100017510000002564115033033467024630 0ustar00mylesmyles# # Copyright 2012 Red Hat, Inc # # Licensed under the Apache 
License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of Inspector abstraction for libvirt.""" from lxml import etree from oslo_log import log as logging from oslo_utils import units try: import libvirt except ImportError: libvirt = None from ceilometer.compute.pollsters import util from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.compute.virt.libvirt import utils as libvirt_utils from ceilometer.i18n import _ LOG = logging.getLogger(__name__) class LibvirtInspector(virt_inspector.Inspector): def __init__(self, conf): super().__init__(conf) # NOTE(sileht): create a connection on startup self.connection self.cache = {} @property def connection(self): return libvirt_utils.refresh_libvirt_connection(self.conf, self) def _lookup_by_uuid(self, instance): instance_name = util.instance_name(instance) try: return self.connection.lookupByUUIDString(instance.id) except libvirt.libvirtError as ex: if libvirt_utils.is_disconnection_exception(ex): raise msg = _("Error from libvirt while looking up instance " ": " "[Error Code %(error_code)s] " "%(ex)s") % {'name': instance_name, 'id': instance.id, 'error_code': ex.get_error_code(), 'ex': ex} raise virt_inspector.InstanceNotFoundException(msg) except Exception as ex: raise virt_inspector.InspectorException(str(ex)) def _get_domain_not_shut_off_or_raise(self, instance): instance_name = util.instance_name(instance) domain = self._lookup_by_uuid(instance) state = domain.info()[0] if state == libvirt.VIR_DOMAIN_SHUTOFF: msg = _('Failed 
to inspect data of instance ' ', ' 'domain state is SHUTOFF.') % { 'name': instance_name, 'id': instance.id} raise virt_inspector.InstanceShutOffException(msg) return domain @libvirt_utils.retry_on_disconnect def inspect_vnics(self, instance, duration): domain = self._get_domain_not_shut_off_or_raise(instance) tree = etree.fromstring(domain.XMLDesc(0)) for iface in tree.findall('devices/interface'): target = iface.find('target') if target is not None: name = target.get('dev') else: continue mac = iface.find('mac') if mac is not None: mac_address = mac.get('address') else: continue fref = iface.find('filterref') if fref is not None: fref = fref.get('filter') params = {p.get('name').lower(): p.get('value') for p in iface.findall('filterref/parameter')} # Extract interface ID try: interfaceid = iface.find('virtualport').find( 'parameters').get('interfaceid') except AttributeError: interfaceid = None # Extract source bridge try: bridge = iface.find('source').get('bridge') except AttributeError: bridge = None params['interfaceid'] = interfaceid params['bridge'] = bridge try: dom_stats = domain.interfaceStats(name) except libvirt.libvirtError as ex: LOG.warning(_("Error from libvirt when running instanceStats, " "This may not be harmful, but please check : " "%(ex)s") % {'ex': ex}) continue # Retrieve previous values prev = self.cache.get(name) # Store values for next call self.cache[name] = dom_stats if prev: # Compute stats rx_delta = dom_stats[0] - prev[0] tx_delta = dom_stats[4] - prev[4] # Avoid negative values if rx_delta < 0: rx_delta = dom_stats[0] if tx_delta < 0: tx_delta = dom_stats[4] else: LOG.debug('No delta meter predecessor for %s / %s' % (instance.id, name)) rx_delta = 0 tx_delta = 0 yield virt_inspector.InterfaceStats(name=name, mac=mac_address, fref=fref, parameters=params, rx_bytes=dom_stats[0], rx_packets=dom_stats[1], rx_errors=dom_stats[2], rx_drop=dom_stats[3], rx_bytes_delta=rx_delta, tx_bytes=dom_stats[4], tx_packets=dom_stats[5], 
tx_errors=dom_stats[6], tx_drop=dom_stats[7], tx_bytes_delta=tx_delta) @staticmethod def _get_disk_devices(domain): tree = etree.fromstring(domain.XMLDesc(0)) return filter(bool, [target.get("dev") for target in tree.findall('devices/disk/target') if target.getparent().find('source') is not None]) @libvirt_utils.retry_on_disconnect def inspect_disks(self, instance, duration): domain = self._get_domain_not_shut_off_or_raise(instance) for device in self._get_disk_devices(domain): try: block_stats = domain.blockStats(device) block_stats_flags = domain.blockStatsFlags(device, 0) yield virt_inspector.DiskStats( device=device, read_requests=block_stats[0], read_bytes=block_stats[1], write_requests=block_stats[2], write_bytes=block_stats[3], errors=block_stats[4], wr_total_times=block_stats_flags['wr_total_times'], rd_total_times=block_stats_flags['rd_total_times']) except libvirt.libvirtError as ex: # raised error even if lock is acquired while live migration, # even it looks normal. LOG.warning(_("Error from libvirt while checking blockStats, " "This may not be harmful, but please check : " "%(ex)s") % {'ex': ex}) pass @libvirt_utils.retry_on_disconnect def inspect_disk_info(self, instance, duration): domain = self._get_domain_not_shut_off_or_raise(instance) for device in self._get_disk_devices(domain): block_info = domain.blockInfo(device) # if vm mount cdrom, libvirt will align by 4K bytes, capacity may # be smaller than physical, avoid with this. 
# https://libvirt.org/html/libvirt-libvirt-domain.html disk_capacity = max(block_info[0], block_info[2]) yield virt_inspector.DiskInfo(device=device, capacity=disk_capacity, allocation=block_info[1], physical=block_info[2]) @libvirt_utils.raise_nodata_if_unsupported @libvirt_utils.retry_on_disconnect def inspect_instance(self, instance, duration=None): domain = self._get_domain_not_shut_off_or_raise(instance) memory_used = memory_resident = None memory_swap_in = memory_swap_out = None memory_stats = domain.memoryStats() # Stat provided from libvirt is in KB, converting it to MB. if 'usable' in memory_stats and 'available' in memory_stats: memory_used = (memory_stats['available'] - memory_stats['usable']) / units.Ki elif 'available' in memory_stats and 'unused' in memory_stats: memory_used = (memory_stats['available'] - memory_stats['unused']) / units.Ki if 'rss' in memory_stats: memory_resident = memory_stats['rss'] / units.Ki if 'swap_in' in memory_stats and 'swap_out' in memory_stats: memory_swap_in = memory_stats['swap_in'] / units.Ki memory_swap_out = memory_stats['swap_out'] / units.Ki # TODO(sileht): stats also have the disk/vnic info # we could use that instead of the old method for Queen stats = self.connection.domainListGetStats([domain], 0)[0][1] cpu_time = 0 current_cpus = stats.get('vcpu.current') # Iterate over the maximum number of CPUs here, and count the # actual number encountered, since the vcpu.x structure can # have holes according to # https://libvirt.org/git/?p=libvirt.git;a=blob;f=src/libvirt-domain.c # virConnectGetAllDomainStats() for vcpu in range(stats.get('vcpu.maximum', 0)): try: cpu_time += (stats.get('vcpu.%s.time' % vcpu) + stats.get('vcpu.%s.wait' % vcpu)) current_cpus -= 1 except TypeError: # pass here, if there are too many holes, the cpu count will # not match, so don't need special error handling. 
pass if current_cpus: # There wasn't enough data, so fall back cpu_time = stats.get('cpu.time') return virt_inspector.InstanceStats( power_state=domain.info()[0], cpu_number=stats.get('vcpu.current'), cpu_time=cpu_time, memory_usage=memory_used, memory_resident=memory_resident, memory_swap_in=memory_swap_in, memory_swap_out=memory_swap_out, cpu_cycles=stats.get("perf.cpu_cycles"), instructions=stats.get("perf.instructions"), cache_references=stats.get("perf.cache_references"), cache_misses=stats.get("perf.cache_misses") ) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/compute/virt/libvirt/utils.py0000664000175100017510000001241315033033467023753 0ustar00mylesmyles# # Copyright 2016 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from oslo_log import log as logging import tenacity try: import libvirt except ImportError: libvirt = None from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.i18n import _ LOG = logging.getLogger(__name__) OPTS = [ cfg.StrOpt('libvirt_type', default='kvm', choices=['kvm', 'lxc', 'qemu', 'parallels'], help='Libvirt domain type.'), cfg.StrOpt('libvirt_uri', default='', help='Override the default libvirt URI ' '(which is dependent on libvirt_type).'), ] LIBVIRT_PER_TYPE_URIS = dict( parallels='parallels:///system', lxc='lxc:///') # We don't use the libvirt constants in case of libvirt is not available VIR_DOMAIN_NOSTATE = 0 VIR_DOMAIN_RUNNING = 1 VIR_DOMAIN_BLOCKED = 2 VIR_DOMAIN_PAUSED = 3 VIR_DOMAIN_SHUTDOWN = 4 VIR_DOMAIN_SHUTOFF = 5 VIR_DOMAIN_CRASHED = 6 VIR_DOMAIN_PMSUSPENDED = 7 # Stolen from nova LIBVIRT_POWER_STATE = { VIR_DOMAIN_NOSTATE: 'pending', VIR_DOMAIN_RUNNING: 'running', VIR_DOMAIN_BLOCKED: 'running', VIR_DOMAIN_PAUSED: 'paused', VIR_DOMAIN_SHUTDOWN: 'shutdown', VIR_DOMAIN_SHUTOFF: 'shutdown', VIR_DOMAIN_CRASHED: 'crashed', VIR_DOMAIN_PMSUSPENDED: 'suspended', } # NOTE(sileht): This is a guessing of the nova # status, should be true 99.9% on the time, # but can be wrong during some transition state # like shelving/rescuing LIBVIRT_STATUS = { VIR_DOMAIN_NOSTATE: 'building', VIR_DOMAIN_RUNNING: 'active', VIR_DOMAIN_BLOCKED: 'active', VIR_DOMAIN_PAUSED: 'paused', VIR_DOMAIN_SHUTDOWN: 'stopped', VIR_DOMAIN_SHUTOFF: 'stopped', VIR_DOMAIN_CRASHED: 'error', VIR_DOMAIN_PMSUSPENDED: 'suspended', } # NOTE(pas-ha) in the order from newest to oldest NOVA_METADATA_VERSIONS = ( "http://openstack.org/xmlns/libvirt/nova/1.1", "http://openstack.org/xmlns/libvirt/nova/1.0", ) def new_libvirt_connection(conf): if not libvirt: raise ImportError("python-libvirt module is missing") uri = (conf.libvirt_uri or LIBVIRT_PER_TYPE_URIS.get(conf.libvirt_type, 'qemu:///system')) LOG.debug('Connecting to libvirt: %s', uri) 
return libvirt.openReadOnly(uri) def refresh_libvirt_connection(conf, klass): connection = getattr(klass, '_libvirt_connection', None) if not connection or not connection.isAlive(): connection = new_libvirt_connection(conf) setattr(klass, '_libvirt_connection', connection) return connection def is_disconnection_exception(e): if not libvirt: return False return (isinstance(e, libvirt.libvirtError) and e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR, libvirt.VIR_ERR_INTERNAL_ERROR) and e.get_error_domain() in (libvirt.VIR_FROM_REMOTE, libvirt.VIR_FROM_RPC)) retry_on_disconnect = tenacity.retry( retry=tenacity.retry_if_exception(is_disconnection_exception), stop=tenacity.stop_after_attempt(3), wait=tenacity.wait_exponential(multiplier=3, min=1, max=60)) def raise_nodata_if_unsupported(method): def inner(in_self, instance, *args, **kwargs): try: return method(in_self, instance, *args, **kwargs) except libvirt.libvirtError as e: # NOTE(sileht): At this point libvirt connection error # have been reraise as tenacity.RetryError() msg = _('Failed to inspect instance %(instance_uuid)s stats, ' 'can not get info from libvirt: %(error)s') % { "instance_uuid": instance.id, "error": e} raise virt_inspector.NoDataException(msg) return inner @retry_on_disconnect def instance_metadata(domain): xml_string = None last_error = None for meta_version in NOVA_METADATA_VERSIONS: try: xml_string = domain.metadata( libvirt.VIR_DOMAIN_METADATA_ELEMENT, meta_version) break except libvirt.libvirtError as exc: if exc.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN_METADATA: LOG.debug("Failed to find metadata %s in domain %s", meta_version, domain.UUIDString()) last_error = exc continue elif is_disconnection_exception(exc): # Re-raise the exception so it's handled and retries raise last_error = exc if xml_string is None: LOG.error( "Fail to get domain uuid %s metadata, libvirtError: %s", domain.UUIDString(), last_error ) return xml_string 
././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7869415 ceilometer-24.1.0.dev59/ceilometer/data/0000775000175100017510000000000015033033521017025 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7929416 ceilometer-24.1.0.dev59/ceilometer/data/meters.d/0000775000175100017510000000000015033033521020546 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/data/meters.d/meters.yaml0000664000175100017510000003241615033033467022750 0ustar00mylesmyles--- metric: # Image - name: "image.size" event_type: - "image.upload" - "image.delete" - "image.update" type: "gauge" unit: B volume: $.payload.size resource_id: $.payload.id project_id: $.payload.owner - name: "image.download" event_type: "image.send" type: "delta" unit: "B" volume: $.payload.bytes_sent resource_id: $.payload.image_id user_id: $.payload.receiver_user_id project_id: $.payload.receiver_tenant_id - name: "image.serve" event_type: "image.send" type: "delta" unit: "B" volume: $.payload.bytes_sent resource_id: $.payload.image_id project_id: $.payload.owner_id - name: 'volume.provider.capacity.total' event_type: 'capacity.backend.*' type: 'gauge' unit: 'GB' volume: $.payload.total resource_id: $.payload.name_to_id - name: 'volume.provider.capacity.free' event_type: 'capacity.backend.*' type: 'gauge' unit: 'GB' volume: $.payload.free resource_id: $.payload.name_to_id - name: 'volume.provider.capacity.allocated' event_type: 'capacity.backend.*' type: 'gauge' unit: 'GB' volume: $.payload.allocated resource_id: $.payload.name_to_id - name: 'volume.provider.capacity.provisioned' event_type: 'capacity.backend.*' type: 'gauge' unit: 'GB' volume: $.payload.provisioned resource_id: $.payload.name_to_id - name: 'volume.provider.capacity.virtual_free' event_type: 'capacity.backend.*' type: 'gauge' unit: 'GB' volume: 
$.payload.virtual_free resource_id: $.payload.name_to_id - name: 'volume.provider.pool.capacity.total' event_type: 'capacity.pool.*' type: 'gauge' unit: 'GB' volume: $.payload.total resource_id: $.payload.name_to_id metadata: &provider_pool_meta provider: $.payload.name_to_id.`split(#, 0, 1)` - name: 'volume.provider.pool.capacity.free' event_type: 'capacity.pool.*' type: 'gauge' unit: 'GB' volume: $.payload.free resource_id: $.payload.name_to_id metadata: <<: *provider_pool_meta - name: 'volume.provider.pool.capacity.allocated' event_type: 'capacity.pool.*' type: 'gauge' unit: 'GB' volume: $.payload.allocated resource_id: $.payload.name_to_id metadata: <<: *provider_pool_meta - name: 'volume.provider.pool.capacity.provisioned' event_type: 'capacity.pool.*' type: 'gauge' unit: 'GB' volume: $.payload.provisioned resource_id: $.payload.name_to_id metadata: <<: *provider_pool_meta - name: 'volume.provider.pool.capacity.virtual_free' event_type: 'capacity.pool.*' type: 'gauge' unit: 'GB' volume: $.payload.virtual_free resource_id: $.payload.name_to_id metadata: <<: *provider_pool_meta - name: 'volume.size' event_type: - 'volume.exists' - 'volume.retype' - 'volume.create.*' - 'volume.delete.*' - 'volume.resize.*' - 'volume.attach.*' - 'volume.detach.*' - 'volume.update.*' - 'volume.manage.*' type: 'gauge' unit: 'GB' volume: $.payload.size user_id: $.payload.user_id project_id: $.payload.tenant_id resource_id: $.payload.volume_id metadata: display_name: $.payload.display_name volume_type: $.payload.volume_type volume_type_id: $.payload.volume_type image_id: $.payload.glance_metadata[?key=image_id].value instance_id: $.payload.volume_attachment[0].server_id - name: 'snapshot.size' event_type: - 'snapshot.exists' - 'snapshot.create.*' - 'snapshot.delete.*' - 'snapshot.manage.*' type: 'gauge' unit: 'GB' volume: $.payload.volume_size user_id: $.payload.user_id project_id: $.payload.tenant_id resource_id: $.payload.snapshot_id metadata: display_name: $.payload.display_name - 
name: 'backup.size' event_type: - 'backup.exists' - 'backup.create.*' - 'backup.delete.*' - 'backup.restore.*' type: 'gauge' unit: 'GB' volume: $.payload.size user_id: $.payload.user_id project_id: $.payload.tenant_id resource_id: $.payload.backup_id metadata: display_name: $.payload.display_name # Magnum - name: $.payload.metrics.[*].name event_type: 'magnum.bay.metrics.*' type: 'gauge' unit: $.payload.metrics.[*].unit volume: $.payload.metrics.[*].value user_id: $.payload.user_id project_id: $.payload.project_id resource_id: $.payload.resource_id lookup: ['name', 'unit', 'volume'] # Swift - name: $.payload.measurements.[*].metric.[*].name event_type: 'objectstore.http.request' type: 'delta' unit: $.payload.measurements.[*].metric.[*].unit volume: $.payload.measurements.[*].result resource_id: $.payload.target.id user_id: $.payload.initiator.id project_id: $.payload.initiator.project_id lookup: ['name', 'unit', 'volume'] - name: 'memory' event_type: &instance_events compute.instance.(?!create.start|update).* type: 'gauge' unit: 'MB' volume: $.payload.memory_mb user_id: $.payload.user_id project_id: $.payload.tenant_id resource_id: $.payload.instance_id user_metadata: $.payload.metadata metadata: &instance_meta host: $.payload.host flavor_id: $.payload.instance_flavor_id flavor_name: $.payload.instance_type display_name: $.payload.display_name image_ref: $.payload.image_meta.base_image_ref launched_at: $.payload.launched_at created_at: $.payload.created_at deleted_at: $.payload.deleted_at - name: 'vcpus' event_type: *instance_events type: 'gauge' unit: 'vcpu' volume: $.payload.vcpus user_id: $.payload.user_id project_id: $.payload.tenant_id resource_id: $.payload.instance_id user_metadata: $.payload.metadata metadata: <<: *instance_meta - name: 'compute.instance.booting.time' event_type: 'compute.instance.create.end' type: 'gauge' unit: 'sec' volume: fields: [$.payload.created_at, $.payload.launched_at] plugin: 'timedelta' project_id: $.payload.tenant_id 
resource_id: $.payload.instance_id user_metadata: $.payload.metadata metadata: <<: *instance_meta - name: 'disk.root.size' event_type: *instance_events type: 'gauge' unit: 'GB' volume: $.payload.root_gb user_id: $.payload.user_id project_id: $.payload.tenant_id resource_id: $.payload.instance_id user_metadata: $.payload.metadata metadata: <<: *instance_meta - name: 'disk.ephemeral.size' event_type: *instance_events type: 'gauge' unit: 'GB' volume: $.payload.ephemeral_gb user_id: $.payload.user_id project_id: $.payload.tenant_id resource_id: $.payload.instance_id user_metadata: $.payload.metadata metadata: <<: *instance_meta - name: 'bandwidth' event_type: 'l3.meter' type: 'delta' unit: 'B' volume: $.payload.bytes project_id: $.payload.tenant_id resource_id: $.payload.label_id - name: 'compute.node.cpu.frequency' event_type: 'compute.metrics.update' type: 'gauge' unit: 'MHz' volume: $.payload.metrics[?(@.name='cpu.frequency')].value resource_id: $.payload.host + "_" + $.payload.nodename timestamp: $.payload.metrics[?(@.name='cpu.frequency')].timestamp metadata: event_type: $.event_type host: $.publisher_id source: $.payload.metrics[?(@.name='cpu.frequency')].source - name: 'compute.node.cpu.user.time' event_type: 'compute.metrics.update' type: 'cumulative' unit: 'ns' volume: $.payload.metrics[?(@.name='cpu.user.time')].value resource_id: $.payload.host + "_" + $.payload.nodename timestamp: $.payload.metrics[?(@.name='cpu.user.time')].timestamp metadata: event_type: $.event_type host: $.publisher_id source: $.payload.metrics[?(@.name='cpu.user.time')].source - name: 'compute.node.cpu.kernel.time' event_type: 'compute.metrics.update' type: 'cumulative' unit: 'ns' volume: $.payload.metrics[?(@.name='cpu.kernel.time')].value resource_id: $.payload.host + "_" + $.payload.nodename timestamp: $.payload.metrics[?(@.name='cpu.kernel.time')].timestamp metadata: event_type: $.event_type host: $.publisher_id source: $.payload.metrics[?(@.name='cpu.kernel.time')].source - name: 
'compute.node.cpu.idle.time' event_type: 'compute.metrics.update' type: 'cumulative' unit: 'ns' volume: $.payload.metrics[?(@.name='cpu.idle.time')].value resource_id: $.payload.host + "_" + $.payload.nodename timestamp: $.payload.metrics[?(@.name='cpu.idle.time')].timestamp metadata: event_type: $.event_type host: $.publisher_id source: $.payload.metrics[?(@.name='cpu.idle.time')].source - name: 'compute.node.cpu.iowait.time' event_type: 'compute.metrics.update' type: 'cumulative' unit: 'ns' volume: $.payload.metrics[?(@.name='cpu.iowait.time')].value resource_id: $.payload.host + "_" + $.payload.nodename timestamp: $.payload.metrics[?(@.name='cpu.iowait.time')].timestamp metadata: event_type: $.event_type host: $.publisher_id source: $.payload.metrics[?(@.name='cpu.iowait.time')].source - name: 'compute.node.cpu.kernel.percent' event_type: 'compute.metrics.update' type: 'gauge' unit: 'percent' volume: $.payload.metrics[?(@.name='cpu.kernel.percent')].value * 100 resource_id: $.payload.host + "_" + $.payload.nodename timestamp: $.payload.metrics[?(@.name='cpu.kernel.percent')].timestamp metadata: event_type: $.event_type host: $.publisher_id source: $.payload.metrics[?(@.name='cpu.kernel.percent')].source - name: 'compute.node.cpu.idle.percent' event_type: 'compute.metrics.update' type: 'gauge' unit: 'percent' volume: $.payload.metrics[?(@.name='cpu.idle.percent')].value * 100 resource_id: $.payload.host + "_" + $.payload.nodename timestamp: $.payload.metrics[?(@.name='cpu.idle.percent')].timestamp metadata: event_type: $.event_type host: $.publisher_id source: $.payload.metrics[?(@.name='cpu.idle.percent')].source - name: 'compute.node.cpu.user.percent' event_type: 'compute.metrics.update' type: 'gauge' unit: 'percent' volume: $.payload.metrics[?(@.name='cpu.user.percent')].value * 100 resource_id: $.payload.host + "_" + $.payload.nodename timestamp: $.payload.metrics[?(@.name='cpu.user.percent')].timestamp metadata: event_type: $.event_type host: $.publisher_id 
source: $.payload.metrics[?(@.name='cpu.user.percent')].source - name: 'compute.node.cpu.iowait.percent' event_type: 'compute.metrics.update' type: 'gauge' unit: 'percent' volume: $.payload.metrics[?(@.name='cpu.iowait.percent')].value * 100 resource_id: $.payload.host + "_" + $.payload.nodename timestamp: $.payload.metrics[?(@.name='cpu.iowait.percent')].timestamp metadata: event_type: $.event_type host: $.publisher_id source: $.payload.metrics[?(@.name='cpu.iowait.percent')].source - name: 'compute.node.cpu.percent' event_type: 'compute.metrics.update' type: 'gauge' unit: 'percent' volume: $.payload.metrics[?(@.name='cpu.percent')].value * 100 resource_id: $.payload.host + "_" + $.payload.nodename timestamp: $.payload.metrics[?(@.name='cpu.percent')].timestamp metadata: event_type: $.event_type host: $.publisher_id source: $.payload.metrics[?(@.name='cpu.percent')].source # Identity # NOTE(gordc): hack because jsonpath-rw-ext can't concat starting with string. - name: $.payload.outcome.`sub(/.*/, )` + 'identity.authenticate.' 
+ $.payload.outcome type: 'delta' unit: 'user' volume: 1 event_type: - 'identity.authenticate' resource_id: $.payload.initiator.id user_id: $.payload.initiator.id # DNS - name: 'dns.domain.exists' event_type: 'dns.domain.exists' type: 'cumulative' unit: 's' volume: fields: [$.payload.audit_period_beginning, $.payload.audit_period_ending] plugin: 'timedelta' project_id: $.payload.tenant_id resource_id: $.payload.id user_id: $.ctxt.user metadata: status: $.payload.status pool_id: $.payload.pool_id host: $.publisher_id # Trove - name: 'trove.instance.exists' event_type: 'trove.instance.exists' type: 'cumulative' unit: 's' volume: fields: [$.payload.audit_period_beginning, $.payload.audit_period_ending] plugin: 'timedelta' project_id: $.payload.tenant_id resource_id: $.payload.instance_id user_id: $.payload.user_id metadata: nova_instance_id: $.payload.nova_instance_id state: $.payload.state service_id: $.payload.service_id instance_type: $.payload.instance_type instance_type_id: $.payload.instance_type_id # Manila - name: 'manila.share.size' event_type: - 'share.create.*' - 'share.delete.*' - 'share.extend.*' - 'share.shrink.*' type: 'gauge' unit: 'GB' volume: $.payload.size user_id: $.payload.user_id project_id: $.payload.project_id resource_id: $.payload.share_id metadata: name: $.payload.name host: $.payload.host status: $.payload.status availability_zone: $.payload.availability_zone protocol: $.payload.proto ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/declarative.py0000664000175100017510000001542715033033467020773 0ustar00mylesmyles# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from jsonpath_rw_ext import parser from oslo_log import log import yaml from ceilometer.i18n import _ LOG = log.getLogger(__name__) class DefinitionException(Exception): def __init__(self, message, definition_cfg=None): msg = '{} {}: {}'.format( self.__class__.__name__, definition_cfg, message) super().__init__(msg) self.brief_message = message class MeterDefinitionException(DefinitionException): pass class EventDefinitionException(DefinitionException): pass class ResourceDefinitionException(DefinitionException): pass class DynamicPollsterException(DefinitionException): pass class DynamicPollsterDefinitionException(DynamicPollsterException): pass class InvalidResponseTypeException(DynamicPollsterException): pass class NonOpenStackApisDynamicPollsterException\ (DynamicPollsterDefinitionException): pass class Definition: JSONPATH_RW_PARSER = parser.ExtentedJsonPathParser() GETTERS_CACHE = {} def __init__(self, name, cfg, plugin_manager): self.cfg = cfg self.name = name self.plugin = None if isinstance(cfg, dict): if 'fields' not in cfg: raise DefinitionException( _("The field 'fields' is required for %s") % name, self.cfg) if 'plugin' in cfg: plugin_cfg = cfg['plugin'] if isinstance(plugin_cfg, str): plugin_name = plugin_cfg plugin_params = {} else: try: plugin_name = plugin_cfg['name'] except KeyError: raise DefinitionException( _('Plugin specified, but no plugin name supplied ' 'for %s') % name, self.cfg) plugin_params = plugin_cfg.get('parameters') if plugin_params is None: plugin_params = {} try: plugin_ext = plugin_manager[plugin_name] except KeyError: 
raise DefinitionException( _('No plugin named %(plugin)s available for ' '%(name)s') % dict( plugin=plugin_name, name=name), self.cfg) plugin_class = plugin_ext.plugin self.plugin = plugin_class(**plugin_params) fields = cfg['fields'] else: # Simple definition "foobar: jsonpath" fields = cfg if isinstance(fields, list): # NOTE(mdragon): if not a string, we assume a list. if len(fields) == 1: fields = fields[0] else: fields = '|'.join('(%s)' % path for path in fields) if isinstance(fields, int): self.getter = fields else: try: self.getter = self.make_getter(fields) except Exception as e: raise DefinitionException( _("Parse error in JSONPath specification " "'%(jsonpath)s' for %(name)s: %(err)s") % dict(jsonpath=fields, name=name, err=e), self.cfg) def _get_path(self, match): if match.context is not None: yield from self._get_path(match.context) yield str(match.path) def parse(self, obj, return_all_values=False): if callable(self.getter): values = self.getter(obj) else: return self.getter values = [match for match in values if return_all_values or match.value is not None] if self.plugin is not None: if return_all_values and not self.plugin.support_return_all_values: raise DefinitionException("Plugin %s don't allows to " "return multiple values" % self.cfg["plugin"]["name"], self.cfg) values_map = [('.'.join(self._get_path(match)), match.value) for match in values] values = [v for v in self.plugin.trait_values(values_map) if v is not None] else: values = [match.value for match in values if match is not None] if return_all_values: return values else: return values[0] if values else None def make_getter(self, fields): if fields in self.GETTERS_CACHE: return self.GETTERS_CACHE[fields] else: getter = self.JSONPATH_RW_PARSER.parse(fields).find self.GETTERS_CACHE[fields] = getter return getter def load_definitions(conf, defaults, config_file, fallback_file=None): """Setup a definitions from yaml config file.""" if not os.path.exists(config_file): config_file = 
conf.find_file(config_file) if not config_file and fallback_file is not None: LOG.debug("No Definitions configuration file found! " "Using default config.") config_file = fallback_file if config_file is not None: LOG.debug("Loading definitions configuration file: %s", config_file) with open(config_file) as cf: config = cf.read() try: definition_cfg = yaml.safe_load(config) except yaml.YAMLError as err: if hasattr(err, 'problem_mark'): mark = err.problem_mark errmsg = (_("Invalid YAML syntax in Definitions file " "%(file)s at line: %(line)s, column: %(column)s.") % dict(file=config_file, line=mark.line + 1, column=mark.column + 1)) else: errmsg = (_("YAML error reading Definitions file " "%(file)s") % dict(file=config_file)) LOG.error(errmsg) raise else: LOG.debug("No Definitions configuration file found! " "Using default config.") definition_cfg = defaults LOG.debug("Definitions: %s", definition_cfg) return definition_cfg ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7929416 ceilometer-24.1.0.dev59/ceilometer/event/0000775000175100017510000000000015033033521017235 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/event/__init__.py0000664000175100017510000000000015033033467021345 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/event/converter.py0000664000175100017510000002677015033033467021643 0ustar00mylesmyles# # Copyright 2013 Rackspace Hosting. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import fnmatch import os from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils from ceilometer import declarative from ceilometer.event import models from ceilometer.i18n import _ OPTS = [ cfg.StrOpt('definitions_cfg_file', default="event_definitions.yaml", help="Configuration file for event definitions." ), cfg.BoolOpt('drop_unmatched_notifications', default=False, help='Drop notifications if no event definition matches. ' '(Otherwise, we convert them with just the default traits)'), cfg.MultiStrOpt('store_raw', default=[], help='Store the raw notification for select priority ' 'levels (info and/or error). By default, raw details are ' 'not captured.') ] LOG = log.getLogger(__name__) class TraitDefinition(declarative.Definition): def __init__(self, name, trait_cfg, plugin_manager): super().__init__(name, trait_cfg, plugin_manager) type_name = (trait_cfg.get('type', 'text') if isinstance(trait_cfg, dict) else 'text') self.trait_type = models.Trait.get_type_by_name(type_name) if self.trait_type is None: raise declarative.EventDefinitionException( _("Invalid trait type '%(type)s' for trait %(trait)s") % dict(type=type_name, trait=name), self.cfg) def to_trait(self, notification_body): value = self.parse(notification_body) if value is None: return None # NOTE(mdragon): some openstack projects (mostly Nova) emit '' # for null fields for things like dates. 
if self.trait_type != models.Trait.TEXT_TYPE and value == '': return None value = models.Trait.convert_value(self.trait_type, value) return models.Trait(self.name, self.trait_type, value) class EventDefinition: DEFAULT_TRAITS = dict( service=dict(type='text', fields='publisher_id'), request_id=dict(type='text', fields='ctxt.request_id'), project_id=dict(type='text', fields=['payload.tenant_id', 'ctxt.project_id']), user_id=dict(type='text', fields=['payload.user_id', 'ctxt.user_id']), # TODO(dikonoor):tenant_id is old terminology and should # be deprecated tenant_id=dict(type='text', fields=['payload.tenant_id', 'ctxt.project_id']), ) def __init__(self, definition_cfg, trait_plugin_mgr, raw_levels): self._included_types = [] self._excluded_types = [] self.traits = dict() self.cfg = definition_cfg self.raw_levels = raw_levels try: event_type = definition_cfg['event_type'] traits = definition_cfg['traits'] except KeyError as err: raise declarative.EventDefinitionException( _("Required field %s not specified") % err.args[0], self.cfg) if isinstance(event_type, str): event_type = [event_type] for t in event_type: if t.startswith('!'): self._excluded_types.append(t[1:]) else: self._included_types.append(t) if self._excluded_types and not self._included_types: self._included_types.append('*') for trait_name in self.DEFAULT_TRAITS: self.traits[trait_name] = TraitDefinition( trait_name, self.DEFAULT_TRAITS[trait_name], trait_plugin_mgr) for trait_name in traits: self.traits[trait_name] = TraitDefinition( trait_name, traits[trait_name], trait_plugin_mgr) def included_type(self, event_type): for t in self._included_types: if fnmatch.fnmatch(event_type, t): return True return False def excluded_type(self, event_type): for t in self._excluded_types: if fnmatch.fnmatch(event_type, t): return True return False def match_type(self, event_type): return (self.included_type(event_type) and not self.excluded_type(event_type)) @property def is_catchall(self): return '*' in 
self._included_types and not self._excluded_types def to_event(self, priority, notification_body): event_type = notification_body['event_type'] message_id = notification_body['metadata']['message_id'] when = timeutils.normalize_time(timeutils.parse_isotime( notification_body['metadata']['timestamp'])) traits = (self.traits[t].to_trait(notification_body) for t in self.traits) # Only accept non-None value traits ... traits = [trait for trait in traits if trait is not None] raw = notification_body if priority in self.raw_levels else {} event = models.Event(message_id, event_type, when, traits, raw) return event class NotificationEventsConverter: """Notification Event Converter The NotificationEventsConverter handles the conversion of Notifications from openstack systems into Ceilometer Events. The conversion is handled according to event definitions in a config file. The config is a list of event definitions. Order is significant, a notification will be processed according to the LAST definition that matches it's event_type. (We use the last matching definition because that allows you to use YAML merge syntax in the definitions file.) Each definition is a dictionary with the following keys (all are required): - event_type: this is a list of notification event_types this definition will handle. These can be wildcarded with unix shell glob (not regex!) wildcards. An exclusion listing (starting with a '!') will exclude any types listed from matching. If ONLY exclusions are listed, the definition will match anything not matching the exclusions. This item can also be a string, which will be taken as equivalent to 1 item list. Examples: * ['compute.instance.exists'] will only match compute.instance.exists notifications * "compute.instance.exists" Same as above. * ["image.create", "image.delete"] will match image.create and image.delete, but not anything else. 
* "compute.instance.*" will match compute.instance.create.start but not image.upload * ['*.start','*.end', '!scheduler.*'] will match compute.instance.create.start, and image.delete.end, but NOT compute.instance.exists or scheduler.run_instance.start * '!image.*' matches any notification except image notifications. * ['*', '!image.*'] same as above. - traits: (dict) The keys are trait names, the values are the trait definitions. Each trait definition is a dictionary with the following keys: - type (optional): The data type for this trait. (as a string) Valid options are: 'text', 'int', 'float' and 'datetime', defaults to 'text' if not specified. - fields: a path specification for the field(s) in the notification you wish to extract. The paths can be specified with a dot syntax (e.g. 'payload.host') or dictionary syntax (e.g. 'payload[host]') is also supported. In either case, if the key for the field you are looking for contains special characters, like '.', it will need to be quoted (with double or single quotes) like so:: "payload.image_meta.'org.openstack__1__architecture'" The syntax used for the field specification is a variant of JSONPath, and is fairly flexible. (see: https://github.com/kennknowles/python-jsonpath-rw for more info) Specifications can be written to match multiple possible fields, the value for the trait will be derived from the matching fields that exist and have a non-null (i.e. is not None) values in the notification. By default the value will be the first such field. (plugins can alter that, if they wish) This configuration value is normally a string, for convenience, it can be specified as a list of specifications, which will be OR'ed together (a union query in jsonpath terms) - plugin (optional): (dictionary) with the following keys: - name: (string) name of a plugin to load - parameters: (optional) Dictionary of keyword args to pass to the plugin on initialization. See documentation on each plugin to see what arguments it accepts. 
For convenience, this value can also be specified as a string, which is interpreted as a plugin name, which will be loaded with no parameters. """ def __init__(self, conf, events_config, trait_plugin_mgr): self.conf = conf raw_levels = [level.lower() for level in self.conf.event.store_raw] self.definitions = [ EventDefinition(event_def, trait_plugin_mgr, raw_levels) for event_def in reversed(events_config)] add_catchall = not self.conf.event.drop_unmatched_notifications if add_catchall and not any(d.is_catchall for d in self.definitions): event_def = dict(event_type='*', traits={}) self.definitions.append(EventDefinition(event_def, trait_plugin_mgr, raw_levels)) def to_event(self, priority, notification_body): event_type = notification_body['event_type'] message_id = notification_body['metadata']['message_id'] edef = None for d in self.definitions: if d.match_type(event_type): edef = d break if edef is None: msg = (_('Dropping Notification %(type)s (uuid:%(msgid)s)') % dict(type=event_type, msgid=message_id)) if self.conf.event.drop_unmatched_notifications: LOG.debug(msg) else: # If drop_unmatched_notifications is False, this should # never happen. (mdragon) LOG.error(msg) return None return edef.to_event(priority, notification_body) def setup_events(conf, trait_plugin_mgr): """Setup the event definitions from yaml config file.""" return NotificationEventsConverter( conf, declarative.load_definitions( conf, [], conf.event.definitions_cfg_file, os.path.join( os.path.dirname(os.path.abspath(__file__)), '..', 'pipeline', 'data', 'event_definitions.yaml')), trait_plugin_mgr) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/event/models.py0000664000175100017510000001124415033033467021105 0ustar00mylesmyles# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Model classes for use in the events storage API. """ from oslo_utils import timeutils def serialize_dt(value): """Serializes parameter if it is datetime.""" return value.isoformat() if hasattr(value, 'isoformat') else value class Model: """Base class for storage API models.""" def __init__(self, **kwds): self.fields = list(kwds) for k, v in kwds.items(): setattr(self, k, v) def as_dict(self): d = {} for f in self.fields: v = getattr(self, f) if isinstance(v, Model): v = v.as_dict() elif isinstance(v, list) and v and isinstance(v[0], Model): v = [sub.as_dict() for sub in v] d[f] = v return d def __eq__(self, other): return self.as_dict() == other.as_dict() def __ne__(self, other): return not self.__eq__(other) class Event(Model): """A raw event from the source system. Events have Traits. Metrics will be derived from one or more Events. """ DUPLICATE = 1 UNKNOWN_PROBLEM = 2 INCOMPATIBLE_TRAIT = 3 def __init__(self, message_id, event_type, generated, traits, raw): """Create a new event. :param message_id: Unique ID for the message this event stemmed from. This is different than the Event ID, which comes from the underlying storage system. :param event_type: The type of the event. :param generated: UTC time for when the event occurred. :param traits: list of Traits on this Event. :param raw: Unindexed raw notification details. 
""" Model.__init__(self, message_id=message_id, event_type=event_type, generated=generated, traits=traits, raw=raw) def append_trait(self, trait_model): self.traits.append(trait_model) def __repr__(self): trait_list = [] if self.traits: trait_list = [str(trait) for trait in self.traits] return ("" % (self.message_id, self.event_type, self.generated, " ".join(trait_list))) def serialize(self): return {'message_id': self.message_id, 'event_type': self.event_type, 'generated': serialize_dt(self.generated), 'traits': [trait.serialize() for trait in self.traits], 'raw': self.raw} class Trait(Model): """A Trait is a key/value pair of data on an Event. The value is variant record of basic data types (int, date, float, etc). """ NONE_TYPE = 0 TEXT_TYPE = 1 INT_TYPE = 2 FLOAT_TYPE = 3 DATETIME_TYPE = 4 type_names = { NONE_TYPE: "none", TEXT_TYPE: "string", INT_TYPE: "integer", FLOAT_TYPE: "float", DATETIME_TYPE: "datetime" } def __init__(self, name, dtype, value): if not dtype: dtype = Trait.NONE_TYPE Model.__init__(self, name=name, dtype=dtype, value=value) def __repr__(self): return "" % (self.name, self.dtype, self.value) def serialize(self): return self.name, self.dtype, serialize_dt(self.value) def get_type_name(self): return self.get_name_by_type(self.dtype) @classmethod def get_type_by_name(cls, type_name): return getattr(cls, '%s_TYPE' % type_name.upper(), None) @classmethod def get_type_names(cls): return cls.type_names.values() @classmethod def get_name_by_type(cls, type_id): return cls.type_names.get(type_id, "none") @classmethod def convert_value(cls, trait_type, value): if trait_type is cls.INT_TYPE: return int(value) if trait_type is cls.FLOAT_TYPE: return float(value) if trait_type is cls.DATETIME_TYPE: return timeutils.normalize_time(timeutils.parse_isotime(value)) # Cropping the text value to match the TraitText value size if isinstance(value, bytes): return value.decode('utf-8')[:255] return str(value)[:255] 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/event/trait_plugins.py0000664000175100017510000002405715033033467022514 0ustar00mylesmyles# # Copyright 2013 Rackspace Hosting. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from oslo_log import log from oslo_utils import timeutils LOG = log.getLogger(__name__) class TraitPluginBase(metaclass=abc.ABCMeta): """Base class for plugins. It converts notification fields to Trait values. """ support_return_all_values = False """If True, an exception will be raised if the user expect the plugin to return one trait per match_list, but the plugin doesn't allow/support that. """ def __init__(self, **kw): """Setup the trait plugin. For each Trait definition a plugin is used on in a conversion definition, a new instance of the plugin will be created, and initialized with the parameters (if any) specified in the config file. :param kw: the parameters specified in the event definitions file. """ super().__init__() @abc.abstractmethod def trait_values(self, match_list): """Convert a set of fields to one or multiple Trait values. This method is called each time a trait is attempted to be extracted from a notification. It will be called *even if* no matching fields are found in the notification (in that case, the match_list will be empty). If this method returns None, the trait *will not* be added to the event. 
Any other value returned by this method will be used as the value for the trait. Values returned will be coerced to the appropriate type for the trait. :param match_list: A list (may be empty if no matches) of *tuples*. Each tuple is (field_path, value) where field_path is the jsonpath for that specific field. Example:: trait's fields definition: ['payload.foobar', 'payload.baz', 'payload.thing.*'] notification body: { 'metadata': {'message_id': '12345'}, 'publisher': 'someservice.host', 'payload': { 'foobar': 'test', 'thing': { 'bar': 12, 'boing': 13, } } } match_list will be: [('payload.foobar','test'), ('payload.thing.bar',12), ('payload.thing.boing',13)] Here is a plugin that emulates the default (no plugin) behavior: .. code-block:: python class DefaultPlugin(TraitPluginBase): "Plugin that returns the first field value." def __init__(self, **kw): super(DefaultPlugin, self).__init__() def trait_values(self, match_list): if not match_list: return None return [ match[1] for match in match_list] """ class SplitterTraitPlugin(TraitPluginBase): """Plugin that splits a piece off of a string value.""" support_return_all_values = True def __init__(self, separator=".", segment=0, max_split=None, **kw): """Setup how do split the field. :param separator: String to split on. default "." :param segment: Which segment to return. (int) default 0 :param max_split: Limit number of splits. 
Default: None (no limit) """ LOG.warning('split plugin is deprecated, ' 'add ".`split(%(sep)s, %(segment)d, ' '%(max_split)d)`" to your jsonpath instead' % dict(sep=separator, segment=segment, max_split=(-1 if max_split is None else max_split))) self.separator = separator self.segment = segment self.max_split = max_split super().__init__(**kw) def trait_values(self, match_list): return [self._trait_value(match) for match in match_list] def _trait_value(self, match): value = str(match[1]) if self.max_split is not None: values = value.split(self.separator, self.max_split) else: values = value.split(self.separator) try: return values[self.segment] except IndexError: return None class BitfieldTraitPlugin(TraitPluginBase): """Plugin to set flags on a bitfield.""" def __init__(self, initial_bitfield=0, flags=None, **kw): """Setup bitfield trait. :param initial_bitfield: (int) initial value for the bitfield Flags that are set will be OR'ed with this. :param flags: List of dictionaries defining bitflags to set depending on data in the notification. Each one has the following keys: path: jsonpath of field to match. bit: (int) number of bit to set (lsb is bit 0) value: set bit if corresponding field's value matches this. If value is not provided, bit will be set if the field exists (and is non-null), regardless of its value. 
""" self.initial_bitfield = initial_bitfield if flags is None: flags = [] self.flags = flags super().__init__(**kw) def trait_values(self, match_list): matches = dict(match_list) bitfield = self.initial_bitfield for flagdef in self.flags: path = flagdef['path'] bit = 2 ** int(flagdef['bit']) if path in matches: if 'value' in flagdef: if matches[path] == flagdef['value']: bitfield |= bit else: bitfield |= bit return [bitfield] class TimedeltaPluginMissedFields(Exception): def __init__(self): msg = ('It is required to use two timestamp field with Timedelta ' 'plugin.') super().__init__(msg) class TimedeltaPlugin(TraitPluginBase): """Setup timedelta meter volume of two timestamps fields. Example:: trait's fields definition: ['payload.created_at', 'payload.launched_at'] value is been created as total seconds between 'launched_at' and 'created_at' timestamps. """ # TODO(idegtiarov): refactor code to have meter_plugins separate from # trait_plugins def trait_values(self, match_list): if len(match_list) != 2: LOG.warning('Timedelta plugin is required two timestamp fields' ' to create timedelta value.') return [None] start, end = match_list try: start_time = timeutils.parse_isotime(start[1]) end_time = timeutils.parse_isotime(end[1]) except Exception as err: LOG.warning('Failed to parse date from set fields, both ' 'fields %(start)s and %(end)s must be datetime: ' '%(err)s' % dict(start=start[0], end=end[0], err=err) ) return [None] return [abs((end_time - start_time).total_seconds())] class MapTraitPlugin(TraitPluginBase): """A trait plugin for mapping one set of values to another.""" def __init__(self, values=None, default=None, case_sensitive=True, **kw): """Setup map trait. :param values: (dict[Any, Any]) Mapping of values to their desired target values. :param default: (Any) Value to set if no mapping for a value is found. :param case_sensitive: (bool) Perform case-sensitive string lookups. 
""" if not values: raise ValueError("The 'values' parameter is required " "for the map trait plugin") if not isinstance(values, dict): raise ValueError("The 'values' parameter needs to be a dict " "for the map trait plugin") self.case_sensitive = case_sensitive if not self.case_sensitive: self.values = {(k.casefold() if isinstance(k, str) else k): v for k, v in values.items()} else: self.values = dict(values) self.default = default super().__init__(**kw) def trait_values(self, match_list): mapped_values = [] for match in match_list: key = match[1] folded_key = ( key.casefold() if not self.case_sensitive and isinstance(key, str) else key) try: value = self.values[folded_key] except KeyError: LOG.warning( ('Unknown value %s found when mapping %s, ' 'mapping to default value of %s'), repr(key), match[0], repr(self.default)) value = self.default else: LOG.debug('Value %s for %s mapped to value %s', repr(key), match[0], repr(value)) mapped_values.append(value) return mapped_values ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/gnocchi_client.py0000664000175100017510000002642315033033467021456 0ustar00mylesmyles# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from gnocchiclient import client from gnocchiclient import exceptions as gnocchi_exc import keystoneauth1.session from oslo_log import log from oslo_utils import versionutils from ceilometer import keystone_client LOG = log.getLogger(__name__) def get_gnocchiclient(conf, request_timeout=None): group = conf.gnocchi.auth_section session = keystone_client.get_session(conf, group=group, timeout=request_timeout) adapter = keystoneauth1.session.TCPKeepAliveAdapter( pool_maxsize=conf.max_parallel_requests) session.mount("http://", adapter) session.mount("https://", adapter) interface = conf[group].interface region_name = conf[group].region_name gnocchi_url = session.get_endpoint(service_type='metric', service_name='gnocchi', interface=interface, region_name=region_name) return client.Client( '1', session, adapter_options={'connect_retries': 3, 'interface': interface, 'region_name': region_name, 'endpoint_override': gnocchi_url}) # NOTE(sileht): This is the initial resource types created in Gnocchi # This list must never change to keep in sync with what Gnocchi early # database contents was containing resources_initial = { "image": { "name": {"type": "string", "min_length": 0, "max_length": 255, "required": True}, "container_format": {"type": "string", "min_length": 0, "max_length": 255, "required": True}, "disk_format": {"type": "string", "min_length": 0, "max_length": 255, "required": True}, }, "instance": { "flavor_id": {"type": "string", "min_length": 0, "max_length": 255, "required": True}, "image_ref": {"type": "string", "min_length": 0, "max_length": 255, "required": False}, "host": {"type": "string", "min_length": 0, "max_length": 255, "required": True}, "display_name": {"type": "string", "min_length": 0, "max_length": 255, "required": True}, "server_group": {"type": "string", "min_length": 0, "max_length": 255, "required": False}, }, "instance_disk": { "name": {"type": "string", "min_length": 0, "max_length": 255, "required": True}, "instance_id": {"type": "uuid", 
"required": True}, }, "instance_network_interface": { "name": {"type": "string", "min_length": 0, "max_length": 255, "required": True}, "instance_id": {"type": "uuid", "required": True}, }, "volume": { "display_name": {"type": "string", "min_length": 0, "max_length": 255, "required": False}, }, "swift_account": {}, "ceph_account": {}, "network": {}, "identity": {}, "ipmi": {}, "stack": {}, "host": { "host_name": {"type": "string", "min_length": 0, "max_length": 255, "required": True}, }, "host_network_interface": { "host_name": {"type": "string", "min_length": 0, "max_length": 255, "required": True}, "device_name": {"type": "string", "min_length": 0, "max_length": 255, "required": False}, }, "host_disk": { "host_name": {"type": "string", "min_length": 0, "max_length": 255, "required": True}, "device_name": {"type": "string", "min_length": 0, "max_length": 255, "required": False}, }, } # NOTE(sileht): Order matter this have to be considered like alembic migration # code, because it updates the resources schema of Gnocchi resources_update_operations = [ {"desc": "add volume_type to volume", "type": "update_attribute_type", "resource_type": "volume", "data": [{ "op": "add", "path": "/attributes/volume_type", "value": {"type": "string", "min_length": 0, "max_length": 255, "required": False} }]}, {"desc": "add flavor_name to instance", "type": "update_attribute_type", "resource_type": "instance", "data": [{ "op": "add", "path": "/attributes/flavor_name", "value": {"type": "string", "min_length": 0, "max_length": 255, "required": True, "options": {'fill': ''}} }]}, {"desc": "add nova_compute resource type", "type": "create_resource_type", "resource_type": "nova_compute", "data": [{ "attributes": {"host_name": {"type": "string", "min_length": 0, "max_length": 255, "required": True}} }]}, {"desc": "add manila share type", "type": "create_resource_type", "resource_type": "manila_share", "data": [{ "attributes": {"name": {"type": "string", "min_length": 0, "max_length": 255, 
"required": False}, "host": {"type": "string", "min_length": 0, "max_length": 255, "required": True}, "protocol": {"type": "string", "min_length": 0, "max_length": 255, "required": False}, "availability_zone": {"type": "string", "min_length": 0, "max_length": 255, "required": False}, "status": {"type": "string", "min_length": 0, "max_length": 255, "required": True}} }]}, {"desc": "add volume provider resource type", "type": "create_resource_type", "resource_type": "volume_provider", "data": [{ "attributes": {} }]}, {"desc": "add volume provider pool resource type", "type": "create_resource_type", "resource_type": "volume_provider_pool", "data": [{ "attributes": {"provider": {"type": "string", "min_length": 0, "max_length": 255, "required": True}} }]}, {"desc": "add ipmi sensor resource type", "type": "create_resource_type", "resource_type": "ipmi_sensor", "data": [{ "attributes": {"node": {"type": "string", "min_length": 0, "max_length": 255, "required": True}} }]}, {"desc": "add launched_at to instance", "type": "update_attribute_type", "resource_type": "instance", "data": [ {"op": "add", "path": "/attributes/launched_at", "value": {"type": "datetime", "required": False}}, {"op": "add", "path": "/attributes/created_at", "value": {"type": "datetime", "required": False}}, {"op": "add", "path": "/attributes/deleted_at", "value": {"type": "datetime", "required": False}}, ]}, {"desc": "add instance_id/image_id to volume", "type": "update_attribute_type", "resource_type": "volume", "data": [ {"op": "add", "path": "/attributes/image_id", "value": {"type": "uuid", "required": False}}, {"op": "add", "path": "/attributes/instance_id", "value": {"type": "uuid", "required": False}}, ]}, {"desc": "add availability_zone to instance", "type": "update_attribute_type", "resource_type": "instance", "data": [{ "op": "add", "path": "/attributes/availability_zone", "value": {"type": "string", "min_length": 0, "max_length": 255, "required": False} }]}, {"desc": "add volume_type_id to 
volume", "type": "update_attribute_type", "resource_type": "volume", "data": [{ "op": "add", "path": "/attributes/volume_type_id", "value": {"type": "string", "min_length": 0, "max_length": 255, "required": False} }]}, {"desc": "add storage_policy to swift_account", "type": "update_attribute_type", "resource_type": "swift_account", "data": [{ "op": "add", "path": "/attributes/storage_policy", "value": {"type": "string", "min_length": 0, "max_length": 255, "required": False} # Only containers have a storage policy }]}, {"desc": "make host optional for instance", "type": "update_attribute_type", "resource_type": "instance", "data": [{ "op": "add", # Usually update, the attribute likely already exists "path": "/attributes/host", "value": {"type": "string", "min_length": 0, "max_length": 255, "required": False} # Allow the hypervisor to be withheld }]}, ] REQUIRED_VERSION = "4.2.0" def upgrade_resource_types(conf): gnocchi = get_gnocchiclient(conf) gnocchi_version = gnocchi.build.get() if not versionutils.is_compatible(REQUIRED_VERSION, gnocchi_version): raise Exception("required gnocchi version is %s, got %s" % (REQUIRED_VERSION, gnocchi_version)) for name, attributes in resources_initial.items(): try: gnocchi.resource_type.get(name=name) except (gnocchi_exc.ResourceTypeNotFound, gnocchi_exc.NotFound): rt = {'name': name, 'attributes': attributes} gnocchi.resource_type.create(resource_type=rt) for ops in resources_update_operations: if ops['type'] == 'update_attribute_type': rt = gnocchi.resource_type.get(name=ops['resource_type']) first_op = ops['data'][0] attrib = first_op['path'].replace('/attributes/', '') # Options are only used when adding/updating attributes. # Make a shallow copy of the new value type, and remove options # from the copy to make sure it isn't included in checks. 
value = first_op['value'].copy() value.pop('options', None) if (first_op['op'] == 'add' and attrib in rt['attributes'] and value == rt['attributes'][attrib]): continue if first_op['op'] == 'remove' and attrib not in rt['attributes']: continue gnocchi.resource_type.update(ops['resource_type'], ops['data']) elif ops['type'] == 'create_resource_type': try: gnocchi.resource_type.get(name=ops['resource_type']) except (gnocchi_exc.ResourceTypeNotFound, gnocchi_exc.NotFound): rt = {'name': ops['resource_type'], 'attributes': ops['data'][0]['attributes']} gnocchi.resource_type.create(resource_type=rt) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7929416 ceilometer-24.1.0.dev59/ceilometer/hacking/0000775000175100017510000000000015033033521017520 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/hacking/__init__.py0000664000175100017510000000000015033033467021630 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/hacking/checks.py0000664000175100017510000000326415033033467021350 0ustar00mylesmyles# Copyright (c) 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Guidelines for writing new hacking checks - Use only for Ceilometer specific tests. 
OpenStack general tests should be submitted to the common 'hacking' module. - Pick numbers in the range X3xx. Find the current test with the highest allocated number and then pick the next value. - Keep the test method code in the source file ordered based on the C3xx value. - List the new rule in the top level HACKING.rst file """ from hacking import core @core.flake8ext def no_log_warn(logical_line): """Disallow 'LOG.warn(' https://bugs.launchpad.net/tempest/+bug/1508442 C301 """ if logical_line.startswith('LOG.warn('): yield (0, 'C301 Use LOG.warning() rather than LOG.warn()') @core.flake8ext def no_os_popen(logical_line): """Disallow 'os.popen(' Deprecated library function os.popen() Replace it using subprocess https://bugs.launchpad.net/tempest/+bug/1529836 C302 """ if 'os.popen(' in logical_line: yield (0, 'C302 Deprecated library function os.popen(). ' 'Replace it using subprocess module. ') ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/i18n.py0000664000175100017510000000204715033033467017261 0ustar00mylesmyles# Copyright 2014 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """oslo.i18n integration module. 
See https://docs.openstack.org/oslo.i18n/latest/user/usage.html """ import oslo_i18n DOMAIN = 'ceilometer' _translators = oslo_i18n.TranslatorFactory(domain=DOMAIN) # The primary translation function using the well-known name "_" _ = _translators.primary def translate(value, user_locale): return oslo_i18n.translate(value, user_locale) def get_available_languages(): return oslo_i18n.get_available_languages(DOMAIN) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7929416 ceilometer-24.1.0.dev59/ceilometer/image/0000775000175100017510000000000015033033521017176 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/image/__init__.py0000664000175100017510000000000015033033467021306 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/image/discovery.py0000664000175100017510000000251615033033467021574 0ustar00mylesmyles# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import glanceclient from oslo_config import cfg from ceilometer import keystone_client from ceilometer.polling import plugin_base SERVICE_OPTS = [ cfg.StrOpt('glance', default='image', help='Glance service type.'), ] class ImagesDiscovery(plugin_base.DiscoveryBase): def __init__(self, conf): super().__init__(conf) creds = conf.service_credentials self.glance_client = glanceclient.Client( version='2', session=keystone_client.get_session(conf), region_name=creds.region_name, interface=creds.interface, service_type=conf.service_types.glance) def discover(self, manager, param=None): """Discover resources to monitor.""" return self.glance_client.images.list() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/image/glance.py0000664000175100017510000000346015033033467021015 0ustar00mylesmyles# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Common code for working with images """ from ceilometer.polling import plugin_base from ceilometer import sample class _Base(plugin_base.PollsterBase): @property def default_discovery(self): return 'images' @staticmethod def extract_image_metadata(image): return { k: getattr(image, k) for k in [ "status", "visibility", "name", "container_format", "created_at", "disk_format", "updated_at", "min_disk", "protected", "checksum", "min_ram", "tags", "virtual_size" ] } class ImageSizePollster(_Base): def get_samples(self, manager, cache, resources): for image in resources: yield sample.Sample( name='image.size', type=sample.TYPE_GAUGE, unit='B', volume=image.size, user_id=None, project_id=image.owner, resource_id=image.id, resource_metadata=self.extract_image_metadata(image), ) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7929416 ceilometer-24.1.0.dev59/ceilometer/ipmi/0000775000175100017510000000000015033033521017052 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/ipmi/__init__.py0000664000175100017510000000000015033033467021162 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7929416 ceilometer-24.1.0.dev59/ceilometer/ipmi/notifications/0000775000175100017510000000000015033033521021723 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/ipmi/notifications/__init__.py0000664000175100017510000000000015033033467024033 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/ipmi/notifications/ironic.py0000664000175100017510000001316315033033467023575 0ustar00mylesmyles# # Copyright 2014 Red Hat # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this 
file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Converters for producing hardware sensor data sample messages from notification events. """ from oslo_log import log from ceilometer.pipeline import sample as endpoint from ceilometer import sample LOG = log.getLogger(__name__) # Map unit name to SI UNIT_MAP = { 'Watts': 'W', 'Volts': 'V', } def validate_reading(data): """Some sensors read "Disabled".""" return data != 'Disabled' def transform_id(data): return data.lower().replace(' ', '_') def parse_reading(data): try: volume, unit = data.split(' ', 1) unit = unit.rsplit(' ', 1)[-1] return float(volume), UNIT_MAP.get(unit, unit) except ValueError: raise InvalidSensorData('unable to parse sensor reading: %s' % data) class InvalidSensorData(ValueError): pass class SensorNotification(endpoint.SampleEndpoint): """A generic class for extracting samples from sensor data notifications. A notification message can contain multiple samples from multiple sensors, all with the same basic structure: the volume for the sample is found as part of the value of a 'Sensor Reading' key. The unit is in the same value. Subclasses exist solely to allow flexibility with stevedore configuration. """ event_types = ['hardware.ipmi.*'] metric = None def _get_sample(self, message): try: return (payload for _, payload in message['payload'][self.metric].items()) except KeyError: return [] @staticmethod def _package_payload(message, payload): # NOTE(chdent): How much of the payload should we keep? # FIXME(gordc): ironic adds timestamp and event_type in its payload # which we are using below. 
we should probably just use oslo.messaging # values instead? payload['node'] = message['payload']['node_uuid'] info = {'publisher_id': message['publisher_id'], 'timestamp': message['payload']['timestamp'], 'event_type': message['payload']['event_type'], 'user_id': message['payload'].get('user_id'), 'project_id': message['payload'].get('project_id'), 'payload': payload} return info def build_sample(self, message): """Read and process a notification. The guts of a message are in dict value of a 'payload' key which then itself has a payload key containing a dict of multiple sensor readings. If expected keys in the payload are missing or values are not in the expected form for transformations, KeyError and ValueError are caught and the current sensor payload is skipped. """ payloads = self._get_sample(message['payload']) for payload in payloads: try: # Provide a fallback resource_id in case parts are missing. resource_id = 'missing id' try: resource_id = '{nodeid}-{sensorid}'.format( nodeid=message['payload']['node_uuid'], sensorid=transform_id(payload['Sensor ID']) ) except KeyError as exc: raise InvalidSensorData('missing key in payload: %s' % exc) # Do not pick up power consumption metrics from Current sensor if ( self.metric == 'Current' and 'Pwr Consumption' in payload['Sensor ID'] ): continue info = self._package_payload(message, payload) try: sensor_reading = info['payload']['Sensor Reading'] except KeyError: raise InvalidSensorData( "missing 'Sensor Reading' in payload" ) if validate_reading(sensor_reading): volume, unit = parse_reading(sensor_reading) yield sample.Sample.from_notification( name='hardware.ipmi.%s' % self.metric.lower(), type=sample.TYPE_GAUGE, unit=unit, volume=volume, resource_id=resource_id, message=info, user_id=info['user_id'], project_id=info['project_id'], timestamp=info['timestamp']) except InvalidSensorData as exc: LOG.warning( 'invalid sensor data for %(resource)s: %(error)s' % dict(resource=resource_id, error=exc) ) continue class 
TemperatureSensorNotification(SensorNotification): metric = 'Temperature' class CurrentSensorNotification(SensorNotification): metric = 'Current' class FanSensorNotification(SensorNotification): metric = 'Fan' class VoltageSensorNotification(SensorNotification): metric = 'Voltage' class PowerSensorNotification(SensorNotification): metric = 'Power' ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7929416 ceilometer-24.1.0.dev59/ceilometer/ipmi/platform/0000775000175100017510000000000015033033521020676 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/ipmi/platform/__init__.py0000664000175100017510000000000015033033467023006 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/ipmi/platform/exception.py0000664000175100017510000000124415033033467023260 0ustar00mylesmyles# Copyright 2014 Intel Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class IPMIException(Exception): pass ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/ipmi/platform/ipmi_sensor.py0000664000175100017510000001021615033033467023610 0ustar00mylesmyles# Copyright 2014 Intel Corporation. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """IPMI sensor to collect various sensor data of compute node""" from ceilometer.i18n import _ from ceilometer.ipmi.platform import exception as ipmiexcept from ceilometer.ipmi.platform import ipmitool IPMICMD = {"sdr_info": "sdr info", "sensor_dump": "sdr -v", "sensor_dump_temperature": "sdr -v type Temperature", "sensor_dump_current": "sdr -v type Current", "sensor_dump_fan": "sdr -v type Fan", "sensor_dump_voltage": "sdr -v type Voltage", "sensor_dump_power": "sensor get 'Pwr Consumption'"} # Requires translation of output into dict DICT_TRANSLATE_TEMPLATE = {"translate": 1} class IPMISensor: """The python implementation of IPMI sensor using ipmitool The class implements the IPMI sensor to get various sensor data of compute node. It uses ipmitool to execute the IPMI command and parse the output into dict. 
""" _inited = False _instance = None def __new__(cls, *args, **kwargs): """Singleton to avoid duplicated initialization.""" if not cls._instance: cls._instance = super().__new__(cls, *args, **kwargs) return cls._instance def __init__(self): if not (self._instance and self._inited): self.ipmi_support = False self._inited = True self.ipmi_support = self.check_ipmi() @ipmitool.execute_ipmi_cmd() def _get_sdr_info(self): """Get the SDR info.""" return IPMICMD['sdr_info'] @ipmitool.execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE) def _read_sensor_all(self): """Get the sensor data for type.""" return IPMICMD['sensor_dump'] @ipmitool.execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE) def _read_sensor_temperature(self): """Get the sensor data for Temperature.""" return IPMICMD['sensor_dump_temperature'] @ipmitool.execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE) def _read_sensor_voltage(self): """Get the sensor data for Voltage.""" return IPMICMD['sensor_dump_voltage'] @ipmitool.execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE) def _read_sensor_power(self): """Get the sensor data for Power.""" return IPMICMD['sensor_dump_power'] @ipmitool.execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE) def _read_sensor_current(self): """Get the sensor data for Current.""" return IPMICMD['sensor_dump_current'] @ipmitool.execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE) def _read_sensor_fan(self): """Get the sensor data for Fan.""" return IPMICMD['sensor_dump_fan'] def read_sensor_any(self, sensor_type=''): """Get the sensor data for type.""" if not self.ipmi_support: return {} mapping = {'': self._read_sensor_all, 'Temperature': self._read_sensor_temperature, 'Fan': self._read_sensor_fan, 'Voltage': self._read_sensor_voltage, 'Current': self._read_sensor_current, 'Power': self._read_sensor_power} try: return mapping[sensor_type]() except KeyError: raise ipmiexcept.IPMIException(_('Wrong sensor type')) def check_ipmi(self): """IPMI capability checking This function is used to detect if compute node is IPMI capable platform. 
Just run a simple IPMI command to get SDR info for check. """ try: self._get_sdr_info() except ipmiexcept.IPMIException: return False return True ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/ipmi/platform/ipmitool.py0000664000175100017510000001064115033033467023117 0ustar00mylesmyles# Copyright 2014 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utils to run ipmitool for data collection""" from oslo_concurrency import processutils from ceilometer.i18n import _ from ceilometer.ipmi.platform import exception as ipmiexcept import ceilometer.privsep.ipmitool import shlex # Following 2 functions are copied from ironic project to handle ipmitool's # sensor data output. Need code clean and sharing in future. 
# Check ironic/drivers/modules/ipmitool.py def _get_sensor_type(sensor_data_dict): # Have only three sensor type name IDs: 'Sensor Type (Analog)' # 'Sensor Type (Discrete)' and 'Sensor Type (Threshold)' for key in ('Sensor Type (Analog)', 'Sensor Type (Discrete)', 'Sensor Type (Threshold)'): try: return sensor_data_dict[key].split(' ', 1)[0] except KeyError: continue raise ipmiexcept.IPMIException(_("parse IPMI sensor data failed," "unknown sensor type")) def _process_sensor(sensor_data): sensor_data_fields = sensor_data.split('\n') sensor_data_dict = {} for field in sensor_data_fields: if not field: continue kv_value = field.split(':') if len(kv_value) != 2: continue sensor_data_dict[kv_value[0].strip()] = kv_value[1].strip() return sensor_data_dict def _translate_output(output): """Translate the return value into JSON dict :param output: output of the execution of IPMI command(sensor reading) """ sensors_data_dict = {} sensors_data_array = output.split('\n\n') for sensor_data in sensors_data_array: sensor_data_dict = _process_sensor(sensor_data) if not sensor_data_dict: continue sensor_type = _get_sensor_type(sensor_data_dict) # ignore the sensors which have no current 'Sensor Reading' data sensor_id = sensor_data_dict['Sensor ID'] if 'Sensor Reading' in sensor_data_dict: sensors_data_dict.setdefault(sensor_type, {})[sensor_id] = sensor_data_dict # get nothing, no valid sensor data if not sensors_data_dict: raise ipmiexcept.IPMIException(_("parse IPMI sensor data failed," "No data retrieved from given input")) return sensors_data_dict def _parse_output(output, template): """Parse the return value of IPMI command into dict :param output: output of the execution of IPMI command :param template: a dict that contains the expected items of IPMI command and its length. 
""" ret = {} index = 0 if not (output and template): return ret if "translate" in template: ret = _translate_output(output) else: output_list = output.strip().replace('\n', '').split(' ') if sum(template.values()) != len(output_list): raise ipmiexcept.IPMIException(_("ipmitool output " "length mismatch")) for item in template.items(): index_end = index + item[1] update_value = output_list[index: index_end] ret[item[0]] = update_value index = index_end return ret def execute_ipmi_cmd(template=None): """Decorator for the execution of IPMI command. It parses the output of IPMI command into dictionary. """ template = template or [] def _execute_ipmi_cmd(f): def _execute(self, **kwargs): args = ['ipmitool'] command = f(self, **kwargs) args.extend(shlex.split(command)) try: (out, __) = ceilometer.privsep.ipmitool.ipmi(*args) except processutils.ProcessExecutionError: raise ipmiexcept.IPMIException(_("running ipmitool failure")) return _parse_output(out, template) return _execute return _execute_ipmi_cmd ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7929416 ceilometer-24.1.0.dev59/ceilometer/ipmi/pollsters/0000775000175100017510000000000015033033521021101 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/ipmi/pollsters/__init__.py0000664000175100017510000000167415033033467023233 0ustar00mylesmyles# Copyright 2014 Intel Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Pollsters for IPMI and Intel Node Manager """ from oslo_config import cfg OPTS = [ cfg.IntOpt('polling_retry', default=3, help='Tolerance of IPMI/NM polling failures ' 'before disable this pollster. ' 'Negative indicates retrying forever.') ] ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/ipmi/pollsters/sensor.py0000664000175100017510000001155015033033467022777 0ustar00mylesmyles# Copyright 2014 Intel # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log from ceilometer.i18n import _ from ceilometer.ipmi.notifications import ironic as parser from ceilometer.ipmi.platform import exception as ipmiexcept from ceilometer.ipmi.platform import ipmi_sensor from ceilometer.polling import plugin_base from ceilometer import sample LOG = log.getLogger(__name__) class InvalidSensorData(ValueError): pass class SensorPollster(plugin_base.PollsterBase): METRIC = None def setup_environment(self): super().setup_environment() self.ipmi = ipmi_sensor.IPMISensor() self.polling_failures = 0 # Do not load this extension if no IPMI support if not self.ipmi.ipmi_support: raise plugin_base.ExtensionLoadError( "IPMITool not supported on host") @property def default_discovery(self): return 'local_node' @staticmethod def _get_sensor_types(data, sensor_type): # Ipmitool reports 'Pwr Consumption' as sensor type 'Current'. # Set sensor_type to 'Current' when polling 'Power' metrics. if sensor_type == 'Power': sensor_type = 'Current' try: return (sensor_type_data for _, sensor_type_data in data[sensor_type].items()) except KeyError: return [] def get_samples(self, manager, cache, resources): # Only one resource for IPMI pollster try: stats = self.ipmi.read_sensor_any(self.METRIC) except ipmiexcept.IPMIException: self.polling_failures += 1 LOG.warning(_( 'Polling %(mtr)s sensor failed for %(cnt)s times!') % ({'mtr': self.METRIC, 'cnt': self.polling_failures})) if 0 <= self.conf.ipmi.polling_retry < self.polling_failures: LOG.warning(_('Pollster for %s is disabled!') % self.METRIC) raise plugin_base.PollsterPermanentError(resources) else: return self.polling_failures = 0 sensor_type_data = self._get_sensor_types(stats, self.METRIC) for sensor_data in sensor_type_data: # Continue if sensor_data is not parseable. 
try: sensor_reading = sensor_data['Sensor Reading'] sensor_id = sensor_data['Sensor ID'] except KeyError: continue # Do not pick up power consumption metrics from 'Current' sensor if self.METRIC == 'Current' and 'Pwr Consumption' in sensor_id: continue if not parser.validate_reading(sensor_reading): continue try: volume, unit = parser.parse_reading(sensor_reading) except parser.InvalidSensorData: continue resource_id = '%(host)s-%(sensor-id)s' % { 'host': self.conf.host, 'sensor-id': parser.transform_id(sensor_id) } metadata = { 'node': self.conf.host } extra_metadata = self.get_extra_sensor_metadata(sensor_data) if extra_metadata: metadata.update(extra_metadata) yield sample.Sample( name='hardware.ipmi.%s' % self.METRIC.lower(), type=sample.TYPE_GAUGE, unit=unit, volume=volume, user_id=None, project_id=None, resource_id=resource_id, resource_metadata=metadata) def get_extra_sensor_metadata(self, sensor_data): # override get_extra_sensor_metadata to add specific metrics for # each sensor return {} class TemperatureSensorPollster(SensorPollster): METRIC = 'Temperature' class CurrentSensorPollster(SensorPollster): METRIC = 'Current' class FanSensorPollster(SensorPollster): METRIC = 'Fan' def get_extra_sensor_metadata(self, sensor_data): try: return { "maximum_rpm": sensor_data['Normal Maximum'], } except KeyError: # Maximum rpm might not be reported when usage # is reported as percent return {} class VoltageSensorPollster(SensorPollster): METRIC = 'Voltage' class PowerSensorPollster(SensorPollster): METRIC = 'Power' ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/keystone_client.py0000664000175100017510000000733115033033467021702 0ustar00mylesmyles# # Copyright 2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from keystoneauth1 import loading as ka_loading from keystoneclient.v3 import client as ks_client_v3 from oslo_config import cfg DEFAULT_GROUP = "service_credentials" # List of group that can set auth_section to use a different # credentials section OVERRIDABLE_GROUPS = ['gnocchi', 'zaqar'] def get_session(conf, requests_session=None, group=None, timeout=None): """Get a ceilometer service credentials auth session.""" group = group or DEFAULT_GROUP auth_plugin = ka_loading.load_auth_from_conf_options(conf, group) kwargs = {'auth': auth_plugin, 'session': requests_session} if timeout is not None: kwargs['timeout'] = timeout session = ka_loading.load_session_from_conf_options(conf, group, **kwargs) return session def get_client(conf, trust_id=None, requests_session=None, group=DEFAULT_GROUP): """Return a client for keystone v3 endpoint, optionally using a trust.""" session = get_session(conf, requests_session=requests_session, group=group) return ks_client_v3.Client(session=session, trust_id=trust_id, interface=conf[group].interface, region_name=conf[group].region_name) def get_service_catalog(client): return client.session.auth.get_access(client.session).service_catalog def get_auth_token(client): return client.session.auth.get_access(client.session).auth_token CLI_OPTS = [ cfg.StrOpt('region-name', deprecated_group="DEFAULT", deprecated_name="os-region-name", default=os.environ.get('OS_REGION_NAME'), help='Region name to use for OpenStack service endpoints.'), cfg.StrOpt('interface', default=os.environ.get( 'OS_INTERFACE', os.environ.get('OS_ENDPOINT_TYPE', 
'public')), deprecated_name="os-endpoint-type", choices=('public', 'internal', 'admin', 'auth', 'publicURL', 'internalURL', 'adminURL'), help='Type of endpoint in Identity service catalog to use for ' 'communication with OpenStack services.'), ] def register_keystoneauth_opts(conf): _register_keystoneauth_group(conf, DEFAULT_GROUP) for group in OVERRIDABLE_GROUPS: _register_keystoneauth_group(conf, group) conf.set_default('auth_section', DEFAULT_GROUP, group=group) def _register_keystoneauth_group(conf, group): ka_loading.register_auth_conf_options(conf, group) ka_loading.register_session_conf_options( conf, group, deprecated_opts={'cacert': [ cfg.DeprecatedOpt('os-cacert', group=group), cfg.DeprecatedOpt('os-cacert', group="DEFAULT")] }) conf.register_opts(CLI_OPTS, group=group) def post_register_keystoneauth_opts(conf): for group in OVERRIDABLE_GROUPS: if conf[group].auth_section != DEFAULT_GROUP: # NOTE(sileht): We register this again after the auth_section have # been read from the configuration file _register_keystoneauth_group(conf, conf[group].auth_section) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7879415 ceilometer-24.1.0.dev59/ceilometer/locale/0000775000175100017510000000000015033033521017353 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7869415 ceilometer-24.1.0.dev59/ceilometer/locale/de/0000775000175100017510000000000015033033521017743 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7939415 ceilometer-24.1.0.dev59/ceilometer/locale/de/LC_MESSAGES/0000775000175100017510000000000015033033521021530 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/locale/de/LC_MESSAGES/ceilometer.po0000664000175100017510000001345015033033467024234 0ustar00mylesmyles# Translations template for 
ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # # Translators: # Carsten Duch , 2014 # Christian Berendt , 2014 # Ettore Atalan , 2014 # Andreas Jaeger , 2016. #zanata # Andreas Jaeger , 2018. #zanata # Andreas Jaeger , 2019. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2025-02-06 09:18+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2019-10-03 08:56+0000\n" "Last-Translator: Andreas Jaeger \n" "Language: de\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: German\n" #, python-format msgid "Could not load the following pipelines: %s" msgstr "Konnte die folgenden Pipelines nicht laden: %s" #, python-format msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" msgstr "Löschen von Benachrichtigung %(type)s (UUID:%(msgid)s)" #, python-format msgid "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgstr "" "Fehler von libvirt während Suche nach Instanz : " "[Fehlercode %(error_code)s] %(ex)s" #, python-format msgid "" "Failed to inspect data of instance , domain state " "is SHUTOFF." msgstr "" "Fehler beim Überprüfen von Daten der Instanz , " "Domänenstatus ist ABGESCHALTET." 
#, python-format msgid "" "Failed to inspect instance %(instance_uuid)s stats, can not get info from " "libvirt: %(error)s" msgstr "" "Fehler beim Überprüfen der Statistik der Instanz %(instance_uuid)s, " "Informationen können nicht von libvirt abgerufen werden: %(error)s" #, python-format msgid "Failed to publish %d datapoints, dropping them" msgstr "%d Datenpunkte konnten nicht veröffentlicht werden; werden gelöscht" #, python-format msgid "Failed to publish %d datapoints, queue them" msgstr "" "%d Datenpunkte konnten nicht veröffentlicht werden; in Warteschlange " "einreihen" #, python-format msgid "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " "%(column)s." msgstr "" "Ungültige YAML-Syntax in Definitionsdatei %(file)s in Zeile: %(line)s, " "Spalte: %(column)s." #, python-format msgid "Invalid trait type '%(type)s' for trait %(trait)s" msgstr "Ungültiger Traittyp '%(type)s' für Trait %(trait)s" #, python-format msgid "Invalid type %s specified" msgstr "Ungültiger Typ %s angegeben" #, python-format msgid "No plugin named %(plugin)s available for %(name)s" msgstr "Kein Plug-in mit dem Namen %(plugin)s verfügbar für %(name)s." msgid "Node Manager init failed" msgstr "Initialisierung von Knoten-Manager fehlgeschlagen" #, python-format msgid "" "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" msgstr "" "Analysefehler in JSONPath-Spezifikation '%(jsonpath)s' für %(name)s: %(err)s" #, python-format msgid "Plugin specified, but no plugin name supplied for %s" msgstr "Plug-in angegeben, aber kein Plug-in-Name für %s angegeben." #, python-format msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" msgstr "Polling von %(mtr)s-Sensor %(cnt)s Mal fehlgeschlagen!" #, python-format msgid "Polling %(name)s failed for %(cnt)s times!" msgstr "Polling von %(name)s %(cnt)s Mal fehlgeschlagen!" #, python-format msgid "Pollster for %s is disabled!" msgstr "Pollster für %s ist inaktiviert!" 
#, python-format msgid "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" msgstr "" "Maximale Länge von local_queue für Publisher ist überschritten, die %d " "ältesten Beispiele werden gelöscht" #, python-format msgid "Publishing policy is unknown (%s) force to default" msgstr "" "Veröffentlichungsrichtlinie ist unbekannt (%s); auf Standardeinstellung " "setzen" #, python-format msgid "RGW AdminOps API returned %(status)s %(reason)s" msgstr "RGW-AdminOps-API hat Folgendes zurückgegeben: %(status)s %(reason)s" #, python-format msgid "Required field %(field)s should be a %(type)s" msgstr "Erforderliches Feld %(field)s muss %(type)s sein" #, python-format msgid "Required field %s not specified" msgstr "Erforderliches Feld %s nicht angegeben" #, python-format msgid "Required fields %s not specified" msgstr "Erforderliche Felder %s nicht angegeben." #, python-format msgid "The field 'fields' is required for %s" msgstr "Das Feld 'fields' ist erforderlich für %s" msgid "Unable to send sample over UDP" msgstr "Beispiel kann nicht über UDP gesendet werden" #, python-format msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" msgstr "" "Unbekannten Status %(stat)s erhalten für Firewall %(id)s; Beispiel wird " "übersprungen" #, python-format msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" msgstr "" "Unbekannten Status %(stat)s erhalten für VPN %(id)s; Beispiel wird " "übersprungen" msgid "Wrong sensor type" msgstr "Falscher Sensortyp" #, python-format msgid "YAML error reading Definitions file %(file)s" msgstr "YAML-Fehler beim Lesen von Definitionsdatei %(file)s." 
msgid "ipmitool output length mismatch" msgstr "Abweichung bei ipmitool-Ausgabelänge" msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "" "Analyse von IPMI-Sensordaten fehlgeschlagen, keine Daten von angegebener " "Eingabe abgerufen" msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "Analyse von IPMI-Sensordaten fehlgeschlagen, unbekannter Sensortyp" msgid "running ipmitool failure" msgstr "Fehler beim Ausführen von ipmitool" ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7869415 ceilometer-24.1.0.dev59/ceilometer/locale/en_GB/0000775000175100017510000000000015033033521020325 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7939415 ceilometer-24.1.0.dev59/ceilometer/locale/en_GB/LC_MESSAGES/0000775000175100017510000000000015033033521022112 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/locale/en_GB/LC_MESSAGES/ceilometer.po0000664000175100017510000001464115033033467024621 0ustar00mylesmyles# Translations template for ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # # Translators: # Andi Chandler , 2013-2014 # Andreas Jaeger , 2016. #zanata # Andi Chandler , 2017. #zanata # Andi Chandler , 2019. #zanata # Andi Chandler , 2020. #zanata # Andi Chandler , 2024. 
#zanata msgid "" msgstr "" "Project-Id-Version: ceilometer VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2025-02-06 09:18+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2024-04-14 06:55+0000\n" "Last-Translator: Andi Chandler \n" "Language: en_GB\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: English (United Kingdom)\n" #, python-format msgid "Could not load the following pipelines: %s" msgstr "Could not load the following pipelines: %s" #, python-format msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" msgstr "Dropping Notification %(type)s (uuid:%(msgid)s)" #, python-format msgid "" "Error from libvirt while checking blockStats, This may not be harmful, but " "please check : %(ex)s" msgstr "" "Error from libvirt while checking blockStats, This may not be harmful, but " "please check : %(ex)s" #, python-format msgid "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgstr "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" #, python-format msgid "" "Failed to inspect data of instance , domain state " "is SHUTOFF." msgstr "" "Failed to inspect data of instance , domain state " "is SHUTOFF." 
#, python-format msgid "" "Failed to inspect instance %(instance_uuid)s stats, can not get info from " "libvirt: %(error)s" msgstr "" "Failed to inspect instance %(instance_uuid)s stats, can not get info from " "libvirt: %(error)s" #, python-format msgid "Failed to publish %d datapoints, dropping them" msgstr "Failed to publish %d datapoints, dropping them" #, python-format msgid "Failed to publish %d datapoints, queue them" msgstr "Failed to publish %d datapoints, queue them" #, python-format msgid "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " "%(column)s." msgstr "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " "%(column)s." #, python-format msgid "Invalid trait type '%(type)s' for trait %(trait)s" msgstr "Invalid trait type '%(type)s' for trait %(trait)s" #, python-format msgid "Invalid type %s specified" msgstr "Invalid type %s specified" #, python-format msgid "No plugin named %(plugin)s available for %(name)s" msgstr "No plugin named %(plugin)s available for %(name)s" msgid "Node Manager init failed" msgstr "Node Manager init failed" #, python-format msgid "" "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" msgstr "" "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" #, python-format msgid "Plugin specified, but no plugin name supplied for %s" msgstr "Plugin specified, but no plugin name supplied for %s" #, python-format msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" msgstr "Polling %(mtr)s sensor failed for %(cnt)s times!" #, python-format msgid "Polling %(name)s failed for %(cnt)s times!" msgstr "Polling %(name)s failed for %(cnt)s times!" #, python-format msgid "Pollster for %s is disabled!" msgstr "Pollster for %s is disabled!" 
#, python-format msgid "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" msgstr "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" #, python-format msgid "Publishing policy is unknown (%s) force to default" msgstr "Publishing policy is unknown (%s) force to default" #, python-format msgid "RGW AdminOps API returned %(status)s %(reason)s" msgstr "RGW AdminOps API returned %(status)s %(reason)s" #, python-format msgid "Required field %(field)s should be a %(type)s" msgstr "Required field %(field)s should be a %(type)s" #, python-format msgid "Required field %s not specified" msgstr "Required field %s not specified" #, python-format msgid "Required fields %s not specified" msgstr "Required fields %s not specified" msgid "Sample Check" msgstr "Sample Check" #, python-format msgid "The field 'fields' is required for %s" msgstr "The field 'fields' is required for %s" #, python-format msgid "" "Unable to connect to the remote endpoint %(host)s:%(port)d. Connection " "refused." msgstr "" "Unable to connect to the remote endpoint %(host)s:%(port)d. Connection " "refused." #, python-format msgid "" "Unable to connect to the remote endpoint %(host)s:%(port)d. The connection " "timed out." msgstr "" "Unable to connect to the remote endpoint %(host)s:%(port)d. The connection " "timed out." 
msgid "Unable to reconnect and resend sample over TCP" msgstr "Unable to reconnect and resend sample over TCP" #, python-format msgid "Unable to resolv the remote %(host)s" msgstr "Unable to resolve the remote %(host)s" msgid "" "Unable to send sample over TCP, trying to reconnect and resend the message" msgstr "" "Unable to send sample over TCP, trying to reconnect and resend the message" msgid "Unable to send sample over UDP" msgstr "Unable to send sample over UDP" #, python-format msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" msgstr "Unknown status %(stat)s received on fw %(id)s,skipping sample" #, python-format msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" msgstr "Unknown status %(stat)s received on vpn %(id)s, skipping sample" msgid "Wrong sensor type" msgstr "Wrong sensor type" #, python-format msgid "YAML error reading Definitions file %(file)s" msgstr "YAML error reading Definitions file %(file)s" msgid "ipmitool output length mismatch" msgstr "ipmitool output length mismatch" msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "parse IPMI sensor data failed,No data retrieved from given input" msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "parse IPMI sensor data failed,unknown sensor type" msgid "running ipmitool failure" msgstr "running ipmitool failure" ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7869415 ceilometer-24.1.0.dev59/ceilometer/locale/es/0000775000175100017510000000000015033033521017762 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7939415 ceilometer-24.1.0.dev59/ceilometer/locale/es/LC_MESSAGES/0000775000175100017510000000000015033033521021547 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 
ceilometer-24.1.0.dev59/ceilometer/locale/es/LC_MESSAGES/ceilometer.po0000664000175100017510000001173615033033467024260 0ustar00mylesmyles# Translations template for ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # # Translators: # Rafael Rivero , 2015 # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2025-02-06 09:18+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 04:26+0000\n" "Last-Translator: Copied by Zanata \n" "Language: es\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Spanish\n" #, python-format msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" msgstr "Descartando la notificación %(type)s (uuid:%(msgid)s)" #, python-format msgid "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgstr "" "Error de libvirt al buscar la instancia : [Código " "de error %(error_code)s] %(ex)s" #, python-format msgid "" "Failed to inspect data of instance , domain state " "is SHUTOFF." msgstr "" "No se han podido analizar los datos de la instancia , el estado del dominio es SHUTOFF." #, python-format msgid "Failed to publish %d datapoints, dropping them" msgstr "No se han podido publicar los puntos de datos %d, descartándolos" #, python-format msgid "Failed to publish %d datapoints, queue them" msgstr "No se han podido publicar los puntos de datos %d, póngalos en cola" #, python-format msgid "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " "%(column)s." msgstr "" "Sintaxis de YAML no válida en archivo de definiciones %(file)s en la línea: " "%(line)s, columna: %(column)s." 
#, python-format msgid "Invalid trait type '%(type)s' for trait %(trait)s" msgstr "Tipo de rasgo no válido '%(type)s' para el rasgo %(trait)s" #, python-format msgid "No plugin named %(plugin)s available for %(name)s" msgstr "No hay ningún plug-in denominado %(plugin)s disponible para %(name)s" msgid "Node Manager init failed" msgstr "El inicio de Gestor de nodos ha fallado" #, python-format msgid "" "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" msgstr "" "Error de análisis en especificación de JSONPath '%(jsonpath)s' para " "%(name)s: %(err)s" #, python-format msgid "Plugin specified, but no plugin name supplied for %s" msgstr "" "Se ha especificado un plug-in, pero no se ha proporcionado ningún nombre de " "plug-in para %s" #, python-format msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" msgstr "¡El sensor de sondeo %(mtr)s ha fallado %(cnt)s veces!" #, python-format msgid "Polling %(name)s failed for %(cnt)s times!" msgstr "El sondeo %(name)s ha fallado %(cnt)s veces." #, python-format msgid "Pollster for %s is disabled!" msgstr "¡El Pollster para %s está inhabilitado!" 
#, python-format msgid "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" msgstr "" "Se supera la longitud máxima de aplicación de publicación local_queue, " "descartando los ejemplos más antiguos %d" #, python-format msgid "Publishing policy is unknown (%s) force to default" msgstr "" "No se conoce la política de publicación (%s) forzar para tomar el valor " "predeterminado" #, python-format msgid "RGW AdminOps API returned %(status)s %(reason)s" msgstr "La API de RGW AdminOps ha devuelto %(status)s %(reason)s" #, python-format msgid "Required field %s not specified" msgstr "Campo necesario %s no especificado" #, python-format msgid "The field 'fields' is required for %s" msgstr "El campo 'campos' es obligatorio para %s" msgid "Unable to send sample over UDP" msgstr "No se ha podido enviar una muestra sobre UDP" #, python-format msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" msgstr "" "Se ha recibido un estado desconocido %(stat)s en fw %(id)s, se omitirá el " "ejemplo" #, python-format msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" msgstr "" "Se ha recibido un estado desconocido %(stat)s en vpn %(id)s, se omitirá el " "ejemplo" msgid "Wrong sensor type" msgstr "Tipo de sensor incorrecto" #, python-format msgid "YAML error reading Definitions file %(file)s" msgstr "Error de YAML al leer el archivo de definiciones %(file)s" msgid "ipmitool output length mismatch" msgstr "la longitud de salida de ipmitool no coincide" msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "" "ha fallado el análisis de datos de sensor IPMI,no se ha recuperado ningún " "dato de la entrada" msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "" "ha fallado el análisis de datos de sensor IPMI,tipo de sensor desconocido" msgid "running ipmitool failure" msgstr "fallo de ejecución de ipmitool" ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 
mtime=1751922512.7869415 ceilometer-24.1.0.dev59/ceilometer/locale/fr/0000775000175100017510000000000015033033521017762 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7939415 ceilometer-24.1.0.dev59/ceilometer/locale/fr/LC_MESSAGES/0000775000175100017510000000000015033033521021547 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/locale/fr/LC_MESSAGES/ceilometer.po0000664000175100017510000001341715033033467024256 0ustar00mylesmyles# Translations template for ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # # Translators: # Corinne Verheyde , 2013 # CHABERT Loic , 2013 # Christophe kryskool , 2013 # Corinne Verheyde , 2013-2014 # EVEILLARD , 2013-2014 # Francesco Vollero , 2015 # Jonathan Dupart , 2014 # CHABERT Loic , 2013 # Maxime COQUEREL , 2014 # Nick Barcet , 2013 # Nick Barcet , 2013 # Andrew Melim , 2014 # Patrice LACHANCE , 2013 # Patrice LACHANCE , 2013 # Rémi Le Trocquer , 2014 # EVEILLARD , 2013 # Corinne Verheyde , 2013 # Corinne Verheyde , 2013 # Andreas Jaeger , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: ceilometer VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2025-02-06 09:18+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 04:26+0000\n" "Last-Translator: Copied by Zanata \n" "Language: fr\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: French\n" #, python-format msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" msgstr "Suppression du %(type)s de notification (uuid:%(msgid)s)" #, python-format msgid "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgstr "" "Erreur de libvirt lors de la recherche de l'instance : [Code d'erreur %(error_code)s] %(ex)s" #, python-format msgid "" "Failed to inspect data of instance , domain state " "is SHUTOFF." msgstr "" "Echec de l'inspection des données de l'instance . " "Le domaine est à l'état SHUTOFF (INTERRUPTION)." #, python-format msgid "Failed to publish %d datapoints, dropping them" msgstr "Echec de la publication des points de données %d. Suppression en cours" #, python-format msgid "Failed to publish %d datapoints, queue them" msgstr "" "Echec de la publication des points de données %d. Mettez-les en file " "d'attente" #, python-format msgid "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " "%(column)s." msgstr "" "Syntaxe YAML non valide dans le fichier de définitions %(file)s à la ligne : " "%(line)s, colonne : %(column)s." 
#, python-format msgid "Invalid trait type '%(type)s' for trait %(trait)s" msgstr "Type de trait non valide '%(type)s' pour le trait %(trait)s" #, python-format msgid "No plugin named %(plugin)s available for %(name)s" msgstr "Aucun plugin nommé %(plugin)s n'est disponible pour %(name)s" msgid "Node Manager init failed" msgstr "Echec de l'initialisation du gestionnaire de noeud" #, python-format msgid "" "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" msgstr "" "Erreur d'analyse dans la spécification JSONPath '%(jsonpath)s' pour " "%(name)s : %(err)s" #, python-format msgid "Plugin specified, but no plugin name supplied for %s" msgstr "Plugin spécifié, mais aucun nom de plugin n'est fourni pour %s" #, python-format msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" msgstr "L'interrogation du capteur %(mtr)s a échoué %(cnt)s fois !" #, python-format msgid "Polling %(name)s failed for %(cnt)s times!" msgstr "Sondage de %(name)s %(cnt)s fois en échec!" #, python-format msgid "Pollster for %s is disabled!" msgstr "Le pollster pour %s est désactivé !" 
#, python-format msgid "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" msgstr "" "La longueur maximale de local_queue du diffuseur est dépassée, suppression " "des %d échantillons les plus anciens" #, python-format msgid "Publishing policy is unknown (%s) force to default" msgstr "La politique de publication est inconnue (%s) forcé le défaut" #, python-format msgid "RGW AdminOps API returned %(status)s %(reason)s" msgstr "L'API AdminOps RGW a renvoyé %(status)s %(reason)s" #, python-format msgid "Required field %s not specified" msgstr "Champ requis %s non spécifiée" #, python-format msgid "The field 'fields' is required for %s" msgstr "Le champ 'fields' est requis pour %s" msgid "Unable to send sample over UDP" msgstr "Impossible d'envoyer l'échantillon en UDP" #, python-format msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" msgstr "Etat %(stat)s inconnu reçu sur le pare-feu %(id)s, échantillon ignoré" #, python-format msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" msgstr "Etat %(stat)s inconnu reçu sur le vpn %(id)s, échantillon ignoré" msgid "Wrong sensor type" msgstr "Type de détecteur incorrect" #, python-format msgid "YAML error reading Definitions file %(file)s" msgstr "Erreur YAML lors de la lecture du fichier de définitions %(file)s" msgid "ipmitool output length mismatch" msgstr "Non-concordance de longueur de la sortie ipmitool" msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "" "Echec de l'analyse des données du détecteur IPMI, aucune donnée extraite à " "partir de l'entrée fournie" msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "" "Echec de l'analyse des données du détecteur IPMI, type de détecteur inconnu" msgid "running ipmitool failure" msgstr "Echec d'exécution d'ipmitool" ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7869415 
ceilometer-24.1.0.dev59/ceilometer/locale/it/0000775000175100017510000000000015033033521017767 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7939415 ceilometer-24.1.0.dev59/ceilometer/locale/it/LC_MESSAGES/0000775000175100017510000000000015033033521021554 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/locale/it/LC_MESSAGES/ceilometer.po0000664000175100017510000001153615033033467024263 0ustar00mylesmyles# Translations template for ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # # Translators: # Stefano Maffulli , 2013 # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2025-02-06 09:18+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 04:26+0000\n" "Last-Translator: Copied by Zanata \n" "Language: it\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Italian\n" #, python-format msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" msgstr "Eliminazione della notifica %(type)s (uuid:%(msgid)s)" #, python-format msgid "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgstr "" "Errore da libvirt durante la ricerca dell'istanza : [Codice di errore %(error_code)s] %(ex)s" #, python-format msgid "" "Failed to inspect data of instance , domain state " "is SHUTOFF." msgstr "" "Impossibile ispezionare i dati dell'istanza , " "stato dominio SHUTOFF." 
#, python-format msgid "Failed to publish %d datapoints, dropping them" msgstr "Impossibile pubblicare %d datapoint, eliminati" #, python-format msgid "Failed to publish %d datapoints, queue them" msgstr "Impossibile pubblicare %d datapoint, inseriti in coda" #, python-format msgid "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " "%(column)s." msgstr "" "Sintassi YAML non valida nel file delle definizioni %(file)s alla riga: " "%(line)s, colonna: %(column)s." #, python-format msgid "Invalid trait type '%(type)s' for trait %(trait)s" msgstr "" "Tipo di caratteristica non valido '%(type)s' per la caratteristica %(trait)s" #, python-format msgid "No plugin named %(plugin)s available for %(name)s" msgstr "Nessun plug-in con nome %(plugin)s disponibile per %(name)s" msgid "Node Manager init failed" msgstr "Inizializzazione gestore nodi non riuscita" #, python-format msgid "" "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" msgstr "" "Errore di analisi nella specifica JSONPath '%(jsonpath)s' per %(name)s: " "%(err)s" #, python-format msgid "Plugin specified, but no plugin name supplied for %s" msgstr "Plug-in specificato, ma nessun nome di plug-in fornito per %s" #, python-format msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" msgstr "Polling del sensore %(mtr)s non riuscito per %(cnt)s volte!" #, python-format msgid "Polling %(name)s failed for %(cnt)s times!" msgstr "Polling di %(name)s non riuscito per %(cnt)s volte!" #, python-format msgid "Pollster for %s is disabled!" msgstr "Pollster per %s disabilitato!" 
#, python-format msgid "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" msgstr "" "La lunghezza local_queue massima del publisher è stata superata, " "eliminazione di esempi %d meno recenti" #, python-format msgid "Publishing policy is unknown (%s) force to default" msgstr "" "La politica di pubblicazione è sconosciuta (%s), applicazione del valore " "predefinito" #, python-format msgid "RGW AdminOps API returned %(status)s %(reason)s" msgstr "L'API RGW AdminOps ha restituito %(status)s %(reason)s" #, python-format msgid "Required field %s not specified" msgstr "Campo richiesto %s non specificato" #, python-format msgid "The field 'fields' is required for %s" msgstr "Il campo 'fields' è obbligatorio per %s" msgid "Unable to send sample over UDP" msgstr "Impossibile inviare l'esempio su UDP" #, python-format msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" msgstr "Stato non conosciuto %(stat)s ricevuto su fw %(id)s,ignorare l'esempio" #, python-format msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" msgstr "" "Stato non conosciuto %(stat)s ricevuto su vpn %(id)s, ignorare l'esempio" msgid "Wrong sensor type" msgstr "Tipo di sensore errato" #, python-format msgid "YAML error reading Definitions file %(file)s" msgstr "Errore YAML durante la lettura del file definizioni %(file)s" msgid "ipmitool output length mismatch" msgstr "mancata corrispondenza della lunghezza dell'output ipmitool" msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "" "analisi dei dati del sensore IPMI non riuscita, nessun dato recuperato " "dall'input fornito" msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "" "analisi dei dati del sensore IPMI non riuscita, tipo di sensore sconosciuto" msgid "running ipmitool failure" msgstr "errore nell'esecuzione ipmitool" ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7869415 
ceilometer-24.1.0.dev59/ceilometer/locale/ja/0000775000175100017510000000000015033033521017745 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7939415 ceilometer-24.1.0.dev59/ceilometer/locale/ja/LC_MESSAGES/0000775000175100017510000000000015033033521021532 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/locale/ja/LC_MESSAGES/ceilometer.po0000664000175100017510000001262315033033467024237 0ustar00mylesmyles# Translations template for ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # # Translators: # Tomoyuki KATO , 2013 # Andreas Jaeger , 2016. #zanata # 笹原 昌美 , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2025-02-06 09:18+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-16 11:33+0000\n" "Last-Translator: 笹原 昌美 \n" "Language: ja\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Japanese\n" #, fuzzy, python-format msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" msgstr "通知 %(type)s を除去しています (uuid:%(msgid)s)" #, python-format msgid "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgstr "" "インスタンス の検索中に libvirt でエラーが発生しま" "した: [エラーコード %(error_code)s] %(ex)s" #, python-format msgid "" "Failed to inspect data of instance , domain state " "is SHUTOFF." 
msgstr "" "インスタンス のデータを検査できませんでした。ドメ" "イン状態は SHUTOFF です。" #, python-format msgid "Failed to publish %d datapoints, dropping them" msgstr "%d データポイントの公開に失敗しました。これらは廃棄されます" #, python-format msgid "Failed to publish %d datapoints, queue them" msgstr "%d データポイントの公開に失敗しました。これらをキューに入れてください" #, python-format msgid "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " "%(column)s." msgstr "" "%(line)s 行目の %(column)s 列で定義ファイル %(file)s の YAML 構文 が無効で" "す。" #, python-format msgid "Invalid trait type '%(type)s' for trait %(trait)s" msgstr "特性 %(trait)s の特性タイプ '%(type)s' が無効です" #, python-format msgid "No plugin named %(plugin)s available for %(name)s" msgstr "%(name)s に使用できる %(plugin)s という名前のプラグインがありません" msgid "Node Manager init failed" msgstr "ノードマネージャーの初期化に失敗しました" #, python-format msgid "" "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" msgstr "" "%(name)s に関する JSONPath の指定 '%(jsonpath)s' のエラーを解析します: " "%(err)s" #, python-format msgid "Plugin specified, but no plugin name supplied for %s" msgstr "プラグインが指定されていますが、%s にプラグイン名が提供されていません" #, python-format msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" msgstr "センサー %(mtr)s のポーリングが %(cnt)s 回失敗しました" #, python-format msgid "Polling %(name)s failed for %(cnt)s times!" msgstr "ポーリング %(name)s が %(cnt)s 回失敗しました" #, python-format msgid "Pollster for %s is disabled!" 
msgstr "%s の pollster が無効になっています" #, python-format msgid "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" msgstr "" "パブリッシャー local_queue 最大長を超えました。古い方から %d 個のサンプルを除" "去します" #, python-format msgid "Publishing policy is unknown (%s) force to default" msgstr "パブリッシュポリシーが不明です (%s)。強制的にデフォルトに設定されます" #, python-format msgid "RGW AdminOps API returned %(status)s %(reason)s" msgstr "RGW AdminOps API から %(status)s %(reason)s が返されました" #, python-format msgid "Required field %s not specified" msgstr "必須フィールド %s が指定されていません" #, python-format msgid "The field 'fields' is required for %s" msgstr "%s にはフィールド 'fields' が必要です" msgid "Unable to send sample over UDP" msgstr "UDP 経由でサンプルを送信できません" #, python-format msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" msgstr "" "ファイアウォール %(id)s で不明な状態 %(stat)s を受信しました。サンプルをス" "キップします" #, python-format msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" msgstr "" "vpn %(id)s で不明な状態 %(stat)s を受信しました。サンプルをスキップします" msgid "Wrong sensor type" msgstr "センサー種別が正しくありません" #, python-format msgid "YAML error reading Definitions file %(file)s" msgstr "定義ファイル %(file)s での読み取りの YAML エラー" msgid "ipmitool output length mismatch" msgstr "ipmitool 出力の長さが一致しません" msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "" "IPMI センサーデータの解析に失敗しました。指定された入力からデータが取得されま" "せんでした" msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "IPMI センサーデータの解析に失敗しました。不明なセンサー種別です。" msgid "running ipmitool failure" msgstr "ipmitool の実行に失敗しました" ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7879415 ceilometer-24.1.0.dev59/ceilometer/locale/ko_KR/0000775000175100017510000000000015033033521020360 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7939415 ceilometer-24.1.0.dev59/ceilometer/locale/ko_KR/LC_MESSAGES/0000775000175100017510000000000015033033521022145 
5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer.po0000664000175100017510000001267415033033467024660 0ustar00mylesmyles# Translations template for ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # # Translators: # Seong-ho Cho , 2014 # Seunghyo Chun , 2013 # Seunghyo Chun , 2013 # Sungjin Kang , 2013 # Sungjin Kang , 2013 # Andreas Jaeger , 2016. #zanata # Lee Jongwon , 2020. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2025-02-06 09:18+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2020-10-05 01:55+0000\n" "Last-Translator: Lee Jongwon \n" "Language: ko_KR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Korean (South Korea)\n" #, python-format msgid "Could not load the following pipelines: %s" msgstr "다음 파이프라인을 로드할 수 없음: %s" #, python-format msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" msgstr "알림 %(type)s 삭제 중(uuid:%(msgid)s)" #, python-format msgid "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgstr "" "인스턴스 검색 중 libvirt에서 오류 발생: [오류 코" "드 %(error_code)s] %(ex)s" #, python-format msgid "" "Failed to inspect data of instance , domain state " "is SHUTOFF." msgstr "" "인스턴스 <이름=%(name)s, id=%(id)s>의 데이터 검사 실패, 도메인 상태가 SHUTOFF" "입니다." #, python-format msgid "Failed to publish %d datapoints, dropping them" msgstr "%d 데이터포인트 공개 실패. 이를 삭제하는 중" #, python-format msgid "Failed to publish %d datapoints, queue them" msgstr "%d 데이터포인트 공개 실패. 
이를 큐에 대기시킴" #, python-format msgid "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " "%(column)s." msgstr "" "다음에서 정의 파일 %(file)s의 올바르지 않은 YAML 구문: 행: %(line)s, 열: " "%(column)s" #, python-format msgid "Invalid trait type '%(type)s' for trait %(trait)s" msgstr "특성 %(trait)s에 대한 올바르지 않은 특성 유형 '%(type)s'" #, python-format msgid "Invalid type %s specified" msgstr "올바르지 않은 유형 %s이(가) 지정됨" #, python-format msgid "No plugin named %(plugin)s available for %(name)s" msgstr "%(name)s에 대해 %(plugin)s(이)라는 플러그인을 사용할 수 없음" msgid "Node Manager init failed" msgstr "노드 관리자 초기화 실패" #, python-format msgid "" "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" msgstr "" " %(name)s에 대한 JSONPath 스펙 '%(jsonpath)s'의 구문 분석 오류: %(err)s" #, python-format msgid "Plugin specified, but no plugin name supplied for %s" msgstr "플러그인이 지정되지 않았지만, %s에 플러그인 이름이 제공되지 않음" #, python-format msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" msgstr "폴링 %(mtr)s 센서가 %(cnt)s번 실패했습니다!" #, python-format msgid "Polling %(name)s failed for %(cnt)s times!" msgstr "폴링 %(name)s이(가) %(cnt)s번 실패했습니다!" #, python-format msgid "Pollster for %s is disabled!" msgstr "%s의 의견조사자가 사용 안함으로 설정되어 있습니다!" #, python-format msgid "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" msgstr "공개자 최대 local_queue 길이가 초과됨. %d 가장 오래된 샘플 삭제 중" #, python-format msgid "Publishing policy is unknown (%s) force to default" msgstr "공개 정책을 알 수 없음(%s). 
기본값으로 강제 설정함" #, python-format msgid "RGW AdminOps API returned %(status)s %(reason)s" msgstr "RGW AdminOps API가 %(status)s %(reason)s을(를) 리턴함" #, python-format msgid "Required field %s not specified" msgstr "필수 필드 %s이(가) 지정되지 않음" msgid "Sample Check" msgstr "샘플 체크" #, python-format msgid "The field 'fields' is required for %s" msgstr "%s에 'fields' 필드 필요" msgid "Unable to send sample over UDP" msgstr "UDP를 통해 샘플을 전송할 수 없음" #, python-format msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" msgstr "" "fw %(id)s에서 알 수 없는 상태 %(stat)s이(가) 수신됨. 샘플을 건너뛰는 중" #, python-format msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" msgstr "vpn%(id)s에서 알 수 없는 상태 %(stat)s이(가) 수신됨. 샘플 건너뛰기" msgid "Wrong sensor type" msgstr "잘못된 센서 유형" #, python-format msgid "YAML error reading Definitions file %(file)s" msgstr "정의 파일 %(file)s을(를) 읽는 중에 YAML 오류 발생" msgid "ipmitool output length mismatch" msgstr "ipmitool 출력 길이 불일치" msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "" "IPMI 센서 데이터 구문 분석에 실패했음, 제공된 입력에서 검색된 데이터가 없음" msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "IPMI 센서 데이터 구문 분석에 실패했음, 알 수 없는 센서 유형" msgid "running ipmitool failure" msgstr "ipmitool 실행 실패" ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7879415 ceilometer-24.1.0.dev59/ceilometer/locale/pt_BR/0000775000175100017510000000000015033033521020361 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7939415 ceilometer-24.1.0.dev59/ceilometer/locale/pt_BR/LC_MESSAGES/0000775000175100017510000000000015033033521022146 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/locale/pt_BR/LC_MESSAGES/ceilometer.po0000664000175100017510000001147715033033467024661 0ustar00mylesmyles# Translations template for ceilometer. 
# Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # # Translators: # Gabriel Wainer, 2013 # Gabriel Wainer, 2013 # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2025-02-06 09:18+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 04:27+0000\n" "Last-Translator: Copied by Zanata \n" "Language: pt_BR\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Portuguese (Brazil)\n" #, python-format msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" msgstr "Descartando Notificação %(type)s (uuid:%(msgid)s)" #, python-format msgid "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgstr "" "Erro de libvirt ao consultar instância : [Código " "de Erro %(error_code)s] %(ex)s" #, python-format msgid "" "Failed to inspect data of instance , domain state " "is SHUTOFF." msgstr "" "Falha ao inspecionar os dados da instância , " "estado do domínio é SHUTOFF." #, python-format msgid "Failed to publish %d datapoints, dropping them" msgstr "Falha ao publicar %d pontos de dados, descartando-os" #, python-format msgid "Failed to publish %d datapoints, queue them" msgstr "Falha ao publicar %d pontos de dados, enfileire-os" #, python-format msgid "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " "%(column)s." msgstr "" "Sintaxe YAML inválida no arquivo de definições %(file)s na linha: %(line)s, " "coluna: %(column)s." 
#, python-format msgid "Invalid trait type '%(type)s' for trait %(trait)s" msgstr "Tipo de traço inválido '%(type)s' para traço %(trait)s" #, python-format msgid "No plugin named %(plugin)s available for %(name)s" msgstr "Nenhum plug-in nomeado %(plugin)s disponível para %(name)s" msgid "Node Manager init failed" msgstr "Inicialização do gerenciador de nó com falha" #, python-format msgid "" "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" msgstr "" "Erro de análise na especificação JSONPath '%(jsonpath)s' para %(name)s: " "%(err)s" #, python-format msgid "Plugin specified, but no plugin name supplied for %s" msgstr "Plug-in especificado, mas nenhum nome de plug-in fornecido para %s" #, python-format msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" msgstr "O sensor de pesquisa %(mtr)s falhou para %(cnt)s vezes!" #, python-format msgid "Polling %(name)s failed for %(cnt)s times!" msgstr "A pesquisa %(name)s falhou para %(cnt)s vezes!" #, python-format msgid "Pollster for %s is disabled!" msgstr "O pesquisador para %s está desativado!" 
#, python-format msgid "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" msgstr "" "Comprimento máximo de local_queue do publicador foi excedido, descartando %d " "amostras antigas" #, python-format msgid "Publishing policy is unknown (%s) force to default" msgstr "Publicando política desconhecida (%s) força para o padrão" #, python-format msgid "RGW AdminOps API returned %(status)s %(reason)s" msgstr "A API AdminOps RGW retornou %(status)s %(reason)s" #, python-format msgid "Required field %s not specified" msgstr "Campo obrigatório %s não especificado" #, python-format msgid "The field 'fields' is required for %s" msgstr "O campo 'fields' é necessário para %s" msgid "Unable to send sample over UDP" msgstr "Não é possível enviar amostra sobre UDP" #, python-format msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" msgstr "" "Status desconhecido %(stat)s recebido na largura da fonte %(id)s, ignorando " "a amostra" #, python-format msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" msgstr "" "Status desconhecido %(stat)s recebido recebido no vpn %(id)s, ignorando a " "amostra" msgid "Wrong sensor type" msgstr "Tipo de sensor errado" #, python-format msgid "YAML error reading Definitions file %(file)s" msgstr "Erro YAML ao ler o arquivo de definições %(file)s" msgid "ipmitool output length mismatch" msgstr "incompatibilidade no comprimento da saída de ipmitool" msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "" "análise dos dados do sensor IPMI com falha, nenhum dado recuperado da " "entrada fornecida" msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "análise dos dados do sensor IPMI com falha,tipo de sensor desconhecido" msgid "running ipmitool failure" msgstr "executando falha de ipmitool" ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7879415 
ceilometer-24.1.0.dev59/ceilometer/locale/ru/0000775000175100017510000000000015033033521020001 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7939415 ceilometer-24.1.0.dev59/ceilometer/locale/ru/LC_MESSAGES/0000775000175100017510000000000015033033521021566 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/locale/ru/LC_MESSAGES/ceilometer.po0000664000175100017510000001602115033033467024267 0ustar00mylesmyles# Translations template for ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # # Translators: # Andreas Jaeger , 2016. #zanata # Roman Gorshunov , 2021. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2025-02-06 09:18+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2021-09-06 03:48+0000\n" "Last-Translator: Roman Gorshunov \n" "Language: ru\n" "Plural-Forms: nplurals=4; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" "%10<=4 && (n%100<12 || n%100>14) ? 1 : n%10==0 || (n%10>=5 && n%10<=9) || (n" "%100>=11 && n%100<=14)? 
2 : 3);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Russian\n" #, python-format msgid "Could not load the following pipelines: %s" msgstr "Не удалось загрузить следующие цепочки: %s" #, python-format msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" msgstr "Удаление уведомления %(type)s (uuid:%(msgid)s)" #, python-format msgid "" "Error from libvirt while checking blockStats, This may not be harmful, but " "please check : %(ex)s" msgstr "" "Ошибка в libvirt при проверке blockStats, Это может быть нормальным, но, " "пожалуйста, проверьте : %(ex)s" #, python-format msgid "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgstr "" "Возникла ошибка в libvirt при поиске экземпляра <имя=%(name)s, ИД=%(id)s>: " "[Код ошибки: %(error_code)s] %(ex)s" #, python-format msgid "" "Failed to inspect data of instance , domain state " "is SHUTOFF." msgstr "" "Не удалось проверить данные экземпляра <имя=%(name)s, ИД=%(id)s>, состояние " "домена - SHUTOFF." #, python-format msgid "" "Failed to inspect instance %(instance_uuid)s stats, can not get info from " "libvirt: %(error)s" msgstr "" "Не удалось проверить статистику инстанса %(instance_uuid)s, не удалось " "получить информацию от libvirt: %(error)s" #, python-format msgid "Failed to publish %d datapoints, dropping them" msgstr "Не удалось опубликовать %d точек данных, выполняется их удаление" #, python-format msgid "Failed to publish %d datapoints, queue them" msgstr "Не удалось опубликовать %d точек данных, создайте для них очередь" #, python-format msgid "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " "%(column)s." msgstr "" "Недопустимый синтаксис YAML в файле определений %(file)s; строка: %(line)s, " "столбец: %(column)s." 
#, python-format msgid "Invalid trait type '%(type)s' for trait %(trait)s" msgstr "Недопустимый тип особенности %(type)s для особенности %(trait)s" #, python-format msgid "Invalid type %s specified" msgstr "Указан недопустимый тип %s" #, python-format msgid "No plugin named %(plugin)s available for %(name)s" msgstr "Нет доступного модуля %(plugin)s для %(name)s" msgid "Node Manager init failed" msgstr "Сбой инициализации администратора узлов" #, python-format msgid "" "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" msgstr "" "Ошибка анализа спецификации JSONPath %(jsonpath)s для %(name)s: %(err)s" #, python-format msgid "Plugin specified, but no plugin name supplied for %s" msgstr "Указан модуль, но не передано имя модуля для %s" #, python-format msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" msgstr "%(cnt)s-кратный сбой датчика опроса %(mtr)s!" #, python-format msgid "Polling %(name)s failed for %(cnt)s times!" msgstr "Опрос %(name)s не удалось выполнить %(cnt)s раз." #, python-format msgid "Pollster for %s is disabled!" msgstr "Опрашивающий объект для %s выключен!" #, python-format msgid "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" msgstr "" "Превышена максимальная длина local_queue публикатора, удаление %d самых " "старых образцов" #, python-format msgid "Publishing policy is unknown (%s) force to default" msgstr "Стратегия публикации неизвестна (%s). 
По умолчанию принудительная" #, python-format msgid "RGW AdminOps API returned %(status)s %(reason)s" msgstr "Функция API RGW AdminOps вернула %(status)s %(reason)s" #, python-format msgid "Required field %(field)s should be a %(type)s" msgstr "Обязательное поле %(field)s должно быть типа %(type)s" #, python-format msgid "Required field %s not specified" msgstr "Не указано обязательное поле %s" #, python-format msgid "Required fields %s not specified" msgstr "Не указаны обязательные поля %s" msgid "Sample Check" msgstr "Тестовая проверка" #, python-format msgid "The field 'fields' is required for %s" msgstr "Поле 'fields' является обязательным для %s" msgid "Unable to send sample over UDP" msgstr "Не удалось отправить образец по UDP" #, python-format msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" msgstr "" "В fw %(id)s получено неизвестное состояние %(stat)s,пример пропускается" #, python-format msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" msgstr "" "В VPN %(id)s получено неизвестное состояние %(stat)s, пример пропускается" msgid "Wrong sensor type" msgstr "Неверный тип датчика" #, python-format msgid "YAML error reading Definitions file %(file)s" msgstr "Ошибка YAML при чтении файла определений %(file)s" msgid "ipmitool output length mismatch" msgstr "несоответствие длины вывода ipmitool" msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "" "сбой анализа данных датчика IPMI, не получены данные из переданного ввода" msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "сбой анализа данных датчика IPMI, неизвестный тип датчика" msgid "running ipmitool failure" msgstr "сбой выполнения ipmitool" ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7879415 ceilometer-24.1.0.dev59/ceilometer/locale/zh_CN/0000775000175100017510000000000015033033521020354 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 
xustar0028 mtime=1751922512.7939415 ceilometer-24.1.0.dev59/ceilometer/locale/zh_CN/LC_MESSAGES/0000775000175100017510000000000015033033521022141 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer.po0000664000175100017510000001140515033033467024643 0ustar00mylesmyles# Translations template for ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # # Translators: # aji.zqfan , 2015 # yelu , 2013 # Tom Fifield , 2013 # 颜海峰 , 2014 # yelu , 2013 # Yu Zhang, 2013 # Yu Zhang, 2013 # 颜海峰 , 2014 # English translations for ceilometer. # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2025-02-06 09:18+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 04:27+0000\n" "Last-Translator: Copied by Zanata \n" "Language: zh_CN\n" "Language-Team: Chinese (China)\n" "Plural-Forms: nplurals=1; plural=0\n" "Generated-By: Babel 2.2.0\n" "X-Generator: Zanata 4.3.3\n" #, python-format msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" msgstr "正在丢弃通知%(type)s (uuid:%(msgid)s)" #, python-format msgid "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgstr "" "查找实例 <名称为 %(name)s,标识为 %(id)s> 时,libvirt 中出错:[错误代码 " "%(error_code)s] %(ex)s" #, python-format msgid "" "Failed to inspect data of instance , domain state " "is SHUTOFF." 
msgstr "" "为虚拟机获取监控数据失败了,虚拟机状态为SHUTOFF" #, python-format msgid "Failed to publish %d datapoints, dropping them" msgstr "发布%d个数据点时失败,正在将其丢弃" #, python-format msgid "Failed to publish %d datapoints, queue them" msgstr "发布%d个数据点时失败,将其入队" #, python-format msgid "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " "%(column)s." msgstr "定义文件%(file)s中有非法YAML语法,行:%(line)s,列%(column)s。" #, python-format msgid "Invalid trait type '%(type)s' for trait %(trait)s" msgstr "特征%(trait)s包含了不合法的特征类型'%(type)s' " #, python-format msgid "No plugin named %(plugin)s available for %(name)s" msgstr "未对 %(name)s 提供名为 %(plugin)s 的插件" msgid "Node Manager init failed" msgstr "节点管理器初始化失败" #, python-format msgid "" "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" msgstr "对 %(name)s 指定的 JSONPath(即“%(jsonpath)s”)存在解析错误:%(err)s" #, python-format msgid "Plugin specified, but no plugin name supplied for %s" msgstr "指定了插件,但未对 %s 提供插件名" #, python-format msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" msgstr "拉取%(mtr)s传感器失败了%(cnt)s次!" #, python-format msgid "Polling %(name)s failed for %(cnt)s times!" msgstr "轮询 %(name)s 已失败 %(cnt)s 次!" #, python-format msgid "Pollster for %s is disabled!" 
msgstr "%s的采集器被禁用" #, python-format msgid "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" msgstr "发布的数据量超过本地队列最大长度,正在丢弃最老的%d个数据" #, python-format msgid "Publishing policy is unknown (%s) force to default" msgstr "未知的发布策略(%s),强制使用默认策略" #, python-format msgid "RGW AdminOps API returned %(status)s %(reason)s" msgstr "RGW AdminOps接口返回%(status)s %(reason)s" #, python-format msgid "Required field %s not specified" msgstr "必填项%s没有填写" #, python-format msgid "The field 'fields' is required for %s" msgstr "%s 需要字段“fields”" msgid "Unable to send sample over UDP" msgstr "无法通过UDP发送采样" #, python-format msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" msgstr "从fw %(id)s收到未知的状态%(stat)s,跳过该采样数据" #, python-format msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" msgstr "在 VPN %(id)s 上接收到未知状态 %(stat)s,正在跳过样本" msgid "Wrong sensor type" msgstr "错误的传感器类型" #, python-format msgid "YAML error reading Definitions file %(file)s" msgstr "读取定义文件%(file)s时遇到YAML错误" msgid "ipmitool output length mismatch" msgstr "ipmi输出长度不匹配" msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "解析IPMI传感器数据失败,从给定的输入中无法检索到数据" msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "解析IPMI传感器数据失败,未知的传感器类型" msgid "running ipmitool failure" msgstr "运行ipmitool时失败了" ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7879415 ceilometer-24.1.0.dev59/ceilometer/locale/zh_TW/0000775000175100017510000000000015033033521020406 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7939415 ceilometer-24.1.0.dev59/ceilometer/locale/zh_TW/LC_MESSAGES/0000775000175100017510000000000015033033521022173 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 
ceilometer-24.1.0.dev59/ceilometer/locale/zh_TW/LC_MESSAGES/ceilometer.po0000664000175100017510000001104615033033467024676 0ustar00mylesmyles# Translations template for ceilometer. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the ceilometer project. # # Translators: # Stefano Maffulli , 2013 # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: ceilometer VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2025-02-06 09:18+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 04:27+0000\n" "Last-Translator: Copied by Zanata \n" "Language: zh_TW\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Chinese (Taiwan)\n" #, python-format msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" msgstr "正在捨棄通知 %(type)s(UUID:%(msgid)s)" #, python-format msgid "" "Error from libvirt while looking up instance : " "[Error Code %(error_code)s] %(ex)s" msgstr "" "查閱實例 <名稱=%(name)s,ID=%(id)s> 時,libvirt 中發生錯誤:[錯誤碼 " "%(error_code)s] %(ex)s" #, python-format msgid "" "Failed to inspect data of instance , domain state " "is SHUTOFF." msgstr "無法檢查實例 <名稱=%(name)s,ID=%(id)s> 的資料,網域狀態為 SHUTOFF。" #, python-format msgid "Failed to publish %d datapoints, dropping them" msgstr "無法發佈 %d 個資料點,正在捨棄它們" #, python-format msgid "Failed to publish %d datapoints, queue them" msgstr "無法發佈 %d 個資料點,正在將它們排入佇列" #, python-format msgid "" "Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " "%(column)s." 
msgstr "定義檔 %(file)s 第 %(line)s 行第 %(column)s 列中的 YAML 語法無效。" #, python-format msgid "Invalid trait type '%(type)s' for trait %(trait)s" msgstr "特徵 %(trait)s 的特徵類型 '%(type)s' 無效" #, python-format msgid "No plugin named %(plugin)s available for %(name)s" msgstr "沒有名為 %(plugin)s 的外掛程式可供 %(name)s 使用" msgid "Node Manager init failed" msgstr "節點管理程式起始設定失敗" #, python-format msgid "" "Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" msgstr "%(name)s 的 JSONPath 規格 '%(jsonpath)s' 中發生剖析錯誤:%(err)s" #, python-format msgid "Plugin specified, but no plugin name supplied for %s" msgstr "已指定外掛程式,但卻未向 %s 提供外掛程式名稱" #, python-format msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" msgstr "輪詢 %(mtr)s 感應器已失敗 %(cnt)s 次!" #, python-format msgid "Polling %(name)s failed for %(cnt)s times!" msgstr "輪詢 %(name)s 失敗了 %(cnt)s 次!" #, python-format msgid "Pollster for %s is disabled!" msgstr "已停用 %s 的 Pollster!" #, python-format msgid "" "Publisher max local_queue length is exceeded, dropping %d oldest samples" msgstr "已超出發佈者 local_queue 長度上限,正在捨棄 %d 個最舊的樣本" #, python-format msgid "Publishing policy is unknown (%s) force to default" msgstr "發佈原則不明 (%s),強制設為預設值" #, python-format msgid "RGW AdminOps API returned %(status)s %(reason)s" msgstr "RGW AdminOps API 傳回了 %(status)s %(reason)s" #, python-format msgid "Required field %s not specified" msgstr "未指定必要欄位 %s" #, python-format msgid "The field 'fields' is required for %s" msgstr "%s 需要欄位「欄位」" msgid "Unable to send sample over UDP" msgstr "無法透過 UDP 來傳送樣本" #, python-format msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" msgstr "在防火牆 %(id)s 上接收到不明狀態 %(stat)s,正在跳過範例" #, python-format msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" msgstr "在 VPN %(id)s 上接收到不明狀態 %(stat)s,正在跳過範例" msgid "Wrong sensor type" msgstr "感應器類型錯誤" #, python-format msgid "YAML error reading Definitions file %(file)s" msgstr "讀取定義檔 %(file)s 時發生 YAML 錯誤" msgid "ipmitool output length mismatch" msgstr 
"ipmitool 輸出長度不符" msgid "parse IPMI sensor data failed,No data retrieved from given input" msgstr "剖析 IPMI 感應器資料失敗,未從給定的輸入擷取任何資料" msgid "parse IPMI sensor data failed,unknown sensor type" msgstr "剖析 IPMI 感應器資料失敗,感應器類型不明" msgid "running ipmitool failure" msgstr "執行 ipmitool 失敗" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/messaging.py0000664000175100017510000000642515033033467020463 0ustar00mylesmyles# Copyright 2013-2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg import oslo_messaging from oslo_messaging._drivers import impl_rabbit from oslo_messaging.notify import notifier from oslo_messaging import serializer as oslo_serializer DEFAULT_URL = "__default__" TRANSPORTS = {} def setup(): oslo_messaging.set_transport_defaults('ceilometer') # NOTE(sileht): When batch is not enabled, oslo.messaging read all messages # in the queue and can consume a lot of memory, that works for rpc because # you never have a lot of message, but sucks for notification. The # default is not changeable on oslo.messaging side. And we can't expose # this option to set set_transport_defaults because it a driver option. # 100 allow to prefetch a lot of messages but limit memory to 1G per # workers in worst case (~ 1M Nova notification) # And even driver options are located in private module, this is not going # to break soon. 
cfg.set_defaults( impl_rabbit.rabbit_opts, rabbit_qos_prefetch_count=100, ) def get_transport(conf, url=None, optional=False, cache=True): """Initialise the oslo_messaging layer.""" global TRANSPORTS, DEFAULT_URL cache_key = url or DEFAULT_URL transport = TRANSPORTS.get(cache_key) if not transport or not cache: try: transport = notifier.get_notification_transport(conf, url) except (oslo_messaging.InvalidTransportURL, oslo_messaging.DriverLoadFailure): if not optional or url: # NOTE(sileht): oslo_messaging is configured but unloadable # so reraise the exception raise return None else: if cache: TRANSPORTS[cache_key] = transport return transport def cleanup(): """Cleanup the oslo_messaging layer.""" global TRANSPORTS, NOTIFIERS NOTIFIERS = {} for url in TRANSPORTS: TRANSPORTS[url].cleanup() del TRANSPORTS[url] _SERIALIZER = oslo_serializer.JsonPayloadSerializer() def get_batch_notification_listener(transport, targets, endpoints, allow_requeue=False, batch_size=1, batch_timeout=None): """Return a configured oslo_messaging notification listener.""" return oslo_messaging.get_batch_notification_listener( transport, targets, endpoints, executor='threading', allow_requeue=allow_requeue, batch_size=batch_size, batch_timeout=batch_timeout) def get_notifier(transport, publisher_id): """Return a configured oslo_messaging notifier.""" notifier = oslo_messaging.Notifier(transport, serializer=_SERIALIZER) return notifier.prepare(publisher_id=publisher_id) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7939415 ceilometer-24.1.0.dev59/ceilometer/meter/0000775000175100017510000000000015033033521017230 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/meter/__init__.py0000664000175100017510000000000015033033467021340 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 
ceilometer-24.1.0.dev59/ceilometer/meter/notifications.py0000664000175100017510000002235315033033467022471 0ustar00mylesmyles# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import glob import itertools import os import re from ceilometer import cache_utils from oslo_config import cfg from oslo_log import log from stevedore import extension from ceilometer import declarative from ceilometer.i18n import _ from ceilometer.pipeline import sample as endpoint from ceilometer import sample as sample_util OPTS = [ cfg.MultiStrOpt('meter_definitions_dirs', default=["/etc/ceilometer/meters.d", os.path.abspath( os.path.join( os.path.split( os.path.dirname(__file__))[0], "data", "meters.d"))], help="List directory to find files of " "defining meter notifications." 
), ] LOG = log.getLogger(__name__) class MeterDefinition: SAMPLE_ATTRIBUTES = ["name", "type", "volume", "unit", "timestamp", "user_id", "project_id", "resource_id"] REQUIRED_FIELDS = ['name', 'type', 'event_type', 'unit', 'volume', 'resource_id'] def __init__(self, definition_cfg, conf, plugin_manager): self.conf = conf self.cfg = definition_cfg self._cache = cache_utils.get_client(self.conf) missing = [field for field in self.REQUIRED_FIELDS if not self.cfg.get(field)] if missing: raise declarative.MeterDefinitionException( _("Required fields %s not specified") % missing, self.cfg) self._event_type = self.cfg.get('event_type') if isinstance(self._event_type, str): self._event_type = [self._event_type] self._event_type = [re.compile(etype) for etype in self._event_type] if ('type' not in self.cfg.get('lookup', []) and self.cfg['type'] not in sample_util.TYPES): raise declarative.MeterDefinitionException( _("Invalid type %s specified") % self.cfg['type'], self.cfg) self._fallback_user_id = declarative.Definition( 'user_id', "ctxt.user_id|ctxt.user", plugin_manager) self._fallback_project_id = declarative.Definition( 'project_id', "ctxt.project_id|ctxt.tenant_id", plugin_manager) self._attributes = {} self._metadata_attributes = {} self._user_meta = None self._name_discovery = self.conf.polling.identity_name_discovery for name in self.SAMPLE_ATTRIBUTES: attr_cfg = self.cfg.get(name) if attr_cfg: self._attributes[name] = declarative.Definition( name, attr_cfg, plugin_manager) metadata = self.cfg.get('metadata', {}) for name in metadata: self._metadata_attributes[name] = declarative.Definition( name, metadata[name], plugin_manager) user_meta = self.cfg.get('user_metadata') if user_meta: self._user_meta = declarative.Definition(None, user_meta, plugin_manager) # List of fields we expected when multiple meter are in the payload self.lookup = self.cfg.get('lookup') if isinstance(self.lookup, str): self.lookup = [self.lookup] def match_type(self, meter_name): for t in 
self._event_type: if t.match(meter_name): return True def to_samples(self, message, all_values=False): # Sample defaults sample = { 'name': self.cfg["name"], 'type': self.cfg["type"], 'unit': self.cfg["unit"], 'volume': None, 'timestamp': None, 'user_id': self._fallback_user_id.parse(message), 'project_id': self._fallback_project_id.parse(message), 'resource_id': None, 'message': message, 'metadata': {}, } for name, parser in self._metadata_attributes.items(): value = parser.parse(message) if value: sample['metadata'][name] = value if self._user_meta: meta = self._user_meta.parse(message) if meta: sample_util.add_reserved_user_metadata( self.conf, meta, sample['metadata']) # NOTE(sileht): We expect multiple samples in the payload # so put each attribute into a list if self.lookup: for name in sample: sample[name] = [sample[name]] for name in self.SAMPLE_ATTRIBUTES: parser = self._attributes.get(name) if parser is not None: value = parser.parse(message, bool(self.lookup)) # NOTE(sileht): If we expect multiple samples # some attributes are overridden even we don't get any # result. Also note in this case value is always a list if ((not self.lookup and value is not None) or (self.lookup and ((name in self.lookup + ["name"]) or value))): sample[name] = value if self.lookup: nb_samples = len(sample['name']) # skip if no meters in payload if nb_samples <= 0: return attributes = self.SAMPLE_ATTRIBUTES + ["message", "metadata"] samples_values = [] for name in attributes: values = sample.get(name) nb_values = len(values) if nb_values == nb_samples: samples_values.append(values) elif nb_values == 1 and name not in self.lookup: samples_values.append(itertools.cycle(values)) else: nb = (0 if nb_values == 1 and values[0] is None else nb_values) LOG.warning('Only %(nb)d fetched meters contain ' '"%(name)s" field instead of %(total)d.' 
% dict(name=name, nb=nb, total=nb_samples)) return # NOTE(sileht): Transform the sample with multiple values per # attribute into multiple samples with one value per attribute. for values in zip(*samples_values): sample = {attributes[idx]: value for idx, value in enumerate(values)} if self._name_discovery and self._cache: # populate user_name and project_name fields in the sample # created from notifications if sample['user_id']: sample['user_name'] = \ self._cache.resolve_uuid_from_cache( 'users', sample['user_id']) if sample['project_id']: sample['project_name'] = \ self._cache.resolve_uuid_from_cache( 'projects', sample['project_id']) yield sample else: yield sample class ProcessMeterNotifications(endpoint.SampleEndpoint): event_types = [] def __init__(self, conf, publisher): super().__init__(conf, publisher) self.definitions = self._load_definitions() def _load_definitions(self): plugin_manager = extension.ExtensionManager( namespace='ceilometer.event.trait_plugin') definitions = {} mfs = [] for dir in self.conf.meter.meter_definitions_dirs: for filepath in sorted(glob.glob(os.path.join(dir, "*.yaml"))): if filepath is not None: mfs.append(filepath) for mf in mfs: meters_cfg = declarative.load_definitions( self.conf, {}, mf) for meter_cfg in reversed(meters_cfg['metric']): if meter_cfg.get('name') in definitions: # skip duplicate meters LOG.warning("Skipping duplicate meter definition %s" % meter_cfg) continue try: md = MeterDefinition(meter_cfg, self.conf, plugin_manager) except declarative.DefinitionException as e: errmsg = "Error loading meter definition: %s" LOG.error(errmsg, str(e)) else: definitions[meter_cfg['name']] = md return definitions.values() def build_sample(self, notification): for d in self.definitions: if d.match_type(notification['event_type']): for s in d.to_samples(notification): yield sample_util.Sample.from_notification(**s) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 
ceilometer-24.1.0.dev59/ceilometer/middleware.py0000664000175100017510000000245615033033467020623 0ustar00mylesmyles# # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.pipeline import sample as endpoint from ceilometer import sample class HTTPRequest(endpoint.SampleEndpoint): event_types = ['http.request'] def build_sample(self, message): yield sample.Sample.from_notification( name=message['event_type'], type=sample.TYPE_DELTA, volume=1, unit=message['event_type'].split('.')[1], user_id=message['payload']['request'].get('HTTP_X_USER_ID'), project_id=message['payload']['request'].get('HTTP_X_PROJECT_ID'), resource_id=message['payload']['request'].get( 'HTTP_X_SERVICE_NAME'), message=message) class HTTPResponse(HTTPRequest): event_types = ['http.response'] ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7939415 ceilometer-24.1.0.dev59/ceilometer/network/0000775000175100017510000000000015033033521017605 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/network/__init__.py0000664000175100017510000000000015033033467021715 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/network/floatingip.py0000664000175100017510000000413315033033467022325 0ustar00mylesmyles# Copyright 2016 Sungard Availability Services # 
Copyright 2016 Red Hat # Copyright 2012 eNovance # Copyright 2013 IBM Corp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from ceilometer.i18n import _ from ceilometer.network.services import base from ceilometer import sample LOG = log.getLogger(__name__) class FloatingIPPollster(base.BaseServicesPollster): FIELDS = ['router_id', 'status', 'floating_network_id', 'fixed_ip_address', 'port_id', 'floating_ip_address', ] @property def default_discovery(self): return 'fip_services' def get_samples(self, manager, cache, resources): for fip in resources or []: LOG.debug("FLOATING IP : %s", fip) status = self.get_status_id(fip['status']) if status == -1: LOG.warning( _("Unknown status %(status)s for floating IP address " "%(address)s (%(id)s), setting volume to -1") % { "status": fip['status'], "address": fip['floating_ip_address'], "id": fip['id']}) yield sample.Sample( name='ip.floating', type=sample.TYPE_GAUGE, unit='ip', volume=status, user_id=fip.get('user_id'), project_id=fip['tenant_id'], resource_id=fip['id'], resource_metadata=self.extract_metadata(fip) ) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7939415 ceilometer-24.1.0.dev59/ceilometer/network/services/0000775000175100017510000000000015033033521021430 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 
ceilometer-24.1.0.dev59/ceilometer/network/services/__init__.py0000664000175100017510000000000015033033467023540 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/network/services/base.py0000664000175100017510000000223615033033467022730 0ustar00mylesmyles# # Copyright 2014 Cisco Systems,Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.polling import plugin_base # status map for converting metric status to volume int STATUS = { 'inactive': 0, 'active': 1, 'pending_create': 2, 'down': 3, 'created': 4, 'pending_update': 5, 'pending_delete': 6, 'error': 7, } class BaseServicesPollster(plugin_base.PollsterBase): FIELDS = [] def extract_metadata(self, metric): return {k: metric[k] for k in self.FIELDS} @staticmethod def get_status_id(value): if not value: return -1 status = value.lower() return STATUS.get(status, -1) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/network/services/discovery.py0000664000175100017510000000371515033033467024030 0ustar00mylesmyles# # Copyright (c) 2014 Cisco Systems, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer import neutron_client from ceilometer.polling import plugin_base class _BaseServicesDiscovery(plugin_base.DiscoveryBase): KEYSTONE_REQUIRED_FOR_SERVICE = 'neutron' def __init__(self, conf): super().__init__(conf) self.neutron_cli = neutron_client.Client(conf) class VPNServicesDiscovery(_BaseServicesDiscovery): def discover(self, manager, param=None): """Discover resources to monitor.""" return self.neutron_cli.vpn_get_all() class IPSecConnectionsDiscovery(_BaseServicesDiscovery): def discover(self, manager, param=None): """Discover resources to monitor.""" conns = self.neutron_cli.ipsec_site_connections_get_all() return conns class FirewallDiscovery(_BaseServicesDiscovery): def discover(self, manager, param=None): """Discover resources to monitor.""" fw = self.neutron_cli.firewall_get_all() return [i for i in fw if i.get('status', None) != 'error'] class FirewallPolicyDiscovery(_BaseServicesDiscovery): def discover(self, manager, param=None): """Discover resources to monitor.""" return self.neutron_cli.fw_policy_get_all() class FloatingIPDiscovery(_BaseServicesDiscovery): def discover(self, manager, param=None): """Discover floating IP resources to monitor.""" return self.neutron_cli.fip_get_all() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/network/services/fwaas.py0000664000175100017510000000661215033033467023121 0ustar00mylesmyles# # Copyright 2014 Cisco Systems,Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import warnings from oslo_log import log from ceilometer.i18n import _ from ceilometer.network.services import base from ceilometer import sample LOG = log.getLogger(__name__) class FirewallPollster(base.BaseServicesPollster): """Pollster to capture firewalls status samples.""" FIELDS = ['admin_state_up', 'description', 'name', 'status', 'firewall_policy_id', ] def __init__(self, conf): super().__init__(conf) warnings.warn('Support for Neutron FWaaS has been deprecated ' 'and will be removed in a future release.', category=DeprecationWarning, stacklevel=2) @property def default_discovery(self): return 'fw_services' def get_samples(self, manager, cache, resources): resources = resources or [] for fw in resources: LOG.debug("Firewall : %s" % fw) status = self.get_status_id(fw['status']) if status == -1: # unknown status, skip this sample LOG.warning(_("Unknown status %(stat)s received on fw %(id)s," "skipping sample") % {'stat': fw['status'], 'id': fw['id']}) continue yield sample.Sample( name='network.services.firewall', type=sample.TYPE_GAUGE, unit='firewall', volume=status, user_id=None, project_id=fw['tenant_id'], resource_id=fw['id'], resource_metadata=self.extract_metadata(fw) ) class FirewallPolicyPollster(base.BaseServicesPollster): """Pollster to capture firewall policy samples.""" FIELDS = ['name', 'description', 'name', 'firewall_rules', 'shared', 'audited', ] def __init__(self, conf): super().__init__(conf) 
warnings.warn('Support for Neutron FWaaS has been deprecated ' 'and will be removed in a future release.', category=DeprecationWarning, stacklevel=2) @property def default_discovery(self): return 'fw_policy' def get_samples(self, manager, cache, resources): resources = resources or [] for fw in resources: LOG.debug("Firewall Policy: %s" % fw) yield sample.Sample( name='network.services.firewall.policy', type=sample.TYPE_GAUGE, unit='firewall_policy', volume=1, user_id=None, project_id=fw['tenant_id'], resource_id=fw['id'], resource_metadata=self.extract_metadata(fw) ) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/network/services/vpnaas.py0000664000175100017510000000622215033033467023305 0ustar00mylesmyles# # Copyright 2014 Cisco Systems,Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log from ceilometer.i18n import _ from ceilometer.network.services import base from ceilometer import sample LOG = log.getLogger(__name__) class VPNServicesPollster(base.BaseServicesPollster): """Pollster to capture VPN status samples.""" FIELDS = ['admin_state_up', 'description', 'name', 'status', 'subnet_id', 'router_id' ] @property def default_discovery(self): return 'vpn_services' def get_samples(self, manager, cache, resources): resources = resources or [] for vpn in resources: LOG.debug("VPN : %s", vpn) status = self.get_status_id(vpn['status']) if status == -1: LOG.warning( _("Unknown status %(status)s for VPN %(name)s (%(id)s), " "setting volume to -1") % { "status": vpn['status'], "name": vpn['name'], "id": vpn['id']}) yield sample.Sample( name='network.services.vpn', type=sample.TYPE_GAUGE, unit='vpnservice', volume=status, user_id=None, project_id=vpn['tenant_id'], resource_id=vpn['id'], resource_metadata=self.extract_metadata(vpn) ) class IPSecConnectionsPollster(base.BaseServicesPollster): """Pollster to capture vpn ipsec connections status samples.""" FIELDS = ['name', 'description', 'peer_address', 'peer_id', 'peer_cidrs', 'psk', 'initiator', 'ikepolicy_id', 'dpd', 'ipsecpolicy_id', 'vpnservice_id', 'mtu', 'admin_state_up', 'status', 'tenant_id' ] @property def default_discovery(self): return 'ipsec_connections' def get_samples(self, manager, cache, resources): resources = resources or [] for conn in resources: LOG.debug("IPSec Connection Info: %s", conn) yield sample.Sample( name='network.services.vpn.connections', type=sample.TYPE_GAUGE, unit='ipsec_site_connection', volume=1, user_id=None, project_id=conn['tenant_id'], resource_id=conn['id'], resource_metadata=self.extract_metadata(conn) ) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/neutron_client.py0000664000175100017510000000516415033033467021535 0ustar00mylesmyles# Copyright (C) 2014 
eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools from neutronclient.common import exceptions from neutronclient.v2_0 import client as clientv20 from oslo_config import cfg from oslo_log import log from ceilometer import keystone_client SERVICE_OPTS = [ cfg.StrOpt('neutron', default='network', help='Neutron service type.'), ] LOG = log.getLogger(__name__) def logged(func): @functools.wraps(func) def with_logging(*args, **kwargs): try: return func(*args, **kwargs) except exceptions.NeutronClientException as e: if e.status_code == 404: LOG.warning("The resource could not be found.") else: LOG.warning(e) return [] except Exception as e: LOG.exception(e) raise return with_logging class Client: """A client which gets information via python-neutronclient.""" def __init__(self, conf): creds = conf.service_credentials params = { 'session': keystone_client.get_session(conf), 'endpoint_type': creds.interface, 'region_name': creds.region_name, 'service_type': conf.service_types.neutron, } self.client = clientv20.Client(**params) @logged def port_get_all(self): resp = self.client.list_ports() return resp.get('ports') @logged def vpn_get_all(self): resp = self.client.list_vpnservices() return resp.get('vpnservices') @logged def ipsec_site_connections_get_all(self): resp = self.client.list_ipsec_site_connections() return resp.get('ipsec_site_connections') @logged def firewall_get_all(self): resp = self.client.list_firewalls() return resp.get('firewalls') @logged def 
fw_policy_get_all(self): resp = self.client.list_firewall_policies() return resp.get('firewall_policies') @logged def fip_get_all(self): fips = self.client.list_floatingips()['floatingips'] return fips ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/notification.py0000664000175100017510000001514515033033467021173 0ustar00mylesmyles# # Copyright 2017-2018 Red Hat, Inc. # Copyright 2012-2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time import cotyledon from oslo_config import cfg from oslo_log import log import oslo_messaging from stevedore import named from ceilometer.i18n import _ from ceilometer import messaging LOG = log.getLogger(__name__) OPTS = [ cfg.BoolOpt('ack_on_event_error', default=True, help='Acknowledge message when event persistence fails.'), cfg.MultiStrOpt('messaging_urls', default=[], secret=True, help="Messaging URLs to listen for notifications. " "Example: rabbit://user:pass@host1:port1" "[,user:pass@hostN:portN]/virtual_host " "(DEFAULT/transport_url is used if empty). 
This " "is useful when you have dedicate messaging nodes " "for each service, for example, all nova " "notifications go to rabbit-nova:5672, while all " "cinder notifications go to rabbit-cinder:5672."), cfg.IntOpt('batch_size', default=1, min=1, help='Number of notification messages to wait before ' 'publishing them.'), cfg.IntOpt('batch_timeout', help='Number of seconds to wait before dispatching samples ' 'when batch_size is not reached (None means indefinitely).' ), cfg.IntOpt('workers', default=1, min=1, deprecated_group='DEFAULT', deprecated_name='notification_workers', help='Number of workers for notification service, ' 'default value is 1.'), cfg.MultiStrOpt('pipelines', default=['meter', 'event'], help="Select which pipeline managers to enable to " " generate data"), ] EXCHANGES_OPTS = [ cfg.MultiStrOpt('notification_control_exchanges', default=['nova', 'glance', 'neutron', 'cinder', 'heat', 'keystone', 'trove', 'zaqar', 'swift', 'ceilometer', 'magnum', 'dns', 'ironic', 'aodh'], deprecated_group='DEFAULT', deprecated_name="http_control_exchanges", help="Exchanges name to listen for notifications."), ] class NotificationService(cotyledon.Service): """Notification service. When running multiple agents, additional queuing sequence is required for inter process communication. Each agent has two listeners: one to listen to the main OpenStack queue and another listener(and notifier) for IPC to divide pipeline sink endpoints. Coordination should be enabled to have proper active/active HA. """ NOTIFICATION_NAMESPACE = 'ceilometer.notification.v2' def __init__(self, worker_id, conf, coordination_id=None): super().__init__(worker_id) self.startup_delay = worker_id self.conf = conf self.listeners = [] def get_targets(self): """Return a sequence of oslo_messaging.Target This sequence is defining the exchange and topics to be connected. 
""" topics = (self.conf.notification_topics if 'notification_topics' in self.conf else self.conf.oslo_messaging_notifications.topics) return [oslo_messaging.Target(topic=topic, exchange=exchange) for topic in set(topics) for exchange in set(self.conf.notification.notification_control_exchanges)] @staticmethod def _log_missing_pipeline(names): LOG.error(_('Could not load the following pipelines: %s'), names) def run(self): # Delay startup so workers are jittered time.sleep(self.startup_delay) super().run() self.managers = [ext.obj for ext in named.NamedExtensionManager( namespace='ceilometer.notification.pipeline', names=self.conf.notification.pipelines, invoke_on_load=True, on_missing_entrypoints_callback=self._log_missing_pipeline, invoke_args=(self.conf,))] # FIXME(sileht): endpoint uses the notification_topics option # and it should not because this is an oslo_messaging option # not a ceilometer. Until we have something to get the # notification_topics in another way, we must create a transport # to ensure the option has been registered by oslo_messaging. messaging.get_notifier(messaging.get_transport(self.conf), '') endpoints = [] for pipe_mgr in self.managers: LOG.debug("Loading manager endpoints for [%s].", pipe_mgr) endpoint = pipe_mgr.get_main_endpoints() LOG.debug("Loaded endpoints [%s] for manager [%s].", endpoint, pipe_mgr) endpoints.extend(endpoint) targets = self.get_targets() urls = self.conf.notification.messaging_urls or [None] for url in urls: transport = messaging.get_transport(self.conf, url) # NOTE(gordc): ignore batching as we want pull # to maintain sequencing as much as possible. 
listener = messaging.get_batch_notification_listener( transport, targets, endpoints, allow_requeue=True, batch_size=self.conf.notification.batch_size, batch_timeout=self.conf.notification.batch_timeout) listener.start( override_pool_size=self.conf.max_parallel_requests ) self.listeners.append(listener) @staticmethod def kill_listeners(listeners): # NOTE(gordc): correct usage of oslo.messaging listener is to stop(), # which stops new messages, and wait(), which processes remaining # messages and closes connection for listener in listeners: listener.stop() listener.wait() def terminate(self): self.kill_listeners(self.listeners) super().terminate() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/nova_client.py0000664000175100017510000001056115033033467021003 0ustar00mylesmyles# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import functools import glanceclient import novaclient from novaclient import api_versions from novaclient import client as nova_client from oslo_config import cfg from oslo_log import log from ceilometer import keystone_client SERVICE_OPTS = [ cfg.StrOpt('nova', default='compute', help='Nova service type.'), ] LOG = log.getLogger(__name__) def logged(func): @functools.wraps(func) def with_logging(*args, **kwargs): try: return func(*args, **kwargs) except Exception as e: LOG.exception(e) raise return with_logging class Client: """A client which gets information via python-novaclient.""" def __init__(self, conf): """Initialize a nova client object.""" creds = conf.service_credentials ks_session = keystone_client.get_session(conf) self.nova_client = nova_client.Client( version=api_versions.APIVersion('2.1'), session=ks_session, # nova adapter options region_name=creds.region_name, endpoint_type=creds.interface, service_type=conf.service_types.nova) self.glance_client = glanceclient.Client( version='2', session=ks_session, region_name=creds.region_name, interface=creds.interface, service_type=conf.service_types.glance) def _with_flavor_and_image(self, instances): flavor_cache = {} image_cache = {} for instance in instances: self._with_flavor(instance, flavor_cache) self._with_image(instance, image_cache) return instances def _with_flavor(self, instance, cache): fid = instance.flavor['id'] if fid in cache: flavor = cache.get(fid) else: try: flavor = self.nova_client.flavors.get(fid) except novaclient.exceptions.NotFound: flavor = None cache[fid] = flavor attr_defaults = [('name', 'unknown-id-%s' % fid), ('vcpus', 0), ('ram', 0), ('disk', 0), ('ephemeral', 0)] for attr, default in attr_defaults: if not flavor: instance.flavor[attr] = default continue instance.flavor[attr] = getattr(flavor, attr, default) def _with_image(self, instance, cache): try: iid = instance.image['id'] except TypeError: instance.image = None instance.kernel_id = None instance.ramdisk_id = None 
return if iid in cache: image = cache.get(iid) else: try: image = self.glance_client.images.get(iid) except glanceclient.exc.HTTPNotFound: image = None cache[iid] = image attr_defaults = [('kernel_id', None), ('ramdisk_id', None)] instance.image['name'] = ( getattr(image, 'name') if image else 'unknown-id-%s' % iid) image_metadata = getattr(image, 'metadata', None) for attr, default in attr_defaults: ameta = image_metadata.get(attr) if image_metadata else default setattr(instance, attr, ameta) @logged def instance_get_all_by_host(self, hostname, since=None): """Returns list of instances on particular host. If since is supplied, it will return the instances changed since that datetime. since should be in ISO Format '%Y-%m-%dT%H:%M:%SZ' """ search_opts = {'host': hostname, 'all_tenants': True} if since: search_opts['changes-since'] = since return self._with_flavor_and_image(self.nova_client.servers.list( detailed=True, search_opts=search_opts)) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7939415 ceilometer-24.1.0.dev59/ceilometer/objectstore/0000775000175100017510000000000015033033521020437 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/objectstore/__init__.py0000664000175100017510000000000015033033467022547 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/objectstore/rgw.py0000664000175100017510000001727115033033467021631 0ustar00mylesmyles# # Copyright 2015 Reliance Jio Infocomm Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Common code for working with ceph object stores """ from keystoneauth1 import exceptions from oslo_config import cfg from oslo_log import log from urllib import parse as urlparse from ceilometer import keystone_client from ceilometer.polling import plugin_base from ceilometer import sample LOG = log.getLogger(__name__) SERVICE_OPTS = [ cfg.StrOpt('radosgw', help='Radosgw service type.'), ] CREDENTIAL_OPTS = [ cfg.StrOpt('access_key', secret=True, help='Access key for Radosgw Admin.'), cfg.StrOpt('secret_key', secret=True, help='Secret key for Radosgw Admin.') ] CLIENT_OPTS = [ cfg.BoolOpt('implicit_tenants', default=False, help='Whether RGW uses implicit tenants or not.'), ] class _Base(plugin_base.PollsterBase): METHOD = 'bucket' _ENDPOINT = None def __init__(self, conf): super().__init__(conf) self.access_key = self.conf.rgw_admin_credentials.access_key self.secret = self.conf.rgw_admin_credentials.secret_key self.implicit_tenants = self.conf.rgw_client.implicit_tenants @property def default_discovery(self): return 'tenant' @property def CACHE_KEY_METHOD(self): return 'rgw.get_%s' % self.METHOD @staticmethod def _get_endpoint(conf, ksclient): # we store the endpoint as a base class attribute, so keystone is # only ever called once, also we assume that in a single deployment # we may be only deploying `radosgw` or `swift` as the object-store if _Base._ENDPOINT is None and conf.service_types.radosgw: try: creds = conf.service_credentials rgw_url = keystone_client.get_service_catalog( ksclient).url_for( service_type=conf.service_types.radosgw, 
interface=creds.interface, region_name=creds.region_name) _Base._ENDPOINT = urlparse.urljoin(rgw_url, '/admin') except exceptions.EndpointNotFound: LOG.debug("Radosgw endpoint not found") return _Base._ENDPOINT def _iter_accounts(self, ksclient, cache, tenants): if self.CACHE_KEY_METHOD not in cache: cache[self.CACHE_KEY_METHOD] = list(self._get_account_info( ksclient, tenants)) return iter(cache[self.CACHE_KEY_METHOD]) def _get_account_info(self, ksclient, tenants): endpoint = self._get_endpoint(self.conf, ksclient) if not endpoint: return try: from ceilometer.objectstore import rgw_client as c_rgw_client rgw_client = c_rgw_client.RGWAdminClient(endpoint, self.access_key, self.secret, self.implicit_tenants) except ImportError: raise plugin_base.PollsterPermanentError(tenants) for t in tenants: api_method = 'get_%s' % self.METHOD yield t.id, getattr(rgw_client, api_method)(t.id) class ContainersObjectsPollster(_Base): """Get info about object counts in a container using RGW Admin APIs.""" def get_samples(self, manager, cache, resources): for tenant, bucket_info in self._iter_accounts(manager.keystone, cache, resources): for it in bucket_info['buckets']: yield sample.Sample( name='radosgw.containers.objects', type=sample.TYPE_GAUGE, volume=int(it.num_objects), unit='object', user_id=None, project_id=tenant, resource_id=tenant + '/' + it.name, resource_metadata=None, ) class ContainersSizePollster(_Base): """Get info about object sizes in a container using RGW Admin APIs.""" def get_samples(self, manager, cache, resources): for tenant, bucket_info in self._iter_accounts(manager.keystone, cache, resources): for it in bucket_info['buckets']: yield sample.Sample( name='radosgw.containers.objects.size', type=sample.TYPE_GAUGE, volume=int(it.size * 1024), unit='B', user_id=None, project_id=tenant, resource_id=tenant + '/' + it.name, resource_metadata=None, ) class ObjectsSizePollster(_Base): """Iterate over all accounts, using keystone.""" def get_samples(self, manager, 
cache, resources): for tenant, bucket_info in self._iter_accounts(manager.keystone, cache, resources): yield sample.Sample( name='radosgw.objects.size', type=sample.TYPE_GAUGE, volume=int(bucket_info['size'] * 1024), unit='B', user_id=None, project_id=tenant, resource_id=tenant, resource_metadata=None, ) class ObjectsPollster(_Base): """Iterate over all accounts, using keystone.""" def get_samples(self, manager, cache, resources): for tenant, bucket_info in self._iter_accounts(manager.keystone, cache, resources): yield sample.Sample( name='radosgw.objects', type=sample.TYPE_GAUGE, volume=int(bucket_info['num_objects']), unit='object', user_id=None, project_id=tenant, resource_id=tenant, resource_metadata=None, ) class ObjectsContainersPollster(_Base): def get_samples(self, manager, cache, resources): for tenant, bucket_info in self._iter_accounts(manager.keystone, cache, resources): yield sample.Sample( name='radosgw.objects.containers', type=sample.TYPE_GAUGE, volume=int(bucket_info['num_buckets']), unit='object', user_id=None, project_id=tenant, resource_id=tenant, resource_metadata=None, ) class UsagePollster(_Base): METHOD = 'usage' def get_samples(self, manager, cache, resources): for tenant, usage in self._iter_accounts(manager.keystone, cache, resources): yield sample.Sample( name='radosgw.api.request', type=sample.TYPE_GAUGE, volume=int(usage), unit='request', user_id=None, project_id=tenant, resource_id=tenant, resource_metadata=None, ) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/objectstore/rgw_client.py0000664000175100017510000000552415033033467023165 0ustar00mylesmyles# # Copyright 2015 Reliance Jio Infocomm Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from collections import namedtuple from awsauth import S3Auth import requests from urllib import parse as urlparse from ceilometer.i18n import _ class RGWAdminAPIFailed(Exception): pass class RGWAdminClient: Bucket = namedtuple('Bucket', 'name, num_objects, size') def __init__(self, endpoint, access_key, secret_key, implicit_tenants): self.access_key = access_key self.secret = secret_key self.endpoint = endpoint self.hostname = urlparse.urlparse(endpoint).netloc self.implicit_tenants = implicit_tenants def _make_request(self, path, req_params): uri = "{}/{}".format(self.endpoint, path) r = requests.get(uri, params=req_params, auth=S3Auth(self.access_key, self.secret, self.hostname) ) if r.status_code != 200: raise RGWAdminAPIFailed( _('RGW AdminOps API returned %(status)s %(reason)s') % {'status': r.status_code, 'reason': r.reason}) return r.json() def get_bucket(self, tenant_id): if self.implicit_tenants: rgw_uid = tenant_id + "$" + tenant_id else: rgw_uid = tenant_id path = "bucket" req_params = {"uid": rgw_uid, "stats": "true"} json_data = self._make_request(path, req_params) stats = {'num_buckets': 0, 'buckets': [], 'size': 0, 'num_objects': 0} stats['num_buckets'] = len(json_data) for it in json_data: for v in it["usage"].values(): stats['num_objects'] += v["num_objects"] stats['size'] += v["size_kb"] stats['buckets'].append(self.Bucket(it["bucket"], v["num_objects"], v["size_kb"])) return stats def get_usage(self, tenant_id): if self.implicit_tenants: rgw_uid = tenant_id + "$" + tenant_id else: rgw_uid = tenant_id path = "usage" req_params = {"uid": rgw_uid} 
json_data = self._make_request(path, req_params) usage_data = json_data["summary"] return sum(it["total"]["ops"] for it in usage_data) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/objectstore/swift.py0000664000175100017510000001753315033033467022167 0ustar00mylesmyles# # Copyright 2012 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Common code for working with object stores """ from keystoneauth1 import exceptions from oslo_config import cfg from oslo_log import log from swiftclient import client as swift from swiftclient.exceptions import ClientException from urllib import parse as urlparse from ceilometer import keystone_client from ceilometer.polling import plugin_base from ceilometer import sample LOG = log.getLogger(__name__) OPTS = [ cfg.StrOpt('reseller_prefix', default='AUTH_', help="Swift reseller prefix. 
Must be on par with " "reseller_prefix in proxy-server.conf."), ] SERVICE_OPTS = [ cfg.StrOpt('swift', default='object-store', help='Swift service type.'), ] class _Base(plugin_base.PollsterBase): METHOD = 'head' _ENDPOINT = None @property def default_discovery(self): return 'tenant' @property def CACHE_KEY_METHOD(self): return 'swift.%s_account' % self.METHOD @staticmethod def _get_endpoint(conf, ksclient): # we store the endpoint as a base class attribute, so keystone is # only ever called once if _Base._ENDPOINT is None: try: creds = conf.service_credentials _Base._ENDPOINT = keystone_client.get_service_catalog( ksclient).url_for( service_type=conf.service_types.swift, interface=creds.interface, region_name=creds.region_name) except exceptions.EndpointNotFound as e: LOG.info("Swift endpoint not found: %s", e) return _Base._ENDPOINT def _iter_accounts(self, ksclient, cache, tenants): if self.CACHE_KEY_METHOD not in cache: cache[self.CACHE_KEY_METHOD] = list(self._get_account_info( ksclient, tenants)) return iter(cache[self.CACHE_KEY_METHOD]) def _get_account_info(self, ksclient, tenants): endpoint = self._get_endpoint(self.conf, ksclient) if not endpoint: return swift_api_method = getattr(swift, '%s_account' % self.METHOD) for t in tenants: try: http_conn = swift.http_connection( self._neaten_url(endpoint, t.id, self.conf.reseller_prefix), cacert=self.conf.service_credentials.cafile) yield (t.id, swift_api_method( None, keystone_client.get_auth_token(ksclient), http_conn=http_conn)) except ClientException as e: if e.http_status == 404: LOG.warning("Swift tenant id %s not found.", t.id) elif e.http_status == 403: LOG.error("The credentials configured does not have " "correct roles to access Swift tenant id %s.", t.id) else: raise e @staticmethod def _neaten_url(endpoint, tenant_id, reseller_prefix): """Transform the registered url to standard and valid format.""" return urlparse.urljoin(endpoint.split('/v1')[0].rstrip('/') + '/', 'v1/' + reseller_prefix + 
tenant_id) class _ContainersBase(_Base): FIELDS = ("storage_policy",) def _get_resource_metadata(self, container): # NOTE(callumdickinson): Sets value to None if a field is not found. return {f: container.get(f) for f in self.FIELDS} class ObjectsPollster(_Base): """Collect the total objects count for each project""" def get_samples(self, manager, cache, resources): tenants = resources for tenant, account in self._iter_accounts(manager.keystone, cache, tenants): yield sample.Sample( name='storage.objects', type=sample.TYPE_GAUGE, volume=int(account['x-account-object-count']), unit='object', user_id=None, project_id=tenant, resource_id=tenant, resource_metadata=None, ) class ObjectsSizePollster(_Base): """Collect the total objects size of each project""" def get_samples(self, manager, cache, resources): tenants = resources for tenant, account in self._iter_accounts(manager.keystone, cache, tenants): yield sample.Sample( name='storage.objects.size', type=sample.TYPE_GAUGE, volume=int(account['x-account-bytes-used']), unit='B', user_id=None, project_id=tenant, resource_id=tenant, resource_metadata=None, ) class ObjectsContainersPollster(_Base): """Collect the container count for each project""" def get_samples(self, manager, cache, resources): tenants = resources for tenant, account in self._iter_accounts(manager.keystone, cache, tenants): yield sample.Sample( name='storage.objects.containers', type=sample.TYPE_GAUGE, volume=int(account['x-account-container-count']), unit='container', user_id=None, project_id=tenant, resource_id=tenant, resource_metadata=None, ) class ContainersObjectsPollster(_ContainersBase): """Collect the objects count per container for each project""" METHOD = 'get' def get_samples(self, manager, cache, resources): tenants = resources for tenant, account in self._iter_accounts(manager.keystone, cache, tenants): containers_info = account[1] for container in containers_info: yield sample.Sample( name='storage.containers.objects', 
type=sample.TYPE_GAUGE, volume=int(container['count']), unit='object', user_id=None, project_id=tenant, resource_id=tenant + '/' + container['name'], resource_metadata=self._get_resource_metadata(container), ) class ContainersSizePollster(_ContainersBase): """Collect the total objects size per container for each project""" METHOD = 'get' def get_samples(self, manager, cache, resources): tenants = resources for tenant, account in self._iter_accounts(manager.keystone, cache, tenants): containers_info = account[1] for container in containers_info: yield sample.Sample( name='storage.containers.objects.size', type=sample.TYPE_GAUGE, volume=int(container['bytes']), unit='B', user_id=None, project_id=tenant, resource_id=tenant + '/' + container['name'], resource_metadata=self._get_resource_metadata(container), ) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/opts.py0000664000175100017510000001155515033033467017473 0ustar00mylesmyles# Copyright 2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import itertools import socket from keystoneauth1 import loading from oslo_config import cfg import ceilometer.alarm.discovery import ceilometer.cmd.polling import ceilometer.compute.discovery import ceilometer.compute.virt.inspector import ceilometer.compute.virt.libvirt.utils import ceilometer.event.converter import ceilometer.image.discovery import ceilometer.ipmi.pollsters import ceilometer.keystone_client import ceilometer.meter.notifications import ceilometer.neutron_client import ceilometer.notification import ceilometer.nova_client import ceilometer.objectstore.rgw import ceilometer.objectstore.swift import ceilometer.pipeline.base import ceilometer.polling.manager import ceilometer.publisher.messaging import ceilometer.publisher.utils import ceilometer.sample import ceilometer.utils import ceilometer.volume.discovery OPTS = [ cfg.HostAddressOpt('host', default=socket.gethostname(), sample_default='', help='Hostname, FQDN or IP address of this host. ' 'Must be valid within AMQP key.'), cfg.IntOpt('http_timeout', default=600, deprecated_for_removal=True, deprecated_reason='This option has no effect', help='Timeout seconds for HTTP requests. Set it to None to ' 'disable timeout.'), cfg.IntOpt('max_parallel_requests', default=64, min=1, help='Maximum number of parallel requests for ' 'services to handle at the same time.'), ] def list_opts(): # FIXME(sileht): readd pollster namespaces in the generated configfile # This have been removed due to a recursive import issue return [ ('DEFAULT', itertools.chain(ceilometer.cmd.polling.CLI_OPTS, ceilometer.compute.virt.inspector.OPTS, ceilometer.compute.virt.libvirt.utils.OPTS, ceilometer.objectstore.swift.OPTS, ceilometer.pipeline.base.OPTS, ceilometer.polling.manager.POLLING_OPTS, ceilometer.sample.OPTS, ceilometer.utils.OPTS, OPTS)), ('compute', ceilometer.compute.discovery.OPTS), ('coordination', [ cfg.StrOpt( 'backend_url', secret=True, help='The backend URL to use for distributed coordination. 
If ' 'left empty, per-deployment central agent and per-host ' 'compute agent won\'t do workload ' 'partitioning and will only function correctly if a ' 'single instance of that service is running.') ]), ('event', ceilometer.event.converter.OPTS), ('ipmi', ceilometer.ipmi.pollsters.OPTS), ('meter', ceilometer.meter.notifications.OPTS), ('notification', itertools.chain(ceilometer.notification.OPTS, ceilometer.notification.EXCHANGES_OPTS)), ('polling', ceilometer.polling.manager.POLLING_OPTS), ('publisher', ceilometer.publisher.utils.OPTS), ('publisher_notifier', ceilometer.publisher.messaging.NOTIFIER_OPTS), ('rgw_admin_credentials', ceilometer.objectstore.rgw.CREDENTIAL_OPTS), ('rgw_client', ceilometer.objectstore.rgw.CLIENT_OPTS), ('service_types', itertools.chain(ceilometer.alarm.discovery.SERVICE_OPTS, ceilometer.image.discovery.SERVICE_OPTS, ceilometer.neutron_client.SERVICE_OPTS, ceilometer.nova_client.SERVICE_OPTS, ceilometer.objectstore.rgw.SERVICE_OPTS, ceilometer.objectstore.swift.SERVICE_OPTS, ceilometer.volume.discovery.SERVICE_OPTS,)) ] def list_keystoneauth_opts(): # NOTE(sileht): the configuration file contains only the options # for the password plugin that handles keystone v2 and v3 API # with discovery. But other options are possible. 
return [('service_credentials', itertools.chain( loading.get_auth_common_conf_options(), loading.get_auth_plugin_conf_options('password'), ceilometer.keystone_client.CLI_OPTS ))] ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7949414 ceilometer-24.1.0.dev59/ceilometer/pipeline/0000775000175100017510000000000015033033521017721 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/pipeline/__init__.py0000664000175100017510000000000015033033467022031 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/pipeline/base.py0000664000175100017510000002517715033033467021232 0ustar00mylesmyles# # Copyright 2013 Intel Corp. # Copyright 2014 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from oslo_config import cfg from oslo_log import log import oslo_messaging from ceilometer import agent from ceilometer import publisher OPTS = [ cfg.StrOpt('pipeline_cfg_file', default="pipeline.yaml", help="Configuration file for pipeline definition." ), cfg.StrOpt('event_pipeline_cfg_file', default="event_pipeline.yaml", help="Configuration file for event pipeline definition." 
), ] LOG = log.getLogger(__name__) class PipelineException(agent.ConfigException): def __init__(self, message, cfg): super().__init__('Pipeline', message, cfg) class PublishContext: def __init__(self, pipelines): self.pipelines = pipelines or [] def __enter__(self): def p(data): for p in self.pipelines: p.publish_data(data) return p def __exit__(self, exc_type, exc_value, traceback): for p in self.pipelines: p.flush() class PipelineSource(agent.Source): """Represents a source of samples or events.""" def __init__(self, cfg): try: super().__init__(cfg) except agent.SourceException as err: raise PipelineException(err.msg, cfg) try: self.sinks = cfg['sinks'] except KeyError as err: raise PipelineException( "Required field %s not specified" % err.args[0], cfg) def check_sinks(self, sinks): if not self.sinks: raise PipelineException( "No sink defined in source %s" % self, self.cfg) for sink in self.sinks: if sink not in sinks: raise PipelineException( "Dangling sink {} from source {}".format(sink, self), self.cfg) class Sink: """Represents a sink for the transformation and publication of data. Each sink config is concerned *only* with the transformation rules and publication conduits for data. In effect, a sink describes a chain of handlers. The chain ends with one or more publishers. At the end of the chain, publishers publish the data. The exact publishing method depends on publisher type, for example, pushing into data storage via the message bus providing guaranteed delivery, or for loss-tolerant data UDP may be used. 
""" def __init__(self, conf, cfg, publisher_manager): self.conf = conf self.cfg = cfg try: self.name = cfg['name'] except KeyError as err: raise PipelineException( "Required field %s not specified" % err.args[0], cfg) if not cfg.get('publishers'): raise PipelineException("No publisher specified", cfg) self.publishers = [] for p in cfg['publishers']: if '://' not in p: # Support old format without URL p = p + "://" try: self.publishers.append(publisher_manager.get(p)) except Exception: LOG.error("Unable to load publisher %s", p, exc_info=True) self.multi_publish = True if len(self.publishers) > 1 else False def __str__(self): return self.name @staticmethod def flush(): """Flush data after all events have been injected to pipeline.""" class Pipeline(metaclass=abc.ABCMeta): """Represents a coupling between a sink and a corresponding source.""" def __init__(self, conf, source, sink): self.conf = conf self.source = source self.sink = sink self.name = str(self) def __str__(self): return (self.source.name if self.source.name == self.sink.name else '{}:{}'.format(self.source.name, self.sink.name)) def flush(self): self.sink.flush() @property def publishers(self): return self.sink.publishers @abc.abstractmethod def publish_data(self, data): """Publish data from pipeline.""" @abc.abstractmethod def supported(self, data): """Attribute to filter on. Pass if no partitioning.""" class PublisherManager: def __init__(self, conf, purpose): self._loaded_publishers = {} self._conf = conf self._purpose = purpose def get(self, url): if url not in self._loaded_publishers: p = publisher.get_publisher( self._conf, url, 'ceilometer.%s.publisher' % self._purpose) self._loaded_publishers[url] = p return self._loaded_publishers[url] class PipelineManager(agent.ConfigManagerBase): """Pipeline Manager Pipeline manager sets up pipelines according to config file """ def __init__(self, conf, cfg_file): """Setup the pipelines according to config. 
The configuration is supported as follows: Decoupled: the source and sink configuration are separately specified before being linked together. This allows source- specific configuration, such as meter handling, to be kept focused only on the fine-grained source while avoiding the necessity for wide duplication of sink-related config. The configuration is provided in the form of separate lists of dictionaries defining sources and sinks, for example: {"sources": [{"name": source_1, "meters" : ["meter_1", "meter_2"], "sinks" : ["sink_1", "sink_2"] }, {"name": source_2, "meters" : ["meter_3"], "sinks" : ["sink_2"] }, ], "sinks": [{"name": sink_1, "publishers": ["publisher_1", "publisher_2"] }, {"name": sink_2, "publishers": ["publisher_3"] }, ] } Valid meter format is '*', '!meter_name', or 'meter_name'. '*' is wildcard symbol means any meters; '!meter_name' means "meter_name" will be excluded; 'meter_name' means 'meter_name' will be included. Valid meters definition is all "included meter names", all "excluded meter names", wildcard and "excluded meter names", or only wildcard. 
Publisher's name is plugin name in setup.cfg """ super().__init__(conf) cfg = self.load_config(cfg_file) self.pipelines = [] if not ('sources' in cfg and 'sinks' in cfg): raise PipelineException("Both sources & sinks are required", cfg) publisher_manager = PublisherManager(self.conf, self.pm_type) unique_names = set() sources = [] for s in cfg.get('sources'): name = s.get('name') if name in unique_names: raise PipelineException("Duplicated source names: %s" % name, self) else: unique_names.add(name) sources.append(self.pm_source(s)) unique_names.clear() sinks = {} for s in cfg.get('sinks'): name = s.get('name') if name in unique_names: raise PipelineException("Duplicated sink names: %s" % name, self) else: unique_names.add(name) sinks[s['name']] = self.pm_sink(self.conf, s, publisher_manager) unique_names.clear() for source in sources: source.check_sinks(sinks) for target in source.sinks: pipe = self.pm_pipeline(self.conf, source, sinks[target]) if pipe.name in unique_names: raise PipelineException( "Duplicate pipeline name: %s. Ensure pipeline" " names are unique. 
(name is the source and sink" " names combined)" % pipe.name, cfg) else: unique_names.add(pipe.name) self.pipelines.append(pipe) unique_names.clear() @property @abc.abstractmethod def pm_type(self): """Pipeline manager type.""" @property @abc.abstractmethod def pm_pipeline(self): """Pipeline class""" @property @abc.abstractmethod def pm_source(self): """Pipeline source class""" @property @abc.abstractmethod def pm_sink(self): """Pipeline sink class""" def publisher(self): """Build publisher for pipeline publishing.""" return PublishContext(self.pipelines) def get_main_endpoints(self): """Return endpoints for main queue.""" pass class NotificationEndpoint: """Base Endpoint for plugins that support the notification API.""" event_types = [] """List of strings to filter messages on.""" def __init__(self, conf, publisher): super().__init__() # NOTE(gordc): this is filter rule used by oslo.messaging to dispatch # messages to an endpoint. if self.event_types: self.filter_rule = oslo_messaging.NotificationFilter( event_type='|'.join(self.event_types)) self.conf = conf self.publisher = publisher @abc.abstractmethod def process_notifications(self, priority, notifications): """Return a sequence of Counter instances for the given message. :param message: Message to process. 
""" @classmethod def _consume_and_drop(cls, notifications): """RPC endpoint for useless notification level""" # NOTE(sileht): nothing special todo here, but because we listen # for the generic notification exchange we have to consume all its # queues audit = _consume_and_drop critical = _consume_and_drop debug = _consume_and_drop error = _consume_and_drop info = _consume_and_drop sample = _consume_and_drop warn = _consume_and_drop ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7949414 ceilometer-24.1.0.dev59/ceilometer/pipeline/data/0000775000175100017510000000000015033033521020632 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/pipeline/data/event_definitions.yaml0000664000175100017510000004072715033033467025255 0ustar00mylesmyles--- - event_type: 'compute.instance.*' traits: &instance_traits tenant_id: fields: payload.tenant_id user_id: fields: payload.user_id instance_id: fields: payload.instance_id display_name: fields: payload.display_name resource_id: fields: payload.instance_id cell_name: fields: payload.cell_name host: fields: publisher_id.`split(., 1, 1)` service: fields: publisher_id.`split(., 0, -1)` memory_mb: type: int fields: payload.memory_mb disk_gb: type: int fields: payload.disk_gb root_gb: type: int fields: payload.root_gb ephemeral_gb: type: int fields: payload.ephemeral_gb vcpus: type: int fields: payload.vcpus instance_type_id: fields: payload.instance_type_id instance_type: fields: payload.instance_type state: fields: payload.state os_architecture: fields: payload.image_meta.'org.openstack__1__architecture' os_version: fields: payload.image_meta.'org.openstack__1__os_version' os_distro: fields: payload.image_meta.'org.openstack__1__os_distro' launched_at: type: datetime fields: payload.launched_at deleted_at: type: datetime fields: payload.deleted_at - event_type: compute.instance.create.end traits: 
<<: *instance_traits availability_zone: fields: payload.availability_zone - event_type: compute.instance.update traits: <<: *instance_traits old_state: fields: payload.old_state - event_type: compute.instance.exists traits: <<: *instance_traits audit_period_beginning: type: datetime fields: payload.audit_period_beginning audit_period_ending: type: datetime fields: payload.audit_period_ending - event_type: ['volume.exists', 'volume.retype', 'volume.create.*', 'volume.delete.*', 'volume.resize.*', 'volume.attach.*', 'volume.detach.*', 'volume.update.*', 'snapshot.exists', 'snapshot.create.*', 'snapshot.delete.*', 'snapshot.update.*', 'volume.transfer.accept.end', 'snapshot.transfer.accept.end'] traits: &cinder_traits user_id: fields: payload.user_id project_id: fields: payload.tenant_id availability_zone: fields: payload.availability_zone display_name: fields: payload.display_name replication_status: fields: payload.replication_status status: fields: payload.status created_at: type: datetime fields: payload.created_at image_id: fields: payload.glance_metadata[?key=image_id].value instance_id: fields: payload.volume_attachment[0].server_id - event_type: ['volume.transfer.*', 'volume.exists', 'volume.retype', 'volume.create.*', 'volume.delete.*', 'volume.resize.*', 'volume.attach.*', 'volume.detach.*', 'volume.update.*', 'snapshot.transfer.accept.end'] traits: <<: *cinder_traits resource_id: fields: payload.volume_id host: fields: payload.host size: type: int fields: payload.size type: fields: payload.volume_type replication_status: fields: payload.replication_status - event_type: ['snapshot.transfer.accept.end'] traits: <<: *cinder_traits resource_id: fields: payload.snapshot_id project_id: fields: payload.tenant_id - event_type: ['share.create.*', 'share.delete.*', 'share.extend.*', 'share.shrink.*'] traits: &share_traits share_id: fields: payload.share_id user_id: fields: payload.user_id project_id: fields: payload.tenant_id snapshot_id: fields: payload.snapshot_id 
availability_zone: fields: payload.availability_zone status: fields: payload.status created_at: type: datetime fields: payload.created_at share_group_id: fields: payload.share_group_id size: type: int fields: payload.size name: fields: payload.name proto: fields: payload.proto is_public: fields: payload.is_public description: fields: payload.description host: fields: payload.host - event_type: ['snapshot.exists', 'snapshot.create.*', 'snapshot.delete.*', 'snapshot.update.*'] traits: <<: *cinder_traits resource_id: fields: payload.snapshot_id volume_id: fields: payload.volume_id - event_type: ['image_volume_cache.*'] traits: image_id: fields: payload.image_id host: fields: payload.host - event_type: ['image.create', 'image.update', 'image.upload', 'image.delete'] traits: &glance_crud project_id: fields: payload.owner resource_id: fields: payload.id name: fields: payload.name status: fields: payload.status created_at: type: datetime fields: payload.created_at user_id: fields: payload.owner deleted_at: type: datetime fields: payload.deleted_at size: type: int fields: payload.size - event_type: image.send traits: &glance_send receiver_project: fields: payload.receiver_tenant_id receiver_user: fields: payload.receiver_user_id user_id: fields: payload.owner_id image_id: fields: payload.image_id destination_ip: fields: payload.destination_ip bytes_sent: type: int fields: payload.bytes_sent - event_type: orchestration.stack.* traits: &orchestration_crud project_id: fields: payload.tenant_id user_id: fields: ['ctxt.trustor_user_id', 'ctxt.user_id'] resource_id: fields: payload.stack_identity name: fields: payload.name - event_type: ['identity.user.*', 'identity.project.*', 'identity.group.*', 'identity.role.*', 'identity.OS-TRUST:trust.*', 'identity.region.*', 'identity.service.*', 'identity.endpoint.*', 'identity.policy.*'] traits: &identity_crud resource_id: fields: payload.resource_info initiator_id: fields: payload.initiator.id project_id: fields: 
payload.initiator.project_id domain_id: fields: payload.initiator.domain_id - event_type: identity.role_assignment.* traits: &identity_role_assignment role: fields: payload.role group: fields: payload.group domain: fields: payload.domain user: fields: payload.user project: fields: payload.project - event_type: identity.authenticate traits: &identity_authenticate typeURI: fields: payload.typeURI id: fields: payload.id action: fields: payload.action eventType: fields: payload.eventType eventTime: type: datetime fields: payload.eventTime outcome: fields: payload.outcome initiator_typeURI: fields: payload.initiator.typeURI initiator_id: fields: payload.initiator.id initiator_name: fields: payload.initiator.name initiator_host_agent: fields: payload.initiator.host.agent initiator_host_addr: fields: payload.initiator.host.address target_typeURI: fields: payload.target.typeURI target_id: fields: payload.target.id observer_typeURI: fields: payload.observer.typeURI observer_id: fields: payload.observer.id - event_type: objectstore.http.request traits: &objectstore_request typeURI: fields: payload.typeURI id: fields: payload.id action: fields: payload.action eventType: fields: payload.eventType eventTime: type: datetime fields: payload.eventTime outcome: fields: payload.outcome initiator_typeURI: fields: payload.initiator.typeURI initiator_id: fields: payload.initiator.id initiator_project_id: fields: payload.initiator.project_id target_typeURI: fields: payload.target.typeURI target_id: fields: payload.target.id target_action: fields: payload.target.action target_metadata_path: fields: payload.target.metadata.path target_metadata_version: fields: payload.target.metadata.version target_metadata_container: fields: payload.target.metadata.container target_metadata_object: fields: payload.target.metadata.object observer_id: fields: payload.observer.id - event_type: ['network.*', 'subnet.*', 'port.*', 'router.*', 'floatingip.*', 'firewall.*', 'firewall_policy.*', 
'firewall_rule.*', 'vpnservice.*', 'ipsecpolicy.*', 'ikepolicy.*', 'ipsec_site_connection.*'] traits: &network_traits user_id: fields: ctxt.user_id project_id: fields: ctxt.tenant_id - event_type: network.* traits: <<: *network_traits name: fields: payload.network.name resource_id: fields: ['payload.network.id', 'payload.id'] - event_type: subnet.* traits: <<: *network_traits name: fields: payload.subnet.name resource_id: fields: ['payload.subnet.id', 'payload.id'] - event_type: port.* traits: <<: *network_traits name: fields: payload.port.name resource_id: fields: ['payload.port.id', 'payload.id'] - event_type: router.* traits: <<: *network_traits name: fields: payload.router.name resource_id: fields: ['payload.router.id', 'payload.id'] - event_type: floatingip.* traits: <<: *network_traits resource_id: fields: ['payload.floatingip.id', 'payload.id'] - event_type: firewall.* traits: <<: *network_traits name: fields: payload.firewall.name resource_id: fields: ['payload.firewall.id', 'payload.id'] - event_type: firewall_policy.* traits: <<: *network_traits name: fields: payload.firewall_policy.name resource_id: fields: ['payload.firewall_policy.id', 'payload.id'] - event_type: firewall_rule.* traits: <<: *network_traits name: fields: payload.firewall_rule.name resource_id: fields: ['payload.firewall_rule.id', 'payload.id'] - event_type: vpnservice.* traits: <<: *network_traits name: fields: payload.vpnservice.name resource_id: fields: ['payload.vpnservice.id', 'payload.id'] - event_type: ipsecpolicy.* traits: <<: *network_traits name: fields: payload.ipsecpolicy.name resource_id: fields: ['payload.ipsecpolicy.id', 'payload.id'] - event_type: ikepolicy.* traits: <<: *network_traits name: fields: payload.ikepolicy.name resource_id: fields: ['payload.ikepolicy.id', 'payload.id'] - event_type: ipsec_site_connection.* traits: <<: *network_traits resource_id: fields: ['payload.ipsec_site_connection.id', 'payload.id'] - event_type: '*http.*' traits: &http_audit project_id: 
fields: payload.initiator.project_id user_id: fields: payload.initiator.id typeURI: fields: payload.typeURI eventType: fields: payload.eventType action: fields: payload.action outcome: fields: payload.outcome id: fields: payload.id eventTime: type: datetime fields: payload.eventTime requestPath: fields: payload.requestPath observer_id: fields: payload.observer.id target_id: fields: payload.target.id target_typeURI: fields: payload.target.typeURI target_name: fields: payload.target.name initiator_typeURI: fields: payload.initiator.typeURI initiator_id: fields: payload.initiator.id initiator_name: fields: payload.initiator.name initiator_host_address: fields: payload.initiator.host.address - event_type: '*http.response' traits: <<: *http_audit reason_code: fields: payload.reason.reasonCode - event_type: ['dns.domain.create', 'dns.domain.update', 'dns.domain.delete'] traits: &dns_domain_traits status: fields: payload.status retry: fields: payload.retry description: fields: payload.description expire: fields: payload.expire email: fields: payload.email ttl: fields: payload.ttl action: fields: payload.action name: fields: payload.name resource_id: fields: payload.id created_at: type: datetime fields: payload.created_at updated_at: type: datetime fields: payload.updated_at version: fields: payload.version parent_domain_id: fields: parent_domain_id serial: fields: payload.serial - event_type: dns.domain.exists traits: <<: *dns_domain_traits audit_period_beginning: type: datetime fields: payload.audit_period_beginning audit_period_ending: type: datetime fields: payload.audit_period_ending - event_type: trove.* traits: &trove_base_traits instance_type: fields: payload.instance_type user_id: fields: payload.user_id resource_id: fields: payload.instance_id instance_type_id: fields: payload.instance_type_id launched_at: type: datetime fields: payload.launched_at instance_name: fields: payload.instance_name state: fields: payload.state nova_instance_id: fields: 
payload.nova_instance_id service_id: fields: payload.service_id created_at: type: datetime fields: payload.created_at region: fields: payload.region - event_type: ['trove.instance.create', 'trove.instance.modify_volume', 'trove.instance.modify_flavor', 'trove.instance.delete'] traits: &trove_common_traits name: fields: payload.name availability_zone: fields: payload.availability_zone instance_size: type: int fields: payload.instance_size volume_size: type: int fields: payload.volume_size nova_volume_id: fields: payload.nova_volume_id - event_type: trove.instance.create traits: <<: [*trove_base_traits, *trove_common_traits] - event_type: trove.instance.modify_volume traits: <<: [*trove_base_traits, *trove_common_traits] old_volume_size: type: int fields: payload.old_volume_size modify_at: type: datetime fields: payload.modify_at - event_type: trove.instance.modify_flavor traits: <<: [*trove_base_traits, *trove_common_traits] old_instance_size: type: int fields: payload.old_instance_size modify_at: type: datetime fields: payload.modify_at - event_type: trove.instance.delete traits: <<: [*trove_base_traits, *trove_common_traits] deleted_at: type: datetime fields: payload.deleted_at - event_type: trove.instance.exists traits: <<: *trove_base_traits display_name: fields: payload.display_name audit_period_beginning: type: datetime fields: payload.audit_period_beginning audit_period_ending: type: datetime fields: payload.audit_period_ending - event_type: profiler.* traits: project: fields: payload.project service: fields: payload.service name: fields: payload.name base_id: fields: payload.base_id trace_id: fields: payload.trace_id parent_id: fields: payload.parent_id timestamp: type: datetime fields: payload.timestamp host: fields: payload.info.host path: fields: payload.info.request.path query: fields: payload.info.request.query method: fields: payload.info.request.method scheme: fields: payload.info.request.scheme db.statement: fields: payload.info.db.statement 
db.params: fields: payload.info.db.params - event_type: 'magnum.cluster.*' traits: &magnum_cluster_crud id: fields: payload.id typeURI: fields: payload.typeURI eventType: fields: payload.eventType eventTime: type: datetime fields: payload.eventTime action: fields: payload.action outcome: fields: payload.outcome initiator_id: fields: payload.initiator.id initiator_typeURI: fields: payload.initiator.typeURI initiator_name: fields: payload.initiator.name initiator_host_agent: fields: payload.initiator.host.agent initiator_host_address: fields: payload.initiator.host.address target_id: fields: payload.target.id target_typeURI: fields: payload.target.typeURI observer_id: fields: payload.observer.id observer_typeURI: fields: payload.observer.typeURI - event_type: 'alarm.*' traits: id: fields: payload.alarm_id user_id: fields: payload.user_id project_id: fields: payload.project_id on_behalf_of: fields: payload.on_behalf_of severity: fields: payload.severity detail: fields: payload.detail type: fields: payload.type ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/pipeline/data/event_pipeline.yaml0000664000175100017510000000026015033033467024533 0ustar00mylesmyles--- sources: - name: event_source events: - "*" sinks: - event_sink sinks: - name: event_sink publishers: - notifier:// ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/pipeline/data/pipeline.yaml0000664000175100017510000000025715033033467023340 0ustar00mylesmyles--- sources: - name: meter_source meters: - "*" sinks: - meter_sink sinks: - name: meter_sink publishers: - gnocchi:// ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/pipeline/event.py0000664000175100017510000001053415033033467021430 0ustar00mylesmyles# Copyright 2012-2014 eNovance # # Licensed under the Apache License, 
Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log import oslo_messaging from stevedore import extension from ceilometer import agent from ceilometer.event import converter from ceilometer.pipeline import base LOG = log.getLogger(__name__) class EventEndpoint(base.NotificationEndpoint): event_types = [] def __init__(self, conf, publisher): super().__init__(conf, publisher) LOG.debug('Loading event definitions') self.event_converter = converter.setup_events( conf, extension.ExtensionManager( namespace='ceilometer.event.trait_plugin')) def info(self, notifications): """Convert message at info level to Ceilometer Event. :param notifications: list of notifications """ return self.process_notifications('info', notifications) def error(self, notifications): """Convert message at error level to Ceilometer Event. :param notifications: list of notifications """ return self.process_notifications('error', notifications) def process_notifications(self, priority, notifications): for message in notifications: try: event = self.event_converter.to_event(priority, message) if event is not None: with self.publisher as p: p(event) except Exception: if not self.conf.notification.ack_on_event_error: return oslo_messaging.NotificationResult.REQUEUE LOG.error('Fail to process a notification', exc_info=True) return oslo_messaging.NotificationResult.HANDLED class EventSource(base.PipelineSource): """Represents a source of events. 
In effect it is a set of notification handlers capturing events for a set of matching notifications. """ def __init__(self, cfg): super().__init__(cfg) self.events = cfg.get('events') try: self.check_source_filtering(self.events, 'events') except agent.SourceException as err: raise base.PipelineException(err.msg, cfg) def support_event(self, event_name): return self.is_supported(self.events, event_name) class EventSink(base.Sink): def publish_events(self, events): if events: for p in self.publishers: try: p.publish_events(events) except Exception: LOG.error("Pipeline %(pipeline)s: %(status)s " "after error from publisher %(pub)s" % {'pipeline': self, 'status': 'Continue' if self.multi_publish else 'Exit', 'pub': p}, exc_info=True) if not self.multi_publish: raise class EventPipeline(base.Pipeline): """Represents a pipeline for Events.""" def __str__(self): # NOTE(gordc): prepend a namespace so we ensure event and sample # pipelines do not have the same name. return 'event:%s' % super().__str__() def publish_data(self, events): if not isinstance(events, list): events = [events] supported = [e for e in events if self.supported(e)] self.sink.publish_events(supported) def supported(self, event): return self.source.support_event(event.event_type) class EventPipelineManager(base.PipelineManager): pm_type = 'event' pm_pipeline = EventPipeline pm_source = EventSource pm_sink = EventSink def __init__(self, conf): super().__init__( conf, conf.event_pipeline_cfg_file) def get_main_endpoints(self): return [EventEndpoint(self.conf, self.publisher())] ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/pipeline/sample.py0000664000175100017510000001263215033033467021571 0ustar00mylesmyles# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from stevedore import extension from ceilometer import agent from ceilometer.pipeline import base LOG = log.getLogger(__name__) class SampleEndpoint(base.NotificationEndpoint): def info(self, notifications): """Convert message at info level to Ceilometer sample. :param notifications: list of notifications """ return self.process_notifications('info', notifications) def sample(self, notifications): """Convert message at sample level to Ceilometer Event. :param notifications: list of notifications """ return self.process_notifications('sample', notifications) def process_notifications(self, priority, notifications): for message in notifications: try: LOG.debug("Processing sample notification [%s] for publisher " "[%s] with priority [%s] using the agent [%s].", message, self.publisher, priority, self) with self.publisher as p: p(list(self.build_sample(message))) except Exception as e: LOG.error('Fail to process notification message [%s]' % message, exc_info=True) raise e def build_sample(notification): """Build sample from provided notification.""" pass class SampleSource(base.PipelineSource): """Represents a source of samples. In effect it is a set of notification handlers processing samples for a set of matching meters. Each source encapsulates meter name matching and mapping to one or more sinks for publication. 
""" def __init__(self, cfg): super().__init__(cfg) try: self.meters = cfg['meters'] except KeyError: raise base.PipelineException("Missing meters value", cfg) try: self.check_source_filtering(self.meters, 'meters') except agent.SourceException as err: raise base.PipelineException(err.msg, cfg) def support_meter(self, meter_name): return self.is_supported(self.meters, meter_name) class SampleSink(base.Sink): def publish_samples(self, samples): """Push samples into pipeline for publishing. :param samples: Sample list. """ if samples: for p in self.publishers: try: p.publish_samples(samples) except Exception: LOG.error("Pipeline %(pipeline)s: Continue after " "error from publisher %(pub)s" % {'pipeline': self, 'pub': p}, exc_info=True) @staticmethod def flush(): pass class SamplePipeline(base.Pipeline): """Represents a pipeline for Samples.""" def _validate_volume(self, s): volume = s.volume if volume is None: LOG.warning( 'metering data %(counter_name)s for %(resource_id)s ' '@ %(timestamp)s has no volume (volume: None), the sample will' ' be dropped' % {'counter_name': s.name, 'resource_id': s.resource_id, 'timestamp': s.timestamp if s.timestamp else 'NO TIMESTAMP'} ) return False if not isinstance(volume, (int, float)): try: volume = float(volume) except ValueError: LOG.warning( 'metering data %(counter_name)s for %(resource_id)s ' '@ %(timestamp)s has volume which is not a number ' '(volume: %(counter_volume)s), the sample will be dropped' % {'counter_name': s.name, 'resource_id': s.resource_id, 'timestamp': ( s.timestamp if s.timestamp else 'NO TIMESTAMP'), 'counter_volume': volume} ) return False return True def publish_data(self, samples): if not isinstance(samples, list): samples = [samples] supported = [s for s in samples if self.supported(s) and self._validate_volume(s)] self.sink.publish_samples(supported) def supported(self, sample): return self.source.support_meter(sample.name) class SamplePipelineManager(base.PipelineManager): pm_type = 'sample' 
pm_pipeline = SamplePipeline pm_source = SampleSource pm_sink = SampleSink def __init__(self, conf): super().__init__( conf, conf.pipeline_cfg_file) def get_main_endpoints(self): exts = extension.ExtensionManager( namespace='ceilometer.sample.endpoint', invoke_on_load=True, invoke_args=(self.conf, self.publisher())) return [ext.obj for ext in exts] ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7949414 ceilometer-24.1.0.dev59/ceilometer/polling/0000775000175100017510000000000015033033521017560 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/polling/__init__.py0000664000175100017510000000000015033033467021670 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7949414 ceilometer-24.1.0.dev59/ceilometer/polling/discovery/0000775000175100017510000000000015033033521021567 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/polling/discovery/__init__.py0000664000175100017510000000000015033033467023677 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/polling/discovery/endpoint.py0000664000175100017510000000277015033033467024000 0ustar00mylesmyles# Copyright 2014-2015 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from ceilometer import keystone_client from ceilometer.polling import plugin_base as plugin LOG = log.getLogger(__name__) class EndpointDiscovery(plugin.DiscoveryBase): """Discovery that supplies service endpoints. This discovery should be used when the relevant APIs are not well suited to dividing the pollster's work into smaller pieces than a whole service at once. """ def discover(self, manager, param=None): endpoints = keystone_client.get_service_catalog( manager.keystone).get_urls( service_type=param, interface=self.conf.service_credentials.interface, region_name=self.conf.service_credentials.region_name) if not endpoints: LOG.warning('No endpoints found for service %s', "" if param is None else param) return [] return endpoints ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/polling/discovery/localnode.py0000664000175100017510000000155515033033467024120 0ustar00mylesmyles# Copyright 2015 Intel # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from ceilometer.polling import plugin_base class LocalNodeDiscovery(plugin_base.DiscoveryBase): def discover(self, manager, param=None): """Return local node as resource.""" return [self.conf.host] @property def group_id(self): return "LocalNode-%s" % self.conf.host ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/polling/discovery/non_openstack_credentials_discovery.py0000664000175100017510000000415215033033467031461 0ustar00mylesmyles# Copyright 2014-2015 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from ceilometer.polling.discovery.endpoint import EndpointDiscovery from urllib import parse as urlparse import requests LOG = log.getLogger(__name__) class NonOpenStackCredentialsDiscovery(EndpointDiscovery): """Barbican secrets discovery Discovery that supplies non-OpenStack credentials for the dynamic pollster sub-system. This solution uses the EndpointDiscovery to find the Barbican URL where we can retrieve the credentials. 
""" BARBICAN_URL_GET_PAYLOAD_PATTERN = "/v1/secrets/%s/payload" def discover(self, manager, param=None): barbican_secret = "No secrets found" if not param: return [barbican_secret] barbican_endpoints = super().discover(manager, "key-manager") if not barbican_endpoints: LOG.warning("No Barbican endpoints found to execute the" " credentials discovery process to [%s].", param) return [barbican_secret] else: LOG.debug("Barbican endpoint found [%s].", barbican_endpoints) barbican_server = next(iter(barbican_endpoints)) barbican_endpoint = self.BARBICAN_URL_GET_PAYLOAD_PATTERN % param babrican_url = urlparse.urljoin(barbican_server, barbican_endpoint) LOG.debug("Retrieving secrets from: %s.", babrican_url) resp = manager._keystone.session.get(babrican_url, authenticated=True) if resp.status_code != requests.codes.ok: resp.raise_for_status() return [resp._content] ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/polling/discovery/tenant.py0000664000175100017510000000352315033033467023446 0ustar00mylesmyles# Copyright 2014 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from ceilometer.polling import plugin_base as plugin LOG = log.getLogger(__name__) class TenantDiscovery(plugin.DiscoveryBase): """Discovery that supplies keystone tenants. This discovery should be used when the pollster's work can't be divided into smaller pieces than per-tenants. 
Example of this is the Swift pollster, which polls account details and does so per-project. """ def discover(self, manager, param=None): domains = manager.keystone.domains.list() LOG.debug(f"Found {len(domains)} keystone domains") tenants = [] for domain in domains: domain_tenants = manager.keystone.projects.list(domain) if self.conf.polling.ignore_disabled_projects: enabled_tenants = [tenant for tenant in domain_tenants if tenant.enabled] LOG.debug(f"Found {len(enabled_tenants)} enabled " f"tenants in domain {domain.name}") tenants = enabled_tenants + domain_tenants else: LOG.debug(f"Found {len(domain_tenants)} " f"tenants in domain {domain.name}") tenants = tenants + domain_tenants return tenants or [] ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/polling/dynamic_pollster.py0000664000175100017510000013563315033033467023526 0ustar00mylesmyles# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Dynamic pollster component This component enables operators to create new pollsters on the fly via configuration. The configuration files are read from '/etc/ceilometer/pollsters.d/'. The pollster are defined in YAML files similar to the idea used for handling notifications. 
""" import copy import json import re import subprocess import time import xmltodict from oslo_log import log from requests import RequestException from ceilometer import declarative from ceilometer.polling import plugin_base from ceilometer import sample as ceilometer_sample from ceilometer import utils as ceilometer_utils from functools import reduce import operator import requests from urllib import parse as urlparse LOG = log.getLogger(__name__) def validate_sample_type(sample_type): if sample_type not in ceilometer_sample.TYPES: raise declarative.DynamicPollsterDefinitionException( "Invalid sample type [%s]. Valid ones are [%s]." % (sample_type, ceilometer_sample.TYPES)) class XMLResponseHandler: """This response handler converts an XML in string format to a dict""" @staticmethod def handle(response): return xmltodict.parse(response) class JsonResponseHandler: """This response handler converts a JSON in string format to a dict""" @staticmethod def handle(response): return json.loads(response) class PlainTextResponseHandler: """Response handler converts string to a list of dict [{'out'=}]""" @staticmethod def handle(response): return [{'out': str(response)}] VALID_HANDLERS = { 'json': JsonResponseHandler, 'xml': XMLResponseHandler, 'text': PlainTextResponseHandler } def validate_response_handler(val): if not isinstance(val, list): raise declarative.DynamicPollsterDefinitionException( "Invalid response_handlers configuration. It must be a list. " "Provided value type: %s" % type(val).__name__) for value in val: if value not in VALID_HANDLERS: raise declarative.DynamicPollsterDefinitionException( "Invalid response_handler value [%s]. Accepted values " "are [%s]" % (value, ', '.join(list(VALID_HANDLERS)))) def validate_extra_metadata_skip_samples(val): if not isinstance(val, list) or next( filter(lambda v: not isinstance(v, dict), val), None): raise declarative.DynamicPollsterDefinitionException( "Invalid extra_metadata_fields_skip configuration." 
            " It must be a list of maps. Provided value: %s,"
            " value type: %s." % (val, type(val).__name__))


class ResponseHandlerChain:
    """Tries to convert a string to a dict using the response handlers"""

    def __init__(self, response_handlers, **meta):
        # Accept any iterable of handler classes (e.g. a map object) and
        # normalize it to a list so it can be iterated multiple times.
        if not isinstance(response_handlers, list):
            response_handlers = list(response_handlers)
        self.response_handlers = response_handlers
        # Extra context (e.g. url_path) used only in error messages.
        self.meta = meta

    def handle(self, response):
        # Try each configured handler in order; the first one that parses
        # the response wins. Raises when every handler fails.
        failed_handlers = []
        for handler in self.response_handlers:
            try:
                return handler.handle(response)
            except Exception as e:
                handler_name = handler.__name__
                failed_handlers.append(handler_name)
                LOG.debug(
                    "Error handling response [%s] with handler [%s]: %s. "
                    "We will try the next one, if multiple handlers were "
                    "configured.", response, handler_name, e)

        handlers_str = ', '.join(failed_handlers)
        raise declarative.InvalidResponseTypeException(
            "No remaining handlers to handle the response [%s], "
            "used handlers [%s]. [%s]." % (
                response, handlers_str, self.meta))


class PollsterDefinitionBuilder:
    """Builds a pollster definitions object from YAML configurations.

    The resulting type is composed dynamically from every definitions
    class whose 'is_field_applicable_to_definition' accepts the given
    configurations.
    """

    def __init__(self, definitions):
        self.definitions = definitions

    def build_definitions(self, configurations):
        # Collect every definitions class that claims the configurations.
        supported_definitions = []
        for definition in self.definitions:
            if definition.is_field_applicable_to_definition(configurations):
                supported_definitions.append(definition)

        if not supported_definitions:
            raise declarative.DynamicPollsterDefinitionException(
                "Your configurations do not fit any type of DynamicPollsters, "
                "please recheck them. Used configurations are [%s]."
% configurations) definition_name = self.join_supported_definitions_names( supported_definitions) definition_parents = tuple(supported_definitions) definition_attribs = {'extra_definitions': reduce( lambda d1, d2: d1 + d2, map(lambda df: df.extra_definitions, supported_definitions))} definition_type = type(definition_name, definition_parents, definition_attribs) return definition_type(configurations) @staticmethod def join_supported_definitions_names(supported_definitions): return ''.join(map(lambda df: df.__name__, supported_definitions)) class PollsterSampleExtractor: def __init__(self, definitions): self.definitions = definitions def generate_new_metadata_fields(self, metadata=None, pollster_definitions=None): pollster_definitions =\ pollster_definitions or self.definitions.configurations metadata_mapping = pollster_definitions['metadata_mapping'] if not metadata_mapping or not metadata: return metadata_keys = list(metadata.keys()) for k in metadata_keys: if k not in metadata_mapping: continue new_key = metadata_mapping[k] metadata[new_key] = metadata[k] LOG.debug("Generating new key [%s] with content [%s] of key [%s]", new_key, metadata[k], k) if pollster_definitions['preserve_mapped_metadata']: continue k_value = metadata.pop(k) LOG.debug("Removed key [%s] with value [%s] from " "metadata set that is sent to Gnocchi.", k, k_value) def generate_sample( self, pollster_sample, pollster_definitions=None, **kwargs): pollster_definitions =\ pollster_definitions or self.definitions.configurations metadata = dict() if 'metadata_fields' in pollster_definitions: for k in pollster_definitions['metadata_fields']: val = self.retrieve_attribute_nested_value( pollster_sample, value_attribute=k, definitions=self.definitions.configurations) LOG.debug("Assigning value [%s] to metadata key [%s].", val, k) metadata[k] = val self.generate_new_metadata_fields( metadata=metadata, pollster_definitions=pollster_definitions) pollster_sample['metadata'] = metadata extra_metadata = 
self.definitions.retrieve_extra_metadata( kwargs['manager'], pollster_sample, kwargs['conf']) LOG.debug("Extra metadata [%s] collected for sample [%s].", extra_metadata, pollster_sample) for key in extra_metadata.keys(): if key in metadata.keys(): LOG.warning("The extra metadata key [%s] already exist in " "pollster current metadata set [%s]. Therefore, " "we will ignore it with its value [%s].", key, metadata, extra_metadata[key]) continue metadata[key] = extra_metadata[key] return ceilometer_sample.Sample( timestamp=ceilometer_utils.isotime(), name=pollster_definitions['name'], type=pollster_definitions['sample_type'], unit=pollster_definitions['unit'], volume=pollster_sample['value'], user_id=pollster_sample.get("user_id"), project_id=pollster_sample.get("project_id"), resource_id=pollster_sample.get("id"), resource_metadata=metadata) def retrieve_attribute_nested_value(self, json_object, value_attribute=None, definitions=None, **kwargs): if not definitions: definitions = self.definitions.configurations attribute_key = value_attribute if not attribute_key: attribute_key = self.definitions.extract_attribute_key() LOG.debug( "Retrieving the nested keys [%s] from [%s] or pollster [""%s].", attribute_key, json_object, definitions["name"]) keys_and_operations = attribute_key.split("|") attribute_key = keys_and_operations[0].strip() if attribute_key == ".": value = json_object else: nested_keys = attribute_key.split(".") value = reduce(operator.getitem, nested_keys, json_object) return self.operate_value(keys_and_operations, value, definitions) def operate_value(self, keys_and_operations, value, definitions): # We do not have operations to be executed against the value extracted if len(keys_and_operations) < 2: return value for operation in keys_and_operations[1::]: # The operation must be performed onto the 'value' variable if 'value' not in operation: raise declarative.DynamicPollsterDefinitionException( "The attribute field operation [%s] must use the [" "value] 
variable." % operation, definitions)
            LOG.debug("Executing operation [%s] against value[%s] for "
                      "pollster [%s].", operation, value,
                      definitions["name"])
            # NOTE(review): eval() executes an operator-supplied expression
            # from the pollster YAML against 'value'. Pollster definition
            # files must therefore be treated as trusted input.
            value = eval(operation.strip())
            LOG.debug("Result [%s] of operation [%s] for pollster [%s].",
                      value, operation, definitions["name"])
        return value


class SimplePollsterSampleExtractor(PollsterSampleExtractor):
    """Extracts one sample per API entry for single-metric pollsters."""

    def generate_single_sample(self, pollster_sample, **kwargs):
        # Pull the configured value attribute out of the entry, then apply
        # the skip/value mapping; a SkippedSample marker short-circuits.
        value = self.retrieve_attribute_nested_value(
            pollster_sample)
        value = self.definitions.value_mapper.map_or_skip_value(
            value, pollster_sample)
        if isinstance(value, SkippedSample):
            return value
        pollster_sample['value'] = value
        return self.generate_sample(pollster_sample, **kwargs)

    def extract_sample(self, pollster_sample, **kwargs):
        # Generator interface: yields at most one sample, or returns the
        # SkippedSample marker without yielding anything.
        sample = self.generate_single_sample(pollster_sample, **kwargs)
        if isinstance(sample, SkippedSample):
            return sample
        yield sample


class MultiMetricPollsterSampleExtractor(PollsterSampleExtractor):
    """Extracts several samples (sub-metrics) from a single API entry."""

    def extract_sample(self, pollster_sample, **kwargs):
        pollster_definitions = self.definitions.configurations
        value = self.retrieve_attribute_nested_value(
            pollster_sample, definitions=pollster_definitions)
        LOG.debug("We are dealing with a multi metric pollster. 
The " "value we are processing is the following: [%s].", value) self.validate_sample_is_list(value) sub_metric_placeholder, pollster_name, sub_metric_attribute_name = \ self.extract_names_attrs() value_attribute = \ self.extract_field_name_from_value_attribute_configuration() LOG.debug("Using attribute [%s] to look for values in the " "multi metric pollster [%s] with sample [%s]", value_attribute, pollster_definitions, value) pollster_definitions = copy.deepcopy(pollster_definitions) yield from self.extract_sub_samples(value, sub_metric_attribute_name, pollster_name, value_attribute, sub_metric_placeholder, pollster_definitions, pollster_sample, **kwargs) def extract_sub_samples(self, value, sub_metric_attribute_name, pollster_name, value_attribute, sub_metric_placeholder, pollster_definitions, pollster_sample, **kwargs): for sub_sample in value: sub_metric_name = sub_sample[sub_metric_attribute_name] new_metric_name = pollster_name.replace( sub_metric_placeholder, sub_metric_name) pollster_definitions['name'] = new_metric_name actual_value = self.retrieve_attribute_nested_value( sub_sample, value_attribute, definitions=pollster_definitions) pollster_sample['value'] = actual_value if self.should_skip_generate_sample(actual_value, sub_sample, sub_metric_name): continue yield self.generate_sample( pollster_sample, pollster_definitions, **kwargs) def extract_field_name_from_value_attribute_configuration(self): value_attribute = self.definitions.configurations['value_attribute'] return self.definitions.pattern_pollster_value_attribute.match( value_attribute).group(3)[1::] def extract_names_attrs(self): pollster_name = self.definitions.configurations['name'] sub_metric_placeholder = pollster_name.split(".").pop() return (sub_metric_placeholder, pollster_name, self.definitions.pattern_pollster_name.match( "." 
+ sub_metric_placeholder).group(2))

    def validate_sample_is_list(self, value):
        # Multi-metric pollsters require the configured value_attribute to
        # resolve to a list of objects (one entry per sub-metric).
        pollster_definitions = self.definitions.configurations
        if not isinstance(value, list):
            raise declarative.DynamicPollsterException(
                "Multi metric pollster defined, but the value [%s]"
                " obtained with [%s] attribute is not a list"
                " of objects." % (
                    value, pollster_definitions['value_attribute']),
                pollster_definitions)

    def should_skip_generate_sample(self, actual_value, sub_sample,
                                    sub_metric_name):
        # A sub-sample is skipped when either its value or its sub-metric
        # name appears in the configured 'skip_sample_values' list.
        skip_sample_values = \
            self.definitions.configurations['skip_sample_values']
        if actual_value in skip_sample_values:
            LOG.debug(
                "Skipping multi metric sample [%s] because "
                "value [%s] is configured to be skipped in "
                "skip list [%s].", sub_sample, actual_value,
                skip_sample_values)
            return True
        if sub_metric_name in skip_sample_values:
            LOG.debug(
                "Skipping sample [%s] because its sub-metric "
                "name [%s] is configured to be skipped in "
                "skip list [%s].", sub_sample, sub_metric_name,
                skip_sample_values)
            return True
        return False


class PollsterValueMapper:
    """Applies 'skip_sample_values' and 'value_mapping' to raw values."""

    def __init__(self, definitions):
        self.definitions = definitions

    def map_or_skip_value(self, value, pollster_sample):
        # Returns a SkippedSample marker when the value is configured to
        # be skipped; otherwise returns the (possibly mapped) value.
        skip_sample_values = \
            self.definitions.configurations['skip_sample_values']

        if value in skip_sample_values:
            LOG.debug("Skipping sample [%s] because value [%s] "
                      "is configured to be skipped in skip list [%s].",
                      pollster_sample, value, skip_sample_values)
            return SkippedSample()

        return self.execute_value_mapping(value)

    def execute_value_mapping(self, value):
        # Translate the raw value via 'value_mapping'; values missing from
        # the mapping fall back to 'default_value' (with a warning).
        value_mapping = self.definitions.configurations['value_mapping']
        if not value_mapping:
            return value

        if value in value_mapping:
            old_value = value
            value = value_mapping[value]
            LOG.debug("Value mapped from [%s] to [%s]",
                      old_value, value)
        else:
            default_value = \
                self.definitions.configurations['default_value']
            LOG.warning(
                "Value [%s] was not found in value_mapping [%s]; "
                "therefore, we will use the default [%s].",
                value, value_mapping, default_value)
            value = default_value

        return
value class PollsterDefinition: """Represents a dynamic pollster configuration/parameter It abstract the job of developers when creating or extending parameters, such as validating parameters name, values and so on. """ def __init__(self, name, required=False, on_missing=lambda df: df.default, default=None, validation_regex=None, creatable=True, validator=None): """Create a dynamic pollster configuration/parameter :param name: the name of the pollster parameter/configuration. :param required: indicates if the configuration/parameter is optional or not. :param on_missing: function that is executed when the parameter/configuration is missing. :param default: the default value to be used. :param validation_regex: the regular expression used to validate the name of the configuration/parameter. :param creatable: it is an override mechanism to avoid creating a configuration/parameter with the default value. The default is ``True``; therefore, we always use the default value. However, we can disable the use of the default value by setting ``False``. When we set this configuration to ``False``, the parameter is not added to the definition dictionary if not defined by the operator in the pollster YAML configuration file. :param validator: function used to validate the value of the parameter/configuration when it is given by the user. This function signature should receive a value that is the value of the parameter to be validate. """ self.name = name self.required = required self.on_missing = on_missing self.validation_regex = validation_regex self.creatable = creatable self.default = default if self.validation_regex: self.validation_pattern = re.compile(self.validation_regex) self.validator = validator def validate(self, val): if val is None: return self.on_missing(self) if self.validation_regex and not self.validation_pattern.match(val): raise declarative.DynamicPollsterDefinitionException( "Pollster %s [%s] does not match [%s]." 
% (self.name, val, self.validation_regex)) if self.validator: self.validator(val) return val class PollsterDefinitions: POLLSTER_VALID_NAMES_REGEXP = r"^([\w-]+)(\.[\w-]+)*(\.{[\w-]+})?$" EXTERNAL_ENDPOINT_TYPE = "external" standard_definitions = [ PollsterDefinition(name='name', required=True, validation_regex=POLLSTER_VALID_NAMES_REGEXP), PollsterDefinition(name='sample_type', required=True, validator=validate_sample_type), PollsterDefinition(name='unit', required=True), PollsterDefinition(name='endpoint_type', required=True), PollsterDefinition(name='url_path', required=True), PollsterDefinition(name='metadata_fields', creatable=False), PollsterDefinition(name='skip_sample_values', default=[]), PollsterDefinition(name='value_mapping', default={}), PollsterDefinition(name='default_value', default=-1), PollsterDefinition(name='metadata_mapping', default={}), PollsterDefinition(name='preserve_mapped_metadata', default=True), PollsterDefinition(name='response_entries_key'), PollsterDefinition(name='next_sample_url_attribute'), PollsterDefinition(name='user_id_attribute', default="user_id"), PollsterDefinition(name='resource_id_attribute', default="id"), PollsterDefinition(name='project_id_attribute', default="project_id"), PollsterDefinition(name='headers'), PollsterDefinition(name='timeout', default=30), PollsterDefinition(name='extra_metadata_fields_cache_seconds', default=3600), PollsterDefinition(name='extra_metadata_fields'), PollsterDefinition(name='extra_metadata_fields_skip', default=[{}], validator=validate_extra_metadata_skip_samples), PollsterDefinition(name='response_handlers', default=['json'], validator=validate_response_handler), PollsterDefinition(name='base_metadata', default={}) ] extra_definitions = [] def __init__(self, configurations): self.configurations = configurations self.value_mapper = PollsterValueMapper(self) self.definitions = self.map_definitions() self.validate_configurations(configurations) self.validate_missing() 
self.sample_gatherer = PollsterSampleGatherer(self) self.sample_extractor = SimplePollsterSampleExtractor(self) self.response_cache = {} def validate_configurations(self, configurations): for k, v in self.definitions.items(): if configurations.get(k) is not None: self.configurations[k] = self.definitions[k].validate( self.configurations[k]) elif self.definitions[k].creatable: self.configurations[k] = self.definitions[k].default @staticmethod def is_field_applicable_to_definition(configurations): return True def map_definitions(self): definitions = dict( map(lambda df: (df.name, df), self.standard_definitions)) extra_definitions = dict( map(lambda df: (df.name, df), self.extra_definitions)) definitions.update(extra_definitions) return definitions def extract_attribute_key(self): pass def validate_missing(self): required_configurations = map(lambda fdf: fdf.name, filter(lambda df: df.required, self.definitions.values())) missing = list(filter( lambda rf: rf not in map(lambda f: f[0], filter(lambda f: f[1], self.configurations.items())), required_configurations)) if missing: raise declarative.DynamicPollsterDefinitionException( "Required fields %s not specified." % missing, self.configurations) def should_skip_extra_metadata(self, skip, sample): match_msg = "Sample [%s] %smatches with configured" \ " extra_metadata_fields_skip [%s]." 
if skip == sample: LOG.debug(match_msg, sample, "", skip) return True if not isinstance(skip, dict) or not isinstance(sample, dict): LOG.debug(match_msg, sample, "not ", skip) return False for key in skip: if key not in sample: LOG.debug(match_msg, sample, "not ", skip) return False if not self.should_skip_extra_metadata(skip[key], sample[key]): LOG.debug(match_msg, sample, "not ", skip) return False LOG.debug(match_msg, sample, "", skip) return True def skip_sample(self, request_sample, skips): for skip in skips: if not skip: continue if self.should_skip_extra_metadata(skip, request_sample): LOG.debug("Skipping extra_metadata_field gathering for " "sample [%s] as defined in the " "extra_metadata_fields_skip [%s]", request_sample, skip) return True return False def retrieve_extra_metadata(self, manager, request_sample, pollster_conf): extra_metadata_fields = self.configurations['extra_metadata_fields'] if extra_metadata_fields: extra_metadata_samples = {} extra_metadata_by_name = {} if not isinstance(extra_metadata_fields, (list, tuple)): extra_metadata_fields = [extra_metadata_fields] for ext_metadata in extra_metadata_fields: ext_metadata.setdefault( 'extra_metadata_fields_skip', self.configurations['extra_metadata_fields_skip']) ext_metadata.setdefault( 'sample_type', self.configurations['sample_type']) ext_metadata.setdefault('unit', self.configurations['unit']) ext_metadata.setdefault( 'value_attribute', ext_metadata.get( 'value', self.configurations['value_attribute'])) ext_metadata['base_metadata'] = { 'extra_metadata_captured': extra_metadata_samples, 'extra_metadata_by_name': extra_metadata_by_name, 'sample': request_sample } parent_cache_ttl = self.configurations[ 'extra_metadata_fields_cache_seconds'] cache_ttl = ext_metadata.get( 'extra_metadata_fields_cache_seconds', parent_cache_ttl ) response_cache = self.response_cache extra_metadata_pollster = DynamicPollster( ext_metadata, conf=pollster_conf, cache_ttl=cache_ttl, 
extra_metadata_responses_cache=response_cache, ) skips = ext_metadata['extra_metadata_fields_skip'] if self.skip_sample(request_sample, skips): continue resources = [None] if ext_metadata.get('endpoint_type'): resources = manager.discover([ extra_metadata_pollster.default_discovery], {}) samples = extra_metadata_pollster.get_samples( manager, None, resources) for sample in samples: self.fill_extra_metadata_samples( extra_metadata_by_name, extra_metadata_samples, sample) return extra_metadata_samples LOG.debug("No extra metadata to be captured for pollsters [%s] and " "request sample [%s].", self.definitions, request_sample) return {} def fill_extra_metadata_samples(self, extra_metadata_by_name, extra_metadata_samples, sample): extra_metadata_samples[sample.name] = sample.volume LOG.debug("Merging the sample metadata [%s] of the " "extra_metadata_field [%s], with the " "extra_metadata_samples [%s].", sample.resource_metadata, sample.name, extra_metadata_samples) for key, value in sample.resource_metadata.items(): if value is None and key in extra_metadata_samples: LOG.debug("Metadata [%s] for extra_metadata_field [%s] " "is None, skipping metadata override by None " "value", key, sample.name) continue extra_metadata_samples[key] = value extra_metadata_by_name[sample.name] = { 'value': sample.volume, 'metadata': sample.resource_metadata } LOG.debug("extra_metadata_samples after merging: [%s].", extra_metadata_samples) class MultiMetricPollsterDefinitions(PollsterDefinitions): MULTI_METRIC_POLLSTER_NAME_REGEXP = r".*(\.{(\w+)})$" pattern_pollster_name = re.compile( MULTI_METRIC_POLLSTER_NAME_REGEXP) MULTI_METRIC_POLLSTER_VALUE_ATTRIBUTE_REGEXP = r"^(\[(\w+)\])((\.\w+)+)$" pattern_pollster_value_attribute = re.compile( MULTI_METRIC_POLLSTER_VALUE_ATTRIBUTE_REGEXP) extra_definitions = [ PollsterDefinition( name='value_attribute', required=True, validation_regex=MULTI_METRIC_POLLSTER_VALUE_ATTRIBUTE_REGEXP), ] def __init__(self, configurations): 
super().__init__(configurations) self.sample_extractor = MultiMetricPollsterSampleExtractor(self) @staticmethod def is_field_applicable_to_definition(configurations): return configurations.get( 'name') and MultiMetricPollsterDefinitions.\ pattern_pollster_name.match(configurations['name']) def extract_attribute_key(self): return self.pattern_pollster_value_attribute.match( self.configurations['value_attribute']).group(2) class SingleMetricPollsterDefinitions(PollsterDefinitions): extra_definitions = [ PollsterDefinition(name='value_attribute', required=True)] def __init__(self, configurations): super().__init__(configurations) def extract_attribute_key(self): return self.configurations['value_attribute'] @staticmethod def is_field_applicable_to_definition(configurations): return not MultiMetricPollsterDefinitions. \ is_field_applicable_to_definition(configurations) class PollsterSampleGatherer: def __init__(self, definitions): self.definitions = definitions self.response_handler_chain = ResponseHandlerChain( map(VALID_HANDLERS.get, self.definitions.configurations['response_handlers']), url_path=definitions.configurations['url_path'] ) def get_cache_key(self, definitions, **kwargs): return self.get_request_linked_samples_url(kwargs, definitions) def get_cached_response(self, definitions, **kwargs): if self.definitions.cache_ttl == 0: return cache_key = self.get_cache_key(definitions, **kwargs) response_cache = self.definitions.response_cache cached_response, max_ttl_for_cache = response_cache.get( cache_key, (None, None)) current_time = time.time() if cached_response and max_ttl_for_cache >= current_time: LOG.debug("Returning response [%s] for request [%s] as the TTL " "[max=%s, current_time=%s] has not expired yet.", cached_response, definitions, max_ttl_for_cache, current_time) return cached_response if cached_response and max_ttl_for_cache < current_time: LOG.debug("Cleaning cached response [%s] for request [%s] " "as the TTL [max=%s, current_time=%s] has 
expired.", cached_response, definitions, max_ttl_for_cache, current_time) response_cache.pop(cache_key, None) def store_cached_response(self, definitions, resp, **kwargs): if self.definitions.cache_ttl == 0: return cache_key = self.get_cache_key(definitions, **kwargs) extra_metadata_fields_cache_seconds = self.definitions.cache_ttl max_ttl_for_cache = time.time() + extra_metadata_fields_cache_seconds cache_tuple = (resp, max_ttl_for_cache) self.definitions.response_cache[cache_key] = cache_tuple @property def default_discovery(self): return 'endpoint:' + self.definitions.configurations['endpoint_type'] def execute_request_get_samples(self, **kwargs): return self.execute_request_for_definitions( self.definitions.configurations, **kwargs) def execute_request_for_definitions(self, definitions, **kwargs): if response_dict := self.get_cached_response(definitions, **kwargs): url = 'cached' else: resp, url = self._internal_execute_request_get_samples( definitions=definitions, **kwargs) response_dict = self.response_handler_chain.handle(resp.text) self.store_cached_response(definitions, response_dict, **kwargs) entry_size = len(response_dict) LOG.debug("Entries [%s] in the DICT for request [%s] " "for dynamic pollster [%s].", response_dict, url, definitions['name']) if entry_size > 0: samples = self.retrieve_entries_from_response( response_dict, definitions) url_to_next_sample = self.get_url_to_next_sample( response_dict, definitions) self.prepare_samples(definitions, samples, **kwargs) if url_to_next_sample: kwargs['next_sample_url'] = url_to_next_sample samples += self.execute_request_for_definitions( definitions=definitions, **kwargs) return samples return [] def prepare_samples( self, definitions, samples, execute_id_overrides=True, **kwargs): if samples and execute_id_overrides: for request_sample in samples: user_id_attribute = definitions.get( 'user_id_attribute', 'user_id') project_id_attribute = definitions.get( 'project_id_attribute', 'project_id') 
resource_id_attribute = definitions.get( 'resource_id_attribute', 'id') self.generate_new_attributes_in_sample( request_sample, user_id_attribute, 'user_id') self.generate_new_attributes_in_sample( request_sample, project_id_attribute, 'project_id') self.generate_new_attributes_in_sample( request_sample, resource_id_attribute, 'id') def generate_new_attributes_in_sample( self, sample, attribute_key, new_attribute_key): if attribute_key == new_attribute_key: LOG.debug("We do not need to generate new attribute as the " "attribute_key[%s] and the new_attribute_key[%s] " "configurations are the same.", attribute_key, new_attribute_key) return if attribute_key: attribute_value = self.definitions.sample_extractor.\ retrieve_attribute_nested_value(sample, attribute_key) LOG.debug("Mapped attribute [%s] to value [%s] in sample [%s].", attribute_key, attribute_value, sample) sample[new_attribute_key] = attribute_value def get_url_to_next_sample(self, resp, definitions): linked_sample_extractor = definitions.get('next_sample_url_attribute') if not linked_sample_extractor: return None try: return self.definitions.sample_extractor.\ retrieve_attribute_nested_value(resp, linked_sample_extractor) except KeyError: LOG.debug("There is no next sample url for the sample [%s] using " "the configuration [%s]", resp, linked_sample_extractor) return None def _internal_execute_request_get_samples(self, definitions=None, keystone_client=None, **kwargs): if not definitions: definitions = self.definitions.configurations url = self.get_request_linked_samples_url(kwargs, definitions) request_arguments = self.create_request_arguments(definitions) LOG.debug("Executing request against [url=%s] with parameters [" "%s] for pollsters [%s]", url, request_arguments, definitions["name"]) resp = keystone_client.session.get(url, **request_arguments) if resp.status_code != requests.codes.ok: resp.raise_for_status() return resp, url def create_request_arguments(self, definitions): request_args = { 
"authenticated": True } request_headers = definitions.get('headers', []) if request_headers: request_args['headers'] = request_headers request_args['timeout'] = definitions.get('timeout', 300) return request_args def get_request_linked_samples_url(self, kwargs, definitions): next_sample_url = kwargs.get('next_sample_url') if next_sample_url: return self.get_next_page_url(kwargs, next_sample_url) LOG.debug("Generating url with [%s] and path [%s].", kwargs, definitions['url_path']) return self.get_request_url( kwargs, definitions['url_path']) def get_next_page_url(self, kwargs, next_sample_url): parse_result = urlparse.urlparse(next_sample_url) if parse_result.netloc: return next_sample_url return self.get_request_url(kwargs, next_sample_url) def get_request_url(self, kwargs, url_path): endpoint = kwargs['resource'] params = copy.deepcopy( self.definitions.configurations.get( 'base_metadata', {})) try: url_path = eval(url_path, params) except Exception: LOG.debug("Cannot eval path [%s] with params [%s]," " using [%s] instead.", url_path, params, url_path) return urlparse.urljoin((endpoint if endpoint.endswith("/") else (endpoint + "/")), url_path) def retrieve_entries_from_response(self, response_json, definitions): if isinstance(response_json, list): return response_json first_entry_name = definitions.get('response_entries_key') if not first_entry_name: try: first_entry_name = next(iter(response_json)) except RuntimeError as e: LOG.debug("Generator threw a StopIteration " "and we need to catch it [%s].", e) return self.definitions.sample_extractor.\ retrieve_attribute_nested_value(response_json, first_entry_name) class NonOpenStackApisPollsterDefinition(PollsterDefinitions): extra_definitions = [ PollsterDefinition(name='value_attribute', required=True), PollsterDefinition(name='module', required=True), PollsterDefinition(name='authentication_object', required=True), PollsterDefinition(name='barbican_secret_id', default=""), 
PollsterDefinition(name='authentication_parameters', default=""), PollsterDefinition(name='endpoint_type')] def __init__(self, configurations): super().__init__( configurations) self.sample_gatherer = NonOpenStackApisSamplesGatherer(self) @staticmethod def is_field_applicable_to_definition(configurations): return configurations.get('module') class HostCommandPollsterDefinition(PollsterDefinitions): extra_definitions = [ PollsterDefinition(name='endpoint_type', required=False), PollsterDefinition(name='url_path', required=False), PollsterDefinition(name='host_command', required=True)] def __init__(self, configurations): super().__init__( configurations) self.sample_gatherer = HostCommandSamplesGatherer(self) @staticmethod def is_field_applicable_to_definition(configurations): return configurations.get('host_command') class HostCommandSamplesGatherer(PollsterSampleGatherer): class Response: def __init__(self, text): self.text = text def get_cache_key(self, definitions, **kwargs): return self.get_command(definitions) def _internal_execute_request_get_samples(self, definitions, **kwargs): command = self.get_command(definitions, **kwargs) LOG.debug('Running Host command: [%s]', command) result = subprocess.getoutput(command) LOG.debug('Host command [%s] result: [%s]', command, result) return self.Response(result), command def get_command(self, definitions, next_sample_url=None, **kwargs): command = next_sample_url or definitions['host_command'] params = copy.deepcopy( self.definitions.configurations.get( 'base_metadata', {})) try: command = eval(command, params) except Exception: LOG.debug("Cannot eval command [%s] with params [%s]," " using [%s] instead.", command, params, command) return command @property def default_discovery(self): return 'local_node' class NonOpenStackApisSamplesGatherer(PollsterSampleGatherer): @property def default_discovery(self): return 'barbican:' + \ self.definitions.configurations['barbican_secret_id'] def 
_internal_execute_request_get_samples(self, definitions, **kwargs): credentials = kwargs['resource'] override_credentials = definitions['authentication_parameters'] if override_credentials: credentials = override_credentials if not isinstance(credentials, str): credentials = self.normalize_credentials_to_string(credentials) url = self.get_request_linked_samples_url(kwargs, definitions) authenticator_module_name = definitions['module'] authenticator_class_name = definitions['authentication_object'] imported_module = __import__(authenticator_module_name) authenticator_class = getattr(imported_module, authenticator_class_name) authenticator_arguments = list(map(str.strip, credentials.split(","))) authenticator_instance = authenticator_class(*authenticator_arguments) request_arguments = self.create_request_arguments(definitions) request_arguments["auth"] = authenticator_instance LOG.debug("Executing request against [url=%s] with parameters [" "%s] for pollsters [%s]", url, request_arguments, definitions["name"]) resp = requests.get(url, **request_arguments) if resp.status_code != requests.codes.ok: raise declarative.NonOpenStackApisDynamicPollsterException( "Error while executing request[%s]." " Status[%s] and reason [%s]." % (url, resp.status_code, resp.reason)) return resp, url @staticmethod def normalize_credentials_to_string(credentials): if isinstance(credentials, bytes): credentials = credentials.decode('utf-8') else: credentials = str(credentials) LOG.debug("Credentials [%s] were not defined as a string. 
" "Therefore, we converted it to a string like object.", credentials) return credentials def create_request_arguments(self, definitions): request_arguments = super().create_request_arguments( definitions) request_arguments.pop("authenticated") return request_arguments def get_request_url(self, kwargs, url_path): endpoint = self.definitions.configurations['url_path'] if endpoint == url_path: return url_path return urlparse.urljoin((endpoint if endpoint.endswith("/") else (endpoint + "/")), url_path) def generate_new_attributes_in_sample( self, sample, attribute_key, new_attribute_key): if attribute_key: attribute_value = self.definitions.sample_extractor. \ retrieve_attribute_nested_value(sample, attribute_key) LOG.debug("Mapped attribute [%s] to value [%s] in sample [%s].", attribute_key, attribute_value, sample) sample[new_attribute_key] = attribute_value class SkippedSample: pass class DynamicPollster(plugin_base.PollsterBase): # Mandatory name field name = "" def __init__(self, pollster_definitions={}, conf=None, cache_ttl=0, extra_metadata_responses_cache=None, supported_definitions=[HostCommandPollsterDefinition, NonOpenStackApisPollsterDefinition, MultiMetricPollsterDefinitions, SingleMetricPollsterDefinitions]): super().__init__(conf) self.supported_definitions = supported_definitions LOG.debug("%s instantiated with [%s]", __name__, pollster_definitions) self.definitions = PollsterDefinitionBuilder( self.supported_definitions).build_definitions(pollster_definitions) self.definitions.cache_ttl = cache_ttl self.definitions.response_cache = extra_metadata_responses_cache if extra_metadata_responses_cache is None: self.definitions.response_cache = {} self.pollster_definitions = self.definitions.configurations if 'metadata_fields' in self.pollster_definitions: LOG.debug("Metadata fields configured to [%s].", self.pollster_definitions['metadata_fields']) self.name = self.pollster_definitions['name'] self.obj = self @property def default_discovery(self): return 
self.definitions.sample_gatherer.default_discovery def load_samples(self, resource, manager): try: return self.definitions.sample_gatherer.\ execute_request_get_samples(manager=manager, resource=resource, keystone_client=manager._keystone) except RequestException as e: LOG.warning("Error [%s] while loading samples for [%s] " "for dynamic pollster [%s].", e, resource, self.name) return list([]) def get_samples(self, manager, cache, resources): if not resources: LOG.debug("No resources received for processing.") yield None for r in resources: LOG.debug("Executing get sample for resource [%s].", r) samples = self.load_samples(r, manager) if not isinstance(samples, (list, tuple)): samples = [samples] for pollster_sample in samples: sample = self.extract_sample( pollster_sample, manager=manager, resource=r, conf=self.conf) if isinstance(sample, SkippedSample): continue yield from sample def extract_sample(self, pollster_sample, **kwargs): return self.definitions.sample_extractor.extract_sample( pollster_sample, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/polling/manager.py0000664000175100017510000012722415033033467021565 0ustar00mylesmyles# # Copyright 2013 Julien Danjou # Copyright 2014-2017 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import collections import glob import itertools import logging import os import queue import random import socket import threading import uuid from concurrent import futures import cotyledon from futurist import periodics from keystoneauth1 import exceptions as ka_exceptions from oslo_config import cfg from oslo_log import log import oslo_messaging from oslo_utils import netutils from oslo_utils import timeutils from stevedore import extension from tooz import coordination from urllib import parse as urlparse from ceilometer import agent from ceilometer import cache_utils from ceilometer import declarative from ceilometer import keystone_client from ceilometer import messaging from ceilometer.polling import dynamic_pollster from ceilometer.polling import plugin_base from ceilometer.polling import prom_exporter from ceilometer.publisher import utils as publisher_utils from ceilometer import utils LOG = log.getLogger(__name__) POLLING_OPTS = [ cfg.StrOpt('cfg_file', default="polling.yaml", help="Configuration file for polling definition." ), cfg.StrOpt('heartbeat_socket_dir', default=None, help="Path to directory where socket file for polling " "heartbeat will be created."), cfg.StrOpt('partitioning_group_prefix', deprecated_group='central', help='Work-load partitioning group prefix. Use only if you ' 'want to run multiple polling agents with different ' 'config files. For each sub-group of the agent ' 'pool with the same partitioning_group_prefix a disjoint ' 'subset of pollsters should be loaded.'), cfg.IntOpt('batch_size', default=50, help='Batch size of samples to send to notification agent, ' 'Set to 0 to disable. 
When prometheus exporter feature ' 'is used, this should be largered than maximum number of ' 'samples per metric.'), cfg.MultiStrOpt('pollsters_definitions_dirs', default=["/etc/ceilometer/pollsters.d"], help="List of directories with YAML files used " "to created pollsters."), cfg.BoolOpt('identity_name_discovery', deprecated_name='tenant_name_discovery', default=False, help='Identify project and user names from polled samples. ' 'By default, collecting these values is disabled due ' 'to the fact that it could overwhelm keystone service ' 'with lots of continuous requests depending upon the ' 'number of projects, users and samples polled from ' 'the environment. While using this feature, it is ' 'recommended that ceilometer be configured with a ' 'caching backend to reduce the number of calls ' 'made to keystone.'), cfg.BoolOpt('enable_notifications', default=True, help='Whether the polling service should be sending ' 'notifications after polling cycles.'), cfg.BoolOpt('enable_prometheus_exporter', default=False, help='Allow this ceilometer polling instance to ' 'expose directly the retrieved metrics in Prometheus ' 'format.'), cfg.ListOpt('prometheus_listen_addresses', default=["127.0.0.1:9101"], help='A list of ipaddr:port combinations on which ' 'the exported metrics will be exposed.'), cfg.BoolOpt('ignore_disabled_projects', default=False, help='Whether the polling service should ignore ' 'disabled projects or not.'), cfg.BoolOpt('prometheus_tls_enable', default=False, help='Whether it will expose tls metrics or not'), cfg.StrOpt('prometheus_tls_certfile', default=None, help='The certificate file to allow this ceilometer to ' 'expose tls scrape endpoints'), cfg.StrOpt('prometheus_tls_keyfile', default=None, help='The private key to allow this ceilometer to ' 'expose tls scrape endpoints'), cfg.IntOpt('threads_to_process_pollsters', default=1, min=0, help='The number of threads used to process the pollsters.' 
'The value one (1) means that the processing is in a' 'serial fashion (not ordered!). The value zero (0) means ' 'that the we will use as much threads as the number of ' 'pollsters configured in the polling task. Any other' 'positive integer can be used to fix an upper bound limit' 'to the number of threads used for processing pollsters in' 'parallel. One must bear in mind that, using more than one' 'thread might not take full advantage of the discovery ' 'cache and pollsters cache processes; it is possible ' 'though to improve/use pollsters that synchronize ' 'themselves in the cache objects.'), ] def hash_of_set(s): return str(hash(frozenset(s))) class PollingException(agent.ConfigException): def __init__(self, message, cfg): super().__init__('Polling', message, cfg) class HeartBeatException(agent.ConfigException): def __init__(self, message, cfg): super().__init__('Polling', message, cfg) class Resources: def __init__(self, agent_manager): self.agent_manager = agent_manager self._resources = [] self._discovery = [] self.blacklist = [] def setup(self, source): self._resources = source.resources self._discovery = source.discovery def get(self, discovery_cache=None): source_discovery = (self.agent_manager.discover(self._discovery, discovery_cache) if self._discovery else []) if self._resources: static_resources_group = self.agent_manager.construct_group_id( hash_of_set(self._resources)) return [v for v in self._resources if not self.agent_manager.partition_coordinator or self.agent_manager.hashrings[ static_resources_group].belongs_to_self( str(v))] + source_discovery return source_discovery @staticmethod def key(source_name, pollster): return '{}-{}'.format(source_name, pollster.name) def iter_random(iterable): """Iter over iterable in a random fashion.""" lst = list(iterable) random.shuffle(lst) return iter(lst) class PollingTask: """Polling task for polling samples and notifying. A polling task can be invoked periodically or only once. 
""" def __init__(self, agent_manager): self.manager = agent_manager # elements of the Cartesian product of sources X pollsters # with a common interval self.pollster_matches = collections.defaultdict(set) # we relate the static resources and per-source discovery to # each combination of pollster and matching source resource_factory = lambda: Resources(agent_manager) # noqa: E731 self.resources = collections.defaultdict(resource_factory) conf = self.manager.conf self._batch_size = conf.polling.batch_size self._telemetry_secret = conf.publisher.telemetry_secret self.ks_client = self.manager.keystone self._name_discovery = conf.polling.identity_name_discovery self._cache = cache_utils.get_client(conf) # element that provides a map between source names and source object self.sources_map = dict() def add(self, pollster, source): self.sources_map[source.name] = source self.pollster_matches[source.name].add(pollster) key = Resources.key(source.name, pollster) self.resources[key].setup(source) def poll_and_notify(self): """Polling sample and notify.""" cache = {} discovery_cache = {} poll_history = {} for source_name, pollsters in iter_random( self.pollster_matches.items()): self.execute_polling_task_processing(cache, discovery_cache, poll_history, pollsters, source_name) def execute_polling_task_processing(self, cache, discovery_cache, poll_history, pollsters, source_name): all_pollsters = list(pollsters) number_workers_for_pollsters =\ self.manager.conf.polling.threads_to_process_pollsters if number_workers_for_pollsters < 0: raise RuntimeError("The configuration " "'threads_to_process_pollsters' has a negative " "value [%s], which should not be allowed.", number_workers_for_pollsters) if number_workers_for_pollsters == 0: number_workers_for_pollsters = len(all_pollsters) if number_workers_for_pollsters < len(all_pollsters): LOG.debug("The number of pollsters in source [%s] is bigger " "than the number of worker threads to execute them. 
" "Therefore, one can expect the process to be longer " "than the expected.", source_name) all_pollster_scheduled = [] with futures.ThreadPoolExecutor( thread_name_prefix="Pollster-executor", max_workers=number_workers_for_pollsters) as executor: LOG.debug("Processing pollsters for [%s] with [%s] threads.", source_name, number_workers_for_pollsters) for pollster in all_pollsters: all_pollster_scheduled.append( self.register_pollster_execution( cache, discovery_cache, executor, poll_history, pollster, source_name)) for s in all_pollster_scheduled: LOG.debug(s.result()) def register_pollster_execution(self, cache, discovery_cache, executor, poll_history, pollster, source_name): LOG.debug("Registering pollster [%s] from source [%s] to be executed " "via executor [%s] with cache [%s], pollster history [%s], " "and discovery cache [%s].", pollster, source_name, executor, cache, poll_history, discovery_cache) def _internal_function(): self._internal_pollster_run(cache, discovery_cache, poll_history, pollster, source_name) return "Finished processing pollster [%s]." % pollster.name return executor.submit(_internal_function) def _internal_pollster_run(self, cache, discovery_cache, poll_history, pollster, source_name): key = Resources.key(source_name, pollster) candidate_res = list( self.resources[key].get(discovery_cache)) if not candidate_res and pollster.obj.default_discovery: LOG.debug("Executing discovery process for pollsters [%s] " "and discovery method [%s] via process [%s].", pollster.obj, pollster.obj.default_discovery, self.manager.discover) candidate_res = self.manager.discover( [pollster.obj.default_discovery], discovery_cache) # Remove duplicated resources and black resources. Using # set() requires well defined __hash__ for each resource. # Since __eq__ is defined, 'not in' is safe here. 
polling_resources = [] black_res = self.resources[key].blacklist history = poll_history.get(pollster.name, []) for x in candidate_res: if x not in history: history.append(x) if x not in black_res: polling_resources.append(x) poll_history[pollster.name] = history # If no resources, skip for this pollster if not polling_resources: p_context = 'new' if history else '' LOG.debug("Skip pollster %(name)s, no %(p_context)s " "resources found this cycle", {'name': pollster.name, 'p_context': p_context}) return LOG.info("Polling pollster %(poll)s in the context of " "%(src)s", dict(poll=pollster.name, src=source_name)) try: source_obj = self.sources_map[source_name] coordination_group_name = source_obj.group_for_coordination LOG.debug("Checking if we need coordination for pollster " "[%s] with coordination group name [%s].", pollster, coordination_group_name) if self.manager.hashrings and self.manager.hashrings.get( coordination_group_name): LOG.debug("The pollster [%s] is configured in a " "source for polling that requires " "coordination under name [%s].", pollster, coordination_group_name) group_coordination = self.manager.hashrings[ coordination_group_name].belongs_to_self( str(pollster.name)) LOG.debug("Pollster [%s] is configured with " "coordination [%s] under name [%s].", pollster.name, group_coordination, coordination_group_name) if not group_coordination: LOG.info("The pollster [%s] should be processed " "by other node.", pollster.name) return else: LOG.debug("The pollster [%s] is not configured in a " "source for polling that requires " "coordination. 
The current hashrings are " "the following [%s].", pollster, self.manager.hashrings) polling_timestamp = timeutils.utcnow().isoformat() samples = pollster.obj.get_samples( manager=self.manager, cache=cache, resources=polling_resources ) sample_batch = [] self.manager.heartbeat(pollster.name, polling_timestamp) for sample in samples: # Note(yuywz): Unify the timestamp of polled samples sample.set_timestamp(polling_timestamp) if self._name_discovery and self._cache: # Try to resolve project UUIDs from cache first, # and then keystone LOG.debug("Ceilometer is configured to resolve " "project IDs to name; loading the " "project name for project ID [%s] in " "sample [%s].", sample.project_id, sample) if sample.project_id: sample.project_name = \ self._cache.resolve_uuid_from_cache( "projects", sample.project_id ) # Try to resolve user UUIDs from cache first, # and then keystone LOG.debug("Ceilometer is configured to resolve " "user IDs to name; loading the " "user name for user ID [%s] in " "sample [%s].", sample.user_id, sample) if sample.user_id: sample.user_name = \ self._cache.resolve_uuid_from_cache( "users", sample.user_id ) LOG.debug("Final sample generated after loading " "the project and user names bases on " "the IDs [%s].", sample) sample_dict = ( publisher_utils.meter_message_from_counter( sample, self._telemetry_secret )) if self._batch_size: if len(sample_batch) >= self._batch_size: self._send_notification(sample_batch) sample_batch = [] sample_batch.append(sample_dict) else: self._send_notification([sample_dict]) if sample_batch: self._send_notification(sample_batch) LOG.info("Finished polling pollster %(poll)s in the " "context of %(src)s", dict(poll=pollster.name, src=source_name)) except plugin_base.PollsterPermanentError as err: LOG.error( 'Prevent pollster %(name)s from ' 'polling %(res_list)s on source %(source)s anymore!', dict(name=pollster.name, res_list=str(err.fail_res_list), source=source_name)) 
self.resources[key].blacklist.extend(err.fail_res_list) except Exception as err: LOG.error( 'Continue after error from %(name)s: %(error)s' % ({'name': pollster.name, 'error': err}), exc_info=True) def _send_notification(self, samples): if self.manager.conf.polling.enable_notifications: self.manager.notifier.sample( {}, 'telemetry.polling', {'samples': samples} ) if self.manager.conf.polling.enable_prometheus_exporter: prom_exporter.collect_metrics(samples) class AgentHeartBeatManager(cotyledon.Service): def __init__(self, worker_id, conf, namespaces=None, queue=None): super().__init__(worker_id) self.conf = conf if conf.polling.heartbeat_socket_dir is None: raise HeartBeatException("path to a directory containing " "heart beat sockets is required", conf) if type(namespaces) is not list: if namespaces is None: namespaces = "" namespaces = [namespaces] self._lock = threading.Lock() self._queue = queue self._status = dict() self._sock_pth = os.path.join( conf.polling.heartbeat_socket_dir, f"ceilometer-{'-'.join(sorted(namespaces))}.socket" ) self._delete_socket() self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) try: self._sock.bind(self._sock_pth) self._sock.listen(1) except OSError as err: raise HeartBeatException("Failed to open socket file " f"({self._sock_pth}): {err}", conf) LOG.info("Starting heartbeat child service. 
Listening" f" on {self._sock_pth}") def _delete_socket(self): try: os.remove(self._sock_pth) except OSError: pass def terminate(self): self._tpe.shutdown(wait=False, cancel_futures=True) self._sock.close() self._delete_socket() def _update_status(self): hb = self._queue.get() with self._lock: self._status[hb['pollster']] = hb['timestamp'] LOG.debug(f"Updated heartbeat for {hb['pollster']} " f"({hb['timestamp']})") def _send_heartbeat(self): s, addr = self._sock.accept() LOG.debug("Heartbeat status report requested " f"at {self._sock_pth}") with self._lock: out = '\n'.join([f"{k} {v}" for k, v in self._status.items()]) s.sendall(out.encode('utf-8')) s.close() LOG.debug(f"Reported heartbeat status:\n{out}") def run(self): super().run() LOG.debug("Started heartbeat child process.") def _read_queue(): LOG.debug("Started heartbeat update thread") while True: self._update_status() def _report_status(): LOG.debug("Started heartbeat reporting thread") while True: self._send_heartbeat() with futures.ThreadPoolExecutor(max_workers=2) as executor: self._tpe = executor executor.submit(_read_queue) executor.submit(_report_status) class AgentManager(cotyledon.Service): def __init__(self, worker_id, conf, namespaces=None, queue=None): namespaces = namespaces or ['compute', 'central'] group_prefix = conf.polling.partitioning_group_prefix super().__init__(worker_id) self.conf = conf self._queue = queue if type(namespaces) is not list: namespaces = [namespaces] # we'll have default ['compute', 'central'] here if no namespaces will # be passed extensions = (self._extensions('poll', namespace, self.conf).extensions for namespace in namespaces) # get the extensions from pollster builder extensions_fb = (self._extensions_from_builder('poll', namespace) for namespace in namespaces) # Create dynamic pollsters extensions_dynamic_pollsters = self.create_dynamic_pollsters( namespaces) self.extensions = list(itertools.chain(*list(extensions))) + list( itertools.chain(*list(extensions_fb))) + 
list( extensions_dynamic_pollsters) if not self.extensions: LOG.warning('No valid pollsters can be loaded from %s ' 'namespaces', namespaces) discoveries = (self._extensions('discover', namespace, self.conf).extensions for namespace in namespaces) self.discoveries = list(itertools.chain(*list(discoveries))) self.polling_periodics = None self.hashrings = None self.partition_coordinator = None if self.conf.coordination.backend_url: # XXX uuid4().bytes ought to work, but it requires ascii for now coordination_id = str(uuid.uuid4()).encode('ascii') self.partition_coordinator = coordination.get_coordinator( self.conf.coordination.backend_url, coordination_id) # Compose coordination group prefix. # We'll use namespaces as the basement for this partitioning. namespace_prefix = '-'.join(sorted(namespaces)) self.group_prefix = ('{}-{}'.format(namespace_prefix, group_prefix) if group_prefix else namespace_prefix) if self.conf.polling.enable_notifications: self.notifier = oslo_messaging.Notifier( messaging.get_transport(self.conf), driver=self.conf.publisher_notifier.telemetry_driver, publisher_id="ceilometer.polling") if self.conf.polling.enable_prometheus_exporter: for addr in self.conf.polling.prometheus_listen_addresses: address = netutils.parse_host_port(addr) if address[0] is None or address[1] is None: LOG.warning('Ignoring invalid address: %s', addr) certfile = self.conf.polling.prometheus_tls_certfile keyfile = self.conf.polling.prometheus_tls_keyfile if self.conf.polling.prometheus_tls_enable: if not certfile or not keyfile: raise ValueError( "Certfile and keyfile must be provided." 
) else: certfile = keyfile = None prom_exporter.export( address[0], address[1], certfile, keyfile) self._keystone = None self._keystone_last_exception = None def heartbeat(self, name, timestamp): """Send heartbeat data if the agent is configured to do so.""" if self._queue is not None: try: hb = { 'timestamp': timestamp, 'pollster': name } self._queue.put_nowait(hb) LOG.debug(f"Polster heartbeat update: {name}") except queue.Full: LOG.warning(f"Heartbeat queue full. Update failed: {hb}") def create_dynamic_pollsters(self, namespaces): """Creates dynamic pollsters This method Creates dynamic pollsters based on configurations placed on 'pollsters_definitions_dirs' :param namespaces: The namespaces we are running on to validate if the pollster should be instantiated or not. :return: a list with the dynamic pollsters defined by the operator. """ namespaces_set = set(namespaces) pollsters_definitions_dirs = self.conf.pollsters_definitions_dirs if not pollsters_definitions_dirs: LOG.info("Variable 'pollsters_definitions_dirs' not defined.") return [] LOG.info("Looking for dynamic pollsters configurations at [%s].", pollsters_definitions_dirs) pollsters_definitions_files = [] for directory in pollsters_definitions_dirs: files = glob.glob(os.path.join(directory, "*.yaml")) if not files: LOG.info("No dynamic pollsters found in folder [%s].", directory) continue for filepath in sorted(files): if filepath is not None: pollsters_definitions_files.append(filepath) if not pollsters_definitions_files: LOG.info("No dynamic pollsters file found in dirs [%s].", pollsters_definitions_dirs) return [] pollsters_definitions = {} for pollsters_definitions_file in pollsters_definitions_files: pollsters_cfg = declarative.load_definitions( self.conf, {}, pollsters_definitions_file) LOG.info("File [%s] has [%s] dynamic pollster configurations.", pollsters_definitions_file, len(pollsters_cfg)) for pollster_cfg in pollsters_cfg: pollster_name = pollster_cfg['name'] pollster_namespaces = 
pollster_cfg.get( 'namespaces', ['central']) if isinstance(pollster_namespaces, list): pollster_namespaces = set(pollster_namespaces) else: pollster_namespaces = {pollster_namespaces} if not bool(namespaces_set & pollster_namespaces): LOG.info("The pollster [%s] is not configured to run in " "these namespaces %s, the configured namespaces " "for this pollster are %s. Therefore, we are " "skipping it.", pollster_name, namespaces_set, pollster_namespaces) continue if pollster_name not in pollsters_definitions: LOG.info("Loading dynamic pollster [%s] from file [%s].", pollster_name, pollsters_definitions_file) try: pollsters_definitions[pollster_name] =\ dynamic_pollster.DynamicPollster( pollster_cfg, self.conf) except Exception as e: LOG.error( "Error [%s] while loading dynamic pollster [%s].", e, pollster_name) else: LOG.info( "Dynamic pollster [%s] is already defined." "Therefore, we are skipping it.", pollster_name) LOG.debug("Total of dynamic pollsters [%s] loaded.", len(pollsters_definitions)) return pollsters_definitions.values() @staticmethod def _get_ext_mgr(namespace, *args, **kwargs): def _catch_extension_load_error(mgr, ep, exc): # Extension raising ExtensionLoadError can be ignored, # and ignore anything we can't import as a safety measure. 
if isinstance(exc, plugin_base.ExtensionLoadError): LOG.debug("Skip loading extension for %s: %s", ep.name, exc.msg) return show_exception = (LOG.isEnabledFor(logging.DEBUG) and isinstance(exc, ImportError)) LOG.error("Failed to import extension for %(name)r: " "%(error)s", {'name': ep.name, 'error': exc}, exc_info=show_exception) if isinstance(exc, ImportError): return raise exc return extension.ExtensionManager( namespace=namespace, invoke_on_load=True, invoke_args=args, invoke_kwds=kwargs, on_load_failure_callback=_catch_extension_load_error, ) def _extensions(self, category, agent_ns=None, *args, **kwargs): namespace = ('ceilometer.{}.{}'.format(category, agent_ns) if agent_ns else 'ceilometer.%s' % category) return self._get_ext_mgr(namespace, *args, **kwargs) def _extensions_from_builder(self, category, agent_ns=None): ns = ('ceilometer.builder.{}.{}'.format(category, agent_ns) if agent_ns else 'ceilometer.builder.%s' % category) mgr = self._get_ext_mgr(ns, self.conf) def _build(ext): return ext.plugin.get_pollsters_extensions(self.conf) # NOTE: this seems a stevedore bug. if no extensions are found, # map will raise runtimeError which is not documented. 
if mgr.names(): return list(itertools.chain(*mgr.map(_build))) else: return [] def join_partitioning_groups(self): groups = set() for d in self.discoveries: generated_group_id = self.construct_group_id(d.obj.group_id) LOG.debug("Adding discovery [%s] with group ID [%s] to build the " "coordination partitioning via constructed group ID " "[%s].", d.__dict__, d.obj.group_id, generated_group_id) groups.add(generated_group_id) # let each set of statically-defined resources have its own group static_resource_groups = set() for p in self.polling_manager.sources: if p.resources: generated_group_id = self.construct_group_id( hash_of_set(p.resources)) LOG.debug("Adding pollster group [%s] with resources [%s] to " "build the coordination partitioning via " "constructed group ID [%s].", p, p.resources, generated_group_id) static_resource_groups.add(generated_group_id) else: LOG.debug("Pollster group [%s] does not have resources defined" "to build the group ID for coordination.", p) groups.update(static_resource_groups) # (rafaelweingartner) here we will configure the dynamic # coordination process. It is useful to sync pollster that do not rely # on discovery process, such as the dynamic pollster on compute nodes. dynamic_pollster_groups_for_coordination = set() for p in self.polling_manager.sources: if p.group_id_coordination_expression: if p.resources: LOG.warning("The pollster group [%s] has resources to " "execute coordination. 
Therefore, we do not " "add it via the dynamic coordination process.", p.name) continue group_prefix = p.name generated_group_id = eval(p.group_id_coordination_expression) group_for_coordination = "{}-{}".format( group_prefix, generated_group_id) dynamic_pollster_groups_for_coordination.add( group_for_coordination) p.group_for_coordination = group_for_coordination LOG.debug("Adding pollster group [%s] with dynamic " "coordination to build the coordination " "partitioning via constructed group ID [%s].", p, dynamic_pollster_groups_for_coordination) else: LOG.debug("Pollster group [%s] does not have an expression to " "dynamically use in the coordination process.", p) groups.update(dynamic_pollster_groups_for_coordination) self.hashrings = { group: self.partition_coordinator.join_partitioned_group(group) for group in groups} LOG.debug("Hashrings [%s] created for pollsters definition.", self.hashrings) def setup_polling_tasks(self): polling_tasks = {} for source in self.polling_manager.sources: for pollster in self.extensions: if source.support_meter(pollster.name): polling_task = polling_tasks.get(source.get_interval()) if not polling_task: polling_task = PollingTask(self) polling_tasks[source.get_interval()] = polling_task polling_task.add(pollster, source) return polling_tasks def construct_group_id(self, discovery_group_id): return '{}-{}'.format(self.group_prefix, discovery_group_id) def start_polling_tasks(self): data = self.setup_polling_tasks() # Don't start useless threads if no task will run if not data: return # One thread per polling tasks is enough self.polling_periodics = periodics.PeriodicWorker.create( [], executor_factory=lambda: futures.ThreadPoolExecutor(max_workers=len(data))) for interval, polling_task in data.items(): @periodics.periodic(spacing=interval, run_immediately=True) def task(running_task): self.interval_task(running_task) self.polling_periodics.add(task, polling_task) utils.spawn_thread(self.polling_periodics.start, allow_empty=True) 
def run(self): super().run() self.polling_manager = PollingManager(self.conf) if self.partition_coordinator: self.partition_coordinator.start(start_heart=True) self.join_partitioning_groups() self.start_polling_tasks() def terminate(self): self.stop_pollsters_tasks() if self.partition_coordinator: self.partition_coordinator.stop() super().terminate() def interval_task(self, task): # NOTE(sileht): remove the previous keystone client # and exception to get a new one in this polling cycle. self._keystone = None self._keystone_last_exception = None # Note(leehom): if coordinator enabled call run_watchers to # update group member info before collecting if self.partition_coordinator: self.partition_coordinator.run_watchers() task.poll_and_notify() @property def keystone(self): # FIXME(sileht): This lazy loading of keystone client doesn't # look concurrently safe, we never see issue because once we have # connected to keystone everything is fine, and because all pollsters # are delayed during startup. But each polling task creates a new # client and overrides it which has been created by other polling # tasks. During this short time bad thing can occur. # # I think we must not reset keystone client before # running a polling task, but refresh it periodically instead. 
# NOTE(sileht): we do lazy loading of the keystone client # for multiple reasons: # * don't use it if no plugin need it # * use only one client for all plugins per polling cycle if self._keystone is None and self._keystone_last_exception is None: try: self._keystone = keystone_client.get_client(self.conf) self._keystone_last_exception = None except ka_exceptions.ClientException as e: self._keystone = None self._keystone_last_exception = e if self._keystone is not None: return self._keystone else: raise self._keystone_last_exception @staticmethod def _parse_discoverer(url): s = urlparse.urlparse(url) return (s.scheme or s.path), (s.netloc + s.path if s.scheme else None) def _discoverer(self, name): for d in self.discoveries: if d.name == name: return d.obj return None def discover(self, discovery=None, discovery_cache=None): resources = [] discovery = discovery or [] for url in discovery: if discovery_cache is not None and url in discovery_cache: resources.extend(discovery_cache[url]) continue name, param = self._parse_discoverer(url) discoverer = self._discoverer(name) if discoverer: try: if discoverer.KEYSTONE_REQUIRED_FOR_SERVICE: service_type = getattr( self.conf.service_types, discoverer.KEYSTONE_REQUIRED_FOR_SERVICE) if not keystone_client.\ get_service_catalog(self.keystone).\ get_endpoints(service_type=service_type): LOG.warning( 'Skipping %(name)s, %(service_type)s service ' 'is not registered in keystone', {'name': name, 'service_type': service_type}) continue discovered = discoverer.discover(self, param) if self.partition_coordinator: discovered = [ v for v in discovered if self.hashrings[ self.construct_group_id(discoverer.group_id) ].belongs_to_self(str(v))] resources.extend(discovered) if discovery_cache is not None: discovery_cache[url] = discovered except ka_exceptions.ClientException as e: LOG.error('Skipping %(name)s, keystone issue: ' '%(exc)s', {'name': name, 'exc': e}) except Exception as err: LOG.exception('Unable to discover resources: %s', 
err) else: LOG.warning('Unknown discovery extension: %s', name) return resources def stop_pollsters_tasks(self): if self.polling_periodics: self.polling_periodics.stop() self.polling_periodics.wait() self.polling_periodics = None class PollingManager(agent.ConfigManagerBase): """Polling Manager to handle polling definition""" def __init__(self, conf): """Setup the polling according to config. The configuration is supported as follows: {"sources": [{"name": source_1, "interval": interval_time, "meters" : ["meter_1", "meter_2"], "resources": ["resource_uri1", "resource_uri2"], }, {"name": source_2, "interval": interval_time, "meters" : ["meter_3"], }, ]} } The interval determines the cadence of sample polling Valid meter format is '*', '!meter_name', or 'meter_name'. '*' is wildcard symbol means any meters; '!meter_name' means "meter_name" will be excluded; 'meter_name' means 'meter_name' will be included. Valid meters definition is all "included meter names", all "excluded meter names", wildcard and "excluded meter names", or only wildcard. The resources is list of URI indicating the resources from where the meters should be polled. It's optional and it's up to the specific pollster to decide how to use it. """ super().__init__(conf) cfg = self.load_config(conf.polling.cfg_file) self.sources = [] if 'sources' not in cfg: raise PollingException("sources required", cfg) for s in cfg.get('sources'): self.sources.append(PollingSource(s)) class PollingSource(agent.Source): """Represents a source of pollsters In effect it is a set of pollsters emitting samples for a set of matching meters. Each source encapsulates meter name matching, polling interval determination, optional resource enumeration or discovery. 
""" def __init__(self, cfg): try: super().__init__(cfg) except agent.SourceException as err: raise PollingException(err.msg, cfg) try: self.meters = cfg['meters'] except KeyError: raise PollingException("Missing meters value", cfg) try: self.interval = int(cfg['interval']) except ValueError: raise PollingException("Invalid interval value", cfg) except KeyError: raise PollingException("Missing interval value", cfg) if self.interval <= 0: raise PollingException("Interval value should > 0", cfg) self.resources = cfg.get('resources') or [] if not isinstance(self.resources, list): raise PollingException("Resources should be a list", cfg) self.discovery = cfg.get('discovery') or [] if not isinstance(self.discovery, list): raise PollingException("Discovery should be a list", cfg) try: self.check_source_filtering(self.meters, 'meters') except agent.SourceException as err: raise PollingException(err.msg, cfg) self.group_id_coordination_expression = cfg.get( 'group_id_coordination_expression') # This value is configured when coordination is enabled. self.group_for_coordination = None def get_interval(self): return self.interval def support_meter(self, meter_name): return self.is_supported(self.meters, meter_name) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/polling/plugin_base.py0000664000175100017510000001370315033033467022437 0ustar00mylesmyles# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Base class for plugins. """ import abc from stevedore import extension class ExtensionLoadError(Exception): """Error of loading pollster plugin. PollsterBase provides a hook, setup_environment, called in pollster loading to setup required HW/SW dependency. Any exception from it would be propagated as ExtensionLoadError, then skip loading this pollster. """ def __init__(self, msg=None): self.msg = msg class PollsterPermanentError(Exception): """Permanent error when polling. When unrecoverable error happened in polling, pollster can raise this exception with failed resource to prevent itself from polling any more. Resource is one of parameter resources from get_samples that cause polling error. """ def __init__(self, resources): self.fail_res_list = resources class PollsterBase(metaclass=abc.ABCMeta): """Base class for plugins that support the polling API.""" def setup_environment(self): """Setup required environment for pollster. Each subclass could overwrite it for specific usage. Any exception raised in this function would prevent pollster being loaded. """ pass def __init__(self, conf): super().__init__() self.conf = conf try: self.setup_environment() except Exception as err: raise ExtensionLoadError(err) @property @abc.abstractmethod def default_discovery(self): """Default discovery to use for this pollster. There are three ways a pollster can get a list of resources to poll, listed here in ascending order of precedence: 1. from the per-agent discovery, 2. from the per-pollster discovery (defined here) 3. from the per-pipeline configured discovery and/or per-pipeline configured static resources. If a pollster should only get resources from #1 or #3, this property should be set to None. """ @abc.abstractmethod def get_samples(self, manager, cache, resources): """Return a sequence of Counter instances from polling the resources. 
:param manager: The service manager class invoking the plugin. :param cache: A dictionary to allow pollsters to pass data between themselves when recomputing it would be expensive (e.g., asking another service for a list of objects). :param resources: A list of resources the pollster will get data from. It's up to the specific pollster to decide how to use it. It is usually supplied by a discovery, see ``default_discovery`` for more information. """ @classmethod def build_pollsters(cls, conf): """Return a list of tuple (name, pollster). The name is the meter name which the pollster would return, the pollster is a pollster object instance. The pollster which implements this method should be registered in the namespace of ceilometer.builder.xxx instead of ceilometer.poll.xxx. """ return [] @classmethod def get_pollsters_extensions(cls, conf): """Return a list of stevedore extensions. The returned stevedore extensions wrap the pollster object instances returned by build_pollsters. """ extensions = [] try: for name, pollster in cls.build_pollsters(conf): ext = extension.Extension(name, None, cls, pollster) extensions.append(ext) except Exception as err: raise ExtensionLoadError(err) return extensions class DiscoveryBase(metaclass=abc.ABCMeta): KEYSTONE_REQUIRED_FOR_SERVICE = None """Service type required in keystone catalog to works""" def __init__(self, conf): self.conf = conf @abc.abstractmethod def discover(self, manager, param=None): """Discover resources to monitor. The most fine-grained discovery should be preferred, so the work is the most evenly distributed among multiple agents (if they exist). For example: if the pollster can separately poll individual resources, it should have its own discovery implementation to discover those resources. If it can only poll per-tenant, then the `TenantDiscovery` should be used. If even that is not possible, use `EndpointDiscovery` (see their respective docstrings). 
:param manager: The service manager class invoking the plugin. :param param: an optional parameter to guide the discovery """ @property def group_id(self): """Return group id of this discovery. All running discoveries with the same group_id should return the same set of resources at a given point in time. By default, a discovery is put into a global group, meaning that all discoveries of its type running anywhere in the cloud, return the same set of resources. This property can be overridden to provide correct grouping of localized discoveries. For example, compute discovery is localized to a host, which is reflected in its group_id. A None value signifies that this discovery does not want to be part of workload partitioning at all. """ return 'global' ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/polling/prom_exporter.py0000664000175100017510000001210615033033467023050 0ustar00mylesmyles# # Copyright 2024 Juan Larriba # Copyright 2024 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import prometheus_client as prom CEILOMETER_REGISTRY = prom.CollectorRegistry() def export(prom_iface, prom_port, tls_cert=None, tls_key=None): prom.start_http_server(port=prom_port, addr=prom_iface, registry=CEILOMETER_REGISTRY, certfile=tls_cert, keyfile=tls_key) def collect_metrics(samples): metric_cleared = False for sample in samples: name = "ceilometer_" + sample['counter_name'].replace('.', '_') labels = _gen_labels(sample) metric = CEILOMETER_REGISTRY._names_to_collectors.get(name, None) # NOTE: Ungregister the metric at the first iteration to purge stale # samples if not metric_cleared: if metric: CEILOMETER_REGISTRY.unregister(metric) metric = None metric_cleared = True if metric is None: metric = prom.Gauge(name=name, documentation="", labelnames=labels['keys'], registry=CEILOMETER_REGISTRY) metric.labels(*labels['values']).set(sample['counter_volume']) def _gen_labels(sample): labels = dict(keys=[], values=[]) cNameShards = sample['counter_name'].split(".") ctype = '' plugin = cNameShards[0] pluginVal = sample['resource_id'] if len(cNameShards) > 2: pluginVal = cNameShards[2] if len(cNameShards) > 1: ctype = cNameShards[1] else: ctype = cNameShards[0] labels['keys'].append(plugin) labels['values'].append(pluginVal) labels['keys'].append("publisher") labels['values'].append("ceilometer") labels['keys'].append("type") labels['values'].append(ctype) if sample.get('counter_name'): labels['keys'].append("counter") labels['values'].append(sample['counter_name']) if sample.get('project_id'): labels['keys'].append("project") labels['values'].append(sample['project_id']) if sample.get('project_name'): labels['keys'].append("project_name") labels['values'].append(sample['project_name']) if sample.get('user_id'): labels['keys'].append("user") labels['values'].append(sample['user_id']) if sample.get('user_name'): labels['keys'].append("user_name") labels['values'].append(sample['user_name']) if sample.get('counter_unit'): labels['keys'].append("unit") 
labels['values'].append(sample['counter_unit']) if sample.get('resource_id'): labels['keys'].append("resource") labels['values'].append(sample['resource_id']) if sample.get('resource_metadata'): resource_metadata = sample['resource_metadata'] if resource_metadata.get('host'): labels['keys'].append("vm_instance") labels['values'].append(resource_metadata['host']) if resource_metadata.get('display_name'): value = resource_metadata['display_name'] if resource_metadata.get('name'): value = ':'.join([value, resource_metadata['name']]) labels['keys'].append("resource_name") labels['values'].append(value) elif resource_metadata.get('name'): labels['keys'].append("resource_name") labels['values'].append(resource_metadata['name']) # NOTE(jwysogla): The prometheus_client library doesn't support # variable count of labels for the same metric. That's why the # prometheus exporter cannot support custom metric labels added # with the --property metering.= when # creating a server. This still works with publishers though. # The "server_group" label is used for autoscaling and so it's # the only one getting parsed. To always have the same number # of labels, it's added to all metrics and where there isn't a # value defined, it's substituted with "none". 
user_metadata = resource_metadata.get('user_metadata', {}) if user_metadata.get('server_group'): labels['keys'].append('server_group') labels['values'].append(user_metadata['server_group']) else: labels['keys'].append('server_group') labels['values'].append('none') if resource_metadata.get('alarm_state', '') != '': labels['keys'].append('state') labels['values'].append(resource_metadata['alarm_state']) return labels ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7949414 ceilometer-24.1.0.dev59/ceilometer/privsep/0000775000175100017510000000000015033033521017604 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/privsep/__init__.py0000664000175100017510000000212515033033467021726 0ustar00mylesmyles# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Setup privsep decorator.""" from oslo_privsep import capabilities from oslo_privsep import priv_context sys_admin_pctxt = priv_context.PrivContext( 'ceilometer', cfg_section='ceilometer_sys_admin', pypath=__name__ + '.sys_admin_pctxt', capabilities=[capabilities.CAP_CHOWN, capabilities.CAP_DAC_OVERRIDE, capabilities.CAP_DAC_READ_SEARCH, capabilities.CAP_FOWNER, capabilities.CAP_NET_ADMIN, capabilities.CAP_SYS_ADMIN], ) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/privsep/ipmitool.py0000664000175100017510000000142715033033467022027 0ustar00mylesmyles# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Helpers for impi related routines. """ from oslo_concurrency import processutils import ceilometer.privsep @ceilometer.privsep.sys_admin_pctxt.entrypoint def ipmi(*cmd): return processutils.execute(*cmd) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7959414 ceilometer-24.1.0.dev59/ceilometer/publisher/0000775000175100017510000000000015033033521020111 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/publisher/__init__.py0000664000175100017510000000273115033033467022236 0ustar00mylesmyles# # Copyright 2013 Intel Corp. 
# Copyright 2013-2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from oslo_log import log from oslo_utils import netutils from stevedore import driver LOG = log.getLogger(__name__) def get_publisher(conf, url, namespace): """Get publisher driver and load it. :param url: URL for the publisher :param namespace: Namespace to use to look for drivers. """ parse_result = netutils.urlsplit(url) loaded_driver = driver.DriverManager(namespace, parse_result.scheme) return loaded_driver.driver(conf, parse_result) class ConfigPublisherBase(metaclass=abc.ABCMeta): """Base class for plugins that publish data.""" def __init__(self, conf, parsed_url): self.conf = conf @abc.abstractmethod def publish_samples(self, samples): """Publish samples into final conduit.""" @abc.abstractmethod def publish_events(self, events): """Publish events into final conduit.""" ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7959414 ceilometer-24.1.0.dev59/ceilometer/publisher/data/0000775000175100017510000000000015033033521021022 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/publisher/data/gnocchi_resources.yaml0000664000175100017510000002121315033033467025422 0ustar00mylesmyles--- archive_policy_default: ceilometer-low archive_policies: # NOTE(sileht): We keep "mean" for now to not break all gating that # use the current tempest scenario. 
- name: ceilometer-low aggregation_methods: - mean back_window: 0 definition: - granularity: 5 minutes timespan: 30 days - name: ceilometer-low-rate aggregation_methods: - mean - rate:mean back_window: 0 definition: - granularity: 5 minutes timespan: 30 days - name: ceilometer-high aggregation_methods: - mean back_window: 0 definition: - granularity: 1 second timespan: 1 hour - granularity: 1 minute timespan: 1 day - granularity: 1 hour timespan: 365 days - name: ceilometer-high-rate aggregation_methods: - mean - rate:mean back_window: 0 definition: - granularity: 1 second timespan: 1 hour - granularity: 1 minute timespan: 1 day - granularity: 1 hour timespan: 365 days resources: - resource_type: identity metrics: identity.authenticate.success: identity.authenticate.pending: identity.authenticate.failure: identity.user.created: identity.user.deleted: identity.user.updated: identity.group.created: identity.group.deleted: identity.group.updated: identity.role.created: identity.role.deleted: identity.role.updated: identity.project.created: identity.project.deleted: identity.project.updated: identity.trust.created: identity.trust.deleted: identity.role_assignment.created: identity.role_assignment.deleted: - resource_type: ceph_account metrics: radosgw.objects: radosgw.objects.size: radosgw.objects.containers: radosgw.api.request: radosgw.containers.objects: radosgw.containers.objects.size: - resource_type: instance metrics: memory: memory.usage: memory.resident: memory.swap.in: memory.swap.out: vcpus: power.state: cpu: archive_policy_name: ceilometer-low-rate disk.root.size: disk.ephemeral.size: disk.latency: disk.iops: disk.capacity: disk.allocation: disk.usage: compute.instance.booting.time: perf.cpu.cycles: perf.instructions: perf.cache.references: perf.cache.misses: attributes: host: resource_metadata.(instance_host|host) image_ref: resource_metadata.image_ref launched_at: resource_metadata.launched_at created_at: resource_metadata.created_at deleted_at: 
resource_metadata.deleted_at display_name: resource_metadata.display_name flavor_id: resource_metadata.(instance_flavor_id|(flavor.id)|flavor_id) flavor_name: resource_metadata.(instance_type|(flavor.name)|flavor_name) server_group: resource_metadata.user_metadata.server_group event_delete: compute.instance.delete.start event_create: compute.instance.create.end event_attributes: id: instance_id display_name: display_name host: host availability_zone: availability_zone flavor_id: instance_type_id flavor_name: instance_type user_id: user_id project_id: project_id event_associated_resources: instance_network_interface: '{"=": {"instance_id": "%s"}}' instance_disk: '{"=": {"instance_id": "%s"}}' - resource_type: instance_network_interface metrics: network.outgoing.packets: archive_policy_name: ceilometer-low-rate network.incoming.packets: archive_policy_name: ceilometer-low-rate network.outgoing.packets.drop: archive_policy_name: ceilometer-low-rate network.incoming.packets.drop: archive_policy_name: ceilometer-low-rate network.outgoing.packets.error: archive_policy_name: ceilometer-low-rate network.incoming.packets.error: archive_policy_name: ceilometer-low-rate network.outgoing.bytes: archive_policy_name: ceilometer-low-rate network.incoming.bytes: archive_policy_name: ceilometer-low-rate attributes: name: resource_metadata.vnic_name instance_id: resource_metadata.instance_id - resource_type: instance_disk metrics: disk.device.read.requests: archive_policy_name: ceilometer-low-rate disk.device.write.requests: archive_policy_name: ceilometer-low-rate disk.device.read.bytes: archive_policy_name: ceilometer-low-rate disk.device.write.bytes: archive_policy_name: ceilometer-low-rate disk.device.read.latency: disk.device.write.latency: disk.device.capacity: disk.device.allocation: disk.device.usage: attributes: name: resource_metadata.disk_name instance_id: resource_metadata.instance_id - resource_type: image metrics: image.size: image.download: image.serve: attributes: 
name: resource_metadata.name container_format: resource_metadata.container_format disk_format: resource_metadata.disk_format event_delete: image.delete event_attributes: id: resource_id - resource_type: ipmi metrics: hardware.ipmi.node.power: hardware.ipmi.node.temperature: hardware.ipmi.node.inlet_temperature: hardware.ipmi.node.outlet_temperature: hardware.ipmi.node.fan: hardware.ipmi.node.current: hardware.ipmi.node.voltage: hardware.ipmi.node.airflow: hardware.ipmi.node.cups: hardware.ipmi.node.cpu_util: hardware.ipmi.node.mem_util: hardware.ipmi.node.io_util: - resource_type: ipmi_sensor metrics: - 'hardware.ipmi.power' - 'hardware.ipmi.temperature' - 'hardware.ipmi.current' - 'hardware.ipmi.voltage' - 'hardware.ipmi.fan' attributes: node: resource_metadata.node - resource_type: network metrics: bandwidth: ip.floating: event_delete: floatingip.delete.end event_attributes: id: resource_id - resource_type: stack metrics: stack.create: stack.update: stack.delete: stack.resume: stack.suspend: - resource_type: swift_account metrics: storage.objects.incoming.bytes: storage.objects.outgoing.bytes: storage.objects.size: storage.objects: storage.objects.containers: storage.containers.objects: storage.containers.objects.size: attributes: storage_policy: resource_metadata.storage_policy - resource_type: volume metrics: volume: volume.size: snapshot.size: volume.snapshot.size: volume.backup.size: backup.size: volume.manage_existing.start: volume.manage_existing.end: volume.manage_existing_snapshot.start: volume.manage_existing_snapshot.end: attributes: display_name: resource_metadata.(display_name|name) volume_type: resource_metadata.volume_type volume_type_id: resource_metadata.volume_type_id image_id: resource_metadata.image_id instance_id: resource_metadata.instance_id event_delete: - volume.delete.end - snapshot.delete.end event_update: - volume.transfer.accept.end - snapshot.transfer.accept.end event_attributes: id: resource_id project_id: project_id - resource_type: 
volume_provider metrics: volume.provider.capacity.total: volume.provider.capacity.free: volume.provider.capacity.allocated: volume.provider.capacity.provisioned: volume.provider.capacity.virtual_free: - resource_type: volume_provider_pool metrics: volume.provider.pool.capacity.total: volume.provider.pool.capacity.free: volume.provider.pool.capacity.allocated: volume.provider.pool.capacity.provisioned: volume.provider.pool.capacity.virtual_free: attributes: provider: resource_metadata.provider - resource_type: nova_compute metrics: compute.node.cpu.frequency: compute.node.cpu.idle.percent: compute.node.cpu.idle.time: compute.node.cpu.iowait.percent: compute.node.cpu.iowait.time: compute.node.cpu.kernel.percent: compute.node.cpu.kernel.time: compute.node.cpu.percent: compute.node.cpu.user.percent: compute.node.cpu.user.time: attributes: host_name: resource_metadata.host - resource_type: manila_share metrics: manila.share.size: attributes: name: resource_metadata.name host: resource_metadata.host status: resource_metadata.status availability_zone: resource_metadata.availability_zone protocol: resource_metadata.protocol ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/publisher/file.py0000664000175100017510000001026315033033467021415 0ustar00mylesmyles# # Copyright 2013 IBM Corp # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import json import logging import logging.handlers from oslo_log import log from urllib import parse as urlparse from ceilometer import publisher LOG = log.getLogger(__name__) class FilePublisher(publisher.ConfigPublisherBase): """Publisher metering data to file. The file publisher pushes metering data into a file. The file name and location should be configured in ceilometer pipeline configuration file. If a file name and location is not specified, this File Publisher will not log any meters other than log a warning in Ceilometer log file. To enable this publisher, add the following section to the /etc/ceilometer/pipeline.yaml file or simply add it to an existing pipeline:: - name: meter_file meters: - "*" publishers: - file:///var/test?max_bytes=10000000&backup_count=5&json File path is required for this publisher to work properly. If max_bytes or backup_count is missing, FileHandler will be used to save the metering data. If max_bytes and backup_count are present, RotatingFileHandler will be used to save the metering data. The json argument is used to explicitely ask ceilometer to write json into the file. 
""" def __init__(self, conf, parsed_url): super().__init__(conf, parsed_url) self.publisher_logger = None path = parsed_url.path if not path: LOG.error('The path for the file publisher is required') return rfh = None max_bytes = 0 backup_count = 0 self.output_json = None # Handling other configuration options in the query string if parsed_url.query: params = urlparse.parse_qs(parsed_url.query, keep_blank_values=True) if "json" in params: self.output_json = True if params.get('max_bytes') and params.get('backup_count'): try: max_bytes = int(params.get('max_bytes')[0]) backup_count = int(params.get('backup_count')[0]) except ValueError: LOG.error('max_bytes and backup_count should be ' 'numbers.') return # create rotating file handler rfh = logging.handlers.RotatingFileHandler( path, encoding='utf8', maxBytes=max_bytes, backupCount=backup_count) self.publisher_logger = logging.Logger('publisher.file') self.publisher_logger.propagate = False self.publisher_logger.setLevel(logging.INFO) rfh.setLevel(logging.INFO) self.publisher_logger.addHandler(rfh) def publish_samples(self, samples): """Send a metering message for publishing :param samples: Samples from pipeline after transformation """ if self.publisher_logger: for sample in samples: if self.output_json: self.publisher_logger.info(json.dumps(sample.as_dict())) else: self.publisher_logger.info(sample.as_dict()) def publish_events(self, events): """Send an event message for publishing :param events: events from pipeline after transformation """ if self.publisher_logger: for event in events: if self.output_json: self.publisher_logger.info(json.dumps(event.as_dict(), default=str)) else: self.publisher_logger.info(event.as_dict()) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/publisher/gnocchi.py0000664000175100017510000006322515033033467022116 0ustar00mylesmyles# # Copyright 2014-2015 eNovance # # Licensed under the Apache License, 
Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from collections import defaultdict import fnmatch import itertools import json import operator import os import threading from gnocchiclient import exceptions as gnocchi_exc from keystoneauth1 import exceptions as ka_exceptions from oslo_log import log from oslo_utils import timeutils from stevedore import extension import tenacity from urllib import parse as urlparse from ceilometer import cache_utils from ceilometer import declarative from ceilometer import gnocchi_client from ceilometer.i18n import _ from ceilometer import keystone_client from ceilometer import publisher LOG = log.getLogger(__name__) EVENT_CREATE, EVENT_UPDATE, EVENT_DELETE = ("create", "update", "delete") class ResourcesDefinition: MANDATORY_FIELDS = {'resource_type': str, 'metrics': (dict, list)} MANDATORY_EVENT_FIELDS = {'id': str} def __init__(self, definition_cfg, archive_policy_default, archive_policy_override, plugin_manager): self.cfg = definition_cfg self._check_required_and_types(self.MANDATORY_FIELDS, self.cfg) if self.support_events(): self._check_required_and_types(self.MANDATORY_EVENT_FIELDS, self.cfg['event_attributes']) self._attributes = {} for name, attr_cfg in self.cfg.get('attributes', {}).items(): self._attributes[name] = declarative.Definition(name, attr_cfg, plugin_manager) self._event_attributes = {} for name, attr_cfg in self.cfg.get('event_attributes', {}).items(): self._event_attributes[name] = declarative.Definition( name, attr_cfg, plugin_manager) self.metrics = {} # 
NOTE(sileht): Convert old list to new dict format if isinstance(self.cfg['metrics'], list): values = [None] * len(self.cfg['metrics']) self.cfg['metrics'] = dict(zip(self.cfg['metrics'], values)) for m, extra in self.cfg['metrics'].items(): if not extra: extra = {} if not extra.get("archive_policy_name"): extra["archive_policy_name"] = archive_policy_default if archive_policy_override: extra["archive_policy_name"] = archive_policy_override # NOTE(sileht): For backward compat, this is after the override to # preserve the wierd previous behavior. We don't really care as we # deprecate it. if 'archive_policy' in self.cfg: LOG.warning("archive_policy '%s' for a resource-type (%s) is " "deprecated, set it for each metric instead.", self.cfg["archive_policy"], self.cfg["resource_type"]) extra["archive_policy_name"] = self.cfg['archive_policy'] self.metrics[m] = extra @staticmethod def _check_required_and_types(expected, definition): for field, field_types in expected.items(): if field not in definition: raise declarative.ResourceDefinitionException( _("Required field %s not specified") % field, definition) if not isinstance(definition[field], field_types): raise declarative.ResourceDefinitionException( _("Required field %(field)s should be a %(type)s") % {'field': field, 'type': field_types}, definition) @staticmethod def _ensure_list(value): if isinstance(value, list): return value return [value] def support_events(self): for e in ["event_create", "event_delete", "event_update"]: if e in self.cfg: return True return False def event_match(self, event_type): for e in self._ensure_list(self.cfg.get('event_create', [])): if fnmatch.fnmatch(event_type, e): return EVENT_CREATE for e in self._ensure_list(self.cfg.get('event_delete', [])): if fnmatch.fnmatch(event_type, e): return EVENT_DELETE for e in self._ensure_list(self.cfg.get('event_update', [])): if fnmatch.fnmatch(event_type, e): return EVENT_UPDATE def sample_attributes(self, sample): attrs = {} sample_dict = 
sample.as_dict() for name, definition in self._attributes.items(): value = definition.parse(sample_dict) if value is not None: attrs[name] = value return attrs def event_attributes(self, event): attrs = {'type': self.cfg['resource_type']} traits = {trait.name: trait.value for trait in event.traits} for attr, field in self.cfg.get('event_attributes', {}).items(): value = traits.get(field) if value is not None: attrs[attr] = value return attrs class LockedDefaultDict(defaultdict): """defaultdict with lock to handle threading Dictionary only deletes if nothing is accessing dict and nothing is holding lock to be deleted. If both cases are not true, it will skip delete. """ def __init__(self, *args, **kwargs): self.lock = threading.Lock() super().__init__(*args, **kwargs) def __getitem__(self, key): with self.lock: return super().__getitem__(key) def pop(self, key, *args): with self.lock: key_lock = super().__getitem__(key) if key_lock.acquire(False): try: super().pop(key, *args) finally: key_lock.release() class GnocchiPublisher(publisher.ConfigPublisherBase): """Publisher class for recording metering data into the Gnocchi service. The publisher class records each meter into the gnocchi service configured in Ceilometer pipeline file. 
An example target may look like the following: gnocchi://?archive_policy=low&filter_project=gnocchi """ def __init__(self, conf, parsed_url): super().__init__(conf, parsed_url) # TODO(jd) allow to override Gnocchi endpoint via the host in the URL options = urlparse.parse_qs(parsed_url.query) self.filter_project = options.get('filter_project', ['service'])[-1] self.filter_domain = options.get('filter_domain', ['Default'])[-1] resources_definition_file = options.get( 'resources_definition_file', ['gnocchi_resources.yaml'])[-1] archive_policy_override = options.get('archive_policy', [None])[-1] self.resources_definition, self.archive_policies_definition = ( self._load_definitions(conf, archive_policy_override, resources_definition_file)) self.metric_map = {metric: rd for rd in self.resources_definition for metric in rd.metrics} timeout = options.get('timeout', [6.05])[-1] self._ks_client = keystone_client.get_client(conf) # NOTE(cdent): The default cache backend is a real but # noop backend. We don't want to use that here because # we want to avoid the cache pathways entirely if the # cache has not been configured explicitly. 
self.cache = cache_utils.get_client(conf) self._gnocchi_project_id = None self._gnocchi_project_id_lock = threading.Lock() self._gnocchi_resource_lock = LockedDefaultDict(threading.Lock) try: self._gnocchi = self._get_gnocchi_client(conf, timeout) except tenacity.RetryError as e: raise e.last_attempt._exception from None self._already_logged_event_types = set() self._already_logged_metric_names = set() self._already_configured_archive_policies = False @tenacity.retry( stop=tenacity.stop_after_attempt(10), wait=tenacity.wait_fixed(5), retry=( tenacity.retry_if_exception_type(ka_exceptions.ServiceUnavailable) | tenacity.retry_if_exception_type(ka_exceptions.DiscoveryFailure) | tenacity.retry_if_exception_type(ka_exceptions.ConnectTimeout) ), reraise=False) def _get_gnocchi_client(self, conf, timeout): return gnocchi_client.get_gnocchiclient(conf, request_timeout=timeout) @staticmethod def _load_definitions(conf, archive_policy_override, resources_definition_file): plugin_manager = extension.ExtensionManager( namespace='ceilometer.event.trait_plugin') data = declarative.load_definitions( conf, {}, resources_definition_file, os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'gnocchi_resources.yaml')) archive_policy_default = data.get("archive_policy_default", "ceilometer-low") resource_defs = [] for resource in data.get('resources', []): try: resource_defs.append(ResourcesDefinition( resource, archive_policy_default, archive_policy_override, plugin_manager)) except Exception: LOG.error("Failed to load resource due to error", exc_info=True) return resource_defs, data.get("archive_policies", []) def ensures_archives_policies(self): if not self._already_configured_archive_policies: for ap in self.archive_policies_definition: try: self._gnocchi.archive_policy.create(ap) except gnocchi_exc.ArchivePolicyAlreadyExists: # created in the meantime by another worker pass self._already_configured_archive_policies = True @property def gnocchi_project_id(self): if 
self._gnocchi_project_id is not None: return self._gnocchi_project_id with self._gnocchi_project_id_lock: if self._gnocchi_project_id is None: if not self.filter_project: LOG.debug( "Multiple executions were locked on " "self._gnocchi_project_id_lock`. This execution " "should no call `_internal_gnocchi_project_discovery` " "as `self.filter_project` is None.") return None try: domain = self._ks_client.domains.find( name=self.filter_domain) project = self._ks_client.projects.find( name=self.filter_project, domain_id=domain.id) except ka_exceptions.NotFound: LOG.warning('Filtered project [%s] not found in keystone, ' 'ignoring the filter_project option' % self.filter_project) self.filter_project = None return None except Exception: LOG.exception('Failed to retrieve filtered project [%s].' % self.filter_project) raise self._gnocchi_project_id = project.id LOG.debug("Filtered project [%s] found with ID [%s].", self.filter_project, self._gnocchi_project_id) return self._gnocchi_project_id def _is_swift_account_sample(self, sample): try: return (self.metric_map[sample.name].cfg['resource_type'] == 'swift_account') except KeyError: return False def _is_gnocchi_activity(self, sample): return (self.filter_project and self.gnocchi_project_id and ( # avoid anything from the user used by gnocchi sample.project_id == self.gnocchi_project_id or # avoid anything in the swift account used by gnocchi (sample.resource_id == self.gnocchi_project_id and self._is_swift_account_sample(sample)) )) def _get_resource_definition_from_event(self, event_type): for rd in self.resources_definition: operation = rd.event_match(event_type) if operation: return rd, operation def filter_gnocchi_activity_openstack(self, samples): """Skip sample generated by gnocchi itself This method will filter out the samples that are generated by Gnocchi itself. 
""" filtered_samples = [] for sample in samples: if not self._is_gnocchi_activity(sample): filtered_samples.append(sample) LOG.debug("Sample [%s] is not a Gnocchi activity; therefore, " "we do not filter it out and push it to Gnocchi.", sample) else: LOG.debug("Sample [%s] is a Gnocchi activity; therefore, " "we filter it out and do not push it to Gnocchi.", sample) return filtered_samples def publish_samples(self, data): self.ensures_archives_policies() data = self.filter_gnocchi_activity_openstack(data) def value_to_sort(object_to_sort): value = object_to_sort.resource_id if not value: LOG.debug("Resource ID was not defined for sample data [%s]. " "Therefore, we will use an empty string as the " "resource ID.", object_to_sort) value = '' return value data.sort(key=value_to_sort) resource_grouped_samples = itertools.groupby( data, key=operator.attrgetter('resource_id')) gnocchi_data = {} measures = {} for resource_id, samples_of_resource in resource_grouped_samples: for sample in samples_of_resource: metric_name = sample.name LOG.debug("Processing sample [%s] for resource ID [%s].", sample, resource_id) rd = self.metric_map.get(metric_name) if rd is None: if metric_name not in self._already_logged_metric_names: LOG.warning("metric %s is not handled by Gnocchi" % metric_name) self._already_logged_metric_names.add(metric_name) continue # NOTE(sileht): / is forbidden by Gnocchi resource_id = resource_id.replace('/', '_') if resource_id not in gnocchi_data: gnocchi_data[resource_id] = { 'resource_type': rd.cfg['resource_type'], 'resource': {"id": resource_id, "user_id": sample.user_id, "project_id": sample.project_id}} gnocchi_data[resource_id].setdefault( "resource_extra", {}).update(rd.sample_attributes(sample)) measures.setdefault(resource_id, {}).setdefault( metric_name, {"measures": [], "archive_policy_name": rd.metrics[metric_name]["archive_policy_name"], "unit": sample.unit} )["measures"].append( {'timestamp': sample.timestamp, 'value': sample.volume} ) try: 
self.batch_measures(measures, gnocchi_data) except gnocchi_exc.ClientException as e: LOG.error("Gnocchi client exception while pushing measures [%s] " "for gnocchi data [%s]: [%s].", measures, gnocchi_data, str(e)) except Exception as e: LOG.error("Unexpected exception while pushing measures [%s] for " "gnocchi data [%s]: [%s].", measures, gnocchi_data, str(e), exc_info=True) for info in gnocchi_data.values(): resource = info["resource"] resource_type = info["resource_type"] resource_extra = info["resource_extra"] if not resource_extra: continue try: self._if_not_cached(resource_type, resource['id'], resource_extra) except gnocchi_exc.ClientException as e: LOG.error("Gnocchi client exception updating resource type " "[%s] with ID [%s] for resource data [%s]: [%s].", resource_type, resource.get('id'), resource_extra, str(e)) except Exception as e: LOG.error("Unexpected exception updating resource type [%s] " "with ID [%s] for resource data [%s]: [%s].", resource_type, resource.get('id'), resource_extra, str(e), exc_info=True) @staticmethod def _extract_resources_from_error(e, resource_infos): resource_ids = {r['original_resource_id'] for r in e.message['detail']} return [(resource_infos[rid]['resource_type'], resource_infos[rid]['resource'], resource_infos[rid]['resource_extra']) for rid in resource_ids] def batch_measures(self, measures, resource_infos): # NOTE(sileht): We don't care about error here, we want # resources metadata always been updated try: LOG.debug("Executing batch resource metrics measures for resource " "[%s] and measures [%s].", resource_infos, measures) self._gnocchi.metric.batch_resources_metrics_measures( measures, create_metrics=True) except gnocchi_exc.BadRequest as e: if not isinstance(e.message, dict): raise if e.message.get('cause') != 'Unknown resources': raise resources = self._extract_resources_from_error(e, resource_infos) for resource_type, resource, resource_extra in resources: try: resource.update(resource_extra) 
self._create_resource(resource_type, resource) except gnocchi_exc.ResourceAlreadyExists: # NOTE(sileht): resource created in the meantime pass except gnocchi_exc.ClientException as e: LOG.error('Error creating resource %(id)s: %(err)s', {'id': resource['id'], 'err': str(e)}) # We cannot post measures for this resource # and we can't patch it later del measures[resource['id']] del resource_infos[resource['id']] else: if self.cache and resource_extra: self.cache.set(resource['id'], self._hash_resource(resource_extra)) # NOTE(sileht): we have created missing resources/metrics, # now retry to post measures self._gnocchi.metric.batch_resources_metrics_measures( measures, create_metrics=True) LOG.debug( "%d measures posted against %d metrics through %d resources", sum(len(m["measures"]) for rid in measures for m in measures[rid].values()), sum(len(m) for m in measures.values()), len(resource_infos)) def _create_resource(self, resource_type, resource): self._gnocchi.resource.create(resource_type, resource) LOG.debug('Resource %s created', resource["id"]) def _update_resource(self, resource_type, res_id, resource_extra): self._gnocchi.resource.update(resource_type, res_id, resource_extra) LOG.debug('Resource %s updated', res_id) def _if_not_cached(self, resource_type, res_id, resource_extra): if self.cache: attribute_hash = self._hash_resource(resource_extra) if self._resource_cache_diff(res_id, attribute_hash): with self._gnocchi_resource_lock[res_id]: # NOTE(luogangyi): there is a possibility that the # resource was already built in cache by another # ceilometer-notification-agent when we get the lock here. 
if self._resource_cache_diff(res_id, attribute_hash): self._update_resource(resource_type, res_id, resource_extra) self.cache.set(res_id, attribute_hash) else: LOG.debug('Resource cache hit for %s', res_id) self._gnocchi_resource_lock.pop(res_id, None) else: LOG.debug('Resource cache hit for %s', res_id) else: self._update_resource(resource_type, res_id, resource_extra) @staticmethod def _hash_resource(resource): return hash(tuple(i for i in resource.items() if i[0] != 'metrics')) def _resource_cache_diff(self, key, attribute_hash): cached_hash = self.cache.get(key) return not cached_hash or cached_hash != attribute_hash def publish_events(self, events): for event in events: rd = self._get_resource_definition_from_event(event.event_type) if not rd: if event.event_type not in self._already_logged_event_types: LOG.debug("No gnocchi definition for event type: %s", event.event_type) self._already_logged_event_types.add(event.event_type) continue rd, operation = rd if operation == EVENT_DELETE: self._delete_event(rd, event) if operation == EVENT_CREATE: self._create_event(rd, event) if operation == EVENT_UPDATE: self._update_event(rd, event) def _update_event(self, rd, event): resource = rd.event_attributes(event) associated_resources = rd.cfg.get('event_associated_resources', {}) if associated_resources: to_update = itertools.chain([resource], *[ self._search_resource(resource_type, query % resource['id']) for resource_type, query in associated_resources.items() ]) else: to_update = [resource] for resource in to_update: self._set_update_attributes(resource) def _delete_event(self, rd, event): ended_at = timeutils.utcnow().isoformat() resource = rd.event_attributes(event) associated_resources = rd.cfg.get('event_associated_resources', {}) if associated_resources: to_end = itertools.chain([resource], *[ self._search_resource(resource_type, query % resource['id']) for resource_type, query in associated_resources.items() ]) else: to_end = [resource] for resource in to_end: 
self._set_ended_at(resource, ended_at) def _create_event(self, rd, event): resource = rd.event_attributes(event) resource_type = resource.pop('type') try: self._create_resource(resource_type, resource) except gnocchi_exc.ResourceAlreadyExists: LOG.debug("Create event received on existing resource (%s), " "ignore it.", resource['id']) except Exception: LOG.error("Failed to create resource %s", resource, exc_info=True) def _search_resource(self, resource_type, query): try: return self._gnocchi.resource.search( resource_type, json.loads(query)) except Exception: LOG.error("Fail to search resource type %(resource_type)s " "with '%(query)s'", {'resource_type': resource_type, 'query': query}, exc_info=True) return [] def _set_update_attributes(self, resource): resource_id = resource.pop('id') resource_type = resource.pop('type') try: self._if_not_cached(resource_type, resource_id, resource) except gnocchi_exc.ResourceNotFound: LOG.debug("Update event received on unexisting resource (%s), " "ignore it.", resource_id) except Exception: LOG.error("Fail to update the resource %s", resource, exc_info=True) def _set_ended_at(self, resource, ended_at): try: self._gnocchi.resource.update(resource['type'], resource['id'], {'ended_at': ended_at}) except gnocchi_exc.ResourceNotFound: LOG.debug("Delete event received on unexisting resource (%s), " "ignore it.", resource['id']) except Exception: LOG.error("Fail to update the resource %s", resource, exc_info=True) LOG.debug('Resource {} ended at {}'.format(resource["id"], ended_at)) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/publisher/http.py0000664000175100017510000001720415033033467021457 0ustar00mylesmyles# # Copyright 2016 IBM # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json from oslo_log import log from oslo_utils import strutils import requests from requests import adapters from urllib import parse as urlparse from ceilometer import publisher LOG = log.getLogger(__name__) class HttpPublisher(publisher.ConfigPublisherBase): """Publish metering data to a http endpoint This publisher pushes metering data to a specified http endpoint. The endpoint should be configured in ceilometer pipeline configuration file. If the `timeout` and/or `max_retries` are not specified, the default `timeout` and `max_retries` will be set to 5 and 2 respectively. Additional parameters are: - ssl certificate verification can be disabled by setting `verify_ssl` to False - batching can be configured by `batch` - Basic authentication can be configured using the URL authentication scheme: http://username:password@example.com - For certificate authentication, `clientcert` and `clientkey` are the paths to the certificate and key files respectively. `clientkey` is only required if the clientcert file doesn't already contain the key. All of the parameters mentioned above get removed during processing, with the remaining portion of the URL being used as the actual endpoint. e.g. 
https://username:password@example.com/path?verify_ssl=False&q=foo will result in a call to https://example.com/path?q=foo To use this publisher for samples, add the following section to the /etc/ceilometer/pipeline.yaml file or simply add it to an existing pipeline:: - name: meter_file meters: - "*" publishers: - http://host:80/path?timeout=1&max_retries=2&batch=False In the event_pipeline.yaml file, you can use the publisher in one of the sinks like the following: - name: event_sink publishers: - http://host:80/path?timeout=1&max_retries=2 """ HEADERS = {'Content-type': 'application/json'} def __init__(self, conf, parsed_url): super().__init__(conf, parsed_url) if not parsed_url.hostname: raise ValueError('The hostname of an endpoint for ' 'HttpPublisher is required') # non-numeric port from the url string will cause a ValueError # exception when the port is read. Do a read to make sure the port # is valid, if not, ValueError will be thrown. parsed_url.port # Handling other configuration options in the query string params = urlparse.parse_qs(parsed_url.query) self.timeout = self._get_param(params, 'timeout', 5, int) self.max_retries = self._get_param(params, 'max_retries', 2, int) self.poster = ( self._batch_post if strutils.bool_from_string(self._get_param( params, 'batch', True)) else self._individual_post) verify_ssl = self._get_param(params, 'verify_ssl', True) try: self.verify_ssl = strutils.bool_from_string(verify_ssl, strict=True) except ValueError: self.verify_ssl = (verify_ssl or True) username = parsed_url.username password = parsed_url.password if username: self.client_auth = (username, password) netloc = parsed_url.netloc.replace(username + ':' + password + '@', '') else: self.client_auth = None netloc = parsed_url.netloc clientcert = self._get_param(params, 'clientcert', None) clientkey = self._get_param(params, 'clientkey', None) if clientcert: if clientkey: self.client_cert = (clientcert, clientkey) else: self.client_cert = clientcert else: 
self.client_cert = None self.raw_only = strutils.bool_from_string( self._get_param(params, 'raw_only', False)) kwargs = {'max_retries': self.max_retries, 'pool_connections': conf.max_parallel_requests, 'pool_maxsize': conf.max_parallel_requests} self.session = requests.Session() if parsed_url.scheme in ["http", "https"]: scheme = parsed_url.scheme else: ssl = self._get_param(params, 'ssl', False) try: ssl = strutils.bool_from_string(ssl, strict=True) except ValueError: ssl = (ssl or False) scheme = "https" if ssl else "http" # authentication & config params have been removed, so use URL with # updated query string self.target = urlparse.urlunsplit([ scheme, netloc, parsed_url.path, urlparse.urlencode(params, doseq=True), parsed_url.fragment]) self.session.mount(self.target, adapters.HTTPAdapter(**kwargs)) LOG.debug('HttpPublisher for endpoint %s is initialized!' % self.target) @staticmethod def _get_param(params, name, default_value, cast=None): try: return cast(params.pop(name)[-1]) if cast else params.pop(name)[-1] except (ValueError, TypeError, KeyError): LOG.debug('Default value %(value)s is used for %(name)s' % {'value': default_value, 'name': name}) return default_value def _individual_post(self, data): for d in data: self._do_post(json.dumps(d)) def _batch_post(self, data): if not data: LOG.debug('Data set is empty!') return self._do_post(json.dumps(data)) def _do_post(self, data): LOG.trace('Message: %s', data) try: res = self.session.post(self.target, data=data, headers=self.HEADERS, timeout=self.timeout, auth=self.client_auth, cert=self.client_cert, verify=self.verify_ssl) res.raise_for_status() LOG.debug('Message posting to %s: status code %d.', self.target, res.status_code) except requests.exceptions.HTTPError: LOG.exception('Status Code: %(code)s. 
' 'Failed to dispatch message: %(data)s' % {'code': res.status_code, 'data': data}) def publish_samples(self, samples): """Send a metering message for publishing :param samples: Samples from pipeline after transformation """ self.poster([sample.as_dict() for sample in samples]) def publish_events(self, events): """Send an event message for publishing :param events: events from pipeline after transformation """ if self.raw_only: data = [evt.as_dict()['raw']['payload'] for evt in events if evt.as_dict().get('raw', {}).get('payload')] else: data = [event.serialize() for event in events] self.poster(data) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/publisher/messaging.py0000664000175100017510000002227415033033467022460 0ustar00mylesmyles# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Publish a sample using the preferred RPC mechanism. 
""" import abc import itertools import operator import threading from oslo_config import cfg from oslo_log import log import oslo_messaging from oslo_utils import excutils from urllib import parse as urlparse from ceilometer.i18n import _ from ceilometer import messaging from ceilometer import publisher from ceilometer.publisher import utils LOG = log.getLogger(__name__) NOTIFIER_OPTS = [ cfg.StrOpt('metering_topic', default='metering', help='The topic that ceilometer uses for metering ' 'notifications.', deprecated_for_removal=True, ), cfg.StrOpt('event_topic', default='event', help='The topic that ceilometer uses for event ' 'notifications.', deprecated_for_removal=True, ), cfg.StrOpt('telemetry_driver', default='messagingv2', help='The driver that ceilometer uses for metering ' 'notifications.', deprecated_name='metering_driver', ) ] class DeliveryFailure(Exception): def __init__(self, message=None, cause=None): super().__init__(message) self.cause = cause def raise_delivery_failure(exc): excutils.raise_with_cause(DeliveryFailure, str(exc), cause=exc) class MessagingPublisher(publisher.ConfigPublisherBase, metaclass=abc.ABCMeta): def __init__(self, conf, parsed_url): super().__init__(conf, parsed_url) options = urlparse.parse_qs(parsed_url.query) # the value of options is a list of url param values # only take care of the latest one if the option # is provided more than once self.per_meter_topic = bool(int( options.get('per_meter_topic', [0])[-1])) self.policy = options.get('policy', ['default'])[-1] self.max_queue_length = int(options.get( 'max_queue_length', [1024])[-1]) self.max_retry = 0 self.queue_lock = threading.Lock() self.local_queue = [] if self.policy in ['default', 'queue', 'drop']: LOG.info('Publishing policy set to %s', self.policy) else: LOG.warning(_('Publishing policy is unknown (%s) force to ' 'default'), self.policy) self.policy = 'default' self.retry = 1 if self.policy in ['queue', 'drop'] else None def publish_samples(self, samples): 
"""Publish samples on RPC. :param samples: Samples from pipeline. """ meters = [ utils.meter_message_from_counter( sample, self.conf.publisher.telemetry_secret) for sample in samples ] topic = self.conf.publisher_notifier.metering_topic with self.queue_lock: self.local_queue.append((topic, meters)) if self.per_meter_topic: queue_per_meter_topic = [] for meter_name, meter_list in itertools.groupby( sorted(meters, key=operator.itemgetter('counter_name')), operator.itemgetter('counter_name')): meter_list = list(meter_list) topic_name = topic + '.' + meter_name LOG.debug('Publishing %(m)d samples on %(n)s', {'m': len(meter_list), 'n': topic_name}) queue_per_meter_topic.append((topic_name, meter_list)) with self.queue_lock: self.local_queue.extend(queue_per_meter_topic) self.flush() def flush(self): with self.queue_lock: queue = self.local_queue self.local_queue = [] queue = self._process_queue(queue, self.policy) with self.queue_lock: self.local_queue = (queue + self.local_queue) if self.policy == 'queue': self._check_queue_length() def _check_queue_length(self): queue_length = len(self.local_queue) if queue_length > self.max_queue_length > 0: count = queue_length - self.max_queue_length self.local_queue = self.local_queue[count:] LOG.warning(_("Publisher max local_queue length is exceeded, " "dropping %d oldest samples") % count) def _process_queue(self, queue, policy): current_retry = 0 while queue: topic, data = queue[0] try: self._send(topic, data) except DeliveryFailure: data = sum([len(m) for __, m in queue]) if policy == 'queue': LOG.warning(_("Failed to publish %d datapoints, queue " "them"), data) return queue elif policy == 'drop': LOG.warning(_("Failed to publish %d datapoints, " "dropping them"), data) return [] current_retry += 1 if current_retry >= self.max_retry: LOG.exception("Failed to retry to send sample data " "with max_retry times") raise else: queue.pop(0) return [] def publish_events(self, events): """Send an event message for publishing :param 
events: events from pipeline. """ ev_list = [utils.message_from_event( event, self.conf.publisher.telemetry_secret) for event in events] topic = self.conf.publisher_notifier.event_topic with self.queue_lock: self.local_queue.append((topic, ev_list)) self.flush() @abc.abstractmethod def _send(self, topic, meters): """Send the meters to the messaging topic.""" class NotifierPublisher(MessagingPublisher): """Publish metering data from notifier publisher. The ip address and port number of notifier can be configured in ceilometer pipeline configuration file. User can customize the transport driver such as rabbit, kafka and so on. The Notifier uses `sample` method as default method to send notifications. This publisher has transmit options such as queue, drop, and retry. These options are specified using policy field of URL parameter. When queue option could be selected, local queue length can be determined using max_queue_length field as well. When the transfer fails with retry option, try to resend the data as many times as specified in max_retry field. If max_retry is not specified, by default the number of retry is 100. 
To enable this publisher, add the following section to the /etc/ceilometer/pipeline.yaml file or simply add it to an existing pipeline:: meter: - name: meter_notifier meters: - "*" sinks: - notifier_sink sinks: - name: notifier_sink publishers: - notifier://[notifier_ip]:[notifier_port]?topic=[topic]& driver=driver&max_retry=100 """ def __init__(self, conf, parsed_url, default_topic): super().__init__(conf, parsed_url) options = urlparse.parse_qs(parsed_url.query) topics = options.pop('topic', [default_topic]) driver = options.pop('driver', ['rabbit'])[0] self.max_retry = int(options.get('max_retry', [100])[-1]) url = None if parsed_url.netloc != '': url = urlparse.urlunsplit([driver, parsed_url.netloc, parsed_url.path, urlparse.urlencode(options, True), parsed_url.fragment]) self.notifier = oslo_messaging.Notifier( messaging.get_transport(self.conf, url), driver=self.conf.publisher_notifier.telemetry_driver, publisher_id='telemetry.publisher.%s' % self.conf.host, topics=topics, retry=self.retry ) def _send(self, event_type, data): try: self.notifier.sample({}, event_type=event_type, payload=data) except oslo_messaging.MessageDeliveryFailure as e: raise_delivery_failure(e) class SampleNotifierPublisher(NotifierPublisher): def __init__(self, conf, parsed_url): super().__init__( conf, parsed_url, conf.publisher_notifier.metering_topic) class EventNotifierPublisher(NotifierPublisher): def __init__(self, conf, parsed_url): super().__init__( conf, parsed_url, conf.publisher_notifier.event_topic) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/publisher/opentelemetry_http.py0000664000175100017510000001116515033033467024433 0ustar00mylesmyles# # Copyright 2024 cmss, inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time from oslo_log import log from oslo_utils import timeutils from ceilometer.publisher import http from ceilometer import sample as smp LOG = log.getLogger(__name__) class OpentelemetryHttpPublisher(http.HttpPublisher): """Publish metering data to Opentelemetry Collector endpoint This dispatcher inherits from all options of the http dispatcher. To use this publisher for samples, add the following section to the /etc/ceilometer/pipeline.yaml file or simply add it to an existing pipeline:: - name: meter_file meters: - "*" publishers: - opentelemetryhttp://opentelemetry-http-ip:4318/v1/metrics """ HEADERS = {'Content-type': 'application/json'} @staticmethod def get_attribute_model(key, value): return { "key": key, "value": { "string_value": value } } def get_attributes_model(self, sample): attributes = [] resource_id_attr = self.get_attribute_model("resource_id", sample.resource_id) user_id_attr = self.get_attribute_model("user_id", sample.user_id) project_id_attr = self.get_attribute_model("project_id", sample.project_id) attributes.append(resource_id_attr) attributes.append(user_id_attr) attributes.append(project_id_attr) return attributes @staticmethod def get_metrics_model(sample, data_points): name = sample.name.replace(".", "_") desc = str(sample.name) + " unit:" + sample.unit unit = sample.unit metrics = dict() metric_type = None if sample.type == smp.TYPE_CUMULATIVE: metric_type = "counter" else: metric_type = "gauge" metrics.update({ "name": name, "description": desc, "unit": unit, metric_type: {"data_points": data_points} }) return 
metrics @staticmethod def get_data_points_model(timestamp, attributes, volume): data_points = dict() struct_time = timeutils.parse_isotime(timestamp).timetuple() unix_time = int(time.mktime(struct_time)) data_points.update({ 'attributes': attributes, "start_time_unix_nano": unix_time, "time_unix_nano": unix_time, "as_double": volume, "flags": 0 }) return data_points def get_data_model(self, sample, data_points): metrics = [self.get_metrics_model(sample, data_points)] data = { "resource_metrics": [{ "scope_metrics": [{ "scope": { "name": "ceilometer", "version": "v1" }, "metrics": metrics }] }] } return data def get_data_points(self, sample): # attributes contain basic metadata attributes = self.get_attributes_model(sample) try: return [self.get_data_points_model( sample.timestamp, attributes, sample.volume)] except Exception as e: LOG.warning("Get data point error, %s" % e) return [] def get_opentelemetry_model(self, sample): data_points = self.get_data_points(sample) if data_points: data = self.get_data_model(sample, data_points) return data else: return None def publish_samples(self, samples): """Send a metering message for publishing :param samples: Samples from pipeline after transformation """ if not samples: LOG.warning('Data samples is empty!') return for s in samples: data = self.get_opentelemetry_model(s) if data: self._do_post(json.dumps(data)) @staticmethod def publish_events(events): raise NotImplementedError ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/publisher/prometheus.py0000664000175100017510000000534715033033467022700 0ustar00mylesmyles# # Copyright 2016 IBM # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.publisher import http from ceilometer import sample class PrometheusPublisher(http.HttpPublisher): """Publish metering data to Prometheus Pushgateway endpoint This dispatcher inherits from all options of the http dispatcher. To use this publisher for samples, add the following section to the /etc/ceilometer/pipeline.yaml file or simply add it to an existing pipeline:: - name: meter_file meters: - "*" publishers: - prometheus://mypushgateway/metrics/job/ceilometer """ HEADERS = {'Content-type': 'plain/text'} def publish_samples(self, samples): """Send a metering message for publishing :param samples: Samples from pipeline after transformation """ if not samples: return data = "" doc_done = set() for s in samples: # NOTE(sileht): delta can't be converted into prometheus data # format so don't set the metric type for it metric_type = None if s.type == sample.TYPE_CUMULATIVE: metric_type = "counter" elif s.type == sample.TYPE_GAUGE: metric_type = "gauge" curated_sname = s.name.replace(".", "_") if metric_type and curated_sname not in doc_done: data += "# TYPE {} {}\n".format(curated_sname, metric_type) doc_done.add(curated_sname) # NOTE(sileht): prometheus pushgateway doesn't allow to push # timestamp_ms # # timestamp_ms = ( # s.get_iso_timestamp().replace(tzinfo=None) - # datetime.utcfromtimestamp(0) # ).total_seconds() * 1000 # data += '%s{resource_id="%s"} %s %d\n' % ( # curated_sname, s.resource_id, s.volume, timestamp_ms) data += '%s{resource_id="%s", user_id="%s", project_id="%s"}' \ ' %s\n' % (curated_sname, s.resource_id, s.user_id, s.project_id, 
s.volume) self._do_post(data) @staticmethod def publish_events(events): raise NotImplementedError ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/publisher/tcp.py0000664000175100017510000000764615033033467021277 0ustar00mylesmyles# # Copyright 2022 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Publish a sample using a TCP mechanism """ import socket import msgpack from oslo_log import log from oslo_utils import netutils import ceilometer from ceilometer.i18n import _ from ceilometer import publisher from ceilometer.publisher import utils LOG = log.getLogger(__name__) class TCPPublisher(publisher.ConfigPublisherBase): def __init__(self, conf, parsed_url): super().__init__(conf, parsed_url) self.inet_addr = netutils.parse_host_port( parsed_url.netloc, default_port=4952) self.socket = None self.connect_socket() def connect_socket(self): try: self.socket = socket.create_connection(self.inet_addr) return True except socket.gaierror: LOG.error(_("Unable to resolv the remote %(host)s") % {'host': self.inet_addr[0], 'port': self.inet_addr[1]}) except TimeoutError: LOG.error(_("Unable to connect to the remote endpoint " "%(host)s:%(port)d. The connection timed out.") % {'host': self.inet_addr[0], 'port': self.inet_addr[1]}) except ConnectionRefusedError: LOG.error(_("Unable to connect to the remote endpoint " "%(host)s:%(port)d. 
Connection refused.") % {'host': self.inet_addr[0], 'port': self.inet_addr[1]}) return False def publish_samples(self, samples): """Send a metering message for publishing :param samples: Samples from pipeline after transformation """ for sample in samples: msg = utils.meter_message_from_counter( sample, self.conf.publisher.telemetry_secret, self.conf.host) LOG.debug("Publishing sample %(msg)s over TCP to " "%(host)s:%(port)d", {'msg': msg, 'host': self.inet_addr[0], 'port': self.inet_addr[1]}) encoded_msg = msgpack.dumps(msg, use_bin_type=True) msg_len = len(encoded_msg).to_bytes(8, 'little') if self.socket: try: self.socket.send(msg_len + encoded_msg) continue except OSError: LOG.warning(_("Unable to send sample over TCP, trying " "to reconnect and resend the message")) if self.connect_socket(): try: self.socket.send(msg_len + encoded_msg) continue except OSError: pass LOG.error(_("Unable to reconnect and resend sample over TCP")) # NOTE (jokke): We do not handle exceptions in the calling code # so raising the exception from here needs quite a bit more work. # Same time we don't want to spam the retry messages as it's # unlikely to change between iterations on this loop. 'break' # rather than 'return' even the end result is the same feels # more appropriate for now. break def publish_events(self, events): """Send an event message for publishing :param events: events from pipeline after transformation """ raise ceilometer.NotImplementedError ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/publisher/test.py0000664000175100017510000000253715033033467021462 0ustar00mylesmyles# # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Publish a sample in memory, useful for testing """ from ceilometer import publisher class TestPublisher(publisher.ConfigPublisherBase): """Publisher used in unit testing.""" def __init__(self, conf, parsed_url): super().__init__(conf, parsed_url) self.samples = [] self.events = [] self.calls = 0 def publish_samples(self, samples): """Send a metering message for publishing :param samples: Samples from pipeline after transformation """ self.samples.extend(samples) self.calls += 1 def publish_events(self, events): """Send an event message for publishing :param events: events from pipeline after transformation """ self.events.extend(events) self.calls += 1 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/publisher/udp.py0000664000175100017510000000557115033033467021274 0ustar00mylesmyles# # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Publish a sample using an UDP mechanism """ import socket import msgpack from oslo_log import log from oslo_utils import netutils import ceilometer from ceilometer.i18n import _ from ceilometer import publisher from ceilometer.publisher import utils LOG = log.getLogger(__name__) class UDPPublisher(publisher.ConfigPublisherBase): def __init__(self, conf, parsed_url): super().__init__(conf, parsed_url) self.host, self.port = netutils.parse_host_port( parsed_url.netloc, default_port=4952) addrinfo = None try: addrinfo = socket.getaddrinfo(self.host, None, socket.AF_INET6, socket.SOCK_DGRAM)[0] except socket.gaierror: try: addrinfo = socket.getaddrinfo(self.host, None, socket.AF_INET, socket.SOCK_DGRAM)[0] except socket.gaierror: pass if addrinfo: addr_family = addrinfo[0] else: LOG.warning( "Cannot resolve host %s, creating AF_INET socket...", self.host) addr_family = socket.AF_INET self.socket = socket.socket(addr_family, socket.SOCK_DGRAM) def publish_samples(self, samples): """Send a metering message for publishing :param samples: Samples from pipeline after transformation """ for sample in samples: msg = utils.meter_message_from_counter( sample, self.conf.publisher.telemetry_secret) host = self.host port = self.port LOG.debug("Publishing sample %(msg)s over UDP to " "%(host)s:%(port)d", {'msg': msg, 'host': host, 'port': port}) try: self.socket.sendto(msgpack.dumps(msg, use_bin_type=True), (self.host, self.port)) except Exception as e: LOG.warning(_("Unable to send sample over UDP")) LOG.exception(e) def publish_events(self, events): """Send an event message for publishing :param events: events from pipeline after transformation """ raise ceilometer.NotImplementedError ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/publisher/utils.py0000664000175100017510000001235715033033467021644 0ustar00mylesmyles# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the 
Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utils for publishers """ import hashlib import hmac from oslo_config import cfg OPTS = [ cfg.StrOpt('telemetry_secret', secret=True, default='change this for valid signing', help='Secret value for signing messages. Set value empty if ' 'signing is not required to avoid computational overhead.', deprecated_opts=[cfg.DeprecatedOpt("metering_secret", "DEFAULT"), cfg.DeprecatedOpt("metering_secret", "publisher_rpc"), cfg.DeprecatedOpt("metering_secret", "publisher")] ), ] def decode_unicode(input): """Decode the unicode of the message, and encode it into utf-8.""" if isinstance(input, dict): temp = {} # If the input data is a dict, create an equivalent dict with a # predictable insertion order to avoid inconsistencies in the # message signature computation for equivalent payloads modulo # ordering for key, value in sorted(input.items()): temp[decode_unicode(key)] = decode_unicode(value) return temp elif isinstance(input, (tuple, list)): # When doing a pair of JSON encode/decode operations to the tuple, # the tuple would become list. So we have to generate the value as # list here. 
return [decode_unicode(element) for element in input] elif isinstance(input, str): return input.encode('utf-8') elif isinstance(input, bytes): return input.decode('utf-8') else: return input def recursive_keypairs(d, separator=':'): """Generator that produces sequence of keypairs for nested dictionaries.""" for name, value in sorted(d.items()): if isinstance(value, dict): for subname, subvalue in recursive_keypairs(value, separator): yield ('{}{}{}'.format(name, separator, subname), subvalue) elif isinstance(value, (tuple, list)): yield name, decode_unicode(value) else: yield name, value def compute_signature(message, secret): """Return the signature for a message dictionary.""" if not secret: return '' if isinstance(secret, str): secret = secret.encode('utf-8') digest_maker = hmac.new(secret, b'', hashlib.sha256) for name, value in recursive_keypairs(message): if name == 'message_signature': # Skip any existing signature value, which would not have # been part of the original message. continue digest_maker.update(str(name).encode('utf-8')) digest_maker.update(str(value).encode('utf-8')) return digest_maker.hexdigest() def verify_signature(message, secret): """Check the signature in the message. Message is verified against the value computed from the rest of the contents. """ if not secret: return True old_sig = message.get('message_signature', '') new_sig = compute_signature(message, secret) if isinstance(old_sig, str): try: old_sig = old_sig.encode('ascii') except UnicodeDecodeError: return False new_sig = new_sig.encode('ascii') return hmac.compare_digest(new_sig, old_sig) def meter_message_from_counter(sample, secret, publisher_id=None): """Make a metering message ready to be published or stored. Returns a dictionary containing a metering message for a notification message and a Sample instance. 
""" msg = {'source': sample.source, 'counter_name': sample.name, 'counter_type': sample.type, 'counter_unit': sample.unit, 'counter_volume': sample.volume, 'user_id': sample.user_id, 'user_name': sample.user_name, 'project_id': sample.project_id, 'project_name': sample.project_name, 'resource_id': sample.resource_id, 'timestamp': sample.timestamp, 'resource_metadata': sample.resource_metadata, 'message_id': sample.id, 'monotonic_time': sample.monotonic_time, } if publisher_id is not None: msg['publisher_id'] = publisher_id msg['message_signature'] = compute_signature(msg, secret) return msg def message_from_event(event, secret): """Make an event message ready to be published or stored. Returns a serialized model of Event containing an event message """ msg = event.serialize() msg['message_signature'] = compute_signature(msg, secret) return msg ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/publisher/zaqar.py0000664000175100017510000000522315033033467021614 0ustar00mylesmyles# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from urllib import parse as urlparse from ceilometer import keystone_client from ceilometer import publisher from zaqarclient.queues.v2 import client as zaqarclient DEFAULT_TTL = 3600 class ZaqarPublisher(publisher.ConfigPublisherBase): """Publish metering data to a Zaqar queue. The target queue name must be configured in the ceilometer pipeline configuration file. 
The TTL can also optionally be specified as a query argument:: meter: - name: meter_zaqar meters: - "*" sinks: - zaqar_sink sinks: - name: zaqar_sink publishers: - zaqar://?queue=meter_queue&ttl=1200 The credentials to access Zaqar must be set in the [zaqar] section in the configuration. """ def __init__(self, conf, parsed_url): super().__init__(conf, parsed_url) options = urlparse.parse_qs(parsed_url.query) self.queue_name = options.get('queue', [None])[0] if not self.queue_name: raise ValueError('Must specify a queue in the zaqar publisher') self.ttl = int(options.pop('ttl', [DEFAULT_TTL])[0]) self._client = None @property def client(self): if self._client is None: session = keystone_client.get_session( self.conf, group=self.conf.zaqar.auth_section) self._client = zaqarclient.Client(session=session) return self._client def publish_samples(self, samples): """Send a metering message for publishing :param samples: Samples from pipeline. """ queue = self.client.queue(self.queue_name) messages = [{'body': sample.as_dict(), 'ttl': self.ttl} for sample in samples] queue.post(messages) def publish_events(self, events): """Send an event message for publishing :param events: events from pipeline. """ queue = self.client.queue(self.queue_name) messages = [{'body': event.serialize(), 'ttl': self.ttl} for event in events] queue.post(messages) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/sample.py0000664000175100017510000001410615033033467017762 0ustar00mylesmyles# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Sample class for holding data about a metering event. A Sample doesn't really do anything, but we need a way to ensure that all of the appropriate fields have been filled in by the plugins that create them. """ import copy import uuid from oslo_config import cfg from oslo_utils import timeutils OPTS = [ cfg.StrOpt('sample_source', default='openstack', help='Source for samples emitted on this instance.'), cfg.ListOpt('reserved_metadata_namespace', default=['metering.'], help='List of metadata prefixes reserved for metering use.'), cfg.IntOpt('reserved_metadata_length', default=256, help='Limit on length of reserved metadata values.'), cfg.ListOpt('reserved_metadata_keys', default=[], help='List of metadata keys reserved for metering use. 
And ' 'these keys are additional to the ones included in the ' 'namespace.'), ] def add_reserved_user_metadata(conf, src_metadata, dest_metadata): limit = conf.reserved_metadata_length user_metadata = {} for prefix in conf.reserved_metadata_namespace: md = { k[len(prefix):].replace('.', '_'): v[:limit] if isinstance(v, str) else v for k, v in src_metadata.items() if (k.startswith(prefix) and k[len(prefix):].replace('.', '_') not in dest_metadata) } user_metadata.update(md) for metadata_key in conf.reserved_metadata_keys: md = { k.replace('.', '_'): v[:limit] if isinstance(v, str) else v for k, v in src_metadata.items() if (k == metadata_key and k.replace('.', '_') not in dest_metadata) } user_metadata.update(md) if user_metadata: dest_metadata['user_metadata'] = user_metadata return dest_metadata # Fields explanation: # # Source: the source of this sample # Name: the name of the meter, must be unique # Type: the type of the meter, must be either: # - cumulative: the value is incremented and never reset to 0 # - delta: the value is reset to 0 each time it is sent # - gauge: the value is an absolute value and is not a counter # Unit: the unit of the meter # Volume: the sample value # User ID: the user ID # Project ID: the project ID # Resource ID: the resource ID # Timestamp: when the sample has been read # Resource metadata: various metadata # id: an uuid of a sample, can be taken from API when post sample via API class Sample: SOURCE_DEFAULT = "openstack" def __init__(self, name, type, unit, volume, user_id, project_id, resource_id, timestamp=None, resource_metadata=None, source=None, id=None, monotonic_time=None, user_name=None, project_name=None): if type not in TYPES: raise ValueError('Unsupported type: %s') self.name = name self.type = type self.unit = unit self.volume = volume self.user_id = user_id self.user_name = user_name self.project_id = project_id self.project_name = project_name self.resource_id = resource_id self.timestamp = timestamp 
self.resource_metadata = resource_metadata or {} self.source = source or self.SOURCE_DEFAULT self.id = id or str(uuid.uuid1()) self.monotonic_time = monotonic_time def as_dict(self): return copy.copy(self.__dict__) def __repr__(self): return ''.format( self.name, self.volume, self.resource_id, self.timestamp) @classmethod def from_notification(cls, name, type, volume, unit, user_id, project_id, resource_id, message, timestamp=None, metadata=None, source=None, user_name=None, project_name=None): if not metadata: metadata = (copy.copy(message['payload']) if isinstance(message['payload'], dict) else {}) metadata['event_type'] = message['event_type'] metadata['host'] = message['publisher_id'] ts = timestamp if timestamp else message['metadata']['timestamp'] ts = timeutils.parse_isotime(ts).isoformat() # add UTC if necessary return cls(name=name, type=type, volume=volume, unit=unit, user_id=user_id, project_id=project_id, resource_id=resource_id, timestamp=ts, resource_metadata=metadata, source=source, user_name=user_name, project_name=project_name) def set_timestamp(self, timestamp): self.timestamp = timestamp def get_iso_timestamp(self): return timeutils.parse_isotime(self.timestamp) def __eq__(self, other): if isinstance(other, self.__class__): return self.__dict__ == other.__dict__ return False def __ne__(self, other): return not self.__eq__(other) def setup(conf): # NOTE(sileht): Instead of passing the cfg.CONF everywhere in ceilometer # prepare_service will override this default Sample.SOURCE_DEFAULT = conf.sample_source TYPE_GAUGE = 'gauge' TYPE_DELTA = 'delta' TYPE_CUMULATIVE = 'cumulative' TYPES = (TYPE_GAUGE, TYPE_DELTA, TYPE_CUMULATIVE) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/service.py0000664000175100017510000000401415033033467020136 0ustar00mylesmyles# Copyright 2012-2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this 
file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from oslo_config import cfg import oslo_i18n from oslo_log import log from oslo_reports import guru_meditation_report as gmr from oslo_reports import opts as gmr_opts from ceilometer import keystone_client from ceilometer import messaging from ceilometer import opts from ceilometer import sample from ceilometer import utils from ceilometer import version def prepare_service(argv=None, config_files=None, conf=None): if argv is None: argv = sys.argv if conf is None: conf = cfg.ConfigOpts() oslo_i18n.enable_lazy() for group, options in opts.list_opts(): conf.register_opts(list(options), group=None if group == "DEFAULT" else group) keystone_client.register_keystoneauth_opts(conf) log.register_options(conf) log_levels = (conf.default_log_levels + ['futurist=INFO', 'neutronclient=INFO', 'keystoneclient=INFO']) log.set_defaults(default_log_levels=log_levels) conf(argv[1:], project='ceilometer', validate_default_values=True, version=version.version_info.version_string(), default_config_files=config_files) keystone_client.post_register_keystoneauth_opts(conf) log.setup(conf, 'ceilometer') utils.setup_root_helper(conf) sample.setup(conf) gmr_opts.set_defaults(conf) gmr.TextGuruMeditation.setup_autorun(version, conf=conf) messaging.setup() return conf ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7959414 ceilometer-24.1.0.dev59/ceilometer/telemetry/0000775000175100017510000000000015033033521020126 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 
xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/telemetry/__init__.py0000664000175100017510000000000015033033467022236 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/telemetry/notifications.py0000664000175100017510000000356015033033467023366 0ustar00mylesmyles# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.pipeline import sample as endpoint from ceilometer import sample class TelemetryIpc(endpoint.SampleEndpoint): """Handle sample from notification bus Telemetry samples polled by polling agent. """ event_types = ['telemetry.polling'] def build_sample(self, message): samples = message['payload']['samples'] for sample_dict in samples: yield sample.Sample( name=sample_dict['counter_name'], type=sample_dict['counter_type'], unit=sample_dict['counter_unit'], volume=sample_dict['counter_volume'], user_id=sample_dict['user_id'], project_id=sample_dict['project_id'], resource_id=sample_dict['resource_id'], timestamp=sample_dict['timestamp'], resource_metadata=sample_dict['resource_metadata'], source=sample_dict['source'], id=sample_dict['message_id'], # Project name and username might not be set, depending on the # configuration `identity_name_discovery`. Therefore, we cannot # assume that they exist in the sample dictionary. 
user_name=sample_dict.get('user_name'), project_name=sample_dict.get('project_name') ) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7959414 ceilometer-24.1.0.dev59/ceilometer/tests/0000775000175100017510000000000015033033521017256 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/__init__.py0000664000175100017510000000000015033033467021366 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/base.py0000664000175100017510000000534015033033467020555 0ustar00mylesmyles# Copyright 2012 New Dream Network (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test base classes. 
""" import functools import os import tempfile import unittest import fixtures import oslo_messaging.conffixture from oslotest import base import yaml import ceilometer from ceilometer import messaging class BaseTestCase(base.BaseTestCase): def setup_messaging(self, conf, exchange=None): self.useFixture(oslo_messaging.conffixture.ConfFixture(conf)) conf.set_override("notification_driver", ["messaging"]) if not exchange: exchange = 'ceilometer' conf.set_override("control_exchange", exchange) # NOTE(sileht): Ensure a new oslo.messaging driver is loaded # between each tests self.transport = messaging.get_transport(conf, "fake://", cache=False) self.useFixture(fixtures.MockPatch( 'ceilometer.messaging.get_transport', return_value=self.transport)) def cfg2file(self, data): cfgfile = tempfile.NamedTemporaryFile(mode='w', delete=False) self.addCleanup(os.remove, cfgfile.name) cfgfile.write(yaml.safe_dump(data)) cfgfile.close() return cfgfile.name @staticmethod def path_get(project_file=None): root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', ) ) if project_file: return os.path.join(root, project_file) else: return root def _skip_decorator(func): @functools.wraps(func) def skip_if_not_implemented(*args, **kwargs): try: return func(*args, **kwargs) except ceilometer.NotImplementedError as e: raise unittest.SkipTest(str(e)) return skip_if_not_implemented class SkipNotImplementedMeta(type): def __new__(cls, name, bases, local): for attr in local: value = local[attr] if callable(value) and ( attr.startswith('test_') or attr == 'setUp'): local[attr] = _skip_decorator(value) return type.__new__(cls, name, bases, local) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7959414 ceilometer-24.1.0.dev59/ceilometer/tests/unit/0000775000175100017510000000000015033033521020235 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 
ceilometer-24.1.0.dev59/ceilometer/tests/unit/__init__.py0000664000175100017510000000000015033033467022345 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7959414 ceilometer-24.1.0.dev59/ceilometer/tests/unit/alarm/0000775000175100017510000000000015033033521021331 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/alarm/__init__.py0000664000175100017510000000000015033033467023441 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/alarm/test_aodh.py0000664000175100017510000000452015033033467023667 0ustar00mylesmyles# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from ceilometer.alarm import aodh from ceilometer.polling import manager from ceilometer import service import ceilometer.tests.base as base ALARM_METRIC_LIST = [ { 'evaluation_results': [{ 'alarm_id': 'b8e17f58-089a-43fc-a96b-e9bcac4d4b53', 'project_id': '2dd8edd6c8c24f49bf04670534f6b357', 'state_counters': { 'ok': 2, 'alarm': 5, 'insufficient data': 0, } }, { 'alarm_id': 'fa386719-67e3-42ff-aec8-17e547dac77a', 'project_id': 'd45b070bcce04ca99546128a40854e7c', 'state_counters': { 'ok': 50, 'alarm': 3, 'insufficient data': 10, } }], }, ] class TestAlarmEvaluationResultPollster(base.BaseTestCase): def setUp(self): super().setUp() conf = service.prepare_service([], []) self.manager = manager.AgentManager(0, conf) self.pollster = aodh.EvaluationResultPollster(conf) def test_alarm_pollster(self): alarm_samples = list( self.pollster.get_samples(self.manager, {}, resources=ALARM_METRIC_LIST)) self.assertEqual(6, len(alarm_samples)) self.assertEqual('alarm.evaluation_result', alarm_samples[0].name) self.assertEqual(2, alarm_samples[0].volume) self.assertEqual('2dd8edd6c8c24f49bf04670534f6b357', alarm_samples[0].project_id) self.assertEqual('b8e17f58-089a-43fc-a96b-e9bcac4d4b53', alarm_samples[0].resource_id) self.assertEqual('ok', alarm_samples[0].resource_metadata['alarm_state']) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7969415 ceilometer-24.1.0.dev59/ceilometer/tests/unit/cmd/0000775000175100017510000000000015033033521021000 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/cmd/__init__.py0000664000175100017510000000000015033033467023110 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/cmd/test_status.py0000664000175100017510000000175515033033467023755 0ustar00mylesmyles# Copyright (c) 2018 NEC, Corp. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_upgradecheck.upgradecheck import Code from ceilometer.cmd import status from ceilometer.tests import base class TestUpgradeChecks(base.BaseTestCase): def setUp(self): super().setUp() self.cmd = status.Checks() def test__sample_check(self): check_result = self.cmd._sample_check() self.assertEqual( Code.SUCCESS, check_result.code) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7969415 ceilometer-24.1.0.dev59/ceilometer/tests/unit/compute/0000775000175100017510000000000015033033521021711 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/compute/__init__.py0000664000175100017510000000000015033033467024021 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7969415 ceilometer-24.1.0.dev59/ceilometer/tests/unit/compute/pollsters/0000775000175100017510000000000015033033521023740 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/compute/pollsters/__init__.py0000664000175100017510000000000015033033467026050 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 
ceilometer-24.1.0.dev59/ceilometer/tests/unit/compute/pollsters/base.py0000664000175100017510000000516715033033467025246 0ustar00mylesmyles# # Copyright 2012 eNovance # Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import fixtures from ceilometer.compute.virt import inspector as virt_inspector from ceilometer import service import ceilometer.tests.base as base class TestPollsterBase(base.BaseTestCase): def setUp(self): super().setUp() self.CONF = service.prepare_service([], []) self.inspector = mock.Mock() self.instance = mock.MagicMock() self.instance.name = 'instance-00000001' setattr(self.instance, 'OS-EXT-SRV-ATTR:instance_name', self.instance.name) setattr(self.instance, 'OS-EXT-STS:vm_state', 'active') setattr(self.instance, 'OS-EXT-STS:task_state', None) self.instance.id = 1 self.instance.flavor = {'name': 'm1.small', 'id': 2, 'vcpus': 1, 'ram': 512, 'disk': 20, 'ephemeral': 0} self.instance.status = 'active' self.instance.metadata = { 'fqdn': 'vm_fqdn', 'metering.stack': '2cadc4b4-8789-123c-b4eg-edd2f0a9c128', 'project_cos': 'dev'} self.useFixture(fixtures.MockPatch( 'ceilometer.compute.virt.inspector.get_hypervisor_inspector', new=mock.Mock(return_value=self.inspector))) # as we're having lazy hypervisor inspector singleton object in the # base compute pollster class, that leads to the fact that we # need to mock all this class property to avoid context sharing between # the tests self.useFixture(fixtures.MockPatch( 
'ceilometer.compute.pollsters.' 'GenericComputePollster._get_inspector', return_value=self.inspector)) def _mock_inspect_instance(self, *data): next_value = iter(data) def inspect(instance, duration): value = next(next_value) if isinstance(value, virt_inspector.InstanceStats): return value else: raise value self.inspector.inspect_instance = mock.Mock(side_effect=inspect) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/compute/pollsters/test_cpu.py0000664000175100017510000001065415033033467026157 0ustar00mylesmyles# # Copyright 2012 eNovance # Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import time from ceilometer.compute.pollsters import instance_stats from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.polling import manager from ceilometer.tests.unit.compute.pollsters import base class TestCPUPollster(base.TestPollsterBase): def test_get_samples(self): self._mock_inspect_instance( virt_inspector.InstanceStats(cpu_time=1 * (10 ** 6), cpu_number=2), virt_inspector.InstanceStats(cpu_time=3 * (10 ** 6), cpu_number=2), # cpu_time resets on instance restart virt_inspector.InstanceStats(cpu_time=2 * (10 ** 6), cpu_number=2), ) mgr = manager.AgentManager(0, self.CONF) pollster = instance_stats.CPUPollster(self.CONF) def _verify_cpu_metering(expected_time): cache = {} samples = list(pollster.get_samples(mgr, cache, [self.instance])) self.assertEqual(1, len(samples)) self.assertEqual({'cpu'}, {s.name for s in samples}) self.assertEqual(expected_time, samples[0].volume) self.assertEqual(2, samples[0].resource_metadata.get('cpu_number')) # ensure elapsed time between polling cycles is non-zero time.sleep(0.001) _verify_cpu_metering(1 * (10 ** 6)) _verify_cpu_metering(3 * (10 ** 6)) _verify_cpu_metering(2 * (10 ** 6)) # the following apply to all instance resource pollsters but are tested # here alone. 
def test_get_metadata(self): mgr = manager.AgentManager(0, self.CONF) pollster = instance_stats.CPUPollster(self.CONF) samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertEqual(1, len(samples)) self.assertEqual(1, samples[0].resource_metadata['vcpus']) self.assertEqual(512, samples[0].resource_metadata['memory_mb']) self.assertEqual(20, samples[0].resource_metadata['disk_gb']) self.assertEqual(20, samples[0].resource_metadata['root_gb']) self.assertEqual(0, samples[0].resource_metadata['ephemeral_gb']) self.assertEqual('active', samples[0].resource_metadata['status']) self.assertEqual('active', samples[0].resource_metadata['state']) self.assertIsNone(samples[0].resource_metadata['task_state']) def test_get_reserved_metadata_with_keys(self): self.CONF.set_override('reserved_metadata_keys', ['fqdn']) mgr = manager.AgentManager(0, self.CONF) pollster = instance_stats.CPUPollster(self.CONF) samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertEqual({'fqdn': 'vm_fqdn', 'stack': '2cadc4b4-8789-123c-b4eg-edd2f0a9c128'}, samples[0].resource_metadata['user_metadata']) def test_get_reserved_metadata_with_namespace(self): mgr = manager.AgentManager(0, self.CONF) pollster = instance_stats.CPUPollster(self.CONF) samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertEqual({'stack': '2cadc4b4-8789-123c-b4eg-edd2f0a9c128'}, samples[0].resource_metadata['user_metadata']) self.CONF.set_override('reserved_metadata_namespace', []) mgr = manager.AgentManager(0, self.CONF) pollster = instance_stats.CPUPollster(self.CONF) samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertNotIn('user_metadata', samples[0].resource_metadata) def test_get_flavor_name_as_metadata_instance_type(self): mgr = manager.AgentManager(0, self.CONF) pollster = instance_stats.CPUPollster(self.CONF) samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertEqual(1, len(samples)) self.assertEqual('m1.small', 
samples[0].resource_metadata['instance_type']) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/compute/pollsters/test_disk.py0000664000175100017510000001202115033033467026310 0ustar00mylesmyles# Copyright 2025 Catalyst Cloud Limited # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from ceilometer.compute.pollsters import disk from ceilometer.polling import manager from ceilometer.tests.unit.compute.pollsters import base class TestDiskPollsterBase(base.TestPollsterBase): TYPE = 'gauge' def setUp(self): super().setUp() self.instances = self._get_fake_instances() def _get_fake_instances(self, ephemeral=0): instances = [] for i in [1, 2]: instance = mock.MagicMock() instance.name = f'instance-{i}' setattr(instance, 'OS-EXT-SRV-ATTR:instance_name', instance.name) instance.id = i instance.flavor = {'name': 'm1.small', 'id': 2, 'vcpus': 1, 'ram': 512, 'disk': 20, 'ephemeral': ephemeral} instance.status = 'active' instances.append(instance) return instances def _check_get_samples(self, factory, name, instances=None, expected_count=2): pollster = factory(self.CONF) mgr = manager.AgentManager(0, self.CONF) samples = list(pollster.get_samples(mgr, {}, instances or self.instances)) self.assertGreater(len(samples), 0) self.assertEqual({name}, {s.name for s in samples}, (f"Only samples for meter {name} " "should be published")) self.assertEqual(expected_count, len(samples)) 
return samples class TestDiskSizePollsters(TestDiskPollsterBase): TYPE = 'gauge' def test_ephemeral_disk_zero(self): samples = { sample.resource_id: sample for sample in self._check_get_samples( disk.EphemeralSizePollster, 'disk.ephemeral.size', expected_count=len(self.instances))} for instance in self.instances: with self.subTest(instance.name): self.assertIn(instance.id, samples) sample = samples[instance.id] self.assertEqual(instance.flavor['ephemeral'], sample.volume) self.assertEqual(self.TYPE, sample.type) def test_ephemeral_disk_nonzero(self): instances = self._get_fake_instances(ephemeral=10) samples = { sample.resource_id: sample for sample in self._check_get_samples( disk.EphemeralSizePollster, 'disk.ephemeral.size', instances=instances, expected_count=len(instances))} for instance in instances: with self.subTest(instance.name): self.assertIn(instance.id, samples) sample = samples[instance.id] self.assertEqual(instance.flavor['ephemeral'], sample.volume) self.assertEqual(self.TYPE, sample.type) def test_root_disk(self): samples = { sample.resource_id: sample for sample in self._check_get_samples( disk.RootSizePollster, 'disk.root.size', expected_count=len(self.instances))} for instance in self.instances: with self.subTest(instance.name): self.assertIn(instance.id, samples) sample = samples[instance.id] self.assertEqual((instance.flavor['disk'] - instance.flavor['ephemeral']), sample.volume) self.assertEqual(self.TYPE, sample.type) def test_root_disk_ephemeral_nonzero(self): instances = self._get_fake_instances(ephemeral=10) samples = { sample.resource_id: sample for sample in self._check_get_samples( disk.RootSizePollster, 'disk.root.size', instances=instances, expected_count=len(instances))} for instance in instances: with self.subTest(instance.name): self.assertIn(instance.id, samples) sample = samples[instance.id] self.assertEqual((instance.flavor['disk'] - instance.flavor['ephemeral']), sample.volume) self.assertEqual(self.TYPE, sample.type) 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/compute/pollsters/test_diskio.py0000664000175100017510000002046415033033467026652 0ustar00mylesmyles# # Copyright 2012 eNovance # Copyright 2012 Red Hat, Inc # Copyright 2014 Cisco Systems, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from ceilometer.compute.pollsters import disk from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.polling import manager from ceilometer.tests.unit.compute.pollsters import base class TestBaseDiskIO(base.TestPollsterBase): TYPE = 'cumulative' def setUp(self): super().setUp() self.instance = self._get_fake_instances() @staticmethod def _get_fake_instances(): instances = [] for i in [1, 2]: instance = mock.MagicMock() instance.name = 'instance-%s' % i setattr(instance, 'OS-EXT-SRV-ATTR:instance_name', instance.name) instance.id = i instance.flavor = {'name': 'm1.small', 'id': 2, 'vcpus': 1, 'ram': 512, 'disk': 20, 'ephemeral': 0} instance.status = 'active' instances.append(instance) return instances def _check_get_samples(self, factory, name, expected_count=2): pollster = factory(self.CONF) mgr = manager.AgentManager(0, self.CONF) cache = {} samples = list(pollster.get_samples(mgr, cache, self.instance)) self.assertNotEqual(samples, []) cache_key = pollster.inspector_method self.assertIn(cache_key, cache) for instance in self.instance: 
self.assertIn(instance.id, cache[cache_key]) self.assertEqual({name}, {s.name for s in samples}) match = [s for s in samples if s.name == name] self.assertEqual(len(match), expected_count, 'missing counter %s' % name) return match def _check_aggregate_samples(self, factory, name, expected_volume, expected_device=None): match = self._check_get_samples(factory, name) self.assertEqual(expected_volume, match[0].volume) self.assertEqual(self.TYPE, match[0].type) if expected_device is not None: self.assertEqual(set(expected_device), set(match[0].resource_metadata.get('device'))) instances = [i.id for i in self.instance] for m in match: self.assertIn(m.resource_id, instances) def _check_per_device_samples(self, factory, name, expected_volume, expected_device=None): match = self._check_get_samples(factory, name, expected_count=4) match_dict = {} for m in match: match_dict[m.resource_id] = m for instance in self.instance: key = "{}-{}".format(instance.id, expected_device) self.assertEqual(expected_volume, match_dict[key].volume) self.assertEqual(self.TYPE, match_dict[key].type) self.assertEqual(key, match_dict[key].resource_id) class TestDiskPollsters(TestBaseDiskIO): DISKS = [ virt_inspector.DiskStats(device='vda1', read_bytes=1, read_requests=2, write_bytes=3, write_requests=4, errors=-1, rd_total_times=100, wr_total_times=200,), virt_inspector.DiskStats(device='vda2', read_bytes=2, read_requests=3, write_bytes=5, write_requests=7, errors=-1, rd_total_times=300, wr_total_times=400,), ] def setUp(self): super().setUp() self.inspector.inspect_disks = mock.Mock(return_value=self.DISKS) def test_per_disk_read_requests(self): self._check_per_device_samples(disk.PerDeviceReadRequestsPollster, 'disk.device.read.requests', 2, 'vda1') self._check_per_device_samples(disk.PerDeviceReadRequestsPollster, 'disk.device.read.requests', 3, 'vda2') def test_per_disk_write_requests(self): self._check_per_device_samples(disk.PerDeviceWriteRequestsPollster, 'disk.device.write.requests', 4, 
'vda1') self._check_per_device_samples(disk.PerDeviceWriteRequestsPollster, 'disk.device.write.requests', 7, 'vda2') def test_per_disk_read_bytes(self): self._check_per_device_samples(disk.PerDeviceReadBytesPollster, 'disk.device.read.bytes', 1, 'vda1') self._check_per_device_samples(disk.PerDeviceReadBytesPollster, 'disk.device.read.bytes', 2, 'vda2') def test_per_disk_write_bytes(self): self._check_per_device_samples(disk.PerDeviceWriteBytesPollster, 'disk.device.write.bytes', 3, 'vda1') self._check_per_device_samples(disk.PerDeviceWriteBytesPollster, 'disk.device.write.bytes', 5, 'vda2') def test_per_device_read_latency(self): self._check_per_device_samples( disk.PerDeviceDiskReadLatencyPollster, 'disk.device.read.latency', 100, 'vda1') self._check_per_device_samples( disk.PerDeviceDiskReadLatencyPollster, 'disk.device.read.latency', 300, 'vda2') def test_per_device_write_latency(self): self._check_per_device_samples( disk.PerDeviceDiskWriteLatencyPollster, 'disk.device.write.latency', 200, 'vda1') self._check_per_device_samples( disk.PerDeviceDiskWriteLatencyPollster, 'disk.device.write.latency', 400, 'vda2') class TestDiskInfoPollsters(TestBaseDiskIO): DISKS = [ virt_inspector.DiskInfo(device="vda1", capacity=3, allocation=2, physical=1), virt_inspector.DiskInfo(device="vda2", capacity=4, allocation=3, physical=2), ] TYPE = 'gauge' def setUp(self): super().setUp() self.inspector.inspect_disk_info = mock.Mock(return_value=self.DISKS) def test_per_disk_capacity(self): self._check_per_device_samples(disk.PerDeviceCapacityPollster, 'disk.device.capacity', 3, 'vda1') self._check_per_device_samples(disk.PerDeviceCapacityPollster, 'disk.device.capacity', 4, 'vda2') def test_per_disk_allocation(self): self._check_per_device_samples(disk.PerDeviceAllocationPollster, 'disk.device.allocation', 2, 'vda1') self._check_per_device_samples(disk.PerDeviceAllocationPollster, 'disk.device.allocation', 3, 'vda2') def test_per_disk_physical(self): 
self._check_per_device_samples(disk.PerDevicePhysicalPollster, 'disk.device.usage', 1, 'vda1') self._check_per_device_samples(disk.PerDevicePhysicalPollster, 'disk.device.usage', 2, 'vda2') ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/compute/pollsters/test_location_metadata.py0000664000175100017510000001172715033033467031042 0ustar00mylesmyles# # Copyright 2012 eNovance # Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for the compute pollsters. 
""" from oslotest import base from ceilometer.compute.pollsters import util from ceilometer.polling import manager from ceilometer import service class FauxInstance: def __init__(self, **kwds): for name, value in kwds.items(): setattr(self, name, value) def __getitem__(self, key): return getattr(self, key) def get(self, key, default): try: return getattr(self, key) except AttributeError: return default class TestLocationMetadata(base.BaseTestCase): def setUp(self): self.CONF = service.prepare_service([], []) self.manager = manager.AgentManager(0, self.CONF) super().setUp() # Mimics an instance returned from nova api call self.INSTANCE_PROPERTIES = {'name': 'display name', 'id': ('234cbe81-4e09-4f64-9b2a-' '714f6b9046e3'), 'OS-EXT-SRV-ATTR:instance_name': 'instance-000001', 'OS-EXT-AZ:availability_zone': 'foo-zone', 'reservation_id': 'reservation id', 'architecture': 'x86_64', 'kernel_id': 'kernel id', 'os_type': 'linux', 'ramdisk_id': 'ramdisk id', 'status': 'active', 'ephemeral_gb': 0, 'root_gb': 20, 'disk_gb': 20, 'image': {'id': 1, 'links': [{"rel": "bookmark", 'href': 2}]}, 'hostId': '1234-5678', 'OS-EXT-SRV-ATTR:host': 'host-test', 'flavor': {'name': 'm1.tiny', 'id': 1, 'disk': 20, 'ram': 512, 'vcpus': 2, 'ephemeral': 0}, 'metadata': {'metering.autoscale.group': 'X' * 512, 'metering.ephemeral_gb': 42}} self.instance = FauxInstance(**self.INSTANCE_PROPERTIES) def test_metadata(self): md = util._get_metadata_from_object(self.CONF, self.instance) for prop, value in self.INSTANCE_PROPERTIES.items(): if prop not in ("metadata"): # Special cases if prop == 'name': prop = 'display_name' elif prop == 'hostId': prop = "host" elif prop == 'OS-EXT-SRV-ATTR:host': prop = "instance_host" elif prop == 'OS-EXT-SRV-ATTR:instance_name': prop = 'name' elif prop == "id": prop = "instance_id" self.assertEqual(value, md[prop]) user_metadata = md['user_metadata'] expected = self.INSTANCE_PROPERTIES[ 'metadata']['metering.autoscale.group'][:256] self.assertEqual(expected, 
user_metadata['autoscale_group']) self.assertEqual(1, len(user_metadata)) def test_metadata_empty_image(self): self.INSTANCE_PROPERTIES['image'] = None self.instance = FauxInstance(**self.INSTANCE_PROPERTIES) md = util._get_metadata_from_object(self.CONF, self.instance) self.assertIsNone(md['image']) self.assertIsNone(md['image_ref']) self.assertIsNone(md['image_ref_url']) def test_metadata_image_through_conductor(self): # There should be no links here, should default to None self.INSTANCE_PROPERTIES['image'] = {'id': 1} self.instance = FauxInstance(**self.INSTANCE_PROPERTIES) md = util._get_metadata_from_object(self.CONF, self.instance) self.assertEqual(1, md['image_ref']) self.assertIsNone(md['image_ref_url']) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/compute/pollsters/test_memory.py0000664000175100017510000001313715033033467026677 0ustar00mylesmyles# Copyright (c) 2014 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from ceilometer.compute.pollsters import instance_stats from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.polling import manager from ceilometer.tests.unit.compute.pollsters import base class TestMemoryPollster(base.TestPollsterBase): def test_get_samples(self): self._mock_inspect_instance( virt_inspector.InstanceStats(memory_usage=1.0), virt_inspector.InstanceStats(memory_usage=2.0), virt_inspector.InstanceStats(), virt_inspector.InstanceShutOffException(), ) mgr = manager.AgentManager(0, self.CONF) pollster = instance_stats.MemoryUsagePollster(self.CONF) @mock.patch('ceilometer.compute.pollsters.LOG') def _verify_memory_metering(expected_count, expected_memory_mb, expected_warnings, mylog): samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertEqual(expected_count, len(samples)) if expected_count > 0: self.assertEqual({'memory.usage'}, {s.name for s in samples}) self.assertEqual(expected_memory_mb, samples[0].volume) else: self.assertEqual(expected_warnings, mylog.warning.call_count) self.assertEqual(0, mylog.exception.call_count) _verify_memory_metering(1, 1.0, 0) _verify_memory_metering(1, 2.0, 0) _verify_memory_metering(0, 0, 1) _verify_memory_metering(0, 0, 0) def test_get_samples_with_empty_stats(self): self._mock_inspect_instance(virt_inspector.NoDataException()) mgr = manager.AgentManager(0, self.CONF) pollster = instance_stats.MemoryUsagePollster(self.CONF) def all_samples(): return list(pollster.get_samples(mgr, {}, [self.instance])) class TestResidentMemoryPollster(base.TestPollsterBase): def test_get_samples(self): self._mock_inspect_instance( virt_inspector.InstanceStats(memory_resident=1.0), virt_inspector.InstanceStats(memory_resident=2.0), virt_inspector.InstanceStats(), virt_inspector.InstanceShutOffException(), ) mgr = manager.AgentManager(0, self.CONF) pollster = instance_stats.MemoryResidentPollster(self.CONF) @mock.patch('ceilometer.compute.pollsters.LOG') def 
_verify_resident_memory_metering(expected_count, expected_resident_memory_mb, expected_warnings, mylog): samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertEqual(expected_count, len(samples)) if expected_count > 0: self.assertEqual({'memory.resident'}, {s.name for s in samples}) self.assertEqual(expected_resident_memory_mb, samples[0].volume) else: self.assertEqual(expected_warnings, mylog.warning.call_count) self.assertEqual(0, mylog.exception.call_count) _verify_resident_memory_metering(1, 1.0, 0) _verify_resident_memory_metering(1, 2.0, 0) _verify_resident_memory_metering(0, 0, 1) _verify_resident_memory_metering(0, 0, 0) class TestMemorySwapPollster(base.TestPollsterBase): def test_get_samples(self): self._mock_inspect_instance( virt_inspector.InstanceStats(memory_swap_in=1.0, memory_swap_out=2.0), virt_inspector.InstanceStats(memory_swap_in=3.0, memory_swap_out=4.0), ) mgr = manager.AgentManager(0, self.CONF) def _check_memory_swap_in(expected_swap_in): pollster = instance_stats.MemorySwapInPollster(self.CONF) samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertEqual(1, len(samples)) self.assertEqual({'memory.swap.in'}, {s.name for s in samples}) self.assertEqual(expected_swap_in, samples[0].volume) def _check_memory_swap_out(expected_swap_out): pollster = instance_stats.MemorySwapOutPollster(self.CONF) samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertEqual(1, len(samples)) self.assertEqual({'memory.swap.out'}, {s.name for s in samples}) self.assertEqual(expected_swap_out, samples[0].volume) _check_memory_swap_in(1.0) _check_memory_swap_out(4.0) def test_get_samples_with_empty_stats(self): self._mock_inspect_instance(virt_inspector.NoDataException()) mgr = manager.AgentManager(0, self.CONF) pollster = instance_stats.MemorySwapInPollster(self.CONF) def all_samples(): return list(pollster.get_samples(mgr, {}, [self.instance])) ././@PaxHeader0000000000000000000000000000002600000000000010213 
xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/compute/pollsters/test_net.py0000664000175100017510000003410515033033467026153 0ustar00mylesmyles# # Copyright 2012 eNovance # Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from ceilometer.compute.pollsters import net from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.polling import manager from ceilometer.tests.unit.compute.pollsters import base class FauxInstance: def __init__(self, **kwargs): for name, value in kwargs.items(): setattr(self, name, value) def __getitem__(self, key): return getattr(self, key) def get(self, key, default): return getattr(self, key, default) class TestNetPollster(base.TestPollsterBase): def setUp(self): super().setUp() self.vnic0 = virt_inspector.InterfaceStats( name='vnet0', fref='fa163e71ec6e', mac='fa:16:3e:71:ec:6d', parameters=dict(ip='10.0.0.2', projmask='255.255.255.0', projnet='proj1', dhcp_server='10.0.0.1'), rx_bytes=1, rx_packets=2, rx_drop=20, rx_errors=21, tx_bytes=3, tx_packets=4, tx_drop=22, tx_errors=23, rx_bytes_delta=42, tx_bytes_delta=43) self.vnic1 = virt_inspector.InterfaceStats( name='vnet1', fref='fa163e71ec6f', mac='fa:16:3e:71:ec:6e', parameters=dict(ip='192.168.0.3', projmask='255.255.255.0', projnet='proj2', dhcp_server='10.0.0.2'), rx_bytes=5, rx_packets=6, rx_drop=24, rx_errors=25, tx_bytes=7, tx_packets=8, tx_drop=26, tx_errors=27, rx_bytes_delta=44, 
tx_bytes_delta=45) self.vnic2 = virt_inspector.InterfaceStats( name='vnet2', fref=None, mac='fa:18:4e:72:fc:7e', parameters=dict(ip='192.168.0.4', projmask='255.255.255.0', projnet='proj3', dhcp_server='10.0.0.3'), rx_bytes=9, rx_packets=10, rx_drop=28, rx_errors=29, tx_bytes=11, tx_packets=12, tx_drop=30, tx_errors=31, rx_bytes_delta=46, tx_bytes_delta=47) vnics = [ self.vnic0, self.vnic1, self.vnic2, ] self.inspector.inspect_vnics = mock.Mock(return_value=vnics) self.INSTANCE_PROPERTIES = {'name': 'display name', 'OS-EXT-SRV-ATTR:instance_name': 'instance-000001', 'OS-EXT-AZ:availability_zone': 'foo-zone', 'reservation_id': 'reservation id', 'id': 'instance id', 'user_id': 'user id', 'tenant_id': 'tenant id', 'architecture': 'x86_64', 'kernel_id': 'kernel id', 'os_type': 'linux', 'ramdisk_id': 'ramdisk id', 'status': 'active', 'ephemeral_gb': 0, 'root_gb': 20, 'disk_gb': 20, 'image': {'id': 1, 'links': [{"rel": "bookmark", 'href': 2}]}, 'hostId': '1234-5678', 'OS-EXT-SRV-ATTR:host': 'host-test', 'flavor': {'disk': 20, 'ram': 512, 'name': 'tiny', 'vcpus': 2, 'ephemeral': 0}, 'metadata': {'metering.autoscale.group': 'X' * 512, 'metering.foobar': 42}} self.faux_instance = FauxInstance(**self.INSTANCE_PROPERTIES) def _check_get_samples(self, factory, expected, expected_name, kind='cumulative'): mgr = manager.AgentManager(0, self.CONF) pollster = factory(self.CONF) samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertEqual(3, len(samples)) # one for each nic self.assertEqual({expected_name}, {s.name for s in samples}) def _verify_vnic_metering(ip, expected_volume, expected_rid): match = [s for s in samples if s.resource_metadata['parameters']['ip'] == ip ] self.assertEqual(len(match), 1, 'missing ip %s' % ip) self.assertEqual(expected_volume, match[0].volume) self.assertEqual(kind, match[0].type) self.assertEqual(expected_rid, match[0].resource_id) for ip, volume, rid in expected: _verify_vnic_metering(ip, volume, rid) def 
test_incoming_bytes(self): instance_name_id = "{}-{}".format(self.instance.name, self.instance.id) self._check_get_samples( net.IncomingBytesPollster, [('10.0.0.2', 1, self.vnic0.fref), ('192.168.0.3', 5, self.vnic1.fref), ('192.168.0.4', 9, "{}-{}".format(instance_name_id, self.vnic2.name)), ], 'network.incoming.bytes', ) def test_outgoing_bytes(self): instance_name_id = "{}-{}".format(self.instance.name, self.instance.id) self._check_get_samples( net.OutgoingBytesPollster, [('10.0.0.2', 3, self.vnic0.fref), ('192.168.0.3', 7, self.vnic1.fref), ('192.168.0.4', 11, "{}-{}".format(instance_name_id, self.vnic2.name)), ], 'network.outgoing.bytes', ) def test_incoming_bytes_delta(self): instance_name_id = "{}-{}".format(self.instance.name, self.instance.id) self._check_get_samples( net.IncomingBytesDeltaPollster, [('10.0.0.2', 42, self.vnic0.fref), ('192.168.0.3', 44, self.vnic1.fref), ('192.168.0.4', 46, "{}-{}".format(instance_name_id, self.vnic2.name)), ], 'network.incoming.bytes.delta', 'delta', ) def test_outgoing_bytes_delta(self): instance_name_id = "{}-{}".format(self.instance.name, self.instance.id) self._check_get_samples( net.OutgoingBytesDeltaPollster, [('10.0.0.2', 43, self.vnic0.fref), ('192.168.0.3', 45, self.vnic1.fref), ('192.168.0.4', 47, "{}-{}".format(instance_name_id, self.vnic2.name)), ], 'network.outgoing.bytes.delta', 'delta', ) def test_incoming_packets(self): instance_name_id = "{}-{}".format(self.instance.name, self.instance.id) self._check_get_samples( net.IncomingPacketsPollster, [('10.0.0.2', 2, self.vnic0.fref), ('192.168.0.3', 6, self.vnic1.fref), ('192.168.0.4', 10, "{}-{}".format(instance_name_id, self.vnic2.name)), ], 'network.incoming.packets', ) def test_outgoing_packets(self): instance_name_id = "{}-{}".format(self.instance.name, self.instance.id) self._check_get_samples( net.OutgoingPacketsPollster, [('10.0.0.2', 4, self.vnic0.fref), ('192.168.0.3', 8, self.vnic1.fref), ('192.168.0.4', 12, "{}-{}".format(instance_name_id, 
self.vnic2.name)), ], 'network.outgoing.packets', ) def test_incoming_drops(self): instance_name_id = "{}-{}".format(self.instance.name, self.instance.id) self._check_get_samples( net.IncomingDropPollster, [('10.0.0.2', 20, self.vnic0.fref), ('192.168.0.3', 24, self.vnic1.fref), ('192.168.0.4', 28, "{}-{}".format(instance_name_id, self.vnic2.name)), ], 'network.incoming.packets.drop', ) def test_outgoing_drops(self): instance_name_id = "{}-{}".format(self.instance.name, self.instance.id) self._check_get_samples( net.OutgoingDropPollster, [('10.0.0.2', 22, self.vnic0.fref), ('192.168.0.3', 26, self.vnic1.fref), ('192.168.0.4', 30, "{}-{}".format(instance_name_id, self.vnic2.name)), ], 'network.outgoing.packets.drop', ) def test_incoming_errors(self): instance_name_id = "{}-{}".format(self.instance.name, self.instance.id) self._check_get_samples( net.IncomingErrorsPollster, [('10.0.0.2', 21, self.vnic0.fref), ('192.168.0.3', 25, self.vnic1.fref), ('192.168.0.4', 29, "{}-{}".format(instance_name_id, self.vnic2.name)), ], 'network.incoming.packets.error', ) def test_outgoing_errors(self): instance_name_id = "{}-{}".format(self.instance.name, self.instance.id) self._check_get_samples( net.OutgoingErrorsPollster, [('10.0.0.2', 23, self.vnic0.fref), ('192.168.0.3', 27, self.vnic1.fref), ('192.168.0.4', 31, "{}-{}".format(instance_name_id, self.vnic2.name)), ], 'network.outgoing.packets.error', ) def test_metadata(self): factory = net.OutgoingBytesPollster pollster = factory(self.CONF) mgr = manager.AgentManager(0, self.CONF) pollster = factory(self.CONF) s = list(pollster.get_samples(mgr, {}, [self.faux_instance]))[0] user_metadata = s.resource_metadata['user_metadata'] expected = self.INSTANCE_PROPERTIES[ 'metadata']['metering.autoscale.group'][:256] self.assertEqual(expected, user_metadata['autoscale_group']) self.assertEqual(2, len(user_metadata)) class TestNetRatesPollster(base.TestPollsterBase): def setUp(self): super().setUp() self.vnic0 = 
virt_inspector.InterfaceRateStats( name='vnet0', fref='fa163e71ec6e', mac='fa:16:3e:71:ec:6d', parameters=dict(ip='10.0.0.2', projmask='255.255.255.0', projnet='proj1', dhcp_server='10.0.0.1'), rx_bytes_rate=1, tx_bytes_rate=2) self.vnic1 = virt_inspector.InterfaceRateStats( name='vnet1', fref='fa163e71ec6f', mac='fa:16:3e:71:ec:6e', parameters=dict(ip='192.168.0.3', projmask='255.255.255.0', projnet='proj2', dhcp_server='10.0.0.2'), rx_bytes_rate=3, tx_bytes_rate=4) self.vnic2 = virt_inspector.InterfaceRateStats( name='vnet2', fref=None, mac='fa:18:4e:72:fc:7e', parameters=dict(ip='192.168.0.4', projmask='255.255.255.0', projnet='proj3', dhcp_server='10.0.0.3'), rx_bytes_rate=5, tx_bytes_rate=6) vnics = [ self.vnic0, self.vnic1, self.vnic2, ] self.inspector.inspect_vnic_rates = mock.Mock(return_value=vnics) def _check_get_samples(self, factory, expected, expected_name): mgr = manager.AgentManager(0, self.CONF) pollster = factory(self.CONF) samples = list(pollster.get_samples(mgr, {}, [self.instance])) self.assertEqual(3, len(samples)) # one for each nic self.assertEqual({expected_name}, {s.name for s in samples}) def _verify_vnic_metering(ip, expected_volume, expected_rid): match = [s for s in samples if s.resource_metadata['parameters']['ip'] == ip ] self.assertEqual(1, len(match), 'missing ip %s' % ip) self.assertEqual(expected_volume, match[0].volume) self.assertEqual('gauge', match[0].type) self.assertEqual(expected_rid, match[0].resource_id) for ip, volume, rid in expected: _verify_vnic_metering(ip, volume, rid) def test_incoming_bytes_rate(self): instance_name_id = "{}-{}".format(self.instance.name, self.instance.id) self._check_get_samples( net.IncomingBytesRatePollster, [('10.0.0.2', 1, self.vnic0.fref), ('192.168.0.3', 3, self.vnic1.fref), ('192.168.0.4', 5, "{}-{}".format(instance_name_id, self.vnic2.name)), ], 'network.incoming.bytes.rate', ) def test_outgoing_bytes_rate(self): instance_name_id = "{}-{}".format(self.instance.name, self.instance.id) 
self._check_get_samples( net.OutgoingBytesRatePollster, [('10.0.0.2', 2, self.vnic0.fref), ('192.168.0.3', 4, self.vnic1.fref), ('192.168.0.4', 6, "{}-{}".format(instance_name_id, self.vnic2.name)), ], 'network.outgoing.bytes.rate', ) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/compute/pollsters/test_perf.py0000664000175100017510000000673015033033467026324 0ustar00mylesmyles# Copyright 2016 Intel # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from ceilometer.compute.pollsters import instance_stats from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.polling import manager from ceilometer.tests.unit.compute.pollsters import base class TestPerfPollster(base.TestPollsterBase): def test_get_samples(self): self._mock_inspect_instance( virt_inspector.InstanceStats(cpu_cycles=7259361, instructions=8815623, cache_references=74184, cache_misses=16737) ) mgr = manager.AgentManager(0, self.CONF) cache = {} def _check_perf_events_cpu_cycles(expected_usage): pollster = instance_stats.PerfCPUCyclesPollster(self.CONF) samples = list(pollster.get_samples(mgr, cache, [self.instance])) self.assertEqual(1, len(samples)) self.assertEqual({'perf.cpu.cycles'}, {s.name for s in samples}) self.assertEqual(expected_usage, samples[0].volume) def _check_perf_events_instructions(expected_usage): pollster = instance_stats.PerfInstructionsPollster(self.CONF) samples = list(pollster.get_samples(mgr, cache, [self.instance])) self.assertEqual(1, len(samples)) self.assertEqual({'perf.instructions'}, {s.name for s in samples}) self.assertEqual(expected_usage, samples[0].volume) def _check_perf_events_cache_references(expected_usage): pollster = instance_stats.PerfCacheReferencesPollster( self.CONF) samples = list(pollster.get_samples(mgr, cache, [self.instance])) self.assertEqual(1, len(samples)) self.assertEqual({'perf.cache.references'}, {s.name for s in samples}) self.assertEqual(expected_usage, samples[0].volume) def _check_perf_events_cache_misses(expected_usage): pollster = instance_stats.PerfCacheMissesPollster(self.CONF) samples = list(pollster.get_samples(mgr, cache, [self.instance])) self.assertEqual(1, len(samples)) self.assertEqual({'perf.cache.misses'}, {s.name for s in samples}) self.assertEqual(expected_usage, samples[0].volume) _check_perf_events_cpu_cycles(7259361) _check_perf_events_instructions(8815623) _check_perf_events_cache_references(74184) _check_perf_events_cache_misses(16737) def 
test_get_samples_with_empty_stats(self): self._mock_inspect_instance(virt_inspector.NoDataException()) mgr = manager.AgentManager(0, self.CONF) pollster = instance_stats.PerfCPUCyclesPollster(self.CONF) def all_samples(): return list(pollster.get_samples(mgr, {}, [self.instance])) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/compute/test_discovery.py0000664000175100017510000003046215033033467025347 0ustar00mylesmyles# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime from unittest import mock import fixtures import libvirt from novaclient import exceptions from ceilometer.compute import discovery from ceilometer.compute.pollsters import util from ceilometer import service from ceilometer.tests import base LIBVIRT_METADATA_XML = """ test.dom.com 2016-11-16 07:35:06 512 1 0 0 1 admin admin """ LIBVIRT_DESC_XML = """ instance-00000001 a75c2fa5-6c03-45a8-bbf7-b993cfcdec27 hvm /opt/stack/data/nova/instances/a75c2fa5-6c03-45a8-bbf7-b993cfcdec27/kernel /opt/stack/data/nova/instances/a75c2fa5-6c03-45a8-bbf7-b993cfcdec27/ramdisk root=/dev/vda console=tty0 console=ttyS0 """ LIBVIRT_MANUAL_INSTANCE_DESC_XML = """ Manual-instance-00000001 5e637d0d-8c0e-441a-a11a-a9dc95aed84e hvm /opt/instances/5e637d0d-8c0e-441a-a11a-a9dc95aed84e/kernel /opt/instances/5e637d0d-8c0e-441a-a11a-a9dc95aed84e/ramdisk root=/dev/vda console=tty0 console=ttyS0 """ class FakeDomain: def state(self): return [1, 2] def name(self): return "instance-00000001" def UUIDString(self): return "a75c2fa5-6c03-45a8-bbf7-b993cfcdec27" def XMLDesc(self): return LIBVIRT_DESC_XML def metadata(self, flags, url): return LIBVIRT_METADATA_XML class FakeConn: def listAllDomains(self): return [FakeDomain()] def isAlive(self): return True class FakeManualInstanceDomain: def state(self): return [1, 2] def name(self): return "Manual-instance-00000001" def UUIDString(self): return "5e637d0d-8c0e-441a-a11a-a9dc95aed84e" def XMLDesc(self): return LIBVIRT_MANUAL_INSTANCE_DESC_XML def metadata(self, flags, url): # Note(xiexianbin): vm not create by nova-compute don't have metadata # elements like: '' # When invoke get metadata method, raise libvirtError. 
e = libvirt.libvirtError( "metadata not found: Requested metadata element is not present") def fake_error_code(*args, **kwargs): return libvirt.VIR_ERR_NO_DOMAIN_METADATA e.get_error_code = fake_error_code raise e class FakeManualInstanceConn: def listAllDomains(self): return [FakeManualInstanceDomain()] def isAlive(self): return True class TestDiscovery(base.BaseTestCase): def setUp(self): super().setUp() self.instance = mock.MagicMock() self.instance.name = 'instance-00000001' setattr(self.instance, 'OS-EXT-SRV-ATTR:instance_name', self.instance.name) setattr(self.instance, 'OS-EXT-STS:vm_state', 'active') # FIXME(sileht): This is wrong, this should be a uuid # The internal id of nova can't be retrieved via API or notification self.instance.id = 1 self.instance.flavor = {'name': 'm1.small', 'id': 2, 'vcpus': 1, 'ram': 512, 'disk': 20, 'ephemeral': 0} self.instance.status = 'active' self.instance.metadata = { 'fqdn': 'vm_fqdn', 'metering.stack': '2cadc4b4-8789-123c-b4eg-edd2f0a9c128', 'project_cos': 'dev'} # as we're having lazy hypervisor inspector singleton object in the # base compute pollster class, that leads to the fact that we # need to mock all this class property to avoid context sharing between # the tests self.client = mock.MagicMock() self.client.instance_get_all_by_host.return_value = [self.instance] patch_client = fixtures.MockPatch('ceilometer.nova_client.Client', return_value=self.client) self.useFixture(patch_client) self.utc_now = mock.MagicMock( return_value=datetime.datetime( 2016, 1, 1, tzinfo=datetime.timezone.utc)) patch_timeutils = fixtures.MockPatch('oslo_utils.timeutils.utcnow', self.utc_now) self.useFixture(patch_timeutils) self.CONF = service.prepare_service([], []) self.CONF.set_override('host', 'test') def test_normal_discovery(self): self.CONF.set_override("instance_discovery_method", "naive", group="compute") dsc = discovery.InstanceDiscovery(self.CONF) resources = dsc.discover(mock.MagicMock()) self.assertEqual(1, len(resources)) 
self.assertEqual(1, list(resources)[0].id) self.client.instance_get_all_by_host.assert_called_once_with( 'test', None) resources = dsc.discover(mock.MagicMock()) self.assertEqual(1, len(resources)) self.assertEqual(1, list(resources)[0].id) self.client.instance_get_all_by_host.assert_called_with( self.CONF.host, "2016-01-01T00:00:00+00:00") def test_discovery_with_resource_update_interval(self): self.CONF.set_override("instance_discovery_method", "naive", group="compute") self.CONF.set_override("resource_update_interval", 600, group="compute") dsc = discovery.InstanceDiscovery(self.CONF) dsc.last_run = datetime.datetime( 2016, 1, 1, tzinfo=datetime.timezone.utc) self.utc_now.return_value = datetime.datetime( 2016, 1, 1, minute=5, tzinfo=datetime.timezone.utc) resources = dsc.discover(mock.MagicMock()) self.assertEqual(0, len(resources)) self.client.instance_get_all_by_host.assert_not_called() self.utc_now.return_value = datetime.datetime( 2016, 1, 1, minute=20, tzinfo=datetime.timezone.utc) resources = dsc.discover(mock.MagicMock()) self.assertEqual(1, len(resources)) self.assertEqual(1, list(resources)[0].id) self.client.instance_get_all_by_host.assert_called_once_with( self.CONF.host, "2016-01-01T00:00:00+00:00") @mock.patch("ceilometer.compute.virt.libvirt.utils." 
"refresh_libvirt_connection") def test_discovery_with_libvirt(self, mock_libvirt_conn): self.CONF.set_override("instance_discovery_method", "libvirt_metadata", group="compute") mock_libvirt_conn.return_value = FakeConn() dsc = discovery.InstanceDiscovery(self.CONF) resources = dsc.discover(mock.MagicMock()) self.assertEqual(1, len(resources)) r = list(resources)[0] s = util.make_sample_from_instance(self.CONF, r, "metric", "delta", "carrot", 1) self.assertEqual("a75c2fa5-6c03-45a8-bbf7-b993cfcdec27", s.resource_id) self.assertEqual("d99c829753f64057bc0f2030da309943", s.project_id) self.assertEqual("a1f4684e58bd4c88aefd2ecb0783b497", s.user_id) metadata = s.resource_metadata self.assertEqual(1, metadata["vcpus"]) self.assertEqual(512, metadata["memory_mb"]) self.assertEqual(1, metadata["disk_gb"]) self.assertEqual(0, metadata["ephemeral_gb"]) self.assertEqual(1, metadata["root_gb"]) self.assertEqual("bdaf114a-35e9-4163-accd-226d5944bf11", metadata["image_ref"]) self.assertEqual("test.dom.com", metadata["display_name"]) self.assertEqual("instance-00000001", metadata["name"]) self.assertEqual("a75c2fa5-6c03-45a8-bbf7-b993cfcdec27", metadata["instance_id"]) self.assertEqual("m1.tiny", metadata["instance_type"]) self.assertEqual( "4d0bc931ea7f0513da2efd9acb4cf3a273c64b7bcc544e15c070e662", metadata["host"]) self.assertEqual(self.CONF.host, metadata["instance_host"]) self.assertEqual("active", metadata["status"]) self.assertEqual("running", metadata["state"]) self.assertEqual("hvm", metadata["os_type"]) self.assertEqual("x86_64", metadata["architecture"]) def test_discovery_with_legacy_resource_cache_cleanup(self): self.CONF.set_override("instance_discovery_method", "naive", group="compute") self.CONF.set_override("resource_update_interval", 600, group="compute") self.CONF.set_override("resource_cache_expiry", 1800, group="compute") dsc = discovery.InstanceDiscovery(self.CONF) resources = dsc.discover(mock.MagicMock()) self.assertEqual(1, len(resources)) 
self.utc_now.return_value = datetime.datetime( 2016, 1, 1, minute=20, tzinfo=datetime.timezone.utc) resources = dsc.discover(mock.MagicMock()) self.assertEqual(1, len(resources)) self.utc_now.return_value = datetime.datetime( 2016, 1, 1, minute=31, tzinfo=datetime.timezone.utc) resources = dsc.discover(mock.MagicMock()) self.assertEqual(1, len(resources)) expected_calls = [mock.call('test', None), mock.call('test', '2016-01-01T00:00:00+00:00'), mock.call('test', None)] self.assertEqual(expected_calls, self.client.instance_get_all_by_host.call_args_list) @mock.patch("ceilometer.compute.virt.libvirt.utils." "refresh_libvirt_connection") def test_discovery_with_libvirt_error(self, mock_libvirt_conn): self.CONF.set_override("instance_discovery_method", "libvirt_metadata", group="compute") mock_libvirt_conn.return_value = FakeManualInstanceConn() dsc = discovery.InstanceDiscovery(self.CONF) resources = dsc.discover(mock.MagicMock()) self.assertEqual(0, len(resources)) def test_get_server(self): self.client.nova_client = mock.MagicMock() self.client.nova_client.servers = mock.MagicMock() fake_server = mock.MagicMock() fake_server.metadata = {'metering.server_group': 'group1'} fake_flavor = mock.MagicMock() fake_flavor.id = 'fake_id' fake_server.flavor = fake_flavor self.client.nova_client.servers.get = mock.MagicMock( return_value=fake_server) dsc = discovery.InstanceDiscovery(self.CONF) uuid = '123456' ret_server = dsc.get_server(uuid) self.assertEqual('fake_id', ret_server.flavor.id) self.assertEqual({'metering.server_group': 'group1'}, ret_server.metadata) # test raise NotFound exception self.client.nova_client.servers.get = mock.MagicMock( side_effect=exceptions.NotFound(404)) dsc = discovery.InstanceDiscovery(self.CONF) ret_server = dsc.get_server(uuid) self.assertEqual(None, ret_server) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7969415 
ceilometer-24.1.0.dev59/ceilometer/tests/unit/compute/virt/0000775000175100017510000000000015033033521022675 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/compute/virt/__init__.py0000664000175100017510000000000015033033467025005 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7969415 ceilometer-24.1.0.dev59/ceilometer/tests/unit/compute/virt/libvirt/0000775000175100017510000000000015033033521024350 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/compute/virt/libvirt/__init__.py0000664000175100017510000000000015033033467026460 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/compute/virt/libvirt/test_inspector.py0000664000175100017510000005674315033033467030017 0ustar00mylesmyles# Copyright 2012 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for libvirt inspector.""" from unittest import mock import fixtures from oslo_utils import units from oslotest import base from ceilometer.compute.virt import inspector as virt_inspector from ceilometer.compute.virt.libvirt import inspector as libvirt_inspector from ceilometer.compute.virt.libvirt import utils from ceilometer import service class FakeLibvirtError(Exception): pass class VMInstance: id = 'ff58e738-12f4-4c58-acde-77617b68da56' name = 'instance-00000001' class TestLibvirtInspection(base.BaseTestCase): def setUp(self): super().setUp() conf = service.prepare_service([], []) self.instance = VMInstance() libvirt_inspector.libvirt = mock.Mock() libvirt_inspector.libvirt.getVersion.return_value = 5001001 libvirt_inspector.libvirt.VIR_DOMAIN_SHUTOFF = 5 libvirt_inspector.libvirt.libvirtError = FakeLibvirtError utils.libvirt = libvirt_inspector.libvirt with mock.patch('ceilometer.compute.virt.libvirt.utils.' 'refresh_libvirt_connection', return_value=None): self.inspector = libvirt_inspector.LibvirtInspector(conf) def test_inspect_instance_stats(self): domain = mock.Mock() domain.info.return_value = (0, 0, 0, 2, 999999) domain.memoryStats.return_value = {'available': 51200, 'unused': 25600, 'rss': 30000, 'swap_in': 5120, 'swap_out': 8192} conn = mock.Mock() conn.lookupByUUIDString.return_value = domain conn.domainListGetStats.return_value = [({}, { 'cpu.time': 999999, 'vcpu.maximum': 4, 'vcpu.current': 2, 'vcpu.0.time': 10000, 'vcpu.0.wait': 10000, 'vcpu.2.time': 10000, 'vcpu.2.wait': 10000, 'perf.cpu_cycles': 7259361, 'perf.instructions': 8815623, 'perf.cache_references': 74184, 'perf.cache_misses': 16737})] with mock.patch('ceilometer.compute.virt.libvirt.utils.' 
'refresh_libvirt_connection', return_value=conn): stats = self.inspector.inspect_instance(self.instance, None) self.assertEqual(0, stats.power_state) self.assertEqual(2, stats.cpu_number) self.assertEqual(40000, stats.cpu_time) self.assertEqual(25600 / units.Ki, stats.memory_usage) self.assertEqual(30000 / units.Ki, stats.memory_resident) self.assertEqual(5120 / units.Ki, stats.memory_swap_in) self.assertEqual(8192 / units.Ki, stats.memory_swap_out) self.assertEqual(7259361, stats.cpu_cycles) self.assertEqual(8815623, stats.instructions) self.assertEqual(74184, stats.cache_references) self.assertEqual(16737, stats.cache_misses) def test_inspect_instance_stats_fallback_cpu_time(self): domain = mock.Mock() domain.info.return_value = (0, 0, 0, 2, 20000) domain.memoryStats.return_value = {'available': 51200, 'unused': 25600, 'rss': 30000} conn = mock.Mock() conn.lookupByUUIDString.return_value = domain conn.domainListGetStats.return_value = [({}, { 'vcpu.current': 2, 'vcpu.maximum': 4, 'vcpu.0.time': 10000, 'vcpu.1.time': 10000, 'cpu.time': 999999})] with mock.patch('ceilometer.compute.virt.libvirt.utils.' 'refresh_libvirt_connection', return_value=conn): stats = self.inspector.inspect_instance(self.instance) self.assertEqual(2, stats.cpu_number) self.assertEqual(999999, stats.cpu_time) def test_inspect_cpus_with_domain_shutoff(self): domain = mock.Mock() domain.info.return_value = (5, 0, 0, 2, 999999) conn = mock.Mock() conn.lookupByUUIDString.return_value = domain with mock.patch('ceilometer.compute.virt.libvirt.utils.' 'refresh_libvirt_connection', return_value=conn): self.assertRaises(virt_inspector.InstanceShutOffException, self.inspector.inspect_instance, self.instance, None) def test_inspect_vnics(self): dom_xml = """
""" interface_stats = { 'vnet0': (1, 2, 21, 22, 3, 4, 23, 24), 'vnet1': (5, 6, 25, 26, 7, 8, 27, 28), 'vnet2': (9, 10, 29, 30, 11, 12, 31, 32), } interfaceStats = interface_stats.__getitem__ domain = mock.Mock() domain.XMLDesc.return_value = dom_xml domain.info.return_value = (0, 0, 0, 2, 999999) domain.interfaceStats.side_effect = interfaceStats conn = mock.Mock() conn.lookupByUUIDString.return_value = domain with mock.patch('ceilometer.compute.virt.libvirt.utils.' 'refresh_libvirt_connection', return_value=conn): interfaces = list(self.inspector.inspect_vnics( self.instance, None)) self.assertEqual(3, len(interfaces)) vnic0 = interfaces[0] self.assertEqual('vnet0', vnic0.name) self.assertEqual('fa:16:3e:71:ec:6d', vnic0.mac) self.assertEqual('nova-instance-00000001-fa163e71ec6d', vnic0.fref) self.assertEqual('255.255.255.0', vnic0.parameters.get('projmask')) self.assertEqual('10.0.0.2', vnic0.parameters.get('ip')) self.assertEqual('10.0.0.0', vnic0.parameters.get('projnet')) self.assertEqual('10.0.0.1', vnic0.parameters.get('dhcpserver')) self.assertEqual(1, vnic0.rx_bytes) self.assertEqual(2, vnic0.rx_packets) self.assertEqual(3, vnic0.tx_bytes) self.assertEqual(4, vnic0.tx_packets) self.assertEqual(21, vnic0.rx_errors) self.assertEqual(22, vnic0.rx_drop) self.assertEqual(23, vnic0.tx_errors) self.assertEqual(24, vnic0.tx_drop) vnic1 = interfaces[1] self.assertEqual('vnet1', vnic1.name) self.assertEqual('fa:16:3e:71:ec:6e', vnic1.mac) self.assertEqual('nova-instance-00000001-fa163e71ec6e', vnic1.fref) self.assertEqual('255.255.255.0', vnic1.parameters.get('projmask')) self.assertEqual('192.168.0.2', vnic1.parameters.get('ip')) self.assertEqual('192.168.0.0', vnic1.parameters.get('projnet')) self.assertEqual('192.168.0.1', vnic1.parameters.get('dhcpserver')) self.assertEqual(5, vnic1.rx_bytes) self.assertEqual(6, vnic1.rx_packets) self.assertEqual(7, vnic1.tx_bytes) self.assertEqual(8, vnic1.tx_packets) self.assertEqual(25, vnic1.rx_errors) self.assertEqual(26, 
vnic1.rx_drop) self.assertEqual(27, vnic1.tx_errors) self.assertEqual(28, vnic1.tx_drop) vnic2 = interfaces[2] self.assertEqual('vnet2', vnic2.name) self.assertEqual('fa:16:3e:96:33:f0', vnic2.mac) self.assertIsNone(vnic2.fref) self.assertEqual( {'interfaceid': None, 'bridge': 'qbr420008b3-7c'}, vnic2.parameters) self.assertEqual(9, vnic2.rx_bytes) self.assertEqual(10, vnic2.rx_packets) self.assertEqual(11, vnic2.tx_bytes) self.assertEqual(12, vnic2.tx_packets) self.assertEqual(29, vnic2.rx_errors) self.assertEqual(30, vnic2.rx_drop) self.assertEqual(31, vnic2.tx_errors) self.assertEqual(32, vnic2.tx_drop) def test_inspect_vnics_with_domain_shutoff(self): domain = mock.Mock() domain.info.return_value = (5, 0, 0, 2, 999999) conn = mock.Mock() conn.lookupByUUIDString.return_value = domain with mock.patch('ceilometer.compute.virt.libvirt.utils.' 'refresh_libvirt_connection', return_value=conn): inspect = self.inspector.inspect_vnics self.assertRaises(virt_inspector.InstanceShutOffException, list, inspect(self.instance, None)) def test_inspect_disks(self): dom_xml = """
""" blockStatsFlags = {'wr_total_times': 91752302267, 'rd_operations': 6756, 'flush_total_times': 1310427331, 'rd_total_times': 29142253616, 'rd_bytes': 171460096, 'flush_operations': 746, 'wr_operations': 1437, 'wr_bytes': 13574656} domain = mock.Mock() domain.XMLDesc.return_value = dom_xml domain.info.return_value = (0, 0, 0, 2, 999999) domain.blockStats.return_value = (1, 2, 3, 4, -1) domain.blockStatsFlags.return_value = blockStatsFlags conn = mock.Mock() conn.lookupByUUIDString.return_value = domain with mock.patch('ceilometer.compute.virt.libvirt.utils.' 'refresh_libvirt_connection', return_value=conn): disks = list(self.inspector.inspect_disks(self.instance, None)) self.assertEqual(1, len(disks)) self.assertEqual('vda', disks[0].device) self.assertEqual(1, disks[0].read_requests) self.assertEqual(2, disks[0].read_bytes) self.assertEqual(3, disks[0].write_requests) self.assertEqual(4, disks[0].write_bytes) self.assertEqual(91752302267, disks[0].wr_total_times) self.assertEqual(29142253616, disks[0].rd_total_times) def test_inspect_disks_with_domain_shutoff(self): domain = mock.Mock() domain.info.return_value = (5, 0, 0, 2, 999999) conn = mock.Mock() conn.lookupByUUIDString.return_value = domain with mock.patch('ceilometer.compute.virt.libvirt.utils.' 'refresh_libvirt_connection', return_value=conn): inspect = self.inspector.inspect_disks self.assertRaises(virt_inspector.InstanceShutOffException, list, inspect(self.instance, None)) def test_inspect_disk_info(self): dom_xml = """
""" domain = mock.Mock() domain.XMLDesc.return_value = dom_xml domain.blockInfo.return_value = (1, 2, 3, -1) domain.info.return_value = (0, 0, 0, 2, 999999) conn = mock.Mock() conn.lookupByUUIDString.return_value = domain with mock.patch('ceilometer.compute.virt.libvirt.utils.' 'refresh_libvirt_connection', return_value=conn): disks = list(self.inspector.inspect_disk_info( self.instance, None)) self.assertEqual(1, len(disks)) self.assertEqual('vda', disks[0].device) self.assertEqual(3, disks[0].capacity) self.assertEqual(2, disks[0].allocation) self.assertEqual(3, disks[0].physical) def test_inspect_disk_info_network_type(self): dom_xml = """
""" domain = mock.Mock() domain.XMLDesc.return_value = dom_xml domain.blockInfo.return_value = (1, 2, 3, -1) domain.info.return_value = (0, 0, 0, 2, 999999) conn = mock.Mock() conn.lookupByUUIDString.return_value = domain with mock.patch('ceilometer.compute.virt.libvirt.utils.' 'refresh_libvirt_connection', return_value=conn): disks = list(self.inspector.inspect_disk_info(self.instance, None)) self.assertEqual(1, len(disks)) def test_inspect_disk_info_without_source_element(self): dom_xml = """
""" domain = mock.Mock() domain.XMLDesc.return_value = dom_xml domain.blockInfo.return_value = (1, 2, 3, -1) domain.info.return_value = (0, 0, 0, 2, 999999) conn = mock.Mock() conn.lookupByUUIDString.return_value = domain with mock.patch('ceilometer.compute.virt.libvirt.utils.' 'refresh_libvirt_connection', return_value=conn): disks = list(self.inspector.inspect_disk_info(self.instance, None)) self.assertEqual(0, len(disks)) def test_inspect_disks_without_source_element(self): dom_xml = """
""" blockStatsFlags = {'wr_total_times': 91752302267, 'rd_operations': 6756, 'flush_total_times': 1310427331, 'rd_total_times': 29142253616, 'rd_bytes': 171460096, 'flush_operations': 746, 'wr_operations': 1437, 'wr_bytes': 13574656} domain = mock.Mock() domain.XMLDesc.return_value = dom_xml domain.info.return_value = (0, 0, 0, 2, 999999) domain.blockStats.return_value = (1, 2, 3, 4, -1) domain.blockStatsFlags.return_value = blockStatsFlags conn = mock.Mock() conn.lookupByUUIDString.return_value = domain with mock.patch('ceilometer.compute.virt.libvirt.utils.' 'refresh_libvirt_connection', return_value=conn): disks = list(self.inspector.inspect_disks(self.instance, None)) self.assertEqual(0, len(disks)) def test_inspect_memory_usage_with_domain_shutoff(self): domain = mock.Mock() domain.info.return_value = (5, 0, 51200, 2, 999999) conn = mock.Mock() conn.lookupByUUIDString.return_value = domain with mock.patch('ceilometer.compute.virt.libvirt.utils.' 'refresh_libvirt_connection', return_value=conn): self.assertRaises(virt_inspector.InstanceShutOffException, self.inspector.inspect_instance, self.instance, None) def test_inspect_memory_with_empty_stats(self): domain = mock.Mock() domain.info.return_value = (0, 0, 51200, 2, 999999) domain.memoryStats.return_value = {} conn = mock.Mock() conn.domainListGetStats.return_value = [({}, {})] conn.lookupByUUIDString.return_value = domain with mock.patch('ceilometer.compute.virt.libvirt.utils.' 
'refresh_libvirt_connection', return_value=conn): stats = self.inspector.inspect_instance(self.instance, None) self.assertIsNone(stats.memory_usage) self.assertIsNone(stats.memory_resident) self.assertIsNone(stats.memory_swap_in) self.assertIsNone(stats.memory_swap_out) def test_inspect_memory_with_usable(self): domain = mock.Mock() domain.info.return_value = (0, 0, 0, 2, 999999) domain.memoryStats.return_value = {'available': 76800, 'rss': 30000, 'swap_in': 5120, 'swap_out': 8192, 'unused': 25600, 'usable': 51200} conn = mock.Mock() conn.domainListGetStats.return_value = [({}, {})] conn.lookupByUUIDString.return_value = domain with mock.patch('ceilometer.compute.virt.libvirt.utils.' 'refresh_libvirt_connection', return_value=conn): stats = self.inspector.inspect_instance(self.instance, None) self.assertEqual(25600 / units.Ki, stats.memory_usage) self.assertEqual(30000 / units.Ki, stats.memory_resident) self.assertEqual(5120 / units.Ki, stats.memory_swap_in) self.assertEqual(8192 / units.Ki, stats.memory_swap_out) def test_inspect_perf_events_libvirt_less_than_2_3_0(self): domain = mock.Mock() domain.info.return_value = (0, 0, 51200, 2, 999999) domain.memoryStats.return_value = {'rss': 0, 'available': 51200, 'unused': 25600} conn = mock.Mock() conn.domainListGetStats.return_value = [({}, {})] conn.lookupByUUIDString.return_value = domain with mock.patch('ceilometer.compute.virt.libvirt.utils.' 'refresh_libvirt_connection', return_value=conn): stats = self.inspector.inspect_instance(self.instance, None) self.assertIsNone(stats.cpu_cycles) self.assertIsNone(stats.instructions) self.assertIsNone(stats.cache_references) self.assertIsNone(stats.cache_misses) class TestLibvirtInspectionWithError(base.BaseTestCase): def setUp(self): super().setUp() conf = service.prepare_service([], []) self.useFixture(fixtures.MonkeyPatch( 'ceilometer.compute.virt.libvirt.utils.' 
'refresh_libvirt_connection', mock.MagicMock(side_effect=[None, Exception('dummy')]))) libvirt_inspector.libvirt = mock.Mock() libvirt_inspector.libvirt.libvirtError = FakeLibvirtError utils.libvirt = libvirt_inspector.libvirt self.inspector = libvirt_inspector.LibvirtInspector(conf) def test_inspect_unknown_error(self): self.assertRaises(virt_inspector.InspectorException, self.inspector.inspect_instance, 'foo', None) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7969415 ceilometer-24.1.0.dev59/ceilometer/tests/unit/event/0000775000175100017510000000000015033033521021356 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/event/__init__.py0000664000175100017510000000000015033033467023466 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/event/test_converter.py0000664000175100017510000010030515033033467025006 0ustar00mylesmyles# # Copyright 2013 Rackspace Hosting. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime from unittest import mock import jsonpath_rw_ext from ceilometer import declarative from ceilometer.event import converter from ceilometer.event import models from ceilometer import service as ceilometer_service from ceilometer.tests import base class ConverterBase(base.BaseTestCase): @staticmethod def _create_test_notification(event_type, message_id, **kw): return dict(event_type=event_type, metadata=dict(message_id=message_id, timestamp="2013-08-08 21:06:37.803826"), publisher_id="compute.host-1-2-3", payload=kw, ) def assertIsValidEvent(self, event, notification): self.assertIsNot( None, event, "Notification dropped unexpectedly:" " %s" % str(notification)) self.assertIsInstance(event, models.Event) def assertIsNotValidEvent(self, event, notification): self.assertIs( None, event, "Notification NOT dropped when expected to be dropped:" " %s" % str(notification)) def assertHasTrait(self, event, name, value=None, dtype=None): traits = [trait for trait in event.traits if trait.name == name] self.assertGreater( len(traits), 0, "Trait {} not found in event {}".format(name, event)) trait = traits[0] if value is not None: self.assertEqual(value, trait.value) if dtype is not None: self.assertEqual(dtype, trait.dtype) if dtype == models.Trait.INT_TYPE: self.assertIsInstance(trait.value, int) elif dtype == models.Trait.FLOAT_TYPE: self.assertIsInstance(trait.value, float) elif dtype == models.Trait.DATETIME_TYPE: self.assertIsInstance(trait.value, datetime.datetime) elif dtype == models.Trait.TEXT_TYPE: self.assertIsInstance(trait.value, str) def assertDoesNotHaveTrait(self, event, name): traits = [trait for trait in event.traits if trait.name == name] self.assertEqual( len(traits), 0, "Extra Trait {} found in event {}".format(name, event)) def assertHasDefaultTraits(self, event): text = models.Trait.TEXT_TYPE self.assertHasTrait(event, 'service', dtype=text) def _cmp_tree(self, this, other): if hasattr(this, 'right') and hasattr(other, 'right'): return 
(self._cmp_tree(this.right, other.right) and self._cmp_tree(this.left, other.left)) if not hasattr(this, 'right') and not hasattr(other, 'right'): return this == other return False def assertPathsEqual(self, path1, path2): self.assertTrue(self._cmp_tree(path1, path2), 'JSONPaths not equivalent {} {}'.format(path1, path2)) class TestTraitDefinition(ConverterBase): def setUp(self): super().setUp() self.n1 = self._create_test_notification( "test.thing", "uuid-for-notif-0001", instance_uuid="uuid-for-instance-0001", instance_id="id-for-instance-0001", instance_uuid2=None, instance_id2=None, host='host-1-2-3', bogus_date='', image_meta=dict( disk_gb='20', thing='whatzit'), foobar=50) self.ext1 = mock.MagicMock(name='mock_test_plugin') self.test_plugin_class = self.ext1.plugin self.test_plugin = self.test_plugin_class() self.test_plugin.trait_values.return_value = ['foobar'] self.ext1.reset_mock() self.ext2 = mock.MagicMock(name='mock_nothing_plugin') self.nothing_plugin_class = self.ext2.plugin self.nothing_plugin = self.nothing_plugin_class() self.nothing_plugin.trait_values.return_value = [None] self.ext2.reset_mock() self.fake_plugin_mgr = dict(test=self.ext1, nothing=self.ext2) def test_to_trait_with_plugin(self): cfg = dict(type='text', fields=['payload.instance_id', 'payload.instance_uuid'], plugin=dict(name='test')) tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsInstance(t, models.Trait) self.assertEqual('test_trait', t.name) self.assertEqual(models.Trait.TEXT_TYPE, t.dtype) self.assertEqual('foobar', t.value) self.test_plugin_class.assert_called_once_with() self.test_plugin.trait_values.assert_called_once_with([ ('payload.instance_id', 'id-for-instance-0001'), ('payload.instance_uuid', 'uuid-for-instance-0001')]) def test_to_trait_null_match_with_plugin(self): cfg = dict(type='text', fields=['payload.nothere', 'payload.bogus'], plugin=dict(name='test')) tdef = 
converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsInstance(t, models.Trait) self.assertEqual('test_trait', t.name) self.assertEqual(models.Trait.TEXT_TYPE, t.dtype) self.assertEqual('foobar', t.value) self.test_plugin_class.assert_called_once_with() self.test_plugin.trait_values.assert_called_once_with([]) def test_to_trait_with_plugin_null(self): cfg = dict(type='text', fields=['payload.instance_id', 'payload.instance_uuid'], plugin=dict(name='nothing')) tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsNone(t) self.nothing_plugin_class.assert_called_once_with() self.nothing_plugin.trait_values.assert_called_once_with([ ('payload.instance_id', 'id-for-instance-0001'), ('payload.instance_uuid', 'uuid-for-instance-0001')]) def test_to_trait_with_plugin_with_parameters(self): cfg = dict(type='text', fields=['payload.instance_id', 'payload.instance_uuid'], plugin=dict(name='test', parameters=dict(a=1, b='foo'))) tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsInstance(t, models.Trait) self.assertEqual('test_trait', t.name) self.assertEqual(models.Trait.TEXT_TYPE, t.dtype) self.assertEqual('foobar', t.value) self.test_plugin_class.assert_called_once_with(a=1, b='foo') self.test_plugin.trait_values.assert_called_once_with([ ('payload.instance_id', 'id-for-instance-0001'), ('payload.instance_uuid', 'uuid-for-instance-0001')]) def test_to_trait(self): cfg = dict(type='text', fields='payload.instance_id') tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsInstance(t, models.Trait) self.assertEqual('test_trait', t.name) self.assertEqual(models.Trait.TEXT_TYPE, t.dtype) self.assertEqual('id-for-instance-0001', t.value) cfg = dict(type='int', fields='payload.image_meta.disk_gb') tdef = converter.TraitDefinition('test_trait', cfg, 
self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsInstance(t, models.Trait) self.assertEqual('test_trait', t.name) self.assertEqual(models.Trait.INT_TYPE, t.dtype) self.assertEqual(20, t.value) def test_to_trait_multiple(self): cfg = dict(type='text', fields=['payload.instance_id', 'payload.instance_uuid']) tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsInstance(t, models.Trait) self.assertEqual('id-for-instance-0001', t.value) cfg = dict(type='text', fields=['payload.instance_uuid', 'payload.instance_id']) tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsInstance(t, models.Trait) self.assertEqual('uuid-for-instance-0001', t.value) def test_to_trait_multiple_different_nesting(self): cfg = dict(type='int', fields=['payload.foobar', 'payload.image_meta.disk_gb']) tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsInstance(t, models.Trait) self.assertEqual(50, t.value) cfg = dict(type='int', fields=['payload.image_meta.disk_gb', 'payload.foobar']) tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsInstance(t, models.Trait) self.assertEqual(20, t.value) def test_to_trait_some_null_multiple(self): cfg = dict(type='text', fields=['payload.instance_id2', 'payload.instance_uuid']) tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsInstance(t, models.Trait) self.assertEqual('uuid-for-instance-0001', t.value) def test_to_trait_some_missing_multiple(self): cfg = dict(type='text', fields=['payload.not_here_boss', 'payload.instance_uuid']) tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsInstance(t, models.Trait) self.assertEqual('uuid-for-instance-0001', t.value) def 
test_to_trait_missing(self): cfg = dict(type='text', fields='payload.not_here_boss') tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsNone(t) def test_to_trait_null(self): cfg = dict(type='text', fields='payload.instance_id2') tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsNone(t) def test_to_trait_empty_nontext(self): cfg = dict(type='datetime', fields='payload.bogus_date') tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsNone(t) def test_to_trait_multiple_null_missing(self): cfg = dict(type='text', fields=['payload.not_here_boss', 'payload.instance_id2']) tdef = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) t = tdef.to_trait(self.n1) self.assertIsNone(t) def test_missing_fields_config(self): self.assertRaises(declarative.DefinitionException, converter.TraitDefinition, 'bogus_trait', dict(), self.fake_plugin_mgr) def test_string_fields_config(self): cfg = dict(fields='payload.test') t = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) self.assertPathsEqual(t.getter.__self__, jsonpath_rw_ext.parse('payload.test')) def test_list_fields_config(self): cfg = dict(fields=['payload.test', 'payload.other']) t = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) self.assertPathsEqual( t.getter.__self__, jsonpath_rw_ext.parse('(payload.test)|(payload.other)')) def test_invalid_path_config(self): # test invalid jsonpath... cfg = dict(fields='payload.bogus(') self.assertRaises(declarative.DefinitionException, converter.TraitDefinition, 'bogus_trait', cfg, self.fake_plugin_mgr) def test_invalid_plugin_config(self): # test invalid jsonpath... 
cfg = dict(fields='payload.test', plugin=dict(bogus="true")) self.assertRaises(declarative.DefinitionException, converter.TraitDefinition, 'test_trait', cfg, self.fake_plugin_mgr) def test_unknown_plugin(self): # test invalid jsonpath... cfg = dict(fields='payload.test', plugin=dict(name='bogus')) self.assertRaises(declarative.DefinitionException, converter.TraitDefinition, 'test_trait', cfg, self.fake_plugin_mgr) def test_type_config(self): cfg = dict(type='text', fields='payload.test') t = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) self.assertEqual(models.Trait.TEXT_TYPE, t.trait_type) cfg = dict(type='int', fields='payload.test') t = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) self.assertEqual(models.Trait.INT_TYPE, t.trait_type) cfg = dict(type='float', fields='payload.test') t = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) self.assertEqual(models.Trait.FLOAT_TYPE, t.trait_type) cfg = dict(type='datetime', fields='payload.test') t = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) self.assertEqual(models.Trait.DATETIME_TYPE, t.trait_type) def test_invalid_type_config(self): # test invalid jsonpath... 
cfg = dict(type='bogus', fields='payload.test') self.assertRaises(declarative.DefinitionException, converter.TraitDefinition, 'bogus_trait', cfg, self.fake_plugin_mgr) class TestEventDefinition(ConverterBase): def setUp(self): super().setUp() self.traits_cfg = { 'instance_id': { 'type': 'text', 'fields': ['payload.instance_uuid', 'payload.instance_id'], }, 'host': { 'type': 'text', 'fields': 'payload.host', }, } self.test_notification1 = self._create_test_notification( "test.thing", "uuid-for-notif-0001", instance_id="uuid-for-instance-0001", host='host-1-2-3') self.test_notification2 = self._create_test_notification( "test.thing", "uuid-for-notif-0002", instance_id="uuid-for-instance-0002") self.test_notification3 = self._create_test_notification( "test.thing", "uuid-for-notif-0003", instance_id="uuid-for-instance-0003", host=None) self.fake_plugin_mgr = {} def test_to_event(self): dtype = models.Trait.TEXT_TYPE cfg = dict(event_type='test.thing', traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr, []) e = edef.to_event('INFO', self.test_notification1) self.assertEqual('test.thing', e.event_type) self.assertEqual(datetime.datetime(2013, 8, 8, 21, 6, 37, 803826), e.generated) self.assertHasDefaultTraits(e) self.assertHasTrait(e, 'host', value='host-1-2-3', dtype=dtype) self.assertHasTrait(e, 'instance_id', value='uuid-for-instance-0001', dtype=dtype) def test_to_event_missing_trait(self): dtype = models.Trait.TEXT_TYPE cfg = dict(event_type='test.thing', traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr, []) e = edef.to_event('INFO', self.test_notification2) self.assertHasDefaultTraits(e) self.assertHasTrait(e, 'instance_id', value='uuid-for-instance-0002', dtype=dtype) self.assertDoesNotHaveTrait(e, 'host') def test_to_event_null_trait(self): dtype = models.Trait.TEXT_TYPE cfg = dict(event_type='test.thing', traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr, []) e = 
edef.to_event('INFO', self.test_notification3) self.assertHasDefaultTraits(e) self.assertHasTrait(e, 'instance_id', value='uuid-for-instance-0003', dtype=dtype) self.assertDoesNotHaveTrait(e, 'host') def test_bogus_cfg_no_traits(self): bogus = dict(event_type='test.foo') self.assertRaises(declarative.DefinitionException, converter.EventDefinition, bogus, self.fake_plugin_mgr, []) def test_bogus_cfg_no_type(self): bogus = dict(traits=self.traits_cfg) self.assertRaises(declarative.DefinitionException, converter.EventDefinition, bogus, self.fake_plugin_mgr, []) def test_included_type_string(self): cfg = dict(event_type='test.thing', traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr, []) self.assertEqual(1, len(edef._included_types)) self.assertEqual('test.thing', edef._included_types[0]) self.assertEqual(0, len(edef._excluded_types)) self.assertTrue(edef.included_type('test.thing')) self.assertFalse(edef.excluded_type('test.thing')) self.assertTrue(edef.match_type('test.thing')) self.assertFalse(edef.match_type('random.thing')) def test_included_type_list(self): cfg = dict(event_type=['test.thing', 'other.thing'], traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr, []) self.assertEqual(2, len(edef._included_types)) self.assertEqual(0, len(edef._excluded_types)) self.assertTrue(edef.included_type('test.thing')) self.assertTrue(edef.included_type('other.thing')) self.assertFalse(edef.excluded_type('test.thing')) self.assertTrue(edef.match_type('test.thing')) self.assertTrue(edef.match_type('other.thing')) self.assertFalse(edef.match_type('random.thing')) def test_excluded_type_string(self): cfg = dict(event_type='!test.thing', traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr, []) self.assertEqual(1, len(edef._included_types)) self.assertEqual('*', edef._included_types[0]) self.assertEqual('test.thing', edef._excluded_types[0]) self.assertEqual(1, len(edef._excluded_types)) 
self.assertEqual('test.thing', edef._excluded_types[0]) self.assertTrue(edef.excluded_type('test.thing')) self.assertTrue(edef.included_type('random.thing')) self.assertFalse(edef.match_type('test.thing')) self.assertTrue(edef.match_type('random.thing')) def test_excluded_type_list(self): cfg = dict(event_type=['!test.thing', '!other.thing'], traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr, []) self.assertEqual(1, len(edef._included_types)) self.assertEqual(2, len(edef._excluded_types)) self.assertTrue(edef.excluded_type('test.thing')) self.assertTrue(edef.excluded_type('other.thing')) self.assertFalse(edef.excluded_type('random.thing')) self.assertFalse(edef.match_type('test.thing')) self.assertFalse(edef.match_type('other.thing')) self.assertTrue(edef.match_type('random.thing')) def test_mixed_type_list(self): cfg = dict(event_type=['*.thing', '!test.thing', '!other.thing'], traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr, []) self.assertEqual(1, len(edef._included_types)) self.assertEqual(2, len(edef._excluded_types)) self.assertTrue(edef.excluded_type('test.thing')) self.assertTrue(edef.excluded_type('other.thing')) self.assertFalse(edef.excluded_type('random.thing')) self.assertFalse(edef.match_type('test.thing')) self.assertFalse(edef.match_type('other.thing')) self.assertFalse(edef.match_type('random.whatzit')) self.assertTrue(edef.match_type('random.thing')) def test_catchall(self): cfg = dict(event_type=['*.thing', '!test.thing', '!other.thing'], traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr, []) self.assertFalse(edef.is_catchall) cfg = dict(event_type=['!other.thing'], traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr, []) self.assertFalse(edef.is_catchall) cfg = dict(event_type=['other.thing'], traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr, []) self.assertFalse(edef.is_catchall) 
cfg = dict(event_type=['*', '!other.thing'], traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr, []) self.assertFalse(edef.is_catchall) cfg = dict(event_type=['*'], traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr, []) self.assertTrue(edef.is_catchall) cfg = dict(event_type=['*', 'foo'], traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr, []) self.assertTrue(edef.is_catchall) def test_default_traits(self): cfg = dict(event_type='test.thing', traits={}) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr, []) default_traits = converter.EventDefinition.DEFAULT_TRAITS.keys() traits = set(edef.traits.keys()) for dt in default_traits: self.assertIn(dt, traits) self.assertEqual(len(converter.EventDefinition.DEFAULT_TRAITS), len(edef.traits)) def test_traits(self): cfg = dict(event_type='test.thing', traits=self.traits_cfg) edef = converter.EventDefinition(cfg, self.fake_plugin_mgr, []) default_traits = converter.EventDefinition.DEFAULT_TRAITS.keys() traits = set(edef.traits.keys()) for dt in default_traits: self.assertIn(dt, traits) self.assertIn('host', traits) self.assertIn('instance_id', traits) self.assertEqual(len(converter.EventDefinition.DEFAULT_TRAITS) + 2, len(edef.traits)) class TestNotificationConverter(ConverterBase): def setUp(self): super().setUp() self.CONF = ceilometer_service.prepare_service([], []) self.valid_event_def1 = [{ 'event_type': 'compute.instance.create.*', 'traits': { 'instance_id': { 'type': 'text', 'fields': ['payload.instance_uuid', 'payload.instance_id'], }, 'host': { 'type': 'text', 'fields': 'payload.host', }, }, }] self.test_notification1 = self._create_test_notification( "compute.instance.create.start", "uuid-for-notif-0001", instance_id="uuid-for-instance-0001", host='host-1-2-3') self.test_notification2 = self._create_test_notification( "bogus.notification.from.mars", "uuid-for-notif-0002", weird='true', host='cydonia') 
self.fake_plugin_mgr = {} @mock.patch('oslo_utils.timeutils.utcnow') def test_converter_missing_keys(self, mock_utcnow): self.CONF.set_override('drop_unmatched_notifications', False, group='event') # test a malformed notification now = datetime.datetime.utcnow() mock_utcnow.return_value = now c = converter.NotificationEventsConverter( self.CONF, [], self.fake_plugin_mgr) message = {'event_type': "foo", 'metadata': {'message_id': "abc", 'timestamp': str(now)}, 'publisher_id': "1"} e = c.to_event('INFO', message) self.assertIsValidEvent(e, message) self.assertEqual(1, len(e.traits)) self.assertEqual("foo", e.event_type) self.assertEqual(now, e.generated) def test_converter_with_catchall(self): self.CONF.set_override('drop_unmatched_notifications', False, group='event') c = converter.NotificationEventsConverter( self.CONF, self.valid_event_def1, self.fake_plugin_mgr) self.assertEqual(2, len(c.definitions)) e = c.to_event('INFO', self.test_notification1) self.assertIsValidEvent(e, self.test_notification1) self.assertEqual(3, len(e.traits)) self.assertHasDefaultTraits(e) self.assertHasTrait(e, 'instance_id') self.assertHasTrait(e, 'host') e = c.to_event('INFO', self.test_notification2) self.assertIsValidEvent(e, self.test_notification2) self.assertEqual(1, len(e.traits)) self.assertHasDefaultTraits(e) self.assertDoesNotHaveTrait(e, 'instance_id') self.assertDoesNotHaveTrait(e, 'host') def test_converter_without_catchall(self): self.CONF.set_override('drop_unmatched_notifications', True, group='event') c = converter.NotificationEventsConverter( self.CONF, self.valid_event_def1, self.fake_plugin_mgr) self.assertEqual(1, len(c.definitions)) e = c.to_event('INFO', self.test_notification1) self.assertIsValidEvent(e, self.test_notification1) self.assertEqual(3, len(e.traits)) self.assertHasDefaultTraits(e) self.assertHasTrait(e, 'instance_id') self.assertHasTrait(e, 'host') e = c.to_event('INFO', self.test_notification2) self.assertIsNotValidEvent(e, self.test_notification2) 
def test_converter_empty_cfg_with_catchall(self): self.CONF.set_override('drop_unmatched_notifications', False, group='event') c = converter.NotificationEventsConverter( self.CONF, [], self.fake_plugin_mgr) self.assertEqual(1, len(c.definitions)) e = c.to_event('INFO', self.test_notification1) self.assertIsValidEvent(e, self.test_notification1) self.assertEqual(1, len(e.traits)) self.assertHasDefaultTraits(e) e = c.to_event('INFO', self.test_notification2) self.assertIsValidEvent(e, self.test_notification2) self.assertEqual(1, len(e.traits)) self.assertHasDefaultTraits(e) def test_converter_empty_cfg_without_catchall(self): self.CONF.set_override('drop_unmatched_notifications', True, group='event') c = converter.NotificationEventsConverter( self.CONF, [], self.fake_plugin_mgr) self.assertEqual(0, len(c.definitions)) e = c.to_event('INFO', self.test_notification1) self.assertIsNotValidEvent(e, self.test_notification1) e = c.to_event('INFO', self.test_notification2) self.assertIsNotValidEvent(e, self.test_notification2) @staticmethod def _convert_message(convert, level): message = {'priority': level, 'event_type': "foo", 'publisher_id': "1", 'metadata': {'message_id': "abc", 'timestamp': "2013-08-08 21:06:37.803826"}} return convert.to_event(level, message) def test_store_raw_all(self): self.CONF.set_override('store_raw', ['info', 'error'], group='event') c = converter.NotificationEventsConverter( self.CONF, [], self.fake_plugin_mgr) self.assertTrue(self._convert_message(c, 'info').raw) self.assertTrue(self._convert_message(c, 'error').raw) def test_store_raw_info_only(self): self.CONF.set_override('store_raw', ['info'], group='event') c = converter.NotificationEventsConverter( self.CONF, [], self.fake_plugin_mgr) self.assertTrue(self._convert_message(c, 'info').raw) self.assertFalse(self._convert_message(c, 'error').raw) def test_store_raw_error_only(self): self.CONF.set_override('store_raw', ['error'], group='event') c = converter.NotificationEventsConverter( 
self.CONF, [], self.fake_plugin_mgr) self.assertFalse(self._convert_message(c, 'info').raw) self.assertTrue(self._convert_message(c, 'error').raw) def test_store_raw_skip_all(self): c = converter.NotificationEventsConverter( self.CONF, [], self.fake_plugin_mgr) self.assertFalse(self._convert_message(c, 'info').raw) self.assertFalse(self._convert_message(c, 'error').raw) def test_store_raw_info_only_no_case(self): self.CONF.set_override('store_raw', ['INFO'], group='event') c = converter.NotificationEventsConverter( self.CONF, [], self.fake_plugin_mgr) self.assertTrue(self._convert_message(c, 'info').raw) self.assertFalse(self._convert_message(c, 'error').raw) def test_store_raw_bad_skip_all(self): self.CONF.set_override('store_raw', ['unknown'], group='event') c = converter.NotificationEventsConverter( self.CONF, [], self.fake_plugin_mgr) self.assertFalse(self._convert_message(c, 'info').raw) self.assertFalse(self._convert_message(c, 'error').raw) def test_store_raw_bad_and_good(self): self.CONF.set_override('store_raw', ['info', 'unknown'], group='event') c = converter.NotificationEventsConverter( self.CONF, [], self.fake_plugin_mgr) self.assertTrue(self._convert_message(c, 'info').raw) self.assertFalse(self._convert_message(c, 'error').raw) @mock.patch('ceilometer.declarative.LOG') def test_setup_events_load_config_in_code_tree(self, mocked_log): self.CONF.set_override('definitions_cfg_file', '/not/existing/file', group='event') self.CONF.set_override('drop_unmatched_notifications', False, group='event') c = converter.setup_events(self.CONF, self.fake_plugin_mgr) self.assertIsInstance(c, converter.NotificationEventsConverter) log_called_args = mocked_log.debug.call_args_list self.assertEqual( 'No Definitions configuration file found! 
Using default config.', log_called_args[0][0][0]) self.assertTrue(log_called_args[1][0][0].startswith( 'Loading definitions configuration file:')) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/event/test_endpoint.py0000664000175100017510000001620215033033467024621 0ustar00mylesmyles# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for Ceilometer notify daemon.""" from unittest import mock import fixtures import oslo_messaging from oslo_utils import fileutils import yaml from ceilometer.pipeline import event as event_pipe from ceilometer import publisher from ceilometer.publisher import test from ceilometer import service from ceilometer.tests import base as tests_base TEST_NOTICE_CTXT = { 'auth_token': '3d8b13de1b7d499587dfc69b77dc09c2', 'is_admin': True, 'project_id': '7c150a59fe714e6f9263774af9688f0e', 'quota_class': None, 'read_deleted': 'no', 'remote_address': '10.0.2.15', 'request_id': 'req-d68b36e0-9233-467f-9afb-d81435d64d66', 'roles': ['admin'], 'timestamp': '2012-05-08T20:23:41.425105', 'user_id': '1e3ce043029547f1a61c1996d1a531a2', } TEST_NOTICE_METADATA = { 'message_id': 'dae6f69c-00e0-41c0-b371-41ec3b7f4451', 'timestamp': '2012-05-08 20:23:48.028195', } TEST_NOTICE_PAYLOAD = { 'created_at': '2012-05-08 20:23:41', 'deleted_at': '', 'disk_gb': 0, 'display_name': 'testme', 'fixed_ips': [{'address': '10.0.0.2', 
'floating_ips': [], 'meta': {}, 'type': 'fixed', 'version': 4}], 'image_ref_url': 'http://10.0.2.15:9292/images/UUID', 'instance_id': '9f9d01b9-4a58-4271-9e27-398b21ab20d1', 'instance_type': 'm1.tiny', 'instance_type_id': 2, 'launched_at': '2012-05-08 20:23:47.985999', 'memory_mb': 512, 'state': 'active', 'state_description': '', 'tenant_id': '7c150a59fe714e6f9263774af9688f0e', 'user_id': '1e3ce043029547f1a61c1996d1a531a2', 'reservation_id': '1e3ce043029547f1a61c1996d1a531a3', 'vcpus': 1, 'root_gb': 0, 'ephemeral_gb': 0, 'host': 'compute-host-name', 'availability_zone': '1e3ce043029547f1a61c1996d1a531a4', 'os_type': 'linux?', 'architecture': 'x86', 'image_ref': 'UUID', 'kernel_id': '1e3ce043029547f1a61c1996d1a531a5', 'ramdisk_id': '1e3ce043029547f1a61c1996d1a531a6', } class TestEventEndpoint(tests_base.BaseTestCase): @staticmethod def get_publisher(conf, url, namespace=''): fake_drivers = {'test://': test.TestPublisher, 'except://': test.TestPublisher} return fake_drivers[url](conf, url) def _setup_pipeline(self, publishers): ev_pipeline = yaml.dump({ 'sources': [{ 'name': 'test_event', 'events': ['test.test'], 'sinks': ['test_sink'] }], 'sinks': [{ 'name': 'test_sink', 'publishers': publishers }] }) ev_pipeline = ev_pipeline.encode('utf-8') ev_pipeline_cfg_file = fileutils.write_to_tempfile( content=ev_pipeline, prefix="event_pipeline", suffix="yaml") self.CONF.set_override('event_pipeline_cfg_file', ev_pipeline_cfg_file) ev_pipeline_mgr = event_pipe.EventPipelineManager(self.CONF) return ev_pipeline_mgr def _setup_endpoint(self, publishers): ev_pipeline_mgr = self._setup_pipeline(publishers) self.endpoint = event_pipe.EventEndpoint( ev_pipeline_mgr.conf, ev_pipeline_mgr.publisher()) self.endpoint.event_converter = mock.MagicMock() self.endpoint.event_converter.to_event.return_value = mock.MagicMock( event_type='test.test') def setUp(self): super().setUp() self.CONF = service.prepare_service([], []) self.setup_messaging(self.CONF) 
self.useFixture(fixtures.MockPatchObject( publisher, 'get_publisher', side_effect=self.get_publisher)) self.fake_publisher = mock.Mock() self.useFixture(fixtures.MockPatch( 'ceilometer.publisher.test.TestPublisher', return_value=self.fake_publisher)) def test_message_to_event(self): self._setup_endpoint(['test://']) self.endpoint.info([{'ctxt': TEST_NOTICE_CTXT, 'publisher_id': 'compute.vagrant-precise', 'event_type': 'compute.instance.create.end', 'payload': TEST_NOTICE_PAYLOAD, 'metadata': TEST_NOTICE_METADATA}]) def test_bad_event_non_ack_and_requeue(self): self._setup_endpoint(['test://']) self.fake_publisher.publish_events.side_effect = Exception self.CONF.set_override("ack_on_event_error", False, group="notification") ret = self.endpoint.info([{'ctxt': TEST_NOTICE_CTXT, 'publisher_id': 'compute.vagrant-precise', 'event_type': 'compute.instance.create.end', 'payload': TEST_NOTICE_PAYLOAD, 'metadata': TEST_NOTICE_METADATA}]) self.assertEqual(oslo_messaging.NotificationResult.REQUEUE, ret) def test_message_to_event_bad_event(self): self._setup_endpoint(['test://']) self.fake_publisher.publish_events.side_effect = Exception self.CONF.set_override("ack_on_event_error", False, group="notification") message = { 'payload': {'event_type': "foo", 'message_id': "abc"}, 'metadata': {}, 'ctxt': {} } with mock.patch("ceilometer.pipeline.event.LOG") as mock_logger: ret = self.endpoint.process_notifications('info', [message]) self.assertEqual(oslo_messaging.NotificationResult.REQUEUE, ret) exception_mock = mock_logger.error self.assertIn('Exit after error from publisher', exception_mock.call_args_list[0][0][0]) def test_message_to_event_bad_event_multi_publish(self): self._setup_endpoint(['test://', 'except://']) self.fake_publisher.publish_events.side_effect = Exception self.CONF.set_override("ack_on_event_error", False, group="notification") message = { 'payload': {'event_type': "foo", 'message_id': "abc"}, 'metadata': {}, 'ctxt': {} } with 
mock.patch("ceilometer.pipeline.event.LOG") as mock_logger: ret = self.endpoint.process_notifications('info', [message]) self.assertEqual(oslo_messaging.NotificationResult.HANDLED, ret) exception_mock = mock_logger.error self.assertIn('Continue after error from publisher', exception_mock.call_args_list[0][0][0]) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/event/test_trait_plugins.py0000664000175100017510000001342215033033467025666 0ustar00mylesmyles# # Copyright 2013 Rackspace Hosting. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslotest import base from ceilometer.event import trait_plugins class TestSplitterPlugin(base.BaseTestCase): def setUp(self): super().setUp() self.pclass = trait_plugins.SplitterTraitPlugin def test_split(self): param = dict(separator='-', segment=0) plugin = self.pclass(**param) match_list = [('test.thing', 'test-foobar-baz')] value = plugin.trait_values(match_list)[0] self.assertEqual('test', value) param = dict(separator='-', segment=1) plugin = self.pclass(**param) match_list = [('test.thing', 'test-foobar-baz')] value = plugin.trait_values(match_list)[0] self.assertEqual('foobar', value) param = dict(separator='-', segment=1, max_split=1) plugin = self.pclass(**param) match_list = [('test.thing', 'test-foobar-baz')] value = plugin.trait_values(match_list)[0] self.assertEqual('foobar-baz', value) def test_no_sep(self): param = dict(separator='-', segment=0) plugin = self.pclass(**param) match_list = [('test.thing', 'test.foobar.baz')] value = plugin.trait_values(match_list)[0] self.assertEqual('test.foobar.baz', value) def test_no_segment(self): param = dict(separator='-', segment=5) plugin = self.pclass(**param) match_list = [('test.thing', 'test-foobar-baz')] value = plugin.trait_values(match_list)[0] self.assertIsNone(value) def test_no_match(self): param = dict(separator='-', segment=0) plugin = self.pclass(**param) match_list = [] value = plugin.trait_values(match_list) self.assertEqual([], value) class TestBitfieldPlugin(base.BaseTestCase): def setUp(self): super().setUp() self.pclass = trait_plugins.BitfieldTraitPlugin self.init = 0 self.params = dict(initial_bitfield=self.init, flags=[dict(path='payload.foo', bit=0, value=42), dict(path='payload.foo', bit=1, value=12), dict(path='payload.thud', bit=1, value=23), dict(path='thingy.boink', bit=4), dict(path='thingy.quux', bit=6, value="wokka"), dict(path='payload.bar', bit=10, value='test')]) def test_bitfield(self): match_list = [('payload.foo', 12), ('payload.bar', 'test'), ('thingy.boink', 
'testagain')] plugin = self.pclass(**self.params) value = plugin.trait_values(match_list) self.assertEqual(0x412, value[0]) def test_initial(self): match_list = [('payload.foo', 12), ('payload.bar', 'test'), ('thingy.boink', 'testagain')] self.params['initial_bitfield'] = 0x2000 plugin = self.pclass(**self.params) value = plugin.trait_values(match_list) self.assertEqual(0x2412, value[0]) def test_no_match(self): match_list = [] plugin = self.pclass(**self.params) value = plugin.trait_values(match_list) self.assertEqual(self.init, value[0]) def test_multi(self): match_list = [('payload.foo', 12), ('payload.thud', 23), ('payload.bar', 'test'), ('thingy.boink', 'testagain')] plugin = self.pclass(**self.params) value = plugin.trait_values(match_list) self.assertEqual(0x412, value[0]) class TestMapTraitPlugin(base.BaseTestCase): def setUp(self): super().setUp() self.pclass = trait_plugins.MapTraitPlugin self.params = dict(values={'ACTIVE': 1, 'ERROR': 2, 3: 4}, default=-1) def test_map(self): match_list = [('payload.foo', 'ACTIVE'), ('payload.bar', 'ERROR'), ('thingy.boink', 3), ('thingy.invalid', 999)] plugin = self.pclass(**self.params) value = plugin.trait_values(match_list) self.assertEqual([1, 2, 4, -1], value) def test_case_sensitive(self): match_list = [('payload.foo', 'ACTIVE'), ('payload.bar', 'error'), ('thingy.boink', 3), ('thingy.invalid', 999)] plugin = self.pclass(case_sensitive=True, **self.params) value = plugin.trait_values(match_list) self.assertEqual([1, -1, 4, -1], value) def test_case_insensitive(self): match_list = [('payload.foo', 'active'), ('payload.bar', 'ErRoR'), ('thingy.boink', 3), ('thingy.invalid', 999)] plugin = self.pclass(case_sensitive=False, **self.params) value = plugin.trait_values(match_list) self.assertEqual([1, 2, 4, -1], value) def test_values_undefined(self): self.assertRaises(ValueError, self.pclass) def test_values_invalid(self): self.assertRaises( ValueError, lambda: self.pclass(values=[('ACTIVE', 1), ('ERROR', 2), (3, 4)])) 
././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7969415 ceilometer-24.1.0.dev59/ceilometer/tests/unit/image/0000775000175100017510000000000015033033521021317 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/image/__init__.py0000664000175100017510000000000015033033467023427 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/image/test_glance.py0000664000175100017510000000760115033033467024176 0ustar00mylesmyles# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from ceilometer.image import glance from ceilometer.polling import manager from ceilometer import service import ceilometer.tests.base as base IMAGE_LIST = [ type('Image', (object,), {'status': 'active', 'tags': [], 'kernel_id': 'fd24d91a-dfd5-4a3c-b990-d4563eb27396', 'container_format': 'ami', 'min_ram': 0, 'ramdisk_id': 'd629522b-ebaa-4c92-9514-9e31fe760d18', 'updated_at': '2016-06-20T13: 34: 41Z', 'visibility': 'public', 'owner': '6824974c08974d4db864bbaa6bc08303', 'file': '/v2/images/fda54a44-3f96-40bf-ab07-0a4ce9e1761d/file', 'min_disk': 0, 'virtual_size': None, 'id': 'fda54a44-3f96-40bf-ab07-0a4ce9e1761d', 'size': 25165824, 'name': 'cirros-0.3.4-x86_64-uec', 'checksum': 'eb9139e4942121f22bbc2afc0400b2a4', 'created_at': '2016-06-20T13: 34: 40Z', 'disk_format': 'ami', 'protected': False, 'schema': '/v2/schemas/image'}), type('Image', (object,), {'status': 'active', 'tags': [], 'container_format': 'ari', 'min_ram': 0, 'updated_at': '2016-06-20T13: 34: 38Z', 'visibility': 'public', 'owner': '6824974c08974d4db864bbaa6bc08303', 'file': '/v2/images/d629522b-ebaa-4c92-9514-9e31fe760d18/file', 'min_disk': 0, 'virtual_size': None, 'id': 'd629522b-ebaa-4c92-9514-9e31fe760d18', 'size': 3740163, 'name': 'cirros-0.3.4-x86_64-uec-ramdisk', 'checksum': 'be575a2b939972276ef675752936977f', 'created_at': '2016-06-20T13: 34: 37Z', 'disk_format': 'ari', 'protected': False, 'schema': '/v2/schemas/image'}), type('Image', (object,), {'status': 'active', 'tags': [], 'container_format': 'aki', 'min_ram': 0, 'updated_at': '2016-06-20T13: 34: 35Z', 'visibility': 'public', 'owner': '6824974c08974d4db864bbaa6bc08303', 'file': '/v2/images/fd24d91a-dfd5-4a3c-b990-d4563eb27396/file', 'min_disk': 0, 'virtual_size': None, 'id': 'fd24d91a-dfd5-4a3c-b990-d4563eb27396', 'size': 4979632, 'name': 'cirros-0.3.4-x86_64-uec-kernel', 'checksum': '8a40c862b5735975d82605c1dd395796', 'created_at': '2016-06-20T13: 34: 35Z', 'disk_format': 'aki', 'protected': False, 'schema': '/v2/schemas/image'}), ] class 
TestImagePollsterPageSize(base.BaseTestCase): def setUp(self): super().setUp() conf = service.prepare_service([], []) self.manager = manager.AgentManager(0, conf) self.pollster = glance.ImageSizePollster(conf) def test_image_pollster(self): image_samples = list( self.pollster.get_samples(self.manager, {}, resources=IMAGE_LIST)) self.assertEqual(3, len(image_samples)) self.assertEqual('image.size', image_samples[0].name) self.assertEqual(25165824, image_samples[0].volume) self.assertEqual('6824974c08974d4db864bbaa6bc08303', image_samples[0].project_id) self.assertEqual('fda54a44-3f96-40bf-ab07-0a4ce9e1761d', image_samples[0].resource_id) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7969415 ceilometer-24.1.0.dev59/ceilometer/tests/unit/ipmi/0000775000175100017510000000000015033033521021173 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/ipmi/__init__.py0000664000175100017510000000000015033033467023303 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7969415 ceilometer-24.1.0.dev59/ceilometer/tests/unit/ipmi/notifications/0000775000175100017510000000000015033033521024044 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/ipmi/notifications/__init__.py0000664000175100017510000000000015033033467026154 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/ipmi/notifications/ipmi_test_data.py0000664000175100017510000010050015033033467027411 0ustar00mylesmyles# # Copyright 2014 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Sample data for test_ipmi. This data is provided as a sample of the data expected from the ipmitool driver in the Ironic project, which is the publisher of the notifications being tested. """ TEMPERATURE_DATA = { 'DIMM GH VR Temp (0x3b)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Sensor Reading': '26 (+/- 0.500) degrees C', 'Entity ID': '20.6 (Power Module)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '95.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '105.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '100.000', 'Sensor ID': 'DIMM GH VR Temp (0x3b)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, 'CPU1 VR Temp (0x36)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Sensor Reading': '32 (+/- 0.500) degrees C', 'Entity ID': '20.1 (Power Module)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '95.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '105.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': 
'100.000', 'Sensor ID': 'CPU1 VR Temp (0x36)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, 'DIMM EF VR Temp (0x3a)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Sensor Reading': '26 (+/- 0.500) degrees C', 'Entity ID': '20.5 (Power Module)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '95.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '105.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '100.000', 'Sensor ID': 'DIMM EF VR Temp (0x3a)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, 'CPU2 VR Temp (0x37)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Sensor Reading': '31 (+/- 0.500) degrees C', 'Entity ID': '20.2 (Power Module)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '95.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '105.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '100.000', 'Sensor ID': 'CPU2 VR Temp (0x37)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, 'Ambient Temp (0x32)': { 'Status': 'ok', 'Sensor Reading': '25 (+/- 0) degrees C', 'Entity ID': '12.1 (Front Panel Board)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Event Message Control': 'Per-threshold', 'Assertion Events': '', 'Upper non-critical': '43.000', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Upper non-recoverable': '50.000', 
'Positive Hysteresis': '4.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '46.000', 'Sensor ID': 'Ambient Temp (0x32)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '25.000' }, 'Mezz Card Temp (0x35)': { 'Status': 'Disabled', 'Sensor Reading': 'Disabled', 'Entity ID': '44.1 (I/O Module)', 'Event Message Control': 'Per-threshold', 'Upper non-critical': '70.000', 'Upper non-recoverable': '85.000', 'Positive Hysteresis': '4.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '80.000', 'Sensor ID': 'Mezz Card Temp (0x35)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '25.000' }, 'PCH Temp (0x3c)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Sensor Reading': '46 (+/- 0.500) degrees C', 'Entity ID': '45.1 (Processor/IO Module)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '93.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '103.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '98.000', 'Sensor ID': 'PCH Temp (0x3c)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, 'DIMM CD VR Temp (0x39)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Sensor Reading': '27 (+/- 0.500) degrees C', 'Entity ID': '20.4 (Power Module)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion 
Events': '', 'Upper non-critical': '95.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '105.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '100.000', 'Sensor ID': 'DIMM CD VR Temp (0x39)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, 'PCI Riser 2 Temp (0x34)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Sensor Reading': '30 (+/- 0) degrees C', 'Entity ID': '16.2 (System Internal Expansion Board)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '70.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '85.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '80.000', 'Sensor ID': 'PCI Riser 2 Temp (0x34)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, 'DIMM AB VR Temp (0x38)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Sensor Reading': '28 (+/- 0.500) degrees C', 'Entity ID': '20.3 (Power Module)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '95.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '105.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '100.000', 'Sensor ID': 'DIMM AB VR Temp (0x38)', 'Settable Thresholds': '', 'Minimum sensor range': 
'Unspecified', 'Nominal Reading': '16.000' }, 'PCI Riser 1 Temp (0x33)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Sensor Reading': '38 (+/- 0) degrees C', 'Entity ID': '16.1 (System Internal Expansion Board)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '70.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '85.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '80.000', 'Sensor ID': 'PCI Riser 1 Temp (0x33)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, } CURRENT_DATA = { 'Current 1 (0x6b)': { 'Status': 'ok', 'Sensor Reading': '0.800 (+/- 0) Amps', 'Entity ID': '21.0 (Power Management)', 'Assertions Enabled': '', 'Event Message Control': 'Per-threshold', 'Readable Thresholds': 'No Thresholds', 'Positive Hysteresis': 'Unspecified', 'Sensor Type (Analog)': 'Current', 'Negative Hysteresis': 'Unspecified', 'Maximum sensor range': 'Unspecified', 'Sensor ID': 'Current 1 (0x6b)', 'Assertion Events': '', 'Minimum sensor range': '2550.000', 'Settable Thresholds': 'No Thresholds' }, 'Pwr Consumption (0x76)': { 'Entity ID': '7.1 (System Board)', 'Sensor Type (Threshold)': 'Current (0x03)', 'Sensor Reading': '160 (+/- 0) Watts', 'Status': 'ok', 'Nominal Reading': '1034.000', 'Normal Maximum': '1056.000', 'Upper critical': '1914.000', 'Upper non-critical': '1738.000', 'Positive Hysteresis': 'Unspecified', 'Negative Hysteresis': 'Unspecified', 'Minimum sensor range': 'Unspecified', 'Maximum sensor range': '5588.000', 'Sensor ID': 'Pwr Consumption (0x76)', 'Event Message Control': 'Per-threshold', 'Readable Thresholds': 'unc ucr', 'Settable Thresholds': 'unc', 'Assertion Events': '', 'Assertions Enabled': 'unc+ ucr+', 
'Deassertions Enabled': 'unc+ ucr+' } } POWER_DATA = { 'Pwr Consumption (0x76)': { 'Entity ID': '7.1 (System Board)', 'Sensor Type (Threshold)': 'Current (0x03)', 'Sensor Reading': '154 (+/- 0) Watts', 'Status': 'ok', 'Nominal Reading': '1034.000', 'Normal Maximum': '1056.000', 'Upper critical': '1914.000', 'Upper non-critical': '1738.000', 'Positive Hysteresis': 'Unspecified', 'Negative Hysteresis': 'Unspecified', 'Minimum sensor range': 'Unspecified', 'Maximum sensor range': '5588.000', 'Sensor ID': 'Pwr Consumption (0x76)', 'Event Message Control': 'Per-threshold', 'Readable Thresholds': 'unc ucr', 'Settable Thresholds': 'unc', 'Assertion Events': '', 'Assertions Enabled': 'unc+ ucr+', 'Deassertions Enabled': 'unc+ ucr+' } } FAN_DATA = { 'Fan 4A Tach (0x46)': { 'Status': 'ok', 'Sensor Reading': '6900 (+/- 0) RPM', 'Entity ID': '29.4 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2580.000', 'Positive Hysteresis': '120.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '15300.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '120.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 4A Tach (0x46)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '4020.000' }, 'Fan 5A Tach (0x48)': { 'Status': 'ok', 'Sensor Reading': '7140 (+/- 0) RPM', 'Entity ID': '29.5 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2580.000', 'Positive Hysteresis': '120.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '15300.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '120.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 5A Tach (0x48)', 'Settable 
Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '4020.000' }, 'Fan 3A Tach (0x44)': { 'Status': 'ok', 'Sensor Reading': '6900 (+/- 0) RPM', 'Entity ID': '29.3 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2580.000', 'Positive Hysteresis': '120.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '15300.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '120.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 3A Tach (0x44)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '4020.000' }, 'Fan 1A Tach (0x40)': { 'Status': 'ok', 'Sensor Reading': '6960 (+/- 0) RPM', 'Entity ID': '29.1 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2580.000', 'Positive Hysteresis': '120.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '15300.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '120.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 1A Tach (0x40)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '4020.000' }, 'Fan 3B Tach (0x45)': { 'Status': 'ok', 'Sensor Reading': '7104 (+/- 0) RPM', 'Entity ID': '29.3 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2752.000', 'Positive Hysteresis': '128.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '16320.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '128.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 3B Tach (0x45)', 'Settable 
Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '3968.000' }, 'Fan 2A Tach (0x42)': { 'Status': 'ok', 'Sensor Reading': '7080 (+/- 0) RPM', 'Entity ID': '29.2 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2580.000', 'Positive Hysteresis': '120.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '15300.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '120.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 2A Tach (0x42)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '4020.000' }, 'Fan 4B Tach (0x47)': { 'Status': 'ok', 'Sensor Reading': '7488 (+/- 0) RPM', 'Entity ID': '29.4 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2752.000', 'Positive Hysteresis': '128.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '16320.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '128.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 4B Tach (0x47)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '3968.000' }, 'Fan 2B Tach (0x43)': { 'Status': 'ok', 'Sensor Reading': '7168 (+/- 0) RPM', 'Entity ID': '29.2 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2752.000', 'Positive Hysteresis': '128.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '16320.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '128.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 2B Tach (0x43)', 'Settable 
Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '3968.000' }, 'Fan 5B Tach (0x49)': { 'Status': 'ok', 'Sensor Reading': '7296 (+/- 0) RPM', 'Entity ID': '29.5 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2752.000', 'Positive Hysteresis': '128.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '16320.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '128.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 5B Tach (0x49)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '3968.000' }, 'Fan 1B Tach (0x41)': { 'Status': 'ok', 'Sensor Reading': '7296 (+/- 0) RPM', 'Entity ID': '29.1 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2752.000', 'Positive Hysteresis': '128.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '16320.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '128.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 1B Tach (0x41)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '3968.000' }, 'Fan 6B Tach (0x4b)': { 'Status': 'ok', 'Sensor Reading': '7616 (+/- 0) RPM', 'Entity ID': '29.6 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2752.000', 'Positive Hysteresis': '128.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '16320.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '128.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 6B Tach (0x4b)', 'Settable 
Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '3968.000' }, 'Fan 6A Tach (0x4a)': { 'Status': 'ok', 'Sensor Reading': '7080 (+/- 0) RPM', 'Entity ID': '29.6 (Fan Device)', 'Assertions Enabled': 'lcr-', 'Normal Minimum': '2580.000', 'Positive Hysteresis': '120.000', 'Assertion Events': '', 'Event Message Control': 'Per-threshold', 'Normal Maximum': '15300.000', 'Deassertions Enabled': 'lcr-', 'Sensor Type (Analog)': 'Fan', 'Lower critical': '1920.000', 'Negative Hysteresis': '120.000', 'Threshold Read Mask': 'lcr', 'Maximum sensor range': 'Unspecified', 'Readable Thresholds': 'lcr', 'Sensor ID': 'Fan 6A Tach (0x4a)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '4020.000' } } FAN_DATA_PERCENT = { 'Fan 1 (0x23)': { 'Sensor ID': 'Fan 1 (0x23)', 'Entity ID': '7.1 (System Board)', 'Sensor Type (Threshold)': 'Fan (0x04)', 'Sensor Reading': '47.040 (+/- 0) percent', 'Status': 'ok', 'Positive Hysteresis': 'Unspecified', 'Negative Hysteresis': 'Unspecified', 'Minimum sensor range': 'Unspecified', 'Maximum sensor range': 'Unspecified', 'Event Message Control': 'Global Disable Only', 'Readable Thresholds': '', 'Settable Thresholds': '', 'Assertions Enabled': '' } } VOLTAGE_DATA = { 'Planar 12V (0x18)': { 'Status': 'ok', 'Sensor Reading': '12.312 (+/- 0) Volts', 'Entity ID': '7.1 (System Board)', 'Assertions Enabled': 'lcr- ucr+', 'Event Message Control': 'Per-threshold', 'Assertion Events': '', 'Maximum sensor range': 'Unspecified', 'Positive Hysteresis': '0.108', 'Deassertions Enabled': 'lcr- ucr+', 'Sensor Type (Analog)': 'Voltage', 'Lower critical': '10.692', 'Negative Hysteresis': '0.108', 'Threshold Read Mask': 'lcr ucr', 'Upper critical': '13.446', 'Readable Thresholds': 'lcr ucr', 'Sensor ID': 'Planar 12V (0x18)', 'Settable Thresholds': 'lcr ucr', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '12.042' }, 'Planar 3.3V (0x16)': { 'Status': 'ok', 'Sensor Reading': '3.309 (+/- 0) Volts', 
'Entity ID': '7.1 (System Board)', 'Assertions Enabled': 'lcr- ucr+', 'Event Message Control': 'Per-threshold', 'Assertion Events': '', 'Maximum sensor range': 'Unspecified', 'Positive Hysteresis': '0.028', 'Deassertions Enabled': 'lcr- ucr+', 'Sensor Type (Analog)': 'Voltage', 'Lower critical': '3.039', 'Negative Hysteresis': '0.028', 'Threshold Read Mask': 'lcr ucr', 'Upper critical': '3.564', 'Readable Thresholds': 'lcr ucr', 'Sensor ID': 'Planar 3.3V (0x16)', 'Settable Thresholds': 'lcr ucr', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '3.309' }, 'Planar VBAT (0x1c)': { 'Status': 'ok', 'Sensor Reading': '3.137 (+/- 0) Volts', 'Entity ID': '7.1 (System Board)', 'Assertions Enabled': 'lnc- lcr-', 'Event Message Control': 'Per-threshold', 'Assertion Events': '', 'Readable Thresholds': 'lcr lnc', 'Positive Hysteresis': '0.025', 'Deassertions Enabled': 'lnc- lcr-', 'Sensor Type (Analog)': 'Voltage', 'Lower critical': '2.095', 'Negative Hysteresis': '0.025', 'Lower non-critical': '2.248', 'Maximum sensor range': 'Unspecified', 'Sensor ID': 'Planar VBAT (0x1c)', 'Settable Thresholds': 'lcr lnc', 'Threshold Read Mask': 'lcr lnc', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '3.010' }, 'Planar 5V (0x17)': { 'Status': 'ok', 'Sensor Reading': '5.062 (+/- 0) Volts', 'Entity ID': '7.1 (System Board)', 'Assertions Enabled': 'lcr- ucr+', 'Event Message Control': 'Per-threshold', 'Assertion Events': '', 'Maximum sensor range': 'Unspecified', 'Positive Hysteresis': '0.045', 'Deassertions Enabled': 'lcr- ucr+', 'Sensor Type (Analog)': 'Voltage', 'Lower critical': '4.475', 'Negative Hysteresis': '0.045', 'Threshold Read Mask': 'lcr ucr', 'Upper critical': '5.582', 'Readable Thresholds': 'lcr ucr', 'Sensor ID': 'Planar 5V (0x17)', 'Settable Thresholds': 'lcr ucr', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '4.995' } } SENSOR_DATA = { 'metadata': {'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', 'timestamp': 
'2015-06-1909:19:35.786893'}, 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', 'payload': { 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', 'timestamp': '2017-07-07 15:54:12.169510', 'node_uuid': 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', 'event_type': 'hardware.ipmi.metrics.update', 'payload': { 'Temperature': TEMPERATURE_DATA, 'Current': CURRENT_DATA, 'Fan': FAN_DATA, 'Voltage': VOLTAGE_DATA, 'Power': POWER_DATA } } } EMPTY_PAYLOAD = { 'metadata': {'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', 'timestamp': '2015-06-1909:19:35.786893'}, 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', 'payload': { 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', 'timestamp': '2017-07-07 15:54:12.169510', 'node_uuid': 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', 'event_type': 'hardware.ipmi.metrics.update', 'payload': { } } } MISSING_SENSOR = { 'metadata': {'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', 'timestamp': '2015-06-1909:19:35.786893'}, 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', 'payload': { 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', 'timestamp': '2017-07-07 15:54:12.169510', 'node_uuid': 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', 'event_type': 'hardware.ipmi.metrics.update', 'payload': { 'Temperature': { 'PCI Riser 1 Temp (0x33)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Entity ID': '16.1 (System Internal Expansion Board)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '70.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '85.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '80.000', 'Sensor ID': 'PCI Riser 1 Temp (0x33)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': 
'16.000' }, } } } } BAD_SENSOR = { 'metadata': {'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', 'timestamp': '2015-06-1909:19:35.786893'}, 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', 'payload': { 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', 'timestamp': '2017-07-07 15:54:12.169510', 'node_uuid': 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', 'event_type': 'hardware.ipmi.metrics.update', 'payload': { 'Temperature': { 'PCI Riser 1 Temp (0x33)': { 'Status': 'ok', 'Deassertions Enabled': 'unc+ ucr+ unr+', 'Sensor Reading': 'some bad stuff', 'Entity ID': '16.1 (System Internal Expansion Board)', 'Assertions Enabled': 'unc+ ucr+ unr+', 'Positive Hysteresis': '4.000', 'Assertion Events': '', 'Upper non-critical': '70.000', 'Event Message Control': 'Per-threshold', 'Upper non-recoverable': '85.000', 'Normal Maximum': '112.000', 'Maximum sensor range': 'Unspecified', 'Sensor Type (Analog)': 'Temperature', 'Readable Thresholds': 'unc ucr unr', 'Negative Hysteresis': 'Unspecified', 'Threshold Read Mask': 'unc ucr unr', 'Upper critical': '80.000', 'Sensor ID': 'PCI Riser 1 Temp (0x33)', 'Settable Thresholds': '', 'Minimum sensor range': 'Unspecified', 'Nominal Reading': '16.000' }, } } } } NO_SENSOR_ID = { 'metadata': {'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', 'timestamp': '2015-06-1909:19:35.786893'}, 'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', 'payload': { 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', 'timestamp': '2017-07-07 15:54:12.169510', 'node_uuid': 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', 'event_type': 'hardware.ipmi.metrics.update', 'payload': { 'Temperature': { 'PCI Riser 1 Temp (0x33)': { 'Sensor Reading': '26 C', }, } } } } NO_NODE_ID = { 'metadata': {'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', 'timestamp': '2015-06-1909:19:35.786893'}, 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', 'payload': { 'instance_uuid': 
'f11251ax-c568-25ca-4582-0x27add644c6', 'timestamp': '2017-07-07 15:54:12.169510', 'event_type': 'hardware.ipmi.metrics.update', 'payload': { 'Temperature': { 'PCI Riser 1 Temp (0x33)': { 'Sensor Reading': '26 C', 'Sensor ID': 'PCI Riser 1 Temp (0x33)', }, } } } } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/ipmi/notifications/test_ironic.py0000664000175100017510000002162015033033467026752 0ustar00mylesmyles# # Copyright 2014 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for producing IPMI sample messages from notification events.""" from unittest import mock from oslotest import base from ceilometer.ipmi.notifications import ironic as ipmi from ceilometer import sample from ceilometer.tests.unit.ipmi.notifications import ipmi_test_data class TestNotifications(base.BaseTestCase): def test_ipmi_temperature_notification(self): """Test IPMI Temperature sensor data. Based on the above ipmi_testdata the expected sample for a single temperature reading has:: * a resource_id composed from the node_uuid Sensor ID * a name composed from 'hardware.ipmi.' 
and 'temperature' * a volume from the first chunk of the Sensor Reading * a unit from the last chunk of the Sensor Reading * some readings are skipped if the value is 'Disabled' * metatata with the node id """ processor = ipmi.TemperatureSensorNotification(None, None) counters = {counter.resource_id: counter for counter in processor.build_sample(ipmi_test_data.SENSOR_DATA)} self.assertEqual(10, len(counters), 'expected 10 temperature readings') resource_id = ( 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-dimm_gh_vr_temp_(0x3b)' ) test_counter = counters[resource_id] self.assertEqual(26.0, test_counter.volume) self.assertEqual('C', test_counter.unit) self.assertEqual(sample.TYPE_GAUGE, test_counter.type) self.assertEqual('hardware.ipmi.temperature', test_counter.name) self.assertEqual('hardware.ipmi.metrics.update', test_counter.resource_metadata['event_type']) self.assertEqual('f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', test_counter.resource_metadata['node']) def test_ipmi_current_notification(self): """Test IPMI Current sensor data. A single current reading is effectively the same as temperature, modulo "current". """ processor = ipmi.CurrentSensorNotification(None, None) counters = {counter.resource_id: counter for counter in processor.build_sample(ipmi_test_data.SENSOR_DATA)} self.assertEqual(1, len(counters), 'expected 1 current reading') resource_id = ( 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-current_1_(0x6b)' ) test_counter = counters[resource_id] self.assertEqual(0.800, test_counter.volume) self.assertEqual('Amps', test_counter.unit) self.assertEqual(sample.TYPE_GAUGE, test_counter.type) self.assertEqual('hardware.ipmi.current', test_counter.name) def test_ipmi_power_notification(self): """Test IPMI Power sample from Current sensor. A single power reading is effectively the same as temperature, modulo "power". 
""" processor = ipmi.PowerSensorNotification(None, None) counters = {counter.resource_id: counter for counter in processor.build_sample(ipmi_test_data.SENSOR_DATA)} self.assertEqual(1, len(counters), 'expected 1 current reading') resource_id = ( 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-pwr_consumption_(0x76)' ) test_counter = counters[resource_id] self.assertEqual(154, test_counter.volume) self.assertEqual('W', test_counter.unit) self.assertEqual(sample.TYPE_GAUGE, test_counter.type) self.assertEqual('hardware.ipmi.power', test_counter.name) def test_ipmi_fan_notification(self): """Test IPMI Fan sensor data. A single fan reading is effectively the same as temperature, modulo "fan". """ processor = ipmi.FanSensorNotification(None, None) counters = {counter.resource_id: counter for counter in processor.build_sample(ipmi_test_data.SENSOR_DATA)} self.assertEqual(12, len(counters), 'expected 12 fan readings') resource_id = ( 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-fan_4a_tach_(0x46)' ) test_counter = counters[resource_id] self.assertEqual(6900.0, test_counter.volume) self.assertEqual('RPM', test_counter.unit) self.assertEqual(sample.TYPE_GAUGE, test_counter.type) self.assertEqual('hardware.ipmi.fan', test_counter.name) def test_ipmi_voltage_notification(self): """Test IPMI Voltage sensor data. A single voltage reading is effectively the same as temperature, modulo "voltage". 
""" processor = ipmi.VoltageSensorNotification(None, None) counters = {counter.resource_id: counter for counter in processor.build_sample(ipmi_test_data.SENSOR_DATA)} self.assertEqual(4, len(counters), 'expected 4 volate readings') resource_id = ( 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-planar_vbat_(0x1c)' ) test_counter = counters[resource_id] self.assertEqual(3.137, test_counter.volume) self.assertEqual('V', test_counter.unit) self.assertEqual(sample.TYPE_GAUGE, test_counter.type) self.assertEqual('hardware.ipmi.voltage', test_counter.name) def test_disabed_skips_metric(self): """Test that a meter which a disabled volume is skipped.""" processor = ipmi.TemperatureSensorNotification(None, None) counters = {counter.resource_id: counter for counter in processor.build_sample(ipmi_test_data.SENSOR_DATA)} self.assertEqual(10, len(counters), 'expected 10 temperature readings') resource_id = ( 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-mezz_card_temp_(0x35)' ) self.assertNotIn(resource_id, counters) def test_empty_payload_no_metrics_success(self): processor = ipmi.TemperatureSensorNotification(None, None) counters = {counter.resource_id: counter for counter in processor.build_sample(ipmi_test_data.EMPTY_PAYLOAD)} self.assertEqual(0, len(counters), 'expected 0 readings') @mock.patch('ceilometer.ipmi.notifications.ironic.LOG') def test_missing_sensor_data(self, mylog): processor = ipmi.TemperatureSensorNotification(None, None) messages = [] mylog.warning = lambda *args: messages.extend(args) list(processor.build_sample(ipmi_test_data.MISSING_SENSOR)) self.assertEqual( 'invalid sensor data for ' 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-pci_riser_1_temp_(0x33): ' "missing 'Sensor Reading' in payload", messages[0] ) @mock.patch('ceilometer.ipmi.notifications.ironic.LOG') def test_sensor_data_malformed(self, mylog): processor = ipmi.TemperatureSensorNotification(None, None) messages = [] mylog.warning = lambda *args: messages.extend(args) 
list(processor.build_sample(ipmi_test_data.BAD_SENSOR)) self.assertEqual( 'invalid sensor data for ' 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-pci_riser_1_temp_(0x33): ' 'unable to parse sensor reading: some bad stuff', messages[0] ) @mock.patch('ceilometer.ipmi.notifications.ironic.LOG') def test_missing_node_uuid(self, mylog): """Test for desired error message when 'node_uuid' missing. Presumably this will never happen given the way the data is created, but better defensive than dead. """ processor = ipmi.TemperatureSensorNotification(None, None) messages = [] mylog.warning = lambda *args: messages.extend(args) list(processor.build_sample(ipmi_test_data.NO_NODE_ID)) self.assertEqual( 'invalid sensor data for missing id: missing key in payload: ' "'node_uuid'", messages[0] ) @mock.patch('ceilometer.ipmi.notifications.ironic.LOG') def test_missing_sensor_id(self, mylog): """Test for desired error message when 'Sensor ID' missing.""" processor = ipmi.TemperatureSensorNotification(None, None) messages = [] mylog.warning = lambda *args: messages.extend(args) list(processor.build_sample(ipmi_test_data.NO_SENSOR_ID)) self.assertEqual( 'invalid sensor data for missing id: missing key in payload: ' "'Sensor ID'", messages[0] ) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7979414 ceilometer-24.1.0.dev59/ceilometer/tests/unit/ipmi/platform/0000775000175100017510000000000015033033521023017 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/ipmi/platform/__init__.py0000664000175100017510000000000015033033467025127 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/ipmi/platform/fake_utils.py0000664000175100017510000000262715033033467025537 0ustar00mylesmyles# Copyright 2014 Intel Corp. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.ipmi.platform import exception as nmexcept from ceilometer.tests.unit.ipmi.platform import ipmitool_test_data as test_data def get_sensor_status_init(parameter=''): return (' 01\n', '') def get_sensor_status_uninit(parameter=''): return (' 00\n', '') def init_sensor_agent(parameter=''): return (' 00\n', '') def execute(*cmd, **kwargs): datas = { test_data.sdr_info_cmd: test_data.sdr_info, test_data.read_sensor_temperature_cmd: test_data.sensor_temperature, test_data.read_sensor_voltage_cmd: test_data.sensor_voltage, test_data.read_sensor_current_cmd: test_data.sensor_current, test_data.read_sensor_fan_cmd: test_data.sensor_fan, } cmd_str = "".join(cmd) return datas[cmd_str] def execute_without_ipmi(*cmd, **kwargs): raise nmexcept.IPMIException ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/ipmi/platform/ipmitool_test_data.py0000664000175100017510000002751215033033467027275 0ustar00mylesmyles# Copyright 2014 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Sample data for test_ipmi_sensor. This data is provided as a sample of the data expected from the ipmitool binary, which produce Node Manager/IPMI raw data """ sensor_temperature_data = """Sensor ID : SSB Therm Trip (0xd) Entity ID : 7.1 (System Board) Sensor Type (Discrete): Temperature Assertions Enabled : Digital State [State Asserted] Deassertions Enabled : Digital State [State Asserted] Sensor ID : BB P1 VR Temp (0x20) Entity ID : 7.1 (System Board) Sensor Type (Analog) : Temperature Sensor Reading : 25 (+/- 0) degrees C Status : ok Nominal Reading : 58.000 Normal Minimum : 10.000 Normal Maximum : 105.000 Upper critical : 115.000 Upper non-critical : 110.000 Lower critical : 0.000 Lower non-critical : 5.000 Positive Hysteresis : 2.000 Negative Hysteresis : 2.000 Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : lcr lnc unc ucr Settable Thresholds : lcr lnc unc ucr Threshold Read Mask : lcr lnc unc ucr Assertion Events : Assertions Enabled : lnc- lcr- unc+ ucr+ Deassertions Enabled : lnc- lcr- unc+ ucr+ Sensor ID : Front Panel Temp (0x21) Entity ID : 12.1 (Front Panel Board) Sensor Type (Analog) : Temperature Sensor Reading : 23 (+/- 0) degrees C Status : ok Nominal Reading : 28.000 Normal Minimum : 10.000 Normal Maximum : 45.000 Upper critical : 55.000 Upper non-critical : 50.000 Lower critical : 0.000 Lower non-critical : 5.000 Positive Hysteresis : 2.000 Negative Hysteresis : 2.000 Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : 
Per-threshold Readable Thresholds : lcr lnc unc ucr Settable Thresholds : lcr lnc unc ucr Threshold Read Mask : lcr lnc unc ucr Assertion Events : Assertions Enabled : lnc- lcr- unc+ ucr+ Deassertions Enabled : lnc- lcr- unc+ ucr+ Sensor ID : SSB Temp (0x22) Entity ID : 7.1 (System Board) Sensor Type (Analog) : Temperature Sensor Reading : 43 (+/- 0) degrees C Status : ok Nominal Reading : 52.000 Normal Minimum : 10.000 Normal Maximum : 93.000 Upper critical : 103.000 Upper non-critical : 98.000 Lower critical : 0.000 Lower non-critical : 5.000 Positive Hysteresis : 2.000 Negative Hysteresis : 2.000 Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : lcr lnc unc ucr Settable Thresholds : lcr lnc unc ucr Threshold Read Mask : lcr lnc unc ucr Assertion Events : Assertions Enabled : lnc- lcr- unc+ ucr+ Deassertions Enabled : lnc- lcr- unc+ ucr+ """ sensor_voltage_data = """Sensor ID : VR Watchdog (0xb) Entity ID : 7.1 (System Board) Sensor Type (Discrete): Voltage Assertions Enabled : Digital State [State Asserted] Deassertions Enabled : Digital State [State Asserted] Sensor ID : BB +12.0V (0xd0) Entity ID : 7.1 (System Board) Sensor Type (Analog) : Voltage Sensor Reading : 11.831 (+/- 0) Volts Status : ok Nominal Reading : 11.935 Normal Minimum : 11.363 Normal Maximum : 12.559 Upper critical : 13.391 Upper non-critical : 13.027 Lower critical : 10.635 Lower non-critical : 10.947 Positive Hysteresis : 0.052 Negative Hysteresis : 0.052 Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : lcr lnc unc ucr Settable Thresholds : lcr lnc unc ucr Threshold Read Mask : lcr lnc unc ucr Assertion Events : Assertions Enabled : lnc- lcr- unc+ ucr+ Deassertions Enabled : lnc- lcr- unc+ ucr+ Sensor ID : BB +1.35 P1LV AB (0xe4) Entity ID : 7.1 (System Board) Sensor Type (Analog) : Voltage Sensor Reading : Disabled Status : 
Disabled Nominal Reading : 1.342 Normal Minimum : 1.275 Normal Maximum : 1.409 Upper critical : 1.488 Upper non-critical : 1.445 Lower critical : 1.201 Lower non-critical : 1.244 Positive Hysteresis : 0.006 Negative Hysteresis : 0.006 Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : lcr lnc unc ucr Settable Thresholds : lcr lnc unc ucr Threshold Read Mask : lcr lnc unc ucr Event Status : Unavailable Assertions Enabled : lnc- lcr- unc+ ucr+ Deassertions Enabled : lnc- lcr- unc+ ucr+ Sensor ID : BB +5.0V (0xd1) Entity ID : 7.1 (System Board) Sensor Type (Analog) : Voltage Sensor Reading : 4.959 (+/- 0) Volts Status : ok Nominal Reading : 4.981 Normal Minimum : 4.742 Normal Maximum : 5.241 Upper critical : 5.566 Upper non-critical : 5.415 Lower critical : 4.416 Lower non-critical : 4.546 Positive Hysteresis : 0.022 Negative Hysteresis : 0.022 Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : lcr lnc unc ucr Settable Thresholds : lcr lnc unc ucr Threshold Read Mask : lcr lnc unc ucr Assertion Events : Assertions Enabled : lnc- lcr- unc+ ucr+ Deassertions Enabled : lnc- lcr- unc+ ucr+ """ sensor_current_data = """Sensor ID : PS1 Curr Out % (0x58) Entity ID : 10.1 (Power Supply) Sensor Type (Analog) : Current Sensor Reading : 11 (+/- 0) unspecified Status : ok Nominal Reading : 50.000 Normal Minimum : 0.000 Normal Maximum : 100.000 Upper critical : 118.000 Upper non-critical : 100.000 Positive Hysteresis : Unspecified Negative Hysteresis : Unspecified Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : unc ucr Settable Thresholds : unc ucr Threshold Read Mask : unc ucr Assertion Events : Assertions Enabled : unc+ ucr+ Deassertions Enabled : unc+ ucr+ Sensor ID : PS2 Curr Out % (0x59) Entity ID : 10.2 (Power Supply) Sensor Type (Analog) 
: Current Sensor Reading : 0 (+/- 0) unspecified Status : ok Nominal Reading : 50.000 Normal Minimum : 0.000 Normal Maximum : 100.000 Upper critical : 118.000 Upper non-critical : 100.000 Positive Hysteresis : Unspecified Negative Hysteresis : Unspecified Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : unc ucr Settable Thresholds : unc ucr Threshold Read Mask : unc ucr Assertion Events : Assertions Enabled : unc+ ucr+ Deassertions Enabled : unc+ ucr+ Sensor ID : Pwr Consumption (0x76) Entity ID : 7.1 (System Board) Sensor Type (Threshold) : Current (0x03) Sensor Reading : 154 (+/- 0) Watts Status : ok Nominal Reading : 1034.000 Normal Maximum : 1056.000 Upper critical : 1914.000 Upper non-critical : 1738.000 Positive Hysteresis : Unspecified Negative Hysteresis : Unspecified Minimum sensor range : Unspecified Maximum sensor range : 5588.000 Event Message Control : Per-threshold Readable Thresholds : unc ucr Settable Thresholds : unc Assertion Events : Assertions Enabled : unc+ ucr+ Deassertions Enabled : unc+ ucr+ """ sensor_fan_data = """Sensor ID : System Fan 1 (0x30) Entity ID : 29.1 (Fan Device) Sensor Type (Analog) : Fan Sensor Reading : 4704 (+/- 0) RPM Status : ok Nominal Reading : 7497.000 Normal Minimum : 2499.000 Normal Maximum : 12495.000 Lower critical : 1715.000 Lower non-critical : 1960.000 Positive Hysteresis : 49.000 Negative Hysteresis : 49.000 Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : lcr lnc Settable Thresholds : lcr lnc Threshold Read Mask : lcr lnc Assertion Events : Assertions Enabled : lnc- lcr- Deassertions Enabled : lnc- lcr- Sensor ID : System Fan 2 (0x32) Entity ID : 29.2 (Fan Device) Sensor Type (Analog) : Fan Sensor Reading : 4704 (+/- 0) RPM Status : ok Nominal Reading : 7497.000 Normal Minimum : 2499.000 Normal Maximum : 12495.000 Lower critical : 1715.000 Lower 
non-critical : 1960.000 Positive Hysteresis : 49.000 Negative Hysteresis : 49.000 Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : lcr lnc Settable Thresholds : lcr lnc Threshold Read Mask : lcr lnc Assertion Events : Assertions Enabled : lnc- lcr- Deassertions Enabled : lnc- lcr- Sensor ID : System Fan 3 (0x34) Entity ID : 29.3 (Fan Device) Sensor Type (Analog) : Fan Sensor Reading : 4704 (+/- 0) RPM Status : ok Nominal Reading : 7497.000 Normal Minimum : 2499.000 Normal Maximum : 12495.000 Lower critical : 1715.000 Lower non-critical : 1960.000 Positive Hysteresis : 49.000 Negative Hysteresis : 49.000 Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : lcr lnc Settable Thresholds : lcr lnc Threshold Read Mask : lcr lnc Assertion Events : Assertions Enabled : lnc- lcr- Deassertions Enabled : lnc- lcr- Sensor ID : System Fan 4 (0x36) Entity ID : 29.4 (Fan Device) Sensor Type (Analog) : Fan Sensor Reading : 4606 (+/- 0) RPM Status : ok Nominal Reading : 7497.000 Normal Minimum : 2499.000 Normal Maximum : 12495.000 Lower critical : 1715.000 Lower non-critical : 1960.000 Positive Hysteresis : 49.000 Negative Hysteresis : 49.000 Minimum sensor range : Unspecified Maximum sensor range : Unspecified Event Message Control : Per-threshold Readable Thresholds : lcr lnc Settable Thresholds : lcr lnc Threshold Read Mask : lcr lnc Assertion Events : Assertions Enabled : lnc- lcr- Deassertions Enabled : lnc- lcr- """ sensor_status_cmd = 'ipmitoolraw0x0a0x2c0x00' init_sensor_cmd = 'ipmitoolraw0x0a0x2c0x01' sdr_info_cmd = 'ipmitoolsdrinfo' read_sensor_all_cmd = 'ipmitoolsdr-v' read_sensor_temperature_cmd = 'ipmitoolsdr-vtypeTemperature' read_sensor_voltage_cmd = 'ipmitoolsdr-vtypeVoltage' read_sensor_current_cmd = 'ipmitoolsdr-vtypeCurrent' read_sensor_fan_cmd = 'ipmitoolsdr-vtypeFan' sdr_info = ('', '') sensor_temperature 
= (sensor_temperature_data, '') sensor_voltage = (sensor_voltage_data, '') sensor_current = (sensor_current_data, '') sensor_fan = (sensor_fan_data, '') ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/ipmi/platform/test_ipmi_sensor.py0000664000175100017510000001163415033033467026775 0ustar00mylesmyles# Copyright 2014 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from oslotest import base from ceilometer.ipmi.platform import ipmi_sensor from ceilometer.privsep import ipmitool from ceilometer.tests.unit.ipmi.platform import fake_utils class TestIPMISensor(base.BaseTestCase): def setUp(self): super().setUp() ipmitool.ipmi = mock.Mock(side_effect=fake_utils.execute) self.ipmi = ipmi_sensor.IPMISensor() @classmethod def tearDownClass(cls): # reset inited to force an initialization of singleton for next test ipmi_sensor.IPMISensor()._inited = False super().tearDownClass() def test_read_sensor_temperature(self): sensors = self.ipmi.read_sensor_any('Temperature') self.assertTrue(self.ipmi.ipmi_support) # only temperature data returned. self.assertIn('Temperature', sensors) self.assertEqual(1, len(sensors)) # 4 sensor data in total, ignore 1 without 'Sensor Reading'. 
# Check ceilometer/tests/ipmi/platform/ipmi_test_data.py self.assertEqual(3, len(sensors['Temperature'])) sensor = sensors['Temperature']['BB P1 VR Temp (0x20)'] self.assertEqual('25 (+/- 0) degrees C', sensor['Sensor Reading']) def test_read_sensor_voltage(self): sensors = self.ipmi.read_sensor_any('Voltage') # only voltage data returned. self.assertIn('Voltage', sensors) self.assertEqual(1, len(sensors)) # 4 sensor data in total, ignore 1 without 'Sensor Reading'. # Check ceilometer/tests/ipmi/platform/ipmi_test_data.py self.assertEqual(3, len(sensors['Voltage'])) sensor = sensors['Voltage']['BB +5.0V (0xd1)'] self.assertEqual('4.959 (+/- 0) Volts', sensor['Sensor Reading']) def test_read_sensor_current(self): sensors = self.ipmi.read_sensor_any('Current') # only Current data returned. self.assertIn('Current', sensors) self.assertEqual(1, len(sensors)) # 3 sensor data in total. # Check ceilometer/tests/ipmi/platform/ipmi_test_data.py self.assertEqual(3, len(sensors['Current'])) sensor = sensors['Current']['PS1 Curr Out % (0x58)'] self.assertEqual('11 (+/- 0) unspecified', sensor['Sensor Reading']) def test_read_sensor_power(self): sensors = self.ipmi.read_sensor_any('Current') # only Current data returned. self.assertIn('Current', sensors) self.assertEqual(1, len(sensors)) # 3 sensor data in total. # Check ceilometer/tests/ipmi/platform/ipmi_test_data.py self.assertEqual(3, len(sensors['Current'])) sensor = sensors['Current']['Pwr Consumption (0x76)'] self.assertEqual('154 (+/- 0) Watts', sensor['Sensor Reading']) def test_read_sensor_fan(self): sensors = self.ipmi.read_sensor_any('Fan') # only Fan data returned. self.assertIn('Fan', sensors) self.assertEqual(1, len(sensors)) # 2 sensor data in total. 
# Check ceilometer/tests/ipmi/platform/ipmi_test_data.py self.assertEqual(4, len(sensors['Fan'])) sensor = sensors['Fan']['System Fan 2 (0x32)'] self.assertEqual('4704 (+/- 0) RPM', sensor['Sensor Reading']) class TestNonIPMISensor(base.BaseTestCase): def setUp(self): super().setUp() ipmitool.ipmi = mock.Mock(side_effect=fake_utils.execute_without_ipmi) self.ipmi = ipmi_sensor.IPMISensor() @classmethod def tearDownClass(cls): # reset inited to force an initialization of singleton for next test ipmi_sensor.IPMISensor()._inited = False super().tearDownClass() def test_read_sensor_temperature(self): sensors = self.ipmi.read_sensor_any('Temperature') self.assertFalse(self.ipmi.ipmi_support) # Non-IPMI platform return empty data self.assertEqual({}, sensors) def test_read_sensor_voltage(self): sensors = self.ipmi.read_sensor_any('Voltage') # Non-IPMI platform return empty data self.assertEqual({}, sensors) def test_read_sensor_current(self): sensors = self.ipmi.read_sensor_any('Current') # Non-IPMI platform return empty data self.assertEqual({}, sensors) def test_read_sensor_fan(self): sensors = self.ipmi.read_sensor_any('Fan') # Non-IPMI platform return empty data self.assertEqual({}, sensors) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7979414 ceilometer-24.1.0.dev59/ceilometer/tests/unit/ipmi/pollsters/0000775000175100017510000000000015033033521023222 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/ipmi/pollsters/__init__.py0000664000175100017510000000000015033033467025332 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/ipmi/pollsters/base.py0000664000175100017510000000454715033033467024531 0ustar00mylesmyles# Copyright 2014 Intel # # Licensed under the Apache License, Version 2.0 (the "License"); you 
may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from unittest import mock import fixtures from ceilometer.polling import manager from ceilometer import service from ceilometer.tests import base class TestPollsterBase(base.BaseTestCase, metaclass=abc.ABCMeta): def setUp(self): super().setUp() self.CONF = service.prepare_service([], []) def fake_data(self): """Fake data used for test.""" return None def fake_sensor_data(self, sensor_type): """Fake sensor data used for test.""" return None @abc.abstractmethod def make_pollster(self): """Produce right pollster for test.""" def _test_get_samples(self): nm = mock.Mock() nm.read_inlet_temperature.side_effect = self.fake_data nm.read_outlet_temperature.side_effect = self.fake_data nm.read_power_all.side_effect = self.fake_data nm.read_airflow.side_effect = self.fake_data nm.read_cups_index.side_effect = self.fake_data nm.read_cups_utilization.side_effect = self.fake_data nm.read_sensor_any.side_effect = self.fake_sensor_data self.useFixture(fixtures.MockPatch( 'ceilometer.ipmi.platform.ipmi_sensor.IPMISensor', return_value=nm)) self.mgr = manager.AgentManager(0, self.CONF, ['ipmi']) self.pollster = self.make_pollster() def _verify_metering(self, length, expected_vol=None, node=None): cache = {} resources = ['local_host'] samples = list(self.pollster.get_samples(self.mgr, cache, resources)) self.assertEqual(length, len(samples)) if expected_vol: self.assertTrue(any(s.volume == expected_vol for s in samples)) if node: self.assertTrue(any(s.resource_metadata['node'] == node for s in samples)) 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/ipmi/pollsters/test_sensor.py0000664000175100017510000001057315033033467026163 0ustar00mylesmyles# Copyright 2014 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.ipmi.pollsters import sensor from ceilometer.tests.unit.ipmi.notifications import ipmi_test_data from ceilometer.tests.unit.ipmi.pollsters import base TEMPERATURE_SENSOR_DATA = { 'Temperature': ipmi_test_data.TEMPERATURE_DATA } CURRENT_SENSOR_DATA = { 'Current': ipmi_test_data.CURRENT_DATA } FAN_SENSOR_DATA = { 'Fan': ipmi_test_data.FAN_DATA } FAN_SENSOR_DATA_PERCENT = { 'Fan': ipmi_test_data.FAN_DATA_PERCENT } VOLTAGE_SENSOR_DATA = { 'Voltage': ipmi_test_data.VOLTAGE_DATA } POWER_SENSOR_DATA = { 'Current': ipmi_test_data.POWER_DATA } MISSING_SENSOR_DATA = ipmi_test_data.MISSING_SENSOR['payload']['payload'] MALFORMED_SENSOR_DATA = ipmi_test_data.BAD_SENSOR['payload']['payload'] MISSING_ID_SENSOR_DATA = ipmi_test_data.NO_SENSOR_ID['payload']['payload'] class TestTemperatureSensorPollster(base.TestPollsterBase): def fake_sensor_data(self, sensor_type): return TEMPERATURE_SENSOR_DATA def make_pollster(self): return sensor.TemperatureSensorPollster(self.CONF) def test_get_samples(self): self._test_get_samples() self._verify_metering(10, float(32), self.CONF.host) class TestMissingSensorData(base.TestPollsterBase): def fake_sensor_data(self, sensor_type): return 
MISSING_SENSOR_DATA def make_pollster(self): return sensor.TemperatureSensorPollster(self.CONF) def test_get_samples(self): self._test_get_samples() self._verify_metering(0) class TestMalformedSensorData(base.TestPollsterBase): def fake_sensor_data(self, sensor_type): return MALFORMED_SENSOR_DATA def make_pollster(self): return sensor.TemperatureSensorPollster(self.CONF) def test_get_samples(self): self._test_get_samples() self._verify_metering(0) class TestMissingSensorId(base.TestPollsterBase): def fake_sensor_data(self, sensor_type): return MISSING_ID_SENSOR_DATA def make_pollster(self): return sensor.TemperatureSensorPollster(self.CONF) def test_get_samples(self): self._test_get_samples() self._verify_metering(0) class TestFanSensorPollster(base.TestPollsterBase): def fake_sensor_data(self, sensor_type): return FAN_SENSOR_DATA def make_pollster(self): return sensor.FanSensorPollster(self.CONF) def test_get_samples(self): self._test_get_samples() self._verify_metering(12, float(7140), self.CONF.host) class TestFanPercentSensorPollster(base.TestPollsterBase): def fake_sensor_data(self, sensor_type): return FAN_SENSOR_DATA_PERCENT def make_pollster(self): return sensor.FanSensorPollster(self.CONF) def test_get_samples(self): self._test_get_samples() self._verify_metering(1, float(47.04), self.CONF.host) class TestCurrentSensorPollster(base.TestPollsterBase): def fake_sensor_data(self, sensor_type): return CURRENT_SENSOR_DATA def make_pollster(self): return sensor.CurrentSensorPollster(self.CONF) def test_get_samples(self): self._test_get_samples() self._verify_metering(1, float(0.800), self.CONF.host) class TestVoltageSensorPollster(base.TestPollsterBase): def fake_sensor_data(self, sensor_type): return VOLTAGE_SENSOR_DATA def make_pollster(self): return sensor.VoltageSensorPollster(self.CONF) def test_get_samples(self): self._test_get_samples() self._verify_metering(4, float(3.309), self.CONF.host) class TestPowerSensorPollster(base.TestPollsterBase): def 
fake_sensor_data(self, sensor_type): return POWER_SENSOR_DATA def make_pollster(self): return sensor.PowerSensorPollster(self.CONF) def test_get_samples(self): self._test_get_samples() self._verify_metering(1, int(154), self.CONF.host) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7979414 ceilometer-24.1.0.dev59/ceilometer/tests/unit/meter/0000775000175100017510000000000015033033521021351 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/meter/__init__.py0000664000175100017510000000000015033033467023461 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/meter/test_meter_plugins.py0000664000175100017510000000622515033033467025655 0ustar00mylesmyles# # Copyright 2016 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from oslotest import base from ceilometer.event import trait_plugins class TestTimedeltaPlugin(base.BaseTestCase): def setUp(self): super().setUp() self.plugin = trait_plugins.TimedeltaPlugin() def test_timedelta_transformation(self): match_list = [('test.timestamp1', '2016-03-02T15:04:32'), ('test.timestamp2', '2016-03-02T16:04:32')] value = self.plugin.trait_values(match_list) self.assertEqual([3600], value) def test_timedelta_missing_field(self): match_list = [('test.timestamp1', '2016-03-02T15:04:32')] with mock.patch('%s.LOG' % self.plugin.trait_values.__module__) as log: self.assertEqual([None], self.plugin.trait_values(match_list)) log.warning.assert_called_once_with( 'Timedelta plugin is required two timestamp fields to create ' 'timedelta value.') def test_timedelta_exceed_field(self): match_list = [('test.timestamp1', '2016-03-02T15:04:32'), ('test.timestamp2', '2016-03-02T16:04:32'), ('test.timestamp3', '2016-03-02T16:10:32')] with mock.patch('%s.LOG' % self.plugin.trait_values.__module__) as log: self.assertEqual([None], self.plugin.trait_values(match_list)) log.warning.assert_called_once_with( 'Timedelta plugin is required two timestamp fields to create ' 'timedelta value.') def test_timedelta_invalid_timestamp(self): match_list = [('test.timestamp1', '2016-03-02T15:04:32'), ('test.timestamp2', '2016-03-02T15:004:32')] with mock.patch('%s.LOG' % self.plugin.trait_values.__module__) as log: self.assertEqual([None], self.plugin.trait_values(match_list)) msg = log.warning._mock_call_args[0][0] self.assertTrue(msg.startswith('Failed to parse date from set ' 'fields, both fields ') ) def test_timedelta_reverse_timestamp_order(self): match_list = [('test.timestamp1', '2016-03-02T15:15:32'), ('test.timestamp2', '2016-03-02T15:10:32')] value = self.plugin.trait_values(match_list) self.assertEqual([300], value) def test_timedelta_precise_difference(self): match_list = [('test.timestamp1', '2016-03-02T15:10:32.786893'), 
('test.timestamp2', '2016-03-02T15:10:32.786899')] value = self.plugin.trait_values(match_list) self.assertEqual([0.000006], value) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/meter/test_notifications.py0000664000175100017510000012144415033033467025652 0ustar00mylesmyles# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for ceilometer.meter.notifications""" import copy from unittest import mock import fixtures from oslo_cache import core as cache from oslo_config import fixture as config_fixture from oslo_utils import fileutils import yaml from ceilometer import declarative from ceilometer.meter import notifications from ceilometer import service as ceilometer_service from ceilometer.tests import base as test NOTIFICATION = { 'event_type': 'test.create', 'metadata': {'timestamp': '2015-06-19T09:19:35.786893', 'message_id': '939823de-c242-45a2-a399-083f4d6a8c3e'}, 'payload': {'user_id': 'e1d870e51c7340cb9d555b15cbfcaec2', 'resource_id': 'bea70e51c7340cb9d555b15cbfcaec23', 'timestamp': '2015-06-19T09:19:35.785330', 'created_at': '2015-06-19T09:25:35.785330', 'launched_at': '2015-06-19T09:25:40.785330', 'message_signature': 'fake_signature1', 'resource_metadata': {'foo': 'bar'}, 'source': '30be1fc9a03c4e94ab05c403a8a377f2: openstack', 'volume': 1.0, 'project_id': '30be1fc9a03c4e94ab05c403a8a377f2', }, 'ctxt': {'tenant': '30be1fc9a03c4e94ab05c403a8a377f2', 'request_id': 
'req-da91b4bf-d2b5-43ae-8b66-c7752e72726d', 'user': 'e1d870e51c7340cb9d555b15cbfcaec2'}, 'publisher_id': "foo123" } USER_META = { 'event_type': 'test.create', 'metadata': {'timestamp': '2015-06-19T09:19:35.786893', 'message_id': '939823de-c242-45a2-a399-083f4d6a8c3e'}, 'payload': {'user_id': 'e1d870e51c7340cb9d555b15cbfcaec2', 'resource_id': 'bea70e51c7340cb9d555b15cbfcaec23', 'timestamp': '2015-06-19T09:19:35.785330', 'created_at': '2015-06-19T09:25:35.785330', 'launched_at': '2015-06-19T09:25:40.785330', 'message_signature': 'fake_signature1', 'resource_metadata': {'foo': 'bar'}, 'source': '30be1fc9a03c4e94ab05c403a8a377f2: openstack', 'volume': 1.0, 'project_id': '30be1fc9a03c4e94ab05c403a8a377f2', 'metadata': {'metering.xyz': 'abc', 'ignore': 'this'}, }, 'ctxt': {'tenant': '30be1fc9a03c4e94ab05c403a8a377f2', 'request_id': 'req-da91b4bf-d2b5-43ae-8b66-c7752e72726d', 'user': 'e1d870e51c7340cb9d555b15cbfcaec2'}, 'publisher_id': "foo123" } MIDDLEWARE_EVENT = { 'ctxt': {'request_id': 'req-a8bfa89b-d28b-4b95-9e4b-7d7875275650', 'quota_class': None, 'service_catalog': [], 'auth_token': None, 'user_id': None, 'is_admin': True, 'user': None, 'remote_address': None, 'roles': [], 'timestamp': '2013-07-29T06:51:34.348091', 'project_name': None, 'read_deleted': 'no', 'tenant': None, 'instance_lock_checked': False, 'project_id': None, 'user_name': None}, 'event_type': 'objectstore.http.request', 'publisher_id': 'ceilometermiddleware', 'metadata': {'message_id': '6eccedba-120e-4db8-9735-2ad5f061e5ee', 'timestamp': '2013-07-29T06:51:34.474815+00:00', '_unique_id': '0ee26117077648e18d88ac76e28a72e2'}, 'payload': { 'typeURI': 'http: //schemas.dmtf.org/cloud/audit/1.0/event', 'eventTime': '2013-07-29T06:51:34.474815+00:00', 'target': { 'action': 'get', 'typeURI': 'service/storage/object', 'id': 'account', 'metadata': { 'path': '/1.0/CUSTOM_account/container/obj', 'version': '1.0', 'container': 'container', 'object': 'obj' } }, 'observer': { 'id': 'target' }, 'eventType': 
'activity', 'measurements': [ { 'metric': { 'metricId': 'openstack: uuid', 'name': 'storage.objects.outgoing.bytes', 'unit': 'B' }, 'result': 28 }, { 'metric': { 'metricId': 'openstack: uuid2', 'name': 'storage.objects.incoming.bytes', 'unit': 'B' }, 'result': 1 } ], 'initiator': { 'typeURI': 'service/security/account/user', 'project_id': None, 'id': 'openstack: 288f6260-bf37-4737-a178-5038c84ba244' }, 'action': 'read', 'outcome': 'success', 'id': 'openstack: 69972bb6-14dd-46e4-bdaf-3148014363dc' } } FULL_MULTI_MSG = { 'event_type': 'full.sample', 'payload': [{ 'counter_name': 'instance1', 'user_id': 'user1', 'user_name': 'fake-name', 'resource_id': 'res1', 'counter_unit': 'ns', 'counter_volume': 28.0, 'project_id': 'proj1', 'project_name': 'fake-name', 'counter_type': 'gauge' }, { 'counter_name': 'instance2', 'user_id': 'user2', 'user_name': 'fake-name', 'resource_id': 'res2', 'counter_unit': '%', 'counter_volume': 1.0, 'project_id': 'proj2', 'project_name': 'fake-name', 'counter_type': 'delta' }], 'ctxt': {'domain': None, 'request_id': 'req-da91b4bf-d2b5-43ae-8b66-c7752e72726d', 'auth_token': None, 'read_only': False, 'resource_uuid': None, 'user_identity': 'fake_user_identity---', 'show_deleted': False, 'tenant': '30be1fc9a03c4e94ab05c403a8a377f2', 'is_admin': True, 'project_domain': None, 'user': 'e1d870e51c7340cb9d555b15cbfcaec2', 'user_domain': None}, 'publisher_id': 'ceilometer.api', 'metadata': {'message_id': '939823de-c242-45a2-a399-083f4d6a8c3e', 'timestamp': '2015-06-19T09:19:35.786893'}, } METRICS_UPDATE = { 'event_type': 'compute.metrics.update', 'payload': { 'metrics': [ {'timestamp': '2013-07-29T06:51:34.472416', 'name': 'cpu.frequency', 'value': 1600, 'source': 'libvirt.LibvirtDriver'}, {'timestamp': '2013-07-29T06:51:34.472416', 'name': 'cpu.user.time', 'value': 17421440000000, 'source': 'libvirt.LibvirtDriver'}, {'timestamp': '2013-07-29T06:51:34.472416', 'name': 'cpu.kernel.time', 'value': 7852600000000, 'source': 'libvirt.LibvirtDriver'}, 
{'timestamp': '2013-07-29T06:51:34.472416', 'name': 'cpu.idle.time', 'value': 1307374400000000, 'source': 'libvirt.LibvirtDriver'}, {'timestamp': '2013-07-29T06:51:34.472416', 'name': 'cpu.iowait.time', 'value': 11697470000000, 'source': 'libvirt.LibvirtDriver'}, {'timestamp': '2013-07-29T06:51:34.472416', 'name': 'cpu.user.percent', 'value': 0.012959045637294348, 'source': 'libvirt.LibvirtDriver'}, {'timestamp': '2013-07-29T06:51:34.472416', 'name': 'cpu.kernel.percent', 'value': 0.005841204961898534, 'source': 'libvirt.LibvirtDriver'}, {'timestamp': '2013-07-29T06:51:34.472416', 'name': 'cpu.idle.percent', 'value': 0.9724985141658965, 'source': 'libvirt.LibvirtDriver'}, {'timestamp': '2013-07-29T06:51:34.472416', 'name': 'cpu.iowait.percent', 'value': 0.008701235234910634, 'source': 'libvirt.LibvirtDriver'}, {'timestamp': '2013-07-29T06:51:34.472416', 'name': 'cpu.percent', 'value': 0.027501485834103515, 'source': 'libvirt.LibvirtDriver'}], 'nodename': 'tianst.sh.intel.com', 'host': 'tianst', 'host_id': '10.0.1.1'}, 'publisher_id': 'compute.tianst.sh.intel.com', 'metadata': {'message_id': '6eccedba-120e-4db8-9735-2ad5f061e5ee', 'timestamp': '2013-07-29 06:51:34.474815', '_unique_id': '0ee26117077648e18d88ac76e28a72e2'}, 'ctxt': {'request_id': 'req-a8bfa89b-d28b-4b95-9e4b-7d7875275650', 'quota_class': None, 'service_catalog': [], 'auth_token': None, 'user_id': None, 'is_admin': True, 'user': None, 'remote_address': None, 'roles': [], 'timestamp': '2013-07-29T06:51:34.348091', 'project_name': None, 'read_deleted': 'no', 'tenant': None, 'instance_lock_checked': False, 'project_id': None, 'user_name': None} } class TestMeterDefinition(test.BaseTestCase): def test_config_definition(self): cfg = dict(name="test", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id") conf = ceilometer_service.prepare_service([], []) handler = notifications.MeterDefinition(cfg, conf, 
mock.Mock()) self.assertTrue(handler.match_type("test.create")) sample = list(handler.to_samples(NOTIFICATION))[0] self.assertEqual(1.0, sample["volume"]) self.assertEqual("bea70e51c7340cb9d555b15cbfcaec23", sample["resource_id"]) self.assertEqual("30be1fc9a03c4e94ab05c403a8a377f2", sample["project_id"]) def test_config_required_missing_fields(self): cfg = dict() conf = ceilometer_service.prepare_service([], []) try: notifications.MeterDefinition(cfg, conf, mock.Mock()) except declarative.DefinitionException as e: self.assertIn("Required fields ['name', 'type', 'event_type'," " 'unit', 'volume', 'resource_id']" " not specified", str(e)) def test_bad_type_cfg_definition(self): cfg = dict(name="test", type="foo", event_type="bar.create", unit="foo", volume="bar", resource_id="bea70e51c7340cb9d555b15cbfcaec23") conf = ceilometer_service.prepare_service([], []) try: notifications.MeterDefinition(cfg, conf, mock.Mock()) except declarative.DefinitionException as e: self.assertIn("Invalid type foo specified", str(e)) class CacheConfFixture(config_fixture.Config): def setUp(self): super().setUp() self.conf = ceilometer_service.\ prepare_service(argv=[], config_files=[]) cache.configure(self.conf) class TestMeterProcessing(test.BaseTestCase): def setUp(self): super().setUp() self.CONF = ceilometer_service.prepare_service([], []) dict_conf_fixture = CacheConfFixture(self.CONF) self.useFixture(dict_conf_fixture) dict_conf_fixture.config(enabled=True, group='cache') dict_conf_fixture.config(expiration_time=600, backend='oslo_cache.dict', group='cache') dict_conf_fixture.config(identity_name_discovery=True, group='polling') self.CONF = dict_conf_fixture.conf self.path = self.useFixture(fixtures.TempDir()).path self.handler = notifications.ProcessMeterNotifications( self.CONF, mock.Mock()) def _load_meter_def_file(self, cfgs=None): self.CONF.set_override('meter_definitions_dirs', [self.path], group='meter') cfgs = cfgs or [] if not isinstance(cfgs, list): cfgs = [cfgs] 
meter_cfg_files = list() for cfg in cfgs: cfg = cfg.encode('utf-8') meter_cfg_files.append(fileutils.write_to_tempfile(content=cfg, path=self.path, prefix="meters", suffix=".yaml")) self.handler.definitions = self.handler._load_definitions() @mock.patch('ceilometer.meter.notifications.LOG') def test_bad_meter_definition_skip(self, LOG): cfg = yaml.dump( {'metric': [dict(name="good_test_1", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id"), dict(name="bad_test_2", type="bad_type", event_type="bar.create", unit="foo", volume="bar", resource_id="bea70e51c7340cb9d555b15cbfcaec23"), dict(name="good_test_3", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file(cfg) self.assertEqual(2, len(self.handler.definitions)) args, kwargs = LOG.error.call_args_list[0] self.assertEqual("Error loading meter definition: %s", args[0]) self.assertTrue(args[1].endswith("Invalid type bad_type specified")) def test_jsonpath_values_parsed(self): cfg = yaml.dump( {'metric': [dict(name="test1", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(NOTIFICATION)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() self.assertEqual('test1', s1['name']) self.assertEqual(1.0, s1['volume']) self.assertEqual('bea70e51c7340cb9d555b15cbfcaec23', s1['resource_id']) self.assertEqual('30be1fc9a03c4e94ab05c403a8a377f2', s1['project_id']) def test_multiple_meter(self): cfg = yaml.dump( {'metric': [dict(name="test1", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id"), dict(name="test2", event_type="test.create", type="delta", 
unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file(cfg) data = list(self.handler.build_sample(NOTIFICATION)) self.assertEqual(2, len(data)) expected_names = ['test1', 'test2'] for s in data: self.assertIn(s.as_dict()['name'], expected_names) def test_unmatched_meter(self): cfg = yaml.dump( {'metric': [dict(name="test1", event_type="test.update", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(NOTIFICATION)) self.assertEqual(0, len(c)) def test_regex_match_meter(self): cfg = yaml.dump( {'metric': [dict(name="test1", event_type="test.*", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(NOTIFICATION)) self.assertEqual(1, len(c)) def test_default_timestamp(self): event = copy.deepcopy(MIDDLEWARE_EVENT) del event['payload']['measurements'][1] cfg = yaml.dump( {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", event_type="objectstore.http.request", type="delta", unit="$.payload.measurements.[*].metric.[*].unit", volume="$.payload.measurements.[*].result", resource_id="$.payload.target_id", project_id="$.payload.initiator.project_id", multi="name")]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(event)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() self.assertEqual(MIDDLEWARE_EVENT['metadata']['timestamp'], s1['timestamp']) def test_custom_timestamp(self): event = copy.deepcopy(MIDDLEWARE_EVENT) del event['payload']['measurements'][1] cfg = yaml.dump( {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", event_type="objectstore.http.request", type="delta", unit="$.payload.measurements.[*].metric.[*].unit", volume="$.payload.measurements.[*].result", 
resource_id="$.payload.target_id", project_id="$.payload.initiator.project_id", multi="name", timestamp='$.payload.eventTime')]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(event)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() self.assertEqual(MIDDLEWARE_EVENT['payload']['eventTime'], s1['timestamp']) def test_custom_timestamp_expr_meter(self): cfg = yaml.dump( {'metric': [dict(name='compute.node.cpu.frequency', event_type="compute.metrics.update", type='gauge', unit="ns", volume="$.payload.metrics[?(@.name='cpu.frequency')]" ".value", resource_id="'prefix-' + $.payload.nodename", timestamp="$.payload.metrics" "[?(@.name='cpu.frequency')].timestamp")]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(METRICS_UPDATE)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() self.assertEqual('compute.node.cpu.frequency', s1['name']) self.assertEqual("2013-07-29T06:51:34.472416+00:00", s1['timestamp']) def test_default_metadata(self): cfg = yaml.dump( {'metric': [dict(name="test1", event_type="test.*", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(NOTIFICATION)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() meta = NOTIFICATION['payload'].copy() meta['host'] = NOTIFICATION['publisher_id'] meta['event_type'] = NOTIFICATION['event_type'] self.assertEqual(meta, s1['resource_metadata']) def test_datetime_plugin(self): cfg = yaml.dump( {'metric': [dict(name="test1", event_type="test.*", type="gauge", unit="sec", volume={"fields": ["$.payload.created_at", "$.payload.launched_at"], "plugin": "timedelta"}, resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(NOTIFICATION)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() self.assertEqual(5.0, s1['volume']) def test_custom_metadata(self): cfg = yaml.dump( 
{'metric': [dict(name="test1", event_type="test.*", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id", metadata={'proj': '$.payload.project_id', 'dict': '$.payload.resource_metadata'})]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(NOTIFICATION)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() meta = {'proj': s1['project_id'], 'dict': NOTIFICATION['payload']['resource_metadata']} self.assertEqual(meta, s1['resource_metadata']) def test_user_meta(self): cfg = yaml.dump( {'metric': [dict(name="test1", event_type="test.*", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id", user_metadata="$.payload.metadata",)]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(USER_META)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() meta = {'user_metadata': {'xyz': 'abc'}} self.assertEqual(meta, s1['resource_metadata']) def test_user_meta_and_custom(self): cfg = yaml.dump( {'metric': [dict(name="test1", event_type="test.*", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id", user_metadata="$.payload.metadata", metadata={'proj': '$.payload.project_id'})]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(USER_META)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() meta = {'user_metadata': {'xyz': 'abc'}, 'proj': s1['project_id']} self.assertEqual(meta, s1['resource_metadata']) def test_multi_match_event_meter(self): cfg = yaml.dump( {'metric': [dict(name="test1", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id"), dict(name="test2", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file(cfg) c = 
list(self.handler.build_sample(NOTIFICATION)) self.assertEqual(2, len(c)) def test_multi_meter_payload(self): cfg = yaml.dump( {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", event_type="objectstore.http.request", type="delta", unit="$.payload.measurements.[*].metric.[*].unit", volume="$.payload.measurements.[*].result", resource_id="$.payload.target_id", project_id="$.payload.initiator.project_id", lookup=["name", "volume", "unit"])]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(MIDDLEWARE_EVENT)) self.assertEqual(2, len(c)) s1 = c[0].as_dict() self.assertEqual('storage.objects.outgoing.bytes', s1['name']) self.assertEqual(28, s1['volume']) self.assertEqual('B', s1['unit']) s2 = c[1].as_dict() self.assertEqual('storage.objects.incoming.bytes', s2['name']) self.assertEqual(1, s2['volume']) self.assertEqual('B', s2['unit']) def test_multi_meter_payload_single(self): event = copy.deepcopy(MIDDLEWARE_EVENT) del event['payload']['measurements'][1] cfg = yaml.dump( {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", event_type="objectstore.http.request", type="delta", unit="$.payload.measurements.[*].metric.[*].unit", volume="$.payload.measurements.[*].result", resource_id="$.payload.target_id", project_id="$.payload.initiator.project_id", lookup=["name", "unit"])]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(event)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() self.assertEqual('storage.objects.outgoing.bytes', s1['name']) self.assertEqual(28, s1['volume']) self.assertEqual('B', s1['unit']) def test_multi_meter_payload_none(self): event = copy.deepcopy(MIDDLEWARE_EVENT) del event['payload']['measurements'] cfg = yaml.dump( {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", event_type="objectstore.http.request", type="delta", unit="$.payload.measurements.[*].metric.[*].unit", volume="$.payload.measurements.[*].result", resource_id="$.payload.target_id", 
project_id="$.payload.initiator.project_id", lookup="name")]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(event)) self.assertEqual(0, len(c)) @mock.patch( 'ceilometer.cache_utils.CacheClient._resolve_uuid_from_keystone' ) def test_multi_meter_payload_all_multi(self, resolved_uuid): resolved_uuid.return_value = "fake-name" cfg = yaml.dump( {'metric': [dict(name="$.payload.[*].counter_name", event_type="full.sample", type="$.payload.[*].counter_type", unit="$.payload.[*].counter_unit", volume="$.payload.[*].counter_volume", resource_id="$.payload.[*].resource_id", project_id="$.payload.[*].project_id", user_id="$.payload.[*].user_id", lookup=['name', 'type', 'unit', 'volume', 'resource_id', 'project_id', 'user_id'])]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(FULL_MULTI_MSG)) self.assertEqual(2, len(c)) msg = FULL_MULTI_MSG['payload'] for idx, val in enumerate(c): s1 = val.as_dict() self.assertEqual(msg[idx]['counter_name'], s1['name']) self.assertEqual(msg[idx]['counter_volume'], s1['volume']) self.assertEqual(msg[idx]['counter_unit'], s1['unit']) self.assertEqual(msg[idx]['counter_type'], s1['type']) self.assertEqual(msg[idx]['resource_id'], s1['resource_id']) self.assertEqual(msg[idx]['project_id'], s1['project_id']) self.assertEqual(msg[idx]['user_id'], s1['user_id']) self.assertEqual(msg[idx]['project_name'], s1['project_name']) self.assertEqual(msg[idx]['user_name'], s1['user_name']) @mock.patch('ceilometer.meter.notifications.LOG') def test_multi_meter_payload_invalid_missing(self, LOG): event = copy.deepcopy(MIDDLEWARE_EVENT) del event['payload']['measurements'][0]['result'] del event['payload']['measurements'][1]['result'] cfg = yaml.dump( {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", event_type="objectstore.http.request", type="delta", unit="$.payload.measurements.[*].metric.[*].unit", volume="$.payload.measurements.[*].result", resource_id="$.payload.target_id", 
project_id="$.payload.initiator.project_id", lookup=["name", "unit", "volume"])]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(event)) self.assertEqual(0, len(c)) LOG.warning.assert_called_with('Only 0 fetched meters contain ' '"volume" field instead of 2.') @mock.patch('ceilometer.meter.notifications.LOG') def test_multi_meter_payload_invalid_short(self, LOG): event = copy.deepcopy(MIDDLEWARE_EVENT) del event['payload']['measurements'][0]['result'] cfg = yaml.dump( {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", event_type="objectstore.http.request", type="delta", unit="$.payload.measurements.[*].metric.[*].unit", volume="$.payload.measurements.[*].result", resource_id="$.payload.target_id", project_id="$.payload.initiator.project_id", lookup=["name", "unit", "volume"])]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(event)) self.assertEqual(0, len(c)) LOG.warning.assert_called_with('Only 1 fetched meters contain ' '"volume" field instead of 2.') def test_arithmetic_expr_meter(self): cfg = yaml.dump( {'metric': [dict(name='compute.node.cpu.percent', event_type="compute.metrics.update", type='gauge', unit="percent", volume="$.payload.metrics[" "?(@.name='cpu.percent')].value" " * 100", resource_id="$.payload.host + '_'" " + $.payload.nodename")]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(METRICS_UPDATE)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() self.assertEqual('compute.node.cpu.percent', s1['name']) self.assertEqual(2.7501485834103514, s1['volume']) self.assertEqual("tianst_tianst.sh.intel.com", s1['resource_id']) def test_string_expr_meter(self): cfg = yaml.dump( {'metric': [dict(name='compute.node.cpu.frequency', event_type="compute.metrics.update", type='gauge', unit="ns", volume="$.payload.metrics[?(@.name='cpu.frequency')]" ".value", resource_id="$.payload.host + '_'" " + $.payload.nodename")]}) self._load_meter_def_file(cfg) c = 
list(self.handler.build_sample(METRICS_UPDATE)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() self.assertEqual('compute.node.cpu.frequency', s1['name']) self.assertEqual(1600, s1['volume']) self.assertEqual("tianst_tianst.sh.intel.com", s1['resource_id']) def test_prefix_expr_meter(self): cfg = yaml.dump( {'metric': [dict(name='compute.node.cpu.frequency', event_type="compute.metrics.update", type='gauge', unit="ns", volume="$.payload.metrics[?(@.name='cpu.frequency')]" ".value", resource_id="'prefix-' + $.payload.nodename")]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(METRICS_UPDATE)) self.assertEqual(1, len(c)) s1 = c[0].as_dict() self.assertEqual('compute.node.cpu.frequency', s1['name']) self.assertEqual(1600, s1['volume']) self.assertEqual("prefix-tianst.sh.intel.com", s1['resource_id']) def test_duplicate_meter(self): cfg = yaml.dump( {'metric': [dict(name="test1", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id"), dict(name="test1", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file(cfg) c = list(self.handler.build_sample(NOTIFICATION)) self.assertEqual(1, len(c)) def test_multi_files_multi_meters(self): cfg1 = yaml.dump( {'metric': [dict(name="test1", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) cfg2 = yaml.dump( {'metric': [dict(name="test2", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file([cfg1, cfg2]) data = list(self.handler.build_sample(NOTIFICATION)) self.assertEqual(2, len(data)) expected_names = ['test1', 'test2'] for s in data: self.assertIn(s.as_dict()['name'], 
expected_names) def test_multi_files_duplicate_meter(self): cfg1 = yaml.dump( {'metric': [dict(name="test", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) cfg2 = yaml.dump( {'metric': [dict(name="test", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file([cfg1, cfg2]) data = list(self.handler.build_sample(NOTIFICATION)) self.assertEqual(1, len(data)) self.assertEqual(data[0].as_dict()['name'], 'test') def test_multi_files_empty_payload(self): event = copy.deepcopy(MIDDLEWARE_EVENT) del event['payload']['measurements'] cfg1 = yaml.dump( {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", event_type="objectstore.http.request", type="delta", unit="$.payload.measurements.[*].metric.[*].unit", volume="$.payload.measurements.[*].result", resource_id="$.payload.target_id", project_id="$.payload.initiator.project_id", lookup="name")]}) cfg2 = yaml.dump( {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", event_type="objectstore.http.request", type="delta", unit="$.payload.measurements.[*].metric.[*].unit", volume="$.payload.measurements.[*].result", resource_id="$.payload.target_id", project_id="$.payload.initiator.project_id", lookup="name")]}) self._load_meter_def_file([cfg1, cfg2]) data = list(self.handler.build_sample(event)) self.assertEqual(0, len(data)) def test_multi_files_unmatched_meter(self): cfg1 = yaml.dump( {'metric': [dict(name="test1", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) cfg2 = yaml.dump( {'metric': [dict(name="test2", event_type="test.update", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) 
self._load_meter_def_file([cfg1, cfg2]) data = list(self.handler.build_sample(NOTIFICATION)) self.assertEqual(1, len(data)) self.assertEqual(data[0].as_dict()['name'], 'test1') @mock.patch('ceilometer.meter.notifications.LOG') def test_multi_files_bad_meter(self, LOG): cfg1 = yaml.dump( {'metric': [dict(name="test1", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id"), dict(name="bad_test", type="bad_type", event_type="bar.create", unit="foo", volume="bar", resource_id="bea70e51c7340cb9d555b15cbfcaec23")]}) cfg2 = yaml.dump( {'metric': [dict(name="test2", event_type="test.create", type="delta", unit="B", volume="$.payload.volume", resource_id="$.payload.resource_id", project_id="$.payload.project_id")]}) self._load_meter_def_file([cfg1, cfg2]) data = list(self.handler.build_sample(NOTIFICATION)) self.assertEqual(2, len(data)) expected_names = ['test1', 'test2'] for s in data: self.assertIn(s.as_dict()['name'], expected_names) args, kwargs = LOG.error.call_args_list[0] self.assertEqual("Error loading meter definition: %s", args[0]) self.assertTrue(args[1].endswith("Invalid type bad_type specified")) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7979414 ceilometer-24.1.0.dev59/ceilometer/tests/unit/network/0000775000175100017510000000000015033033521021726 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/network/__init__.py0000664000175100017510000000000015033033467024036 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7979414 ceilometer-24.1.0.dev59/ceilometer/tests/unit/network/services/0000775000175100017510000000000015033033521023551 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 
ceilometer-24.1.0.dev59/ceilometer/tests/unit/network/services/__init__.py0000664000175100017510000000000015033033467025661 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/network/services/test_fwaas.py0000664000175100017510000001563115033033467026302 0ustar00mylesmyles# # Copyright 2014 Cisco Systems,Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import fixtures from oslotest import base from ceilometer.network.services import discovery from ceilometer.network.services import fwaas from ceilometer.polling import manager from ceilometer.polling import plugin_base from ceilometer import service class _BaseTestFWPollster(base.BaseTestCase): def setUp(self): super().setUp() self.addCleanup(mock.patch.stopall) self.CONF = service.prepare_service([], []) self.manager = manager.AgentManager(0, self.CONF) plugin_base._get_keystone = mock.Mock() catalog = (plugin_base._get_keystone.session.auth.get_access. return_value.service_catalog) catalog.get_endpoints = mock.MagicMock( return_value={'network': mock.ANY}) class TestFirewallPollster(_BaseTestFWPollster): def setUp(self): super().setUp() self.pollster = fwaas.FirewallPollster(self.CONF) fake_fw = self.fake_get_fw_service() self.useFixture(fixtures.MockPatch('ceilometer.neutron_client.Client.' 
'firewall_get_all', return_value=fake_fw)) @staticmethod def fake_get_fw_service(): return [{'status': 'ACTIVE', 'name': 'myfw', 'description': '', 'admin_state_up': True, 'id': 'fdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', 'firewall_policy_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa'}, {'status': 'INACTIVE', 'name': 'myfw', 'description': '', 'admin_state_up': True, 'id': 'fdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', 'firewall_policy_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa'}, {'status': 'PENDING_CREATE', 'name': 'myfw', 'description': '', 'admin_state_up': True, 'id': 'fdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', 'firewall_policy_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa'}, {'status': 'error', 'name': 'myfw', 'description': '', 'admin_state_up': True, 'id': 'fdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', 'firewall_policy_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa'}, ] def test_fw_get_samples(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_fw_service())) self.assertEqual(4, len(samples)) for field in self.pollster.FIELDS: self.assertEqual(self.fake_get_fw_service()[0][field], samples[0].resource_metadata[field]) def test_vpn_volume(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_fw_service())) self.assertEqual(1, samples[0].volume) self.assertEqual(0, samples[1].volume) self.assertEqual(2, samples[2].volume) def test_get_vpn_meter_names(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_fw_service())) self.assertEqual({'network.services.firewall'}, {s.name for s in samples}) def test_vpn_discovery(self): discovered_fws = discovery.FirewallDiscovery( self.CONF).discover(self.manager) self.assertEqual(3, len(discovered_fws)) for vpn in self.fake_get_fw_service(): if 
vpn['status'] == 'error': self.assertNotIn(vpn, discovered_fws) else: self.assertIn(vpn, discovered_fws) class TestIPSecConnectionsPollster(_BaseTestFWPollster): def setUp(self): super().setUp() self.pollster = fwaas.FirewallPolicyPollster(self.CONF) fake_fw_policy = self.fake_get_fw_policy() self.useFixture(fixtures.MockPatch('ceilometer.neutron_client.Client.' 'fw_policy_get_all', return_value=fake_fw_policy)) @staticmethod def fake_get_fw_policy(): return [{'name': 'my_fw_policy', 'description': 'fw_policy', 'admin_state_up': True, 'tenant_id': 'abe3d818-fdcb-fg4b-de7f-6650dc8a9d7a', 'firewall_rules': [{'enabled': True, 'action': 'allow', 'ip_version': 4, 'protocol': 'tcp', 'destination_port': '80', 'source_ip_address': '10.24.4.2'}, {'enabled': True, 'action': 'deny', 'ip_version': 4, 'protocol': 'tcp', 'destination_port': '22'}], 'shared': True, 'audited': True, 'id': 'fdfbcec-fdcb-fg4b-de7f-6650dc8a9d7a'} ] def test_policy_get_samples(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_fw_policy())) self.assertEqual(1, len(samples)) for field in self.pollster.FIELDS: self.assertEqual(self.fake_get_fw_policy()[0][field], samples[0].resource_metadata[field]) def test_get_policy_meter_names(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_fw_policy())) self.assertEqual({'network.services.firewall.policy'}, {s.name for s in samples}) def test_fw_policy_discovery(self): discovered_policy = discovery.FirewallPolicyDiscovery( self.CONF).discover(self.manager) self.assertEqual(1, len(discovered_policy)) self.assertEqual(self.fake_get_fw_policy(), discovered_policy) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/network/services/test_vpnaas.py0000664000175100017510000002052415033033467026466 0ustar00mylesmyles# # Copyright 2014 Cisco Systems,Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import fixtures from oslotest import base from ceilometer.network.services import discovery from ceilometer.network.services import vpnaas from ceilometer.polling import manager from ceilometer.polling import plugin_base from ceilometer import service class _BaseTestVPNPollster(base.BaseTestCase): def setUp(self): super().setUp() self.addCleanup(mock.patch.stopall) self.CONF = service.prepare_service([], []) self.manager = manager.AgentManager(0, self.CONF) plugin_base._get_keystone = mock.Mock() catalog = (plugin_base._get_keystone.session.auth.get_access. return_value.service_catalog) catalog.get_endpoints = mock.MagicMock( return_value={'network': mock.ANY}) class TestVPNServicesPollster(_BaseTestVPNPollster): def setUp(self): super().setUp() self.pollster = vpnaas.VPNServicesPollster(self.CONF) self.fake_vpn = self.fake_get_vpn_service() self.useFixture(fixtures.MockPatch('ceilometer.neutron_client.Client.' 
'vpn_get_all', return_value=self.fake_vpn)) @staticmethod def fake_get_vpn_service(): return [{'status': 'ACTIVE', 'name': 'myvpn1', 'description': '', 'admin_state_up': True, 'id': 'fdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'router_id': 'ade3d818-fdcb-fg4b-de7f-6750dc8a9d7a'}, {'status': 'INACTIVE', 'name': 'myvpn2', 'description': '', 'admin_state_up': True, 'id': 'cdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'router_id': 'ade3d818-fdcb-fg4b-de7f-6750dc8a9d7a'}, {'status': 'PENDING_CREATE', 'name': 'myvpn3', 'description': '', 'id': 'bdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', 'admin_state_up': True, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'router_id': 'ade3d818-fdcb-fg4b-de7f-6750dc8a9d7a'}, {'status': 'error', 'name': 'myvpn4', 'description': '', 'id': 'edde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', 'admin_state_up': False, 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'router_id': 'ade3d818-fdcb-fg4b-de7f-6750dc8a9d7a'}, {'status': 'UNKNOWN', 'name': 'myvpn5', 'description': '', 'id': '34e6383a-b1ab-4602-b26a-a1ae7b759212', 'admin_state_up': False, 'subnet_id': '8c20bbbf-1409-4bc4-b652-3aeda66746c1', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'router_id': '0e5c9333-2ef5-4c90-9cca-5cc898515da4'}, {'status': None, 'name': 'myvpn6', 'description': '', 'id': '6e94ff61-8dea-4154-98f1-4020e4b2cecd', 'admin_state_up': False, 'subnet_id': '5e2a20c3-547a-43e4-90c5-26d32ea42d10', 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', 'router_id': '5b14df87-60c1-4fc7-8ad5-7811b2199c7f'}, ] def test_vpn_get_samples(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_vpn)) self.assertEqual(len(self.fake_vpn), len(samples)) 
self.assertEqual({vpn['id'] for vpn in self.fake_vpn}, {sample.resource_id for sample in samples}) samples_dict = {sample.resource_id: sample for sample in samples} for vpn in self.fake_vpn: sample = samples_dict[vpn['id']] for field in self.pollster.FIELDS: self.assertEqual(vpn[field], sample.resource_metadata[field]) def test_vpn_volume(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_vpn)) self.assertEqual(1, samples[0].volume) self.assertEqual(0, samples[1].volume) self.assertEqual(2, samples[2].volume) self.assertEqual(7, samples[3].volume) self.assertEqual(-1, samples[4].volume) self.assertEqual(-1, samples[5].volume) def test_get_vpn_meter_names(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_vpn)) self.assertEqual({'network.services.vpn'}, {s.name for s in samples}) def test_vpn_discovery(self): discovered_vpns = discovery.VPNServicesDiscovery( self.CONF).discover(self.manager) self.assertEqual(len(self.fake_vpn), len(discovered_vpns)) for vpn in self.fake_get_vpn_service(): self.assertIn(vpn, discovered_vpns) class TestIPSecConnectionsPollster(_BaseTestVPNPollster): def setUp(self): super().setUp() self.pollster = vpnaas.IPSecConnectionsPollster(self.CONF) fake_conns = self.fake_get_ipsec_connections() self.useFixture(fixtures.MockPatch('ceilometer.neutron_client.Client.' 
'ipsec_site_connections_get_all', return_value=fake_conns)) @staticmethod def fake_get_ipsec_connections(): return [{'name': 'connection1', 'description': 'Remote-connection1', 'peer_address': '192.168.1.10', 'peer_id': '192.168.1.10', 'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24'], 'mtu': 1500, 'psk': 'abcd', 'initiator': 'bi-directional', 'dpd': { 'action': 'hold', 'interval': 30, 'timeout': 120}, 'ikepolicy_id': 'ade3d818-fdcb-fg4b-de7f-4550dc8a9d7a', 'ipsecpolicy_id': 'fce3d818-fdcb-fg4b-de7f-7850dc8a9d7a', 'vpnservice_id': 'dce3d818-fdcb-fg4b-de7f-5650dc8a9d7a', 'admin_state_up': True, 'status': 'ACTIVE', 'tenant_id': 'abe3d818-fdcb-fg4b-de7f-6650dc8a9d7a', 'id': 'fdfbcec-fdcb-fg4b-de7f-6650dc8a9d7a'} ] def test_conns_get_samples(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_ipsec_connections())) self.assertEqual(1, len(samples)) for field in self.pollster.FIELDS: self.assertEqual(self.fake_get_ipsec_connections()[0][field], samples[0].resource_metadata[field]) def test_get_conns_meter_names(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_get_ipsec_connections())) self.assertEqual({'network.services.vpn.connections'}, {s.name for s in samples}) def test_conns_discovery(self): discovered_conns = discovery.IPSecConnectionsDiscovery( self.CONF).discover(self.manager) self.assertEqual(1, len(discovered_conns)) self.assertEqual(self.fake_get_ipsec_connections(), discovered_conns) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/network/test_floating_ip.py0000664000175100017510000001335315033033467025650 0ustar00mylesmyles# Copyright 2016 Sungard Availability Services # Copyright 2016 Red Hat # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import fixtures from oslotest import base from ceilometer.network import floatingip from ceilometer.network.services import discovery from ceilometer.polling import manager from ceilometer.polling import plugin_base from ceilometer import service class _BaseTestFloatingIPPollster(base.BaseTestCase): def setUp(self): super().setUp() self.CONF = service.prepare_service([], []) self.manager = manager.AgentManager(0, self.CONF) plugin_base._get_keystone = mock.Mock() class TestFloatingIPPollster(_BaseTestFloatingIPPollster): def setUp(self): super().setUp() self.pollster = floatingip.FloatingIPPollster(self.CONF) self.fake_fip = self.fake_get_fip_service() self.useFixture(fixtures.MockPatch('ceilometer.neutron_client.Client.' 
'fip_get_all', return_value=self.fake_fip)) @staticmethod def fake_get_fip_service(): return [{'router_id': 'e24f8a37-1bb7-49e4-833c-049bb21986d2', 'status': 'ACTIVE', 'tenant_id': '54a00c50ee4c4396b2f8dc220a2bed57', 'floating_network_id': 'f41f399e-d63e-47c6-9a19-21c4e4fbbba0', 'fixed_ip_address': '10.0.0.6', 'floating_ip_address': '65.79.162.11', 'port_id': '93a0d2c7-a397-444c-9d75-d2ac89b6f209', 'id': '18ca27bf-72bc-40c8-9c13-414d564ea367'}, {'router_id': 'astf8a37-1bb7-49e4-833c-049bb21986d2', 'status': 'DOWN', 'tenant_id': '34a00c50ee4c4396b2f8dc220a2bed57', 'floating_network_id': 'gh1f399e-d63e-47c6-9a19-21c4e4fbbba0', 'fixed_ip_address': '10.0.0.7', 'floating_ip_address': '65.79.162.12', 'port_id': '453a0d2c7-a397-444c-9d75-d2ac89b6f209', 'id': 'jkca27bf-72bc-40c8-9c13-414d564ea367'}, {'router_id': 'e2478937-1bb7-49e4-833c-049bb21986d2', 'status': 'error', 'tenant_id': '54a0gggg50ee4c4396b2f8dc220a2bed57', 'floating_network_id': 'po1f399e-d63e-47c6-9a19-21c4e4fbbba0', 'fixed_ip_address': '10.0.0.8', 'floating_ip_address': '65.79.162.13', 'port_id': '67a0d2c7-a397-444c-9d75-d2ac89b6f209', 'id': '90ca27bf-72bc-40c8-9c13-414d564ea367'}, {'router_id': 'a27ac630-939f-4e2e-bbc3-09a6b4f19a77', 'status': 'UNKNOWN', 'tenant_id': '54a0gggg50ee4c4396b2f8dc220a2bed57', 'floating_network_id': '4d0c3f4f-79c7-40ff-9b0d-6e3a396547db', 'fixed_ip_address': '10.0.0.9', 'floating_ip_address': '65.79.162.14', 'port_id': '59cc6efa-7c89-4730-b051-b15f594e6728', 'id': 'a8a11884-7666-4f35-901e-dbb84e7111b5'}, {'router_id': '7eb0adde-6c3b-4a77-9714-f718a17afb83', 'status': None, 'tenant_id': '54a0gggg50ee4c4396b2f8dc220a2bed57', 'floating_network_id': 'bd6290e6-b014-4cd3-91f0-7e8a1b4c26ab', 'fixed_ip_address': '10.0.0.10', 'floating_ip_address': '65.79.162.15', 'port_id': 'd3b9436d-4b2b-4832-852b-34df7513c935', 'id': '27c539ca-94ce-42fc-a639-1bf2c8690d76'}] def test_fip_get_samples(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_fip)) 
self.assertEqual(len(self.fake_fip), len(samples)) self.assertEqual({fip['id'] for fip in self.fake_fip}, {sample.resource_id for sample in samples}) samples_dict = {sample.resource_id: sample for sample in samples} for fip in self.fake_fip: sample = samples_dict[fip['id']] for field in self.pollster.FIELDS: self.assertEqual(fip[field], sample.resource_metadata[field]) def test_fip_volume(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_fip)) self.assertEqual(1, samples[0].volume) self.assertEqual(3, samples[1].volume) self.assertEqual(7, samples[2].volume) self.assertEqual(-1, samples[3].volume) self.assertEqual(-1, samples[4].volume) def test_get_fip_meter_names(self): samples = list(self.pollster.get_samples( self.manager, {}, resources=self.fake_fip)) self.assertEqual({'ip.floating'}, {s.name for s in samples}) def test_fip_discovery(self): discovered_fips = discovery.FloatingIPDiscovery( self.CONF).discover(self.manager) self.assertEqual(len(self.fake_fip), len(discovered_fips)) for fip in self.fake_fip: self.assertIn(fip, discovered_fips) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7979414 ceilometer-24.1.0.dev59/ceilometer/tests/unit/objectstore/0000775000175100017510000000000015033033521022560 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/objectstore/__init__.py0000664000175100017510000000000015033033467024670 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/objectstore/test_rgw.py0000664000175100017510000001674615033033467025017 0ustar00mylesmyles# Copyright 2015 Reliance Jio Infocomm Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections from unittest import mock import fixtures from keystoneauth1 import exceptions from oslotest import base import testscenarios.testcase from ceilometer.objectstore import rgw from ceilometer.objectstore import rgw_client from ceilometer.polling import manager from ceilometer import service bucket_list1 = [rgw_client.RGWAdminClient.Bucket('somefoo1', 10, 7)] bucket_list2 = [rgw_client.RGWAdminClient.Bucket('somefoo2', 2, 9)] bucket_list3 = [rgw_client.RGWAdminClient.Bucket('unlisted', 100, 100)] GET_BUCKETS = [('tenant-000', {'num_buckets': 2, 'size': 1042, 'num_objects': 1001, 'buckets': bucket_list1}), ('tenant-001', {'num_buckets': 2, 'size': 1042, 'num_objects': 1001, 'buckets': bucket_list2}), ('tenant-002-ignored', {'num_buckets': 2, 'size': 1042, 'num_objects': 1001, 'buckets': bucket_list3})] GET_USAGE = [('tenant-000', 10), ('tenant-001', 11), ('tenant-002-ignored', 12)] Tenant = collections.namedtuple('Tenant', 'id') ASSIGNED_TENANTS = [Tenant('tenant-000'), Tenant('tenant-001')] class TestManager(manager.AgentManager): def __init__(self, worker_id, conf): super().__init__(worker_id, conf) self._keystone = mock.Mock() self._catalog = (self._keystone.session.auth.get_access. return_value.service_catalog) self._catalog.url_for.return_value = 'http://foobar/endpoint' class TestRgwPollster(testscenarios.testcase.WithScenarios, base.BaseTestCase): # Define scenarios to run all of the tests against all of the # pollsters. 
scenarios = [ ('radosgw.objects', {'factory': rgw.ObjectsPollster}), ('radosgw.objects.size', {'factory': rgw.ObjectsSizePollster}), ('radosgw.objects.containers', {'factory': rgw.ObjectsContainersPollster}), ('radosgw.containers.objects', {'factory': rgw.ContainersObjectsPollster}), ('radosgw.containers.objects.size', {'factory': rgw.ContainersSizePollster}), ('radosgw.api.request', {'factory': rgw.UsagePollster}), ] @staticmethod def fake_ks_service_catalog_url_for(*args, **kwargs): raise exceptions.EndpointNotFound("Fake keystone exception") def fake_iter_accounts(self, ksclient, cache, tenants): tenant_ids = [t.id for t in tenants] for i in self.ACCOUNTS: if i[0] in tenant_ids: yield i def setUp(self): super().setUp() conf = service.prepare_service([], []) conf.set_override('radosgw', 'object-store', group='service_types') self.pollster = self.factory(conf) self.manager = TestManager(0, conf) if self.pollster.CACHE_KEY_METHOD == 'rgw.get_bucket': self.ACCOUNTS = GET_BUCKETS else: self.ACCOUNTS = GET_USAGE def tearDown(self): super().tearDown() rgw._Base._ENDPOINT = None def test_iter_accounts_no_cache(self): cache = {} with fixtures.MockPatchObject(self.factory, '_get_account_info', return_value=[]): data = list(self.pollster._iter_accounts(mock.Mock(), cache, ASSIGNED_TENANTS)) self.assertIn(self.pollster.CACHE_KEY_METHOD, cache) self.assertEqual([], data) def test_iter_accounts_cached(self): # Verify that if a method has already been called, _iter_accounts # uses the cached version and doesn't call rgw_clinet. 
mock_method = mock.Mock() mock_method.side_effect = AssertionError( 'should not be called', ) api_method = 'get_%s' % self.pollster.METHOD with fixtures.MockPatchObject(rgw_client.RGWAdminClient, api_method, new=mock_method): cache = {self.pollster.CACHE_KEY_METHOD: [self.ACCOUNTS[0]]} data = list(self.pollster._iter_accounts(mock.Mock(), cache, ASSIGNED_TENANTS)) self.assertEqual([self.ACCOUNTS[0]], data) def test_metering(self): with fixtures.MockPatchObject(self.factory, '_iter_accounts', side_effect=self.fake_iter_accounts): samples = list(self.pollster.get_samples(self.manager, {}, ASSIGNED_TENANTS)) self.assertEqual(2, len(samples), self.pollster.__class__) def test_get_meter_names(self): with fixtures.MockPatchObject(self.factory, '_iter_accounts', side_effect=self.fake_iter_accounts): samples = list(self.pollster.get_samples(self.manager, {}, ASSIGNED_TENANTS)) self.assertEqual({samples[0].name}, {s.name for s in samples}) def test_only_poll_assigned(self): mock_method = mock.MagicMock() endpoint = 'http://127.0.0.1:8000/admin' api_method = 'get_%s' % self.pollster.METHOD with fixtures.MockPatchObject(rgw_client.RGWAdminClient, api_method, new=mock_method): with fixtures.MockPatchObject( self.manager._catalog, 'url_for', return_value=endpoint): list(self.pollster.get_samples(self.manager, {}, ASSIGNED_TENANTS)) expected = [mock.call(t.id) for t in ASSIGNED_TENANTS] self.assertEqual(expected, mock_method.call_args_list) def test_get_endpoint_only_once(self): mock_url_for = mock.MagicMock() mock_url_for.return_value = '/endpoint' api_method = 'get_%s' % self.pollster.METHOD with fixtures.MockPatchObject(rgw_client.RGWAdminClient, api_method, new=mock.MagicMock()): with fixtures.MockPatchObject( self.manager._catalog, 'url_for', new=mock_url_for): list(self.pollster.get_samples(self.manager, {}, ASSIGNED_TENANTS)) list(self.pollster.get_samples(self.manager, {}, ASSIGNED_TENANTS)) self.assertEqual(1, mock_url_for.call_count) def test_endpoint_notfound(self): 
with fixtures.MockPatchObject( self.manager._catalog, 'url_for', side_effect=self.fake_ks_service_catalog_url_for): samples = list(self.pollster.get_samples(self.manager, {}, ASSIGNED_TENANTS)) self.assertEqual(0, len(samples)) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/objectstore/test_rgw_client.py0000664000175100017510000001706715033033467026352 0ustar00mylesmyles# Copyright (C) 2015 Reliance Jio Infocomm Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import json from unittest import mock from oslotest import base from ceilometer.objectstore import rgw_client RGW_ADMIN_BUCKETS = ''' [ { "max_marker": "", "ver": 2001, "usage": { "rgw.main": { "size_kb_actual": 16000, "num_objects": 1000, "size_kb": 1000 } }, "bucket": "somefoo", "owner": "admin", "master_ver": 0, "mtime": 1420176126, "marker": "default.4126.1", "bucket_quota": { "max_objects": -1, "enabled": false, "max_size_kb": -1 }, "id": "default.4126.1", "pool": ".rgw.buckets", "index_pool": ".rgw.buckets.index" }, { "max_marker": "", "ver": 3, "usage": { "rgw.main": { "size_kb_actual": 43, "num_objects": 1, "size_kb": 42 } }, "bucket": "somefoo31", "owner": "admin", "master_ver": 0, "mtime": 1420176134, "marker": "default.4126.5", "bucket_quota": { "max_objects": -1, "enabled": false, "max_size_kb": -1 }, "id": "default.4126.5", "pool": ".rgw.buckets", "index_pool": ".rgw.buckets.index" } ]''' RGW_ADMIN_USAGE = ''' { "entries": [ { "owner": "5f7fe2d5352e466f948f49341e33d107", "buckets": [ { "bucket": "", "time": "2015-01-23 09:00:00.000000Z", "epoch": 1422003600, "categories": [ { "category": "list_buckets", "bytes_sent": 46, "bytes_received": 0, "ops": 3, "successful_ops": 3}, { "category": "stat_account", "bytes_sent": 0, "bytes_received": 0, "ops": 1, "successful_ops": 1}]}, { "bucket": "foodsgh", "time": "2015-01-23 09:00:00.000000Z", "epoch": 1422003600, "categories": [ { "category": "create_bucket", "bytes_sent": 0, "bytes_received": 0, "ops": 1, "successful_ops": 1}, { "category": "get_obj", "bytes_sent": 0, "bytes_received": 0, "ops": 1, "successful_ops": 0}, { "category": "put_obj", "bytes_sent": 0, "bytes_received": 238, "ops": 1, "successful_ops": 1}]}]}], "summary": [ { "user": "5f7fe2d5352e466f948f49341e33d107", "categories": [ { "category": "create_bucket", "bytes_sent": 0, "bytes_received": 0, "ops": 1, "successful_ops": 1}, { "category": "get_obj", "bytes_sent": 0, "bytes_received": 0, "ops": 1, "successful_ops": 0}, { "category": 
"list_buckets", "bytes_sent": 46, "bytes_received": 0, "ops": 3, "successful_ops": 3}, { "category": "put_obj", "bytes_sent": 0, "bytes_received": 238, "ops": 1, "successful_ops": 1}, { "category": "stat_account", "bytes_sent": 0, "bytes_received": 0, "ops": 1, "successful_ops": 1}], "total": { "bytes_sent": 46, "bytes_received": 238, "ops": 7, "successful_ops": 6}}]} ''' buckets_json = json.loads(RGW_ADMIN_BUCKETS) usage_json = json.loads(RGW_ADMIN_USAGE) class TestRGWAdminClient(base.BaseTestCase): def setUp(self): super().setUp() self.client = rgw_client.RGWAdminClient('http://127.0.0.1:8080/admin', 'abcde', 'secret', False) self.get_resp = mock.MagicMock() self.get = mock.patch('requests.get', return_value=self.get_resp).start() def test_make_request_exception(self): self.get_resp.status_code = 403 self.assertRaises(rgw_client.RGWAdminAPIFailed, self.client._make_request, *('foo', {})) def test_make_request(self): self.get_resp.status_code = 200 self.get_resp.json.return_value = buckets_json actual = self.client._make_request('foo', []) self.assertEqual(buckets_json, actual) def test_get_buckets(self): self.get_resp.status_code = 200 self.get_resp.json.return_value = buckets_json actual = self.client.get_bucket('foo') bucket_list = [rgw_client.RGWAdminClient.Bucket('somefoo', 1000, 1000), rgw_client.RGWAdminClient.Bucket('somefoo31', 1, 42), ] expected = {'num_buckets': 2, 'size': 1042, 'num_objects': 1001, 'buckets': bucket_list} self.assertEqual(expected, actual) self.assertEqual(1, len(self.get.call_args_list)) self.assertEqual('foo', self.get.call_args_list[0][1]['params']['uid']) def test_get_buckets_implicit_tenants(self): self.get_resp.status_code = 200 self.get_resp.json.return_value = buckets_json self.client.implicit_tenants = True actual = self.client.get_bucket('foo') bucket_list = [rgw_client.RGWAdminClient.Bucket('somefoo', 1000, 1000), rgw_client.RGWAdminClient.Bucket('somefoo31', 1, 42), ] expected = {'num_buckets': 2, 'size': 1042, 
'num_objects': 1001, 'buckets': bucket_list} self.assertEqual(expected, actual) self.assertEqual(1, len(self.get.call_args_list)) self.assertEqual('foo$foo', self.get.call_args_list[0][1]['params']['uid']) def test_get_usage(self): self.get_resp.status_code = 200 self.get_resp.json.return_value = usage_json actual = self.client.get_usage('foo') expected = 7 self.assertEqual(expected, actual) self.assertEqual(1, len(self.get.call_args_list)) self.assertEqual('foo', self.get.call_args_list[0][1]['params']['uid']) def test_get_usage_implicit_tenants(self): self.get_resp.status_code = 200 self.get_resp.json.return_value = usage_json self.client.implicit_tenants = True actual = self.client.get_usage('foo') expected = 7 self.assertEqual(expected, actual) self.assertEqual(1, len(self.get.call_args_list)) self.assertEqual('foo$foo', self.get.call_args_list[0][1]['params']['uid']) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/objectstore/test_swift.py0000664000175100017510000003000115033033467025330 0ustar00mylesmyles# Copyright 2012 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import collections import itertools from unittest import mock import fixtures from keystoneauth1 import exceptions from oslotest import base from swiftclient import client as swift_client import testscenarios.testcase from ceilometer.objectstore import swift from ceilometer.polling import manager from ceilometer import service HEAD_ACCOUNTS = [('tenant-000', {'x-account-object-count': 12, 'x-account-bytes-used': 321321321, 'x-account-container-count': 7, }), ('tenant-001', {'x-account-object-count': 34, 'x-account-bytes-used': 9898989898, 'x-account-container-count': 17, }), ('tenant-002-ignored', {'x-account-object-count': 34, 'x-account-bytes-used': 9898989898, 'x-account-container-count': 17, })] GET_ACCOUNTS = [('tenant-000', ({'x-account-object-count': 10, 'x-account-bytes-used': 123123, 'x-account-container-count': 2, }, [{'count': 10, 'bytes': 123123, 'name': 'my_container', 'storage_policy': 'Policy-0', }, {'count': 0, 'bytes': 0, 'name': 'new_container', # NOTE(callumdickinson): No storage policy, # to test backwards compatibility with older # versions of Swift. }])), ('tenant-001', ({'x-account-object-count': 0, 'x-account-bytes-used': 0, 'x-account-container-count': 0, }, [])), ('tenant-002-ignored', ({'x-account-object-count': 0, 'x-account-bytes-used': 0, 'x-account-container-count': 0, }, []))] Tenant = collections.namedtuple('Tenant', 'id') ASSIGNED_TENANTS = [Tenant('tenant-000'), Tenant('tenant-001')] class TestManager(manager.AgentManager): def __init__(self, worker_id, conf): super().__init__(worker_id, conf) self._keystone = mock.MagicMock() self._keystone_last_exception = None self._service_catalog = (self._keystone.session.auth. get_access.return_value.service_catalog) self._auth_token = (self._keystone.session.auth. get_access.return_value.auth_token) class TestSwiftPollster(testscenarios.testcase.WithScenarios, base.BaseTestCase): # Define scenarios to run all of the tests against all of the # pollsters. 
scenarios = [ ('storage.objects', {'factory': swift.ObjectsPollster, 'resources': {}}), ('storage.objects.size', {'factory': swift.ObjectsSizePollster, 'resources': {}}), ('storage.objects.containers', {'factory': swift.ObjectsContainersPollster, 'resources': {}}), ('storage.containers.objects', {'factory': swift.ContainersObjectsPollster, 'resources': { f"{project_id}/{container['name']}": container for project_id, container in itertools.chain.from_iterable( itertools.product([acc[0]], acc[1][1]) for acc in GET_ACCOUNTS) }}), ('storage.containers.objects.size', {'factory': swift.ContainersSizePollster, 'resources': { f"{project_id}/{container['name']}": container for project_id, container in itertools.chain.from_iterable( itertools.product([acc[0]], acc[1][1]) for acc in GET_ACCOUNTS) }}), ] @staticmethod def fake_ks_service_catalog_url_for(*args, **kwargs): raise exceptions.EndpointNotFound("Fake keystone exception") def fake_iter_accounts(self, ksclient, cache, tenants): tenant_ids = [t.id for t in tenants] for i in self.ACCOUNTS: if i[0] in tenant_ids: yield i def setUp(self): super().setUp() self.CONF = service.prepare_service([], []) self.pollster = self.factory(self.CONF) self.manager = TestManager(0, self.CONF) if self.pollster.CACHE_KEY_METHOD == 'swift.head_account': self.ACCOUNTS = HEAD_ACCOUNTS else: self.ACCOUNTS = GET_ACCOUNTS def tearDown(self): super().tearDown() swift._Base._ENDPOINT = None def test_iter_accounts_no_cache(self): cache = {} with fixtures.MockPatchObject(self.factory, '_get_account_info', return_value=[]): data = list(self.pollster._iter_accounts(mock.Mock(), cache, ASSIGNED_TENANTS)) self.assertIn(self.pollster.CACHE_KEY_METHOD, cache) self.assertEqual([], data) def test_iter_accounts_cached(self): # Verify that if a method has already been called, _iter_accounts # uses the cached version and doesn't call swiftclient. 
mock_method = mock.Mock() mock_method.side_effect = AssertionError( 'should not be called', ) api_method = '%s_account' % self.pollster.METHOD with fixtures.MockPatchObject(swift_client, api_method, new=mock_method): with fixtures.MockPatchObject(self.factory, '_neaten_url'): cache = {self.pollster.CACHE_KEY_METHOD: [self.ACCOUNTS[0]]} data = list(self.pollster._iter_accounts(mock.Mock(), cache, ASSIGNED_TENANTS)) self.assertEqual([self.ACCOUNTS[0]], data) def test_neaten_url(self): reseller_prefix = self.CONF.reseller_prefix test_endpoints = ['http://127.0.0.1:8080', 'http://127.0.0.1:8080/swift'] test_tenant_id = 'a7fd1695fa154486a647e44aa99a1b9b' for test_endpoint in test_endpoints: standard_url = test_endpoint + '/v1/AUTH_' + test_tenant_id url = swift._Base._neaten_url(test_endpoint, test_tenant_id, reseller_prefix) self.assertEqual(standard_url, url) url = swift._Base._neaten_url(test_endpoint + '/', test_tenant_id, reseller_prefix) self.assertEqual(standard_url, url) url = swift._Base._neaten_url(test_endpoint + '/v1', test_tenant_id, reseller_prefix) self.assertEqual(standard_url, url) url = swift._Base._neaten_url(standard_url, test_tenant_id, reseller_prefix) self.assertEqual(standard_url, url) def test_metering(self): with fixtures.MockPatchObject(self.factory, '_iter_accounts', side_effect=self.fake_iter_accounts): samples = list(self.pollster.get_samples(self.manager, {}, ASSIGNED_TENANTS)) self.assertEqual(2, len(samples), self.pollster.__class__) for resource_id, resource in self.resources.items(): for field in getattr(self.pollster, 'FIELDS', []): with self.subTest(f'{resource_id}-{field}'): sample = next(s for s in samples if s.resource_id == resource_id) if field in resource: self.assertEqual(resource[field], sample.resource_metadata[field]) else: self.assertIsNone(sample.resource_metadata[field]) def test_get_meter_names(self): with fixtures.MockPatchObject(self.factory, '_iter_accounts', side_effect=self.fake_iter_accounts): samples = 
list(self.pollster.get_samples(self.manager, {}, ASSIGNED_TENANTS)) self.assertEqual({samples[0].name}, {s.name for s in samples}) def test_only_poll_assigned(self): mock_method = mock.MagicMock() endpoint = 'end://point/' api_method = '%s_account' % self.pollster.METHOD mock_connection = mock.MagicMock() with fixtures.MockPatchObject(swift_client, api_method, new=mock_method): with fixtures.MockPatchObject(swift_client, 'http_connection', new=mock_connection): with fixtures.MockPatchObject( self.manager._service_catalog, 'url_for', return_value=endpoint): list(self.pollster.get_samples(self.manager, {}, ASSIGNED_TENANTS)) expected = [mock.call(self.pollster._neaten_url( endpoint, t.id, self.CONF.reseller_prefix), cacert=None) for t in ASSIGNED_TENANTS] self.assertEqual(expected, mock_connection.call_args_list) expected = [mock.call(None, self.manager._auth_token, http_conn=mock_connection.return_value) for t in ASSIGNED_TENANTS] self.assertEqual(expected, mock_method.call_args_list) def test_get_endpoint_only_once(self): endpoint = 'end://point/' mock_url_for = mock.MagicMock(return_value=endpoint) api_method = '%s_account' % self.pollster.METHOD with fixtures.MockPatchObject(swift_client, api_method, new=mock.MagicMock()): with fixtures.MockPatchObject(swift_client, 'http_connection', new=mock.MagicMock()): with fixtures.MockPatchObject( self.manager._service_catalog, 'url_for', new=mock_url_for): list(self.pollster.get_samples(self.manager, {}, ASSIGNED_TENANTS)) list(self.pollster.get_samples(self.manager, {}, ASSIGNED_TENANTS)) self.assertEqual(1, mock_url_for.call_count) def test_endpoint_notfound(self): with fixtures.MockPatchObject( self.manager._service_catalog, 'url_for', side_effect=self.fake_ks_service_catalog_url_for): samples = list(self.pollster.get_samples(self.manager, {}, ASSIGNED_TENANTS)) self.assertEqual(0, len(samples)) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 
ceilometer-24.1.0.dev59/ceilometer/tests/unit/pipeline_base.py0000664000175100017510000004306415033033467023426 0ustar00mylesmyles# # Copyright 2013 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import traceback from unittest import mock import fixtures from oslo_utils import timeutils from ceilometer.pipeline import base as pipe_base from ceilometer.pipeline import sample as pipeline from ceilometer import publisher from ceilometer.publisher import test as test_publisher from ceilometer import sample from ceilometer import service from ceilometer.tests import base class BasePipelineTestCase(base.BaseTestCase, metaclass=abc.ABCMeta): def get_publisher(self, conf, url, namespace=''): fake_drivers = {'test://': test_publisher.TestPublisher, 'new://': test_publisher.TestPublisher, 'except://': self.PublisherClassException} return fake_drivers[url](conf, url) class PublisherClassException(publisher.ConfigPublisherBase): def publish_samples(self, samples): raise Exception() def publish_events(self, events): raise Exception() def setUp(self): super().setUp() self.CONF = service.prepare_service([], []) self.test_counter = sample.Sample( name='a', type=sample.TYPE_GAUGE, volume=1, unit='B', user_id="test_user", project_id="test_proj", resource_id="test_resource", timestamp=timeutils.utcnow().isoformat(), resource_metadata={} ) self.useFixture(fixtures.MockPatchObject( publisher, 'get_publisher', side_effect=self.get_publisher)) self._setup_pipeline_cfg() 
self._reraise_exception = True self.useFixture(fixtures.MockPatch( 'ceilometer.pipeline.base.LOG.exception', side_effect=self._handle_reraise_exception)) def _handle_reraise_exception(self, *args, **kwargs): if self._reraise_exception: raise Exception(traceback.format_exc()) @abc.abstractmethod def _setup_pipeline_cfg(self): """Setup the appropriate form of pipeline config.""" @abc.abstractmethod def _augment_pipeline_cfg(self): """Augment the pipeline config with an additional element.""" @abc.abstractmethod def _break_pipeline_cfg(self): """Break the pipeline config with a malformed element.""" @abc.abstractmethod def _dup_pipeline_name_cfg(self): """Break the pipeline config with duplicate pipeline name.""" @abc.abstractmethod def _set_pipeline_cfg(self, field, value): """Set a field to a value in the pipeline config.""" @abc.abstractmethod def _extend_pipeline_cfg(self, field, value): """Extend an existing field in the pipeline config with a value.""" @abc.abstractmethod def _unset_pipeline_cfg(self, field): """Clear an existing field in the pipeline config.""" def _build_and_set_new_pipeline(self): name = self.cfg2file(self.pipeline_cfg) self.CONF.set_override('pipeline_cfg_file', name) def _exception_create_pipelinemanager(self): self._build_and_set_new_pipeline() self.assertRaises(pipe_base.PipelineException, pipeline.SamplePipelineManager, self.CONF) def test_no_meters(self): self._unset_pipeline_cfg('meters') self._exception_create_pipelinemanager() def test_no_name(self): self._unset_pipeline_cfg('name') self._exception_create_pipelinemanager() def test_no_publishers(self): self._unset_pipeline_cfg('publishers') self._exception_create_pipelinemanager() def test_check_counters_include_exclude_same(self): counter_cfg = ['a', '!a'] self._set_pipeline_cfg('meters', counter_cfg) self._exception_create_pipelinemanager() def test_check_counters_include_exclude(self): counter_cfg = ['a', '!b'] self._set_pipeline_cfg('meters', counter_cfg) 
self._exception_create_pipelinemanager() def test_check_counters_wildcard_included(self): counter_cfg = ['a', '*'] self._set_pipeline_cfg('meters', counter_cfg) self._exception_create_pipelinemanager() def test_check_publishers_invalid_publisher(self): publisher_cfg = ['test_invalid'] self._set_pipeline_cfg('publishers', publisher_cfg) def test_multiple_included_counters(self): counter_cfg = ['a', 'b'] self._set_pipeline_cfg('meters', counter_cfg) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_counter]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.samples)) self.test_counter = sample.Sample( name='b', type=self.test_counter.type, volume=self.test_counter.volume, unit=self.test_counter.unit, user_id=self.test_counter.user_id, project_id=self.test_counter.project_id, resource_id=self.test_counter.resource_id, timestamp=self.test_counter.timestamp, resource_metadata=self.test_counter.resource_metadata, ) with pipeline_manager.publisher() as p: p([self.test_counter]) self.assertEqual(2, len(publisher.samples)) self.assertEqual('a', getattr(publisher.samples[0], "name")) self.assertEqual('b', getattr(publisher.samples[1], "name")) @mock.patch('ceilometer.pipeline.sample.LOG') def test_none_volume_counter(self, LOG): self._set_pipeline_cfg('meters', ['empty_volume']) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) publisher = pipeline_manager.pipelines[0].publishers[0] test_s = sample.Sample( name='empty_volume', type=self.test_counter.type, volume=None, unit=self.test_counter.unit, user_id=self.test_counter.user_id, project_id=self.test_counter.project_id, resource_id=self.test_counter.resource_id, timestamp=self.test_counter.timestamp, resource_metadata=self.test_counter.resource_metadata, ) with pipeline_manager.publisher() as p: p([test_s]) 
LOG.warning.assert_called_once_with( 'metering data %(counter_name)s for %(resource_id)s ' '@ %(timestamp)s has no volume (volume: %(counter_volume)s), the ' 'sample will be dropped' % {'counter_name': test_s.name, 'resource_id': test_s.resource_id, 'timestamp': test_s.timestamp, 'counter_volume': test_s.volume}) self.assertEqual(0, len(publisher.samples)) @mock.patch('ceilometer.pipeline.sample.LOG') def test_fake_volume_counter(self, LOG): self._set_pipeline_cfg('meters', ['fake_volume']) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) publisher = pipeline_manager.pipelines[0].publishers[0] test_s = sample.Sample( name='fake_volume', type=self.test_counter.type, volume='fake_value', unit=self.test_counter.unit, user_id=self.test_counter.user_id, project_id=self.test_counter.project_id, resource_id=self.test_counter.resource_id, timestamp=self.test_counter.timestamp, resource_metadata=self.test_counter.resource_metadata, ) with pipeline_manager.publisher() as p: p([test_s]) LOG.warning.assert_called_once_with( 'metering data %(counter_name)s for %(resource_id)s ' '@ %(timestamp)s has volume which is not a number ' '(volume: %(counter_volume)s), the sample will be dropped' % {'counter_name': test_s.name, 'resource_id': test_s.resource_id, 'timestamp': test_s.timestamp, 'counter_volume': test_s.volume}) self.assertEqual(0, len(publisher.samples)) def test_counter_dont_match(self): counter_cfg = ['nomatch'] self._set_pipeline_cfg('meters', counter_cfg) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_counter]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(0, len(publisher.samples)) self.assertEqual(0, publisher.calls) def test_wildcard_counter(self): counter_cfg = ['*'] self._set_pipeline_cfg('meters', counter_cfg) self._build_and_set_new_pipeline() pipeline_manager = 
pipeline.SamplePipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_counter]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.samples)) self.assertEqual('a', getattr(publisher.samples[0], "name")) def test_wildcard_excluded_counters(self): counter_cfg = ['*', '!a'] self._set_pipeline_cfg('meters', counter_cfg) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) pipe = pipeline_manager.pipelines[0] self.assertFalse(pipe.source.support_meter('a')) def test_wildcard_excluded_counters_not_excluded(self): counter_cfg = ['*', '!b'] self._set_pipeline_cfg('meters', counter_cfg) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_counter]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.samples)) self.assertEqual('a', getattr(publisher.samples[0], "name")) def test_all_excluded_counters_not_excluded(self): counter_cfg = ['!b', '!c'] self._set_pipeline_cfg('meters', counter_cfg) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_counter]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.samples)) self.assertEqual('a', getattr(publisher.samples[0], "name")) def test_all_excluded_counters_is_excluded(self): counter_cfg = ['!a', '!c'] self._set_pipeline_cfg('meters', counter_cfg) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) pipe = pipeline_manager.pipelines[0] self.assertFalse(pipe.source.support_meter('a')) self.assertTrue(pipe.source.support_meter('b')) self.assertFalse(pipe.source.support_meter('c')) def test_wildcard_and_excluded_wildcard_counters(self): counter_cfg = ['*', '!disk.*'] self._set_pipeline_cfg('meters', counter_cfg) 
self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) pipe = pipeline_manager.pipelines[0] self.assertFalse(pipe.source.support_meter('disk.read.bytes')) self.assertTrue(pipe.source.support_meter('cpu')) def test_included_counter_and_wildcard_counters(self): counter_cfg = ['cpu', 'disk.*'] self._set_pipeline_cfg('meters', counter_cfg) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) pipe = pipeline_manager.pipelines[0] self.assertTrue(pipe.source.support_meter('disk.read.bytes')) self.assertTrue(pipe.source.support_meter('cpu')) self.assertFalse(pipe.source.support_meter('instance')) def test_excluded_counter_and_excluded_wildcard_counters(self): counter_cfg = ['!cpu', '!disk.*'] self._set_pipeline_cfg('meters', counter_cfg) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) pipe = pipeline_manager.pipelines[0] self.assertFalse(pipe.source.support_meter('disk.read.bytes')) self.assertFalse(pipe.source.support_meter('cpu')) self.assertTrue(pipe.source.support_meter('instance')) def test_multiple_pipeline(self): self._augment_pipeline_cfg() self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_counter]) self.test_counter = sample.Sample( name='b', type=self.test_counter.type, volume=self.test_counter.volume, unit=self.test_counter.unit, user_id=self.test_counter.user_id, project_id=self.test_counter.project_id, resource_id=self.test_counter.resource_id, timestamp=self.test_counter.timestamp, resource_metadata=self.test_counter.resource_metadata, ) with pipeline_manager.publisher() as p: p([self.test_counter]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.samples)) self.assertEqual(1, publisher.calls) self.assertEqual('a', getattr(publisher.samples[0], "name")) new_publisher = 
pipeline_manager.pipelines[1].publishers[0] self.assertEqual(1, len(new_publisher.samples)) self.assertEqual(1, new_publisher.calls) self.assertEqual('b', getattr(new_publisher.samples[0], "name")) def test_multiple_pipeline_exception(self): self._reraise_exception = False self._break_pipeline_cfg() self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_counter]) self.test_counter = sample.Sample( name='b', type=self.test_counter.type, volume=self.test_counter.volume, unit=self.test_counter.unit, user_id=self.test_counter.user_id, project_id=self.test_counter.project_id, resource_id=self.test_counter.resource_id, timestamp=self.test_counter.timestamp, resource_metadata=self.test_counter.resource_metadata, ) with pipeline_manager.publisher() as p: p([self.test_counter]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, publisher.calls) self.assertEqual(1, len(publisher.samples)) self.assertEqual('a', getattr(publisher.samples[0], "name")) def test_multiple_publisher(self): self._set_pipeline_cfg('publishers', ['test://', 'new://']) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_counter]) publisher = pipeline_manager.pipelines[0].publishers[0] new_publisher = pipeline_manager.pipelines[0].publishers[1] self.assertEqual(1, len(publisher.samples)) self.assertEqual(1, len(new_publisher.samples)) self.assertEqual('a', getattr(new_publisher.samples[0], 'name')) self.assertEqual('a', getattr(publisher.samples[0], 'name')) def test_multiple_publisher_isolation(self): self._reraise_exception = False self._set_pipeline_cfg('publishers', ['except://', 'new://']) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_counter]) new_publisher = 
pipeline_manager.pipelines[0].publishers[1] self.assertEqual(1, len(new_publisher.samples)) self.assertEqual('a', getattr(new_publisher.samples[0], 'name')) def test_multiple_counter_pipeline(self): self._set_pipeline_cfg('meters', ['a', 'b']) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_counter, sample.Sample( name='b', type=self.test_counter.type, volume=self.test_counter.volume, unit=self.test_counter.unit, user_id=self.test_counter.user_id, project_id=self.test_counter.project_id, resource_id=self.test_counter.resource_id, timestamp=self.test_counter.timestamp, resource_metadata=self.test_counter.resource_metadata, )]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(2, len(publisher.samples)) self.assertEqual('a', getattr(publisher.samples[0], 'name')) self.assertEqual('b', getattr(publisher.samples[1], 'name')) def test_unique_pipeline_names(self): self._dup_pipeline_name_cfg() self._exception_create_pipelinemanager() ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7979414 ceilometer-24.1.0.dev59/ceilometer/tests/unit/polling/0000775000175100017510000000000015033033521021701 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/polling/__init__.py0000664000175100017510000000000015033033467024011 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/polling/test_discovery.py0000664000175100017510000001331315033033467025333 0ustar00mylesmyles# # Copyright 2014 Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for ceilometer/central/manager.py""" from unittest import mock from oslotest import base from ceilometer.polling.discovery import endpoint from ceilometer.polling.discovery import localnode from ceilometer.polling.discovery import tenant as project from ceilometer import service class TestEndpointDiscovery(base.BaseTestCase): def setUp(self): super().setUp() CONF = service.prepare_service([], []) CONF.set_override('interface', 'publicURL', group='service_credentials') CONF.set_override('region_name', 'test-region-name', group='service_credentials') self.discovery = endpoint.EndpointDiscovery(CONF) self.manager = mock.MagicMock() self.catalog = (self.manager.keystone.session.auth.get_access. 
return_value.service_catalog) def test_keystone_called(self): self.discovery.discover(self.manager, param='test-service-type') expected = [mock.call(service_type='test-service-type', interface='publicURL', region_name='test-region-name')] self.assertEqual(expected, self.catalog.get_urls.call_args_list) def test_keystone_called_no_service_type(self): self.discovery.discover(self.manager) expected = [mock.call(service_type=None, interface='publicURL', region_name='test-region-name')] self.assertEqual(expected, self.catalog.get_urls .call_args_list) def test_keystone_called_no_endpoints(self): self.catalog.get_urls.return_value = [] self.assertEqual([], self.discovery.discover(self.manager)) class TestLocalnodeDiscovery(base.BaseTestCase): def setUp(self): super().setUp() self.conf = service.prepare_service([], []) self.discovery = localnode.LocalNodeDiscovery(self.conf) self.manager = mock.MagicMock() def test_lockalnode_discovery(self): self.assertEqual([self.conf.host], self.discovery.discover(self.manager)) class TestProjectDiscovery(base.BaseTestCase): def prepare_mock_data(self): domain_heat = mock.MagicMock() domain_heat.id = '2f42ab40b7ad4140815ef830d816a16c' domain_heat.name = 'heat' domain_heat.enabled = True domain_heat.links = { 'self': 'http://192.168.1.1/identity/v3/domains/' '2f42ab40b7ad4140815ef830d816a16c'} domain_default = mock.MagicMock() domain_default.id = 'default' domain_default.name = 'Default' domain_default.enabled = True domain_default.links = { 'self': 'http://192.168.1.1/identity/v3/domains/default'} project_admin = mock.MagicMock() project_admin.id = '2ce92449a23145ef9c539f3327960ce3' project_admin.name = 'admin' project_admin.parent_id = 'default' project_admin.domain_id = 'default' project_admin.is_domain = False project_admin.enabled = True project_admin.links = { 'self': 'http://192.168.4.46/identity/v3/projects/' '2ce92449a23145ef9c539f3327960ce3'}, project_service = mock.MagicMock() project_service.id = 
'9bf93b86bca04e3b815f86a5de083adc' project_service.name = 'service' project_service.parent_id = 'default' project_service.domain_id = 'default' project_service.is_domain = False project_service.enabled = True project_service.links = { 'self': 'http://192.168.4.46/identity/v3/projects/' '9bf93b86bca04e3b815f86a5de083adc'} project_demo = mock.MagicMock() project_demo.id = '57d96b9af18d43bb9d047f436279b0be' project_demo.name = 'demo' project_demo.parent_id = 'default' project_demo.domain_id = 'default' project_demo.is_domain = False project_demo.enabled = True project_demo.links = { 'self': 'http://192.168.4.46/identity/v3/projects/' '57d96b9af18d43bb9d047f436279b0be'} self.domains = [domain_heat, domain_default] self.default_domain_projects = [project_admin, project_service] self.heat_domain_projects = [project_demo] def side_effect(self, domain=None): if not domain or domain.name == 'Default': return self.default_domain_projects elif domain.name == 'heat': return self.heat_domain_projects else: return [] def setUp(self): super().setUp() CONF = service.prepare_service([], []) self.discovery = project.TenantDiscovery(CONF) self.prepare_mock_data() self.manager = mock.MagicMock() self.manager.keystone.projects.list.side_effect = self.side_effect def test_project_discovery(self): self.manager.keystone.domains.list.return_value = self.domains result = self.discovery.discover(self.manager) self.assertEqual(len(result), 3) self.assertEqual(self.manager.keystone.projects.list.call_count, 2) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/polling/test_dynamic_pollster.py0000664000175100017510000022001615033033467026674 0ustar00mylesmyles# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for OpenStack dynamic pollster """ import copy import json import logging from unittest import mock import requests from urllib import parse as urlparse from ceilometer import declarative from ceilometer.polling import dynamic_pollster from ceilometer import sample from oslotest import base LOG = logging.getLogger(__name__) REQUIRED_POLLSTER_FIELDS = ['name', 'sample_type', 'unit', 'value_attribute', 'endpoint_type', 'url_path'] class SampleGenerator: def __init__(self, samples_dict, turn_to_list=False): self.turn_to_list = turn_to_list self.samples_dict = {} for k, v in samples_dict.items(): if isinstance(v, list): self.samples_dict[k] = [0, v] else: self.samples_dict[k] = [0, [v]] def get_next_sample_dict(self): _dict = {} for key in self.samples_dict.keys(): _dict[key] = self.get_next_sample(key) if self.turn_to_list: _dict = [_dict] return _dict def get_next_sample(self, key): samples = self.samples_dict[key][1] samples_next_iteration = self.samples_dict[key][0] % len(samples) self.samples_dict[key][0] += 1 _sample = samples[samples_next_iteration] if isinstance(_sample, SampleGenerator): return _sample.get_next_sample_dict() return _sample class PagedSamplesGenerator(SampleGenerator): def __init__(self, samples_dict, dict_name, page_link_name): super().__init__(samples_dict) self.dict_name = dict_name self.page_link_name = page_link_name self.response = {} def generate_samples(self, page_base_link, page_links, last_page_size): self.response.clear() current_page_link = page_base_link for page_link, page_size in page_links.items(): page_link = 
page_base_link + "/" + page_link self.response[current_page_link] = { self.page_link_name: [{'href': page_link, 'rel': 'next'}], self.dict_name: self.populate_page(page_size) } current_page_link = page_link self.response[current_page_link] = { self.dict_name: self.populate_page(last_page_size) } def populate_page(self, page_size): page = [] for item_number in range(0, page_size): page.append(self.get_next_sample_dict()) return page class PagedSamplesGeneratorHttpRequestMock(PagedSamplesGenerator): def mock_request(self, url, **kwargs): return_value = TestDynamicPollster.FakeResponse() return_value.status_code = requests.codes.ok return_value.json_object = self.response[url] return return_value class TestDynamicPollster(base.BaseTestCase): class FakeResponse: status_code = None json_object = None _text = None @property def text(self): return self._text or json.dumps(self.json_object) def json(self): return self.json_object def raise_for_status(self): raise requests.HTTPError("Mock HTTP error.", response=self) class FakeManager: def __init__(self, keystone=None): self._keystone = keystone def setUp(self): super().setUp() self.pollster_definition_only_required_fields = { 'name': "test-pollster", 'sample_type': "gauge", 'unit': "test", 'value_attribute': "volume", 'endpoint_type': "test", 'url_path': "v1/test/endpoint/fake"} self.pollster_definition_all_fields = { 'metadata_fields': "metadata-field-name", 'skip_sample_values': ["I-do-not-want-entries-with-this-value"], 'value_mapping': { 'value-to-map': 'new-value', 'value-to-map-to-numeric': 12 }, 'default_value_mapping': 0, 'metadata_mapping': { 'old-metadata-name': "new-metadata-name" }, 'preserve_mapped_metadata': False} self.pollster_definition_all_fields.update( self.pollster_definition_only_required_fields) self.multi_metric_pollster_definition = { 'name': "test-pollster.{category}", 'sample_type': "gauge", 'unit': "test", 'value_attribute': "[categories].ops", 'endpoint_type': "test", 'url_path': 
"v1/test/endpoint/fake"} def execute_basic_asserts(self, pollster, pollster_definition): self.assertEqual(pollster, pollster.obj) self.assertEqual(pollster_definition['name'], pollster.name) for key in REQUIRED_POLLSTER_FIELDS: self.assertEqual(pollster_definition[key], pollster.pollster_definitions[key]) self.assertEqual(pollster_definition, pollster.pollster_definitions) @mock.patch('keystoneclient.v2_0.client.Client') def test_skip_samples_with_linked_samples(self, keystone_mock): generator = PagedSamplesGeneratorHttpRequestMock(samples_dict={ 'volume': SampleGenerator(samples_dict={ 'name': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'], 'tmp': ['ra', 'rb', 'rc', 'rd', 're', 'rf', 'rg', 'rh']}, turn_to_list=True), 'id': [1, 2, 3, 4, 5, 6, 7, 8], 'name': ['a1', 'b2', 'c3', 'd4', 'e5', 'f6', 'g7', 'h8'] }, dict_name='servers', page_link_name='server_link') generator.generate_samples('http://test.com/v1/test-volumes', { 'marker=c3': 3, 'marker=f6': 3 }, 2) keystone_mock.session.get.side_effect = generator.mock_request fake_manager = self.FakeManager(keystone=keystone_mock) pollster_definition = dict(self.multi_metric_pollster_definition) pollster_definition['name'] = 'test-pollster.{name}' pollster_definition['value_attribute'] = '[volume].tmp' pollster_definition['skip_sample_values'] = ['rb'] pollster_definition['url_path'] = 'v1/test-volumes' pollster_definition['response_entries_key'] = 'servers' pollster_definition['next_sample_url_attribute'] = \ 'server_link | filter(lambda v: v.get("rel") == "next", value) |' \ 'list(value) | value[0] | value.get("href")' pollster = dynamic_pollster.DynamicPollster(pollster_definition) samples = pollster.get_samples(fake_manager, None, ['http://test.com']) self.assertEqual(['ra', 'rc', 'rd', 're', 'rf', 'rg', 'rh'], list(map(lambda s: s.volume, samples))) generator.generate_samples('http://test.com/v1/test-volumes', { 'marker=c3': 3, 'marker=f6': 3 }, 2) pollster_definition['name'] = 'test-pollster' 
pollster_definition['value_attribute'] = 'name' pollster_definition['skip_sample_values'] = ['b2'] pollster = dynamic_pollster.DynamicPollster(pollster_definition) samples = pollster.get_samples(fake_manager, None, ['http://test.com']) self.assertEqual(['a1', 'c3', 'd4', 'e5', 'f6', 'g7', 'h8'], list(map(lambda s: s.volume, samples))) def test_all_required_fields_ok(self): pollster = dynamic_pollster.DynamicPollster( self.pollster_definition_only_required_fields) self.execute_basic_asserts( pollster, self.pollster_definition_only_required_fields) self.assertEqual( 0, len(pollster.pollster_definitions['skip_sample_values'])) self.assertEqual( 0, len(pollster.pollster_definitions['value_mapping'])) self.assertEqual( -1, pollster.pollster_definitions['default_value']) self.assertEqual( 0, len(pollster.pollster_definitions['metadata_mapping'])) self.assertEqual( True, pollster.pollster_definitions['preserve_mapped_metadata']) def test_all_fields_ok(self): pollster = dynamic_pollster.DynamicPollster( self.pollster_definition_all_fields) self.execute_basic_asserts(pollster, self.pollster_definition_all_fields) self.assertEqual( 1, len(pollster.pollster_definitions['skip_sample_values'])) self.assertEqual( 2, len(pollster.pollster_definitions['value_mapping'])) self.assertEqual( 0, pollster.pollster_definitions['default_value_mapping']) self.assertEqual( 1, len(pollster.pollster_definitions['metadata_mapping'])) self.assertEqual( False, pollster.pollster_definitions['preserve_mapped_metadata']) def test_all_required_fields_exceptions(self): for key in REQUIRED_POLLSTER_FIELDS: pollster_definition = copy.deepcopy( self.pollster_definition_only_required_fields) pollster_definition.pop(key) exception = self.assertRaises( declarative.DynamicPollsterDefinitionException, dynamic_pollster.DynamicPollster, pollster_definition) self.assertEqual("Required fields ['%s'] not specified." 
% key, exception.brief_message) def test_invalid_sample_type(self): self.pollster_definition_only_required_fields[ 'sample_type'] = "invalid_sample_type" exception = self.assertRaises( declarative.DynamicPollsterDefinitionException, dynamic_pollster.DynamicPollster, self.pollster_definition_only_required_fields) self.assertEqual("Invalid sample type [invalid_sample_type]. " "Valid ones are [('gauge', 'delta', 'cumulative')].", exception.brief_message) def test_all_valid_sample_type(self): for sample_type in sample.TYPES: self.pollster_definition_only_required_fields[ 'sample_type'] = sample_type pollster = dynamic_pollster.DynamicPollster( self.pollster_definition_only_required_fields) self.execute_basic_asserts( pollster, self.pollster_definition_only_required_fields) def test_default_discovery_method(self): pollster = dynamic_pollster.DynamicPollster( self.pollster_definition_only_required_fields) self.assertEqual("endpoint:test", pollster.definitions.sample_gatherer .default_discovery) @mock.patch('keystoneclient.v2_0.client.Client') def test_execute_request_get_samples_empty_response(self, client_mock): pollster = dynamic_pollster.DynamicPollster( self.pollster_definition_only_required_fields) return_value = self.FakeResponse() return_value.status_code = requests.codes.ok return_value.json_object = {} client_mock.session.get.return_value = return_value samples = pollster.definitions.sample_gatherer. 
\ execute_request_get_samples( keystone_client=client_mock, resource="https://endpoint.server.name/") self.assertEqual(0, len(samples)) @mock.patch('keystoneclient.v2_0.client.Client') def test_execute_request_get_samples_response_non_empty( self, client_mock): pollster = dynamic_pollster.DynamicPollster( self.pollster_definition_only_required_fields) return_value = self.FakeResponse() return_value.status_code = requests.codes.ok return_value.json_object = {"firstElement": [{}, {}, {}]} client_mock.session.get.return_value = return_value samples = pollster.definitions.sample_gatherer. \ execute_request_get_samples( keystone_client=client_mock, resource="https://endpoint.server.name/") self.assertEqual(3, len(samples)) @mock.patch('keystoneclient.v2_0.client.Client') def test_execute_request_json_response_handler( self, client_mock): pollster = dynamic_pollster.DynamicPollster( self.pollster_definition_only_required_fields) return_value = self.FakeResponse() return_value.status_code = requests.codes.ok return_value._text = '{"test": [1,2,3]}' client_mock.session.get.return_value = return_value samples = pollster.definitions.sample_gatherer. \ execute_request_get_samples( keystone_client=client_mock, resource="https://endpoint.server.name/") self.assertEqual(3, len(samples)) @mock.patch('keystoneclient.v2_0.client.Client') def test_execute_request_xml_response_handler( self, client_mock): definitions = copy.deepcopy( self.pollster_definition_only_required_fields) definitions['response_handlers'] = ['xml'] pollster = dynamic_pollster.DynamicPollster(definitions) return_value = self.FakeResponse() return_value.status_code = requests.codes.ok return_value._text = '123' client_mock.session.get.return_value = return_value samples = pollster.definitions.sample_gatherer. 
\ execute_request_get_samples( keystone_client=client_mock, resource="https://endpoint.server.name/") self.assertEqual(3, len(samples)) @mock.patch('keystoneclient.v2_0.client.Client') def test_execute_request_xml_json_response_handler( self, client_mock): definitions = copy.deepcopy( self.pollster_definition_only_required_fields) definitions['response_handlers'] = ['xml', 'json'] pollster = dynamic_pollster.DynamicPollster(definitions) return_value = self.FakeResponse() return_value.status_code = requests.codes.ok return_value._text = '123' client_mock.session.get.return_value = return_value samples = pollster.definitions.sample_gatherer. \ execute_request_get_samples( keystone_client=client_mock, resource="https://endpoint.server.name/") self.assertEqual(3, len(samples)) return_value._text = '{"test": [1,2,3,4]}' samples = pollster.definitions.sample_gatherer. \ execute_request_get_samples( keystone_client=client_mock, resource="https://endpoint.server.name/") self.assertEqual(4, len(samples)) @mock.patch('keystoneclient.v2_0.client.Client') def test_execute_request_extra_metadata_fields_cache_disabled( self, client_mock): definitions = copy.deepcopy( self.pollster_definition_only_required_fields) extra_metadata_fields = { 'extra_metadata_fields_cache_seconds': 0, 'name': "project_name", 'endpoint_type': "identity", 'url_path': "'/v3/projects/' + str(sample['project_id'])", 'value': "name", } definitions['value_attribute'] = 'project_id' definitions['extra_metadata_fields'] = extra_metadata_fields pollster = dynamic_pollster.DynamicPollster(definitions) return_value = self.FakeResponse() return_value.status_code = requests.codes.ok return_value._text = ''' {"projects": [ {"project_id": 9999, "name": "project1"}, {"project_id": 8888, "name": "project2"}, {"project_id": 7777, "name": "project3"}, {"project_id": 9999, "name": "project1"}, {"project_id": 8888, "name": "project2"}, {"project_id": 7777, "name": "project3"}, {"project_id": 9999, "name": "project1"}, 
{"project_id": 8888, "name": "project2"}, {"project_id": 7777, "name": "project3"}] } ''' return_value9999 = self.FakeResponse() return_value9999.status_code = requests.codes.ok return_value9999._text = ''' {"project": {"project_id": 9999, "name": "project1"} } ''' return_value8888 = self.FakeResponse() return_value8888.status_code = requests.codes.ok return_value8888._text = ''' {"project": {"project_id": 8888, "name": "project2"} } ''' return_value7777 = self.FakeResponse() return_value7777.status_code = requests.codes.ok return_value7777._text = ''' {"project": {"project_id": 7777, "name": "project3"} } ''' def get(url, *args, **kwargs): if '9999' in url: return return_value9999 if '8888' in url: return return_value8888 if '7777' in url: return return_value7777 return return_value client_mock.session.get.side_effect = get manager = mock.Mock manager._keystone = client_mock def discover(*args, **kwargs): return ["https://endpoint.server.name/"] manager.discover = discover samples = pollster.get_samples( manager=manager, cache=None, resources=["https://endpoint.server.name/"]) samples = list(samples) n_calls = client_mock.session.get.call_count self.assertEqual(9, len(samples)) self.assertEqual(10, n_calls) @mock.patch('keystoneclient.v2_0.client.Client') def test_execute_request_extra_metadata_fields_cache_enabled( self, client_mock): definitions = copy.deepcopy( self.pollster_definition_only_required_fields) extra_metadata_fields = { 'extra_metadata_fields_cache_seconds': 3600, 'name': "project_name", 'endpoint_type': "identity", 'url_path': "'/v3/projects/' + str(sample['project_id'])", 'value': "name", } definitions['value_attribute'] = 'project_id' definitions['extra_metadata_fields'] = extra_metadata_fields pollster = dynamic_pollster.DynamicPollster(definitions) return_value = self.FakeResponse() return_value.status_code = requests.codes.ok return_value._text = ''' {"projects": [ {"project_id": 9999, "name": "project1"}, {"project_id": 8888, "name": 
"project2"}, {"project_id": 7777, "name": "project3"}, {"project_id": 9999, "name": "project4"}, {"project_id": 8888, "name": "project5"}, {"project_id": 7777, "name": "project6"}, {"project_id": 9999, "name": "project7"}, {"project_id": 8888, "name": "project8"}, {"project_id": 7777, "name": "project9"}] } ''' return_value9999 = self.FakeResponse() return_value9999.status_code = requests.codes.ok return_value9999._text = ''' {"project": {"project_id": 9999, "name": "project1"} } ''' return_value8888 = self.FakeResponse() return_value8888.status_code = requests.codes.ok return_value8888._text = ''' {"project": {"project_id": 8888, "name": "project2"} } ''' return_value7777 = self.FakeResponse() return_value7777.status_code = requests.codes.ok return_value7777._text = ''' {"project": {"project_id": 7777, "name": "project3"} } ''' def get(url, *args, **kwargs): if '9999' in url: return return_value9999 if '8888' in url: return return_value8888 if '7777' in url: return return_value7777 return return_value client_mock.session.get.side_effect = get manager = mock.Mock manager._keystone = client_mock def discover(*args, **kwargs): return ["https://endpoint.server.name/"] manager.discover = discover samples = pollster.get_samples( manager=manager, cache=None, resources=["https://endpoint.server.name/"]) samples = list(samples) n_calls = client_mock.session.get.call_count self.assertEqual(9, len(samples)) self.assertEqual(4, n_calls) @mock.patch('keystoneclient.v2_0.client.Client') def test_execute_request_extra_metadata_fields( self, client_mock): definitions = copy.deepcopy( self.pollster_definition_only_required_fields) extra_metadata_fields = [{ 'name': "project_name", 'endpoint_type': "identity", 'url_path': "'/v3/projects/' + str(sample['project_id'])", 'value': "name", 'metadata_fields': ['meta'] }, { 'name': "project_alias", 'endpoint_type': "identity", 'url_path': "'/v3/projects/' + " "str(extra_metadata_captured['project_name'])", 'value': "name", 
'metadata_fields': ['meta'] }, { 'name': "project_meta", 'endpoint_type': "identity", 'url_path': "'/v3/projects/' + " "str(extra_metadata_by_name['project_name']" "['metadata']['meta'])", 'value': "project_id", 'metadata_fields': ['meta'] }] definitions['value_attribute'] = 'project_id' definitions['extra_metadata_fields'] = extra_metadata_fields pollster = dynamic_pollster.DynamicPollster(definitions) return_value = self.FakeResponse() return_value.status_code = requests.codes.ok return_value._text = ''' {"projects": [ {"project_id": 9999, "name": "project1"}, {"project_id": 8888, "name": "project2"}, {"project_id": 7777, "name": "project3"}] } ''' return_value9999 = self.FakeResponse() return_value9999.status_code = requests.codes.ok return_value9999._text = ''' {"project": {"project_id": 9999, "name": "project1", "meta": "m1"} } ''' return_value8888 = self.FakeResponse() return_value8888.status_code = requests.codes.ok return_value8888._text = ''' {"project": {"project_id": 8888, "name": "project2", "meta": "m2"} } ''' return_value7777 = self.FakeResponse() return_value7777.status_code = requests.codes.ok return_value7777._text = ''' {"project": {"project_id": 7777, "name": "project3", "meta": "m3"} } ''' return_valueP1 = self.FakeResponse() return_valueP1.status_code = requests.codes.ok return_valueP1._text = ''' {"project": {"project_id": 7777, "name": "p1", "meta": null} } ''' return_valueP2 = self.FakeResponse() return_valueP2.status_code = requests.codes.ok return_valueP2._text = ''' {"project": {"project_id": 7777, "name": "p2", "meta": null} } ''' return_valueP3 = self.FakeResponse() return_valueP3.status_code = requests.codes.ok return_valueP3._text = ''' {"project": {"project_id": 7777, "name": "p3", "meta": null} } ''' return_valueM1 = self.FakeResponse() return_valueM1.status_code = requests.codes.ok return_valueM1._text = ''' {"project": {"project_id": "META1", "name": "p3", "meta": null} } ''' return_valueM2 = self.FakeResponse() 
return_valueM2.status_code = requests.codes.ok return_valueM2._text = ''' {"project": {"project_id": "META2", "name": "p3", "meta": null} } ''' return_valueM3 = self.FakeResponse() return_valueM3.status_code = requests.codes.ok return_valueM3._text = ''' {"project": {"project_id": "META3", "name": "p3", "meta": null} } ''' def get(url, *args, **kwargs): if '9999' in url: return return_value9999 if '8888' in url: return return_value8888 if '7777' in url: return return_value7777 if 'project1' in url: return return_valueP1 if 'project2' in url: return return_valueP2 if 'project3' in url: return return_valueP3 if 'm1' in url: return return_valueM1 if 'm2' in url: return return_valueM2 if 'm3' in url: return return_valueM3 return return_value client_mock.session.get = get manager = mock.Mock manager._keystone = client_mock def discover(*args, **kwargs): return ["https://endpoint.server.name/"] manager.discover = discover samples = pollster.get_samples( manager=manager, cache=None, resources=["https://endpoint.server.name/"]) samples = list(samples) self.assertEqual(3, len(samples)) self.assertEqual(samples[0].volume, 9999) self.assertEqual(samples[1].volume, 8888) self.assertEqual(samples[2].volume, 7777) self.assertEqual(samples[0].resource_metadata, {'project_name': 'project1', 'project_alias': 'p1', 'meta': 'm1', 'project_meta': 'META1'}) self.assertEqual(samples[1].resource_metadata, {'project_name': 'project2', 'project_alias': 'p2', 'meta': 'm2', 'project_meta': 'META2'}) self.assertEqual(samples[2].resource_metadata, {'project_name': 'project3', 'project_alias': 'p3', 'meta': 'm3', 'project_meta': 'META3'}) @mock.patch('keystoneclient.v2_0.client.Client') def test_execute_request_extra_metadata_fields_skip( self, client_mock): definitions = copy.deepcopy( self.pollster_definition_only_required_fields) extra_metadata_fields = [{ 'name': "project_name", 'endpoint_type': "identity", 'url_path': "'/v3/projects/' + str(sample['project_id'])", 'value': "name", }, { 
'name': "project_alias", 'endpoint_type': "identity", 'extra_metadata_fields_skip': [{ 'value': 7777 }], 'url_path': "'/v3/projects/' + " "str(sample['p_name'])", 'value': "name", }] definitions['value_attribute'] = 'project_id' definitions['metadata_fields'] = ['to_skip', 'p_name'] definitions['extra_metadata_fields'] = extra_metadata_fields definitions['extra_metadata_fields_skip'] = [{ 'metadata': { 'to_skip': 'skip1' } }, { 'value': 8888 }] pollster = dynamic_pollster.DynamicPollster(definitions) return_value = self.FakeResponse() return_value.status_code = requests.codes.ok return_value._text = ''' {"projects": [ {"project_id": 9999, "p_name": "project1", "to_skip": "skip1"}, {"project_id": 8888, "p_name": "project2", "to_skip": "skip2"}, {"project_id": 7777, "p_name": "project3", "to_skip": "skip3"}, {"project_id": 6666, "p_name": "project4", "to_skip": "skip4"}] } ''' return_value9999 = self.FakeResponse() return_value9999.status_code = requests.codes.ok return_value9999._text = ''' {"project": {"project_id": 9999, "name": "project1"} } ''' return_value8888 = self.FakeResponse() return_value8888.status_code = requests.codes.ok return_value8888._text = ''' {"project": {"project_id": 8888, "name": "project2"} } ''' return_value7777 = self.FakeResponse() return_value7777.status_code = requests.codes.ok return_value7777._text = ''' {"project": {"project_id": 7777, "name": "project3"} } ''' return_value6666 = self.FakeResponse() return_value6666.status_code = requests.codes.ok return_value6666._text = ''' {"project": {"project_id": 6666, "name": "project4"} } ''' return_valueP1 = self.FakeResponse() return_valueP1.status_code = requests.codes.ok return_valueP1._text = ''' {"project": {"project_id": 7777, "name": "p1"} } ''' return_valueP2 = self.FakeResponse() return_valueP2.status_code = requests.codes.ok return_valueP2._text = ''' {"project": {"project_id": 7777, "name": "p2"} } ''' return_valueP3 = self.FakeResponse() return_valueP3.status_code = 
requests.codes.ok return_valueP3._text = ''' {"project": {"project_id": 7777, "name": "p3"} } ''' return_valueP4 = self.FakeResponse() return_valueP4.status_code = requests.codes.ok return_valueP4._text = ''' {"project": {"project_id": 6666, "name": "p4"} } ''' def get(url, *args, **kwargs): if '9999' in url: return return_value9999 if '8888' in url: return return_value8888 if '7777' in url: return return_value7777 if '6666' in url: return return_value6666 if 'project1' in url: return return_valueP1 if 'project2' in url: return return_valueP2 if 'project3' in url: return return_valueP3 if 'project4' in url: return return_valueP4 return return_value client_mock.session.get = get manager = mock.Mock manager._keystone = client_mock def discover(*args, **kwargs): return ["https://endpoint.server.name/"] manager.discover = discover samples = pollster.get_samples( manager=manager, cache=None, resources=["https://endpoint.server.name/"]) samples = list(samples) self.assertEqual(4, len(samples)) self.assertEqual(samples[0].volume, 9999) self.assertEqual(samples[1].volume, 8888) self.assertEqual(samples[2].volume, 7777) self.assertEqual(samples[0].resource_metadata, {'p_name': 'project1', 'project_alias': 'p1', 'to_skip': 'skip1'}) self.assertEqual(samples[1].resource_metadata, {'p_name': 'project2', 'project_alias': 'p2', 'to_skip': 'skip2'}) self.assertEqual(samples[2].resource_metadata, {'p_name': 'project3', 'project_name': 'project3', 'to_skip': 'skip3'}) self.assertEqual(samples[3].resource_metadata, {'p_name': 'project4', 'project_alias': 'p4', 'project_name': 'project4', 'to_skip': 'skip4'}) @mock.patch('keystoneclient.v2_0.client.Client') def test_execute_request_extra_metadata_fields_different_requests( self, client_mock): definitions = copy.deepcopy( self.pollster_definition_only_required_fields) command = ''' \'\'\'echo '{"project": {"project_id": \'\'\'+ str(sample['project_id']) +\'\'\' , "name": "project1"}}' \'\'\' '''.replace('\n', '') command2 = ''' 
\'\'\'echo '{"project": {"project_id": \'\'\'+ str(sample['project_id']) +\'\'\' , "name": "project2"}}' \'\'\' '''.replace('\n', '') extra_metadata_fields_embedded = { 'name': "project_name2", 'host_command': command2, 'value': "name", } extra_metadata_fields = { 'name': "project_id2", 'host_command': command, 'value': "project_id", 'extra_metadata_fields': extra_metadata_fields_embedded } definitions['value_attribute'] = 'project_id' definitions['extra_metadata_fields'] = extra_metadata_fields pollster = dynamic_pollster.DynamicPollster(definitions) return_value = self.FakeResponse() return_value.status_code = requests.codes.ok return_value._text = ''' {"projects": [ {"project_id": 9999, "name": "project1"}, {"project_id": 8888, "name": "project2"}, {"project_id": 7777, "name": "project3"}] } ''' def get(url, *args, **kwargs): return return_value client_mock.session.get = get manager = mock.Mock manager._keystone = client_mock def discover(*args, **kwargs): return ["https://endpoint.server.name/"] manager.discover = discover samples = pollster.get_samples( manager=manager, cache=None, resources=["https://endpoint.server.name/"]) samples = list(samples) self.assertEqual(3, len(samples)) self.assertEqual(samples[0].volume, 9999) self.assertEqual(samples[1].volume, 8888) self.assertEqual(samples[2].volume, 7777) self.assertEqual(samples[0].resource_metadata, {'project_id2': 9999, 'project_name2': 'project2'}) self.assertEqual(samples[1].resource_metadata, {'project_id2': 8888, 'project_name2': 'project2'}) self.assertEqual(samples[2].resource_metadata, {'project_id2': 7777, 'project_name2': 'project2'}) @mock.patch('keystoneclient.v2_0.client.Client') def test_execute_request_xml_json_response_handler_invalid_response( self, client_mock): definitions = copy.deepcopy( self.pollster_definition_only_required_fields) definitions['response_handlers'] = ['xml', 'json'] pollster = dynamic_pollster.DynamicPollster(definitions) return_value = self.FakeResponse() 
return_value.status_code = requests.codes.ok return_value._text = 'Invalid response' client_mock.session.get.return_value = return_value with self.assertLogs('ceilometer.polling.dynamic_pollster', level='DEBUG') as logs: gatherer = pollster.definitions.sample_gatherer exception = self.assertRaises( declarative.InvalidResponseTypeException, gatherer.execute_request_get_samples, keystone_client=client_mock, resource="https://endpoint.server.name/") xml_handling_error = logs.output[3] json_handling_error = logs.output[4] self.assertIn( 'DEBUG:ceilometer.polling.dynamic_pollster:' 'Error handling response [Invalid response] ' 'with handler [XMLResponseHandler]', xml_handling_error) self.assertIn( 'DEBUG:ceilometer.polling.dynamic_pollster:' 'Error handling response [Invalid response] ' 'with handler [JsonResponseHandler]', json_handling_error) self.assertEqual( "InvalidResponseTypeException None: " "No remaining handlers to handle the response " "[Invalid response], used handlers " "[XMLResponseHandler, JsonResponseHandler]. " "[{'url_path': 'v1/test/endpoint/fake'}].", str(exception)) def test_configure_response_handler_definition_invalid_value(self): definitions = copy.deepcopy( self.pollster_definition_only_required_fields) definitions['response_handlers'] = ['jason'] exception = self.assertRaises( declarative.DynamicPollsterDefinitionException, dynamic_pollster.DynamicPollster, pollster_definitions=definitions) self.assertEqual("DynamicPollsterDefinitionException None: " "Invalid response_handler value [jason]. 
" "Accepted values are [json, xml, text]", str(exception)) def test_configure_extra_metadata_field_skip_invalid_value(self): definitions = copy.deepcopy( self.pollster_definition_only_required_fields) definitions['extra_metadata_fields_skip'] = 'teste' exception = self.assertRaises( declarative.DynamicPollsterDefinitionException, dynamic_pollster.DynamicPollster, pollster_definitions=definitions) self.assertEqual("DynamicPollsterDefinitionException None: " "Invalid extra_metadata_fields_skip configuration." " It must be a list of maps. Provided value: teste," " value type: str.", str(exception)) def test_configure_extra_metadata_field_skip_invalid_sub_value(self): definitions = copy.deepcopy( self.pollster_definition_only_required_fields) definitions['extra_metadata_fields_skip'] = [{'test': '1'}, {'test': '2'}, 'teste'] exception = self.assertRaises( declarative.DynamicPollsterDefinitionException, dynamic_pollster.DynamicPollster, pollster_definitions=definitions) self.assertEqual("DynamicPollsterDefinitionException None: " "Invalid extra_metadata_fields_skip configuration." " It must be a list of maps. Provided value: " "[{'test': '1'}, {'test': '2'}, 'teste'], " "value type: list.", str(exception)) def test_configure_response_handler_definition_invalid_type(self): definitions = copy.deepcopy( self.pollster_definition_only_required_fields) definitions['response_handlers'] = 'json' exception = self.assertRaises( declarative.DynamicPollsterDefinitionException, dynamic_pollster.DynamicPollster, pollster_definitions=definitions) self.assertEqual("DynamicPollsterDefinitionException None: " "Invalid response_handlers configuration. " "It must be a list. 
Provided value type: str", str(exception)) @mock.patch('keystoneclient.v2_0.client.Client') def test_execute_request_get_samples_exception_on_request( self, client_mock): pollster = dynamic_pollster.DynamicPollster( self.pollster_definition_only_required_fields) return_value = self.FakeResponse() return_value.status_code = requests.codes.bad client_mock.session.get.return_value = return_value exception = self.assertRaises(requests.HTTPError, pollster.definitions.sample_gatherer. execute_request_get_samples, keystone_client=client_mock, resource="https://endpoint.server.name/") self.assertEqual("Mock HTTP error.", str(exception)) def test_execute_host_command_paged_responses(self): definitions = copy.deepcopy( self.pollster_definition_only_required_fields) definitions['host_command'] = ''' echo '{"server": [{"status": "ACTIVE"}], "next": ""}' ''' str_json = "'{\\\"server\\\": [{\\\"status\\\": \\\"INACTIVE\\\"}]}'" definitions['next_sample_url_attribute'] = \ "next|\"echo \"+value+\"" + str_json + '"' pollster = dynamic_pollster.DynamicPollster(definitions) samples = pollster.definitions.sample_gatherer. \ execute_request_get_samples() resp_json = [{'status': 'ACTIVE'}, {'status': 'INACTIVE'}] self.assertEqual(resp_json, samples) def test_execute_host_command_response_handler(self): definitions = copy.deepcopy( self.pollster_definition_only_required_fields) definitions['response_handlers'] = ['xml', 'json'] definitions['host_command'] = 'echo "xml\nxml"' entry = 'a' definitions['response_entries_key'] = entry definitions.pop('url_path') definitions.pop('endpoint_type') pollster = dynamic_pollster.DynamicPollster(definitions) samples_xml = pollster.definitions.sample_gatherer. \ execute_request_get_samples() definitions['host_command'] = 'echo \'{"a": {"y":"json",' \ '\n"s":"json"}}\'' samples_json = pollster.definitions.sample_gatherer. 
\ execute_request_get_samples() resp_xml = {'a': {'y': 'xml', 's': 'xml'}} resp_json = {'a': {'y': 'json', 's': 'json'}} self.assertEqual(resp_xml[entry], samples_xml) self.assertEqual(resp_json[entry], samples_json) def test_execute_host_command_invalid_command(self): definitions = copy.deepcopy( self.pollster_definition_only_required_fields) definitions['host_command'] = 'invalid-command' definitions.pop('url_path') definitions.pop('endpoint_type') pollster = dynamic_pollster.DynamicPollster(definitions) self.assertRaises( declarative.InvalidResponseTypeException, pollster.definitions.sample_gatherer.execute_request_get_samples) def test_generate_new_metadata_fields_no_metadata_mapping(self): metadata = {'name': 'someName', 'value': 1} metadata_before_call = copy.deepcopy(metadata) self.pollster_definition_only_required_fields['metadata_mapping'] = {} pollster = dynamic_pollster.DynamicPollster( self.pollster_definition_only_required_fields) pollster.definitions.sample_extractor.generate_new_metadata_fields( metadata, self.pollster_definition_only_required_fields) self.assertEqual(metadata_before_call, metadata) def test_generate_new_metadata_fields_preserve_old_key(self): metadata = {'name': 'someName', 'value': 2} expected_metadata = copy.deepcopy(metadata) expected_metadata['balance'] = metadata['value'] self.pollster_definition_only_required_fields[ 'metadata_mapping'] = {'value': 'balance'} self.pollster_definition_only_required_fields[ 'preserve_mapped_metadata'] = True pollster = dynamic_pollster.DynamicPollster( self.pollster_definition_only_required_fields) pollster.definitions.sample_extractor.generate_new_metadata_fields( metadata, self.pollster_definition_only_required_fields) self.assertEqual(expected_metadata, metadata) def test_generate_new_metadata_fields_preserve_old_key_equals_false(self): metadata = {'name': 'someName', 'value': 1} expected_clean_metadata = copy.deepcopy(metadata) expected_clean_metadata['balance'] = metadata['value'] 
expected_clean_metadata.pop('value') self.pollster_definition_only_required_fields[ 'metadata_mapping'] = {'value': 'balance'} self.pollster_definition_only_required_fields[ 'preserve_mapped_metadata'] = False pollster = dynamic_pollster.DynamicPollster( self.pollster_definition_only_required_fields) pollster.definitions.sample_extractor.generate_new_metadata_fields( metadata, self.pollster_definition_only_required_fields) self.assertEqual(expected_clean_metadata, metadata) def test_execute_value_mapping_no_value_mapping(self): self.pollster_definition_only_required_fields['value_mapping'] = {} pollster = dynamic_pollster.DynamicPollster( self.pollster_definition_only_required_fields) value_to_be_mapped = "test" expected_value = value_to_be_mapped value = pollster.definitions.value_mapper. \ execute_value_mapping(value_to_be_mapped) self.assertEqual(expected_value, value) def test_execute_value_mapping_no_value_mapping_found_with_default(self): self.pollster_definition_only_required_fields[ 'value_mapping'] = {'some-possible-value': 15} pollster = dynamic_pollster.DynamicPollster( self.pollster_definition_only_required_fields) value_to_be_mapped = "test" expected_value = -1 value = pollster.definitions.value_mapper. \ execute_value_mapping(value_to_be_mapped) self.assertEqual(expected_value, value) def test_execute_value_mapping_no_value_mapping_found_with_custom_default( self): self.pollster_definition_only_required_fields[ 'value_mapping'] = {'some-possible-value': 5} self.pollster_definition_only_required_fields[ 'default_value'] = 0 pollster = dynamic_pollster.DynamicPollster( self.pollster_definition_only_required_fields) value_to_be_mapped = "test" expected_value = 0 value = pollster.definitions.value_mapper. 
\ execute_value_mapping(value_to_be_mapped) self.assertEqual(expected_value, value) def test_execute_value_mapping(self): self.pollster_definition_only_required_fields[ 'value_mapping'] = {'test': 'new-value'} pollster = dynamic_pollster.DynamicPollster( self.pollster_definition_only_required_fields) value_to_be_mapped = "test" expected_value = 'new-value' value = pollster.definitions.value_mapper. \ execute_value_mapping(value_to_be_mapped) self.assertEqual(expected_value, value) def test_get_samples_no_resources(self): pollster = dynamic_pollster.DynamicPollster( self.pollster_definition_only_required_fields) samples = pollster.get_samples(None, None, None) self.assertEqual(None, next(samples)) @mock.patch('ceilometer.polling.dynamic_pollster.' 'PollsterSampleGatherer.execute_request_get_samples') def test_get_samples_empty_samples(self, execute_request_get_samples_mock): execute_request_get_samples_mock.side_effect = [] pollster = dynamic_pollster.DynamicPollster( self.pollster_definition_only_required_fields) fake_manager = self.FakeManager() samples = pollster.get_samples( fake_manager, None, ["https://endpoint.server.name.com/"]) samples_list = list() try: for s in samples: samples_list.append(s) except RuntimeError as e: LOG.debug("Generator threw a StopIteration " "and we need to catch it [%s]." 
% e) self.assertEqual(0, len(samples_list)) def fake_sample_list(self, **kwargs): samples_list = list() samples_list.append( {'name': "sample5", 'volume': 5, 'description': "desc-sample-5", 'user_id': "924d1f77-5d75-4b96-a755-1774d6be17af", 'project_id': "6c7a0e87-7f2e-45d3-89ca-5a2dbba71a0e", 'id': "e335c317-dfdd-4f22-809a-625bd9a5992d" } ) samples_list.append( {'name': "sample1", 'volume': 2, 'description': "desc-sample-2", 'user_id': "20b5a704-b481-4603-a99e-2636c144b876", 'project_id': "6c7a0e87-7f2e-45d3-89ca-5a2dbba71a0e", 'id': "2e350554-6c05-4fda-8109-e47b595a714c" } ) return samples_list @mock.patch.object( dynamic_pollster.PollsterSampleGatherer, 'execute_request_get_samples', fake_sample_list) def test_get_samples(self): pollster = dynamic_pollster.DynamicPollster( self.pollster_definition_only_required_fields) fake_manager = self.FakeManager() samples = pollster.get_samples( fake_manager, None, ["https://endpoint.server.name.com/"]) samples_list = list(samples) self.assertEqual(2, len(samples_list)) first_element = [ s for s in samples_list if s.resource_id == "e335c317-dfdd-4f22-809a-625bd9a5992d"][0] self.assertEqual(5, first_element.volume) self.assertEqual( "6c7a0e87-7f2e-45d3-89ca-5a2dbba71a0e", first_element.project_id) self.assertEqual( "924d1f77-5d75-4b96-a755-1774d6be17af", first_element.user_id) second_element = [ s for s in samples_list if s.resource_id == "2e350554-6c05-4fda-8109-e47b595a714c"][0] self.assertEqual(2, second_element.volume) self.assertEqual( "6c7a0e87-7f2e-45d3-89ca-5a2dbba71a0e", second_element.project_id) self.assertEqual( "20b5a704-b481-4603-a99e-2636c144b876", second_element.user_id) def test_retrieve_entries_from_response_response_is_a_list(self): pollster = dynamic_pollster.DynamicPollster( self.pollster_definition_only_required_fields) response = [{"object1-attr1": 1}, {"object1-attr2": 2}] entries = pollster.definitions.sample_gatherer. 
\ retrieve_entries_from_response(response, pollster.definitions) self.assertEqual(response, entries) def test_retrieve_entries_using_first_entry_from_response(self): self.pollster_definition_only_required_fields[ 'response_entries_key'] = "first" pollster = dynamic_pollster.DynamicPollster( self.pollster_definition_only_required_fields) first_entries_from_response = [{"object1-attr1": 1}, {"object1-attr2": 2}] second_entries_from_response = [{"object1-attr3": 3}, {"object1-attr4": 33}] response = {"first": first_entries_from_response, "second": second_entries_from_response} entries = pollster.definitions.sample_gatherer.\ retrieve_entries_from_response( response, pollster.definitions.configurations) self.assertEqual(first_entries_from_response, entries) def test_retrieve_entries_using_second_entry_from_response(self): self.pollster_definition_only_required_fields[ 'response_entries_key'] = "second" pollster = dynamic_pollster.DynamicPollster( self.pollster_definition_only_required_fields) first_entries_from_response = [{"object1-attr1": 1}, {"object1-attr2": 2}] second_entries_from_response = [{"object1-attr3": 3}, {"object1-attr4": 33}] response = {"first": first_entries_from_response, "second": second_entries_from_response} entries = pollster.definitions.sample_gatherer. 
\ retrieve_entries_from_response(response, pollster.definitions.configurations) self.assertEqual(second_entries_from_response, entries) def test_retrieve_attribute_nested_value_non_nested_key(self): key = "key" value = [{"d": 2}, {"g": {"h": "val"}}] json_object = {"key": value} pollster = dynamic_pollster.DynamicPollster( self.pollster_definition_only_required_fields) returned_value = pollster.definitions.sample_extractor.\ retrieve_attribute_nested_value(json_object, key) self.assertEqual(value, returned_value) def test_retrieve_attribute_nested_value_nested_key(self): key = "key.subKey" value1 = [{"d": 2}, {"g": {"h": "val"}}] sub_value = [{"r": 245}, {"h": {"yu": "yu"}}] json_object = {"key": {"subKey": sub_value, "subkey2": value1}} pollster = dynamic_pollster.DynamicPollster( self.pollster_definition_only_required_fields) returned_value = pollster.definitions.sample_extractor. \ retrieve_attribute_nested_value(json_object, key) self.assertEqual(sub_value, returned_value) def test_retrieve_attribute_nested_value_with_operation_on_attribute(self): # spaces here are added on purpose at the end to make sure we # execute the strip in the code before the eval key = "key.subKey | value + 1|value / 2 | value * 3" value1 = [{"d": 2}, {"g": {"h": "val"}}] sub_value = 1 expected_value_after_operations = 3 json_object = {"key": {"subKey": sub_value, "subkey2": value1}} pollster = dynamic_pollster.DynamicPollster( self.pollster_definition_only_required_fields) returned_value = pollster.definitions.sample_extractor.\ retrieve_attribute_nested_value(json_object, key) self.assertEqual(expected_value_after_operations, returned_value) def test_retrieve_attribute_nested_value_simulate_radosgw_processing(self): key = "user | value.split('$') | value[0] | value.strip()" json_object = {"categories": [ { "bytes_received": 0, "bytes_sent": 357088, "category": "complete_multipart", "ops": 472, "successful_ops": 472 }], "total": { "bytes_received": 206739531986, "bytes_sent": 
273793180, "ops": 119690, "successful_ops": 119682 }, "user": " 00ab8d7e76fc4$00ab8d7e76fc45a37776732" } expected_value_after_operations = "00ab8d7e76fc4" pollster = dynamic_pollster.DynamicPollster( self.pollster_definition_only_required_fields) returned_value = pollster.definitions.sample_extractor.\ retrieve_attribute_nested_value(json_object, key) self.assertEqual(expected_value_after_operations, returned_value) def fake_sample_multi_metric(self, **kwargs): multi_metric_sample_list = [ {"categories": [ { "bytes_received": 0, "bytes_sent": 0, "category": "create_bucket", "ops": 2, "successful_ops": 2 }, { "bytes_received": 0, "bytes_sent": 2120428, "category": "get_obj", "ops": 46, "successful_ops": 46 }, { "bytes_received": 0, "bytes_sent": 21484, "category": "list_bucket", "ops": 8, "successful_ops": 8 }, { "bytes_received": 6889056, "bytes_sent": 0, "category": "put_obj", "ops": 46, "successful_ops": 6 }], "total": { "bytes_received": 6889056, "bytes_sent": 2141912, "ops": 102, "successful_ops": 106 }, "user": "test-user"}] return multi_metric_sample_list @mock.patch.object( dynamic_pollster.PollsterSampleGatherer, 'execute_request_get_samples', fake_sample_multi_metric) def test_get_samples_multi_metric_pollster(self): pollster = dynamic_pollster.DynamicPollster( self.multi_metric_pollster_definition) fake_manager = self.FakeManager() samples = pollster.get_samples( fake_manager, None, ["https://endpoint.server.name.com/"]) samples_list = list(samples) self.assertEqual(4, len(samples_list)) create_bucket_sample = [ s for s in samples_list if s.name == "test-pollster.create_bucket"][0] get_obj_sample = [ s for s in samples_list if s.name == "test-pollster.get_obj"][0] list_bucket_sample = [ s for s in samples_list if s.name == "test-pollster.list_bucket"][0] put_obj_sample = [ s for s in samples_list if s.name == "test-pollster.put_obj"][0] self.assertEqual(2, create_bucket_sample.volume) self.assertEqual(46, get_obj_sample.volume) self.assertEqual(8, 
list_bucket_sample.volume) self.assertEqual(46, put_obj_sample.volume) def test_execute_request_get_samples_custom_ids(self): sample = {'user_id_attribute': "1", 'project_id_attribute': "2", 'resource_id_attribute': "3", 'user_id': "234", 'project_id': "2334", 'id': "35"} def internal_execute_request_get_samples_mock(self, **kwargs): class Response: @property def text(self): return json.dumps([sample]) def json(self): return [sample] return Response(), "url" original_method = dynamic_pollster.PollsterSampleGatherer.\ _internal_execute_request_get_samples try: dynamic_pollster.PollsterSampleGatherer. \ _internal_execute_request_get_samples = \ internal_execute_request_get_samples_mock self.pollster_definition_all_fields[ 'user_id_attribute'] = 'user_id_attribute' self.pollster_definition_all_fields[ 'project_id_attribute'] = 'project_id_attribute' self.pollster_definition_all_fields[ 'resource_id_attribute'] = 'resource_id_attribute' pollster = dynamic_pollster.DynamicPollster( self.pollster_definition_all_fields) params = {"d": "d"} response = pollster.definitions.sample_gatherer. \ execute_request_get_samples(**params) self.assertEqual(sample['user_id_attribute'], response[0]['user_id']) self.assertEqual(sample['project_id_attribute'], response[0]['project_id']) self.assertEqual(sample['resource_id_attribute'], response[0]['id']) finally: dynamic_pollster.PollsterSampleGatherer. \ _internal_execute_request_get_samples = original_method def test_retrieve_attribute_self_reference_sample(self): key = " . 
| value['key1']['subKey1'][0]['d'] if 'key1' in value else 0" sub_value1 = [{"d": 2}, {"g": {"h": "val"}}] sub_value2 = [{"r": 245}, {"h": {"yu": "yu"}}] json_object = {"key1": {"subKey1": sub_value1}, "key2": {"subkey2": sub_value2}} pollster = dynamic_pollster.DynamicPollster( self.pollster_definition_only_required_fields) returned_value = pollster.definitions.sample_extractor.\ retrieve_attribute_nested_value(json_object, key) self.assertEqual(2, returned_value) del json_object['key1'] returned_value = pollster.definitions.sample_extractor.\ retrieve_attribute_nested_value(json_object, key) self.assertEqual(0, returned_value) def test_create_request_arguments_NonOpenStackApisSamplesGatherer(self): pollster_definition = { 'name': "test-pollster", 'sample_type': "gauge", 'unit': "test", 'value_attribute': "volume", 'url_path': "https://test.com/v1/test/endpoint/fake", "module": "someModule", "authentication_object": "objectAuthentication", "authentication_parameters": "authParam", "headers": [{"header1": "val1"}, {"header2": "val2"}]} pollster = dynamic_pollster.DynamicPollster(pollster_definition) request_args = pollster.definitions.sample_gatherer\ .create_request_arguments(pollster.definitions.configurations) self.assertIn("headers", request_args) self.assertEqual(2, len(request_args["headers"])) self.assertEqual(['header1', 'header2'], list(map(lambda h: list(h.keys())[0], request_args["headers"]))) self.assertEqual(['val1', 'val2'], list(map(lambda h: list(h.values())[0], request_args["headers"]))) self.assertNotIn("authenticated", request_args) def test_create_request_arguments_PollsterSampleGatherer(self): pollster_definition = copy.deepcopy( self.pollster_definition_only_required_fields) pollster_definition["headers"] = [ {"x-openstack-nova-api-version": "2.46"}, {"custom_header": "custom"}, {"some_other_header": "something"}] pollster = dynamic_pollster.DynamicPollster(pollster_definition) request_args = pollster.definitions.sample_gatherer\ 
.create_request_arguments(pollster.definitions.configurations) self.assertIn("headers", request_args) self.assertIn("authenticated", request_args) self.assertTrue(request_args["authenticated"]) self.assertEqual(3, len(request_args["headers"])) self.assertEqual(['x-openstack-nova-api-version', 'custom_header', "some_other_header"], list(map(lambda h: list(h.keys())[0], request_args["headers"]))) self.assertEqual(['2.46', 'custom', 'something'], list(map(lambda h: list(h.values())[0], request_args["headers"]))) def test_create_request_arguments_PollsterSampleGatherer_no_headers(self): pollster = dynamic_pollster.DynamicPollster( self.pollster_definition_only_required_fields) request_args =\ pollster.definitions.sample_gatherer.create_request_arguments( pollster.definitions.configurations) self.assertNotIn("headers", request_args) self.assertIn("authenticated", request_args) self.assertTrue(request_args["authenticated"]) @mock.patch('keystoneclient.v2_0.client.Client') def test_metadata_nested_objects(self, keystone_mock): generator = PagedSamplesGeneratorHttpRequestMock(samples_dict={ 'flavor': [{"name": "a", "ram": 1}, {"name": "b", "ram": 2}, {"name": "c", "ram": 3}, {"name": "d", "ram": 4}, {"name": "e", "ram": 5}, {"name": "f", "ram": 6}, {"name": "g", "ram": 7}, {"name": "h", "ram": 8}], 'name': ['s1', 's2', 's3', 's4', 's5', 's6', 's7', 's8'], 'state': ['Active', 'Error', 'Down', 'Active', 'Active', 'Migrating', 'Active', 'Error'] }, dict_name='servers', page_link_name='server_link') generator.generate_samples('http://test.com/v1/test-servers', { 'marker=c3': 3, 'marker=f6': 3 }, 2) keystone_mock.session.get.side_effect = generator.mock_request fake_manager = self.FakeManager(keystone=keystone_mock) pollster_definition = dict(self.multi_metric_pollster_definition) pollster_definition['name'] = 'test-pollster' pollster_definition['value_attribute'] = 'state' pollster_definition['url_path'] = 'v1/test-servers' pollster_definition['response_entries_key'] = 
'servers' pollster_definition['metadata_fields'] = ['flavor.name', 'flavor.ram'] pollster_definition['next_sample_url_attribute'] = \ 'server_link | filter(lambda v: v.get("rel") == "next", value) |' \ 'list(value)| value [0] | value.get("href")' pollster = dynamic_pollster.DynamicPollster(pollster_definition) samples = pollster.get_samples(fake_manager, None, ['http://test.com']) samples = list(samples) self.assertEqual(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'], list(map(lambda s: s.resource_metadata["flavor.name"], samples))) self.assertEqual(list(range(1, 9)), list(map(lambda s: s.resource_metadata["flavor.ram"], samples))) def test_get_request_linked_samples_url_endpoint_no_trailing_slash(self): pollster = dynamic_pollster.DynamicPollster( self.pollster_definition_only_required_fields) base_url = ( "http://test.com:8779/v1.0/1a2b3c4d5e1a2b3c4d5e1a2b3c4d5e1a" ) expected_url = urlparse.urljoin( base_url + "/", self.pollster_definition_only_required_fields[ 'url_path']) kwargs = {'resource': base_url} url = pollster.definitions.sample_gatherer\ .get_request_linked_samples_url( kwargs, pollster.definitions.configurations) self.assertEqual(expected_url, url) def test_get_request_linked_samples_url_endpoint_trailing_slash(self): pollster = dynamic_pollster.DynamicPollster( self.pollster_definition_only_required_fields) base_url = "http://test.com:9511/v1/" expected_url = urlparse.urljoin( base_url, self.pollster_definition_only_required_fields[ 'url_path']) kwargs = {'resource': base_url} url = pollster.definitions.sample_gatherer\ .get_request_linked_samples_url( kwargs, pollster.definitions.configurations) self.assertEqual(expected_url, url) def test_get_request_linked_samples_url_next_sample_url(self): pollster = dynamic_pollster.DynamicPollster( self.pollster_definition_only_required_fields) base_url = "http://test.com/something_that_we_do_not_care" expected_url = "http://test.com/next_page" kwargs = {'resource': base_url, 'next_sample_url': expected_url} url = 
pollster.definitions.sample_gatherer\ .get_request_linked_samples_url(kwargs, pollster.definitions) self.assertEqual(expected_url, url) def test_get_request_linked_samples_url_next_sample_only_url_path(self): pollster = dynamic_pollster.DynamicPollster( self.pollster_definition_only_required_fields) base_url = "http://test.com/something_that_we_do_not_care" expected_url = "http://test.com/next_page" kwargs = {'resource': base_url, 'next_sample_url': "/next_page"} url = pollster.definitions.sample_gatherer\ .get_request_linked_samples_url( kwargs, pollster.definitions.configurations) self.assertEqual(expected_url, url) def test_generate_sample_and_extract_metadata(self): definition = self.pollster_definition_only_required_fields.copy() definition['metadata_fields'] = ["metadata1", 'metadata2'] pollster = dynamic_pollster.DynamicPollster(definition) pollster_sample = {'metadata1': 'metadata1', 'metadata2': 'metadata2', 'value': 1} sample = pollster.definitions.sample_extractor.generate_sample( pollster_sample, pollster.definitions.configurations, manager=mock.Mock(), conf={}) self.assertEqual(1, sample.volume) self.assertEqual(2, len(sample.resource_metadata)) self.assertEqual('metadata1', sample.resource_metadata['metadata1']) self.assertEqual('metadata2', sample.resource_metadata['metadata2']) def test_generate_sample_and_extract_metadata_false_value(self): definition = self.pollster_definition_only_required_fields.copy() definition['metadata_fields'] = ["metadata1", 'metadata2', 'metadata3_false'] pollster = dynamic_pollster.DynamicPollster(definition) pollster_sample = {'metadata1': 'metadata1', 'metadata2': 'metadata2', 'metadata3_false': False, 'value': 1} sample = pollster.definitions.sample_extractor.generate_sample( pollster_sample, pollster.definitions.configurations, manager=mock.Mock(), conf={}) self.assertEqual(1, sample.volume) self.assertEqual(3, len(sample.resource_metadata)) self.assertEqual('metadata1', sample.resource_metadata['metadata1']) 
self.assertEqual('metadata2', sample.resource_metadata['metadata2']) self.assertIs(False, sample.resource_metadata['metadata3_false']) def test_generate_sample_and_extract_metadata_none_value(self): definition = self.pollster_definition_only_required_fields.copy() definition['metadata_fields'] = ["metadata1", 'metadata2', 'metadata3'] pollster = dynamic_pollster.DynamicPollster(definition) pollster_sample = {'metadata1': 'metadata1', 'metadata2': 'metadata2', 'metadata3': None, 'value': 1} sample = pollster.definitions.sample_extractor.generate_sample( pollster_sample, pollster.definitions.configurations, manager=mock.Mock(), conf={}) self.assertEqual(1, sample.volume) self.assertEqual(3, len(sample.resource_metadata)) self.assertEqual('metadata1', sample.resource_metadata['metadata1']) self.assertEqual('metadata2', sample.resource_metadata['metadata2']) self.assertIsNone(sample.resource_metadata['metadata3']) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/polling/test_heartbeat.py0000664000175100017510000001034215033033467025262 0ustar00mylesmyles# # Copyright 2024 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for ceilometer polling heartbeat process""" import multiprocessing import shutil import tempfile from oslo_utils import timeutils from unittest import mock from ceilometer.polling import manager from ceilometer import service from ceilometer.tests import base class TestHeartBeatManagert(base.BaseTestCase): def setUp(self): super().setUp() self.conf = service.prepare_service([], []) self.tmpdir = tempfile.mkdtemp() self.queue = multiprocessing.Queue() self.mgr = manager.AgentManager(0, self.conf, namespaces='central', queue=self.queue) def tearDown(self): super().tearDown() shutil.rmtree(self.tmpdir) def test_hb_not_configured(self): self.assertRaises(manager.HeartBeatException, manager.AgentHeartBeatManager, 0, self.conf, namespaces='ipmi', queue=self.queue) @mock.patch('ceilometer.polling.manager.LOG') def test_hb_startup(self, LOG): # activate heartbeat agent self.conf.set_override('heartbeat_socket_dir', self.tmpdir, group='polling') manager.AgentHeartBeatManager(0, self.conf, namespaces='compute', queue=self.queue) calls = [mock.call("Starting heartbeat child service. 
Listening" f" on {self.tmpdir}/ceilometer-compute.socket")] LOG.info.assert_has_calls(calls) @mock.patch('ceilometer.polling.manager.LOG') def test_hb_update(self, LOG): self.conf.set_override('heartbeat_socket_dir', self.tmpdir, group='polling') hb = manager.AgentHeartBeatManager(0, self.conf, namespaces='central', queue=self.queue) timestamp = timeutils.utcnow().isoformat() self.queue.put_nowait({'timestamp': timestamp, 'pollster': 'test'}) hb._update_status() calls = [mock.call(f"Updated heartbeat for test ({timestamp})")] LOG.debug.assert_has_calls(calls) @mock.patch('ceilometer.polling.manager.LOG') def test_hb_send(self, LOG): with mock.patch('socket.socket') as FakeSocket: sub_skt = mock.Mock() sub_skt.sendall.return_value = None sub_skt.sendall.return_value = None skt = FakeSocket.return_value skt.bind.return_value = mock.Mock() skt.listen.return_value = mock.Mock() skt.accept.return_value = (sub_skt, "") self.conf.set_override('heartbeat_socket_dir', self.tmpdir, group='polling') hb = manager.AgentHeartBeatManager(0, self.conf, namespaces='central', queue=self.queue) timestamp = timeutils.utcnow().isoformat() self.queue.put_nowait({'timestamp': timestamp, 'pollster': 'test1'}) hb._update_status() self.queue.put_nowait({'timestamp': timestamp, 'pollster': 'test2'}) hb._update_status() # test status report hb._send_heartbeat() calls = [mock.call("Heartbeat status report requested " f"at {self.tmpdir}/ceilometer-central.socket"), mock.call("Reported heartbeat status:\n" f"test1 {timestamp}\n" f"test2 {timestamp}")] LOG.debug.assert_has_calls(calls) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/polling/test_manager.py0000664000175100017510000011545615033033467024751 0ustar00mylesmyles# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 Intel corp. 
# Copyright 2013 eNovance # Copyright 2014 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for ceilometer agent manager""" import copy import datetime import multiprocessing import shutil import tempfile from unittest import mock import fixtures from keystoneauth1 import exceptions as ka_exceptions from oslo_utils import timeutils from stevedore import extension from ceilometer.compute import discovery as nova_discover from ceilometer.polling.dynamic_pollster import DynamicPollster from ceilometer.polling.dynamic_pollster import \ NonOpenStackApisPollsterDefinition from ceilometer.polling.dynamic_pollster import SingleMetricPollsterDefinitions from ceilometer.polling import manager from ceilometer.polling import plugin_base from ceilometer import sample from ceilometer import service from ceilometer.tests import base def default_test_data(name='test'): return sample.Sample( name=name, type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'name': 'Pollster'}) class TestPollster(plugin_base.PollsterBase): test_data = default_test_data() discovery = None @property def default_discovery(self): return self.discovery def get_samples(self, manager, cache, resources): resources = resources or [] self.samples.append((manager, resources)) self.resources.extend(resources) c = copy.deepcopy(self.test_data) c.resource_metadata['resources'] = resources 
return [c] class PollingException(Exception): pass class TestPollsterBuilder(TestPollster): @classmethod def build_pollsters(cls, conf): return [('builder1', cls(conf)), ('builder2', cls(conf))] class TestManager(base.BaseTestCase): def setUp(self): super().setUp() self.conf = service.prepare_service([], []) def test_hash_of_set(self): x = ['a', 'b'] y = ['a', 'b', 'a'] z = ['a', 'c'] self.assertEqual(manager.hash_of_set(x), manager.hash_of_set(y)) self.assertNotEqual(manager.hash_of_set(x), manager.hash_of_set(z)) self.assertNotEqual(manager.hash_of_set(y), manager.hash_of_set(z)) def test_load_plugins(self): mgr = manager.AgentManager(0, self.conf, queue=multiprocessing.Queue()) self.assertIsNotNone(list(mgr.extensions)) @mock.patch('ceilometer.ipmi.pollsters.sensor.SensorPollster.__init__', mock.Mock(return_value=None)) def test_load_normal_plugins(self): mgr = manager.AgentManager(0, self.conf, namespaces=['ipmi'], queue=multiprocessing.Queue()) self.assertEqual(5, len(mgr.extensions)) # Skip loading pollster upon ImportError @mock.patch('ceilometer.ipmi.pollsters.sensor.SensorPollster.__init__', mock.Mock(side_effect=ImportError)) @mock.patch('ceilometer.polling.manager.LOG') def test_import_error_in_plugin(self, LOG): namespaces = ['ipmi'] manager.AgentManager(0, self.conf, namespaces=namespaces, queue=multiprocessing.Queue()) LOG.warning.assert_called_with( 'No valid pollsters can be loaded from %s namespaces', namespaces) # Exceptions other than ExtensionLoadError are propagated @mock.patch('ceilometer.ipmi.pollsters.sensor.SensorPollster.__init__', mock.Mock(side_effect=PollingException)) def test_load_exceptional_plugins(self): self.assertRaises(PollingException, manager.AgentManager, 0, self.conf, ['ipmi']) def test_builder(self): @staticmethod def fake_get_ext_mgr(namespace, *args, **kwargs): if 'builder' in namespace: return extension.ExtensionManager.make_test_instance( [ extension.Extension('builder', None, TestPollsterBuilder, None), ] ) else: 
return extension.ExtensionManager.make_test_instance( [ extension.Extension('test', None, None, TestPollster(self.conf)), ] ) with mock.patch.object(manager.AgentManager, '_get_ext_mgr', new=fake_get_ext_mgr): mgr = manager.AgentManager(0, self.conf, namespaces=['central']) self.assertEqual(3, len(mgr.extensions)) for ext in mgr.extensions: self.assertIn(ext.name, ['builder1', 'builder2', 'test']) self.assertIsInstance(ext.obj, TestPollster) class BatchTestPollster(TestPollster): test_data = default_test_data() discovery = None @property def default_discovery(self): return self.discovery def get_samples(self, manager, cache, resources): resources = resources or [] self.samples.append((manager, resources)) self.resources.extend(resources) for resource in resources: c = copy.deepcopy(self.test_data) c.timestamp = timeutils.utcnow().isoformat() c.resource_id = resource c.resource_metadata['resource'] = resource yield c class TestPollsterKeystone(TestPollster): def get_samples(self, manager, cache, resources): # Just try to use keystone, that will raise an exception manager.keystone.projects.list() class TestPollsterPollingException(TestPollster): discovery = 'test' polling_failures = 0 def get_samples(self, manager, cache, resources): func = super().get_samples sample = func(manager=manager, cache=cache, resources=resources) # Raise polling exception after 2 times self.polling_failures += 1 if self.polling_failures > 2: raise plugin_base.PollsterPermanentError(resources) return sample class TestDiscovery(plugin_base.DiscoveryBase): def discover(self, manager, param=None): self.params.append(param) return self.resources class TestDiscoveryException(plugin_base.DiscoveryBase): def discover(self, manager, param=None): self.params.append(param) raise Exception() class BaseAgent(base.BaseTestCase): class Pollster(TestPollster): samples = [] resources = [] test_data = default_test_data() class BatchPollster(BatchTestPollster): samples = [] resources = [] test_data = 
default_test_data() class PollsterAnother(TestPollster): samples = [] resources = [] test_data = default_test_data('testanother') class PollsterKeystone(TestPollsterKeystone): samples = [] resources = [] test_data = default_test_data('testkeystone') class PollsterPollingException(TestPollsterPollingException): samples = [] resources = [] test_data = default_test_data('testpollingexception') class Discovery(TestDiscovery): params = [] resources = [] class DiscoveryAnother(TestDiscovery): params = [] resources = [] @property def group_id(self): return 'another_group' class DiscoveryException(TestDiscoveryException): params = [] def setup_polling(self, poll_cfg=None, override_conf=None): name = self.cfg2file(poll_cfg or self.polling_cfg) conf_to_use = override_conf or self.CONF conf_to_use.set_override('cfg_file', name, group='polling') self.mgr.polling_manager = manager.PollingManager(conf_to_use) def create_manager(self): queue = multiprocessing.Queue() return manager.AgentManager(0, self.CONF, queue=queue) def fake_notifier_sample(self, ctxt, event_type, payload): for m in payload['samples']: del m['message_signature'] self.notified_samples.append(m) def setUp(self): super().setUp() self.notified_samples = [] self.notifier = mock.Mock() self.notifier.sample.side_effect = self.fake_notifier_sample self.useFixture(fixtures.MockPatch('oslo_messaging.Notifier', return_value=self.notifier)) self.useFixture(fixtures.MockPatch('keystoneclient.v2_0.client.Client', return_value=mock.Mock())) self.CONF = service.prepare_service([], []) self.CONF.set_override( 'cfg_file', self.path_get('etc/ceilometer/polling_all.yaml'), group='polling' ) self.polling_cfg = { 'sources': [{ 'name': 'test_polling', 'interval': 60, 'meters': ['test'], 'resources': ['test://']}] } def tearDown(self): self.PollsterKeystone.samples = [] self.PollsterKeystone.resources = [] self.PollsterPollingException.samples = [] self.PollsterPollingException.resources = [] self.Pollster.samples = [] 
self.Pollster.discovery = [] self.PollsterAnother.samples = [] self.PollsterAnother.discovery = [] self.Pollster.resources = [] self.PollsterAnother.resources = [] self.Discovery.params = [] self.DiscoveryAnother.params = [] self.DiscoveryException.params = [] self.Discovery.resources = [] self.DiscoveryAnother.resources = [] super().tearDown() def create_extension_list(self): return [extension.Extension('test', None, None, self.Pollster(self.CONF), ), extension.Extension('testbatch', None, None, self.BatchPollster(self.CONF), ), extension.Extension('testanother', None, None, self.PollsterAnother(self.CONF), ), extension.Extension('testkeystone', None, None, self.PollsterKeystone(self.CONF), ), extension.Extension('testpollingexception', None, None, self.PollsterPollingException(self.CONF), ) ] def create_discoveries(self): return extension.ExtensionManager.make_test_instance( [ extension.Extension( 'testdiscovery', None, None, self.Discovery(self.CONF), ), extension.Extension( 'testdiscoveryanother', None, None, self.DiscoveryAnother(self.CONF), ), extension.Extension( 'testdiscoveryexception', None, None, self.DiscoveryException(self.CONF), ), ], ) class TestPollingAgent(BaseAgent): def setUp(self): super().setUp() self.mgr = self.create_manager() self.mgr.extensions = self.create_extension_list() ks_client = mock.Mock(auth_token='fake_token') ks_client.projects.get.return_value = mock.Mock( name='admin', id='4465ecd1438b4d23a866cf8447387a7b' ) ks_client.users.get.return_value = mock.Mock( name='admin', id='c0c935468e654d5a8baae1a08adf4dfb' ) self.useFixture(fixtures.MockPatch( 'ceilometer.keystone_client.get_client', return_value=ks_client)) self.ks_client = ks_client self.setup_polling() @mock.patch('ceilometer.polling.manager.PollingManager') def test_start(self, poll_manager): self.mgr.setup_polling_tasks = mock.MagicMock() self.mgr.run() poll_manager.assert_called_once_with(self.CONF) self.mgr.setup_polling_tasks.assert_called_once_with() 
self.mgr.terminate() def test_setup_polling_tasks(self): polling_tasks = self.mgr.setup_polling_tasks() self.assertEqual(1, len(polling_tasks)) self.assertIn(60, polling_tasks.keys()) per_task_resources = polling_tasks[60].resources self.assertEqual(1, len(per_task_resources)) self.assertEqual(set(self.polling_cfg['sources'][0]['resources']), set(per_task_resources['test_polling-test'].get({}))) def test_setup_polling_tasks_multiple_interval(self): self.polling_cfg['sources'].append({ 'name': 'test_polling_1', 'interval': 10, 'meters': ['test'], 'resources': ['test://'], }) self.setup_polling() polling_tasks = self.mgr.setup_polling_tasks() self.assertEqual(2, len(polling_tasks)) self.assertIn(60, polling_tasks.keys()) self.assertIn(10, polling_tasks.keys()) def test_setup_polling_tasks_mismatch_counter(self): self.polling_cfg['sources'].append({ 'name': 'test_polling_1', 'interval': 10, 'meters': ['test_invalid'], 'resources': ['invalid://'], }) polling_tasks = self.mgr.setup_polling_tasks() self.assertEqual(1, len(polling_tasks)) self.assertIn(60, polling_tasks.keys()) self.assertNotIn(10, polling_tasks.keys()) @mock.patch('glob.glob') @mock.patch('ceilometer.declarative.load_definitions') def test_setup_polling_dynamic_pollster_namespace(self, load_mock, glob_mock): glob_mock.return_value = ['test.yml'] load_mock.return_value = [{ 'name': "test.dynamic.pollster", 'namespaces': "dynamic", 'sample_type': 'gauge', 'unit': 'test', 'endpoint_type': 'test', 'url_path': 'test', 'value_attribute': 'test' }, { 'name': "test.compute.central.pollster", 'sample_type': 'gauge', 'namespaces': ["compute", "central"], 'unit': 'test', 'endpoint_type': 'test', 'url_path': 'test', 'value_attribute': 'test' }, { 'name': "test.compute.pollster", 'namespaces': ["compute"], 'sample_type': 'gauge', 'unit': 'test', 'endpoint_type': 'test', 'url_path': 'test', 'value_attribute': 'test' }, { 'name': "test.central.pollster", 'sample_type': 'gauge', 'unit': 'test', 'endpoint_type': 'test', 
'url_path': 'test', 'value_attribute': 'test' }] mgr = manager.AgentManager(0, self.CONF, namespaces=['dynamic']) self.assertEqual(len(mgr.extensions), 1) self.assertEqual( mgr.extensions[0].definitions.configurations['name'], 'test.dynamic.pollster') mgr = manager.AgentManager(0, self.CONF) self.assertEqual( mgr.extensions[-3].definitions.configurations['name'], 'test.compute.central.pollster') self.assertEqual( mgr.extensions[-2].definitions.configurations['name'], 'test.compute.pollster') self.assertEqual( mgr.extensions[-1].definitions.configurations['name'], 'test.central.pollster') mgr = manager.AgentManager(0, self.CONF, namespaces=['compute']) self.assertEqual( mgr.extensions[-2].definitions.configurations['name'], 'test.compute.central.pollster') self.assertEqual( mgr.extensions[-1].definitions.configurations['name'], 'test.compute.pollster') mgr = manager.AgentManager(0, self.CONF, ['central']) self.assertEqual( mgr.extensions[-2].definitions.configurations['name'], 'test.compute.central.pollster') self.assertEqual( mgr.extensions[-1].definitions.configurations['name'], 'test.central.pollster') def test_setup_polling_task_same_interval(self): self.polling_cfg['sources'].append({ 'name': 'test_polling_1', 'interval': 60, 'meters': ['testanother'], 'resources': ['testanother://'], }) self.setup_polling() polling_tasks = self.mgr.setup_polling_tasks() self.assertEqual(1, len(polling_tasks)) pollsters = polling_tasks.get(60).pollster_matches self.assertEqual(2, len(pollsters)) per_task_resources = polling_tasks[60].resources self.assertEqual(2, len(per_task_resources)) key = 'test_polling-test' self.assertEqual(set(self.polling_cfg['sources'][0]['resources']), set(per_task_resources[key].get({}))) key = 'test_polling_1-testanother' self.assertEqual(set(self.polling_cfg['sources'][1]['resources']), set(per_task_resources[key].get({}))) def _verify_discovery_params(self, expected): self.assertEqual(expected, self.Discovery.params) self.assertEqual(expected, 
self.DiscoveryAnother.params) self.assertEqual(expected, self.DiscoveryException.params) def _do_test_per_pollster_discovery(self, discovered_resources, static_resources): self.Pollster.discovery = 'testdiscovery' self.mgr.discoveries = self.create_discoveries() self.Discovery.resources = discovered_resources self.DiscoveryAnother.resources = [d[::-1] for d in discovered_resources] if static_resources: # just so we can test that static + pre_polling amalgamated # override per_pollster self.polling_cfg['sources'][0]['discovery'] = [ 'testdiscoveryanother', 'testdiscoverynonexistent', 'testdiscoveryexception'] self.polling_cfg['sources'][0]['resources'] = static_resources self.setup_polling() polling_tasks = self.mgr.setup_polling_tasks() self.mgr.interval_task(polling_tasks.get(60)) if static_resources: self.assertEqual(set(static_resources + self.DiscoveryAnother.resources), set(self.Pollster.resources)) else: self.assertEqual(set(self.Discovery.resources), set(self.Pollster.resources)) # Make sure no duplicated resource from discovery for x in self.Pollster.resources: self.assertEqual(1, self.Pollster.resources.count(x)) def test_per_pollster_discovery(self): self._do_test_per_pollster_discovery(['discovered_1', 'discovered_2'], []) def test_per_pollster_discovery_overridden_by_per_polling_discovery(self): # ensure static+per_source_discovery overrides per_pollster_discovery self._do_test_per_pollster_discovery(['discovered_1', 'discovered_2'], ['static_1', 'static_2']) def test_per_pollster_discovery_duplicated(self): self._do_test_per_pollster_discovery(['dup', 'discovered_1', 'dup'], []) def test_per_pollster_discovery_overridden_by_duplicated_static(self): self._do_test_per_pollster_discovery(['discovered_1', 'discovered_2'], ['static_1', 'dup', 'dup']) def test_per_pollster_discovery_caching(self): # ensure single discovery associated with multiple pollsters # only called once per polling cycle discovered_resources = ['discovered_1', 'discovered_2'] 
self.Pollster.discovery = 'testdiscovery' self.PollsterAnother.discovery = 'testdiscovery' self.mgr.discoveries = self.create_discoveries() self.Discovery.resources = discovered_resources self.polling_cfg['sources'][0]['meters'].append('testanother') self.polling_cfg['sources'][0]['resources'] = [] self.setup_polling() polling_tasks = self.mgr.setup_polling_tasks() self.mgr.interval_task(polling_tasks.get(60)) self.assertEqual(1, len(self.Discovery.params)) self.assertEqual(discovered_resources, self.Pollster.resources) self.assertEqual(discovered_resources, self.PollsterAnother.resources) def _do_test_per_polling_discovery(self, discovered_resources, static_resources): self.mgr.discoveries = self.create_discoveries() self.Discovery.resources = discovered_resources self.DiscoveryAnother.resources = [d[::-1] for d in discovered_resources] self.polling_cfg['sources'][0]['discovery'] = [ 'testdiscovery', 'testdiscoveryanother', 'testdiscoverynonexistent', 'testdiscoveryexception'] self.polling_cfg['sources'][0]['resources'] = static_resources self.setup_polling() polling_tasks = self.mgr.setup_polling_tasks() self.mgr.interval_task(polling_tasks.get(60)) discovery = self.Discovery.resources + self.DiscoveryAnother.resources # compare resource lists modulo ordering self.assertEqual(set(static_resources + discovery), set(self.Pollster.resources)) # Make sure no duplicated resource from discovery for x in self.Pollster.resources: self.assertEqual(1, self.Pollster.resources.count(x)) def test_per_polling_discovery_discovered_only(self): self._do_test_per_polling_discovery(['discovered_1', 'discovered_2'], []) def test_per_polling_discovery_static_only(self): self._do_test_per_polling_discovery([], ['static_1', 'static_2']) def test_per_polling_discovery_discovered_augmented_by_static(self): self._do_test_per_polling_discovery(['discovered_1', 'discovered_2'], ['static_1', 'static_2']) def test_per_polling_discovery_discovered_duplicated_static(self): 
self._do_test_per_polling_discovery(['discovered_1', 'pud'], ['dup', 'static_1', 'dup']) def test_multiple_pollings_different_static_resources(self): # assert that the individual lists of static and discovered resources # for each polling with a common interval are passed to individual # pollsters matching each polling self.polling_cfg['sources'][0]['resources'] = ['test://'] self.polling_cfg['sources'][0]['discovery'] = ['testdiscovery'] self.polling_cfg['sources'].append({ 'name': 'another_polling', 'interval': 60, 'meters': ['test'], 'resources': ['another://'], 'discovery': ['testdiscoveryanother'], }) self.mgr.discoveries = self.create_discoveries() self.Discovery.resources = ['discovered_1', 'discovered_2'] self.DiscoveryAnother.resources = ['discovered_3', 'discovered_4'] self.setup_polling() polling_tasks = self.mgr.setup_polling_tasks() self.assertEqual(1, len(polling_tasks)) self.assertIn(60, polling_tasks.keys()) self.mgr.interval_task(polling_tasks.get(60)) self.assertEqual([None], self.Discovery.params) self.assertEqual([None], self.DiscoveryAnother.params) self.assertEqual(2, len(self.Pollster.samples)) samples = self.Pollster.samples test_resources = ['test://', 'discovered_1', 'discovered_2'] another_resources = ['another://', 'discovered_3', 'discovered_4'] if samples[0][1] == test_resources: self.assertEqual(another_resources, samples[1][1]) elif samples[0][1] == another_resources: self.assertEqual(test_resources, samples[1][1]) else: self.fail('unexpected sample resources %s' % samples) def test_multiple_sources_different_discoverers(self): self.Discovery.resources = ['discovered_1', 'discovered_2'] self.DiscoveryAnother.resources = ['discovered_3', 'discovered_4'] sources = [{'name': 'test_source_1', 'interval': 60, 'meters': ['test'], 'discovery': ['testdiscovery']}, {'name': 'test_source_2', 'interval': 60, 'meters': ['testanother'], 'discovery': ['testdiscoveryanother']}] self.polling_cfg = {'sources': sources} self.mgr.discoveries = 
self.create_discoveries() self.setup_polling() polling_tasks = self.mgr.setup_polling_tasks() self.assertEqual(1, len(polling_tasks)) self.assertIn(60, polling_tasks.keys()) self.mgr.interval_task(polling_tasks.get(60)) self.assertEqual(1, len(self.Pollster.samples)) self.assertEqual(['discovered_1', 'discovered_2'], self.Pollster.resources) self.assertEqual(1, len(self.PollsterAnother.samples)) self.assertEqual(['discovered_3', 'discovered_4'], self.PollsterAnother.resources) @mock.patch('ceilometer.polling.manager.LOG') def test_polling_and_notify_with_resources(self, LOG): self.setup_polling() polling_task = list(self.mgr.setup_polling_tasks().values())[0] polling_task.poll_and_notify() LOG.info.assert_has_calls([ mock.call('Polling pollster %(poll)s in the context of %(src)s', {'poll': 'test', 'src': 'test_polling'}), mock.call('Finished polling pollster %(poll)s in the context ' 'of %(src)s', {'poll': 'test', 'src': 'test_polling'}) ]) LOG.debug.assert_has_calls([ mock.call('Polster heartbeat update: test') ]) @mock.patch('ceilometer.polling.manager.LOG') def test_polling_and_notify_with_resources_with_threads(self, log_mock): conf_to_use = self.CONF conf_to_use.set_override( 'threads_to_process_pollsters', 4, group='polling') self.setup_polling(override_conf=conf_to_use) polling_task = list(self.mgr.setup_polling_tasks().values())[0] polling_task.poll_and_notify() log_mock.info.assert_has_calls([ mock.call('Polling pollster %(poll)s in the context of %(src)s', {'poll': 'test', 'src': 'test_polling'}), mock.call('Finished polling pollster %(poll)s in the context ' 'of %(src)s', {'poll': 'test', 'src': 'test_polling'}) ]) log_mock.debug.assert_has_calls([ mock.call('Polster heartbeat update: test') ]) # Even though we enabled 4 threads, we have only one metric configured. # Therefore, there should be only one call here. 
self.assertEqual(1, polling_task.manager.notifier.sample.call_count) @mock.patch('ceilometer.polling.manager.LOG') def test_skip_polling_and_notify_with_no_resources(self, LOG): self.polling_cfg['sources'][0]['resources'] = [] self.setup_polling() polling_task = list(self.mgr.setup_polling_tasks().values())[0] pollster = list(polling_task.pollster_matches['test_polling'])[0] polling_task.poll_and_notify() LOG.debug.assert_has_calls([mock.call( 'Skip pollster %(name)s, no %(p_context)s resources found ' 'this cycle', {'name': pollster.name, 'p_context': ''})]) @mock.patch('ceilometer.polling.manager.LOG') def test_skip_polling_polled_resources(self, LOG): self.polling_cfg['sources'].append({ 'name': 'test_polling_1', 'interval': 60, 'meters': ['test'], 'resources': ['test://'], }) self.setup_polling() polling_task = list(self.mgr.setup_polling_tasks().values())[0] polling_task.poll_and_notify() LOG.debug.assert_has_calls([mock.call( 'Skip pollster %(name)s, no %(p_context)s resources found ' 'this cycle', {'name': 'test', 'p_context': 'new'})]) @mock.patch('oslo_utils.timeutils.utcnow') def test_polling_samples_timestamp(self, mock_utc): polled_samples = [] timestamp = '2222-11-22T00:11:22.333333' def fake_send_notification(samples): polled_samples.extend(samples) mock_utc.return_value = datetime.datetime.strptime( timestamp, "%Y-%m-%dT%H:%M:%S.%f") self.setup_polling() polling_task = list(self.mgr.setup_polling_tasks().values())[0] polling_task._send_notification = mock.Mock( side_effect=fake_send_notification) polling_task.poll_and_notify() self.assertEqual(timestamp, polled_samples[0]['timestamp']) def test_get_sample_resources(self): polling_tasks = self.mgr.setup_polling_tasks() self.mgr.interval_task(list(polling_tasks.values())[0]) self.assertTrue(self.Pollster.resources) def test_when_keystone_fail(self): """Test for bug 1316532.""" self.useFixture(fixtures.MockPatch( 'keystoneclient.v2_0.client.Client', side_effect=ka_exceptions.ClientException)) poll_cfg = 
{ 'sources': [{ 'name': "test_keystone", 'interval': 10, 'meters': ['testkeystone'], 'resources': ['test://'], 'sinks': ['test_sink']}], 'sinks': [{ 'name': 'test_sink', 'publishers': ["test"]}] } self.setup_polling(poll_cfg) polling_tasks = self.mgr.setup_polling_tasks() self.mgr.interval_task(list(polling_tasks.values())[0]) self.assertFalse(self.PollsterKeystone.samples) self.assertFalse(self.notified_samples) @mock.patch('ceilometer.polling.manager.LOG') def test_polling_exception(self, LOG): source_name = 'test_pollingexception' res_list = ['test://'] poll_cfg = { 'sources': [{ 'name': source_name, 'interval': 10, 'meters': ['testpollingexception'], 'resources': res_list, 'sinks': ['test_sink']}], 'sinks': [{ 'name': 'test_sink', 'publishers': ["test"]}] } self.setup_polling(poll_cfg) polling_task = list(self.mgr.setup_polling_tasks().values())[0] pollster = list(polling_task.pollster_matches[source_name])[0] # 2 samples after 4 pollings, as pollster got disabled upon exception for x in range(0, 4): self.mgr.interval_task(polling_task) samples = self.notified_samples self.assertEqual(2, len(samples)) LOG.error.assert_called_once_with(( 'Prevent pollster %(name)s from ' 'polling %(res_list)s on source %(source)s anymore!'), dict(name=pollster.name, res_list=str(res_list), source=source_name)) @mock.patch('ceilometer.polling.manager.LOG') def test_polling_novalike_exception(self, LOG): source_name = 'test_pollingexception' poll_cfg = { 'sources': [{ 'name': source_name, 'interval': 10, 'meters': ['testpollingexception'], 'sinks': ['test_sink']}], 'sinks': [{ 'name': 'test_sink', 'publishers': ["test"]}] } self.setup_polling(poll_cfg) polling_task = list(self.mgr.setup_polling_tasks().values())[0] pollster = list(polling_task.pollster_matches[source_name])[0] with mock.patch.object(polling_task.manager, 'discover') as disco: # NOTE(gordc): polling error on 3rd poll for __ in range(4): disco.return_value = ( [nova_discover.NovaLikeServer(**{'id': 1})]) 
self.mgr.interval_task(polling_task) LOG.error.assert_called_once_with(( 'Prevent pollster %(name)s from ' 'polling %(res_list)s on source %(source)s anymore!'), dict(name=pollster.name, res_list="[]", source=source_name)) def test_batching_polled_samples_disable_batch(self): self.CONF.set_override('batch_size', 0, group='polling') self._batching_samples(4, 4) def test_batching_polled_samples_batch_size(self): self.CONF.set_override('batch_size', 2, group='polling') self._batching_samples(4, 2) def test_batching_polled_samples_default(self): self._batching_samples(4, 1) def _batching_samples(self, expected_samples, call_count): poll_cfg = { 'sources': [{ 'name': 'test_pipeline', 'interval': 1, 'meters': ['testbatch'], 'resources': ['alpha', 'beta', 'gamma', 'delta'], 'sinks': ['test_sink']}], 'sinks': [{ 'name': 'test_sink', 'publishers': ["test"]}] } self.setup_polling(poll_cfg) polling_task = list(self.mgr.setup_polling_tasks().values())[0] self.mgr.interval_task(polling_task) samples = self.notified_samples self.assertEqual(expected_samples, len(samples)) self.assertEqual(call_count, self.notifier.sample.call_count) class TestPollingAgentPartitioned(BaseAgent): def setUp(self): super().setUp() self.tempdir = tempfile.mkdtemp() self.CONF.set_override("backend_url", "file://%s" % self.tempdir, "coordination") self.addCleanup(shutil.rmtree, self.tempdir, ignore_errors=True) self.hashring = mock.MagicMock() self.hashring.belongs_to_self = mock.MagicMock() self.hashring.belongs_to_self.return_value = True self.mgr = self.create_manager() self.mgr.extensions = self.create_extension_list() self.mgr.hashrings = mock.MagicMock() self.mgr.hashrings.__getitem__.return_value = self.hashring self.setup_polling() def test_discovery_partitioning(self): discovered_resources = ['discovered_1', 'discovered_2'] self.Pollster.discovery = 'testdiscovery' self.mgr.discoveries = self.create_discoveries() self.Discovery.resources = discovered_resources 
self.polling_cfg['sources'][0]['discovery'] = [ 'testdiscovery', 'testdiscoveryanother', 'testdiscoverynonexistent', 'testdiscoveryexception'] self.polling_cfg['sources'][0]['resources'] = [] self.setup_polling() polling_tasks = self.mgr.setup_polling_tasks() self.mgr.interval_task(polling_tasks.get(60)) self.hashring.belongs_to_self.assert_has_calls( [mock.call('discovered_1'), mock.call('discovered_2')]) def test_discovery_partitioning_unhashable(self): discovered_resources = [{'unhashable': True}] self.Pollster.discovery = 'testdiscovery' self.mgr.discoveries = self.create_discoveries() self.Discovery.resources = discovered_resources self.polling_cfg['sources'][0]['discovery'] = [ 'testdiscovery', 'testdiscoveryanother', 'testdiscoverynonexistent', 'testdiscoveryexception'] self.polling_cfg['sources'][0]['resources'] = [] self.setup_polling() polling_tasks = self.mgr.setup_polling_tasks() self.mgr.interval_task(polling_tasks.get(60)) self.hashring.belongs_to_self.assert_has_calls( [mock.call('{\'unhashable\': True}')]) def test_static_resources_partitioning(self): static_resources = ['static_1', 'static_2'] static_resources2 = ['static_3', 'static_4'] self.polling_cfg['sources'][0]['resources'] = static_resources self.polling_cfg['sources'].append({ 'name': 'test_polling2', 'interval': 60, 'meters': ['test', 'test2'], 'resources': static_resources2, }) # have one polling without static resources defined self.polling_cfg['sources'].append({ 'name': 'test_polling3', 'interval': 60, 'meters': ['test', 'test2'], 'resources': [], }) self.setup_polling() polling_tasks = self.mgr.setup_polling_tasks() self.mgr.interval_task(polling_tasks.get(60)) self.hashring.belongs_to_self.assert_has_calls([ mock.call('static_1'), mock.call('static_2'), mock.call('static_3'), mock.call('static_4'), ], any_order=True) def test_instantiate_dynamic_pollster_standard_pollster(self): pollster_definition_only_required_fields = { 'name': "test-pollster", 'sample_type': "gauge", 'unit': 
"test", 'value_attribute': "volume", 'endpoint_type': "test", 'url_path': "v1/test/endpoint/fake"} pollster = DynamicPollster(pollster_definition_only_required_fields) self.assertIsInstance(pollster.definitions, SingleMetricPollsterDefinitions) def test_instantiate_dynamic_pollster_non_openstack_api(self): pollster_definition_only_required_fields = { 'name': "test-pollster", 'sample_type': "gauge", 'unit': "test", 'value_attribute': "volume", 'url_path': "v1/test/endpoint/fake", 'module': "module-name", 'authentication_object': "authentication_object"} pollster = DynamicPollster(pollster_definition_only_required_fields) self.assertIsInstance(pollster.definitions, NonOpenStackApisPollsterDefinition) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/polling/test_non_openstack_credentials_discovery.py0000664000175100017510000000761015033033467032634 0ustar00mylesmyles# Copyright 2014-2015 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from oslotest import base import requests from ceilometer.polling.discovery.endpoint import EndpointDiscovery from ceilometer.polling.discovery.non_openstack_credentials_discovery import \ NonOpenStackCredentialsDiscovery class TestNonOpenStackCredentialsDiscovery(base.BaseTestCase): class FakeResponse: status_code = None json_object = None _content = "" def json(self): return self.json_object def raise_for_status(self): raise requests.HTTPError("Mock HTTP error.", response=self) class FakeManager: def __init__(self, keystone_client_mock): self._keystone = keystone_client_mock def setUp(self): super().setUp() self.discovery = NonOpenStackCredentialsDiscovery(None) def test_discover_no_parameters(self): result = self.discovery.discover(None, None) self.assertEqual(['No secrets found'], result) result = self.discovery.discover(None, "") self.assertEqual(['No secrets found'], result) def test_discover_no_barbican_endpoint(self): def discover_mock(self, manager, param=None): return [] original_discover_method = EndpointDiscovery.discover EndpointDiscovery.discover = discover_mock result = self.discovery.discover(None, "param") self.assertEqual(['No secrets found'], result) EndpointDiscovery.discover = original_discover_method @mock.patch('keystoneclient.v2_0.client.Client') def test_discover_error_response(self, client_mock): def discover_mock(self, manager, param=None): return ["barbican_url"] original_discover_method = EndpointDiscovery.discover EndpointDiscovery.discover = discover_mock for http_status_code in requests.status_codes._codes.keys(): if http_status_code < 400: continue return_value = self.FakeResponse() return_value.status_code = http_status_code return_value.json_object = {} client_mock.session.get.return_value = return_value exception = self.assertRaises( requests.HTTPError, self.discovery.discover, manager=self.FakeManager(client_mock), param="param") self.assertEqual("Mock HTTP error.", str(exception)) 
EndpointDiscovery.discover = original_discover_method @mock.patch('keystoneclient.v2_0.client.Client') def test_discover_response_ok(self, client_mock): discover_mock = mock.MagicMock() discover_mock.return_value = ["barbican_url"] original_discover_method = EndpointDiscovery.discover EndpointDiscovery.discover = discover_mock return_value = self.FakeResponse() return_value.status_code = requests.codes.ok return_value.json_object = {} return_value._content = "content" client_mock.session.get.return_value = return_value fake_manager = self.FakeManager(client_mock) response = self.discovery.discover(manager=fake_manager, param="param") self.assertEqual(["content"], response) discover_mock.assert_has_calls([ mock.call(fake_manager, "key-manager")]) EndpointDiscovery.discover = original_discover_method ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/polling/test_non_openstack_dynamic_pollster.py0000664000175100017510000004614515033033467031626 0ustar00mylesmyles# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for Non-OpenStack dynamic pollsters """ import copy import json import sys from unittest import mock from oslotest import base import requests from urllib import parse as urlparse from ceilometer.declarative import DynamicPollsterDefinitionException from ceilometer.declarative import NonOpenStackApisDynamicPollsterException from ceilometer.polling.dynamic_pollster import DynamicPollster from ceilometer.polling.dynamic_pollster import MultiMetricPollsterDefinitions from ceilometer.polling.dynamic_pollster import \ NonOpenStackApisPollsterDefinition from ceilometer.polling.dynamic_pollster import NonOpenStackApisSamplesGatherer from ceilometer.polling.dynamic_pollster import PollsterSampleGatherer from ceilometer.polling.dynamic_pollster import SingleMetricPollsterDefinitions REQUIRED_POLLSTER_FIELDS = ['name', 'sample_type', 'unit', 'value_attribute', 'url_path', 'module', 'authentication_object'] OPTIONAL_POLLSTER_FIELDS = ['metadata_fields', 'skip_sample_values', 'value_mapping', 'default_value', 'metadata_mapping', 'preserve_mapped_metadata', 'response_entries_key', 'user_id_attribute', 'resource_id_attribute', 'barbican_secret_id', 'authentication_parameters', 'project_id_attribute'] ALL_POLLSTER_FIELDS = REQUIRED_POLLSTER_FIELDS + OPTIONAL_POLLSTER_FIELDS def fake_sample_multi_metric(self, **kwargs): multi_metric_sample_list = [ {"user_id": "UID-U007", "project_id": "UID-P007", "id": "UID-007", "categories": [ { "bytes_received": 0, "bytes_sent": 0, "category": "create_bucket", "ops": 2, "successful_ops": 2 }, { "bytes_received": 0, "bytes_sent": 2120428, "category": "get_obj", "ops": 46, "successful_ops": 46 }, { "bytes_received": 0, "bytes_sent": 21484, "category": "list_bucket", "ops": 8, "successful_ops": 8 }, { "bytes_received": 6889056, "bytes_sent": 0, "category": "put_obj", "ops": 46, "successful_ops": 6 }], "total": { "bytes_received": 6889056, "bytes_sent": 2141912, "ops": 102, "successful_ops": 106 }, "user": "test-user"}] return 
multi_metric_sample_list class TestNonOpenStackApisDynamicPollster(base.BaseTestCase): class FakeManager: _keystone = None class FakeResponse: status_code = None json_object = None def json(self): return self.json_object def raise_for_status(self): raise requests.HTTPError("Mock HTTP error.", response=self) def setUp(self): super().setUp() self.pollster_definition_only_openstack_required_single_metric = { 'name': "test-pollster", 'sample_type': "gauge", 'unit': "test", 'value_attribute': "volume", "endpoint_type": "type", 'url_path': "v1/test/endpoint/fake"} self.pollster_definition_only_openstack_required_multi_metric = { 'name': "test-pollster.{category}", 'sample_type': "gauge", 'unit': "test", 'value_attribute': "[categories].ops", 'url_path': "v1/test/endpoint/fake", "endpoint_type": "type"} self.pollster_definition_only_required_fields = { 'name': "test-pollster", 'sample_type': "gauge", 'unit': "test", 'value_attribute': "volume", 'url_path': "http://server.com/v1/test/endpoint/fake", 'module': "module-name", 'authentication_object': "authentication_object"} self.pollster_definition_all_fields = { 'name': "test-pollster", 'sample_type': "gauge", 'unit': "test", 'value_attribute': "volume", 'url_path': "v1/test/endpoint/fake", 'module': "module-name", 'authentication_object': "authentication_object", 'user_id_attribute': 'user_id', 'project_id_attribute': 'project_id', 'resource_id_attribute': 'id', 'barbican_secret_id': 'barbican_id', 'authentication_parameters': 'parameters'} self.pollster_definition_all_fields_multi_metrics = { 'name': "test-pollster.{category}", 'sample_type': "gauge", 'unit': "test", 'value_attribute': "[categories].ops", 'url_path': "v1/test/endpoint/fake", 'module': "module-name", 'authentication_object': "authentication_object", 'user_id_attribute': 'user_id', 'project_id_attribute': 'project_id', 'resource_id_attribute': 'id', 'barbican_secret_id': 'barbican_id', 'authentication_parameters': 'parameters'} def test_all_fields(self): 
all_required = ['module', 'authentication_object', 'name', 'sample_type', 'unit', 'value_attribute', 'url_path'] all_optional = ['metadata_fields', 'skip_sample_values', 'value_mapping', 'default_value', 'metadata_mapping', 'preserve_mapped_metadata', 'user_id_attribute', 'project_id_attribute', 'resource_id_attribute', 'barbican_secret_id', 'authentication_parameters', 'response_entries_key'] + all_required for field in all_required: self.assertIn(field, REQUIRED_POLLSTER_FIELDS) for field in all_optional: self.assertIn(field, ALL_POLLSTER_FIELDS) def test_all_required_fields_exceptions(self): for key in REQUIRED_POLLSTER_FIELDS: if key == 'module': continue pollster_definition = copy.deepcopy( self.pollster_definition_only_required_fields) pollster_definition.pop(key) exception = self.assertRaises( DynamicPollsterDefinitionException, DynamicPollster, pollster_definition, None, [NonOpenStackApisPollsterDefinition]) self.assertEqual("Required fields ['%s'] not specified." % key, exception.brief_message) def test_set_default_values(self): pollster = DynamicPollster( self.pollster_definition_only_required_fields) pollster_definitions = pollster.pollster_definitions self.assertEqual("user_id", pollster_definitions['user_id_attribute']) self.assertEqual("project_id", pollster_definitions['project_id_attribute']) self.assertEqual("id", pollster_definitions['resource_id_attribute']) self.assertEqual('', pollster_definitions['barbican_secret_id']) self.assertEqual('', pollster_definitions['authentication_parameters']) def test_user_set_optional_parameters(self): pollster = DynamicPollster( self.pollster_definition_all_fields) pollster_definitions = pollster.pollster_definitions self.assertEqual('user_id', pollster_definitions['user_id_attribute']) self.assertEqual('project_id', pollster_definitions['project_id_attribute']) self.assertEqual('id', pollster_definitions['resource_id_attribute']) self.assertEqual('barbican_id', pollster_definitions['barbican_secret_id']) 
self.assertEqual('parameters', pollster_definitions['authentication_parameters']) def test_default_discovery_empty_secret_id(self): pollster = DynamicPollster( self.pollster_definition_only_required_fields) self.assertEqual("barbican:", pollster.definitions.sample_gatherer. default_discovery) def test_default_discovery_not_empty_secret_id(self): pollster = DynamicPollster( self.pollster_definition_all_fields) self.assertEqual("barbican:barbican_id", pollster.definitions. sample_gatherer.default_discovery) @mock.patch('requests.get') def test_internal_execute_request_get_samples_status_code_ok( self, get_mock): sys.modules['module-name'] = mock.MagicMock() pollster = DynamicPollster( self.pollster_definition_only_required_fields) return_value = self.FakeResponse() return_value.status_code = requests.codes.ok return_value.json_object = {} return_value.reason = "Ok" get_mock.return_value = return_value kwargs = {'resource': "credentials"} resp, url = pollster.definitions.sample_gatherer.\ _internal_execute_request_get_samples( pollster.definitions.configurations, **kwargs) self.assertEqual( self.pollster_definition_only_required_fields['url_path'], url) self.assertEqual(return_value, resp) @mock.patch('requests.get') def test_internal_execute_request_get_samples_status_code_not_ok( self, get_mock): sys.modules['module-name'] = mock.MagicMock() pollster = DynamicPollster( self.pollster_definition_only_required_fields) for http_status_code in requests.status_codes._codes.keys(): if http_status_code >= 400: return_value = self.FakeResponse() return_value.status_code = http_status_code return_value.json_object = {} return_value.reason = requests.status_codes._codes[ http_status_code][0] get_mock.return_value = return_value kwargs = {'resource': "credentials"} exception = self.assertRaises( NonOpenStackApisDynamicPollsterException, pollster.definitions.sample_gatherer. 
_internal_execute_request_get_samples, pollster.definitions.configurations, **kwargs) self.assertEqual( "NonOpenStackApisDynamicPollsterException" " None: Error while executing request[%s]." " Status[%s] and reason [%s]." % (self.pollster_definition_only_required_fields['url_path'], http_status_code, return_value.reason), str(exception)) def test_generate_new_attributes_in_sample_attribute_key_none(self): pollster = DynamicPollster( self.pollster_definition_only_required_fields) sample = {"test": "2"} new_key = "new-key" pollster.definitions.sample_gatherer. \ generate_new_attributes_in_sample(sample, None, new_key) pollster.definitions.sample_gatherer. \ generate_new_attributes_in_sample(sample, "", new_key) self.assertNotIn(new_key, sample) def test_generate_new_attributes_in_sample(self): pollster = DynamicPollster( self.pollster_definition_only_required_fields) sample = {"test": "2"} new_key = "new-key" pollster.definitions.sample_gatherer. \ generate_new_attributes_in_sample(sample, "test", new_key) self.assertIn(new_key, sample) self.assertEqual(sample["test"], sample[new_key]) def test_execute_request_get_samples_non_empty_keys(self): sample = {'user_id_attribute': "123456789", 'project_id_attribute': "dfghyt432345t", 'resource_id_attribute': "sdfghjt543"} def internal_execute_request_get_samples_mock( self, definitions, **kwargs): class Response: @property def text(self): return json.dumps([sample]) def json(self): return [sample] return Response(), "url" original_method = NonOpenStackApisSamplesGatherer. \ _internal_execute_request_get_samples try: NonOpenStackApisSamplesGatherer. 
\ _internal_execute_request_get_samples = \ internal_execute_request_get_samples_mock self.pollster_definition_all_fields[ 'user_id_attribute'] = 'user_id_attribute' self.pollster_definition_all_fields[ 'project_id_attribute'] = 'project_id_attribute' self.pollster_definition_all_fields[ 'resource_id_attribute'] = 'resource_id_attribute' pollster = DynamicPollster( self.pollster_definition_all_fields) params = {"d": "d"} response = pollster.definitions.sample_gatherer. \ execute_request_get_samples(**params) self.assertEqual(sample['user_id_attribute'], response[0]['user_id']) self.assertEqual(sample['project_id_attribute'], response[0]['project_id']) self.assertEqual(sample['resource_id_attribute'], response[0]['id']) finally: NonOpenStackApisSamplesGatherer. \ _internal_execute_request_get_samples = original_method def test_execute_request_get_samples_empty_keys(self): sample = {'user_id_attribute': "123456789", 'project_id_attribute': "dfghyt432345t", 'resource_id_attribute': "sdfghjt543"} def execute_request_get_samples_mock(self, **kwargs): samples = [sample] return samples DynamicPollster.execute_request_get_samples = \ execute_request_get_samples_mock self.pollster_definition_all_fields[ 'user_id_attribute'] = None self.pollster_definition_all_fields[ 'project_id_attribute'] = None self.pollster_definition_all_fields[ 'resource_id_attribute'] = None pollster = DynamicPollster( self.pollster_definition_all_fields) params = {"d": "d"} response = pollster.execute_request_get_samples(**params) self.assertNotIn('user_id', response[0]) self.assertNotIn('project_id', response[0]) self.assertNotIn('id', response[0]) def test_pollster_defintions_instantiation(self): def validate_definitions_instance(instance, isNonOpenstack, isMultiMetric, isSingleMetric): self.assertIs( isinstance(instance, NonOpenStackApisPollsterDefinition), isNonOpenstack) self.assertIs(isinstance(instance, MultiMetricPollsterDefinitions), isMultiMetric) self.assertIs( isinstance(instance, 
SingleMetricPollsterDefinitions), isSingleMetric) pollster = DynamicPollster( self.pollster_definition_all_fields_multi_metrics) validate_definitions_instance(pollster.definitions, True, True, False) pollster = DynamicPollster( self.pollster_definition_all_fields) validate_definitions_instance(pollster.definitions, True, False, True) pollster = DynamicPollster( self.pollster_definition_only_openstack_required_multi_metric) validate_definitions_instance(pollster.definitions, False, True, False) pollster = DynamicPollster( self.pollster_definition_only_openstack_required_single_metric) validate_definitions_instance(pollster.definitions, False, False, True) @mock.patch.object( PollsterSampleGatherer, 'execute_request_get_samples', fake_sample_multi_metric) def test_get_samples_multi_metric_pollster(self): pollster = DynamicPollster( self.pollster_definition_all_fields_multi_metrics) fake_manager = self.FakeManager() samples = pollster.get_samples( fake_manager, None, ["https://endpoint.server.name.com/"]) samples_list = list(samples) self.assertEqual(4, len(samples_list)) create_bucket_sample = [ s for s in samples_list if s.name == "test-pollster.create_bucket"][0] get_obj_sample = [ s for s in samples_list if s.name == "test-pollster.get_obj"][0] list_bucket_sample = [ s for s in samples_list if s.name == "test-pollster.list_bucket"][0] put_obj_sample = [ s for s in samples_list if s.name == "test-pollster.put_obj"][0] self.assertEqual(2, create_bucket_sample.volume) self.assertEqual(46, get_obj_sample.volume) self.assertEqual(8, list_bucket_sample.volume) self.assertEqual(46, put_obj_sample.volume) def test_get_request_linked_samples_url_no_next_sample(self): pollster = DynamicPollster( self.pollster_definition_only_required_fields) expected_url = self.pollster_definition_only_required_fields[ 'url_path'] kwargs = {'resource': "non-openstack-resource"} url = pollster.definitions.sample_gatherer\ .get_request_linked_samples_url( kwargs, 
pollster.definitions.configurations) self.assertEqual(expected_url, url) def test_get_request_linked_samples_url_next_sample_url(self): pollster = DynamicPollster( self.pollster_definition_only_required_fields) base_url = self.pollster_definition_only_required_fields['url_path'] next_sample_path = "/next_page" expected_url = urlparse.urljoin(base_url, next_sample_path) kwargs = {'next_sample_url': expected_url} url = pollster.definitions.sample_gatherer\ .get_request_linked_samples_url(kwargs, pollster.definitions) self.assertEqual(expected_url, url) def test_get_request_linked_samples_url_next_sample_only_url_path(self): pollster = DynamicPollster( self.pollster_definition_only_required_fields) base_url = self.pollster_definition_only_required_fields['url_path'] next_sample_path = "/next_page" expected_url = urlparse.urljoin(base_url, next_sample_path) kwargs = {'next_sample_url': next_sample_path} url = pollster.definitions.sample_gatherer\ .get_request_linked_samples_url( kwargs, pollster.definitions.configurations) self.assertEqual(expected_url, url) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7989414 ceilometer-24.1.0.dev59/ceilometer/tests/unit/publisher/0000775000175100017510000000000015033033521022232 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/publisher/__init__.py0000664000175100017510000000000015033033467024342 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/publisher/test_file.py0000664000175100017510000001305015033033467024572 0ustar00mylesmyles# # Copyright 2013-2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for ceilometer/publisher/file.py """ import json import logging.handlers import os import tempfile from oslo_utils import netutils from oslo_utils import timeutils from oslotest import base from ceilometer.publisher import file from ceilometer import sample from ceilometer import service class TestFilePublisher(base.BaseTestCase): test_data = [ sample.Sample( name='test', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='test2', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='test2', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), ] def setUp(self): super().setUp() self.CONF = service.prepare_service([], []) def test_file_publisher_maxbytes(self): # Test valid configurations tempdir = tempfile.mkdtemp() name = '%s/log_file' % tempdir parsed_url = netutils.urlsplit('file://%s?max_bytes=50&backup_count=3' % name) publisher = file.FilePublisher(self.CONF, parsed_url) publisher.publish_samples(self.test_data) handler = publisher.publisher_logger.handlers[0] self.assertIsInstance(handler, logging.handlers.RotatingFileHandler) self.assertEqual([50, name, 3], 
[handler.maxBytes, handler.baseFilename, handler.backupCount]) # The rotating file gets created since only allow 50 bytes. self.assertTrue(os.path.exists('%s.1' % name)) def test_file_publisher(self): # Test missing max bytes, backup count configurations tempdir = tempfile.mkdtemp() name = '%s/log_file_plain' % tempdir parsed_url = netutils.urlsplit('file://%s' % name) publisher = file.FilePublisher(self.CONF, parsed_url) publisher.publish_samples(self.test_data) handler = publisher.publisher_logger.handlers[0] self.assertIsInstance(handler, logging.handlers.RotatingFileHandler) self.assertEqual([0, name, 0], [handler.maxBytes, handler.baseFilename, handler.backupCount]) # Test the content is corrected saved in the file self.assertTrue(os.path.exists(name)) with open(name) as f: content = f.read() for sample_item in self.test_data: self.assertIn(sample_item.id, content) self.assertIn(sample_item.timestamp, content) def test_file_publisher_invalid(self): # Test invalid max bytes, backup count configurations tempdir = tempfile.mkdtemp() parsed_url = netutils.urlsplit( 'file://%s/log_file_bad' '?max_bytes=yus&backup_count=5y' % tempdir) publisher = file.FilePublisher(self.CONF, parsed_url) publisher.publish_samples(self.test_data) self.assertIsNone(publisher.publisher_logger) def test_file_publisher_json(self): tempdir = tempfile.mkdtemp() name = '%s/log_file_json' % tempdir parsed_url = netutils.urlsplit('file://%s?json' % name) publisher = file.FilePublisher(self.CONF, parsed_url) publisher.publish_samples(self.test_data) handler = publisher.publisher_logger.handlers[0] self.assertIsInstance(handler, logging.handlers.RotatingFileHandler) self.assertEqual([0, name, 0], [handler.maxBytes, handler.baseFilename, handler.backupCount]) self.assertTrue(os.path.exists(name)) with open(name) as f: content = f.readlines() self.assertEqual(len(self.test_data), len(content)) for index, line in enumerate(content): try: json_data = json.loads(line) except ValueError: 
self.fail("File written is not valid json") self.assertEqual(self.test_data[index].id, json_data['id']) self.assertEqual(self.test_data[index].timestamp, json_data['timestamp']) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/publisher/test_gnocchi.py0000664000175100017510000010774115033033467025300 0ustar00mylesmyles# # Copyright 2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from unittest import mock import uuid import fixtures from gnocchiclient import exceptions as gnocchi_exc from keystoneauth1 import exceptions as ka_exceptions from oslo_config import fixture as config_fixture from oslo_utils import fileutils from oslo_utils import fixture as utils_fixture from oslo_utils import netutils from oslo_utils import timeutils import requests from stevedore import extension import testscenarios from ceilometer.event import models from ceilometer.publisher import gnocchi from ceilometer import sample from ceilometer import service as ceilometer_service from ceilometer.tests import base load_tests = testscenarios.load_tests_apply_scenarios INSTANCE_DELETE_START = models.Event( event_type='compute.instance.delete.start', traits=[models.Trait('state', 1, 'active'), models.Trait( 'user_id', 1, '1e3ce043029547f1a61c1996d1a531a2'), models.Trait('service', 1, 'compute'), models.Trait('availability_zone', 1, 'zone1'), models.Trait('disk_gb', 2, 0), 
models.Trait('instance_type', 1, 'm1.tiny'), models.Trait('tenant_id', 1, '7c150a59fe714e6f9263774af9688f0e'), models.Trait('root_gb', 2, 0), models.Trait('ephemeral_gb', 2, 0), models.Trait('instance_type_id', 2, '2'), models.Trait('vcpus', 2, 1), models.Trait('memory_mb', 2, 512), models.Trait( 'instance_id', 1, '9f9d01b9-4a58-4271-9e27-398b21ab20d1'), models.Trait('host', 1, 'vagrant-precise'), models.Trait( 'request_id', 1, 'req-fb3c4546-a2e5-49b7-9fd2-a63bd658bc39'), models.Trait('project_id', 1, '7c150a59fe714e6f9263774af9688f0e'), models.Trait('launched_at', 4, '2012-05-08T20:23:47')], raw={}, generated='2012-05-08T20:24:14.824743', message_id='a15b94ee-cb8e-4c71-9abe-14aa80055fb4', ) INSTANCE_CREATE_END = models.Event( event_type='compute.instance.create.end', traits=[models.Trait('state', 1, 'active'), models.Trait( 'user_id', 1, '1e3ce043029547f1a61c1996d1a531a2'), models.Trait('service', 1, 'compute'), models.Trait('availability_zone', 1, 'zone1'), models.Trait('disk_gb', 2, 0), models.Trait('instance_type', 1, 'm1.tiny'), models.Trait('tenant_id', 1, '7c150a59fe714e6f9263774af9688f0e'), models.Trait('root_gb', 2, 0), models.Trait('ephemeral_gb', 2, 0), models.Trait('instance_type_id', 2, '2'), models.Trait('vcpus', 2, 1), models.Trait('memory_mb', 2, 512), models.Trait( 'instance_id', 1, '9f9d01b9-4a58-4271-9e27-398b21ab20d1'), models.Trait('host', 1, 'vagrant-precise'), models.Trait( 'request_id', 1, 'req-fb3c4546-a2e5-49b7-9fd2-a63bd658bc39'), models.Trait('project_id', 1, '7c150a59fe714e6f9263774af9688f0e'), models.Trait('launched_at', 4, '2012-05-08T20:23:47')], raw={}, generated='2012-05-08T20:24:14.824743', message_id='202f745e-4913-11e9-affe-9797342bd3a8', ) IMAGE_DELETE_START = models.Event( event_type='image.delete', traits=[models.Trait('status', 1, 'deleted'), models.Trait('deleted_at', 1, '2016-11-04T04:25:56Z'), models.Trait('user_id', 1, 'e97ef33a20ed4843b520d223f3cc33d4'), models.Trait('name', 1, 'cirros'), models.Trait('service', 1, 
'image.localhost'), models.Trait( 'resource_id', 1, 'dc337359-de70-4044-8e2c-80573ba6e577'), models.Trait('created_at', 1, '2016-11-04T04:24:36Z'), models.Trait( 'project_id', 1, 'e97ef33a20ed4843b520d223f3cc33d4'), models.Trait('size', 1, '13287936')], raw={}, generated='2016-11-04T04:25:56.493820', message_id='7f5280f7-1d10-46a5-ba58-4d5508e49f99' ) VOLUME_DELETE_END = models.Event( event_type='volume.delete.end', traits=[models.Trait('availability_zone', 1, 'nova'), models.Trait('created_at', 1, '2016-11-28T13:19:53+00:00'), models.Trait('display_name', 1, 'vol-001'), models.Trait( 'host', 1, 'zhangguoqing-dev@lvmdriver-1#lvmdriver-1'), models.Trait( 'project_id', 1, 'd53fcc7dc53c4662ad77822c36a21f00'), models.Trait('replication_status', 1, 'disabled'), models.Trait( 'request_id', 1, 'req-f44df096-50d4-4211-95ea-64be6f5e4f60'), models.Trait( 'resource_id', 1, '6cc6e7dd-d17d-460f-ae79-7e08a216ce96'), models.Trait( 'service', 1, 'volume.zhangguoqing-dev@lvmdriver-1'), models.Trait('size', 1, '1'), models.Trait('status', 1, 'deleting'), models.Trait('tenant_id', 1, 'd53fcc7dc53c4662ad77822c36a21f00'), models.Trait('type', 1, 'af6271fa-13c4-44e6-9246-754ce9dc7df8'), models.Trait('user_id', 1, '819bbd28f5374506b8502521c89430b5')], raw={}, generated='2016-11-28T13:42:15.484674', message_id='a15b94ee-cb8e-4c71-9abe-14aa80055fb4', ) FLOATINGIP_DELETE_END = models.Event( event_type='floatingip.delete.end', traits=[models.Trait('service', 1, 'network.zhangguoqing-dev'), models.Trait( 'project_id', 1, 'd53fcc7dc53c4662ad77822c36a21f00'), models.Trait( 'request_id', 1, 'req-443ddb77-31f7-41fe-abbf-921107dd9f00'), models.Trait( 'resource_id', 1, '705e2c08-08e8-45cb-8673-5c5be955569b'), models.Trait('tenant_id', 1, 'd53fcc7dc53c4662ad77822c36a21f00'), models.Trait('user_id', 1, '819bbd28f5374506b8502521c89430b5')], raw={}, generated='2016-11-29T09:25:55.474710', message_id='a15b94ee-cb8e-4c71-9abe-14aa80055fb4' ) VOLUME_TRANSFER_ACCEPT_END = models.Event( 
event_type='volume.transfer.accept.end', traits=[models.Trait('tenant_id', 1, '945e7d09220e4308abe4b3b734bf5fce>'), models.Trait('project_id', 1, '85bc015f7a2342348593077a927c4aaa'), models.Trait('user_id', 1, '945e7d09220e4308abe4b3b734bf5fce'), models.Trait('service', 1, 'volume.controller-0'), models.Trait( 'request_id', 1, 'req-71dd1ae4-81ca-431a-b9fd-ac833eba889f'), models.Trait( 'resource_id', 1, '156b8d3f-ad99-429b-b84c-3f263fb2a801'), models.Trait( 'display_name', 1, 'test-vol'), models.Trait( 'type', 1, 'req-71dd1ae4-81ca-431a-b9fd-ac833eba889f'), models.Trait('host', 1, 'hostgroup@tripleo_iscsi#tripleo_iscsi'), models.Trait('created_at', 4, '2020-08-28 12:51:52'), models.Trait('size', 2, 1)], raw={}, generated='2020-08-28T12:52:22.930413', message_id='9fc4ceee-d980-4098-a685-2ad660838ac1' ) SNAPSHOT_TRANSFER_ACCEPT_END = models.Event( event_type='snapshot.transfer.accept.end', traits=[models.Trait('tenant_id', 1, '945e7d09220e4308abe4b3b734bf5fce>'), models.Trait('project_id', 1, '85bc015f7a2342348593077a927c4aaa'), models.Trait('user_id', 1, '945e7d09220e4308abe4b3b734bf5fce'), models.Trait('service', 1, 'volume.controller-0'), models.Trait( 'request_id', 1, 'req-71dd1ae4-81ca-431a-b9fd-ac833eba889f'), models.Trait( 'resource_id', 1, '156b8d3f-ad99-429b-b84c-3f263fb2a801'), models.Trait( 'display_name', 1, 'test-vol'), models.Trait( 'type', 1, 'req-71dd1ae4-81ca-431a-b9fd-ac833eba889f'), models.Trait('host', 1, 'hostgroup@tripleo_iscsi#tripleo_iscsi'), models.Trait('created_at', 4, '2020-08-28 12:51:52'), models.Trait('size', 2, 1)], raw={}, generated='2020-08-28T12:52:22.930413', message_id='9fc4ceee-d980-4098-a685-2ad660838ac1' ) class PublisherTest(base.BaseTestCase): def setUp(self): super().setUp() conf = ceilometer_service.prepare_service(argv=[], config_files=[]) self.conf = self.useFixture(config_fixture.Config(conf)) self.resource_id = str(uuid.uuid4()) self.samples = [sample.Sample( name='disk.root.size', unit='GB', type=sample.TYPE_GAUGE, 
volume=2, user_id='test_user', project_id='test_project', source='openstack', timestamp='2012-05-08 20:23:48.028195', resource_id=self.resource_id, resource_metadata={ 'host': 'foo', 'image_ref': 'imageref!', 'instance_flavor_id': 1234, 'display_name': 'myinstance', } ), sample.Sample( name='disk.root.size', unit='GB', type=sample.TYPE_GAUGE, volume=2, user_id='test_user', project_id='test_project', source='openstack', timestamp='2014-05-08 20:23:48.028195', resource_id=self.resource_id, resource_metadata={ 'host': 'foo', 'image_ref': 'imageref!', 'instance_flavor_id': 1234, 'display_name': 'myinstance', }, ), ] ks_client = mock.Mock(auth_token='fake_token') ks_client.projects.find.return_value = mock.Mock( name='gnocchi', id='a2d42c23-d518-46b6-96ab-3fba2e146859') self.useFixture(fixtures.MockPatch( 'ceilometer.keystone_client.get_client', return_value=ks_client)) self.useFixture(fixtures.MockPatch( 'gnocchiclient.v1.client.Client', return_value=mock.Mock())) self.useFixture(fixtures.MockPatch( 'ceilometer.keystone_client.get_session', return_value=mock.Mock())) self.ks_client = ks_client def test_config_load(self): url = netutils.urlsplit("gnocchi://") d = gnocchi.GnocchiPublisher(self.conf.conf, url) names = [rd.cfg['resource_type'] for rd in d.resources_definition] self.assertIn('instance', names) self.assertIn('volume', names) def test_match(self): resource = { 'metrics': ['image', 'image.size', 'image.download', 'image.serve'], 'attributes': {'container_format': 'resource_metadata.container_format', 'disk_format': 'resource_metadata.disk_format', 'name': 'resource_metadata.name'}, 'event_delete': 'image.delete', 'event_attributes': {'id': 'resource_id'}, 'resource_type': 'image'} plugin_manager = extension.ExtensionManager( namespace='ceilometer.event.trait.trait_plugin') rd = gnocchi.ResourcesDefinition( resource, "high", "low", plugin_manager) operation = rd.event_match("image.delete") self.assertEqual('delete', operation) def test_metric_match(self): pub = 
gnocchi.GnocchiPublisher(self.conf.conf, netutils.urlsplit("gnocchi://")) self.assertIn('image.size', pub.metric_map['image.size'].metrics) @mock.patch('ceilometer.publisher.gnocchi.LOG') def test_broken_config_load(self, mylog): contents = [("---\n" "resources:\n" " - resource_type: foobar\n"), ("---\n" "resources:\n" " - resource_type: 0\n"), ("---\n" "resources:\n" " - sample_types: ['foo', 'bar']\n"), ("---\n" "resources:\n" " - sample_types: foobar\n" " - resource_type: foobar\n"), ] for content in contents: content = content.encode('utf-8') temp = fileutils.write_to_tempfile(content=content, prefix='gnocchi_resources', suffix='.yaml') self.addCleanup(os.remove, temp) url = netutils.urlsplit( "gnocchi://?resources_definition_file=" + temp) d = gnocchi.GnocchiPublisher(self.conf.conf, url) self.assertTrue(mylog.error.called) self.assertEqual(0, len(d.resources_definition)) @mock.patch('ceilometer.publisher.gnocchi.GnocchiPublisher' '._if_not_cached', mock.Mock()) @mock.patch('ceilometer.publisher.gnocchi.GnocchiPublisher' '.batch_measures') def _do_test_activity_filter(self, expected_measures, fake_batch): url = netutils.urlsplit("gnocchi://") d = gnocchi.GnocchiPublisher(self.conf.conf, url) d._already_checked_archive_policies = True d.publish_samples(self.samples) self.assertEqual(1, len(fake_batch.mock_calls)) measures = fake_batch.mock_calls[0][1][0] self.assertEqual( expected_measures, sum(len(m["measures"]) for rid in measures for m in measures[rid].values())) def test_activity_filter_match_project_id(self): self.samples[0].project_id = ( 'a2d42c23-d518-46b6-96ab-3fba2e146859') self._do_test_activity_filter(1) @mock.patch('ceilometer.publisher.gnocchi.LOG') def test_activity_gnocchi_project_not_found(self, logger): self.ks_client.projects.find.side_effect = ka_exceptions.NotFound self._do_test_activity_filter(2) logger.warning.assert_called_with( 'Filtered project [service] not found in keystone, ignoring the ' 'filter_project option') 
@mock.patch('ceilometer.publisher.gnocchi.GnocchiPublisher' '._get_gnocchi_client') def test_get_gnocchi_client(self, gnocchi_cli): url = netutils.urlsplit("gnocchi://") gnocchi_cli.side_effect = ka_exceptions.DiscoveryFailure cfg = self.conf.conf publisher = gnocchi.GnocchiPublisher self.assertRaises(ka_exceptions.DiscoveryFailure, publisher, cfg, url) def test_activity_filter_match_swift_event(self): self.samples[0].name = 'storage.objects.outgoing.bytes' self.samples[0].resource_id = 'a2d42c23-d518-46b6-96ab-3fba2e146859' self._do_test_activity_filter(1) def test_activity_filter_nomatch(self): self._do_test_activity_filter(2) @mock.patch('ceilometer.publisher.gnocchi.GnocchiPublisher' '.batch_measures') def test_unhandled_meter(self, fake_batch): samples = [sample.Sample( name='unknown.meter', unit='GB', type=sample.TYPE_GAUGE, volume=2, user_id='test_user', project_id='test_project', source='openstack', timestamp='2014-05-08 20:23:48.028195', resource_id='randomid', resource_metadata={} )] url = netutils.urlsplit("gnocchi://") d = gnocchi.GnocchiPublisher(self.conf.conf, url) d._already_checked_archive_policies = True d.publish_samples(samples) self.assertEqual(0, len(fake_batch.call_args[0][1])) @mock.patch('ceilometer.publisher.gnocchi.GnocchiPublisher' '.batch_measures') def test_unhandled_meter_with_no_resource_id(self, fake_batch): samples = [ sample.Sample( name='unknown.meter', unit='GB', type=sample.TYPE_GAUGE, volume=2, user_id='test_user', project_id='test_project', source='openstack', timestamp='2014-05-08 20:23:48.028195', resource_id=None, resource_metadata={}), sample.Sample( name='unknown.meter', unit='GB', type=sample.TYPE_GAUGE, volume=2, user_id='test_user', project_id='test_project', source='openstack', timestamp='2014-05-08 20:23:48.028195', resource_id="Some-other-resource-id", resource_metadata={}) ] url = netutils.urlsplit("gnocchi://") d = gnocchi.GnocchiPublisher(self.conf.conf, url) d._already_checked_archive_policies = True 
d.publish_samples(samples) self.assertEqual(0, len(fake_batch.call_args[0][1])) @mock.patch('ceilometer.publisher.gnocchi.LOG') @mock.patch('gnocchiclient.v1.client.Client') def test__set_update_attributes_non_existent_resource(self, fakeclient_cls, logger): url = netutils.urlsplit("gnocchi://") self.publisher = gnocchi.GnocchiPublisher(self.conf.conf, url) fakeclient = fakeclient_cls.return_value fakeclient.resource.update.side_effect = [ gnocchi_exc.ResourceNotFound(404)] non_existent_resource = { 'type': 'volume', 'id': self.resource_id, } self.publisher._set_update_attributes(non_existent_resource) logger.debug.assert_called_with( "Update event received on unexisting resource (%s), ignore it.", self.resource_id) class MockResponse(mock.NonCallableMock): def __init__(self, code): text = {500: 'Internal Server Error', 404: 'Not Found', 204: 'Created', 409: 'Conflict', }.get(code) super().__init__(spec=requests.Response, status_code=code, text=text) class PublisherWorkflowTest(base.BaseTestCase, testscenarios.TestWithScenarios): sample_scenarios = [ ('cpu', dict( sample=sample.Sample( resource_id=str(uuid.uuid4()) + "_foobar", name='cpu', unit='ns', type=sample.TYPE_CUMULATIVE, volume=500, user_id='test_user', project_id='test_project', source='openstack', timestamp='2012-05-08 20:23:48.028195', resource_metadata={ 'host': 'foo', 'image_ref': 'imageref!', 'instance_flavor_id': 1234, 'display_name': 'myinstance', }, ), metric_attributes={ "archive_policy_name": "ceilometer-low-rate", "unit": "ns", "measures": [{ 'timestamp': '2012-05-08 20:23:48.028195', 'value': 500 }] }, postable_attributes={ 'user_id': 'test_user', 'project_id': 'test_project', }, patchable_attributes={ 'host': 'foo', 'image_ref': 'imageref!', 'flavor_id': 1234, 'display_name': 'myinstance', }, resource_type='instance')), ('disk.root.size', dict( sample=sample.Sample( resource_id=str(uuid.uuid4()) + "_foobar", name='disk.root.size', unit='GB', type=sample.TYPE_GAUGE, volume=2, 
user_id='test_user', project_id='test_project', source='openstack', timestamp='2012-05-08 20:23:48.028195', resource_metadata={ 'host': 'foo', 'image_ref': 'imageref!', 'instance_flavor_id': 1234, 'display_name': 'myinstance', }, ), metric_attributes={ "archive_policy_name": "ceilometer-low", "unit": "GB", "measures": [{ 'timestamp': '2012-05-08 20:23:48.028195', 'value': 2 }] }, postable_attributes={ 'user_id': 'test_user', 'project_id': 'test_project', }, patchable_attributes={ 'host': 'foo', 'image_ref': 'imageref!', 'flavor_id': 1234, 'display_name': 'myinstance', }, resource_type='instance')), ('hardware.ipmi.node.power', dict( sample=sample.Sample( resource_id=str(uuid.uuid4()) + "_foobar", name='hardware.ipmi.node.power', unit='W', type=sample.TYPE_GAUGE, volume=2, user_id='test_user', project_id='test_project', source='openstack', timestamp='2012-05-08 20:23:48.028195', resource_metadata={ 'useless': 'not_used', }, ), metric_attributes={ "archive_policy_name": "ceilometer-low", "unit": "W", "measures": [{ 'timestamp': '2012-05-08 20:23:48.028195', 'value': 2 }] }, postable_attributes={ 'user_id': 'test_user', 'project_id': 'test_project', }, patchable_attributes={ }, resource_type='ipmi')), ] default_workflow = dict(resource_exists=True, post_measure_fail=False, create_resource_fail=False, create_resource_race=False, update_resource_fail=False, retry_post_measures_fail=False) workflow_scenarios = [ ('normal_workflow', {}), ('new_resource', dict(resource_exists=False)), ('new_resource_compat', dict(resource_exists=False)), ('new_resource_fail', dict(resource_exists=False, create_resource_fail=True)), ('new_resource_race', dict(resource_exists=False, create_resource_race=True)), ('resource_update_fail', dict(update_resource_fail=True)), ('retry_fail', dict(resource_exists=False, retry_post_measures_fail=True)), ('measure_fail', dict(post_measure_fail=True)), ] @classmethod def generate_scenarios(cls): workflow_scenarios = [] for name, wf_change in 
cls.workflow_scenarios: wf = cls.default_workflow.copy() wf.update(wf_change) workflow_scenarios.append((name, wf)) cls.scenarios = testscenarios.multiply_scenarios(cls.sample_scenarios, workflow_scenarios) def setUp(self): super().setUp() conf = ceilometer_service.prepare_service(argv=[], config_files=[]) self.conf = self.useFixture(config_fixture.Config(conf)) ks_client = mock.Mock() ks_client.projects.find.return_value = mock.Mock( name='gnocchi', id='a2d42c23-d518-46b6-96ab-3fba2e146859') self.useFixture(fixtures.MockPatch( 'ceilometer.keystone_client.get_client', return_value=ks_client)) self.useFixture(fixtures.MockPatch( 'ceilometer.keystone_client.get_session', return_value=ks_client)) self.ks_client = ks_client @mock.patch('gnocchiclient.v1.client.Client') def test_delete_event_workflow(self, fakeclient_cls): url = netutils.urlsplit("gnocchi://") self.publisher = gnocchi.GnocchiPublisher(self.conf.conf, url) fakeclient = fakeclient_cls.return_value fakeclient.resource.search.side_effect = [ [{"id": "b26268d6-8bb5-11e6-baff-00224d8226cd", "type": "instance_disk", "instance_id": "9f9d01b9-4a58-4271-9e27-398b21ab20d1"}], [{"id": "b1c7544a-8bb5-11e6-850e-00224d8226cd", "type": "instance_network_interface", "instance_id": "9f9d01b9-4a58-4271-9e27-398b21ab20d1"}], ] search_params = { '=': {'instance_id': '9f9d01b9-4a58-4271-9e27-398b21ab20d1'} } now = timeutils.utcnow() self.useFixture(utils_fixture.TimeFixture(now)) expected_calls = [ mock.call.resource.search('instance_network_interface', search_params), mock.call.resource.search('instance_disk', search_params), mock.call.resource.update( 'instance', '9f9d01b9-4a58-4271-9e27-398b21ab20d1', {'ended_at': now.isoformat()}), mock.call.resource.update( 'instance_disk', 'b26268d6-8bb5-11e6-baff-00224d8226cd', {'ended_at': now.isoformat()}), mock.call.resource.update( 'instance_network_interface', 'b1c7544a-8bb5-11e6-850e-00224d8226cd', {'ended_at': now.isoformat()}), mock.call.resource.update( 'image', 
'dc337359-de70-4044-8e2c-80573ba6e577', {'ended_at': now.isoformat()}), mock.call.resource.update( 'volume', '6cc6e7dd-d17d-460f-ae79-7e08a216ce96', {'ended_at': now.isoformat()}), mock.call.resource.update( 'network', '705e2c08-08e8-45cb-8673-5c5be955569b', {'ended_at': now.isoformat()}) ] self.publisher.publish_events([INSTANCE_DELETE_START, IMAGE_DELETE_START, VOLUME_DELETE_END, FLOATINGIP_DELETE_END]) self.assertEqual(8, len(fakeclient.mock_calls)) for call in expected_calls: self.assertIn(call, fakeclient.mock_calls) @mock.patch('gnocchiclient.v1.client.Client') def test_create_event_workflow(self, fakeclient_cls): url = netutils.urlsplit("gnocchi://") self.publisher = gnocchi.GnocchiPublisher(self.conf.conf, url) fakeclient = fakeclient_cls.return_value now = timeutils.utcnow() self.useFixture(utils_fixture.TimeFixture(now)) expected_calls = [ mock.call.resource.create( 'instance', {'id': '9f9d01b9-4a58-4271-9e27-398b21ab20d1', 'user_id': '1e3ce043029547f1a61c1996d1a531a2', 'project_id': '7c150a59fe714e6f9263774af9688f0e', 'availability_zone': 'zone1', 'flavor_name': 'm1.tiny', 'flavor_id': '2', 'host': 'vagrant-precise'}), ] self.publisher.publish_events([INSTANCE_CREATE_END]) self.assertEqual(1, len(fakeclient.mock_calls)) for call in expected_calls: self.assertIn(call, fakeclient.mock_calls) @mock.patch('gnocchiclient.v1.client.Client') def test_update_event_workflow(self, fakeclient_cls): url = netutils.urlsplit("gnocchi://") self.publisher = gnocchi.GnocchiPublisher(self.conf.conf, url) fakeclient = fakeclient_cls.return_value now = timeutils.utcnow() self.useFixture(utils_fixture.TimeFixture(now)) expected_calls = [ mock.call.resource.update( 'volume', '156b8d3f-ad99-429b-b84c-3f263fb2a801', {'project_id': '85bc015f7a2342348593077a927c4aaa'}), ] self.publisher.publish_events([VOLUME_TRANSFER_ACCEPT_END]) self.assertEqual(1, len(fakeclient.mock_calls)) for call in expected_calls: self.assertIn(call, fakeclient.mock_calls) 
@mock.patch('gnocchiclient.v1.client.Client') def test_update_snapshot_event_workflow(self, fakeclient_cls): url = netutils.urlsplit("gnocchi://") self.publisher = gnocchi.GnocchiPublisher(self.conf.conf, url) fakeclient = fakeclient_cls.return_value now = timeutils.utcnow() self.useFixture(utils_fixture.TimeFixture(now)) expected_calls = [ mock.call.resource.update( 'volume', '156b8d3f-ad99-429b-b84c-3f263fb2a801', {'project_id': '85bc015f7a2342348593077a927c4aaa'}), ] self.publisher.publish_events([SNAPSHOT_TRANSFER_ACCEPT_END]) self.assertEqual(1, len(fakeclient.mock_calls)) for call in expected_calls: self.assertIn(call, fakeclient.mock_calls) @mock.patch('ceilometer.cache_utils.get_client', mock.Mock()) @mock.patch('ceilometer.publisher.gnocchi.LOG') @mock.patch('gnocchiclient.v1.client.Client') def test_workflow(self, fakeclient_cls, logger): url = netutils.urlsplit("gnocchi://") publisher = gnocchi.GnocchiPublisher(self.conf.conf, url) fakeclient = fakeclient_cls.return_value resource_id = self.sample.resource_id.replace("/", "_") metric_name = self.sample.name gnocchi_id = uuid.uuid4() expected_calls = [ mock.call.archive_policy.create({"name": "ceilometer-low", "back_window": 0, "aggregation_methods": ["mean"], "definition": mock.ANY}), mock.call.archive_policy.create({"name": "ceilometer-low-rate", "back_window": 0, "aggregation_methods": [ "mean", "rate:mean"], "definition": mock.ANY}), mock.call.archive_policy.create({"name": "ceilometer-high", "back_window": 0, "aggregation_methods": ["mean"], "definition": mock.ANY}), mock.call.archive_policy.create({"name": "ceilometer-high-rate", "back_window": 0, "aggregation_methods": [ "mean", "rate:mean"], "definition": mock.ANY}), mock.call.metric.batch_resources_metrics_measures( {resource_id: {metric_name: self.metric_attributes}}, create_metrics=True) ] resource_definition = publisher.metric_map.get(self.sample.name) expected_measures_in_log = {resource_id: {self.sample.name: { 'measures': [{'timestamp': 
self.sample.timestamp, 'value': self.sample.volume}], 'archive_policy_name': resource_definition.metrics[ metric_name]["archive_policy_name"], 'unit': self.sample.unit}}} resource_type = resource_definition.cfg['resource_type'] expected_debug = [ mock.call('Filtered project [%s] found with ID [%s].', 'service', 'a2d42c23-d518-46b6-96ab-3fba2e146859'), mock.call('Sample [%s] is not a Gnocchi activity; therefore, we ' 'do not filter it out and push it to Gnocchi.', self.sample), mock.call('Processing sample [%s] for resource ID [%s].', self.sample, resource_id), mock.call('Executing batch resource metrics measures for resource ' '[%s] and measures [%s].', mock.ANY, expected_measures_in_log)] measures_posted = False batch_side_effect = [] if self.post_measure_fail: batch_side_effect += [Exception('boom!')] elif not self.resource_exists: batch_side_effect += [ gnocchi_exc.BadRequest( 400, {"cause": "Unknown resources", 'detail': [{ 'resource_id': gnocchi_id, 'original_resource_id': resource_id}]})] attributes = self.postable_attributes.copy() attributes.update(self.patchable_attributes) attributes['id'] = self.sample.resource_id expected_calls.append(mock.call.resource.create( self.resource_type, attributes)) if self.create_resource_fail: fakeclient.resource.create.side_effect = [Exception('boom!')] elif self.create_resource_race: fakeclient.resource.create.side_effect = [ gnocchi_exc.ResourceAlreadyExists(409)] else: # not resource_exists expected_debug.append(mock.call( 'Resource %s created', self.sample.resource_id)) if not self.create_resource_fail: expected_calls.append( mock.call.metric.batch_resources_metrics_measures( {resource_id: {metric_name: self.metric_attributes}}, create_metrics=True) ) if self.retry_post_measures_fail: batch_side_effect += [Exception('boom!')] else: measures_posted = True else: measures_posted = True if measures_posted: batch_side_effect += [None] expected_debug.append( mock.call("%d measures posted against %d metrics through %d " 
"resources", len(self.metric_attributes["measures"]), 1, 1) ) if self.patchable_attributes: expected_calls.append(mock.call.resource.update( self.resource_type, resource_id, self.patchable_attributes)) if self.update_resource_fail: fakeclient.resource.update.side_effect = [Exception('boom!')] else: expected_debug.append(mock.call( 'Resource %s updated', self.sample.resource_id)) batch = fakeclient.metric.batch_resources_metrics_measures batch.side_effect = batch_side_effect publisher.publish_samples([self.sample]) # Check that the last log message is the expected one if (self.post_measure_fail or self.create_resource_fail or self.retry_post_measures_fail or (self.update_resource_fail and self.patchable_attributes)): if self.update_resource_fail and self.patchable_attributes: logger.error.assert_called_with( 'Unexpected exception updating resource type [%s] with ' 'ID [%s] for resource data [%s]: [%s].', resource_type, resource_id, mock.ANY, 'boom!', exc_info=True) else: logger.error.assert_called_with( 'Unexpected exception while pushing measures [%s] for ' 'gnocchi data [%s]: [%s].', expected_measures_in_log, mock.ANY, 'boom!', exc_info=True) else: self.assertEqual(0, logger.error.call_count) self.assertEqual(expected_calls, fakeclient.mock_calls) self.assertEqual(expected_debug, logger.debug.mock_calls) PublisherWorkflowTest.generate_scenarios() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/publisher/test_http.py0000664000175100017510000002434315033033467024641 0ustar00mylesmyles# # Copyright 2016 IBM # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for ceilometer/publisher/http.py""" import datetime from unittest import mock import uuid from oslo_utils import timeutils from oslotest import base import requests from urllib import parse as urlparse from ceilometer.event import models as event from ceilometer.publisher import http from ceilometer import sample from ceilometer import service class TestHttpPublisher(base.BaseTestCase): resource_id = str(uuid.uuid4()) sample_data = [ sample.Sample( name='alpha', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id=resource_id, timestamp=timeutils.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='beta', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id=resource_id, timestamp=timeutils.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='gamma', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id=resource_id, timestamp=datetime.datetime.now().isoformat(), resource_metadata={'name': 'TestPublish'}, ), ] event_data = [event.Event( message_id=str(uuid.uuid4()), event_type='event_%d' % i, generated=timeutils.utcnow().isoformat(), traits=[], raw={'payload': {'some': 'aa'}}) for i in range(3)] def setUp(self): super().setUp() self.CONF = service.prepare_service([], []) def test_http_publisher_config(self): """Test publisher config parameters.""" # invalid hostname, the given url, results in an empty hostname parsed_url = urlparse.urlparse('http:/aaa.bb/path') 
self.assertRaises(ValueError, http.HttpPublisher, self.CONF, parsed_url) # invalid port parsed_url = urlparse.urlparse('http://aaa:bb/path') self.assertRaises(ValueError, http.HttpPublisher, self.CONF, parsed_url) parsed_url = urlparse.urlparse('http://localhost:90/path1') publisher = http.HttpPublisher(self.CONF, parsed_url) # By default, timeout and retry_count should be set to 5 and 2 # respectively self.assertEqual(5, publisher.timeout) self.assertEqual(2, publisher.max_retries) parsed_url = urlparse.urlparse('http://localhost:90/path1?' 'timeout=19&max_retries=4') publisher = http.HttpPublisher(self.CONF, parsed_url) self.assertEqual(19, publisher.timeout) self.assertEqual(4, publisher.max_retries) parsed_url = urlparse.urlparse('http://localhost:90/path1?' 'timeout=19') publisher = http.HttpPublisher(self.CONF, parsed_url) self.assertEqual(19, publisher.timeout) self.assertEqual(2, publisher.max_retries) parsed_url = urlparse.urlparse('http://localhost:90/path1?' 'max_retries=6') publisher = http.HttpPublisher(self.CONF, parsed_url) self.assertEqual(5, publisher.timeout) self.assertEqual(6, publisher.max_retries) @mock.patch('ceilometer.publisher.http.LOG') def test_http_post_samples(self, thelog): """Test publisher post.""" parsed_url = urlparse.urlparse('http://localhost:90/path1') publisher = http.HttpPublisher(self.CONF, parsed_url) res = requests.Response() res.status_code = 200 with mock.patch.object(requests.Session, 'post', return_value=res) as m_req: publisher.publish_samples(self.sample_data) self.assertEqual(1, m_req.call_count) self.assertFalse(thelog.exception.called) res = requests.Response() res.status_code = 401 with mock.patch.object(requests.Session, 'post', return_value=res) as m_req: publisher.publish_samples(self.sample_data) self.assertEqual(1, m_req.call_count) self.assertTrue(thelog.exception.called) @mock.patch('ceilometer.publisher.http.LOG') def test_http_post_events(self, thelog): """Test publisher post.""" parsed_url = 
urlparse.urlparse('http://localhost:90/path1') publisher = http.HttpPublisher(self.CONF, parsed_url) res = requests.Response() res.status_code = 200 with mock.patch.object(requests.Session, 'post', return_value=res) as m_req: publisher.publish_events(self.event_data) self.assertEqual(1, m_req.call_count) self.assertFalse(thelog.exception.called) res = requests.Response() res.status_code = 401 with mock.patch.object(requests.Session, 'post', return_value=res) as m_req: publisher.publish_events(self.event_data) self.assertEqual(1, m_req.call_count) self.assertTrue(thelog.exception.called) @mock.patch('ceilometer.publisher.http.LOG') def test_http_post_empty_data(self, thelog): parsed_url = urlparse.urlparse('http://localhost:90/path1') publisher = http.HttpPublisher(self.CONF, parsed_url) res = requests.Response() res.status_code = 200 with mock.patch.object(requests.Session, 'post', return_value=res) as m_req: publisher.publish_events([]) self.assertEqual(0, m_req.call_count) self.assertTrue(thelog.debug.called) def _post_batch_control_test(self, method, data, batch): parsed_url = urlparse.urlparse('http://localhost:90/path1?' 
'batch=%s' % batch) publisher = http.HttpPublisher(self.CONF, parsed_url) with mock.patch.object(requests.Session, 'post') as post: getattr(publisher, method)(data) self.assertEqual(1 if batch else 3, post.call_count) def test_post_batch_sample(self): self._post_batch_control_test('publish_samples', self.sample_data, 1) def test_post_no_batch_sample(self): self._post_batch_control_test('publish_samples', self.sample_data, 0) def test_post_batch_event(self): self._post_batch_control_test('publish_events', self.event_data, 1) def test_post_no_batch_event(self): self._post_batch_control_test('publish_events', self.event_data, 0) def test_post_verify_ssl_default(self): parsed_url = urlparse.urlparse('http://localhost:90/path1') publisher = http.HttpPublisher(self.CONF, parsed_url) with mock.patch.object(requests.Session, 'post') as post: publisher.publish_samples(self.sample_data) self.assertTrue(post.call_args[1]['verify']) def test_post_verify_ssl_True(self): parsed_url = urlparse.urlparse('http://localhost:90/path1?' 'verify_ssl=True') publisher = http.HttpPublisher(self.CONF, parsed_url) with mock.patch.object(requests.Session, 'post') as post: publisher.publish_samples(self.sample_data) self.assertTrue(post.call_args[1]['verify']) def test_post_verify_ssl_False(self): parsed_url = urlparse.urlparse('http://localhost:90/path1?' 'verify_ssl=False') publisher = http.HttpPublisher(self.CONF, parsed_url) with mock.patch.object(requests.Session, 'post') as post: publisher.publish_samples(self.sample_data) self.assertFalse(post.call_args[1]['verify']) def test_post_verify_ssl_path(self): parsed_url = urlparse.urlparse('http://localhost:90/path1?' 
'verify_ssl=/path/to/cert.crt') publisher = http.HttpPublisher(self.CONF, parsed_url) with mock.patch.object(requests.Session, 'post') as post: publisher.publish_samples(self.sample_data) self.assertEqual('/path/to/cert.crt', post.call_args[1]['verify']) def test_post_basic_auth(self): parsed_url = urlparse.urlparse( 'http://alice:l00kingGla$$@localhost:90/path1?') publisher = http.HttpPublisher(self.CONF, parsed_url) with mock.patch.object(requests.Session, 'post') as post: publisher.publish_samples(self.sample_data) self.assertEqual(('alice', 'l00kingGla$$'), post.call_args[1]['auth']) def test_post_client_cert_auth(self): parsed_url = urlparse.urlparse('http://localhost:90/path1?' 'clientcert=/path/to/cert.crt&' 'clientkey=/path/to/cert.key') publisher = http.HttpPublisher(self.CONF, parsed_url) with mock.patch.object(requests.Session, 'post') as post: publisher.publish_samples(self.sample_data) self.assertEqual(('/path/to/cert.crt', '/path/to/cert.key'), post.call_args[1]['cert']) def test_post_raw_only(self): parsed_url = urlparse.urlparse('http://localhost:90/path1?raw_only=1') publisher = http.HttpPublisher(self.CONF, parsed_url) with mock.patch.object(requests.Session, 'post') as post: publisher.publish_events(self.event_data) self.assertEqual( '[{"some": "aa"}, {"some": "aa"}, {"some": "aa"}]', post.call_args[1]['data']) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/publisher/test_messaging_publisher.py0000664000175100017510000003467715033033467027727 0ustar00mylesmyles# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for ceilometer/publisher/messaging.py""" from unittest import mock import uuid import oslo_messaging from oslo_messaging._drivers import impl_kafka as kafka_driver from oslo_utils import netutils from oslo_utils import timeutils import testscenarios.testcase from ceilometer.event import models as event from ceilometer.publisher import messaging as msg_publisher from ceilometer import sample from ceilometer import service from ceilometer.tests import base as tests_base class BasePublisherTestCase(tests_base.BaseTestCase): test_event_data = [ event.Event(message_id=uuid.uuid4(), event_type='event_%d' % i, generated=timeutils.utcnow(), traits=[], raw={}) for i in range(0, 5) ] test_sample_data = [ sample.Sample( name='test', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='test', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='test2', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='test2', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=timeutils.utcnow().isoformat(), 
resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='test3', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), ] def setUp(self): super().setUp() self.CONF = service.prepare_service([], []) self.setup_messaging(self.CONF) class NotifierOnlyPublisherTest(BasePublisherTestCase): @mock.patch('oslo_messaging.Notifier') def test_publish_topic_override(self, notifier): msg_publisher.SampleNotifierPublisher( self.CONF, netutils.urlsplit('notifier://?topic=custom_topic')) notifier.assert_called_with(mock.ANY, topics=['custom_topic'], driver=mock.ANY, retry=mock.ANY, publisher_id=mock.ANY) msg_publisher.EventNotifierPublisher( self.CONF, netutils.urlsplit('notifier://?topic=custom_event_topic')) notifier.assert_called_with(mock.ANY, topics=['custom_event_topic'], driver=mock.ANY, retry=mock.ANY, publisher_id=mock.ANY) @mock.patch('ceilometer.messaging.get_transport') def test_publish_other_host(self, cgt): msg_publisher.SampleNotifierPublisher( self.CONF, netutils.urlsplit('notifier://foo:foo@127.0.0.1:1234')) cgt.assert_called_with(self.CONF, 'rabbit://foo:foo@127.0.0.1:1234') msg_publisher.EventNotifierPublisher( self.CONF, netutils.urlsplit('notifier://foo:foo@127.0.0.1:1234')) cgt.assert_called_with(self.CONF, 'rabbit://foo:foo@127.0.0.1:1234') @mock.patch('ceilometer.messaging.get_transport') def test_publish_other_host_vhost_and_query(self, cgt): msg_publisher.SampleNotifierPublisher( self.CONF, netutils.urlsplit('notifier://foo:foo@127.0.0.1:1234/foo' '?driver=amqp&amqp_auto_delete=true')) cgt.assert_called_with(self.CONF, 'amqp://foo:foo@127.0.0.1:1234/foo' '?amqp_auto_delete=true') msg_publisher.EventNotifierPublisher( self.CONF, netutils.urlsplit('notifier://foo:foo@127.0.0.1:1234/foo' '?driver=amqp&amqp_auto_delete=true')) cgt.assert_called_with(self.CONF, 'amqp://foo:foo@127.0.0.1:1234/foo' 
'?amqp_auto_delete=true') @mock.patch('ceilometer.messaging.get_transport') def test_publish_with_none_rabbit_driver(self, cgt): sample_publisher = msg_publisher.SampleNotifierPublisher( self.CONF, netutils.urlsplit('notifier://127.0.0.1:9092?driver=kafka')) cgt.assert_called_with(self.CONF, 'kafka://127.0.0.1:9092') transport = oslo_messaging.get_transport(self.CONF, 'kafka://127.0.0.1:9092') self.assertIsInstance(transport._driver, kafka_driver.KafkaDriver) side_effect = msg_publisher.DeliveryFailure() with mock.patch.object(sample_publisher, '_send') as fake_send: fake_send.side_effect = side_effect self.assertRaises( msg_publisher.DeliveryFailure, sample_publisher.publish_samples, self.test_sample_data) self.assertEqual(0, len(sample_publisher.local_queue)) self.assertEqual(100, len(fake_send.mock_calls)) fake_send.assert_called_with('metering', mock.ANY) event_publisher = msg_publisher.EventNotifierPublisher( self.CONF, netutils.urlsplit('notifier://127.0.0.1:9092?driver=kafka')) cgt.assert_called_with(self.CONF, 'kafka://127.0.0.1:9092') with mock.patch.object(event_publisher, '_send') as fake_send: fake_send.side_effect = side_effect self.assertRaises( msg_publisher.DeliveryFailure, event_publisher.publish_events, self.test_event_data) self.assertEqual(0, len(event_publisher.local_queue)) self.assertEqual(100, len(fake_send.mock_calls)) fake_send.assert_called_with('event', mock.ANY) class TestPublisher(testscenarios.testcase.WithScenarios, BasePublisherTestCase): scenarios = [ ('notifier', dict(protocol="notifier", publisher_cls=msg_publisher.SampleNotifierPublisher, test_data=BasePublisherTestCase.test_sample_data, pub_func='publish_samples', attr='source')), ('event_notifier', dict(protocol="notifier", publisher_cls=msg_publisher.EventNotifierPublisher, test_data=BasePublisherTestCase.test_event_data, pub_func='publish_events', attr='event_type')), ] def setUp(self): super().setUp() self.topic = (self.CONF.publisher_notifier.event_topic if self.pub_func 
== 'publish_events' else self.CONF.publisher_notifier.metering_topic) class TestPublisherPolicy(TestPublisher): @mock.patch('ceilometer.publisher.messaging.LOG') def test_published_with_no_policy(self, mylog): publisher = self.publisher_cls( self.CONF, netutils.urlsplit('%s://' % self.protocol)) side_effect = msg_publisher.DeliveryFailure() with mock.patch.object(publisher, '_send') as fake_send: fake_send.side_effect = side_effect self.assertRaises( msg_publisher.DeliveryFailure, getattr(publisher, self.pub_func), self.test_data) self.assertTrue(mylog.info.called) self.assertEqual('default', publisher.policy) self.assertEqual(0, len(publisher.local_queue)) self.assertEqual(100, len(fake_send.mock_calls)) fake_send.assert_called_with( self.topic, mock.ANY) @mock.patch('ceilometer.publisher.messaging.LOG') def test_published_with_policy_block(self, mylog): publisher = self.publisher_cls( self.CONF, netutils.urlsplit('%s://?policy=default' % self.protocol)) side_effect = msg_publisher.DeliveryFailure() with mock.patch.object(publisher, '_send') as fake_send: fake_send.side_effect = side_effect self.assertRaises( msg_publisher.DeliveryFailure, getattr(publisher, self.pub_func), self.test_data) self.assertTrue(mylog.info.called) self.assertEqual(0, len(publisher.local_queue)) self.assertEqual(100, len(fake_send.mock_calls)) fake_send.assert_called_with( self.topic, mock.ANY) @mock.patch('ceilometer.publisher.messaging.LOG') def test_published_with_policy_incorrect(self, mylog): publisher = self.publisher_cls( self.CONF, netutils.urlsplit('%s://?policy=notexist' % self.protocol)) side_effect = msg_publisher.DeliveryFailure() with mock.patch.object(publisher, '_send') as fake_send: fake_send.side_effect = side_effect self.assertRaises( msg_publisher.DeliveryFailure, getattr(publisher, self.pub_func), self.test_data) self.assertTrue(mylog.warning.called) self.assertEqual('default', publisher.policy) self.assertEqual(0, len(publisher.local_queue)) self.assertEqual(100, 
len(fake_send.mock_calls)) fake_send.assert_called_with( self.topic, mock.ANY) @mock.patch('ceilometer.publisher.messaging.LOG', mock.Mock()) class TestPublisherPolicyReactions(TestPublisher): def test_published_with_policy_drop_and_rpc_down(self): publisher = self.publisher_cls( self.CONF, netutils.urlsplit('%s://?policy=drop' % self.protocol)) side_effect = msg_publisher.DeliveryFailure() with mock.patch.object(publisher, '_send') as fake_send: fake_send.side_effect = side_effect getattr(publisher, self.pub_func)(self.test_data) self.assertEqual(0, len(publisher.local_queue)) fake_send.assert_called_once_with( self.topic, mock.ANY) def test_published_with_policy_queue_and_rpc_down(self): publisher = self.publisher_cls( self.CONF, netutils.urlsplit('%s://?policy=queue' % self.protocol)) side_effect = msg_publisher.DeliveryFailure() with mock.patch.object(publisher, '_send') as fake_send: fake_send.side_effect = side_effect getattr(publisher, self.pub_func)(self.test_data) self.assertEqual(1, len(publisher.local_queue)) fake_send.assert_called_once_with( self.topic, mock.ANY) def test_published_with_policy_queue_and_rpc_down_up(self): self.rpc_unreachable = True publisher = self.publisher_cls( self.CONF, netutils.urlsplit('%s://?policy=queue' % self.protocol)) side_effect = msg_publisher.DeliveryFailure() with mock.patch.object(publisher, '_send') as fake_send: fake_send.side_effect = side_effect getattr(publisher, self.pub_func)(self.test_data) self.assertEqual(1, len(publisher.local_queue)) fake_send.side_effect = mock.MagicMock() getattr(publisher, self.pub_func)(self.test_data) self.assertEqual(0, len(publisher.local_queue)) topic = self.topic expected = [mock.call(topic, mock.ANY), mock.call(topic, mock.ANY), mock.call(topic, mock.ANY)] self.assertEqual(expected, fake_send.mock_calls) def test_published_with_policy_sized_queue_and_rpc_down(self): publisher = self.publisher_cls(self.CONF, netutils.urlsplit( '%s://?policy=queue&max_queue_length=3' % 
self.protocol)) side_effect = msg_publisher.DeliveryFailure() with mock.patch.object(publisher, '_send') as fake_send: fake_send.side_effect = side_effect for i in range(0, 5): for s in self.test_data: setattr(s, self.attr, 'test-%d' % i) getattr(publisher, self.pub_func)(self.test_data) self.assertEqual(3, len(publisher.local_queue)) self.assertEqual( 'test-2', publisher.local_queue[0][1][0][self.attr] ) self.assertEqual( 'test-3', publisher.local_queue[1][1][0][self.attr] ) self.assertEqual( 'test-4', publisher.local_queue[2][1][0][self.attr] ) def test_published_with_policy_default_sized_queue_and_rpc_down(self): publisher = self.publisher_cls( self.CONF, netutils.urlsplit('%s://?policy=queue' % self.protocol)) side_effect = msg_publisher.DeliveryFailure() with mock.patch.object(publisher, '_send') as fake_send: fake_send.side_effect = side_effect for i in range(0, 2000): for s in self.test_data: setattr(s, self.attr, 'test-%d' % i) getattr(publisher, self.pub_func)(self.test_data) self.assertEqual(1024, len(publisher.local_queue)) self.assertEqual( 'test-976', publisher.local_queue[0][1][0][self.attr] ) self.assertEqual( 'test-1999', publisher.local_queue[1023][1][0][self.attr] ) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/publisher/test_opentelemetry_http.py0000664000175100017510000001474715033033467027624 0ustar00mylesmyles# # Copyright 2024 cmss, inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Tests for ceilometer/publisher/opentelemetry.py""" import json import time from unittest import mock import uuid from oslo_utils import timeutils from oslotest import base import requests from urllib import parse as urlparse from ceilometer.publisher import opentelemetry_http from ceilometer import sample from ceilometer import service class TestOpentelemetryHttpPublisher(base.BaseTestCase): resource_id = str(uuid.uuid4()) format_time = timeutils.utcnow().isoformat() sample_data = [ sample.Sample( name='alpha', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id=resource_id, timestamp=format_time, resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='beta', type=sample.TYPE_DELTA, unit='', volume=3, user_id='test', project_id='test', resource_id=resource_id, timestamp=format_time, resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='gamma', type=sample.TYPE_GAUGE, unit='', volume=5, user_id='test', project_id='test', resource_id=resource_id, timestamp=format_time, resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='delta.epsilon', type=sample.TYPE_GAUGE, unit='', volume=7, user_id='test', project_id='test', resource_id=resource_id, timestamp=format_time, resource_metadata={'name': 'TestPublish'}, ), ] @staticmethod def _make_fake_json(sample, format_time): struct_time = timeutils.parse_isotime(format_time).timetuple() unix_time = int(time.mktime(struct_time)) if sample.type == "cumulative": metric_type = "counter" else: metric_type = "gauge" return {"resource_metrics": [{ "scope_metrics": [{ "scope": { "name": "ceilometer", "version": "v1" }, "metrics": [{ "name": sample.name.replace(".", "_"), "description": sample.name + " unit:", "unit": "", metric_type: { "data_points": [{ "attributes": [{ "key": "resource_id", "value": { "string_value": sample.resource_id } }, { "key": 
"user_id", "value": { "string_value": "test" } }, { "key": "project_id", "value": { "string_value": "test" } }], "start_time_unix_nano": unix_time, "time_unix_nano": unix_time, "as_double": sample.volume, "flags": 0 }]}}]}]}]} def setUp(self): super().setUp() self.CONF = service.prepare_service([], []) def test_post_samples(self): """Test publisher post.""" parsed_url = urlparse.urlparse( 'opentelemetryhttp://localhost:4318/v1/metrics') publisher = opentelemetry_http.OpentelemetryHttpPublisher( self.CONF, parsed_url) res = requests.Response() res.status_code = 200 with mock.patch.object(requests.Session, 'post', return_value=res) as m_req: publisher.publish_samples(self.sample_data) datas = [] for s in self.sample_data: datas.append(self._make_fake_json(s, self.format_time)) expected = [] for d in datas: expected.append(mock.call('http://localhost:4318/v1/metrics', auth=None, cert=None, data=json.dumps(d), headers={'Content-type': 'application/json'}, timeout=5, verify=True)) self.assertEqual(expected, m_req.mock_calls) def test_post_samples_ssl(self): """Test publisher post.""" parsed_url = urlparse.urlparse( 'opentelemetryhttp://localhost:4318/v1/metrics?ssl=1') publisher = opentelemetry_http.OpentelemetryHttpPublisher( self.CONF, parsed_url) res = requests.Response() res.status_code = 200 with mock.patch.object(requests.Session, 'post', return_value=res) as m_req: publisher.publish_samples(self.sample_data) datas = [] for s in self.sample_data: datas.append(self._make_fake_json(s, self.format_time)) expected = [] for d in datas: expected.append(mock.call('https://localhost:4318/v1/metrics', auth=None, cert=None, data=json.dumps(d), headers={'Content-type': 'application/json'}, timeout=5, verify=True)) self.assertEqual(expected, m_req.mock_calls) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/publisher/test_prometheus.py0000664000175100017510000001175015033033467026053 
0ustar00mylesmyles# # Copyright 2016 IBM # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for ceilometer/publisher/prometheus.py""" import datetime from unittest import mock import uuid from oslo_utils import timeutils from oslotest import base import requests from urllib import parse as urlparse from ceilometer.publisher import prometheus from ceilometer import sample from ceilometer import service class TestPrometheusPublisher(base.BaseTestCase): resource_id = str(uuid.uuid4()) sample_data = [ sample.Sample( name='alpha', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id=resource_id, timestamp=timeutils.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='beta', type=sample.TYPE_DELTA, unit='', volume=3, user_id='test', project_id='test', resource_id=resource_id, timestamp=timeutils.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='gamma', type=sample.TYPE_GAUGE, unit='', volume=5, user_id='test', project_id='test', resource_id=resource_id, timestamp=datetime.datetime.now().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='delta.epsilon', type=sample.TYPE_GAUGE, unit='', volume=7, user_id='test', project_id='test', resource_id=resource_id, timestamp=datetime.datetime.now().isoformat(), resource_metadata={'name': 'TestPublish'}, ), ] def setUp(self): super().setUp() self.CONF = service.prepare_service([], []) def 
test_post_samples(self): """Test publisher post.""" parsed_url = urlparse.urlparse( 'prometheus://localhost:90/metrics/job/os') publisher = prometheus.PrometheusPublisher(self.CONF, parsed_url) res = requests.Response() res.status_code = 200 with mock.patch.object(requests.Session, 'post', return_value=res) as m_req: publisher.publish_samples(self.sample_data) data = """# TYPE alpha counter alpha{{resource_id="{}", user_id="test", project_id="test"}} 1 beta{{resource_id="{}", user_id="test", project_id="test"}} 3 # TYPE gamma gauge gamma{{resource_id="{}", user_id="test", project_id="test"}} 5 # TYPE delta_epsilon gauge delta_epsilon{{resource_id="{}", user_id="test", project_id="test"}} 7 """.format(self.resource_id, self.resource_id, self.resource_id, self.resource_id) expected = [ mock.call('http://localhost:90/metrics/job/os', auth=None, cert=None, data=data, headers={'Content-type': 'plain/text'}, timeout=5, verify=True) ] self.assertEqual(expected, m_req.mock_calls) def test_post_samples_ssl(self): """Test publisher post.""" parsed_url = urlparse.urlparse( 'prometheus://localhost:90/metrics/job/os?ssl=1') publisher = prometheus.PrometheusPublisher(self.CONF, parsed_url) res = requests.Response() res.status_code = 200 with mock.patch.object(requests.Session, 'post', return_value=res) as m_req: publisher.publish_samples(self.sample_data) data = """# TYPE alpha counter alpha{{resource_id="{}", user_id="test", project_id="test"}} 1 beta{{resource_id="{}", user_id="test", project_id="test"}} 3 # TYPE gamma gauge gamma{{resource_id="{}", user_id="test", project_id="test"}} 5 # TYPE delta_epsilon gauge delta_epsilon{{resource_id="{}", user_id="test", project_id="test"}} 7 """.format(self.resource_id, self.resource_id, self.resource_id, self.resource_id) expected = [ mock.call('https://localhost:90/metrics/job/os', auth=None, cert=None, data=data, headers={'Content-type': 'plain/text'}, timeout=5, verify=True) ] self.assertEqual(expected, m_req.mock_calls) 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/publisher/test_tcp.py0000664000175100017510000001516015033033467024445 0ustar00mylesmyles# # Copyright 2022 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for ceilometer/publisher/tcp.py""" from unittest import mock import msgpack from oslo_utils import netutils from oslo_utils import timeutils from oslotest import base from ceilometer.publisher.tcp import TCPPublisher from ceilometer.publisher import utils from ceilometer import sample from ceilometer import service COUNTER_SOURCE = 'testsource' class TestTCPPublisher(base.BaseTestCase): test_data = [ sample.Sample( name='test', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, source=COUNTER_SOURCE, ), sample.Sample( name='test', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, source=COUNTER_SOURCE, ), sample.Sample( name='test2', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, source=COUNTER_SOURCE, ), sample.Sample( 
name='test2', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, source=COUNTER_SOURCE, ), sample.Sample( name='test3', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, source=COUNTER_SOURCE, ), ] @staticmethod def _make_fake_socket(published): def _fake_socket_create_connection(inet_addr): def record_data(msg): msg_length = int.from_bytes(msg[0:8], "little") published.append(msg[8:msg_length + 8]) tcp_socket = mock.Mock() tcp_socket.send = record_data return tcp_socket return _fake_socket_create_connection def setUp(self): super().setUp() self.CONF = service.prepare_service([], []) self.CONF.publisher.telemetry_secret = 'not-so-secret' def test_published(self): self.data_sent = [] with mock.patch('ceilometer.publisher.tcp.socket.create_connection', self._make_fake_socket(self.data_sent)): publisher = TCPPublisher(self.CONF, netutils.urlsplit('tcp://somehost')) publisher.publish_samples(self.test_data) self.assertEqual(5, len(self.data_sent)) sent_counters = [] for data in self.data_sent: counter = msgpack.loads(data, raw=False) sent_counters.append(counter) # Check that counters are equal def sort_func(counter): return counter['counter_name'] counters = [utils.meter_message_from_counter(d, "not-so-secret", publisher.conf.host) for d in self.test_data] counters.sort(key=sort_func) sent_counters.sort(key=sort_func) self.assertEqual(counters, sent_counters) def _make_disconnecting_socket(self): def _fake_socket_create_connection(inet_addr): def record_data(msg): if not self.connections: self.connections = True raise OSError msg_length = int.from_bytes(msg[0:8], "little") self.data_sent.append(msg[8:msg_length + 8]) tcp_socket = mock.MagicMock() tcp_socket.send = record_data 
return tcp_socket return _fake_socket_create_connection def test_reconnect(self): self.data_sent = [] self.connections = False with mock.patch('ceilometer.publisher.tcp.socket.create_connection', self._make_disconnecting_socket()): publisher = TCPPublisher(self.CONF, netutils.urlsplit('tcp://somehost')) publisher.publish_samples(self.test_data) sent_counters = [] for data in self.data_sent: counter = msgpack.loads(data, raw=False) sent_counters.append(counter) # Check that counters are equal def sort_func(counter): return counter['counter_name'] counters = [utils.meter_message_from_counter(d, "not-so-secret", publisher.conf.host) for d in self.test_data] counters.sort(key=sort_func) sent_counters.sort(key=sort_func) self.assertEqual(counters, sent_counters) @staticmethod def _raise_OSError(*args): raise OSError def _make_broken_socket(self, inet_addr): tcp_socket = mock.Mock() tcp_socket.send = self._raise_OSError return tcp_socket def test_publish_error(self): with mock.patch('ceilometer.publisher.tcp.socket.create_connection', self._make_broken_socket): publisher = TCPPublisher(self.CONF, netutils.urlsplit('tcp://localhost')) publisher.publish_samples(self.test_data) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/publisher/test_udp.py0000664000175100017510000001155515033033467024453 0ustar00mylesmyles# # Copyright 2013-2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for ceilometer/publisher/udp.py""" from unittest import mock import msgpack from oslo_utils import netutils from oslo_utils import timeutils from oslotest import base from ceilometer.publisher import udp from ceilometer.publisher import utils from ceilometer import sample from ceilometer import service COUNTER_SOURCE = 'testsource' class TestUDPPublisher(base.BaseTestCase): test_data = [ sample.Sample( name='test', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, source=COUNTER_SOURCE, ), sample.Sample( name='test', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, source=COUNTER_SOURCE, ), sample.Sample( name='test2', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, source=COUNTER_SOURCE, ), sample.Sample( name='test2', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, source=COUNTER_SOURCE, ), sample.Sample( name='test3', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id='test_run_tasks', timestamp=timeutils.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, source=COUNTER_SOURCE, ), ] @staticmethod def _make_fake_socket(published): def _fake_socket_socket(family, type): def record_data(msg, dest): published.append((msg, dest)) udp_socket = mock.Mock() udp_socket.sendto = record_data return udp_socket return _fake_socket_socket def setUp(self): super().setUp() self.CONF = service.prepare_service([], []) 
self.CONF.publisher.telemetry_secret = 'not-so-secret' def test_published(self): self.data_sent = [] with mock.patch('socket.socket', self._make_fake_socket(self.data_sent)): publisher = udp.UDPPublisher( self.CONF, netutils.urlsplit('udp://somehost')) publisher.publish_samples(self.test_data) self.assertEqual(5, len(self.data_sent)) sent_counters = [] for data, dest in self.data_sent: counter = msgpack.loads(data, raw=False) sent_counters.append(counter) # Check destination self.assertEqual(('somehost', 4952), dest) # Check that counters are equal def sort_func(counter): return counter['counter_name'] counters = [utils.meter_message_from_counter(d, "not-so-secret") for d in self.test_data] counters.sort(key=sort_func) sent_counters.sort(key=sort_func) self.assertEqual(counters, sent_counters) @staticmethod def _raise_ioerror(*args): raise OSError def _make_broken_socket(self, family, type): udp_socket = mock.Mock() udp_socket.sendto = self._raise_ioerror return udp_socket def test_publish_error(self): with mock.patch('socket.socket', self._make_broken_socket): publisher = udp.UDPPublisher( self.CONF, netutils.urlsplit('udp://localhost')) publisher.publish_samples(self.test_data) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/publisher/test_utils.py0000664000175100017510000001436715033033467025027 0ustar00mylesmyles# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Tests for ceilometer/publisher/utils.py """ import json from oslotest import base from ceilometer.publisher import utils class TestSignature(base.BaseTestCase): def test_compute_signature_change_key(self): sig1 = utils.compute_signature({'a': 'A', 'b': 'B'}, 'not-so-secret') sig2 = utils.compute_signature({'A': 'A', 'b': 'B'}, 'not-so-secret') self.assertNotEqual(sig1, sig2) def test_compute_signature_change_value(self): sig1 = utils.compute_signature({'a': 'A', 'b': 'B'}, 'not-so-secret') sig2 = utils.compute_signature({'a': 'a', 'b': 'B'}, 'not-so-secret') self.assertNotEqual(sig1, sig2) def test_compute_signature_same(self): sig1 = utils.compute_signature({'a': 'A', 'b': 'B'}, 'not-so-secret') sig2 = utils.compute_signature({'a': 'A', 'b': 'B'}, 'not-so-secret') self.assertEqual(sig1, sig2) def test_compute_signature_signed(self): data = {'a': 'A', 'b': 'B'} sig1 = utils.compute_signature(data, 'not-so-secret') data['message_signature'] = sig1 sig2 = utils.compute_signature(data, 'not-so-secret') self.assertEqual(sig1, sig2) def test_compute_signature_use_configured_secret(self): data = {'a': 'A', 'b': 'B'} sig1 = utils.compute_signature(data, 'not-so-secret') sig2 = utils.compute_signature(data, 'different-value') self.assertNotEqual(sig1, sig2) def test_verify_signature_signed(self): data = {'a': 'A', 'b': 'B'} sig1 = utils.compute_signature(data, 'not-so-secret') data['message_signature'] = sig1 self.assertTrue(utils.verify_signature(data, 'not-so-secret')) def test_verify_signature_unsigned(self): data = {'a': 'A', 'b': 'B'} self.assertFalse(utils.verify_signature(data, 'not-so-secret')) def test_verify_signature_incorrect(self): data = {'a': 'A', 'b': 'B', 'message_signature': 'Not the same'} self.assertFalse(utils.verify_signature(data, 'not-so-secret')) def test_verify_signature_invalid_encoding(self): data = {'a': 'A', 'b': 'B', 'message_signature': 
''} self.assertFalse(utils.verify_signature(data, 'not-so-secret')) def test_verify_signature_unicode(self): data = {'a': 'A', 'b': 'B', 'message_signature': ''} self.assertFalse(utils.verify_signature(data, 'not-so-secret')) def test_verify_signature_nested(self): data = {'a': 'A', 'b': 'B', 'nested': {'a': 'A', 'b': 'B', }, } data['message_signature'] = utils.compute_signature( data, 'not-so-secret') self.assertTrue(utils.verify_signature(data, 'not-so-secret')) def test_verify_signature_nested_json(self): data = {'a': 'A', 'b': 'B', 'nested': {'a': 'A', 'b': 'B', 'c': ('c',), 'd': ['d'] }, } data['message_signature'] = utils.compute_signature( data, 'not-so-secret') jsondata = json.loads(json.dumps(data)) self.assertTrue(utils.verify_signature(jsondata, 'not-so-secret')) def test_verify_unicode_symbols(self): data = {'a\xe9\u0437': 'A', 'b': 'B\xe9\u0437' } data['message_signature'] = utils.compute_signature( data, 'not-so-secret') jsondata = json.loads(json.dumps(data)) self.assertTrue(utils.verify_signature(jsondata, 'not-so-secret')) def test_verify_no_secret(self): data = {'a': 'A', 'b': 'B'} self.assertTrue(utils.verify_signature(data, '')) class TestUtils(base.BaseTestCase): def test_recursive_keypairs(self): data = {'a': 'A', 'b': 'B', 'nested': {'a': 'A', 'b': 'B'}} pairs = list(utils.recursive_keypairs(data)) self.assertEqual([('a', 'A'), ('b', 'B'), ('nested:a', 'A'), ('nested:b', 'B')], pairs) def test_recursive_keypairs_with_separator(self): data = {'a': 'A', 'b': 'B', 'nested': {'a': 'A', 'b': 'B', }, } separator = '.' 
pairs = list(utils.recursive_keypairs(data, separator)) self.assertEqual([('a', 'A'), ('b', 'B'), ('nested.a', 'A'), ('nested.b', 'B')], pairs) def test_recursive_keypairs_with_list_of_dict(self): small = 1 big = 1 << 64 expected = [('a', 'A'), ('b', 'B'), ('nested:list', [{small: 99, big: 42}])] data = {'a': 'A', 'b': 'B', 'nested': {'list': [{small: 99, big: 42}]}} pairs = list(utils.recursive_keypairs(data)) self.assertEqual(len(expected), len(pairs)) for k, v in pairs: # the keys 1 and 1<<64 cause a hash collision on 64bit platforms if k == 'nested:list': self.assertIn(v, [[{small: 99, big: 42}], [{big: 42, small: 99}]]) else: self.assertIn((k, v), expected) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/publisher/test_zaqar.py0000664000175100017510000001062715033033467025000 0ustar00mylesmyles# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime from unittest import mock import uuid from oslo_utils import timeutils from oslotest import base from urllib import parse as urlparse from ceilometer.event import models as event from ceilometer.publisher import zaqar from ceilometer import sample from ceilometer import service class TestZaqarPublisher(base.BaseTestCase): resource_id = str(uuid.uuid4()) sample_data = [ sample.Sample( name='alpha', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id=resource_id, timestamp=timeutils.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='beta', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id=resource_id, timestamp=timeutils.utcnow().isoformat(), resource_metadata={'name': 'TestPublish'}, ), sample.Sample( name='gamma', type=sample.TYPE_CUMULATIVE, unit='', volume=1, user_id='test', project_id='test', resource_id=resource_id, timestamp=datetime.datetime.now().isoformat(), resource_metadata={'name': 'TestPublish'}, ), ] event_data = [event.Event( message_id=str(uuid.uuid4()), event_type='event_%d' % i, generated=timeutils.utcnow().isoformat(), traits=[], raw={'payload': {'some': 'aa'}}) for i in range(3)] def setUp(self): super().setUp() self.CONF = service.prepare_service([], []) def test_zaqar_publisher_config(self): """Test publisher config parameters.""" parsed_url = urlparse.urlparse('zaqar://') self.assertRaises(ValueError, zaqar.ZaqarPublisher, self.CONF, parsed_url) parsed_url = urlparse.urlparse('zaqar://?queue=foo&ttl=bar') self.assertRaises(ValueError, zaqar.ZaqarPublisher, self.CONF, parsed_url) parsed_url = urlparse.urlparse('zaqar://?queue=foo&ttl=60') publisher = zaqar.ZaqarPublisher(self.CONF, parsed_url) self.assertEqual(60, publisher.ttl) parsed_url = urlparse.urlparse('zaqar://?queue=foo') publisher = zaqar.ZaqarPublisher(self.CONF, parsed_url) self.assertEqual(3600, publisher.ttl) self.assertEqual('foo', 
publisher.queue_name) @mock.patch('zaqarclient.queues.v2.queues.Queue') def test_zaqar_post_samples(self, mock_queue): """Test publisher post.""" parsed_url = urlparse.urlparse('zaqar://?queue=foo') publisher = zaqar.ZaqarPublisher(self.CONF, parsed_url) mock_post = mock.Mock() mock_queue.return_value = mock_post publisher.publish_samples(self.sample_data) mock_queue.assert_called_once_with(mock.ANY, 'foo') self.assertEqual( 3, len(mock_post.post.call_args_list[0][0][0])) self.assertEqual( mock_post.post.call_args_list[0][0][0][0]['body'], self.sample_data[0].as_dict()) @mock.patch('zaqarclient.queues.v2.queues.Queue') def test_zaqar_post_events(self, mock_queue): """Test publisher post.""" parsed_url = urlparse.urlparse('zaqar://?queue=foo') publisher = zaqar.ZaqarPublisher(self.CONF, parsed_url) mock_post = mock.Mock() mock_queue.return_value = mock_post publisher.publish_events(self.event_data) mock_queue.assert_called_once_with(mock.ANY, 'foo') self.assertEqual( 3, len(mock_post.post.call_args_list[0][0][0])) self.assertEqual( mock_post.post.call_args_list[0][0][0][0]['body'], self.event_data[0].serialize()) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/test_bin.py0000664000175100017510000000775415033033467022444 0ustar00mylesmyles# Copyright 2012 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import subprocess import time from oslo_utils import fileutils from ceilometer.tests import base class BinTestCase(base.BaseTestCase): def setUp(self): super().setUp() content = ("[DEFAULT]\n" "transport_url = fake://\n") content = content.encode('utf-8') self.tempfile = fileutils.write_to_tempfile(content=content, prefix='ceilometer', suffix='.conf') def tearDown(self): super().tearDown() os.remove(self.tempfile) def test_upgrade_run(self): subp = subprocess.Popen(['ceilometer-upgrade', '--skip-gnocchi-resource-types', "--config-file=%s" % self.tempfile]) self.assertEqual(0, subp.wait()) class BinSendSampleTestCase(base.BaseTestCase): def setUp(self): super().setUp() pipeline_cfg_file = self.path_get( 'ceilometer/pipeline/data/pipeline.yaml') content = ("[DEFAULT]\n" "transport_url = fake://\n" "pipeline_cfg_file={}\n".format(pipeline_cfg_file)) content = content.encode('utf-8') self.tempfile = fileutils.write_to_tempfile(content=content, prefix='ceilometer', suffix='.conf') def tearDown(self): super().tearDown() os.remove(self.tempfile) def test_send_counter_run(self): subp = subprocess.Popen(['ceilometer-send-sample', "--config-file=%s" % self.tempfile, "--sample-resource=someuuid", "--sample-name=mycounter"]) self.assertEqual(0, subp.wait()) class BinCeilometerPollingServiceTestCase(base.BaseTestCase): def setUp(self): super().setUp() self.tempfile = None self.subp = None def tearDown(self): if self.subp: try: self.subp.kill() except OSError: pass os.remove(self.tempfile) super().tearDown() def test_starting_with_duplication_namespaces(self): content = ("[DEFAULT]\n" "transport_url = fake://\n") content = content.encode('utf-8') self.tempfile = fileutils.write_to_tempfile(content=content, prefix='ceilometer', suffix='.conf') self.subp = subprocess.Popen(['ceilometer-polling', "--config-file=%s" % self.tempfile, "--polling-namespaces", "compute", "compute"], stderr=subprocess.PIPE) expected = (b'Duplicated values: [\'compute\', \'compute\'] ' b'found 
in CLI options, auto de-duplicated') # NOTE(gordc): polling process won't quit so wait for a bit and check start = time.time() while time.time() - start < 5: output = self.subp.stderr.readline() if expected in output: break else: self.fail('Did not detect expected warning: %s' % expected) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/test_cache_utils.py0000664000175100017510000001003715033033467024143 0ustar00mylesmyles# # Copyright 2022 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from ceilometer import cache_utils from ceilometer import service as ceilometer_service from oslo_cache.backends import dictionary from oslo_cache import core as cache from oslo_config import fixture as config_fixture from oslotest import base class CacheConfFixture(config_fixture.Config): def setUp(self): super().setUp() self.conf = ceilometer_service.\ prepare_service(argv=[], config_files=[]) cache.configure(self.conf) class TestOsloCache(base.BaseTestCase): def setUp(self): super().setUp() conf = ceilometer_service.prepare_service(argv=[], config_files=[]) dict_conf_fixture = CacheConfFixture(conf) self.useFixture(dict_conf_fixture) dict_conf_fixture.config(enabled=True, group='cache') dict_conf_fixture.config(expiration_time=600, backend='oslo_cache.dict', group='cache') self.dict_conf = dict_conf_fixture.conf # enable_retry_client is only supported by # 'dogpile.cache.pymemcache' backend which makes this # incorrect config faulty_conf_fixture = CacheConfFixture(conf) self.useFixture(faulty_conf_fixture) faulty_conf_fixture.config(enabled=True, group='cache') faulty_conf_fixture.config(expiration_time=600, backend='dogpile.cache.memcached', group='cache', enable_retry_client='true') self.faulty_conf = faulty_conf_fixture.conf no_cache_fixture = CacheConfFixture(conf) self.useFixture(no_cache_fixture) # no_cache_fixture.config() self.no_cache_conf = no_cache_fixture.conf def test_get_cache_region(self): self.assertIsNotNone(cache_utils.get_cache_region(self.dict_conf)) # having invalid configurations will return None with self.assertLogs('ceilometer.cache_utils', level='ERROR') as logs: self.assertIsNone( cache_utils.get_cache_region(self.faulty_conf) ) cache_configure_failed = logs.output self.assertIn( 'ERROR:ceilometer.cache_utils:' 'failed to configure oslo_cache: ' 'Retry client is only supported by ' 'the \'dogpile.cache.pymemcache\' backend.', cache_configure_failed) def test_get_client(self): dict_cache_client = cache_utils.get_client(self.dict_conf) 
self.assertIsNotNone(dict_cache_client) self.assertIsInstance(dict_cache_client.region.backend, dictionary.DictCacheBackend) no_cache_config = cache_utils.get_client(self.no_cache_conf) self.assertIsNotNone(no_cache_config) self.assertIsInstance(dict_cache_client.region.backend, dictionary.DictCacheBackend) # having invalid configurations will return None with self.assertLogs('ceilometer.cache_utils', level='ERROR') as logs: cache_client = cache_utils.get_client(self.faulty_conf) cache_configure_failed = logs.output self.assertIsNone(cache_client) self.assertIn( 'ERROR:ceilometer.cache_utils:' 'failed to configure oslo_cache: ' 'Retry client is only supported by ' 'the \'dogpile.cache.pymemcache\' backend.', cache_configure_failed) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/test_declarative.py0000664000175100017510000000313115033033467024140 0ustar00mylesmyles# # Copyright 2016 Mirantis, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock import fixtures from ceilometer import declarative from ceilometer.tests import base class TestDefinition(base.BaseTestCase): def setUp(self): super().setUp() self.configs = [ "_field1", "_field2|_field3", {'fields': 'field4.`split(., 1, 1)`'}, {'fields': ['field5.arg', 'field6'], 'type': 'text'} ] self.parser = mock.MagicMock() parser_patch = fixtures.MockPatch( "jsonpath_rw_ext.parser.ExtentedJsonPathParser.parse", new=self.parser) self.useFixture(parser_patch) def test_caching_parsers(self): for config in self.configs * 2: declarative.Definition("test", config, mock.MagicMock()) self.assertEqual(4, self.parser.call_count) self.parser.assert_has_calls([ mock.call("_field1"), mock.call("_field2|_field3"), mock.call("field4.`split(., 1, 1)`"), mock.call("(field5.arg)|(field6)"), ]) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/test_decoupled_pipeline.py0000664000175100017510000001630715033033467025517 0ustar00mylesmyles# # Copyright 2014 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from ceilometer.pipeline import base from ceilometer.pipeline import sample as pipeline from ceilometer import sample from ceilometer.tests.unit import pipeline_base class TestDecoupledPipeline(pipeline_base.BasePipelineTestCase): def _setup_pipeline_cfg(self): source = {'name': 'test_source', 'meters': ['a'], 'sinks': ['test_sink']} sink = {'name': 'test_sink', 'publishers': ['test://']} self.pipeline_cfg = {'sources': [source], 'sinks': [sink]} def _augment_pipeline_cfg(self): self.pipeline_cfg['sources'].append({ 'name': 'second_source', 'meters': ['b'], 'sinks': ['second_sink'] }) self.pipeline_cfg['sinks'].append({ 'name': 'second_sink', 'publishers': ['new'], }) def _break_pipeline_cfg(self): self.pipeline_cfg['sources'].append({ 'name': 'second_source', 'meters': ['b'], 'sinks': ['second_sink'] }) self.pipeline_cfg['sinks'].append({ 'name': 'second_sink', 'publishers': ['except'], }) def _dup_pipeline_name_cfg(self): self.pipeline_cfg['sources'].append({ 'name': 'test_source', 'meters': ['b'], 'sinks': ['test_sink'] }) def _set_pipeline_cfg(self, field, value): if field in self.pipeline_cfg['sources'][0]: self.pipeline_cfg['sources'][0][field] = value else: self.pipeline_cfg['sinks'][0][field] = value def _extend_pipeline_cfg(self, field, value): if field in self.pipeline_cfg['sources'][0]: self.pipeline_cfg['sources'][0][field].extend(value) else: self.pipeline_cfg['sinks'][0][field].extend(value) def _unset_pipeline_cfg(self, field): if field in self.pipeline_cfg['sources'][0]: del self.pipeline_cfg['sources'][0][field] else: del self.pipeline_cfg['sinks'][0][field] def test_source_no_sink(self): del self.pipeline_cfg['sinks'] self._exception_create_pipelinemanager() def test_source_dangling_sink(self): self.pipeline_cfg['sources'].append({ 'name': 'second_source', 'meters': ['b'], 'sinks': ['second_sink'] }) self._exception_create_pipelinemanager() def test_sink_no_source(self): del self.pipeline_cfg['sources'] self._exception_create_pipelinemanager() def 
test_source_with_multiple_sinks(self): meter_cfg = ['a', 'b'] self._set_pipeline_cfg('meters', meter_cfg) self.pipeline_cfg['sinks'].append({ 'name': 'second_sink', 'publishers': ['new'], }) self.pipeline_cfg['sources'][0]['sinks'].append('second_sink') self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_counter]) self.test_counter = sample.Sample( name='b', type=self.test_counter.type, volume=self.test_counter.volume, unit=self.test_counter.unit, user_id=self.test_counter.user_id, project_id=self.test_counter.project_id, resource_id=self.test_counter.resource_id, timestamp=self.test_counter.timestamp, resource_metadata=self.test_counter.resource_metadata, ) with pipeline_manager.publisher() as p: p([self.test_counter]) self.assertEqual(2, len(pipeline_manager.pipelines)) self.assertEqual('test_source:test_sink', str(pipeline_manager.pipelines[0])) self.assertEqual('test_source:second_sink', str(pipeline_manager.pipelines[1])) test_publisher = pipeline_manager.pipelines[0].publishers[0] new_publisher = pipeline_manager.pipelines[1].publishers[0] for publisher in (test_publisher, new_publisher): self.assertEqual(2, len(publisher.samples)) self.assertEqual(2, publisher.calls) self.assertEqual('a', getattr(publisher.samples[0], "name")) self.assertEqual('b', getattr(publisher.samples[1], "name")) def test_multiple_sources_with_single_sink(self): self.pipeline_cfg['sources'].append({ 'name': 'second_source', 'meters': ['b'], 'sinks': ['test_sink'] }) self._build_and_set_new_pipeline() pipeline_manager = pipeline.SamplePipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_counter]) self.test_counter = sample.Sample( name='b', type=self.test_counter.type, volume=self.test_counter.volume, unit=self.test_counter.unit, user_id=self.test_counter.user_id, project_id=self.test_counter.project_id, resource_id=self.test_counter.resource_id, 
timestamp=self.test_counter.timestamp, resource_metadata=self.test_counter.resource_metadata, ) with pipeline_manager.publisher() as p: p([self.test_counter]) self.assertEqual(2, len(pipeline_manager.pipelines)) self.assertEqual('test_source:test_sink', str(pipeline_manager.pipelines[0])) self.assertEqual('second_source:test_sink', str(pipeline_manager.pipelines[1])) test_publisher = pipeline_manager.pipelines[0].publishers[0] another_publisher = pipeline_manager.pipelines[1].publishers[0] for publisher in [test_publisher, another_publisher]: self.assertEqual(2, len(publisher.samples)) self.assertEqual(2, publisher.calls) self.assertEqual('a', getattr(publisher.samples[0], "name")) self.assertEqual('b', getattr(publisher.samples[1], "name")) def test_duplicated_sinks_names(self): self.pipeline_cfg['sinks'].append({ 'name': 'test_sink', 'publishers': ['except'], }) self._build_and_set_new_pipeline() self.assertRaises(base.PipelineException, pipeline.SamplePipelineManager, self.CONF) def test_duplicated_source_names(self): self.pipeline_cfg['sources'].append({ 'name': 'test_source', 'meters': ['a'], 'sinks': ['test_sink'] }) self._build_and_set_new_pipeline() self.assertRaises(base.PipelineException, pipeline.SamplePipelineManager, self.CONF) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/test_event_pipeline.py0000664000175100017510000003354615033033467024700 0ustar00mylesmyles# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import traceback import uuid import fixtures from oslo_utils import timeutils from ceilometer.event import models from ceilometer.pipeline import base as pipeline from ceilometer.pipeline import event from ceilometer import publisher from ceilometer.publisher import test as test_publisher from ceilometer import service from ceilometer.tests import base class EventPipelineTestCase(base.BaseTestCase): def get_publisher(self, conf, url, namespace=''): fake_drivers = {'test://': test_publisher.TestPublisher, 'new://': test_publisher.TestPublisher, 'except://': self.PublisherClassException} return fake_drivers[url](conf, url) class PublisherClassException(publisher.ConfigPublisherBase): def publish_samples(self, samples): pass def publish_events(self, events): raise Exception() def setUp(self): super().setUp() self.CONF = service.prepare_service([], []) self.test_event = models.Event( message_id=uuid.uuid4(), event_type='a', generated=timeutils.utcnow(), traits=[ models.Trait('t_text', 1, 'text_trait'), models.Trait('t_int', 2, 'int_trait'), models.Trait('t_float', 3, 'float_trait'), models.Trait('t_datetime', 4, 'datetime_trait') ], raw={'status': 'started'} ) self.test_event2 = models.Event( message_id=uuid.uuid4(), event_type='b', generated=timeutils.utcnow(), traits=[ models.Trait('t_text', 1, 'text_trait'), models.Trait('t_int', 2, 'int_trait'), models.Trait('t_float', 3, 'float_trait'), models.Trait('t_datetime', 4, 'datetime_trait') ], raw={'status': 'stopped'} ) self.useFixture(fixtures.MockPatchObject( publisher, 'get_publisher', side_effect=self.get_publisher)) self._setup_pipeline_cfg() self._reraise_exception = True self.useFixture(fixtures.MockPatch( 'ceilometer.pipeline.base.LOG.exception', side_effect=self._handle_reraise_exception)) def _handle_reraise_exception(self, *args, **kwargs): if self._reraise_exception: raise Exception(traceback.format_exc()) 
def _setup_pipeline_cfg(self): """Setup the appropriate form of pipeline config.""" source = {'name': 'test_source', 'events': ['a'], 'sinks': ['test_sink']} sink = {'name': 'test_sink', 'publishers': ['test://']} self.pipeline_cfg = {'sources': [source], 'sinks': [sink]} def _augment_pipeline_cfg(self): """Augment the pipeline config with an additional element.""" self.pipeline_cfg['sources'].append({ 'name': 'second_source', 'events': ['b'], 'sinks': ['second_sink'] }) self.pipeline_cfg['sinks'].append({ 'name': 'second_sink', 'publishers': ['new://'], }) def _break_pipeline_cfg(self): """Break the pipeline config with a malformed element.""" self.pipeline_cfg['sources'].append({ 'name': 'second_source', 'events': ['b'], 'sinks': ['second_sink'] }) self.pipeline_cfg['sinks'].append({ 'name': 'second_sink', 'publishers': ['except'], }) def _dup_pipeline_name_cfg(self): """Break the pipeline config with duplicate pipeline name.""" self.pipeline_cfg['sources'].append({ 'name': 'test_source', 'events': ['a'], 'sinks': ['test_sink'] }) def _set_pipeline_cfg(self, field, value): if field in self.pipeline_cfg['sources'][0]: self.pipeline_cfg['sources'][0][field] = value else: self.pipeline_cfg['sinks'][0][field] = value def _extend_pipeline_cfg(self, field, value): if field in self.pipeline_cfg['sources'][0]: self.pipeline_cfg['sources'][0][field].extend(value) else: self.pipeline_cfg['sinks'][0][field].extend(value) def _unset_pipeline_cfg(self, field): if field in self.pipeline_cfg['sources'][0]: del self.pipeline_cfg['sources'][0][field] else: del self.pipeline_cfg['sinks'][0][field] def _build_and_set_new_pipeline(self): name = self.cfg2file(self.pipeline_cfg) self.CONF.set_override('event_pipeline_cfg_file', name) def _exception_create_pipelinemanager(self): self._build_and_set_new_pipeline() self.assertRaises(pipeline.PipelineException, event.EventPipelineManager, self.CONF) def test_no_events(self): self._unset_pipeline_cfg('events') 
self._exception_create_pipelinemanager() def test_no_name(self): self._unset_pipeline_cfg('name') self._exception_create_pipelinemanager() def test_name(self): self._build_and_set_new_pipeline() pipeline_manager = event.EventPipelineManager(self.CONF) for pipe in pipeline_manager.pipelines: self.assertTrue(pipe.name.startswith('event:')) def test_no_publishers(self): self._unset_pipeline_cfg('publishers') self._exception_create_pipelinemanager() def test_check_events_include_exclude_same(self): event_cfg = ['a', '!a'] self._set_pipeline_cfg('events', event_cfg) self._exception_create_pipelinemanager() def test_check_events_include_exclude(self): event_cfg = ['a', '!b'] self._set_pipeline_cfg('events', event_cfg) self._exception_create_pipelinemanager() def test_check_events_wildcard_included(self): event_cfg = ['a', '*'] self._set_pipeline_cfg('events', event_cfg) self._exception_create_pipelinemanager() def test_check_publishers_invalid_publisher(self): publisher_cfg = ['test_invalid'] self._set_pipeline_cfg('publishers', publisher_cfg) def test_multiple_included_events(self): event_cfg = ['a', 'b'] self._set_pipeline_cfg('events', event_cfg) self._build_and_set_new_pipeline() pipeline_manager = event.EventPipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_event]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.events)) with pipeline_manager.publisher() as p: p([self.test_event2]) self.assertEqual(2, len(publisher.events)) self.assertEqual('a', getattr(publisher.events[0], 'event_type')) self.assertEqual('b', getattr(publisher.events[1], 'event_type')) def test_event_non_match(self): event_cfg = ['nomatch'] self._set_pipeline_cfg('events', event_cfg) self._build_and_set_new_pipeline() pipeline_manager = event.EventPipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_event]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(0, 
len(publisher.events)) self.assertEqual(0, publisher.calls) def test_wildcard_event(self): event_cfg = ['*'] self._set_pipeline_cfg('events', event_cfg) self._build_and_set_new_pipeline() pipeline_manager = event.EventPipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_event]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.events)) self.assertEqual('a', getattr(publisher.events[0], 'event_type')) def test_wildcard_excluded_events(self): event_cfg = ['*', '!a'] self._set_pipeline_cfg('events', event_cfg) self._build_and_set_new_pipeline() pipeline_manager = event.EventPipelineManager(self.CONF) pipe = pipeline_manager.pipelines[0] self.assertFalse(pipe.source.support_event('a')) def test_wildcard_excluded_events_not_excluded(self): event_cfg = ['*', '!b'] self._set_pipeline_cfg('events', event_cfg) self._build_and_set_new_pipeline() pipeline_manager = event.EventPipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_event]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.events)) self.assertEqual('a', getattr(publisher.events[0], 'event_type')) def test_all_excluded_events_not_excluded(self): event_cfg = ['!b', '!c'] self._set_pipeline_cfg('events', event_cfg) self._build_and_set_new_pipeline() pipeline_manager = event.EventPipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_event]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.events)) self.assertEqual('a', getattr(publisher.events[0], 'event_type')) def test_all_excluded_events_excluded(self): event_cfg = ['!a', '!c'] self._set_pipeline_cfg('events', event_cfg) self._build_and_set_new_pipeline() pipeline_manager = event.EventPipelineManager(self.CONF) pipe = pipeline_manager.pipelines[0] self.assertFalse(pipe.source.support_event('a')) self.assertTrue(pipe.source.support_event('b')) 
self.assertFalse(pipe.source.support_event('c')) def test_wildcard_and_excluded_wildcard_events(self): event_cfg = ['*', '!compute.*'] self._set_pipeline_cfg('events', event_cfg) self._build_and_set_new_pipeline() pipeline_manager = event.EventPipelineManager(self.CONF) pipe = pipeline_manager.pipelines[0] self.assertFalse(pipe.source. support_event('compute.instance.create.start')) self.assertTrue(pipe.source.support_event('identity.user.create')) def test_included_event_and_wildcard_events(self): event_cfg = ['compute.instance.create.start', 'identity.*'] self._set_pipeline_cfg('events', event_cfg) self._build_and_set_new_pipeline() pipeline_manager = event.EventPipelineManager(self.CONF) pipe = pipeline_manager.pipelines[0] self.assertTrue(pipe.source.support_event('identity.user.create')) self.assertTrue(pipe.source. support_event('compute.instance.create.start')) self.assertFalse(pipe.source. support_event('compute.instance.create.stop')) def test_excluded_event_and_excluded_wildcard_events(self): event_cfg = ['!compute.instance.create.start', '!identity.*'] self._set_pipeline_cfg('events', event_cfg) self._build_and_set_new_pipeline() pipeline_manager = event.EventPipelineManager(self.CONF) pipe = pipeline_manager.pipelines[0] self.assertFalse(pipe.source.support_event('identity.user.create')) self.assertFalse(pipe.source. support_event('compute.instance.create.start')) self.assertTrue(pipe.source. 
support_event('compute.instance.create.stop')) def test_multiple_pipeline(self): self._augment_pipeline_cfg() self._build_and_set_new_pipeline() pipeline_manager = event.EventPipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_event, self.test_event2]) publisher = pipeline_manager.pipelines[0].publishers[0] self.assertEqual(1, len(publisher.events)) self.assertEqual(1, publisher.calls) self.assertEqual('a', getattr(publisher.events[0], 'event_type')) new_publisher = pipeline_manager.pipelines[1].publishers[0] self.assertEqual(1, len(new_publisher.events)) self.assertEqual(1, new_publisher.calls) self.assertEqual('b', getattr(new_publisher.events[0], 'event_type')) def test_multiple_publisher(self): self._set_pipeline_cfg('publishers', ['test://', 'new://']) self._build_and_set_new_pipeline() pipeline_manager = event.EventPipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_event]) publisher = pipeline_manager.pipelines[0].publishers[0] new_publisher = pipeline_manager.pipelines[0].publishers[1] self.assertEqual(1, len(publisher.events)) self.assertEqual(1, len(new_publisher.events)) self.assertEqual('a', getattr(new_publisher.events[0], 'event_type')) self.assertEqual('a', getattr(publisher.events[0], 'event_type')) def test_multiple_publisher_isolation(self): self._reraise_exception = False self._set_pipeline_cfg('publishers', ['except://', 'new://']) self._build_and_set_new_pipeline() pipeline_manager = event.EventPipelineManager(self.CONF) with pipeline_manager.publisher() as p: p([self.test_event]) publisher = pipeline_manager.pipelines[0].publishers[1] self.assertEqual(1, len(publisher.events)) self.assertEqual('a', getattr(publisher.events[0], 'event_type')) def test_unique_pipeline_names(self): self._dup_pipeline_name_cfg() self._exception_create_pipelinemanager() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 
ceilometer-24.1.0.dev59/ceilometer/tests/unit/test_messaging.py0000664000175100017510000000516215033033467023640 0ustar00mylesmyles# Copyright (C) 2014 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import oslo_messaging.conffixture from oslotest import base from ceilometer import messaging from ceilometer import service class MessagingTests(base.BaseTestCase): def setUp(self): super().setUp() self.CONF = service.prepare_service([], []) self.useFixture(oslo_messaging.conffixture.ConfFixture(self.CONF)) def test_get_transport_invalid_url(self): self.assertRaises(oslo_messaging.InvalidTransportURL, messaging.get_transport, self.CONF, "notvalid!") def test_get_transport_url_caching(self): t1 = messaging.get_transport(self.CONF, 'fake://') t2 = messaging.get_transport(self.CONF, 'fake://') self.assertEqual(t1, t2) def test_get_transport_default_url_caching(self): t1 = messaging.get_transport(self.CONF) t2 = messaging.get_transport(self.CONF) self.assertEqual(t1, t2) def test_get_transport_default_url_no_caching(self): t1 = messaging.get_transport(self.CONF, cache=False) t2 = messaging.get_transport(self.CONF, cache=False) self.assertNotEqual(t1, t2) def test_get_transport_url_no_caching(self): t1 = messaging.get_transport(self.CONF, 'fake://', cache=False) t2 = messaging.get_transport(self.CONF, 'fake://', cache=False) self.assertNotEqual(t1, t2) def test_get_transport_default_url_caching_mix(self): t1 = messaging.get_transport(self.CONF) t2 = 
messaging.get_transport(self.CONF, cache=False) self.assertNotEqual(t1, t2) def test_get_transport_url_caching_mix(self): t1 = messaging.get_transport(self.CONF, 'fake://') t2 = messaging.get_transport(self.CONF, 'fake://', cache=False) self.assertNotEqual(t1, t2) def test_get_transport_optional(self): self.CONF.set_override('transport_url', 'non-url') self.assertIsNone(messaging.get_transport(self.CONF, optional=True, cache=False)) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/test_middleware.py0000664000175100017510000000776515033033467024013 0ustar00mylesmyles# # Copyright 2013-2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from ceilometer import middleware from ceilometer import service from ceilometer.tests import base HTTP_REQUEST = { 'ctxt': {'auth_token': '3d8b13de1b7d499587dfc69b77dc09c2', 'is_admin': True, 'project_id': '7c150a59fe714e6f9263774af9688f0e', 'quota_class': None, 'read_deleted': 'no', 'remote_address': '10.0.2.15', 'request_id': 'req-d68b36e0-9233-467f-9afb-d81435d64d66', 'roles': ['admin'], 'timestamp': '2012-05-08T20:23:41.425105', 'user_id': '1e3ce043029547f1a61c1996d1a531a2'}, 'event_type': 'http.request', 'payload': {'request': {'HTTP_X_FOOBAR': 'foobaz', 'HTTP_X_USER_ID': 'jd-x32', 'HTTP_X_PROJECT_ID': 'project-id', 'HTTP_X_SERVICE_NAME': 'nova'}}, 'priority': 'INFO', 'publisher_id': 'compute.vagrant-precise', 'metadata': {'message_id': 'dae6f69c-00e0-41c0-b371-41ec3b7f4451', 'timestamp': '2012-05-08 20:23:48.028195'}, } HTTP_RESPONSE = { 'ctxt': {'auth_token': '3d8b13de1b7d499587dfc69b77dc09c2', 'is_admin': True, 'project_id': '7c150a59fe714e6f9263774af9688f0e', 'quota_class': None, 'read_deleted': 'no', 'remote_address': '10.0.2.15', 'request_id': 'req-d68b36e0-9233-467f-9afb-d81435d64d66', 'roles': ['admin'], 'timestamp': '2012-05-08T20:23:41.425105', 'user_id': '1e3ce043029547f1a61c1996d1a531a2'}, 'event_type': 'http.response', 'payload': {'request': {'HTTP_X_FOOBAR': 'foobaz', 'HTTP_X_USER_ID': 'jd-x32', 'HTTP_X_PROJECT_ID': 'project-id', 'HTTP_X_SERVICE_NAME': 'nova'}, 'response': {'status': '200 OK'}}, 'priority': 'INFO', 'publisher_id': 'compute.vagrant-precise', 'metadata': {'message_id': 'dae6f69c-00e0-41c0-b371-41ec3b7f4451', 'timestamp': '2012-05-08 20:23:48.028195'}, } class TestNotifications(base.BaseTestCase): def setUp(self): super().setUp() self.CONF = service.prepare_service([], []) self.setup_messaging(self.CONF) def test_process_request_notification(self): sample = list(middleware.HTTPRequest( mock.Mock(), mock.Mock()).build_sample(HTTP_REQUEST))[0] 
self.assertEqual(HTTP_REQUEST['payload']['request']['HTTP_X_USER_ID'], sample.user_id) self.assertEqual(HTTP_REQUEST['payload']['request'] ['HTTP_X_PROJECT_ID'], sample.project_id) self.assertEqual(HTTP_REQUEST['payload']['request'] ['HTTP_X_SERVICE_NAME'], sample.resource_id) self.assertEqual(1, sample.volume) def test_process_response_notification(self): sample = list(middleware.HTTPResponse( mock.Mock(), mock.Mock()).build_sample(HTTP_RESPONSE))[0] self.assertEqual(HTTP_RESPONSE['payload']['request']['HTTP_X_USER_ID'], sample.user_id) self.assertEqual(HTTP_RESPONSE['payload']['request'] ['HTTP_X_PROJECT_ID'], sample.project_id) self.assertEqual(HTTP_RESPONSE['payload']['request'] ['HTTP_X_SERVICE_NAME'], sample.resource_id) self.assertEqual(1, sample.volume) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/test_neutronclient.py0000664000175100017510000000503315033033467024551 0ustar00mylesmyles# Copyright (C) 2014 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from oslotest import base from ceilometer import neutron_client from ceilometer import service class TestNeutronClient(base.BaseTestCase): def setUp(self): super().setUp() self.CONF = service.prepare_service([], []) self.nc = neutron_client.Client(self.CONF) self.nc.lb_version = 'v1' @staticmethod def fake_ports_list(): return {'ports': [{'admin_state_up': True, 'device_id': '674e553b-8df9-4321-87d9-93ba05b93558', 'device_owner': 'network:router_gateway', 'extra_dhcp_opts': [], 'id': '96d49cc3-4e01-40ce-9cac-c0e32642a442', 'mac_address': 'fa:16:3e:c5:35:93', 'name': '', 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', 'status': 'ACTIVE', 'tenant_id': '89271fa581ab4380bf172f868c3615f9'}, ]} def test_port_get_all(self): with mock.patch.object(self.nc.client, 'list_ports', side_effect=self.fake_ports_list): ports = self.nc.port_get_all() self.assertEqual(1, len(ports)) self.assertEqual('96d49cc3-4e01-40ce-9cac-c0e32642a442', ports[0]['id']) @staticmethod def fake_networks_list(): return {'networks': [{'admin_state_up': True, 'id': '298a3088-a446-4d5a-bad8-f92ecacd786b', 'name': 'public', 'provider:network_type': 'gre', 'provider:physical_network': None, 'provider:segmentation_id': 2, 'router:external': True, 'shared': False, 'status': 'ACTIVE', 'subnets': ['c4b6f5b8-3508-4896-b238-a441f25fb492'], 'tenant_id': '62d6f08bbd3a44f6ad6f00ca15cce4e5'}, ]} ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/test_notification.py0000664000175100017510000002043415033033467024350 0ustar00mylesmyles# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for Ceilometer notify daemon.""" import time from unittest import mock from oslo_utils import fileutils import yaml from ceilometer import messaging from ceilometer import notification from ceilometer.publisher import test as test_publisher from ceilometer import service from ceilometer.tests import base as tests_base TEST_NOTICE_CTXT = { 'auth_token': '3d8b13de1b7d499587dfc69b77dc09c2', 'is_admin': True, 'project_id': '7c150a59fe714e6f9263774af9688f0e', 'quota_class': None, 'read_deleted': 'no', 'remote_address': '10.0.2.15', 'request_id': 'req-d68b36e0-9233-467f-9afb-d81435d64d66', 'roles': ['admin'], 'timestamp': '2012-05-08T20:23:41.425105', 'user_id': '1e3ce043029547f1a61c1996d1a531a2', } TEST_NOTICE_METADATA = { 'message_id': 'dae6f69c-00e0-41c0-b371-41ec3b7f4451', 'timestamp': '2012-05-08 20:23:48.028195', } TEST_NOTICE_PAYLOAD = { 'created_at': '2012-05-08 20:23:41', 'deleted_at': '', 'disk_gb': 0, 'display_name': 'testme', 'fixed_ips': [{'address': '10.0.0.2', 'floating_ips': [], 'meta': {}, 'type': 'fixed', 'version': 4}], 'image_ref_url': 'http://10.0.2.15:9292/images/UUID', 'instance_id': '9f9d01b9-4a58-4271-9e27-398b21ab20d1', 'instance_type': 'm1.tiny', 'instance_type_id': 2, 'launched_at': '2012-05-08 20:23:47.985999', 'memory_mb': 512, 'state': 'active', 'state_description': '', 'tenant_id': '7c150a59fe714e6f9263774af9688f0e', 'user_id': '1e3ce043029547f1a61c1996d1a531a2', 'reservation_id': '1e3ce043029547f1a61c1996d1a531a3', 'vcpus': 1, 'root_gb': 0, 'ephemeral_gb': 0, 'host': 'compute-host-name', 'availability_zone': 
'1e3ce043029547f1a61c1996d1a531a4', 'os_type': 'linux?', 'architecture': 'x86', 'image_ref': 'UUID', 'kernel_id': '1e3ce043029547f1a61c1996d1a531a5', 'ramdisk_id': '1e3ce043029547f1a61c1996d1a531a6', } class BaseNotificationTest(tests_base.BaseTestCase): def run_service(self, srv): srv.run() self.addCleanup(srv.terminate) class TestNotification(BaseNotificationTest): def setUp(self): super().setUp() self.CONF = service.prepare_service([], []) self.setup_messaging(self.CONF) self.srv = notification.NotificationService(0, self.CONF) def test_targets(self): self.assertEqual(14, len(self.srv.get_targets())) def test_start_multiple_listeners(self): urls = ["fake://vhost1", "fake://vhost2"] self.CONF.set_override("messaging_urls", urls, group="notification") self.srv.run() self.addCleanup(self.srv.terminate) self.assertEqual(2, len(self.srv.listeners)) @mock.patch('oslo_messaging.get_batch_notification_listener') def test_unique_consumers(self, mock_listener): self.CONF.set_override('notification_control_exchanges', ['dup'] * 2, group='notification') self.run_service(self.srv) # 1 target, 1 listener self.assertEqual(1, len(mock_listener.call_args_list[0][0][1])) self.assertEqual(1, len(self.srv.listeners)) def test_select_pipelines(self): self.CONF.set_override('pipelines', ['event'], group='notification') self.srv.run() self.addCleanup(self.srv.terminate) self.assertEqual(1, len(self.srv.managers)) self.assertEqual(1, len(self.srv.listeners[0].dispatcher.endpoints)) @mock.patch('ceilometer.notification.LOG') def test_select_pipelines_missing(self, logger): self.CONF.set_override('pipelines', ['meter', 'event', 'bad'], group='notification') self.srv.run() self.addCleanup(self.srv.terminate) self.assertEqual(2, len(self.srv.managers)) logger.error.assert_called_with( 'Could not load the following pipelines: %s', {'bad'}) class BaseRealNotification(BaseNotificationTest): def setup_pipeline(self, counter_names): pipeline = yaml.dump({ 'sources': [{ 'name': 'test_pipeline', 
'interval': 5, 'meters': counter_names, 'sinks': ['test_sink'] }], 'sinks': [{ 'name': 'test_sink', 'publishers': ['test://'] }] }) pipeline = pipeline.encode('utf-8') pipeline_cfg_file = fileutils.write_to_tempfile(content=pipeline, prefix="pipeline", suffix="yaml") return pipeline_cfg_file def setup_event_pipeline(self, event_names): ev_pipeline = yaml.dump({ 'sources': [{ 'name': 'test_event', 'events': event_names, 'sinks': ['test_sink'] }], 'sinks': [{ 'name': 'test_sink', 'publishers': ['test://'] }] }) ev_pipeline = ev_pipeline.encode('utf-8') ev_pipeline_cfg_file = fileutils.write_to_tempfile( content=ev_pipeline, prefix="event_pipeline", suffix="yaml") return ev_pipeline_cfg_file def setUp(self): super().setUp() self.CONF = service.prepare_service([], []) self.setup_messaging(self.CONF, 'nova') pipeline_cfg_file = self.setup_pipeline(['vcpus', 'memory']) self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) self.expected_samples = 2 ev_pipeline_cfg_file = self.setup_event_pipeline( ['compute.instance.*']) self.expected_events = 1 self.CONF.set_override("event_pipeline_cfg_file", ev_pipeline_cfg_file) self.publisher = test_publisher.TestPublisher(self.CONF, "") def _check_notification_service(self): self.run_service(self.srv) notifier = messaging.get_notifier(self.transport, "compute.vagrant-precise") notifier.info({}, 'compute.instance.create.end', TEST_NOTICE_PAYLOAD) start = time.time() while time.time() - start < 60: if (len(self.publisher.samples) >= self.expected_samples and len(self.publisher.events) >= self.expected_events): break resources = list({s.resource_id for s in self.publisher.samples}) self.assertEqual(self.expected_samples, len(self.publisher.samples)) self.assertEqual(self.expected_events, len(self.publisher.events)) self.assertEqual(["9f9d01b9-4a58-4271-9e27-398b21ab20d1"], resources) class TestRealNotification(BaseRealNotification): def setUp(self): super().setUp() self.srv = notification.NotificationService(0, self.CONF) 
@mock.patch('ceilometer.publisher.test.TestPublisher') def test_notification_service(self, fake_publisher_cls): fake_publisher_cls.return_value = self.publisher self._check_notification_service() @mock.patch('ceilometer.publisher.test.TestPublisher') def test_notification_service_error_topic(self, fake_publisher_cls): fake_publisher_cls.return_value = self.publisher self.run_service(self.srv) notifier = messaging.get_notifier(self.transport, 'compute.vagrant-precise') notifier.error({}, 'compute.instance.error', TEST_NOTICE_PAYLOAD) start = time.time() while time.time() - start < 60: if len(self.publisher.events) >= self.expected_events: break self.assertEqual(self.expected_events, len(self.publisher.events)) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/test_novaclient.py0000664000175100017510000002141615033033467024025 0ustar00mylesmyles# Copyright 2013-2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock import fixtures import glanceclient import novaclient from oslotest import base from ceilometer import nova_client from ceilometer import service class TestNovaClient(base.BaseTestCase): def setUp(self): super().setUp() self.CONF = service.prepare_service([], []) self._flavors_count = 0 self._images_count = 0 self.nv = nova_client.Client(self.CONF) self.useFixture(fixtures.MockPatchObject( self.nv.nova_client.flavors, 'get', side_effect=self.fake_flavors_get)) self.useFixture(fixtures.MockPatchObject( self.nv.glance_client.images, 'get', side_effect=self.fake_images_get)) def fake_flavors_get(self, *args, **kwargs): self._flavors_count += 1 a = mock.MagicMock() a.id = args[0] if a.id == 1: a.name = 'm1.tiny' elif a.id == 2: a.name = 'm1.large' else: raise novaclient.exceptions.NotFound('foobar') return a def fake_images_get(self, *args, **kwargs): self._images_count += 1 a = mock.MagicMock() a.id = args[0] image_details = { 1: ('ubuntu-12.04-x86', dict(kernel_id=11, ramdisk_id=21)), 2: ('centos-5.4-x64', dict(kernel_id=12, ramdisk_id=22)), 3: ('rhel-6-x64', None), 4: ('rhel-6-x64', dict()), 5: ('rhel-6-x64', dict(kernel_id=11)), 6: ('rhel-6-x64', dict(ramdisk_id=21)) } if a.id in image_details: a.name = image_details[a.id][0] a.metadata = image_details[a.id][1] else: raise glanceclient.exc.HTTPNotFound('foobar') return a @staticmethod def fake_servers_list(*args, **kwargs): a = mock.MagicMock() a.id = 42 a.flavor = {'id': 1} a.image = {'id': 1} b = mock.MagicMock() b.id = 43 b.flavor = {'id': 2} b.image = {'id': 2} return [a, b] def test_instance_get_all_by_host(self): with mock.patch.object(self.nv.nova_client.servers, 'list', side_effect=self.fake_servers_list): instances = self.nv.instance_get_all_by_host('foobar') self.assertEqual(2, len(instances)) self.assertEqual('m1.tiny', instances[0].flavor['name']) self.assertEqual('ubuntu-12.04-x86', instances[0].image['name']) self.assertEqual(11, instances[0].kernel_id) self.assertEqual(21, 
instances[0].ramdisk_id) @staticmethod def fake_servers_list_unknown_flavor(*args, **kwargs): a = mock.MagicMock() a.id = 42 a.flavor = {'id': 666} a.image = {'id': 1} return [a] def test_instance_get_all_by_host_unknown_flavor(self): with mock.patch.object( self.nv.nova_client.servers, 'list', side_effect=self.fake_servers_list_unknown_flavor): instances = self.nv.instance_get_all_by_host('foobar') self.assertEqual(1, len(instances)) self.assertEqual('unknown-id-666', instances[0].flavor['name']) @staticmethod def fake_servers_list_unknown_image(*args, **kwargs): a = mock.MagicMock() a.id = 42 a.flavor = {'id': 1} a.image = {'id': 666} return [a] @staticmethod def fake_servers_list_image_missing_metadata(*args, **kwargs): a = mock.MagicMock() a.id = 42 a.flavor = {'id': 1} a.image = {'id': args[0]} return [a] @staticmethod def fake_instance_image_missing(*args, **kwargs): a = mock.MagicMock() a.id = 42 a.flavor = {'id': 666} a.image = None return [a] def test_instance_get_all_by_host_unknown_image(self): with mock.patch.object( self.nv.nova_client.servers, 'list', side_effect=self.fake_servers_list_unknown_image): instances = self.nv.instance_get_all_by_host('foobar') self.assertEqual(1, len(instances)) self.assertEqual('unknown-id-666', instances[0].image['name']) def test_with_flavor_and_image(self): results = self.nv._with_flavor_and_image(self.fake_servers_list()) instance = results[0] self.assertEqual(2, len(results)) self.assertEqual('ubuntu-12.04-x86', instance.image['name']) self.assertEqual('m1.tiny', instance.flavor['name']) self.assertEqual(11, instance.kernel_id) self.assertEqual(21, instance.ramdisk_id) def test_with_flavor_and_image_unknown_image(self): instances = self.fake_servers_list_unknown_image() results = self.nv._with_flavor_and_image(instances) instance = results[0] self.assertEqual('unknown-id-666', instance.image['name']) self.assertNotEqual(instance.flavor['name'], 'unknown-id-666') self.assertIsNone(instance.kernel_id) 
self.assertIsNone(instance.ramdisk_id) def test_with_flavor_and_image_unknown_flavor(self): instances = self.fake_servers_list_unknown_flavor() results = self.nv._with_flavor_and_image(instances) instance = results[0] self.assertEqual('unknown-id-666', instance.flavor['name']) self.assertEqual(0, instance.flavor['vcpus']) self.assertEqual(0, instance.flavor['ram']) self.assertEqual(0, instance.flavor['disk']) self.assertNotEqual(instance.image['name'], 'unknown-id-666') self.assertEqual(11, instance.kernel_id) self.assertEqual(21, instance.ramdisk_id) def test_with_flavor_and_image_none_metadata(self): instances = self.fake_servers_list_image_missing_metadata(3) results = self.nv._with_flavor_and_image(instances) instance = results[0] self.assertIsNone(instance.kernel_id) self.assertIsNone(instance.ramdisk_id) def test_with_flavor_and_image_missing_metadata(self): instances = self.fake_servers_list_image_missing_metadata(4) results = self.nv._with_flavor_and_image(instances) instance = results[0] self.assertIsNone(instance.kernel_id) self.assertIsNone(instance.ramdisk_id) def test_with_flavor_and_image_missing_ramdisk(self): instances = self.fake_servers_list_image_missing_metadata(5) results = self.nv._with_flavor_and_image(instances) instance = results[0] self.assertEqual(11, instance.kernel_id) self.assertIsNone(instance.ramdisk_id) def test_with_flavor_and_image_missing_kernel(self): instances = self.fake_servers_list_image_missing_metadata(6) results = self.nv._with_flavor_and_image(instances) instance = results[0] self.assertIsNone(instance.kernel_id) self.assertEqual(21, instance.ramdisk_id) def test_with_flavor_and_image_no_cache(self): results = self.nv._with_flavor_and_image(self.fake_servers_list()) self.assertEqual(2, len(results)) self.assertEqual(2, self._flavors_count) self.assertEqual(2, self._images_count) def test_with_flavor_and_image_cache(self): results = self.nv._with_flavor_and_image(self.fake_servers_list() * 2) self.assertEqual(4, 
len(results)) self.assertEqual(2, self._flavors_count) self.assertEqual(2, self._images_count) def test_with_flavor_and_image_unknown_image_cache(self): instances = self.fake_servers_list_unknown_image() results = self.nv._with_flavor_and_image(instances * 2) self.assertEqual(2, len(results)) self.assertEqual(1, self._flavors_count) self.assertEqual(1, self._images_count) for instance in results: self.assertEqual('unknown-id-666', instance.image['name']) self.assertNotEqual(instance.flavor['name'], 'unknown-id-666') self.assertIsNone(instance.kernel_id) self.assertIsNone(instance.ramdisk_id) def test_with_missing_image_instance(self): instances = self.fake_instance_image_missing() results = self.nv._with_flavor_and_image(instances) instance = results[0] self.assertIsNone(instance.kernel_id) self.assertIsNone(instance.image) self.assertIsNone(instance.ramdisk_id) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/test_polling.py0000664000175100017510000000707715033033467023336 0ustar00mylesmyles# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from ceilometer.polling import manager from ceilometer import service from ceilometer.tests import base class PollingTestCase(base.BaseTestCase): def setUp(self): super().setUp() self.CONF = service.prepare_service([], []) self.poll_cfg = {'sources': [{'name': 'test_source', 'interval': 600, 'meters': ['a']}]} def _build_and_set_new_polling(self): name = self.cfg2file(self.poll_cfg) self.CONF.set_override('cfg_file', name, group='polling') def test_no_name(self): del self.poll_cfg['sources'][0]['name'] self._build_and_set_new_polling() self.assertRaises(manager.PollingException, manager.PollingManager, self.CONF) def test_no_interval(self): del self.poll_cfg['sources'][0]['interval'] self._build_and_set_new_polling() self.assertRaises(manager.PollingException, manager.PollingManager, self.CONF) def test_invalid_string_interval(self): self.poll_cfg['sources'][0]['interval'] = 'string' self._build_and_set_new_polling() self.assertRaises(manager.PollingException, manager.PollingManager, self.CONF) def test_get_interval(self): self._build_and_set_new_polling() poll_manager = manager.PollingManager(self.CONF) source = poll_manager.sources[0] self.assertEqual(600, source.get_interval()) def test_invalid_resources(self): self.poll_cfg['sources'][0]['resources'] = {'invalid': 1} self._build_and_set_new_polling() self.assertRaises(manager.PollingException, manager.PollingManager, self.CONF) def test_resources(self): resources = ['test1://', 'test2://'] self.poll_cfg['sources'][0]['resources'] = resources self._build_and_set_new_polling() poll_manager = manager.PollingManager(self.CONF) self.assertEqual(resources, poll_manager.sources[0].resources) def test_no_resources(self): self._build_and_set_new_polling() poll_manager = manager.PollingManager(self.CONF) self.assertEqual(0, len(poll_manager.sources[0].resources)) def test_check_meters_include_exclude_same(self): self.poll_cfg['sources'][0]['meters'] = ['a', '!a'] self._build_and_set_new_polling() 
self.assertRaises(manager.PollingException, manager.PollingManager, self.CONF) def test_check_meters_include_exclude(self): self.poll_cfg['sources'][0]['meters'] = ['a', '!b'] self._build_and_set_new_polling() self.assertRaises(manager.PollingException, manager.PollingManager, self.CONF) def test_check_meters_wildcard_included(self): self.poll_cfg['sources'][0]['meters'] = ['a', '*'] self._build_and_set_new_polling() self.assertRaises(manager.PollingException, manager.PollingManager, self.CONF) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/test_prom_exporter.py0000664000175100017510000004063115033033467024570 0ustar00mylesmyles# # Copyright 2022 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for ceilometer/polling/prom_exporter.py""" from oslotest import base from unittest import mock from unittest.mock import call from ceilometer.polling import manager from ceilometer.polling import prom_exporter from ceilometer import service COUNTER_SOURCE = 'testsource' class TestPromExporter(base.BaseTestCase): test_disk_latency = [ { 'source': 'openstack', 'counter_name': 'disk.device.read.latency', 'counter_type': 'cumulative', 'counter_unit': 'ns', 'counter_volume': 132128682, 'user_id': '6e7d71415cd5401cbe103829c9c5dec2', 'user_name': None, 'project_id': 'd965489b7f894cbda89cd2e25bfd85a0', 'project_name': None, 'resource_id': 'e536fff6-b20d-4aa5-ac2f-d15ac8b3af63-vda', 'timestamp': '2024-06-20T09:32:36.521082', 'resource_metadata': { 'display_name': 'myserver', 'name': 'instance-00000002', 'instance_id': 'e536fff6-b20d-4aa5-ac2f-d15ac8b3af63', 'instance_type': 'tiny', 'host': 'e0d297f5df3b62ec73c8d42b', 'instance_host': 'devstack', 'flavor': { 'id': '4af9ac72-5787-4f86-8644-0faa87ce7c83', 'name': 'tiny', 'vcpus': 1, 'ram': 512, 'disk': 1, 'ephemeral': 0, 'swap': 0 }, 'status': 'active', 'state': 'running', 'task_state': '', 'image': { 'id': '71860ed5-f66d-43e0-9514-f1d188106284' }, 'image_ref': '71860ed5-f66d-43e0-9514-f1d188106284', 'image_ref_url': None, 'architecture': 'x86_64', 'os_type': 'hvm', 'vcpus': 1, 'memory_mb': 512, 'disk_gb': 1, 'ephemeral_gb': 0, 'root_gb': 1, 'disk_name': 'vda', 'user_metadata': { 'custom_label': 'custom value' } }, 'message_id': '078029c7-2ee8-11ef-a915-bd45e2085de3', 'monotonic_time': 1819980.112406547, 'message_signature': 'f8d9a411b0cd0cb0d34e83' }, { 'source': 'openstack', 'counter_name': 'disk.device.read.latency', 'counter_type': 'cumulative', 'counter_unit': 'ns', 'counter_volume': 232128754, 'user_id': '6e7d71415cd5401cbe103829c9c5dec2', 'user_name': None, 'project_id': 'd965489b7f894cbda89cd2e25bfd85a0', 'project_name': None, 'resource_id': 'e536fff6-b20d-4aa5-ac2f-d15ac8b3af63-vda', 'timestamp': 
'2024-06-20T09:32:46.521082', 'resource_metadata': { 'display_name': 'myserver', 'name': 'instance-00000002', 'instance_id': 'e536fff6-b20d-4aa5-ac2f-d15ac8b3af63', 'instance_type': 'tiny', 'host': 'e0d297f5df3b62ec73c8d42b', 'instance_host': 'devstack', 'flavor': { 'id': '4af9ac72-5787-4f86-8644-0faa87ce7c83', 'name': 'tiny', 'vcpus': 1, 'ram': 512, 'disk': 1, 'ephemeral': 0, 'swap': 0 }, 'status': 'active', 'state': 'running', 'task_state': '', 'image': { 'id': '71860ed5-f66d-43e0-9514-f1d188106284' }, 'image_ref': '71860ed5-f66d-43e0-9514-f1d188106284', 'image_ref_url': None, 'architecture': 'x86_64', 'os_type': 'hvm', 'vcpus': 1, 'memory_mb': 512, 'disk_gb': 1, 'ephemeral_gb': 0, 'root_gb': 1, 'disk_name': 'vda', 'user_metadata': { 'custom_label': 'custom value' } }, 'message_id': '078029c7-2ee8-11ef-a915-bd45e2085de4', 'monotonic_time': 1819990.112406547, 'message_signature': 'f8d9a411b0cd0cb0d34e84' } ] test_memory_usage = [ { 'source': 'openstack', 'counter_name': 'memory.usage', 'counter_type': 'gauge', 'counter_unit': 'MB', 'counter_volume': 37.98046875, 'user_id': '6e7d71415cd5401cbe103829c9c5dec2', 'user_name': None, 'project_id': 'd965489b7f894cbda89cd2e25bfd85a0', 'project_name': None, 'resource_id': 'e536fff6-b20d-4aa5-ac2f-d15ac8b3af63', 'timestamp': '2024-06-20T09:32:36.515823', 'resource_metadata': { 'display_name': 'myserver', 'name': 'instance-00000002', 'instance_id': 'e536fff6-b20d-4aa5-ac2f-d15ac8b3af63', 'instance_type': 'tiny', 'host': 'e0d297f5df3b62ec73c8d42b', 'instance_host': 'devstack', 'flavor': { 'id': '4af9ac72-5787-4f86-8644-0faa87ce7c83', 'name': 'tiny', 'vcpus': 1, 'ram': 512, 'disk': 1, 'ephemeral': 0, 'swap': 0 }, 'status': 'active', 'state': 'running', 'task_state': '', 'image': { 'id': '71860ed5-f66d-43e0-9514-f1d188106284' }, 'image_ref': '71860ed5-f66d-43e0-9514-f1d188106284', 'image_ref_url': None, 'architecture': 'x86_64', 'os_type': 'hvm', 'vcpus': 1, 'memory_mb': 512, 'disk_gb': 1, 'ephemeral_gb': 0, 'root_gb': 1 }, 
'message_id': '078029bf-2ee8-11ef-a915-bd45e2085de3', 'monotonic_time': 1819980.131767362, 'message_signature': 'f8d9a411b0cd0cb0d34e83' } ] test_image_size = [ { 'source': 'openstack', 'counter_name': 'image.size', 'counter_type': 'gauge', 'counter_unit': 'B', 'counter_volume': 16344576, 'user_id': None, 'user_name': None, 'project_id': 'd965489b7f894cbda89cd2e25bfd85a0', 'project_name': None, 'resource_id': 'f9276c96-8a12-432b-96a1-559d70715f97', 'timestamp': '2024-06-20T09:40:17.118871', 'resource_metadata': { 'status': 'active', 'visibility': 'public', 'name': 'cirros2', 'container_format': 'bare', 'created_at': '2024-05-30T11:38:52Z', 'disk_format': 'qcow2', 'updated_at': '2024-05-30T11:38:52Z', 'min_disk': 0, 'protected': False, 'checksum': '7734eb3945297adc90ddc6cebe8bb082', 'min_ram': 0, 'tags': [], 'virtual_size': 117440512, 'user_metadata': { 'server_group': 'server_group123' } }, 'message_id': '19f8f78a-2ee9-11ef-a95f-bd45e2085de3', 'monotonic_time': None, 'message_signature': 'f8d9a411b0cd0cb0d34e83' } ] @mock.patch('ceilometer.polling.prom_exporter.export') def test_prom_disabled(self, export): CONF = service.prepare_service([], []) manager.AgentManager(0, CONF) export.assert_not_called() @mock.patch('ceilometer.polling.prom_exporter.export') def test_export_called(self, export): CONF = service.prepare_service([], []) CONF.polling.enable_prometheus_exporter = True CONF.polling.prometheus_listen_addresses = [ '127.0.0.1:9101', '127.0.0.1:9102', '[::1]:9103', 'localhost:9104', ] manager.AgentManager(0, CONF) export.assert_has_calls([ call('127.0.0.1', 9101, None, None), call('127.0.0.1', 9102, None, None), call('::1', 9103, None, None), call('localhost', 9104, None, None), ]) @mock.patch('ceilometer.polling.prom_exporter.export') def test_export_called_tls_disabled(self, export): CONF = service.prepare_service([], []) CONF.polling.enable_prometheus_exporter = True CONF.polling.prometheus_tls_enable = False CONF.polling.prometheus_tls_certfile = 
"cert.pem" CONF.polling.prometheus_listen_addresses = [ '127.0.0.1:9101', '127.0.0.1:9102', '[::1]:9103', 'localhost:9104', ] manager.AgentManager(0, CONF) export.assert_has_calls([ call('127.0.0.1', 9101, None, None), call('127.0.0.1', 9102, None, None), call('::1', 9103, None, None), call('localhost', 9104, None, None), ]) @mock.patch('ceilometer.polling.prom_exporter.export') def test_export_called_with_tls(self, export): CONF = service.prepare_service([], []) CONF.polling.enable_prometheus_exporter = True CONF.polling.prometheus_listen_addresses = [ '127.0.0.1:9101', '127.0.0.1:9102', '[::1]:9103', 'localhost:9104', ] CONF.polling.prometheus_tls_enable = True CONF.polling.prometheus_tls_certfile = "cert.pem" CONF.polling.prometheus_tls_keyfile = "key.pem" manager.AgentManager(0, CONF) export.assert_has_calls([ call('127.0.0.1', 9101, "cert.pem", "key.pem"), call('127.0.0.1', 9102, "cert.pem", "key.pem"), call('::1', 9103, "cert.pem", "key.pem"), call('localhost', 9104, "cert.pem", "key.pem"), ]) @mock.patch('ceilometer.polling.prom_exporter.export') def test_export_fails_if_incomplete_tls(self, export): CONF = service.prepare_service([], []) CONF.polling.enable_prometheus_exporter = True CONF.polling.prometheus_listen_addresses = ['127.0.0.1:9101'] CONF.polling.prometheus_tls_enable = True CONF.polling.prometheus_tls_certfile = "cert.pem" CONF.polling.prometheus_tls_keyfile = None # Missing key self.assertRaises(ValueError, manager.AgentManager, 0, CONF) def test_collect_metrics(self): prom_exporter.collect_metrics(self.test_image_size) sample_dict_1 = {'counter': 'image.size', 'image': 'f9276c96-8a12-432b-96a1-559d70715f97', 'project': 'd965489b7f894cbda89cd2e25bfd85a0', 'publisher': 'ceilometer', 'resource': 'f9276c96-8a12-432b-96a1-559d70715f97', 'resource_name': 'cirros2', 'type': 'size', 'unit': 'B', 'server_group': 'server_group123'} self.assertEqual(16344576, prom_exporter.CEILOMETER_REGISTRY. 
get_sample_value('ceilometer_image_size', sample_dict_1)) prom_exporter.collect_metrics(self.test_memory_usage) sample_dict_2 = {'counter': 'memory.usage', 'memory': 'e536fff6-b20d-4aa5-ac2f-d15ac8b3af63', 'project': 'd965489b7f894cbda89cd2e25bfd85a0', 'publisher': 'ceilometer', 'resource': 'e536fff6-b20d-4aa5-ac2f-d15ac8b3af63', 'resource_name': 'myserver:instance-00000002', 'type': 'usage', 'unit': 'MB', 'user': '6e7d71415cd5401cbe103829c9c5dec2', 'vm_instance': 'e0d297f5df3b62ec73c8d42b', 'server_group': 'none'} self.assertEqual(37.98046875, prom_exporter.CEILOMETER_REGISTRY. get_sample_value('ceilometer_memory_usage', sample_dict_2)) prom_exporter.collect_metrics(self.test_disk_latency) sample_dict_3 = {'counter': 'disk.device.read.latency', 'disk': 'read', 'project': 'd965489b7f894cbda89cd2e25bfd85a0', 'publisher': 'ceilometer', 'resource': 'e536fff6-b20d-4aa5-ac2f-d15ac8b3af63-vda', 'resource_name': 'myserver:instance-00000002', 'type': 'device', 'unit': 'ns', 'user': '6e7d71415cd5401cbe103829c9c5dec2', 'vm_instance': 'e0d297f5df3b62ec73c8d42b', 'server_group': 'none'} # The value has to be of the second sample, as this is now a Gauge self.assertEqual(232128754, prom_exporter.CEILOMETER_REGISTRY. 
get_sample_value( 'ceilometer_disk_device_read_latency', sample_dict_3)) def test_gen_labels(self): slabels1 = dict(keys=[], values=[]) slabels1['keys'] = ['disk', 'publisher', 'type', 'counter', 'project', 'user', 'unit', 'resource', 'vm_instance', 'resource_name', 'server_group'] slabels1['values'] = ['read', 'ceilometer', 'device', 'disk.device.read.latency', 'd965489b7f894cbda89cd2e25bfd85a0', '6e7d71415cd5401cbe103829c9c5dec2', 'ns', 'e536fff6-b20d-4aa5-ac2f-d15ac8b3af63-vda', 'e0d297f5df3b62ec73c8d42b', 'myserver:instance-00000002', 'none'] label1 = prom_exporter._gen_labels(self.test_disk_latency[0]) self.assertDictEqual(label1, slabels1) slabels2 = dict(keys=[], values=[]) slabels2['keys'] = ['memory', 'publisher', 'type', 'counter', 'project', 'user', 'unit', 'resource', 'vm_instance', 'resource_name', 'server_group'] slabels2['values'] = ['e536fff6-b20d-4aa5-ac2f-d15ac8b3af63', 'ceilometer', 'usage', 'memory.usage', 'd965489b7f894cbda89cd2e25bfd85a0', '6e7d71415cd5401cbe103829c9c5dec2', 'MB', 'e536fff6-b20d-4aa5-ac2f-d15ac8b3af63', 'e0d297f5df3b62ec73c8d42b', 'myserver:instance-00000002', 'none'] label2 = prom_exporter._gen_labels(self.test_memory_usage[0]) self.assertDictEqual(label2, slabels2) slabels3 = dict(keys=[], values=[]) slabels3['keys'] = ['image', 'publisher', 'type', 'counter', 'project', 'unit', 'resource', 'resource_name', 'server_group'] slabels3['values'] = ['f9276c96-8a12-432b-96a1-559d70715f97', 'ceilometer', 'size', 'image.size', 'd965489b7f894cbda89cd2e25bfd85a0', 'B', 'f9276c96-8a12-432b-96a1-559d70715f97', 'cirros2', 'server_group123'] label3 = prom_exporter._gen_labels(self.test_image_size[0]) self.assertDictEqual(label3, slabels3) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/test_sample.py0000664000175100017510000001077615033033467023153 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use 
this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for ceilometer/sample.py""" import datetime from ceilometer import sample from ceilometer.tests import base class TestSample(base.BaseTestCase): SAMPLE = sample.Sample( name='cpu', type=sample.TYPE_CUMULATIVE, unit='ns', volume='1234567', user_id='56c5692032f34041900342503fecab30', project_id='ac9494df2d9d4e709bac378cceabaf23', resource_id='1ca738a1-c49c-4401-8346-5c60ebdb03f4', timestamp=datetime.datetime(2014, 10, 29, 14, 12, 15, 485877), resource_metadata={} ) def test_sample_string_format(self): expected = ('') self.assertEqual(expected, str(self.SAMPLE)) def test_sample_invalid_type(self): self.assertRaises( ValueError, sample.Sample, name='cpu', type='invalid', unit='ns', volume='1234567', user_id='56c5692032f34041900342503fecab30', project_id='ac9494df2d9d4e709bac378cceabaf23', resource_id='1ca738a1-c49c-4401-8346-5c60ebdb03f4', timestamp=datetime.datetime(2014, 10, 29, 14, 12, 15, 485877), resource_metadata={} ) def test_sample_from_notifications_list(self): msg = { 'event_type': 'sample.create', 'metadata': { 'timestamp': '2015-06-19T09:19:35.786893', 'message_id': '939823de-c242-45a2-a399-083f4d6a8c3e'}, 'payload': [{'counter_name': 'instance100'}], 'priority': 'info', 'publisher_id': 'ceilometer.api', } s = sample.Sample.from_notification( 'sample', sample.TYPE_GAUGE, 1.0, '%', 'user', 'project', 'res', msg) expected = {'event_type': msg['event_type'], 'host': msg['publisher_id']} self.assertEqual(expected, s.resource_metadata) def test_sample_from_notifications_dict(self): msg = { 'event_type': 
'sample.create', 'metadata': { 'timestamp': '2015-06-19T09:19:35.786893', 'message_id': '939823de-c242-45a2-a399-083f4d6a8c3e'}, 'payload': {'counter_name': 'instance100'}, 'priority': 'info', 'publisher_id': 'ceilometer.api', } s = sample.Sample.from_notification( 'sample', sample.TYPE_GAUGE, 1.0, '%', 'user', 'project', 'res', msg) msg['payload']['event_type'] = msg['event_type'] msg['payload']['host'] = msg['publisher_id'] self.assertEqual(msg['payload'], s.resource_metadata) def test_sample_from_notifications_assume_utc(self): msg = { 'event_type': 'sample.create', 'metadata': { 'timestamp': '2015-06-19T09:19:35.786893', 'message_id': '939823de-c242-45a2-a399-083f4d6a8c3e'}, 'payload': {'counter_name': 'instance100'}, 'priority': 'info', 'publisher_id': 'ceilometer.api', } s = sample.Sample.from_notification( 'sample', sample.TYPE_GAUGE, 1.0, '%', 'user', 'project', 'res', msg) self.assertEqual('2015-06-19T09:19:35.786893+00:00', s.timestamp) def test_sample_from_notifications_keep_tz(self): msg = { 'event_type': 'sample.create', 'metadata': { 'timestamp': '2015-06-19T09:19:35.786893+01:00', 'message_id': '939823de-c242-45a2-a399-083f4d6a8c3e'}, 'payload': {'counter_name': 'instance100'}, 'priority': 'info', 'publisher_id': 'ceilometer.api', } s = sample.Sample.from_notification( 'sample', sample.TYPE_GAUGE, 1.0, '%', 'user', 'project', 'res', msg) self.assertEqual('2015-06-19T09:19:35.786893+01:00', s.timestamp) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7989414 ceilometer-24.1.0.dev59/ceilometer/tests/unit/volume/0000775000175100017510000000000015033033521021544 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/tests/unit/volume/__init__.py0000664000175100017510000000000015033033467023654 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 
ceilometer-24.1.0.dev59/ceilometer/tests/unit/volume/test_cinder.py0000664000175100017510000003623415033033467024442 0ustar00mylesmyles# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.polling import manager from ceilometer import service import ceilometer.tests.base as base from ceilometer.volume import cinder VOLUME_LIST = [ type('Volume', (object,), {'migration_status': None, 'attachments': [ {'server_id': '1ae69721-d071-4156-a2bd-b11bb43ec2e3', 'attachment_id': 'f903d95e-f999-4a34-8be7-119eadd9bb4f', 'attached_at': '2016-07-14T03:55:57.000000', 'host_name': None, 'volume_id': 'd94c18fb-b680-4912-9741-da69ee83c94f', 'device': '/dev/vdb', 'id': 'd94c18fb-b680-4912-9741-da69ee83c94f'}], 'links': [{ 'href': 'http://fake_link3', 'rel': 'self'}, { 'href': 'http://fake_link4', 'rel': 'bookmark'}], 'availability_zone': 'nova', 'os-vol-host-attr:host': 'test@lvmdriver-1#lvmdriver-1', 'encrypted': False, 'updated_at': '2016-07-14T03:55:57.000000', 'replication_status': 'disabled', 'snapshot_id': None, 'id': 'd94c18fb-b680-4912-9741-da69ee83c94f', 'size': 1, 'user_id': 'be255bd31eb944578000fc762fde6dcf', 'os-vol-tenant-attr:tenant_id': '6824974c08974d4db864bbaa6bc08303', 'os-vol-mig-status-attr:migstat': None, 'metadata': {'readonly': 'False', 'attached_mode': 'rw'}, 'status': 'in-use', 'description': None, 'multiattach': False, 'source_volid': None, 'consistencygroup_id': None, "volume_image_metadata": { "checksum": "17d9daa4fb8e20b0f6b7dec0d46fdddf", "container_format": 
"bare", "disk_format": "raw", "hw_disk_bus": "scsi", "hw_scsi_model": "virtio-scsi", "image_id": "f0019ee3-523c-45ab-b0b6-3adc529673e7", "image_name": "debian-jessie-scsi", "min_disk": "0", "min_ram": "0", "size": "1572864000" }, 'os-vol-mig-status-attr:name_id': None, 'group_id': None, 'provider_id': None, 'shared_targets': False, 'service_uuid': '2f6b5a18-0cd5-4421-b97e-d2c3e85ed758', 'cluster_name': None, 'volume_type_id': '65a9f65a-4696-4435-a09d-bc44d797c529', 'name': None, 'bootable': 'false', 'created_at': '2016-06-23T08:27:45.000000', 'volume_type': 'lvmdriver-1'}) ] SNAPSHOT_LIST = [ type('VolumeSnapshot', (object,), {'status': 'available', 'os-extended-snapshot-attributes:progress': '100%', 'description': None, 'os-extended-snapshot-attributes:project_id': '6824974c08974d4db864bbaa6bc08303', 'size': 1, 'user_id': 'be255bd31eb944578000fc762fde6dcf', 'updated_at': '2016-10-19T07:56:55.000000', 'id': 'b1ea6783-f952-491e-a4ed-23a6a562e1cf', 'volume_id': '6f27bc42-c834-49ea-ae75-8d1073b37806', 'metadata': {}, 'created_at': '2016-10-19T07:56:55.000000', "group_snapshot_id": None, 'name': None}) ] BACKUP_LIST = [ type('VolumeBackup', (object,), {'status': 'available', 'object_count': 0, 'container': None, 'name': None, 'links': [{ 'href': 'http://fake_urla', 'rel': 'self'}, { 'href': 'http://fake_urlb', 'rel': 'bookmark'}], 'availability_zone': 'nova', 'created_at': '2016-10-19T06:55:23.000000', 'snapshot_id': None, 'updated_at': '2016-10-19T06:55:23.000000', 'data_timestamp': '2016-10-19T06:55:23.000000', 'description': None, 'has_dependent_backups': False, 'volume_id': '6f27bc42-c834-49ea-ae75-8d1073b37806', 'os-backup-project-attr:project_id': '6824974c08974d4db864bbaa6bc08303', 'fail_reason': "", 'is_incremental': False, 'metadata': {}, 'user_id': 'be255bd31eb944578000fc762fde6dcf', 'id': '75a52125-85ff-4a8d-b2aa-580f3b22273f', 'size': 1}) ] POOL_LIST = [ type('VolumePool', (object,), {'name': 'localhost.localdomain@lvmdriver-1#lvmdriver-1', 'pool_name': 
'lvmdriver-1', 'total_capacity_gb': 28.5, 'free_capacity_gb': 28.39, 'reserved_percentage': 0, 'location_info': 'LVMVolumeDriver:localhost.localdomain:stack-volumes:thin:0', 'QoS_support': False, 'provisioned_capacity_gb': 4.0, 'max_over_subscription_ratio': 20.0, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'total_volumes': 3, 'filter_function': None, 'goodness_function': None, 'multiattach': True, 'backend_state': 'up', 'allocated_capacity_gb': 4, 'cacheable': True, 'volume_backend_name': 'lvmdriver-1', 'storage_protocol': 'iSCSI', 'vendor_name': 'Open Source', 'driver_version': '3.0.0', 'timestamp': '2025-03-21T14:19:02.901750'}), type('VolumePool', (object,), {'name': 'cinder-3ceee-volume-ceph-0@ceph#ceph', 'vendor_name': 'Open Source', 'driver_version': '1.3.0', 'storage_protocol': 'ceph', 'total_capacity_gb': 85.0, 'free_capacity_gb': 85.0, 'reserved_percentage': 0, 'multiattach': True, 'thin_provisioning_support': True, 'max_over_subscription_ratio': '20.0', 'location_info': 'ceph:/etc/ceph/ceph.conf:a94b63c4e:openstack:volumes', 'backend_state': 'up', 'qos_support': True, 'volume_backend_name': 'ceph', 'replication_enabled': False, 'allocated_capacity_gb': 1, 'filter_function': None, 'goodness_function': None, 'timestamp': '2025-06-09T13:29:43.286226'}) ] class TestVolumeSizePollster(base.BaseTestCase): def setUp(self): super().setUp() conf = service.prepare_service([], []) self.manager = manager.AgentManager(0, conf) self.pollster = cinder.VolumeSizePollster(conf) def test_volume_size_pollster(self): volume_size_samples = list( self.pollster.get_samples(self.manager, {}, resources=VOLUME_LIST)) self.assertEqual(1, len(volume_size_samples)) self.assertEqual('volume.size', volume_size_samples[0].name) self.assertEqual(1, volume_size_samples[0].volume) self.assertEqual('6824974c08974d4db864bbaa6bc08303', volume_size_samples[0].project_id) self.assertEqual('d94c18fb-b680-4912-9741-da69ee83c94f', volume_size_samples[0].resource_id) 
self.assertEqual('f0019ee3-523c-45ab-b0b6-3adc529673e7', volume_size_samples[0].resource_metadata["image_id"]) self.assertEqual('1ae69721-d071-4156-a2bd-b11bb43ec2e3', volume_size_samples[0].resource_metadata ["instance_id"]) self.assertEqual('nova', volume_size_samples[0].resource_metadata ["availability_zone"]) class TestVolumeSnapshotSizePollster(base.BaseTestCase): def setUp(self): super().setUp() conf = service.prepare_service([], []) self.manager = manager.AgentManager(0, conf) self.pollster = cinder.VolumeSnapshotSize(conf) def test_volume_snapshot_size_pollster(self): volume_snapshot_size_samples = list( self.pollster.get_samples( self.manager, {}, resources=SNAPSHOT_LIST)) self.assertEqual(1, len(volume_snapshot_size_samples)) self.assertEqual('volume.snapshot.size', volume_snapshot_size_samples[0].name) self.assertEqual(1, volume_snapshot_size_samples[0].volume) self.assertEqual('be255bd31eb944578000fc762fde6dcf', volume_snapshot_size_samples[0].user_id) self.assertEqual('6824974c08974d4db864bbaa6bc08303', volume_snapshot_size_samples[0].project_id) self.assertEqual('b1ea6783-f952-491e-a4ed-23a6a562e1cf', volume_snapshot_size_samples[0].resource_id) class TestVolumeBackupSizePollster(base.BaseTestCase): def setUp(self): super().setUp() conf = service.prepare_service([], []) self.manager = manager.AgentManager(0, conf) self.pollster = cinder.VolumeBackupSize(conf) def test_volume_backup_size_pollster(self): volume_backup_size_samples = list( self.pollster.get_samples(self.manager, {}, resources=BACKUP_LIST)) self.assertEqual(1, len(volume_backup_size_samples)) self.assertEqual('volume.backup.size', volume_backup_size_samples[0].name) self.assertEqual(1, volume_backup_size_samples[0].volume) self.assertEqual('75a52125-85ff-4a8d-b2aa-580f3b22273f', volume_backup_size_samples[0].resource_id) class TestVolumeProviderPoolCapacityTotalPollster(base.BaseTestCase): def setUp(self): super().setUp() conf = service.prepare_service([], []) self.manager = 
manager.AgentManager(0, conf) self.pollster = cinder.VolumeProviderPoolCapacityTotal(conf) def test_volume_provider_pool_capacity_total_pollster(self): volume_pool_size_total_samples = list( self.pollster.get_samples(self.manager, {}, resources=POOL_LIST)) self.assertEqual(2, len(volume_pool_size_total_samples)) self.assertEqual('volume.provider.pool.capacity.total', volume_pool_size_total_samples[0].name) self.assertEqual(28.5, volume_pool_size_total_samples[0].volume) self.assertEqual('localhost.localdomain@lvmdriver-1#lvmdriver-1', volume_pool_size_total_samples[0].resource_id) self.assertEqual('volume.provider.pool.capacity.total', volume_pool_size_total_samples[1].name) self.assertEqual(85.0, volume_pool_size_total_samples[1].volume) self.assertEqual('cinder-3ceee-volume-ceph-0@ceph#ceph', volume_pool_size_total_samples[1].resource_id) class TestVolumeProviderPoolCapacityFreePollster(base.BaseTestCase): def setUp(self): super().setUp() conf = service.prepare_service([], []) self.manager = manager.AgentManager(0, conf) self.pollster = cinder.VolumeProviderPoolCapacityFree(conf) def test_volume_provider_pool_capacity_free_pollster(self): volume_pool_size_free_samples = list( self.pollster.get_samples(self.manager, {}, resources=POOL_LIST)) self.assertEqual(2, len(volume_pool_size_free_samples)) self.assertEqual('volume.provider.pool.capacity.free', volume_pool_size_free_samples[0].name) self.assertEqual(28.39, volume_pool_size_free_samples[0].volume) self.assertEqual('localhost.localdomain@lvmdriver-1#lvmdriver-1', volume_pool_size_free_samples[0].resource_id) self.assertEqual('volume.provider.pool.capacity.free', volume_pool_size_free_samples[1].name) self.assertEqual(85.0, volume_pool_size_free_samples[1].volume) self.assertEqual('cinder-3ceee-volume-ceph-0@ceph#ceph', volume_pool_size_free_samples[1].resource_id) class TestVolumeProviderPoolCapacityProvisionedPollster(base.BaseTestCase): def setUp(self): super().setUp() conf = service.prepare_service([], []) 
self.manager = manager.AgentManager(0, conf) self.pollster = cinder.VolumeProviderPoolCapacityProvisioned(conf) def test_volume_provider_pool_capacity_provisioned_pollster(self): volume_pool_size_provisioned_samples = list( self.pollster.get_samples(self.manager, {}, resources=POOL_LIST)) self.assertEqual(1, len(volume_pool_size_provisioned_samples)) self.assertEqual('volume.provider.pool.capacity.provisioned', volume_pool_size_provisioned_samples[0].name) self.assertEqual(4.0, volume_pool_size_provisioned_samples[0].volume) self.assertEqual('localhost.localdomain@lvmdriver-1#lvmdriver-1', volume_pool_size_provisioned_samples[0].resource_id) class TestVolumeProviderPoolCapacityVirtualFreePollster(base.BaseTestCase): def setUp(self): super().setUp() conf = service.prepare_service([], []) self.manager = manager.AgentManager(0, conf) self.pollster = cinder.VolumeProviderPoolCapacityVirtualFree(conf) def test_volume_provider_pool_capacity_virtual_free_pollster(self): volume_pool_size_virtual_free_samples = list( self.pollster.get_samples(self.manager, {}, resources=POOL_LIST)) self.assertEqual(1, len(volume_pool_size_virtual_free_samples)) self.assertEqual('volume.provider.pool.capacity.virtual_free', volume_pool_size_virtual_free_samples[0].name) self.assertEqual(566.0, volume_pool_size_virtual_free_samples[0].volume) self.assertEqual('localhost.localdomain@lvmdriver-1#lvmdriver-1', volume_pool_size_virtual_free_samples[0].resource_id) class TestVolumeProviderPoolCapacityAllocatedPollster(base.BaseTestCase): def setUp(self): super().setUp() conf = service.prepare_service([], []) self.manager = manager.AgentManager(0, conf) self.pollster = cinder.VolumeProviderPoolCapacityAllocated(conf) def test_volume_provider_pool_capacity_allocated_pollster(self): volume_pool_size_allocated_samples = list( self.pollster.get_samples(self.manager, {}, resources=POOL_LIST)) self.assertEqual(2, len(volume_pool_size_allocated_samples)) 
self.assertEqual('volume.provider.pool.capacity.allocated', volume_pool_size_allocated_samples[0].name) self.assertEqual(4, volume_pool_size_allocated_samples[0].volume) self.assertEqual('localhost.localdomain@lvmdriver-1#lvmdriver-1', volume_pool_size_allocated_samples[0].resource_id) self.assertEqual('volume.provider.pool.capacity.allocated', volume_pool_size_allocated_samples[1].name) self.assertEqual(1, volume_pool_size_allocated_samples[1].volume) self.assertEqual('cinder-3ceee-volume-ceph-0@ceph#ceph', volume_pool_size_allocated_samples[1].resource_id) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/utils.py0000664000175100017510000000346215033033467017644 0ustar00mylesmyles# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Utilities and helper functions.""" import threading from oslo_config import cfg from oslo_utils import timeutils ROOTWRAP_CONF = "/etc/ceilometer/rootwrap.conf" OPTS = [ cfg.StrOpt('rootwrap_config', default=ROOTWRAP_CONF, help='Path to the rootwrap configuration file to ' 'use for running commands as root'), ] def _get_root_helper(): global ROOTWRAP_CONF return 'sudo ceilometer-rootwrap %s' % ROOTWRAP_CONF def setup_root_helper(conf): global ROOTWRAP_CONF ROOTWRAP_CONF = conf.rootwrap_config def spawn_thread(target, *args, **kwargs): t = threading.Thread(target=target, args=args, kwargs=kwargs) t.daemon = True t.start() return t def isotime(at=None): """Current time as ISO string, :returns: Current time in ISO format """ if not at: at = timeutils.utcnow() date_string = at.strftime("%Y-%m-%dT%H:%M:%S") tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC' date_string += ('Z' if tz == 'UTC' else tz) return date_string ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/version.py0000664000175100017510000000121115033033467020157 0ustar00mylesmyles# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import pbr.version version_info = pbr.version.VersionInfo('ceilometer') ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7989414 ceilometer-24.1.0.dev59/ceilometer/volume/0000775000175100017510000000000015033033521017423 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/volume/__init__.py0000664000175100017510000000000015033033467021533 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/volume/cinder.py0000664000175100017510000001755715033033467021271 0ustar00mylesmyles# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Common code for working with volumes """ import math from ceilometer.polling import plugin_base from ceilometer import sample class _Base(plugin_base.PollsterBase): FIELDS = [] def extract_metadata(self, obj): return {k: getattr(obj, k) for k in self.FIELDS} class VolumeSizePollster(_Base): @property def default_discovery(self): return 'volumes' FIELDS = ['name', 'status', 'volume_type', 'volume_type_id', 'availability_zone', 'os-vol-host-attr:host', 'migration_status', 'attachments', 'snapshot_id', 'source_volid'] def extract_metadata(self, obj): metadata = super().extract_metadata(obj) if getattr(obj, "volume_image_metadata", None): metadata["image_id"] = obj.volume_image_metadata.get("image_id") else: metadata["image_id"] = None if obj.attachments: metadata["instance_id"] = obj.attachments[0]["server_id"] else: metadata["instance_id"] = None return metadata def get_samples(self, manager, cache, resources): for volume in resources: yield sample.Sample( name='volume.size', type=sample.TYPE_GAUGE, unit='GB', volume=volume.size, user_id=volume.user_id, project_id=getattr(volume, 'os-vol-tenant-attr:tenant_id'), resource_id=volume.id, resource_metadata=self.extract_metadata(volume), ) class VolumeSnapshotSize(_Base): @property def default_discovery(self): return 'volume_snapshots' FIELDS = ['name', 'volume_id', 'status', 'description', 'metadata', 'os-extended-snapshot-attributes:progress', ] def get_samples(self, manager, cache, resources): for snapshot in resources: yield sample.Sample( name='volume.snapshot.size', type=sample.TYPE_GAUGE, unit='GB', volume=snapshot.size, user_id=snapshot.user_id, project_id=getattr( snapshot, 'os-extended-snapshot-attributes:project_id'), resource_id=snapshot.id, resource_metadata=self.extract_metadata(snapshot), ) class VolumeBackupSize(_Base): @property def default_discovery(self): return 'volume_backups' FIELDS = ['name', 'is_incremental', 'object_count', 'container', 'volume_id', 'status', 'description'] def 
get_samples(self, manager, cache, resources): for backup in resources: yield sample.Sample( name='volume.backup.size', type=sample.TYPE_GAUGE, unit='GB', volume=backup.size, user_id=backup.user_id, project_id=getattr( backup, 'os-backup-project-attr:project_id', None), resource_id=backup.id, resource_metadata=self.extract_metadata(backup), ) class _VolumeProviderPoolBase(_Base): def extract_metadata(self, obj): metadata = super().extract_metadata(obj) metadata['pool_name'] = getattr(obj, "pool_name", None) return metadata class VolumeProviderPoolCapacityTotal(_VolumeProviderPoolBase): @property def default_discovery(self): return 'volume_pools' def get_samples(self, manager, cache, resources): for pool in resources: yield sample.Sample( name='volume.provider.pool.capacity.total', type=sample.TYPE_GAUGE, unit='GB', volume=pool.total_capacity_gb, user_id=None, project_id=None, resource_id=pool.name, resource_metadata=self.extract_metadata(pool) ) class VolumeProviderPoolCapacityFree(_VolumeProviderPoolBase): @property def default_discovery(self): return 'volume_pools' def get_samples(self, manager, cache, resources): for pool in resources: yield sample.Sample( name='volume.provider.pool.capacity.free', type=sample.TYPE_GAUGE, unit='GB', volume=pool.free_capacity_gb, user_id=None, project_id=None, resource_id=pool.name, resource_metadata=self.extract_metadata(pool) ) class VolumeProviderPoolCapacityProvisioned(_VolumeProviderPoolBase): @property def default_discovery(self): return 'volume_pools' def get_samples(self, manager, cache, resources): for pool in resources: if getattr(pool, 'provisioned_capacity_gb', None): yield sample.Sample( name='volume.provider.pool.capacity.provisioned', type=sample.TYPE_GAUGE, unit='GB', volume=pool.provisioned_capacity_gb, user_id=None, project_id=None, resource_id=pool.name, resource_metadata=self.extract_metadata(pool) ) class VolumeProviderPoolCapacityVirtualFree(_VolumeProviderPoolBase): @property def default_discovery(self): 
return 'volume_pools' def get_samples(self, manager, cache, resources): for pool in resources: if getattr(pool, 'provisioned_capacity_gb', None): reserved_size = math.floor( (pool.reserved_percentage / 100) * pool.total_capacity_gb ) max_over_subscription_ratio = 1.0 if pool.thin_provisioning_support: max_over_subscription_ratio = float( pool.max_over_subscription_ratio ) value = ( max_over_subscription_ratio * (pool.total_capacity_gb - reserved_size) - pool.provisioned_capacity_gb ) yield sample.Sample( name='volume.provider.pool.capacity.virtual_free', type=sample.TYPE_GAUGE, unit='GB', volume=value, user_id=None, project_id=None, resource_id=pool.name, resource_metadata=self.extract_metadata(pool) ) class VolumeProviderPoolCapacityAllocated(_VolumeProviderPoolBase): @property def default_discovery(self): return 'volume_pools' def get_samples(self, manager, cache, resources): for pool in resources: yield sample.Sample( name='volume.provider.pool.capacity.allocated', type=sample.TYPE_GAUGE, unit='GB', volume=pool.allocated_capacity_gb, user_id=None, project_id=None, resource_id=pool.name, resource_metadata=self.extract_metadata(pool) ) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/ceilometer/volume/discovery.py0000664000175100017510000000454115033033467022021 0ustar00mylesmyles# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from cinderclient import client as cinder_client from oslo_config import cfg from ceilometer import keystone_client from ceilometer.polling import plugin_base SERVICE_OPTS = [ cfg.StrOpt('cinder', default='volumev3', help='Cinder service type.'), ] class _BaseDiscovery(plugin_base.DiscoveryBase): def __init__(self, conf): super().__init__(conf) creds = conf.service_credentials # NOTE(mnederlof): We set 3.64 (the maximum for Wallaby) because: # we need atleast 3.41 to get user_id on snapshots. # we need atleast 3.56 for user_id and project_id on backups. # we need atleast 3.63 for volume_type_id on volumes. self.client = cinder_client.Client( version='3.64', session=keystone_client.get_session(conf), region_name=creds.region_name, interface=creds.interface, service_type=conf.service_types.cinder ) class VolumeDiscovery(_BaseDiscovery): def discover(self, manager, param=None): """Discover volume resources to monitor.""" return self.client.volumes.list(search_opts={'all_tenants': True}) class VolumeSnapshotsDiscovery(_BaseDiscovery): def discover(self, manager, param=None): """Discover snapshot resources to monitor.""" return self.client.volume_snapshots.list( search_opts={'all_tenants': True}) class VolumeBackupsDiscovery(_BaseDiscovery): def discover(self, manager, param=None): """Discover volume resources to monitor.""" return self.client.backups.list(search_opts={'all_tenants': True}) class VolumePoolsDiscovery(_BaseDiscovery): def discover(self, manager, param=None): """Discover volume resources to monitor.""" return self.client.pools.list(detailed=True) ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7919414 ceilometer-24.1.0.dev59/ceilometer.egg-info/0000775000175100017510000000000015033033521017606 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922488.0 ceilometer-24.1.0.dev59/ceilometer.egg-info/PKG-INFO0000644000175100017510000000706415033033470020713 
0ustar00mylesmylesMetadata-Version: 2.2 Name: ceilometer Version: 24.1.0.dev59 Summary: OpenStack Telemetry Home-page: https://docs.openstack.org/ceilometer/latest/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3.12 Classifier: Topic :: System :: Monitoring Requires-Python: >=3.10 License-File: LICENSE Requires-Dist: xmltodict>=0.13.0 Requires-Dist: cachetools>=2.1.0 Requires-Dist: cotyledon>=1.3.0 Requires-Dist: futurist>=1.8.0 Requires-Dist: jsonpath-rw-ext>=1.1.3 Requires-Dist: lxml>=4.5.1 Requires-Dist: msgpack>=0.5.2 Requires-Dist: oslo.concurrency>=3.29.0 Requires-Dist: oslo.config>=8.6.0 Requires-Dist: oslo.i18n>=3.15.3 Requires-Dist: oslo.log>=3.36.0 Requires-Dist: oslo.reports>=1.18.0 Requires-Dist: oslo.rootwrap>=2.0.0 Requires-Dist: pbr>=2.0.0 Requires-Dist: oslo.messaging>=10.3.0 Requires-Dist: oslo.upgradecheck>=0.1.1 Requires-Dist: oslo.utils>=4.7.0 Requires-Dist: oslo.privsep>=1.32.0 Requires-Dist: python-glanceclient>=2.8.0 Requires-Dist: python-keystoneclient>=3.18.0 Requires-Dist: keystoneauth1>=3.18.0 Requires-Dist: python-neutronclient>=6.7.0 Requires-Dist: python-novaclient>=9.1.0 Requires-Dist: python-swiftclient>=3.2.0 Requires-Dist: python-cinderclient>=3.3.0 Requires-Dist: PyYAML>=5.1 Requires-Dist: requests>=2.25.1 Requires-Dist: stevedore>=1.20.0 Requires-Dist: tenacity>=6.3.1 Requires-Dist: 
tooz>=1.47.0 Requires-Dist: oslo.cache>=1.26.0 Requires-Dist: gnocchiclient>=7.0.0 Requires-Dist: python-zaqarclient>=1.3.0 Requires-Dist: prometheus_client>=0.20.0 Requires-Dist: requests-aws>=0.1.4 Requires-Dist: aodhclient>=3.8.0 Dynamic: author Dynamic: author-email Dynamic: classifier Dynamic: description Dynamic: home-page Dynamic: requires-dist Dynamic: requires-python Dynamic: summary ========== Ceilometer ========== -------- Overview -------- Ceilometer is a data collection service that collects event and metering data by monitoring notifications sent from OpenStack services. It publishes collected data to various targets including data stores and message queues. Ceilometer is distributed under the terms of the Apache License, Version 2.0. The full terms and conditions of this license are detailed in the LICENSE file. ------------- Documentation ------------- Release notes are available at https://releases.openstack.org/teams/telemetry.html Developer documentation is available at https://docs.openstack.org/ceilometer/latest/ Launchpad Projects ------------------ - Server: https://launchpad.net/ceilometer Code Repository --------------- - Server: https://github.com/openstack/ceilometer Bug Tracking ------------ - Bugs: https://bugs.launchpad.net/ceilometer/ Release Notes ------------- - Server: https://docs.openstack.org/releasenotes/ceilometer/ IRC --- IRC Channel: #openstack-telemetry on `OFTC`_. Mailinglist ----------- Project use http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss as the mailinglist. Please use tag ``[Ceilometer]`` in the subject for new threads. .. 
_OFTC: https://oftc.net/ ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922488.0 ceilometer-24.1.0.dev59/ceilometer.egg-info/SOURCES.txt0000664000175100017510000006000015033033470021471 0ustar00mylesmyles.coveragerc .mailmap .pre-commit-config.yaml .stestr.conf .zuul.yaml AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE MAINTAINERS README.rst bindep.txt pyproject.toml reno.yaml requirements.txt setup.cfg setup.py test-requirements.txt tox.ini ceilometer/__init__.py ceilometer/agent.py ceilometer/cache_utils.py ceilometer/declarative.py ceilometer/gnocchi_client.py ceilometer/i18n.py ceilometer/keystone_client.py ceilometer/messaging.py ceilometer/middleware.py ceilometer/neutron_client.py ceilometer/notification.py ceilometer/nova_client.py ceilometer/opts.py ceilometer/sample.py ceilometer/service.py ceilometer/utils.py ceilometer/version.py ceilometer.egg-info/PKG-INFO ceilometer.egg-info/SOURCES.txt ceilometer.egg-info/dependency_links.txt ceilometer.egg-info/entry_points.txt ceilometer.egg-info/not-zip-safe ceilometer.egg-info/pbr.json ceilometer.egg-info/requires.txt ceilometer.egg-info/top_level.txt ceilometer/alarm/__init__.py ceilometer/alarm/aodh.py ceilometer/alarm/discovery.py ceilometer/cmd/__init__.py ceilometer/cmd/agent_notification.py ceilometer/cmd/polling.py ceilometer/cmd/sample.py ceilometer/cmd/status.py ceilometer/cmd/storage.py ceilometer/compute/__init__.py ceilometer/compute/discovery.py ceilometer/compute/pollsters/__init__.py ceilometer/compute/pollsters/disk.py ceilometer/compute/pollsters/instance_stats.py ceilometer/compute/pollsters/net.py ceilometer/compute/pollsters/util.py ceilometer/compute/virt/__init__.py ceilometer/compute/virt/inspector.py ceilometer/compute/virt/libvirt/__init__.py ceilometer/compute/virt/libvirt/inspector.py ceilometer/compute/virt/libvirt/utils.py ceilometer/data/meters.d/meters.yaml ceilometer/event/__init__.py ceilometer/event/converter.py 
ceilometer/event/models.py ceilometer/event/trait_plugins.py ceilometer/hacking/__init__.py ceilometer/hacking/checks.py ceilometer/image/__init__.py ceilometer/image/discovery.py ceilometer/image/glance.py ceilometer/ipmi/__init__.py ceilometer/ipmi/notifications/__init__.py ceilometer/ipmi/notifications/ironic.py ceilometer/ipmi/platform/__init__.py ceilometer/ipmi/platform/exception.py ceilometer/ipmi/platform/ipmi_sensor.py ceilometer/ipmi/platform/ipmitool.py ceilometer/ipmi/pollsters/__init__.py ceilometer/ipmi/pollsters/sensor.py ceilometer/locale/de/LC_MESSAGES/ceilometer.po ceilometer/locale/en_GB/LC_MESSAGES/ceilometer.po ceilometer/locale/es/LC_MESSAGES/ceilometer.po ceilometer/locale/fr/LC_MESSAGES/ceilometer.po ceilometer/locale/it/LC_MESSAGES/ceilometer.po ceilometer/locale/ja/LC_MESSAGES/ceilometer.po ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer.po ceilometer/locale/pt_BR/LC_MESSAGES/ceilometer.po ceilometer/locale/ru/LC_MESSAGES/ceilometer.po ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer.po ceilometer/locale/zh_TW/LC_MESSAGES/ceilometer.po ceilometer/meter/__init__.py ceilometer/meter/notifications.py ceilometer/network/__init__.py ceilometer/network/floatingip.py ceilometer/network/services/__init__.py ceilometer/network/services/base.py ceilometer/network/services/discovery.py ceilometer/network/services/fwaas.py ceilometer/network/services/vpnaas.py ceilometer/objectstore/__init__.py ceilometer/objectstore/rgw.py ceilometer/objectstore/rgw_client.py ceilometer/objectstore/swift.py ceilometer/pipeline/__init__.py ceilometer/pipeline/base.py ceilometer/pipeline/event.py ceilometer/pipeline/sample.py ceilometer/pipeline/data/event_definitions.yaml ceilometer/pipeline/data/event_pipeline.yaml ceilometer/pipeline/data/pipeline.yaml ceilometer/polling/__init__.py ceilometer/polling/dynamic_pollster.py ceilometer/polling/manager.py ceilometer/polling/plugin_base.py ceilometer/polling/prom_exporter.py ceilometer/polling/discovery/__init__.py 
ceilometer/polling/discovery/endpoint.py ceilometer/polling/discovery/localnode.py ceilometer/polling/discovery/non_openstack_credentials_discovery.py ceilometer/polling/discovery/tenant.py ceilometer/privsep/__init__.py ceilometer/privsep/ipmitool.py ceilometer/publisher/__init__.py ceilometer/publisher/file.py ceilometer/publisher/gnocchi.py ceilometer/publisher/http.py ceilometer/publisher/messaging.py ceilometer/publisher/opentelemetry_http.py ceilometer/publisher/prometheus.py ceilometer/publisher/tcp.py ceilometer/publisher/test.py ceilometer/publisher/udp.py ceilometer/publisher/utils.py ceilometer/publisher/zaqar.py ceilometer/publisher/data/gnocchi_resources.yaml ceilometer/telemetry/__init__.py ceilometer/telemetry/notifications.py ceilometer/tests/__init__.py ceilometer/tests/base.py ceilometer/tests/unit/__init__.py ceilometer/tests/unit/pipeline_base.py ceilometer/tests/unit/test_bin.py ceilometer/tests/unit/test_cache_utils.py ceilometer/tests/unit/test_declarative.py ceilometer/tests/unit/test_decoupled_pipeline.py ceilometer/tests/unit/test_event_pipeline.py ceilometer/tests/unit/test_messaging.py ceilometer/tests/unit/test_middleware.py ceilometer/tests/unit/test_neutronclient.py ceilometer/tests/unit/test_notification.py ceilometer/tests/unit/test_novaclient.py ceilometer/tests/unit/test_polling.py ceilometer/tests/unit/test_prom_exporter.py ceilometer/tests/unit/test_sample.py ceilometer/tests/unit/alarm/__init__.py ceilometer/tests/unit/alarm/test_aodh.py ceilometer/tests/unit/cmd/__init__.py ceilometer/tests/unit/cmd/test_status.py ceilometer/tests/unit/compute/__init__.py ceilometer/tests/unit/compute/test_discovery.py ceilometer/tests/unit/compute/pollsters/__init__.py ceilometer/tests/unit/compute/pollsters/base.py ceilometer/tests/unit/compute/pollsters/test_cpu.py ceilometer/tests/unit/compute/pollsters/test_disk.py ceilometer/tests/unit/compute/pollsters/test_diskio.py ceilometer/tests/unit/compute/pollsters/test_location_metadata.py 
ceilometer/tests/unit/compute/pollsters/test_memory.py ceilometer/tests/unit/compute/pollsters/test_net.py ceilometer/tests/unit/compute/pollsters/test_perf.py ceilometer/tests/unit/compute/virt/__init__.py ceilometer/tests/unit/compute/virt/libvirt/__init__.py ceilometer/tests/unit/compute/virt/libvirt/test_inspector.py ceilometer/tests/unit/event/__init__.py ceilometer/tests/unit/event/test_converter.py ceilometer/tests/unit/event/test_endpoint.py ceilometer/tests/unit/event/test_trait_plugins.py ceilometer/tests/unit/image/__init__.py ceilometer/tests/unit/image/test_glance.py ceilometer/tests/unit/ipmi/__init__.py ceilometer/tests/unit/ipmi/notifications/__init__.py ceilometer/tests/unit/ipmi/notifications/ipmi_test_data.py ceilometer/tests/unit/ipmi/notifications/test_ironic.py ceilometer/tests/unit/ipmi/platform/__init__.py ceilometer/tests/unit/ipmi/platform/fake_utils.py ceilometer/tests/unit/ipmi/platform/ipmitool_test_data.py ceilometer/tests/unit/ipmi/platform/test_ipmi_sensor.py ceilometer/tests/unit/ipmi/pollsters/__init__.py ceilometer/tests/unit/ipmi/pollsters/base.py ceilometer/tests/unit/ipmi/pollsters/test_sensor.py ceilometer/tests/unit/meter/__init__.py ceilometer/tests/unit/meter/test_meter_plugins.py ceilometer/tests/unit/meter/test_notifications.py ceilometer/tests/unit/network/__init__.py ceilometer/tests/unit/network/test_floating_ip.py ceilometer/tests/unit/network/services/__init__.py ceilometer/tests/unit/network/services/test_fwaas.py ceilometer/tests/unit/network/services/test_vpnaas.py ceilometer/tests/unit/objectstore/__init__.py ceilometer/tests/unit/objectstore/test_rgw.py ceilometer/tests/unit/objectstore/test_rgw_client.py ceilometer/tests/unit/objectstore/test_swift.py ceilometer/tests/unit/polling/__init__.py ceilometer/tests/unit/polling/test_discovery.py ceilometer/tests/unit/polling/test_dynamic_pollster.py ceilometer/tests/unit/polling/test_heartbeat.py ceilometer/tests/unit/polling/test_manager.py 
ceilometer/tests/unit/polling/test_non_openstack_credentials_discovery.py ceilometer/tests/unit/polling/test_non_openstack_dynamic_pollster.py ceilometer/tests/unit/publisher/__init__.py ceilometer/tests/unit/publisher/test_file.py ceilometer/tests/unit/publisher/test_gnocchi.py ceilometer/tests/unit/publisher/test_http.py ceilometer/tests/unit/publisher/test_messaging_publisher.py ceilometer/tests/unit/publisher/test_opentelemetry_http.py ceilometer/tests/unit/publisher/test_prometheus.py ceilometer/tests/unit/publisher/test_tcp.py ceilometer/tests/unit/publisher/test_udp.py ceilometer/tests/unit/publisher/test_utils.py ceilometer/tests/unit/publisher/test_zaqar.py ceilometer/tests/unit/volume/__init__.py ceilometer/tests/unit/volume/test_cinder.py ceilometer/volume/__init__.py ceilometer/volume/cinder.py ceilometer/volume/discovery.py devstack/README.rst devstack/local.conf.sample devstack/plugin.sh devstack/settings devstack/files/rpms/ceilometer devstack/upgrade/settings devstack/upgrade/shutdown.sh devstack/upgrade/upgrade.sh doc/requirements.txt doc/source/conf.py doc/source/glossary.rst doc/source/index.rst doc/source/admin/index.rst doc/source/admin/telemetry-best-practices.rst doc/source/admin/telemetry-data-collection.rst doc/source/admin/telemetry-data-pipelines.rst doc/source/admin/telemetry-dynamic-pollster.rst doc/source/admin/telemetry-events.rst doc/source/admin/telemetry-measurements.rst doc/source/admin/telemetry-system-architecture.rst doc/source/admin/telemetry-troubleshooting-guide.rst doc/source/cli/ceilometer-status.rst doc/source/cli/index.rst doc/source/configuration/index.rst doc/source/contributor/1-agents.png doc/source/contributor/2-1-collection-notification.png doc/source/contributor/2-2-collection-poll.png doc/source/contributor/2-accessmodel.png doc/source/contributor/3-Pipeline.png doc/source/contributor/5-multi-publish.png doc/source/contributor/6-storagemodel.png doc/source/contributor/architecture.rst 
doc/source/contributor/ceilo-arch.png doc/source/contributor/ceilo-gnocchi-arch.png doc/source/contributor/devstack.rst doc/source/contributor/events.rst doc/source/contributor/gmr.rst doc/source/contributor/index.rst doc/source/contributor/measurements.rst doc/source/contributor/new_resource_types.rst doc/source/contributor/overview.rst doc/source/contributor/plugins.rst doc/source/contributor/testing.rst doc/source/install/get_started.rst doc/source/install/index.rst doc/source/install/install-base-config-common.inc doc/source/install/install-base-prereq-common.inc doc/source/install/install-base-rdo.rst doc/source/install/install-base-ubuntu.rst doc/source/install/install-compute-common.inc doc/source/install/install-compute-rdo.rst doc/source/install/install-compute-ubuntu.rst doc/source/install/install-compute.rst doc/source/install/install-controller.rst doc/source/install/install-gnocchi.inc doc/source/install/next-steps.rst doc/source/install/verify.rst doc/source/install/cinder/install-cinder-config-common.inc doc/source/install/cinder/install-cinder-rdo.rst doc/source/install/cinder/install-cinder-ubuntu.rst doc/source/install/glance/install-glance-rdo.rst doc/source/install/glance/install-glance-ubuntu.rst doc/source/install/heat/install-heat-rdo.rst doc/source/install/heat/install-heat-ubuntu.rst doc/source/install/neutron/install-neutron-rdo.rst doc/source/install/neutron/install-neutron-ubuntu.rst doc/source/install/swift/install-swift-config-common.inc doc/source/install/swift/install-swift-prereq-common.inc doc/source/install/swift/install-swift-rdo.rst doc/source/install/swift/install-swift-ubuntu.rst doc/source/releasenotes/folsom.rst doc/source/releasenotes/index.rst etc/ceilometer/ceilometer-config-generator.conf etc/ceilometer/polling.yaml etc/ceilometer/polling_all.yaml etc/ceilometer/rootwrap.conf etc/ceilometer/examples/osprofiler_event_definitions.yaml etc/ceilometer/rootwrap.d/ipmi.filters releasenotes/notes/.placeholder 
releasenotes/notes/add-aodh-metrics-afbe9b780fd137d6.yaml releasenotes/notes/add-availability_zone-gnocchi-instance-15170e4966a89d63.yaml releasenotes/notes/add-db-legacy-clean-tool-7b3e3714f414c448.yaml releasenotes/notes/add-disk-latency-metrics-9e5c05108a78c3d9.yaml releasenotes/notes/add-disk-size-pollsters-6b819d067f9cf736.yaml releasenotes/notes/add-full-snmpv3-usm-support-ab540c902fa89b9d.yaml releasenotes/notes/add-ipmi-sensor-data-gnocchi-70573728499abe86.yaml releasenotes/notes/add-json-output-to-file-publisher-786380cb7e21b56b.yaml releasenotes/notes/add-loadbalancer-resource-type-a73c29594b72f012.yaml releasenotes/notes/add-magnum-event-4c75ed0bb268d19c.yaml releasenotes/notes/add-map-trait-plugin-0d969f5cc7b18175.yaml releasenotes/notes/add-memory-swap-metric-f1633962ab2cf0f6.yaml releasenotes/notes/add-parameter-for-disabled-projects-381da4543fff071d.yaml releasenotes/notes/add-pool-size-metrics-cdecb979135bba85.yaml releasenotes/notes/add-power-state-metric-cdfbb3098b50a704.yaml releasenotes/notes/add-swift-storage_policy-attribute-322fbb5716c5bb10.yaml releasenotes/notes/add-tenant-name-discovery-668260bb4b2b0e8c.yaml releasenotes/notes/add-tool-for-migrating-data-to-gnocchi-cea8d4db68ce03d0.yaml releasenotes/notes/add-upgrade-check-framework-d78858c54cb85f91.yaml releasenotes/notes/add-volume-pollster-metadata-d7b435fed9aac0aa.yaml releasenotes/notes/add-volume_type_id-attr-f29af86534907941.yaml releasenotes/notes/aggregator-transformer-timeout-e0f42b6c96aa7ada.yaml releasenotes/notes/always-requeue-7a2df9243987ab67.yaml releasenotes/notes/batch-messaging-d126cc525879d58e.yaml releasenotes/notes/bug-1929178-a8243526ce2311f7.yaml releasenotes/notes/bug-2007108-dba7163b245ad8fd.yaml releasenotes/notes/bug-2113768-a2db3a59c8e13558.yaml releasenotes/notes/cache-json-parsers-888307f3b6b498a2.yaml releasenotes/notes/ceilometer-api-deprecate-862bfaa54e80fa01.yaml releasenotes/notes/ceilometer-api-removal-6bd44d3eab05e593.yaml 
releasenotes/notes/ceilometer-event-api-removed-49c57835e307b997.yaml releasenotes/notes/cinder-capacity-samples-de94dcfed5540b6c.yaml releasenotes/notes/cinder-volume-size-poller-availability_zone-2d20a7527e2341b9.yaml releasenotes/notes/compute-discovery-interval-d19f7c9036a8c186.yaml releasenotes/notes/configurable-data-collector-e247aadbffb85243.yaml releasenotes/notes/cors-support-70c33ba1f6825a7b.yaml releasenotes/notes/deprecate-aggregated-disk-metrics-54a395c05e74d685.yaml releasenotes/notes/deprecate-ceilometer-collector-b793b91cd28b9e7f.yaml releasenotes/notes/deprecate-contrail-256177299deb6926.yaml releasenotes/notes/deprecate-events-6561f4059fa25c02.yaml releasenotes/notes/deprecate-file-dispatcher-2aff376db7609136.yaml releasenotes/notes/deprecate-generic-hardware-declarative-pollstar-dfa418bf6a5e0459.yaml releasenotes/notes/deprecate-http-control-exchanges-026a8de6819841f8.yaml releasenotes/notes/deprecate-http-dispatcher-dbbaacee8182b550.yaml releasenotes/notes/deprecate-http_timeout-ce98003e4949f9d9.yaml releasenotes/notes/deprecate-kafka-publisher-17b4f221758e15da.yaml releasenotes/notes/deprecate-neutron-fwaas-e985afe956240c08.yaml releasenotes/notes/deprecate-neutron-lbaas-5a36406cbe44bbe3.yaml releasenotes/notes/deprecate-odl-07e3f59165612566.yaml releasenotes/notes/deprecate-pollster-list-ccf22b0dea44f043.yaml releasenotes/notes/deprecate-vmware-ae49e07e40e74577.yaml releasenotes/notes/deprecate-windows-support-d784b975ce878864.yaml releasenotes/notes/deprecate-xen-support-27600e2bf7be548c.yaml releasenotes/notes/deprecated_database_event_dispatcher_panko-607d558c86a90f17.yaml releasenotes/notes/drop-collector-4c207b35d67b2977.yaml releasenotes/notes/drop-image-meter-9c9b6cebd546dae7.yaml releasenotes/notes/drop-instance-meter-1b657717b21a0f55.yaml releasenotes/notes/drop-kwapi-b687bc476186d01b.yaml releasenotes/notes/drop-py-2-7-87352d5763131c13.yaml releasenotes/notes/drop-python-3-6-and-3-7-f67097fa6894da52.yaml 
releasenotes/notes/dynamic-pollster-system-6b45c8c973201b2b.yaml releasenotes/notes/dynamic-pollster-system-for-non-openstack-apis-4e06694f223f34f3.yaml releasenotes/notes/dynamic-pollster-url-joins-6cdb01c4015976f7.yaml releasenotes/notes/enable-promethus-exporter-tls-76e78d4f4a52c6c4.yaml releasenotes/notes/event-type-race-c295baf7f1661eab.yaml releasenotes/notes/fix-1940660-5226988f2e7ae1bd.yaml releasenotes/notes/fix-agent-coordination-a7103a78fecaec24.yaml releasenotes/notes/fix-aggregation-transformer-9472aea189fa8f65.yaml releasenotes/notes/fix-floatingip-pollster-f5172060c626b19e.yaml releasenotes/notes/fix-network-lb-bytes-sample-5dec2c6f3a8ae174.yaml releasenotes/notes/fix-notification-batch-9bb42cbdf817e7f9.yaml releasenotes/notes/fix-radosgw-name-6de6899ddcd7e06d.yaml releasenotes/notes/fix-volume-provider-pool-capacity-metrics-7b8b0de29a513cea.yaml releasenotes/notes/gnocchi-cache-1d8025dfc954f281.yaml releasenotes/notes/gnocchi-cache-b9ad4d85a1da8d3f.yaml releasenotes/notes/gnocchi-client-42cd992075ee53ab.yaml releasenotes/notes/gnocchi-host-metrics-829bcb965d8f2533.yaml releasenotes/notes/gnocchi-no-metric-by-default-b643e09f5ffef2c4.yaml releasenotes/notes/gnocchi-orchestration-3497c689268df0d1.yaml releasenotes/notes/gnocchi-udp-collector-00415e6674b5cc0f.yaml releasenotes/notes/handle-malformed-resource-definitions-ad4f69f898ced34d.yaml releasenotes/notes/http-dispatcher-batching-4e17fce46a196b07.yaml releasenotes/notes/http-dispatcher-verify-ssl-551d639f37849c6f.yaml releasenotes/notes/http-publisher-authentication-6371c5a9aa8d4c03.yaml releasenotes/notes/http_proxy_to_wsgi_enabled-616fa123809e1600.yaml releasenotes/notes/improve-events-rbac-support-f216bd7f34b02032.yaml releasenotes/notes/include-monasca-publisher-1f47dde52af50feb.yaml releasenotes/notes/index-events-mongodb-63cb04200b03a093.yaml releasenotes/notes/instance-discovery-new-default-7f9b451a515dddf4.yaml 
releasenotes/notes/instance-record-launched-created-deleted-d7f44df3bbcf0790.yaml releasenotes/notes/keystone-v3-fab1e257c5672965.yaml releasenotes/notes/kwapi_deprecated-c92b9e72c78365f0.yaml releasenotes/notes/less-nova-polling-ac56687da3f8b1a3.yaml releasenotes/notes/lookup-meter-def-vol-correctly-0122ae429275f2a6.yaml releasenotes/notes/make-instance-host-optional-972fa14405c1e2f6.yaml releasenotes/notes/manager-based-ipc-queues-85e3bf59ffdfb0ac.yaml releasenotes/notes/memory-bandwidth-meter-f86cf01178573671.yaml releasenotes/notes/mongodb-handle-large-numbers-7c235598ca700f2d.yaml releasenotes/notes/network-statistics-from-opendaylight-787df77484d8d751.yaml releasenotes/notes/openstack-dynamic-pollsters-metadata-enrichment-703cf5914cf0c578.yaml releasenotes/notes/parallel_requests_option-a3f901b6001e26e4.yaml releasenotes/notes/parallels-virt_type-ee29c4802fdf5c8e.yaml releasenotes/notes/pecan-debug-removed-dc737efbf911bde7.yaml releasenotes/notes/perf-events-meter-b06c2a915c33bfaf.yaml releasenotes/notes/pipeline-fallback-polling-3d962a0fff49ccdd.yaml releasenotes/notes/polling-batch-size-7fe11925df8d1221.yaml releasenotes/notes/polling-definition-efffb92e3810e571.yaml releasenotes/notes/polling-deprecation-4d5b83180893c053.yaml releasenotes/notes/prometheus-bcb201cfe46d5778.yaml releasenotes/notes/publish-network-resources-with-invalid-state-6693c6fa1fefa097.yaml releasenotes/notes/refresh-legacy-cache-e4dbbd3e2eeca70b.yaml releasenotes/notes/remove-alarms-4df3cdb4f1fb5faa.yaml releasenotes/notes/remove-batch_polled_samples-b40241c8aad3667d.yaml releasenotes/notes/remove-cadf-http-f8449ced3d2a29d4.yaml releasenotes/notes/remove-ceilometer-dbsync-53aa1b529f194f15.yaml releasenotes/notes/remove-check_watchers-a7c955703b6d9f57.yaml releasenotes/notes/remove-compute-disk-meters-264e686622886ff0.yaml releasenotes/notes/remove-compute-rate-deprecated-meters-201893c6b686b04a.yaml releasenotes/notes/remove-compute-workload-partitioning-option-26538bc1e80500e3.yaml 
releasenotes/notes/remove-direct-publisher-5785ee7edd16c4d9.yaml releasenotes/notes/remove-eventlet-6738321434b60c78.yaml releasenotes/notes/remove-exchange-control-options-75ecd49423639068.yaml releasenotes/notes/remove-file-dispatcher-56ba1066c20d314a.yaml releasenotes/notes/remove-generic-hardware-declarative-pollster-e05c614f273ab149.yaml releasenotes/notes/remove-gnocchi-dispatcher-dd588252976c2abb.yaml releasenotes/notes/remove-gnocchi-dispatcher-options-4f4ba2a155c1a766.yaml releasenotes/notes/remove-http-dispatcher-1afdce1d1dc3158d.yaml releasenotes/notes/remove-intel-cmt-perf-meters-15d0fe72b2804f48.yaml releasenotes/notes/remove-intel-node-manager-0889de66dede9ab0.yaml releasenotes/notes/remove-kafka-broker-publisher-7026b370cfc831db.yaml releasenotes/notes/remove-meter-definitions-cfg-file-config-476596fc86c36a81.yaml releasenotes/notes/remove-meter-definitions-cfg-file-d57c726d563d805f.yaml releasenotes/notes/remove-monasca-d5ceda231839d43d.yaml releasenotes/notes/remove-neutron-lbaas-d3d4a5327f6a167a.yaml releasenotes/notes/remove-notification-workload-partitioning-2cef114fb2478e39.yaml releasenotes/notes/remove-nova-http-log-option-64e97a511e58da5d.yaml releasenotes/notes/remove-opencontrail-88656a9354179299.yaml releasenotes/notes/remove-opendaylight-c3839bbe9aa2a227.yaml releasenotes/notes/remove-pollster-list-bda30d747fb87c9e.yaml releasenotes/notes/remove-publisher-topic-options-7a40787a3998921d.yaml releasenotes/notes/remove-py38-80670bdcfd4dd135.yaml releasenotes/notes/remove-py39-8c39f81f856bee9f.yaml releasenotes/notes/remove-refresh-pipeline-618af089c5435db7.yaml releasenotes/notes/remove-rpc-collector-d0d0a354140fd107.yaml releasenotes/notes/remove-sahara-9254593d4fb137b9.yaml releasenotes/notes/remove-service-type-volume-v2-08c81098dc7c0922.yaml releasenotes/notes/remove-shuffle_time_before_polling_task-option-05a4d225236c64b1.yaml releasenotes/notes/remove-transformers-14e00a789dedd76b.yaml 
releasenotes/notes/remove-uml-e86feeabdd16c628.yaml releasenotes/notes/remove-vsphere-support-411c97b66bdcd264.yaml releasenotes/notes/remove-windows-support-0d280cc7c7fffc61.yaml releasenotes/notes/remove-xen-support-7cb932b7bc621269.yaml releasenotes/notes/removed-rgw-ae3d80c2eafc9319.yaml releasenotes/notes/rename-ceilometer-dbsync-eb7a1fa503085528.yaml releasenotes/notes/rename-tenant_name_discovery-1675a236bb51176b.yaml releasenotes/notes/save-rate-in-gnocchi-66244262bc4b7842.yaml releasenotes/notes/scan-domains-for-tenants-8f8c9edcb74cc173.yaml releasenotes/notes/selective-pipeline-notification-47e8a390b1c7dcc4.yaml releasenotes/notes/ship-yaml-files-33aa5852bedba7f0.yaml releasenotes/notes/single-thread-pipelines-f9e6ac4b062747fe.yaml releasenotes/notes/skip-duplicate-meter-def-0420164f6a95c50c.yaml releasenotes/notes/snmp-cpu-util-055cd7704056c1ce.yaml releasenotes/notes/snmp-diskio-samples-fc4b5ed5f19c096c.yaml releasenotes/notes/sql-query-optimisation-ebb2233f7a9b5d06.yaml releasenotes/notes/support-None-query-45abaae45f08eda4.yaml releasenotes/notes/support-cinder-volume-snapshot-backup-metering-d0a93b86bd53e803.yaml releasenotes/notes/support-lbaasv2-polling-c830dd49bcf25f64.yaml releasenotes/notes/support-meter-batch-recording-mongo-6c2bdf4fbb9764eb.yaml releasenotes/notes/support-multiple-meter-definition-files-e3ce1fa73ef2e1de.yaml releasenotes/notes/support-snmp-cpu-util-5c1c7afb713c1acd.yaml releasenotes/notes/support-unique-meter-query-221c6e0c1dc1b726.yaml releasenotes/notes/switch-to-oslo-privsep-b58f20a279f31bc0.yaml releasenotes/notes/thread-safe-matching-4a635fc4965c5d4c.yaml releasenotes/notes/threeads-process-pollsters-cbd22cca6f2effc4.yaml releasenotes/notes/tooz-coordination-system-d1054b9d1a5ddf32.yaml releasenotes/notes/transformer-ed4b1ea7d1752576.yaml releasenotes/notes/unify-timestamp-of-polled-data-fbfcff43cd2d04bc.yaml releasenotes/notes/use-glance-v2-in-image-pollsters-137a315577d5dc4c.yaml 
releasenotes/notes/use-notification-transport-url-489f3d31dc66c4d2.yaml releasenotes/notes/use-usable-metric-if-available-970ee58e8fdeece6.yaml releasenotes/notes/volume-metrics-01ddde0180bc21cb.yaml releasenotes/notes/zaqar-publisher-f7efa030b71731f4.yaml releasenotes/source/2023.1.rst releasenotes/source/2023.2.rst releasenotes/source/2024.1.rst releasenotes/source/2024.2.rst releasenotes/source/2025.1.rst releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/liberty.rst releasenotes/source/mitaka.rst releasenotes/source/newton.rst releasenotes/source/ocata.rst releasenotes/source/pike.rst releasenotes/source/queens.rst releasenotes/source/rocky.rst releasenotes/source/stein.rst releasenotes/source/train.rst releasenotes/source/unreleased.rst releasenotes/source/ussuri.rst releasenotes/source/victoria.rst releasenotes/source/wallaby.rst releasenotes/source/xena.rst releasenotes/source/yoga.rst releasenotes/source/zed.rst releasenotes/source/_static/.placeholder releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po tools/__init__.py tools/send_test_data.py././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922488.0 ceilometer-24.1.0.dev59/ceilometer.egg-info/dependency_links.txt0000664000175100017510000000000115033033470023657 0ustar00mylesmyles ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922488.0 ceilometer-24.1.0.dev59/ceilometer.egg-info/entry_points.txt0000664000175100017510000002174715033033470023122 0ustar00mylesmyles[ceilometer.compute.virt] libvirt = ceilometer.compute.virt.libvirt.inspector:LibvirtInspector [ceilometer.discover.central] alarm = ceilometer.alarm.discovery:AlarmDiscovery barbican = ceilometer.polling.discovery.non_openstack_credentials_discovery:NonOpenStackCredentialsDiscovery endpoint = ceilometer.polling.discovery.endpoint:EndpointDiscovery fip_services = 
ceilometer.network.services.discovery:FloatingIPDiscovery fw_policy = ceilometer.network.services.discovery:FirewallPolicyDiscovery fw_services = ceilometer.network.services.discovery:FirewallDiscovery images = ceilometer.image.discovery:ImagesDiscovery ipsec_connections = ceilometer.network.services.discovery:IPSecConnectionsDiscovery tenant = ceilometer.polling.discovery.tenant:TenantDiscovery volume_backups = ceilometer.volume.discovery:VolumeBackupsDiscovery volume_pools = ceilometer.volume.discovery:VolumePoolsDiscovery volume_snapshots = ceilometer.volume.discovery:VolumeSnapshotsDiscovery volumes = ceilometer.volume.discovery:VolumeDiscovery vpn_services = ceilometer.network.services.discovery:VPNServicesDiscovery [ceilometer.discover.compute] local_instances = ceilometer.compute.discovery:InstanceDiscovery local_node = ceilometer.polling.discovery.localnode:LocalNodeDiscovery [ceilometer.discover.ipmi] local_node = ceilometer.polling.discovery.localnode:LocalNodeDiscovery [ceilometer.event.publisher] file = ceilometer.publisher.file:FilePublisher gnocchi = ceilometer.publisher.gnocchi:GnocchiPublisher http = ceilometer.publisher.http:HttpPublisher https = ceilometer.publisher.http:HttpPublisher notifier = ceilometer.publisher.messaging:EventNotifierPublisher test = ceilometer.publisher.test:TestPublisher zaqar = ceilometer.publisher.zaqar:ZaqarPublisher [ceilometer.event.trait_plugin] bitfield = ceilometer.event.trait_plugins:BitfieldTraitPlugin map = ceilometer.event.trait_plugins:MapTraitPlugin split = ceilometer.event.trait_plugins:SplitterTraitPlugin timedelta = ceilometer.event.trait_plugins:TimedeltaPlugin [ceilometer.notification.pipeline] event = ceilometer.pipeline.event:EventPipelineManager meter = ceilometer.pipeline.sample:SamplePipelineManager [ceilometer.poll.central] alarm.evaluation_result = ceilometer.alarm.aodh:EvaluationResultPollster image.size = ceilometer.image.glance:ImageSizePollster ip.floating = 
ceilometer.network.floatingip:FloatingIPPollster network.services.firewall = ceilometer.network.services.fwaas:FirewallPollster network.services.firewall.policy = ceilometer.network.services.fwaas:FirewallPolicyPollster network.services.vpn = ceilometer.network.services.vpnaas:VPNServicesPollster network.services.vpn.connections = ceilometer.network.services.vpnaas:IPSecConnectionsPollster radosgw.containers.objects = ceilometer.objectstore.rgw:ContainersObjectsPollster radosgw.containers.objects.size = ceilometer.objectstore.rgw:ContainersSizePollster radosgw.objects = ceilometer.objectstore.rgw:ObjectsPollster radosgw.objects.containers = ceilometer.objectstore.rgw:ObjectsContainersPollster radosgw.objects.size = ceilometer.objectstore.rgw:ObjectsSizePollster radosgw.usage = ceilometer.objectstore.rgw:UsagePollster storage.containers.objects = ceilometer.objectstore.swift:ContainersObjectsPollster storage.containers.objects.size = ceilometer.objectstore.swift:ContainersSizePollster storage.objects = ceilometer.objectstore.swift:ObjectsPollster storage.objects.containers = ceilometer.objectstore.swift:ObjectsContainersPollster storage.objects.size = ceilometer.objectstore.swift:ObjectsSizePollster volume.backup.size = ceilometer.volume.cinder:VolumeBackupSize volume.provider.pool.capacity.allocated = ceilometer.volume.cinder:VolumeProviderPoolCapacityAllocated volume.provider.pool.capacity.free = ceilometer.volume.cinder:VolumeProviderPoolCapacityFree volume.provider.pool.capacity.provisioned = ceilometer.volume.cinder:VolumeProviderPoolCapacityProvisioned volume.provider.pool.capacity.total = ceilometer.volume.cinder:VolumeProviderPoolCapacityTotal volume.provider.pool.capacity.virtual_free = ceilometer.volume.cinder:VolumeProviderPoolCapacityVirtualFree volume.size = ceilometer.volume.cinder:VolumeSizePollster volume.snapshot.size = ceilometer.volume.cinder:VolumeSnapshotSize [ceilometer.poll.compute] cpu = ceilometer.compute.pollsters.instance_stats:CPUPollster 
disk.device.allocation = ceilometer.compute.pollsters.disk:PerDeviceAllocationPollster disk.device.capacity = ceilometer.compute.pollsters.disk:PerDeviceCapacityPollster disk.device.read.bytes = ceilometer.compute.pollsters.disk:PerDeviceReadBytesPollster disk.device.read.latency = ceilometer.compute.pollsters.disk:PerDeviceDiskReadLatencyPollster disk.device.read.requests = ceilometer.compute.pollsters.disk:PerDeviceReadRequestsPollster disk.device.usage = ceilometer.compute.pollsters.disk:PerDevicePhysicalPollster disk.device.write.bytes = ceilometer.compute.pollsters.disk:PerDeviceWriteBytesPollster disk.device.write.latency = ceilometer.compute.pollsters.disk:PerDeviceDiskWriteLatencyPollster disk.device.write.requests = ceilometer.compute.pollsters.disk:PerDeviceWriteRequestsPollster disk.ephemeral.size = ceilometer.compute.pollsters.disk:EphemeralSizePollster disk.root.size = ceilometer.compute.pollsters.disk:RootSizePollster memory.resident = ceilometer.compute.pollsters.instance_stats:MemoryResidentPollster memory.swap.in = ceilometer.compute.pollsters.instance_stats:MemorySwapInPollster memory.swap.out = ceilometer.compute.pollsters.instance_stats:MemorySwapOutPollster memory.usage = ceilometer.compute.pollsters.instance_stats:MemoryUsagePollster network.incoming.bytes = ceilometer.compute.pollsters.net:IncomingBytesPollster network.incoming.bytes.delta = ceilometer.compute.pollsters.net:IncomingBytesDeltaPollster network.incoming.bytes.rate = ceilometer.compute.pollsters.net:IncomingBytesRatePollster network.incoming.packets = ceilometer.compute.pollsters.net:IncomingPacketsPollster network.incoming.packets.drop = ceilometer.compute.pollsters.net:IncomingDropPollster network.incoming.packets.error = ceilometer.compute.pollsters.net:IncomingErrorsPollster network.outgoing.bytes = ceilometer.compute.pollsters.net:OutgoingBytesPollster network.outgoing.bytes.delta = ceilometer.compute.pollsters.net:OutgoingBytesDeltaPollster network.outgoing.bytes.rate = 
ceilometer.compute.pollsters.net:OutgoingBytesRatePollster network.outgoing.packets = ceilometer.compute.pollsters.net:OutgoingPacketsPollster network.outgoing.packets.drop = ceilometer.compute.pollsters.net:OutgoingDropPollster network.outgoing.packets.error = ceilometer.compute.pollsters.net:OutgoingErrorsPollster perf.cache.misses = ceilometer.compute.pollsters.instance_stats:PerfCacheMissesPollster perf.cache.references = ceilometer.compute.pollsters.instance_stats:PerfCacheReferencesPollster perf.cpu.cycles = ceilometer.compute.pollsters.instance_stats:PerfCPUCyclesPollster perf.instructions = ceilometer.compute.pollsters.instance_stats:PerfInstructionsPollster power.state = ceilometer.compute.pollsters.instance_stats:PowerStatePollster [ceilometer.poll.ipmi] hardware.ipmi.current = ceilometer.ipmi.pollsters.sensor:CurrentSensorPollster hardware.ipmi.fan = ceilometer.ipmi.pollsters.sensor:FanSensorPollster hardware.ipmi.power = ceilometer.ipmi.pollsters.sensor:PowerSensorPollster hardware.ipmi.temperature = ceilometer.ipmi.pollsters.sensor:TemperatureSensorPollster hardware.ipmi.voltage = ceilometer.ipmi.pollsters.sensor:VoltageSensorPollster [ceilometer.sample.endpoint] _sample = ceilometer.telemetry.notifications:TelemetryIpc hardware.ipmi.current = ceilometer.ipmi.notifications.ironic:CurrentSensorNotification hardware.ipmi.fan = ceilometer.ipmi.notifications.ironic:FanSensorNotification hardware.ipmi.temperature = ceilometer.ipmi.notifications.ironic:TemperatureSensorNotification hardware.ipmi.voltage = ceilometer.ipmi.notifications.ironic:VoltageSensorNotification http.request = ceilometer.middleware:HTTPRequest http.response = ceilometer.middleware:HTTPResponse meter = ceilometer.meter.notifications:ProcessMeterNotifications [ceilometer.sample.publisher] file = ceilometer.publisher.file:FilePublisher gnocchi = ceilometer.publisher.gnocchi:GnocchiPublisher http = ceilometer.publisher.http:HttpPublisher https = ceilometer.publisher.http:HttpPublisher 
notifier = ceilometer.publisher.messaging:SampleNotifierPublisher opentelemetryhttp = ceilometer.publisher.opentelemetry_http:OpentelemetryHttpPublisher prometheus = ceilometer.publisher.prometheus:PrometheusPublisher tcp = ceilometer.publisher.tcp:TCPPublisher test = ceilometer.publisher.test:TestPublisher udp = ceilometer.publisher.udp:UDPPublisher zaqar = ceilometer.publisher.zaqar:ZaqarPublisher [console_scripts] ceilometer-agent-notification = ceilometer.cmd.agent_notification:main ceilometer-polling = ceilometer.cmd.polling:main ceilometer-rootwrap = oslo_rootwrap.cmd:main ceilometer-send-sample = ceilometer.cmd.sample:send_sample ceilometer-status = ceilometer.cmd.status:main ceilometer-upgrade = ceilometer.cmd.storage:upgrade [oslo.config.opts] ceilometer = ceilometer.opts:list_opts ceilometer-auth = ceilometer.opts:list_keystoneauth_opts ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922488.0 ceilometer-24.1.0.dev59/ceilometer.egg-info/not-zip-safe0000664000175100017510000000000115033033470022037 0ustar00mylesmyles ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922488.0 ceilometer-24.1.0.dev59/ceilometer.egg-info/pbr.json0000664000175100017510000000006115033033470021264 0ustar00mylesmyles{"git_version": "e2eab4b75", "is_release": false}././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922488.0 ceilometer-24.1.0.dev59/ceilometer.egg-info/requires.txt0000664000175100017510000000133115033033470022207 0ustar00mylesmylesxmltodict>=0.13.0 cachetools>=2.1.0 cotyledon>=1.3.0 futurist>=1.8.0 jsonpath-rw-ext>=1.1.3 lxml>=4.5.1 msgpack>=0.5.2 oslo.concurrency>=3.29.0 oslo.config>=8.6.0 oslo.i18n>=3.15.3 oslo.log>=3.36.0 oslo.reports>=1.18.0 oslo.rootwrap>=2.0.0 pbr>=2.0.0 oslo.messaging>=10.3.0 oslo.upgradecheck>=0.1.1 oslo.utils>=4.7.0 oslo.privsep>=1.32.0 python-glanceclient>=2.8.0 python-keystoneclient>=3.18.0 keystoneauth1>=3.18.0 
python-neutronclient>=6.7.0 python-novaclient>=9.1.0 python-swiftclient>=3.2.0 python-cinderclient>=3.3.0 PyYAML>=5.1 requests>=2.25.1 stevedore>=1.20.0 tenacity>=6.3.1 tooz>=1.47.0 oslo.cache>=1.26.0 gnocchiclient>=7.0.0 python-zaqarclient>=1.3.0 prometheus_client>=0.20.0 requests-aws>=0.1.4 aodhclient>=3.8.0 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922488.0 ceilometer-24.1.0.dev59/ceilometer.egg-info/top_level.txt0000664000175100017510000000001315033033470022335 0ustar00mylesmylesceilometer ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7989414 ceilometer-24.1.0.dev59/devstack/0000775000175100017510000000000015033033521015570 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/devstack/README.rst0000664000175100017510000000172115033033467017271 0ustar00mylesmyles=============================== Enabling Ceilometer in DevStack =============================== 1. Download Devstack:: git clone https://opendev.org/openstack/devstack cd devstack 2. Add this repo as an external repository in ``local.conf`` file:: [[local|localrc]] enable_plugin ceilometer https://opendev.org/openstack/ceilometer To use stable branches, make sure devstack is on that branch, and specify the branch name to enable_plugin, for example:: enable_plugin ceilometer https://opendev.org/openstack/ceilometer stable/mitaka There are some options, such as CEILOMETER_BACKEND, defined in ``ceilometer/devstack/settings``, they can be used to configure the installation of Ceilometer. If you don't want to use their default value, you can set a new one in ``local.conf``. Alternitvely you can modify copy and modify the sample ``local.conf`` located at ``ceilometer/devstack/local.conf.sample`` 3. Run ``stack.sh``. 
././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7889414 ceilometer-24.1.0.dev59/devstack/files/0000775000175100017510000000000015033033521016672 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7989414 ceilometer-24.1.0.dev59/devstack/files/rpms/0000775000175100017510000000000015033033521017653 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/devstack/files/rpms/ceilometer0000664000175100017510000000003015033033467021730 0ustar00mylesmylesselinux-policy-targeted ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/devstack/local.conf.sample0000664000175100017510000000273515033033467021031 0ustar00mylesmyles[[local|localrc]] # Common options # -------------- #RECLONE=True #FORCE=True #OFFLINE=True #USE_PYTHON3=True #PYTHON3_VERSION=3.8 # HOST_IP shoudl be set to an ip that is present on the host # e.g. the ip of eth0. This will be used to bind api endpoints and horizon. HOST_IP= # Minimal Contents # ---------------- # While ``stack.sh`` is happy to run without ``localrc``, devlife is better when # there are a few minimal variables set: # If the ``*_PASSWORD`` variables are not set here you will be prompted to enter # values for them by ``stack.sh``and they will be added to ``local.conf``. 
ADMIN_PASSWORD=password DATABASE_PASSWORD=$ADMIN_PASSWORD RABBIT_PASSWORD=$ADMIN_PASSWORD SERVICE_PASSWORD=$ADMIN_PASSWORD LOGFILE=$DEST/logs/stack.sh.log LOGDAYS=2 # the plugin line order matters but the placment in the file does not enable_plugin aodh https://opendev.org/openstack/aodh enable_plugin ceilometer https://opendev.org/openstack/ceilometer.git # Gnocchi settings # Gnocchi is optional but can be enbaled by uncommenting CEILOMETER_BACKEND CEILOMETER_BACKEND=gnocchi # if gnocchi is not in LIBS_FROM_GIT it will install from pypi. # Currently this is broken with the latest gnocchi release 4.4.2 # so we need to install from git until # https://github.com/gnocchixyz/gnocchi/issues/1290 is resolved LIBS_FROM_GIT+=gnocchi # to control the version of gnocchi installed from git uncomment these options #GNOCCHI_BRANCH="master" #GNOCCHI_REPO=https://github.com/gnocchixyz/gnocchi ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/devstack/plugin.sh0000664000175100017510000003450515033033467017442 0ustar00mylesmyles# Install and start **Ceilometer** service in devstack # # To enable Ceilometer in devstack add an entry to local.conf that # looks like # # [[local|localrc]] # enable_plugin ceilometer https://opendev.org/openstack/ceilometer # # By default all ceilometer services are started (see devstack/settings) # except for the ceilometer-aipmi service. To disable a specific service # use the disable_service function. # # NOTE: Currently, there are two ways to get the IPMI based meters in # OpenStack. One way is to configure Ironic conductor to report those meters # for the nodes managed by Ironic and to have Ceilometer notification # agent to collect them. Ironic by default does NOT enable that reporting # functionality. 
So in order to do so, users need to set the option of # conductor.send_sensor_data to true in the ironic.conf configuration file # for the Ironic conductor service, and also enable the # ceilometer-anotification service. # # The other way is to use Ceilometer ipmi agent only to get the IPMI based # meters. To make use of the Ceilometer ipmi agent, it must be explicitly # enabled with the following setting: # # enable_service ceilometer-aipmi # # To avoid duplicated meters, users need to make sure to set the # option of conductor.send_sensor_data to false in the ironic.conf # configuration file if the node on which Ceilometer ipmi agent is running # is also managed by Ironic. # # Several variables set in the localrc section adjust common behaviors # of Ceilometer (see within for additional settings): # # CEILOMETER_PIPELINE_INTERVAL: Seconds between pipeline processing runs. Default 300. # CEILOMETER_BACKENDS: List of database backends (e.g. 'gnocchi', 'sg-core', 'gnocchi,sg-core', 'none') # CEILOMETER_COORDINATION_URL: URL for group membership service provided by tooz. # CEILOMETER_EVENT_ALARM: Set to True to enable publisher for event alarming # Save trace setting XTRACE=$(set +o | grep xtrace) set -o xtrace # Support potential entry-points console scripts in VENV or not if [[ ${USE_VENV} = True ]]; then PROJECT_VENV["ceilometer"]=${CEILOMETER_DIR}.venv CEILOMETER_BIN_DIR=${PROJECT_VENV["ceilometer"]}/bin else CEILOMETER_BIN_DIR=$(get_python_exec_prefix) fi # Test if any Ceilometer services are enabled # is_ceilometer_enabled function is_ceilometer_enabled { [[ ,${ENABLED_SERVICES} =~ ,"ceilometer-" ]] && return 0 return 1 } function gnocchi_service_url { echo "$GNOCCHI_SERVICE_PROTOCOL://$GNOCCHI_SERVICE_HOST/metric" } # _ceilometer_install_redis() - Install the redis server and python lib. 
function _ceilometer_install_redis { if is_ubuntu; then install_package redis-server restart_service redis-server else # This will fail (correctly) where a redis package is unavailable install_package redis restart_service redis fi pip_install_gr redis } # Install required services for coordination function _ceilometer_prepare_coordination { if echo $CEILOMETER_COORDINATION_URL | grep -q '^memcached:'; then install_package memcached elif [[ "${CEILOMETER_COORDINATOR_URL%%:*}" == "redis" || "${CEILOMETER_CACHE_BACKEND##*.}" == "redis" || "${CEILOMETER_BACKENDS}" =~ "gnocchi" ]]; then _ceilometer_install_redis fi } # Create ceilometer related accounts in Keystone function ceilometer_create_accounts { local gnocchi_service create_service_user "ceilometer" "admin" if is_service_enabled swift; then # Ceilometer needs ResellerAdmin role to access Swift account stats. get_or_add_user_project_role "ResellerAdmin" "ceilometer" $SERVICE_PROJECT_NAME fi if [[ "$CEILOMETER_BACKENDS" =~ "gnocchi" ]]; then create_service_user "gnocchi" gnocchi_service=$(get_or_create_service "gnocchi" "metric" "OpenStack Metric Service") get_or_create_endpoint $gnocchi_service \ "$REGION_NAME" \ "$(gnocchi_service_url)" \ "$(gnocchi_service_url)" \ "$(gnocchi_service_url)" fi } function install_gnocchi { echo_summary "Installing Gnocchi" if use_library_from_git "gnocchi"; then # we need to git clone manually to ensure that the git repo is added # to the global git repo list and ensure its cloned as the current user # not as root. 
git_clone ${GNOCCHI_REPO} ${GNOCCHI_DIR} ${GNOCCHI_BRANCH} pip_install -e ${GNOCCHI_DIR}[redis,${DATABASE_TYPE},keystone] uwsgi else pip_install gnocchi[redis,${DATABASE_TYPE},keystone] uwsgi fi } function configure_gnocchi { echo_summary "Configure Gnocchi" recreate_database gnocchi sudo install -d -o $STACK_USER -m 755 $GNOCCHI_CONF_DIR iniset $GNOCCHI_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL" iniset $GNOCCHI_CONF indexer url `database_connection_url gnocchi` iniset $GNOCCHI_CONF storage driver redis iniset $GNOCCHI_CONF storage redis_url redis://localhost:6379 iniset $GNOCCHI_CONF metricd metric_processing_delay "$GNOCCHI_METRICD_PROCESSING_DELAY" iniset $GNOCCHI_CONF api auth_mode keystone configure_keystone_authtoken_middleware $GNOCCHI_CONF gnocchi gnocchi-upgrade rm -f "$GNOCCHI_UWSGI_FILE" write_uwsgi_config "$GNOCCHI_UWSGI_FILE" "$CEILOMETER_BIN_DIR/gnocchi-api" "/metric" if [ -n "$GNOCCHI_COORDINATOR_URL" ]; then iniset $GNOCCHI_CONF coordination_url "$GNOCCHI_COORDINATOR_URL" fi } # Activities to do before ceilometer has been installed. function preinstall_ceilometer { echo_summary "Preinstall not in virtualenv context. Skipping." } # cleanup_ceilometer() - Remove residual data files, anything left over # from previous runs that a clean run would need to clean up function cleanup_ceilometer { sudo rm -f "$CEILOMETER_CONF_DIR"/* sudo rmdir "$CEILOMETER_CONF_DIR" } # Set configuraiton for cache backend. # NOTE(cdent): This currently only works for redis. Still working # out how to express the other backends. 
function _ceilometer_configure_cache_backend { iniset $CEILOMETER_CONF cache enabled True iniset $CEILOMETER_CONF cache backend $CEILOMETER_CACHE_BACKEND inidelete $CEILOMETER_CONF cache backend_argument iniadd $CEILOMETER_CONF cache backend_argument url:$CEILOMETER_CACHE_URL iniadd $CEILOMETER_CONF cache backend_argument distributed_lock:True if [[ "${CEILOMETER_CACHE_BACKEND##*.}" == "redis" ]]; then iniadd $CEILOMETER_CONF cache backend_argument db:0 iniadd $CEILOMETER_CONF cache backend_argument redis_expiration_time:600 fi } # Set configuration for storage backend. function _ceilometer_configure_storage_backend { # delete any "," characters used for delimiting individual backends before checking for "none" if [ $(echo "$CEILOMETER_BACKENDS" | tr -d ",") = 'none' ] ; then echo_summary "All Ceilometer backends seems disabled, set \$CEILOMETER_BACKENDS to select one." else head -n -1 $CEILOMETER_CONF_DIR/pipeline.yaml > $CEILOMETER_CONF_DIR/tmp ; mv $CEILOMETER_CONF_DIR/tmp $CEILOMETER_CONF_DIR/pipeline.yaml head -n -1 $CEILOMETER_CONF_DIR/event_pipeline.yaml > $CEILOMETER_CONF_DIR/tmp ; mv $CEILOMETER_CONF_DIR/tmp $CEILOMETER_CONF_DIR/event_pipeline.yaml BACKENDS=$(echo $CEILOMETER_BACKENDS | tr "," "\n") for CEILOMETER_BACKEND in ${BACKENDS[@]}; do if [ "$CEILOMETER_BACKEND" = 'gnocchi' ] ; then echo " - gnocchi://?archive_policy=${GNOCCHI_ARCHIVE_POLICY}&filter_project=service" >> $CEILOMETER_CONF_DIR/event_pipeline.yaml echo " - gnocchi://?archive_policy=${GNOCCHI_ARCHIVE_POLICY}&filter_project=service" >> $CEILOMETER_CONF_DIR/pipeline.yaml configure_gnocchi elif [ "$CEILOMETER_BACKEND" = 'sg-core' ] ; then echo " - tcp://127.0.0.1:4242" >> $CEILOMETER_CONF_DIR/event_pipeline.yaml echo " - tcp://127.0.0.1:4242" >> $CEILOMETER_CONF_DIR/pipeline.yaml else die $LINENO "Unable to configure unknown CEILOMETER_BACKEND $CEILOMETER_BACKEND" fi done fi } # Configure Ceilometer function configure_ceilometer { iniset_rpc_backend ceilometer $CEILOMETER_CONF iniset 
$CEILOMETER_CONF oslo_messaging_notifications topics "$CEILOMETER_NOTIFICATION_TOPICS" iniset $CEILOMETER_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL" if [[ -n "$CEILOMETER_COORDINATION_URL" ]]; then iniset $CEILOMETER_CONF coordination backend_url $CEILOMETER_COORDINATION_URL iniset $CEILOMETER_CONF notification workers $API_WORKERS fi if [[ -n "$CEILOMETER_CACHE_BACKEND" ]]; then _ceilometer_configure_cache_backend fi # Install the policy file and declarative configuration files to # the conf dir. # NOTE(cdent): Do not make this a glob as it will conflict # with rootwrap installation done elsewhere and also clobber # ceilometer.conf settings that have already been made. # Anyway, explicit is better than implicit. cp $CEILOMETER_DIR/etc/ceilometer/polling_all.yaml $CEILOMETER_CONF_DIR/polling.yaml cp $CEILOMETER_DIR/ceilometer/pipeline/data/*.yaml $CEILOMETER_CONF_DIR if [ "$CEILOMETER_PIPELINE_INTERVAL" ]; then sed -i "s/interval:.*/interval: ${CEILOMETER_PIPELINE_INTERVAL}/" $CEILOMETER_CONF_DIR/polling.yaml fi if [ "$CEILOMETER_EVENT_ALARM" == "True" ]; then if ! grep -q '^ *- notifier://?topic=alarm.all$' $CEILOMETER_CONF_DIR/event_pipeline.yaml; then sed -i '/^ *publishers:$/,+1s|^\( *\)-.*$|\1- notifier://?topic=alarm.all\n&|' $CEILOMETER_CONF_DIR/event_pipeline.yaml fi fi # The compute and central agents need these credentials in order to # call out to other services' public APIs. 
iniset $CEILOMETER_CONF service_credentials auth_type password iniset $CEILOMETER_CONF service_credentials user_domain_id default iniset $CEILOMETER_CONF service_credentials project_domain_id default iniset $CEILOMETER_CONF service_credentials project_name $SERVICE_PROJECT_NAME iniset $CEILOMETER_CONF service_credentials username ceilometer iniset $CEILOMETER_CONF service_credentials password $SERVICE_PASSWORD iniset $CEILOMETER_CONF service_credentials region_name $REGION_NAME iniset $CEILOMETER_CONF service_credentials auth_url $KEYSTONE_SERVICE_URI _ceilometer_configure_storage_backend if is_service_enabled ceilometer-aipmi; then # Configure rootwrap for the ipmi agent configure_rootwrap ceilometer fi if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then if ! getent group $LIBVIRT_GROUP >/dev/null; then sudo groupadd $LIBVIRT_GROUP fi add_user_to_group $STACK_USER $LIBVIRT_GROUP fi } # init_ceilometer() - Initialize etc. function init_ceilometer { # Nothing to do : } # Install Ceilometer. # The storage and coordination backends are installed here because the # virtualenv context is active at this point and python drivers need to be # installed. The context is not active during preinstall (when it would # otherwise makes sense to do the backend services). 
function install_ceilometer { if is_service_enabled ceilometer-acentral ceilometer-acompute ceilometer-anotification gnocchi-api gnocchi-metricd; then _ceilometer_prepare_coordination fi if [[ "$CEILOMETER_BACKENDS" =~ 'gnocchi' ]]; then install_gnocchi fi setup_develop $CEILOMETER_DIR sudo install -d -o $STACK_USER -m 755 $CEILOMETER_CONF_DIR } # start_ceilometer() - Start running processes, including screen function start_ceilometer { if [[ "$CEILOMETER_BACKENDS" =~ "gnocchi" ]] ; then run_process gnocchi-api "$CEILOMETER_BIN_DIR/uwsgi --ini $GNOCCHI_UWSGI_FILE" "" run_process gnocchi-metricd "$CEILOMETER_BIN_DIR/gnocchi-metricd --config-file $GNOCCHI_CONF" wait_for_service 30 "$(gnocchi_service_url)" $CEILOMETER_BIN_DIR/ceilometer-upgrade fi run_process ceilometer-acentral "$CEILOMETER_BIN_DIR/ceilometer-polling --polling-namespaces central --config-file $CEILOMETER_CONF" run_process ceilometer-aipmi "$CEILOMETER_BIN_DIR/ceilometer-polling --polling-namespaces ipmi --config-file $CEILOMETER_CONF" # run the notification agent after restarting apache as it needs # operational keystone if using gnocchi run_process ceilometer-anotification "$CEILOMETER_BIN_DIR/ceilometer-agent-notification --config-file $CEILOMETER_CONF" run_process ceilometer-acompute "$CEILOMETER_BIN_DIR/ceilometer-polling --polling-namespaces compute --config-file $CEILOMETER_CONF" $LIBVIRT_GROUP } # stop_ceilometer() - Stop running processes function stop_ceilometer { # Kill the ceilometer and gnocchi services for serv in ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification gnocchi-api gnocchi-metricd; do stop_process $serv done } # This is the main for plugin.sh if is_service_enabled ceilometer; then if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then # Set up other services echo_summary "Configuring system services for Ceilometer" preinstall_ceilometer elif [[ "$1" == "stack" && "$2" == "install" ]]; then echo_summary "Installing Ceilometer" # Use 
stack_install_service here to account for virtualenv stack_install_service ceilometer elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then echo_summary "Configuring Ceilometer" configure_ceilometer # Get ceilometer keystone settings in place ceilometer_create_accounts elif [[ "$1" == "stack" && "$2" == "extra" ]]; then echo_summary "Initializing Ceilometer" # Tidy base for ceilometer init_ceilometer # Start the services start_ceilometer elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then iniset $TEMPEST_CONFIG telemetry alarm_granularity $CEILOMETER_ALARM_GRANULARITY iniset $TEMPEST_CONFIG telemetry alarm_threshold $CEILOMETER_ALARM_THRESHOLD iniset $TEMPEST_CONFIG telemetry alarm_metric_name $CEILOMETER_ALARM_METRIC_NAME iniset $TEMPEST_CONFIG telemetry alarm_aggregation_method $CEILOMETER_ALARM_AGGREGATION_METHOD fi if [[ "$1" == "unstack" ]]; then echo_summary "Shutting Down Ceilometer" stop_ceilometer fi if [[ "$1" == "clean" ]]; then echo_summary "Cleaning Ceilometer" cleanup_ceilometer fi fi # Restore xtrace $XTRACE ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/devstack/settings0000664000175100017510000000547015033033467017372 0ustar00mylesmyles# turn on all the ceilometer services by default (except for ipmi pollster) # Pollsters enable_service ceilometer-acompute ceilometer-acentral # Notification Agent enable_service ceilometer-anotification # Default directories CEILOMETER_DIR=$DEST/ceilometer CEILOMETER_CONF_DIR=/etc/ceilometer CEILOMETER_CONF=$CEILOMETER_CONF_DIR/ceilometer.conf # Gnocchi is the default backind if both "CEILOMETER_BACKEND" # and "CEILOMETER_BACKENDS" are empty CEILOMETER_BACKEND=${CEILOMETER_BACKEND:""} if ! 
[[ "$CEILOMETER_BACKENDS" =~ "$CEILOMETER_BACKEND" ]]; then CEILOMETER_BACKENDS+=","$CEILOMETER_BACKEND fi CEILOMETER_BACKENDS=${CEILOMETER_BACKENDS:-"gnocchi"} if [[ "$CEILOMETER_BACKENDS" =~ "gnocchi" ]]; then enable_service gnocchi-api gnocchi-metricd fi if [[ "$CEILOMETER_BACKENDS" =~ "sg-core" ]]; then enable_service sg-core fi GNOCCHI_DIR=${GNOCCHI_DIR:-${DEST}/gnocchi} GNOCCHI_BRANCH=${GNOCCHI_BRANCH:-"master"} GNOCCHI_REPO=${GNOCCHI_REPO:-https://github.com/gnocchixyz/gnocchi} # Gnocchi default archive_policy for Ceilometer if [ -n "$GNOCCHI_ARCHIVE_POLICY_TEMPEST" ]; then GNOCCHI_ARCHIVE_POLICY=$GNOCCHI_ARCHIVE_POLICY_TEMPEST else GNOCCHI_ARCHIVE_POLICY=${GNOCCHI_ARCHIVE_POLICY:-ceilometer-low} fi GNOCCHI_CONF_DIR=${GNOCCHI_CONF_DIR:-/etc/gnocchi} GNOCCHI_CONF=${GNOCCHI_CONF:-${GNOCCHI_CONF_DIR}/gnocchi.conf} GNOCCHI_COORDINATOR_URL=${CEILOMETER_COORDINATOR_URL:-redis://localhost:6379} GNOCCHI_METRICD_PROCESSING_DELAY=${GNOCCHI_METRICD_PROCESSING_DELAY:-5} GNOCCHI_UWSGI_FILE=${GNOCCHI_UWSGI_FILE:-${GNOCCHI_CONF_DIR}/uwsgi.ini} GNOCCHI_SERVICE_PROTOCOL=http GNOCCHI_SERVICE_HOST=${GNOCCHI_SERVICE_HOST:-${SERVICE_HOST}} # FIXME(sileht): put 300 by default to match the archive policy # when the gate job have overrided this. CEILOMETER_ALARM_GRANULARITY=${CEILOMETER_ALARM_GRANULARITY:-60} CEILOMETER_ALARM_AGGREGATION_METHOD=${CEILOMETER_ALARM_AGGREGATION_METHOD:-rate:mean} CEILOMETER_ALARM_METRIC_NAME=${CEILOMETER_ALARM_METRIC_NAME:-cpu} CEILOMETER_ALARM_THRESHOLD=${CEILOMETER_ALARM_THRESHOLD:-10000000} # To enable OSprofiler change value of this variable to "notifications,profiler" CEILOMETER_NOTIFICATION_TOPICS=${CEILOMETER_NOTIFICATION_TOPICS:-notifications} CEILOMETER_COORDINATION_URL=${CEILOMETER_COORDINATION_URL:-redis://localhost:6379} CEILOMETER_PIPELINE_INTERVAL=${CEILOMETER_PIPELINE_INTERVAL:-} # Cache Options # NOTE(cdent): These are incomplete and specific for this testing. 
CEILOMETER_CACHE_BACKEND=${CEILOMETER_CACHE_BACKEND:-dogpile.cache.redis} CEILOMETER_CACHE_URL=${CEILOMETER_CACHE_URL:-redis://localhost:6379} CEILOMETER_EVENT_ALARM=${CEILOMETER_EVENT_ALARM:-False} # Set up default directories for middleware GITDIR["ceilometermiddleware"]=$DEST/ceilometermiddleware # Get rid of this before done. # Tell emacs to use shell-script-mode ## Local variables: ## mode: shell-script ## End: ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7989414 ceilometer-24.1.0.dev59/devstack/upgrade/0000775000175100017510000000000015033033521017217 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/devstack/upgrade/settings0000664000175100017510000000073415033033467021017 0ustar00mylesmylesregister_project_for_upgrade ceilometer devstack_localrc base enable_plugin ceilometer https://opendev.org/openstack/ceilometer devstack_localrc base enable_service ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification tempest devstack_localrc target enable_plugin ceilometer https://opendev.org/openstack/ceilometer devstack_localrc target enable_service ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification tempest ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/devstack/upgrade/shutdown.sh0000775000175100017510000000114515033033467021443 0ustar00mylesmyles#!/bin/bash # # set -o errexit source $GRENADE_DIR/grenaderc source $GRENADE_DIR/functions source $BASE_DEVSTACK_DIR/functions source $BASE_DEVSTACK_DIR/stackrc # needed for status directory source $BASE_DEVSTACK_DIR/lib/tls source $BASE_DEVSTACK_DIR/lib/apache # Locate the ceilometer plugin and get its functions CEILOMETER_DEVSTACK_DIR=$(dirname $(dirname $0)) source $CEILOMETER_DEVSTACK_DIR/plugin.sh set -o xtrace stop_ceilometer # ensure everything is stopped 
SERVICES_DOWN="ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification" ensure_services_stopped $SERVICES_DOWN ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/devstack/upgrade/upgrade.sh0000775000175100017510000000447115033033467021224 0ustar00mylesmyles#!/usr/bin/env bash # ``upgrade-ceilometer`` echo "*********************************************************************" echo "Begin $0" echo "*********************************************************************" # Clean up any resources that may be in use cleanup() { set +o errexit echo "*********************************************************************" echo "ERROR: Abort $0" echo "*********************************************************************" # Kill ourselves to signal any calling process trap 2; kill -2 $$ } trap cleanup SIGHUP SIGINT SIGTERM # Keep track of the grenade directory RUN_DIR=$(cd $(dirname "$0") && pwd) # Source params source $GRENADE_DIR/grenaderc # Import common functions source $GRENADE_DIR/functions # This script exits on an error so that errors don't compound and you see # only the first error that occurred. set -o errexit # Upgrade Ceilometer # ================== # Locate ceilometer devstack plugin, the directory above the # grenade plugin. CEILOMETER_DEVSTACK_DIR=$(dirname $(dirname $0)) # Get functions from current DevStack source $TARGET_DEVSTACK_DIR/functions source $TARGET_DEVSTACK_DIR/stackrc source $TARGET_DEVSTACK_DIR/lib/apache # Get ceilometer functions from devstack plugin source $CEILOMETER_DEVSTACK_DIR/settings # Print the commands being run so that we can see the command that triggers # an error. 
set -o xtrace # Install the target ceilometer source $CEILOMETER_DEVSTACK_DIR/plugin.sh stack install # calls upgrade-ceilometer for specific release upgrade_project ceilometer $RUN_DIR $BASE_DEVSTACK_BRANCH $TARGET_DEVSTACK_BRANCH # Migrate the database # NOTE(chdent): As we evolve BIN_DIR is likely to be defined, but # currently it is not. CEILOMETER_BIN_DIR=$(get_python_exec_prefix) $CEILOMETER_BIN_DIR/ceilometer-upgrade --skip-gnocchi-resource-types || die $LINENO "ceilometer-upgrade error" # Start Ceilometer start_ceilometer # Note(liamji): Disable the test for ceilometer-aipmi. # In the test environment, the impi is not ready and the service should fail. ensure_services_started ceilometer-acentral ceilometer-acompute ceilometer-anotification set +o xtrace echo "*********************************************************************" echo "SUCCESS: End $0" echo "*********************************************************************" ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7989414 ceilometer-24.1.0.dev59/doc/0000775000175100017510000000000015033033521014531 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/requirements.txt0000664000175100017510000000027615033033467020033 0ustar00mylesmylessphinx>=2.1.1 # BSD sphinxcontrib-httpdomain>=1.3.0 # BSD sphinxcontrib-blockdiag>=1.5.4 # BSD reno>=3.1.0 # Apache-2.0 os-api-ref>=1.4.0 # Apache-2.0 openstackdocstheme>=2.2.1 # Apache-2.0 ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7989414 ceilometer-24.1.0.dev59/doc/source/0000775000175100017510000000000015033033521016031 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7999413 ceilometer-24.1.0.dev59/doc/source/admin/0000775000175100017510000000000015033033521017121 
5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/admin/index.rst0000664000175100017510000000100415033033467020766 0ustar00mylesmyles.. _admin: =================== Administrator Guide =================== Overview ======== .. toctree:: :maxdepth: 2 telemetry-system-architecture Configuration ============= .. toctree:: :maxdepth: 2 telemetry-data-collection telemetry-data-pipelines telemetry-best-practices telemetry-dynamic-pollster Data Types ========== .. toctree:: :maxdepth: 2 telemetry-measurements telemetry-events Management ========== .. toctree:: :maxdepth: 2 telemetry-troubleshooting-guide ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/admin/telemetry-best-practices.rst0000664000175100017510000000232215033033467024603 0ustar00mylesmylesTelemetry best practices ~~~~~~~~~~~~~~~~~~~~~~~~ The following are some suggested best practices to follow when deploying and configuring the Telemetry service. Data collection --------------- #. The Telemetry service collects a continuously growing set of data. Not all the data will be relevant for an administrator to monitor. - Based on your needs, you can edit the ``polling.yaml`` and ``pipeline.yaml`` configuration files to include select meters to generate or process - By default, Telemetry service polls the service APIs every 10 minutes. You can change the polling interval on a per meter basis by editing the ``polling.yaml`` configuration file. .. warning:: If the polling interval is too short, it will likely increase the stress on the service APIs. #. If polling many resources or at a high frequency, you can add additional central and compute agents as necessary. The agents are designed to scale horizontally. For more information refer to the `high availability guide `_. .. 
note:: The High Availability Guide is a work in progress and is changing rapidly while testing continues. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/admin/telemetry-data-collection.rst0000664000175100017510000002767715033033467024761 0ustar00mylesmyles.. _telemetry-data-collection: =============== Data collection =============== The main responsibility of Telemetry in OpenStack is to collect information about the system that can be used by billing systems or interpreted by analytic tooling. Collected data can be stored in the form of samples or events in the supported databases, which are listed in :ref:`telemetry-supported-databases`. The available data collection mechanisms are: Notifications Processing notifications from other OpenStack services, by consuming messages from the configured message queue system. Polling Retrieve information directly from the hypervisor or by using the APIs of other OpenStack services. Notifications ============= All OpenStack services send notifications about the executed operations or system state. Several notifications carry information that can be metered. For example, CPU time of a VM instance created by OpenStack Compute service. The notification agent is responsible for consuming notifications. This component is responsible for consuming from the message bus and transforming notifications into events and measurement samples. By default, the notification agent is configured to build both events and samples. To enable selective data models, set the required pipelines using `pipelines` option under the `[notification]` section. Additionally, the notification agent is responsible to send to any supported publisher target such as gnocchi or panko. These services persist the data in configured databases. The different OpenStack services emit several notifications about the various types of events that happen in the system during normal operation. 
Not all these notifications are consumed by the Telemetry service, as the intention is only to capture the billable events and notifications that can be used for monitoring or profiling purposes. The notifications handled are contained under the `ceilometer.sample.endpoint` namespace. .. note:: Some services require additional configuration to emit the notifications. Please see the :ref:`install_controller` for more details. .. _meter_definitions: Meter definitions ----------------- The Telemetry service collects a subset of the meters by filtering notifications emitted by other OpenStack services. You can find the meter definitions in a separate configuration file, called ``ceilometer/data/meters.d/meters.yaml``. This enables operators/administrators to add new meters to Telemetry project by updating the ``meters.yaml`` file without any need for additional code changes. .. note:: The ``meters.yaml`` file should be modified with care. Unless intended, do not remove any existing meter definitions from the file. Also, the collected meters can differ in some cases from what is referenced in the documentation. It also support loading multiple meter definition files and allow users to add their own meter definitions into several files according to different types of metrics under the directory of ``/etc/ceilometer/meters.d``. A standard meter definition looks like: .. code-block:: yaml --- metric: - name: 'meter name' event_type: 'event name' type: 'type of meter eg: gauge, cumulative or delta' unit: 'name of unit eg: MB' volume: 'path to a measurable value eg: $.payload.size' resource_id: 'path to resource id eg: $.payload.id' project_id: 'path to project id eg: $.payload.owner' metadata: 'addiitonal key-value data describing resource' The definition above shows a simple meter definition with some fields, from which ``name``, ``event_type``, ``type``, ``unit``, and ``volume`` are required. If there is a match on the event type, samples are generated for the meter. 
The ``meters.yaml`` file contains the sample definitions for all the meters that Telemetry is collecting from notifications. The value of each field is specified by using JSON path in order to find the right value from the notification message. In order to be able to specify the right field you need to be aware of the format of the consumed notification. The values that need to be searched in the notification message are set with a JSON path starting with ``$.`` For instance, if you need the ``size`` information from the payload you can define it like ``$.payload.size``. A notification message may contain multiple meters. You can use ``*`` in the meter definition to capture all the meters and generate samples respectively. You can use wild cards as shown in the following example: .. code-block:: yaml --- metric: - name: $.payload.measurements.[*].metric.[*].name event_type: 'event_name.*' type: 'delta' unit: $.payload.measurements.[*].metric.[*].unit volume: payload.measurements.[*].result resource_id: $.payload.target user_id: $.payload.initiator.id project_id: $.payload.initiator.project_id In the above example, the ``name`` field is a JSON path with matching a list of meter names defined in the notification message. You can use complex operations on JSON paths. In the following example, ``volume`` and ``resource_id`` fields perform an arithmetic and string concatenation: .. code-block:: yaml --- metric: - name: 'compute.node.cpu.idle.percent' event_type: 'compute.metrics.update' type: 'gauge' unit: 'percent' volume: payload.metrics[?(@.name='cpu.idle.percent')].value * 100 resource_id: $.payload.host + "_" + $.payload.nodename You can use the ``timedelta`` plug-in to evaluate the difference in seconds between two ``datetime`` fields from one notification. .. 
code-block:: yaml --- metric: - name: 'compute.instance.booting.time' event_type: 'compute.instance.create.end' type: 'gauge' unit: 'sec' volume: fields: [$.payload.created_at, $.payload.launched_at] plugin: 'timedelta' project_id: $.payload.tenant_id resource_id: $.payload.instance_id .. _Polling-Configuration: Polling ======= The Telemetry service is intended to store a complex picture of the infrastructure. This goal requires additional information than what is provided by the events and notifications published by each service. Some information is not emitted directly, like resource usage of the VM instances. Therefore Telemetry uses another method to gather this data by polling the infrastructure including the APIs of the different OpenStack services and other assets, like hypervisors. The latter case requires closer interaction with the compute hosts. To solve this issue, Telemetry uses an agent based architecture to fulfill the requirements against the data collection. Configuration ------------- Polling rules are defined by the `polling.yaml` file. It defines the pollsters to enable and the interval they should be polled. Each source configuration encapsulates meter name matching which matches against the entry point of pollster. It also includes: polling interval determination, optional resource enumeration or discovery. All samples generated by polling are placed on the queue to be handled by the pipeline configuration loaded in the notification agent. The polling definition may look like the following:: --- sources: - name: 'source name' interval: 'how often the samples should be generated' meters: - 'meter filter' resources: - 'list of resource URLs' discovery: - 'list of discoverers' The *interval* parameter in the sources section defines the cadence of sample generation in seconds. Polling plugins are invoked according to each source's section whose *meters* parameter matches the plugin's meter name. 
Its matching logic functions the same as pipeline filtering. The optional *resources* section of a polling source allows a list of static resource URLs to be configured. An amalgamated list of all statically defined resources are passed to individual pollsters for polling. The optional *discovery* section of a polling source contains the list of discoverers. These discoverers can be used to dynamically discover the resources to be polled by the pollsters. If both *resources* and *discovery* are set, the final resources passed to the pollsters will be the combination of the dynamic resources returned by the discoverers and the static resources defined in the *resources* section. Agents ------ There are three types of agents supporting the polling mechanism, the ``compute agent``, the ``central agent``, and the ``IPMI agent``. Under the hood, all the types of polling agents are the same ``ceilometer-polling`` agent, except that they load different polling plug-ins (pollsters) from different namespaces to gather data. The following subsections give further information regarding the architectural and configuration details of these components. Running :command:`ceilometer-agent-compute` is exactly the same as: .. code-block:: console $ ceilometer-polling --polling-namespaces compute Running :command:`ceilometer-agent-central` is exactly the same as: .. code-block:: console $ ceilometer-polling --polling-namespaces central Running :command:`ceilometer-agent-ipmi` is exactly the same as: .. code-block:: console $ ceilometer-polling --polling-namespaces ipmi Compute agent ~~~~~~~~~~~~~ This agent is responsible for collecting resource usage data of VM instances on individual compute nodes within an OpenStack deployment. This mechanism requires a closer interaction with the hypervisor, therefore a separate agent type fulfills the collection of the related meters, which is placed on the host machines to retrieve this information locally. 
A Compute agent instance has to be installed on each and every compute node, installation instructions can be found in the :ref:`install_compute` section in the Installation Tutorials and Guides. The list of supported hypervisors can be found in :ref:`telemetry-supported-hypervisors`. The Compute agent uses the API of the hypervisor installed on the compute hosts. Therefore, the supported meters may be different in case of each virtualization back end, as each inspection tool provides a different set of meters. The list of collected meters can be found in :ref:`telemetry-compute-meters`. The support column provides the information about which meter is available for each hypervisor supported by the Telemetry service. Central agent ~~~~~~~~~~~~~ This agent is responsible for polling public REST APIs to retrieve additional information on OpenStack resources not already surfaced via notifications. Some of the services polled with this agent are: - OpenStack Networking - OpenStack Object Storage - OpenStack Block Storage To install and configure this service use the :ref:`install_rdo` section in the Installation Tutorials and Guides. Although Ceilometer has a set of default polling agents, operators can add new pollsters dynamically via the dynamic pollsters subsystem :ref:`telemetry_dynamic_pollster`. .. _telemetry-ipmi-agent: IPMI agent ~~~~~~~~~~ This agent is responsible for collecting IPMI sensor data and Intel Node Manager data on individual compute nodes within an OpenStack deployment. This agent requires an IPMI capable node with the ipmitool utility installed, which is commonly used for IPMI control on various Linux distributions. An IPMI agent instance could be installed on each and every compute node with IPMI support, except when the node is managed by the Bare metal service and the ``conductor.send_sensor_data`` option is set to ``true`` in the Bare metal service. 
It is no harm to install this agent on a compute node without IPMI support, as the agent checks for the hardware and if IPMI support is not available, returns empty data. It is suggested that you install the IPMI agent only on an IPMI capable node for performance reasons. The list of collected meters can be found in :ref:`telemetry-bare-metal-service`. .. note:: Do not deploy both the IPMI agent and the Bare metal service on one compute node. If ``conductor.send_sensor_data`` is set, this misconfiguration causes duplicated IPMI sensor samples. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/admin/telemetry-data-pipelines.rst0000664000175100017510000002257615033033467024607 0ustar00mylesmyles.. _telemetry-data-pipelines: ============================= Data processing and pipelines ============================= The mechanism by which data is processed is called a pipeline. Pipelines, at the configuration level, describe a coupling between sources of data and the corresponding sinks for publication of data. This functionality is handled by the notification agents. A source is a producer of data: ``samples`` or ``events``. In effect, it is a set of notification handlers emitting datapoints for a set of matching meters and event types. Each source configuration encapsulates name matching and mapping to one or more sinks for publication. A sink, on the other hand, is a consumer of data, providing logic for the publication of data emitted from related sources. In effect, a sink describes a list of one or more publishers. .. _telemetry-pipeline-configuration: Pipeline configuration ~~~~~~~~~~~~~~~~~~~~~~ The notification agent supports two pipelines: one that handles samples and another that handles events. The pipelines can be enabled and disabled by setting `pipelines` option in the `[notifications]` section. 
The actual configuration of each pipeline is, by default, stored in separate configuration files: ``pipeline.yaml`` and ``event_pipeline.yaml``. The location of the configuration files can be set by the ``pipeline_cfg_file`` and ``event_pipeline_cfg_file`` options listed in :ref:`configuring`. The meter pipeline definition looks like: .. code-block:: yaml --- sources: - name: 'source name' meters: - 'meter filter' sinks: - 'sink name' sinks: - name: 'sink name' publishers: - 'list of publishers' There are several ways to define the list of meters for a pipeline source. The list of valid meters can be found in :ref:`telemetry-measurements`. There is a possibility to define all the meters, or just included or excluded meters, with which a source should operate: - To include all meters, use the ``*`` wildcard symbol. It is highly advisable to select only the meters that you intend on using to avoid flooding the metering database with unused data. - To define the list of meters, use either of the following: - To define the list of included meters, use the ``meter_name`` syntax. - To define the list of excluded meters, use the ``!meter_name`` syntax. .. note:: The OpenStack Telemetry service does not have any duplication check between pipelines, and if you add a meter to multiple pipelines then it is assumed the duplication is intentional and may be stored multiple times according to the specified sinks. The above definition methods can be used in the following combinations: - Use only the wildcard symbol. - Use the list of included meters. - Use the list of excluded meters. - Use wildcard symbol with the list of excluded meters. .. note:: At least one of the above variations should be included in the meters section. Included and excluded meters cannot co-exist in the same pipeline. Wildcard and included meters cannot co-exist in the same pipeline definition section. The publishers section contains the list of publishers, where the sample data should be sent. 
Similarly, the event pipeline definition looks like: .. code-block:: yaml --- sources: - name: 'source name' events: - 'event filter' sinks: - 'sink name' sinks: - name: 'sink name' publishers: - 'list of publishers' The event filter uses the same filtering logic as the meter pipeline. .. _publishing: Publishers ---------- The Telemetry service provides several transport methods to transfer the data collected to an external system. The consumers of this data are widely different, like monitoring systems, for which data loss is acceptable and billing systems, which require reliable data transportation. Telemetry provides methods to fulfill the requirements of both kinds of systems. The publisher component makes it possible to save the data into persistent storage through the message bus or to send it to one or more external consumers. One chain can contain multiple publishers. To solve this problem, the multi-publisher can be configured for each data point within the Telemetry service, allowing the same technical meter or event to be published multiple times to multiple destinations, each potentially using a different transport. The following publisher types are supported: gnocchi (default) ````````````````` When the gnocchi publisher is enabled, measurement and resource information is pushed to gnocchi for time-series optimized storage. Gnocchi must be registered in the Identity service as Ceilometer discovers the exact path via the Identity service. More details on how to enable and configure gnocchi can be found on its `official documentation page `__. prometheus `````````` Metering data can be sent to the `pushgateway `__ of Prometheus by using: ``prometheus://pushgateway-host:9091/metrics/job/openstack-telemetry`` With this publisher, timestamps are not sent to Prometheus due to Prometheus Pushgateway design. All timestamps are set at the time it scrapes the metrics from the Pushgateway and not when the metric was polled on the OpenStack services. 
In order to get timeseries in Prometheus that look like the reality (but with the lag added by the Prometheus scraping mechanism), the `scrape_interval` for the pushgateway must be lower and a multiple of the Ceilometer polling interval. You can read more `here `__. Due to this, it is not recommended to use this publisher for billing purposes as timestamps in Prometheus will not be exact. notifier ```````` The notifier publisher can be specified in the form of ``notifier://?option1=value1&option2=value2``. It emits data over AMQP using oslo.messaging. Any consumer can then subscribe to the published topic for additional processing. The following customization options are available: ``per_meter_topic`` The value of this parameter is 1. It is used for publishing the samples on additional ``metering_topic.sample_name`` topic queue besides the default ``metering_topic`` queue. ``policy`` Used for configuring the behavior for the case when the publisher fails to send the samples, where the possible predefined values are: default Used for waiting and blocking until the samples have been sent. drop Used for dropping the samples which failed to be sent. queue Used for creating an in-memory queue and retrying to send the samples on the queue in the next samples publishing period (the queue length can be configured with ``max_queue_length``, where 1024 is the default value). ``topic`` The topic name of the queue to publish to. Setting this will override the default topic defined by ``metering_topic`` and ``event_topic`` options. This option can be used to support multiple consumers. udp ``` This publisher can be specified in the form of ``udp://:/``. It emits metering data over UDP. file ```` The file publisher can be specified in the form of ``file://path?option1=value1&option2=value2``. This publisher records metering data into a file. .. 
note:: If a file name and location is not specified, the ``file`` publisher does not log any meters, instead it logs a warning message in the configured log file for Telemetry. The following options are available for the ``file`` publisher: ``max_bytes`` When this option is greater than zero, it will cause a rollover. When the specified size is about to be exceeded, the file is closed and a new file is silently opened for output. If its value is zero, rollover never occurs. ``backup_count`` If this value is non-zero, an extension will be appended to the filename of the old log, as '.1', '.2', and so forth until the specified value is reached. The file that is written and contains the newest data is always the one that is specified without any extensions. ``json`` If this option is present, will force ceilometer to write json format into the file. http ```` The Telemetry service supports sending samples to an external HTTP target. The samples are sent without any modification. To set this option as the notification agents' target, set ``http://`` as a publisher endpoint in the pipeline definition files. The HTTP target should be set along with the publisher declaration. For example, additional configuration options can be passed in: ``http://localhost:80/?option1=value1&option2=value2`` The following options are available: ``timeout`` The number of seconds before HTTP request times out. ``max_retries`` The number of times to retry a request before failing. ``batch`` If false, the publisher will send each sample and event individually, whether or not the notification agent is configured to process in batches. ``verify_ssl`` If false, the ssl certificate verification is disabled. The default publisher is ``gnocchi``, without any additional options specified. A sample ``publishers`` section in the ``/etc/ceilometer/pipeline.yaml`` looks like the following: .. 
code-block:: yaml publishers: - gnocchi:// - udp://10.0.0.2:1234 - notifier://?policy=drop&max_queue_length=512&topic=custom_target ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/admin/telemetry-dynamic-pollster.rst0000664000175100017510000011473415033033467025174 0ustar00mylesmyles.. _telemetry_dynamic_pollster: Introduction to dynamic pollster subsystem ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The dynamic pollster feature allows system administrators to create/update REST API pollsters on the fly (without changing code). The system reads YAML configures that are found in ``pollsters_definitions_dirs`` parameter, which has the default at ``/etc/ceilometer/pollsters.d``. Operators can use a single file per dynamic pollster or multiple dynamic pollsters per file. Current limitations of the dynamic pollster system -------------------------------------------------- Currently, the following types of APIs are not supported by the dynamic pollster system: * Tenant APIs: Tenant APIs are the ones that need to be polled in a tenant fashion. This feature is "a nice" to have, but is currently not implemented. The dynamic pollsters system configuration (for OpenStack APIs) --------------------------------------------------------------- Each YAML file in the dynamic pollster feature can use the following attributes to define a dynamic pollster: .. warning:: Caution: Ceilometer does not accept complex value data structure for ``value`` and ``metadata`` configurations. Therefore, if you are extracting a complex data structure (Object, list, map, or others), you can take advantage of the ``Operations on extracted attributes`` feature to transform the object into a simple value (string or number) * ``name``: mandatory field. It specifies the name/key of the dynamic pollster. 
For instance, a pollster for magnum can use the name ``dynamic.magnum.cluster``; * ``sample_type``: mandatory field; it defines the sample type. It must be one of the values: ``gauge``, ``delta``, ``cumulative``; * ``unit``: mandatory field; defines the unit of the metric that is being collected. For magnum, for instance, one can use ``cluster`` as the unit or some other meaningful String value; * ``value_attribute``: mandatory attribute; defines the attribute in the response from the URL of the component being polled. We also accept nested values dictionaries. To use a nested value one can simply use ``attribute1.attribute2..lastattribute``. It is also possible to reference the sample itself using ``"." (dot)``; the self reference of the sample is interesting in cases when the attribute might not exist. Therefore, together with the operations options, one can first check if it exist before retrieving it (example: ``". | value['some_field'] if 'some_field' in value else ''"``). In our magnum example, we can use ``status`` as the value attribute; * ``endpoint_type``: mandatory field; defines the endpoint type that is used to discover the base URL of the component to be monitored; for magnum, one can use ``container-infra``. Other values are accepted such as ``volume`` for cinder endpoints, ``object-store`` for swift, and so on; * ``url_path``: mandatory attribute. It defines the path of the request that we execute on the endpoint to gather data. For example, to gather data from magnum, one can use ``v1/clusters/detail``; * ``metadata_fields``: optional field. It is a list of all fields that the response of the request executed with ``url_path`` that we want to retrieve. To use a nested value one can simply use ``attribute1.attribute2..lastattribute``. As an example, for magnum, one can use the following values: .. 
code-block:: yaml metadata_fields: - "labels" - "updated_at" - "keypair" - "master_flavor_id" - "api_address" - "master_addresses" - "node_count" - "docker_volume_size" - "master_count" - "node_addresses" - "status_reason" - "coe_version" - "cluster_template_id" - "name" - "stack_id" - "created_at" - "discovery_url" - "container_version" * ``skip_sample_values``: optional field. It defines the values that might come in the ``value_attribute`` that we want to ignore. For magnum, one could, for instance, ignore some of the status it has for clusters. Therefore, data is not gathered for clusters in the defined status. .. code-block:: yaml skip_sample_values: - "CREATE_FAILED" - "DELETE_FAILED" * ``value_mapping``: optional attribute. It defines a mapping for the values that the dynamic pollster is handling. This is the actual value that is sent to Gnocchi or other backends. If there is no mapping specified, we will use the raw value that is obtained with the use of ``value_attribute``. As an example for magnum, one can use: .. code-block:: yaml value_mapping: CREATE_IN_PROGRESS: "0" CREATE_FAILED: "1" CREATE_COMPLETE: "2" UPDATE_IN_PROGRESS: "3" UPDATE_FAILED: "4" UPDATE_COMPLETE: "5" DELETE_IN_PROGRESS: "6" DELETE_FAILED: "7" DELETE_COMPLETE: "8" RESUME_COMPLETE: "9" RESUME_FAILED: "10" RESTORE_COMPLETE: "11" ROLLBACK_IN_PROGRESS: "12" ROLLBACK_FAILED: "13" ROLLBACK_COMPLETE: "14" SNAPSHOT_COMPLETE: "15" CHECK_COMPLETE: "16" ADOPT_COMPLETE: "17" * ``default_value``: optional parameter. The default value for the value mapping in case the variable value receives data that is not mapped to something in the ``value_mapping`` configuration. This attribute is only used when ``value_mapping`` is defined. Moreover, it has a default of ``-1``. * ``metadata_mapping``: optional parameter. The map used to create new metadata fields. 
The key is a metadata name that exists in the response of the request we make, and the value of this map is the new desired metadata field that will be created with the content of the metadata that we are mapping. The ``metadata_mapping`` can be created as follows: .. code-block:: yaml metadata_mapping: name: "display_name" some_attribute: "new_attribute_name" * ``preserve_mapped_metadata``: optional parameter. It indicates if we preserve the old metadata name when it gets mapped to a new one. The default value is ``True``. * ``response_entries_key``: optional parameter. This value is used to define the "key" of the response that will be used to look-up the entries used in the dynamic pollster processing. If no ``response_entries_key`` is informed by the operator, we will use the first we find. Moreover, if the response contains a list, instead of an object where one of its attributes is a list of entries, we use the list directly. Therefore, this option will be ignored when the API is returning the list/array of entries to be processed directly. We also accept nested values dictionaries. To use a nested value one can simply use ``attribute1.attribute2..lastattribute`` * ``user_id_attribute``: optional parameter. The default value is ``user_id``. The name of the attribute in the entries that are processed from ``response_entries_key`` elements that will be mapped to ``user_id`` attribute that is sent to Gnocchi. * ``project_id_attribute``: optional parameter. The default value is ``project_id``. The name of the attribute in the entries that are processed from ``response_entries_key`` elements that will be mapped to ``project_id`` attribute that is sent to Gnocchi. * ``resource_id_attribute``: optional parameter. The default value is ``id``. The name of the attribute in the entries that are processed from ``response_entries_key`` elements that will be mapped to ``id`` attribute that is sent to Gnocchi. * ``headers``: optional parameter. 
It is a map (similar to the metadata_mapping) of key and value that can be used to customize the header of the request that is executed against the URL. This configuration works for both OpenStack and non-OpenStack dynamic pollster configuration. .. code-block:: yaml headers: "x-openstack-nova-api-version": "2.46" * ``timeout``: optional parameter. Defines the request timeout for the requests executed by the dynamic pollsters to gather data. The default timeout value is 30 seconds. If it is set to `None`, this means that the request never times out on the client side. Therefore, one might have problems if the server never closes the connection. The pollsters are executed serially, one after the other. Therefore, if the request hangs, all pollsters (including the non-dynamic ones) will stop executing. * ``namespaces``: optional parameter. Defines the namespaces (running ceilometer instances) where the pollster will be instantiated. This parameter accepts a single string value or a list of strings. The default value is `central`. The complete YAML configuration to gather data from Magnum (that has been used as an example) is the following: .. 
code-block:: yaml --- - name: "dynamic.magnum.cluster" sample_type: "gauge" unit: "cluster" value_attribute: "status" endpoint_type: "container-infra" url_path: "v1/clusters/detail" metadata_fields: - "labels" - "updated_at" - "keypair" - "master_flavor_id" - "api_address" - "master_addresses" - "node_count" - "docker_volume_size" - "master_count" - "node_addresses" - "status_reason" - "coe_version" - "cluster_template_id" - "name" - "stack_id" - "created_at" - "discovery_url" - "container_version" value_mapping: CREATE_IN_PROGRESS: "0" CREATE_FAILED: "1" CREATE_COMPLETE: "2" UPDATE_IN_PROGRESS: "3" UPDATE_FAILED: "4" UPDATE_COMPLETE: "5" DELETE_IN_PROGRESS: "6" DELETE_FAILED: "7" DELETE_COMPLETE: "8" RESUME_COMPLETE: "9" RESUME_FAILED: "10" RESTORE_COMPLETE: "11" ROLLBACK_IN_PROGRESS: "12" ROLLBACK_FAILED: "13" ROLLBACK_COMPLETE: "14" SNAPSHOT_COMPLETE: "15" CHECK_COMPLETE: "16" ADOPT_COMPLETE: "17" We can also replicate and enhance some hardcoded pollsters. For instance, the pollster to gather VPN connections. Currently, it is always persisting `1` for all of the VPN connections it finds. However, the VPN connection can have multiple statuses, and we should normally only bill for active resources, and not resources on `ERROR` states. An example to gather VPN connections data is the following (this is just an example, and one can adapt and configure as he/she desires): .. code-block:: yaml --- - name: "dynamic.network.services.vpn.connection" sample_type: "gauge" unit: "ipsec_site_connection" value_attribute: "status" endpoint_type: "network" url_path: "v2.0/vpn/ipsec-site-connections" metadata_fields: - "name" - "vpnservice_id" - "description" - "status" - "peer_address" value_mapping: ACTIVE: "1" metadata_mapping: name: "display_name" default_value: 0 * ``response_handlers``: optional parameter. Defines the response handlers used to handle the response. 
For now, the supported values are: ``json``: This handler will interpret the response as a `JSON` and will convert it to a `dictionary` which can be manipulated using the operations options when mapping the attributes: .. code-block:: yaml --- - name: "dynamic.json.response" sample_type: "gauge" [...] response_handlers: - json Response to handle: .. code-block:: json { "test": { "list": [1, 2, 3] } } Response handled: .. code-block:: python { 'test': { 'list': [1, 2, 3] } } ``xml``: This handler will interpret the response as an `XML` and will convert it to a `dictionary` which can be manipulated using the operations options when mapping the attributes: .. code-block:: yaml --- - name: "dynamic.json.response" sample_type: "gauge" [...] response_handlers: - xml Response to handle: .. code-block:: xml 1 2 3 Response handled: .. code-block:: python { 'test': { 'list': [1, 2, 3] } } ``text``: This handler will interpret the response as a `PlainText` and will convert it to a `dictionary` which can be manipulated using the operations options when mapping the attributes: .. code-block:: yaml --- - name: "dynamic.json.response" sample_type: "gauge" [...] response_handlers: - text Response to handle: .. code-block:: text Plain text response Response handled: .. code-block:: python { 'out': "Plain text response" } They can be used together or individually. If not defined, the `default` value will be `json`. If you set 2 or more response handlers, the first configured handler will be used to try to handle the response, if it is not possible, a `DEBUG` log message will be displayed, then the next will be used and so on. If no configured handler was able to handle the response, an empty dict will be returned and a `WARNING` log will be displayed to warn operators that the response was not able to be handled by any configured handler. 
The dynamic pollsters system configuration (for non-OpenStack APIs) ------------------------------------------------------------------- The dynamic pollster system can also be used for non-OpenStack APIs. to configure non-OpenStack APIs, one can use all but one attribute of the Dynamic pollster system. The attribute that is not supported is the ``endpoint_type``. The dynamic pollster system for non-OpenStack APIs is activated automatically when one uses the configurations ``module``. The extra parameters (in addition to the original ones) that are available when using the Non-OpenStack dynamic pollster sub-subsystem are the following: * ``module``: required parameter. It is the python module name that Ceilometer has to load to use the authentication object when executing requests against the API. For instance, if one wants to create a pollster to gather data from RadosGW, he/she can use the ``awsauth`` python module. * ``authentication_object``: mandatory parameter. The name of the class that we can find in the ``module`` that Ceilometer will use as the authentication object in the request. For instance, when using the ``awsauth`` python module to gather data from RadosGW, one can use the authentication object as ``S3Auth``. * ``authentication_parameters``: optional parameter. It is a comma separated value that will be used to instantiate the ``authentication_object``. For instance, if we gather data from RadosGW, and we use the ``S3Auth`` class, the ``authentication_parameters`` can be configured as ``, rados_gw_secret_key, rados_gw_host_name``. * ``barbican_secret_id``: optional parameter. The Barbican secret ID, from which, Ceilometer can retrieve the comma separated values of the ``authentication_parameters``. As follows we present an example on how to convert the hard-coded pollster for `radosgw.api.request` metric to the dynamic pollster model: .. 
code-block:: yaml --- - name: "dynamic.radosgw.api.request" sample_type: "gauge" unit: "request" value_attribute: "total.ops" url_path: "http://rgw.service.stage.i.ewcs.ch/admin/usage" module: "awsauth" authentication_object: "S3Auth" authentication_parameters: ",," user_id_attribute: "user" project_id_attribute: "user" resource_id_attribute: "user" response_entries_key: "summary" We can take that example a bit further, and instead of gathering the `total .ops` variable, which counts for all the requests (even the unsuccessful ones), we can use the `successful_ops`. .. code-block:: yaml --- - name: "dynamic.radosgw.api.request.successful_ops" sample_type: "gauge" unit: "request" value_attribute: "total.successful_ops" url_path: "http://rgw.service.stage.i.ewcs.ch/admin/usage" module: "awsauth" authentication_object: "S3Auth" authentication_parameters: ", ," user_id_attribute: "user" project_id_attribute: "user" resource_id_attribute: "user" response_entries_key: "summary" The dynamic pollsters system configuration (for local host commands) -------------------------------------------------------------------- The dynamic pollster system can also be used for local host commands, these commands must be installed in the system that is running the Ceilometer compute agent. To configure local hosts commands, one can use all but two attributes of the Dynamic pollster system. The attributes that are not supported are the ``endpoint_type`` and ``url_path``. The dynamic pollster system for local host commands is activated automatically when one uses the configuration ``host_command``. The extra parameter (in addition to the original ones) that is available when using the local host commands dynamic pollster sub-subsystem is the following: * ``host_command``: required parameter. It is the host command that will be executed in the same host the Ceilometer dynamic pollster agent is running. 
The output of the command will be processed by the pollster and stored in the configured backend. As follows we present an example on how to use the local host command: .. code-block:: yaml --- - name: "dynamic.host.command" sample_type: "gauge" unit: "request" value_attribute: "value" response_entries_key: "test" host_command: "echo 'id1_uid1_pid1meta-data-to-store1'" metadata_fields: - "meta" response_handlers: - xml To execute multi page host commands, the `next_sample_url_attribute` must generate the next sample command, like the following example: .. code-block:: yaml --- - name: "dynamic.s3.objects.size" sample_type: "gauge" unit: "request" value_attribute: "Size" project_id_attribute: "Owner.ID" user_id_attribute: "Owner.ID" resource_id_attribute: "Key" response_entries_key: "Contents" host_command: "aws s3api list-objects" next_sample_url_attribute: NextToken | 'aws s3api list-objects --starting-token "' + value + '"' Operations on extracted attributes ---------------------------------- The dynamic pollster system can execute Python operations to transform the attributes that are extracted from the JSON response that the system handles. One example of use case is the RadosGW that uses as the username (which is normally mapped to the Gnocchi resource_id). With this feature (operations on extracted attributes), one can create configurations in the dynamic pollster to clean/normalize that variable. It is as simple as defining `resource_id_attribute: "user | value.split('$')[0].strip()"` The operations are separated by `|` symbol. The first element of the expression is the key to be retrieved from the JSON object. The other elements are operations that can be applied to the `value` variable. The value variable is the variable we use to hold the data being extracted. 
The previous example can be rewritten as: `resource_id_attribute: "user | value.split ('$') | value[0] | value.strip()"` As follows we present a complete configuration for a RadosGW dynamic pollster that is removing the `$` symbol, and getting the first part of the String. .. code-block:: yaml --- - name: "dynamic.radosgw.api.request.successful_ops" sample_type: "gauge" unit: "request" value_attribute: "total.successful_ops" url_path: "http://rgw.service.stage.i.ewcs.ch/admin/usage" module: "awsauth" authentication_object: "S3Auth" authentication_parameters: ",," user_id_attribute: "user | value.split ('$') | value[0]" project_id_attribute: "user | value.split ('$') | value[0]" resource_id_attribute: "user | value.split ('$') | value[0]" response_entries_key: "summary" The Dynamic pollster configuration options that support this feature are the following: * value_attribute * response_entries_key * user_id_attribute * project_id_attribute * resource_id_attribute Multi metric dynamic pollsters (handling attribute values with list of objects) ------------------------------------------------------------------------------- The initial idea for this feature comes from the `categories` fields that we can find in the `summary` object of the RadosGW API. Each user has a `categories` attribute in the response; in the `categories` list, we can find the object that presents in a granular fashion the consumption of different RadosGW API operations such as GET, PUT, POST, and may others. As follows we present an example of such a JSON response. .. 
code-block:: json { "entries": [ { "buckets": [ { "bucket": "", "categories": [ { "bytes_received": 0, "bytes_sent": 40, "category": "list_buckets", "ops": 2, "successful_ops": 2 } ], "epoch": 1572969600, "owner": "user", "time": "2019-11-21 00:00:00.000000Z" }, { "bucket": "-", "categories": [ { "bytes_received": 0, "bytes_sent": 0, "category": "get_obj", "ops": 1, "successful_ops": 0 } ], "epoch": 1572969600, "owner": "someOtherUser", "time": "2019-11-21 00:00:00.000000Z" } ] } ] "summary": [ { "categories": [ { "bytes_received": 0, "bytes_sent": 0, "category": "create_bucket", "ops": 2, "successful_ops": 2 }, { "bytes_received": 0, "bytes_sent": 2120428, "category": "get_obj", "ops": 46, "successful_ops": 46 }, { "bytes_received": 0, "bytes_sent": 21484, "category": "list_bucket", "ops": 8, "successful_ops": 8 }, { "bytes_received": 6889056, "bytes_sent": 0, "category": "put_obj", "ops": 46, "successful_ops": 46 } ], "total": { "bytes_received": 6889056, "bytes_sent": 2141912, "ops": 102, "successful_ops": 102 }, "user": "user" }, { "categories": [ { "bytes_received": 0, "bytes_sent": 0, "category": "create_bucket", "ops": 1, "successful_ops": 1 }, { "bytes_received": 0, "bytes_sent": 0, "category": "delete_obj", "ops": 23, "successful_ops": 23 }, { "bytes_received": 0, "bytes_sent": 5371, "category": "list_bucket", "ops": 2, "successful_ops": 2 }, { "bytes_received": 3444350, "bytes_sent": 0, "category": "put_obj", "ops": 23, "successful_ops": 23 } ], "total": { "bytes_received": 3444350, "bytes_sent": 5371, "ops": 49, "successful_ops": 49 }, "user": "someOtherUser" } ] } In that context, and having in mind that we have APIs with similar data structures, we developed an extension for the dynamic pollster that enables multi-metric processing for a single pollster. It works as follows. The pollster name will contain a placeholder for the variable that identifies the "submetric". E.g. `dynamic.radosgw.api.request.{category}`. 
The placeholder `{category}` indicates the object's attribute that is in the list of objects that we use to load the sub metric name. Then, we must use a special notation in the `value_attribute` configuration to indicate that we are dealing with a list of objects. This is achieved via `[]` (brackets); for instance, in the `dynamic.radosgw.api.request.{category}`, we can use `[categories].ops` as the `value_attribute`. This indicates that the value we retrieve is a list of objects, and when the dynamic pollster processes it, we want it (the pollster) to load the `ops` value for the sub metrics being generated. Examples on how to create multi-metric pollster to handle data from RadosGW API are presented as follows: .. code-block:: yaml --- - name: "dynamic.radosgw.api.request.{category}" sample_type: "gauge" unit: "request" value_attribute: "[categories].ops" url_path: "http://rgw.service.stage.i.ewcs.ch/admin/usage" module: "awsauth" authentication_object: "S3Auth" authentication_parameters: ", ," user_id_attribute: "user | value.split('$')[0]" project_id_attribute: "user | value.split('$') | value[0]" resource_id_attribute: "user | value.split('$') | value[0]" response_entries_key: "summary" - name: "dynamic.radosgw.api.request.successful_ops.{category}" sample_type: "gauge" unit: "request" value_attribute: "[categories].successful_ops" url_path: "http://rgw.service.stage.i.ewcs.ch/admin/usage" module: "awsauth" authentication_object: "S3Auth" authentication_parameters: ", ," user_id_attribute: "user | value.split('$')[0]" project_id_attribute: "user | value.split('$') | value[0]" resource_id_attribute: "user | value.split('$') | value[0]" response_entries_key: "summary" - name: "dynamic.radosgw.api.bytes_sent.{category}" sample_type: "gauge" unit: "request" value_attribute: "[categories].bytes_sent" url_path: "http://rgw.service.stage.i.ewcs.ch/admin/usage" module: "awsauth" authentication_object: "S3Auth" authentication_parameters: ", ," user_id_attribute: "user 
| value.split('$')[0]" project_id_attribute: "user | value.split('$') | value[0]" resource_id_attribute: "user | value.split('$') | value[0]" response_entries_key: "summary" - name: "dynamic.radosgw.api.bytes_received.{category}" sample_type: "gauge" unit: "request" value_attribute: "[categories].bytes_received" url_path: "http://rgw.service.stage.i.ewcs.ch/admin/usage" module: "awsauth" authentication_object: "S3Auth" authentication_parameters: ", ," user_id_attribute: "user | value.split('$')[0]" project_id_attribute: "user | value.split('$') | value[0]" resource_id_attribute: "user | value.split('$') | value[0]" response_entries_key: "summary" Handling linked API responses ----------------------------- If the consumed API returns a linked response which contains a link to the next response set (page), the Dynamic pollsters can be configured to follow these links and join all linked responses into a single one. To enable this behavior the operator will need to configure the parameter `next_sample_url_attribute` that must contain a mapper to the response attribute that contains the link to the next response page. This parameter also supports operations like the others `*_attribute` dynamic pollster's parameters. Examples on how to create a pollster to handle linked API responses are presented as follows: - Example of a simple linked response: - API response: .. code-block:: json { "server_link": "http://test.com/v1/test-volumes/marker=c3", "servers": [ { "volume": [ { "name": "a", "tmp": "ra" } ], "id": 1, "name": "a1" }, { "volume": [ { "name": "b", "tmp": "rb" } ], "id": 2, "name": "b2" }, { "volume": [ { "name": "c", "tmp": "rc" } ], "id": 3, "name": "c3" } ] } - Pollster configuration: .. 
code-block:: yaml --- - name: "dynamic.linked.response" sample_type: "gauge" unit: "request" value_attribute: "[volume].tmp" url_path: "v1/test-volumes" response_entries_key: "servers" next_sample_url_attribute: "server_link" - Example of a complex linked response: - API response: .. code-block:: json { "server_link": [ { "href": "http://test.com/v1/test-volumes/marker=c3", "rel": "next" }, { "href": "http://test.com/v1/test-volumes/marker=b1", "rel": "prev" } ], "servers": [ { "volume": [ { "name": "a", "tmp": "ra" } ], "id": 1, "name": "a1" }, { "volume": [ { "name": "b", "tmp": "rb" } ], "id": 2, "name": "b2" }, { "volume": [ { "name": "c", "tmp": "rc" } ], "id": 3, "name": "c3" } ] } - Pollster configuration: .. code-block:: yaml --- - name: "dynamic.linked.response" sample_type: "gauge" unit: "request" value_attribute: "[volume].tmp" url_path: "v1/test-volumes" response_entries_key: "servers" next_sample_url_attribute: "server_link | filter(lambda v: v.get('rel') == 'next', value) | list(value) | value[0] | value.get('href')" OpenStack Dynamic pollsters metadata enrichment with other OpenStack API's data ------------------------------------------------------------------------------- Sometimes we want/need to add/gather extra metadata for the samples being handled by Ceilometer Dynamic pollsters, such as the project name, domain id, domain name, and other metadata that are not always accessible via the OpenStack component where the sample is gathered. For instance, when gathering the status of virtual machines (VMs) from Nova, we only have the `tenant_id`, which must be used as the `project_id`. However, for billing and later invoicing one might need/want the project name, domain id, and other metadata that are available in Keystone (and maybe some others that are scattered over other components). To achieve that, one can use the OpenStack metadata enrichment option. 
As follows we present an example that shows a dynamic pollster configuration to gather virtual machine (VM) status, and to enrich the data pushed to the storage backend (e.g. Gnocchi) with project name, domain ID, and domain name. .. code-block:: yaml --- - name: "dynamic_pollster.instance.status" next_sample_url_attribute: "server_links | filter(lambda v: v.get('rel') == 'next', value) | list(value) | value[0] | value.get('href') | value.replace('http:', 'https:')" sample_type: "gauge" unit: "server" value_attribute: "status" endpoint_type: "compute" url_path: "/v2.1/servers/detail?all_tenants=true" headers: "Openstack-API-Version": "compute 2.65" project_id_attribute: "tenant_id" metadata_fields: - "status" - "name" - "flavor.vcpus" - "flavor.ram" - "flavor.disk" - "flavor.ephemeral" - "flavor.swap" - "flavor.original_name" - "image | value or { 'id': '' } | value['id']" - "OS-EXT-AZ:availability_zone" - "OS-EXT-SRV-ATTR:host" - "user_id" - "tags | ','.join(value)" - "locked" value_mapping: ACTIVE: "1" default_value: 0 metadata_mapping: "OS-EXT-AZ:availability_zone": "dynamic_availability_zone" "OS-EXT-SRV-ATTR:host": "dynamic_host" "flavor.original_name": "dynamic_flavor_name" "flavor.vcpus": "dynamic_flavor_vcpus" "flavor.ram": "dynamic_flavor_ram" "flavor.disk": "dynamic_flavor_disk" "flavor.ephemeral": "dynamic_flavor_ephemeral" "flavor.swap": "dynamic_flavor_swap" "image | value or { 'id': '' } | value['id']": "dynamic_image_ref" "name": "dynamic_display_name" "locked": "dynamic_locked" "tags | ','.join(value)": "dynamic_tags" extra_metadata_fields_cache_seconds: 3600 extra_metadata_fields_skip: - value: '1' metadata: dynamic_flavor_vcpus: 4 - value: '1' metadata: dynamic_flavor_vcpus: 2 extra_metadata_fields: - name: "project_name" endpoint_type: "identity" url_path: "'/v3/projects/' + str(sample['project_id'])" headers: "Openstack-API-Version": "identity latest" value: "name" extra_metadata_fields_cache_seconds: 1800 # overriding the default cache policy 
metadata_fields: - id - name: "domain_id" endpoint_type: "identity" url_path: "'/v3/projects/' + str(sample['project_id'])" headers: "Openstack-API-Version": "identity latest" value: "domain_id" metadata_fields: - id - name: "domain_name" endpoint_type: "identity" url_path: "'/v3/domains/' + str(extra_metadata_captured['domain_id'])" headers: "Openstack-API-Version": "identity latest" value: "name" metadata_fields: - id - name: "operating-system" host_command: "'get-vm --vm-name ' + str(extra_metadata_by_name['project_name']['metadata']['id'])" value: "os" The above example can be used to gather and persist in the backend the status of VMs. It will persist `1` in the backend as a measure for every collecting period if the VM's status is `ACTIVE`, and `0` otherwise. This is quite useful to create hashmap rating rules for running VMs in CloudKitty. Then, to enrich the resource in the storage backend, we are adding extra metadata that are collected in Keystone and in the local host via the `extra_metadata_fields` options. If you have multiple `extra_metadata_fields` defining the same `metadata_field`, the last non-`None` metadata value will be used. To operate values in the `extra_metadata_fields`, you can access 3 local variables: * ``sample``: it is a dictionary which holds the current data of the root sample. The root sample is the final sample that will be persisted in the configured storage backend. * ``extra_metadata_captured``: it is a dictionary which holds the current data of all `extra_metadata_fields` processed before this one. If you have multiple `extra_metadata_fields` defining the same `metadata_field`, the last non-`None` metadata value will be used. * ``extra_metadata_by_name``: it is a dictionary which holds the data of all `extra_metadata_fields` processed before this one. No data is overwritten in this variable. 
To access a specific `extra_metadata_field` using this variable, you can do `extra_metadata_by_name['']['value']` to get its value, or `extra_metadata_by_name['']['metadata']['']` to get its metadata. The metadata enrichment feature has the following options: * ``extra_metadata_fields_cache_seconds``: optional parameter. Defines the extra metadata request's response cache. Some requests, such as the ones executed against Keystone to retrieve extra metadata, are rather static. Therefore, one does not need to constantly re-execute the request. That is the reason why we cache the response of such requests. By default the cache time to live (TTL) for responses is `3600` seconds. However, this value can be increased or decreased. * ``extra_metadata_fields``: optional parameter. This option is a list of objects or a single one, where each one of its elements is a dynamic pollster configuration set. Each one of the extra metadata definitions can have the same options defined in the dynamic pollsters, including the `extra_metadata_fields` option, so this option is a multi-level option. When defined, the result of the collected data will be merged in the final sample resource metadata. If some of the required dynamic pollster configuration is not set in the `extra_metadata_fields`, the parent pollster configuration will be used, except for the `name`. * ``extra_metadata_fields_skip``: optional parameter. This option is a list of objects or a single one, where each one of its elements is a set of key/value pairs. When defined, if any set of key/value pairs is a subset of the collected sample, then the extra_metadata_fields gathering of this sample will be skipped. 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/admin/telemetry-events.rst0000664000175100017510000001456415033033467023212 0ustar00mylesmyles====== Events ====== In addition to meters, the Telemetry service collects events triggered within an OpenStack environment. This section provides a brief summary of the events format in the Telemetry service. While a sample represents a single, numeric datapoint within a time-series, an event is a broader concept that represents the state of a resource at a point in time. The state may be described using various data types including non-numeric data such as an instance's flavor. In general, events represent any action made in the OpenStack system. Event configuration ~~~~~~~~~~~~~~~~~~~ By default, ceilometer builds event data from the messages it receives from other OpenStack services. .. note:: In releases older than Ocata, it is advisable to set ``disable_non_metric_meters`` to ``True`` when enabling events in the Telemetry service. The Telemetry service historically represented events as metering data, which may create duplication of data if both events and non-metric meters are enabled. Event structure ~~~~~~~~~~~~~~~ Events captured by the Telemetry service are represented by five key attributes: event\_type A dotted string defining what event occurred such as ``"compute.instance.resize.start"``. message\_id A UUID for the event. generated A timestamp of when the event occurred in the system. traits A flat mapping of key-value pairs which describe the event. The event's traits contain most of the details of the event. Traits are typed, and can be strings, integers, floats, or datetimes. raw Mainly for auditing purpose, the full event message can be stored (unindexed) for future evaluation. 
Event indexing ~~~~~~~~~~~~~~ The general philosophy of notifications in OpenStack is to emit any and all data someone might need, and let the consumer filter out what they are not interested in. In order to make processing simpler and more efficient, the notifications are stored and processed within Ceilometer as events. The notification payload, which can be an arbitrarily complex JSON data structure, is converted to a flat set of key-value pairs. This conversion is specified by a config file. .. note:: The event format is meant for efficient processing and querying. Storage of complete notifications for auditing purposes can be enabled by configuring ``store_raw`` option. Event conversion ---------------- The conversion from notifications to events is driven by a configuration file defined by the ``definitions_cfg_file`` in the ``ceilometer.conf`` configuration file. This includes descriptions of how to map fields in the notification body to Traits, and optional plug-ins for doing any programmatic translations (splitting a string, forcing case). The mapping of notifications to events is defined per event\_type, which can be wildcarded. Traits are added to events if the corresponding fields in the notification exist and are non-null. .. note:: The default definition file included with the Telemetry service contains a list of known notifications and useful traits. The mappings provided can be modified to include more or less data according to user requirements. If the definitions file is not present, a warning will be logged, but an empty set of definitions will be assumed. By default, any notifications that do not have a corresponding event definition in the definitions file will be converted to events with a set of minimal traits. This can be changed by setting the option ``drop_unmatched_notifications`` in the ``ceilometer.conf`` file. If this is set to ``True``, any unmapped notifications will be dropped. 
The basic set of traits (all are TEXT type) that will be added to all events if the notification has the relevant data are: service (notification's publisher), tenant\_id, and request\_id. These do not have to be specified in the event definition, they are automatically added, but their definitions can be overridden for a given event\_type. Event definitions format ------------------------ The event definitions file is in YAML format. It consists of a list of event definitions, which are mappings. Order is significant, the list of definitions is scanned in reverse order to find a definition which matches the notification's event\_type. That definition will be used to generate the event. The reverse ordering is done because it is common to want to have a more general wildcarded definition (such as ``compute.instance.*``) with a set of traits common to all of those events, with a few more specific event definitions afterwards that have all of the above traits, plus a few more. Each event definition is a mapping with two keys: event\_type This is a list (or a string, which will be taken as a 1 element list) of event\_types this definition will handle. These can be wildcarded with unix shell glob syntax. An exclusion listing (starting with a ``!``) will exclude any types listed from matching. If only exclusions are listed, the definition will match anything not matching the exclusions. traits This is a mapping, the keys are the trait names, and the values are trait definitions. Each trait definition is a mapping with the following keys: fields A path specification for the field(s) in the notification you wish to extract for this trait. Specifications can be written to match multiple possible fields. By default the value will be the first such field. The paths can be specified with a dot syntax (``payload.host``). Square bracket syntax (``payload[host]``) is also supported. 
In either case, if the key for the field you are looking for contains special characters, like ``.``, it will need to be quoted (with double or single quotes): ``payload.image_meta.`org.openstack__1__architecture```. The syntax used for the field specification is a variant of `JSONPath `__ type (Optional) The data type for this trait. Valid options are: ``text``, ``int``, ``float``, and ``datetime``. Defaults to ``text`` if not specified. plugin (Optional) Used to execute simple programmatic conversions on the value in a notification field. Event delivery to external sinks -------------------------------- You can configure the Telemetry service to deliver the events into external sinks. These sinks are configurable in the ``/etc/ceilometer/event_pipeline.yaml`` file. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/admin/telemetry-measurements.rst0000664000175100017510000011330315033033467024405 0ustar00mylesmyles.. _telemetry-measurements: ============ Measurements ============ The Telemetry service collects meters within an OpenStack deployment. This section provides a brief summary about meters format and origin and also contains the list of available meters. Telemetry collects meters by polling the infrastructure elements and also by consuming the notifications emitted by other OpenStack services. For more information about the polling mechanism and notifications see :ref:`telemetry-data-collection`. There are several meters which are collected by polling and by consuming. The origin for each meter is listed in the tables below. .. note:: You may need to configure Telemetry or other OpenStack services in order to be able to collect all the samples you need. For further information about configuration requirements see the `Telemetry chapter `__ in the Installation Tutorials and Guides. 
Telemetry uses the following meter types: +--------------+--------------------------------------------------------------+ | Type | Description | +==============+==============================================================+ | Cumulative | Increasing over time (instance hours) | +--------------+--------------------------------------------------------------+ | Delta | Changing over time (bandwidth) | +--------------+--------------------------------------------------------------+ | Gauge | Discrete items (floating IPs, image uploads) and fluctuating | | | values (disk I/O) | +--------------+--------------------------------------------------------------+ | Telemetry provides the possibility to store metadata for samples. This metadata can be extended for OpenStack Compute and OpenStack Object Storage. In order to add additional metadata information to OpenStack Compute you have two options to choose from. The first one is to specify them when you boot up a new instance. The additional information will be stored with the sample in the form of ``resource_metadata.user_metadata.*``. The new field should be defined by using the prefix ``metering.``. The modified boot command look like the following: .. code-block:: console $ openstack server create --property metering.custom_metadata=a_value my_vm The other option is to set the ``reserved_metadata_keys`` to the list of metadata keys that you would like to be included in ``resource_metadata`` of the instance related samples that are collected for OpenStack Compute. This option is included in the ``DEFAULT`` section of the ``ceilometer.conf`` configuration file. You might also specify headers whose values will be stored along with the sample data of OpenStack Object Storage. The additional information is also stored under ``resource_metadata``. The format of the new field is ``resource_metadata.http_header_$name``, where ``$name`` is the name of the header with ``-`` replaced by ``_``. 
For specifying the new header, you need to set ``metadata_headers`` option under the ``[filter:ceilometer]`` section in ``proxy-server.conf`` under the ``swift`` folder. You can use this additional data for instance to distinguish external and internal users. Measurements are grouped by services which are polled by Telemetry or emit notifications that this service consumes. .. _telemetry-compute-meters: OpenStack Compute ~~~~~~~~~~~~~~~~~ The following meters are collected for OpenStack Compute. +-----------+-------+------+----------+----------+---------+------------------+ | Name | Type | Unit | Resource | Origin | Support | Note | +===========+=======+======+==========+==========+=========+==================+ | **Meters added in the Mitaka release or earlier** | +-----------+-------+------+----------+----------+---------+------------------+ | memory | Gauge | MB | instance | Notific\ | Libvirt | Volume of RAM | | | | | ID | ation | | allocated to the | | | | | | | | instance | +-----------+-------+------+----------+----------+---------+------------------+ | memory.\ | Gauge | MB | instance | Pollster | Libvirt,| Volume of RAM | | usage | | | ID | | | used by the inst\| | | | | | | | ance from the | | | | | | | | amount of its | | | | | | | | allocated memory | +-----------+-------+------+----------+----------+---------+------------------+ | memory.r\ | Gauge | MB | instance | Pollster | Libvirt | Volume of RAM u\ | | esident | | | ID | | | sed by the inst\ | | | | | | | | ance on the phy\ | | | | | | | | sical machine | +-----------+-------+------+----------+----------+---------+------------------+ | cpu | Cumu\ | ns | instance | Pollster | Libvirt | CPU time used | | | lative| | ID | | | | +-----------+-------+------+----------+----------+---------+------------------+ | vcpus | Gauge | vcpu | instance | Notific\ | Libvirt | Number of virtual| | | | | ID | ation | | CPUs allocated to| | | | | | | | the instance | 
+-----------+-------+------+----------+----------+---------+------------------+ | disk.dev\ | Cumu\ | req\ | disk ID | Pollster | Libvirt | Number of read | | ice.read\ | lative| uest | | | | requests | | .requests | | | | | | | +-----------+-------+------+----------+----------+---------+------------------+ | disk.dev\ | Cumu\ | req\ | disk ID | Pollster | Libvirt | Number of write | | ice.write\| lative| uest | | | | requests | | .requests | | | | | | | +-----------+-------+------+----------+----------+---------+------------------+ | disk.dev\ | Cumu\ | B | disk ID | Pollster | Libvirt | Volume of reads | | ice.read\ | lative| | | | | | | .bytes | | | | | | | +-----------+-------+------+----------+----------+---------+------------------+ | disk.dev\ | Cumu\ | B | disk ID | Pollster | Libvirt | Volume of writes | | ice.write\| lative| | | | | | | .bytes | | | | | | | +-----------+-------+------+----------+----------+---------+------------------+ | disk.root\| Gauge | GB | instance | Notific\ | Libvirt | Size of root disk| | .size | | | ID | ation, \ | | | | | | | | Pollster | | | +-----------+-------+------+----------+----------+---------+------------------+ | disk.ephe\| Gauge | GB | instance | Notific\ | Libvirt | Size of ephemeral| | meral.size| | | ID | ation, \ | | disk | | | | | | Pollster | | | +-----------+-------+------+----------+----------+---------+------------------+ | disk.dev\ | Gauge | B | disk ID | Pollster | Libvirt | The amount of d\ | | ice.capa\ | | | | | | isk per device | | city | | | | | | that the instan\ | | | | | | | | ce can see | +-----------+-------+------+----------+----------+---------+------------------+ | disk.dev\ | Gauge | B | disk ID | Pollster | Libvirt | The amount of d\ | | ice.allo\ | | | | | | isk per device | | cation | | | | | | occupied by the | | | | | | | | instance on th\ | | | | | | | | e host machine | +-----------+-------+------+----------+----------+---------+------------------+ | disk.dev\ | Gauge | B | disk ID | 
Pollster | Libvirt | The physical si\ | | ice.usag\ | | | | | | ze in bytes of | | e | | | | | | the image conta\ | | | | | | | | iner on the hos\ | | | | | | | | t per device | +-----------+-------+------+----------+----------+---------+------------------+ | network.\ | Cumu\ | B | interface| Pollster | Libvirt | Number of | | incoming.\| lative| | ID | | | incoming bytes | | bytes | | | | | | | +-----------+-------+------+----------+----------+---------+------------------+ | network.\ | Cumu\ | B | interface| Pollster | Libvirt | Number of | | outgoing\ | lative| | ID | | | outgoing bytes | | .bytes | | | | | | | +-----------+-------+------+----------+----------+---------+------------------+ | network.\ | Cumu\ | pac\ | interface| Pollster | Libvirt | Number of | | incoming\ | lative| ket | ID | | | incoming packets | | .packets | | | | | | | +-----------+-------+------+----------+----------+---------+------------------+ | network.\ | Cumu\ | pac\ | interface| Pollster | Libvirt | Number of | | outgoing\ | lative| ket | ID | | | outgoing packets | | .packets | | | | | | | +-----------+-------+------+----------+----------+---------+------------------+ | **Meters added in the Newton release** | +-----------+-------+------+----------+----------+---------+------------------+ | perf.cpu\ | Gauge | cyc\ | instance | Pollster | Libvirt | the number of c\ | | .cycles | | le | ID | | | pu cycles one i\ | | | | | | | | nstruction needs | +-----------+-------+------+----------+----------+---------+------------------+ | perf.ins\ | Gauge | inst\| instance | Pollster | Libvirt | the count of in\ | | tructions | | ruct\| ID | | | structions | | | | ion | | | | | +-----------+-------+------+----------+----------+---------+------------------+ | perf.cac\ | Gauge | cou\ | instance | Pollster | Libvirt | the count of ca\ | | he.refer\ | | nt | ID | | | che hits | | ences | | | | | | | +-----------+-------+------+----------+----------+---------+------------------+ | perf.cac\ | 
Gauge | cou\ | instance | Pollster | Libvirt | the count of ca\ | | he.misses | | nt | ID | | | che misses | +-----------+-------+------+----------+----------+---------+------------------+ | **Meters added in the Ocata release** | +-----------+-------+------+----------+----------+---------+------------------+ | network.\ | Cumul\| pack\| interface| Pollster | Libvirt | Number of | | incoming\ | ative | et | ID | | | incoming dropped | | .packets\ | | | | | | packets | | .drop | | | | | | | +-----------+-------+------+----------+----------+---------+------------------+ | network.\ | Cumul\| pack\| interface| Pollster | Libvirt | Number of | | outgoing\ | ative | et | ID | | | outgoing dropped | | .packets\ | | | | | | packets | | .drop | | | | | | | +-----------+-------+------+----------+----------+---------+------------------+ | network.\ | Cumul\| pack\| interface| Pollster | Libvirt | Number of | | incoming\ | ative | et | ID | | | incoming error | | .packets\ | | | | | | packets | | .error | | | | | | | +-----------+-------+------+----------+----------+---------+------------------+ | network.\ | Cumul\| pack\| interface| Pollster | Libvirt | Number of | | outgoing\ | ative | et | ID | | | outgoing error | | .packets\ | | | | | | packets | | .error | | | | | | | +-----------+-------+------+----------+----------+---------+------------------+ | **Meters added in the Pike release** | +-----------+-------+------+----------+----------+---------+------------------+ | memory.\ | Cumul\| | | | | | | swap.in | ative | MB | instance | Pollster | Libvirt | Memory swap in | | | | | ID | | | | +-----------+-------+------+----------+----------+---------+------------------+ | memory.\ | Cumul\| | | | | | | swap.out | ative | MB | instance | Pollster | Libvirt | Memory swap out | | | | | ID | | | | +-----------+-------+------+----------+----------+---------+------------------+ | **Meters added in the Queens release** | 
+-----------+-------+------+----------+----------+---------+------------------+ | disk.devi\| Cumul\| | | | | Total time read | | ce.read.l\| ative | ns | Disk ID | Pollster | Libvirt | operations have | | atency | | | | | | taken | +-----------+-------+------+----------+----------+---------+------------------+ | disk.devi\| Cumul\| | | | | Total time write | | ce.write.\| ative | ns | Disk ID | Pollster | Libvirt | operations have | | latency | | | | | | taken | +-----------+-------+------+----------+----------+---------+------------------+ | **Meters added in the Epoxy release** | +-----------+-------+------+----------+----------+---------+------------------+ | power.sta\| Gauge | state| instance | Pollster | Libvirt | virDomainState | | te | | | ID | | | of the VM | +-----------+-------+------+----------+----------+---------+------------------+ .. note:: To enable the libvirt ``memory.usage`` support, you need to install libvirt version 1.1.1+, QEMU version 1.5+, and you also need to prepare suitable balloon driver in the image. It is applicable particularly for Windows guests, most modern Linux distributions already have it built in. Telemetry is not able to fetch the ``memory.usage`` samples without the image balloon driver. .. note:: To enable libvirt ``disk.*`` support when running on RBD-backed shared storage, you need to install libvirt version 1.2.16+. OpenStack Compute is capable of collecting ``CPU`` related meters from the compute host machines. In order to use that you need to set the ``compute_monitors`` option to ``cpu.virt_driver`` in the ``nova.conf`` configuration file. For further information see the Compute configuration section in the `Compute chapter `__ of the OpenStack Configuration Reference. 
The following host machine related meters are collected for OpenStack Compute: +---------------------+-------+------+----------+-------------+---------------+ | Name | Type | Unit | Resource | Origin | Note | +=====================+=======+======+==========+=============+===============+ | **Meters added in the Mitaka release or earlier** | +---------------------+-------+------+----------+-------------+---------------+ | compute.node.cpu.\ | Gauge | MHz | host ID | Notification| CPU frequency | | frequency | | | | | | +---------------------+-------+------+----------+-------------+---------------+ | compute.node.cpu.\ | Cumu\ | ns | host ID | Notification| CPU kernel | | kernel.time | lative| | | | time | +---------------------+-------+------+----------+-------------+---------------+ | compute.node.cpu.\ | Cumu\ | ns | host ID | Notification| CPU idle time | | idle.time | lative| | | | | +---------------------+-------+------+----------+-------------+---------------+ | compute.node.cpu.\ | Cumu\ | ns | host ID | Notification| CPU user mode | | user.time | lative| | | | time | +---------------------+-------+------+----------+-------------+---------------+ | compute.node.cpu.\ | Cumu\ | ns | host ID | Notification| CPU I/O wait | | iowait.time | lative| | | | time | +---------------------+-------+------+----------+-------------+---------------+ | compute.node.cpu.\ | Gauge | % | host ID | Notification| CPU kernel | | kernel.percent | | | | | percentage | +---------------------+-------+------+----------+-------------+---------------+ | compute.node.cpu.\ | Gauge | % | host ID | Notification| CPU idle | | idle.percent | | | | | percentage | +---------------------+-------+------+----------+-------------+---------------+ | compute.node.cpu.\ | Gauge | % | host ID | Notification| CPU user mode | | user.percent | | | | | percentage | +---------------------+-------+------+----------+-------------+---------------+ | compute.node.cpu.\ | Gauge | % | host ID | Notification| CPU 
I/O wait | | iowait.percent | | | | | percentage | +---------------------+-------+------+----------+-------------+---------------+ | compute.node.cpu.\ | Gauge | % | host ID | Notification| CPU | | percent | | | | | utilization | +---------------------+-------+------+----------+-------------+---------------+ .. _telemetry-bare-metal-service: IPMI meters ~~~~~~~~~~~ Telemetry captures notifications that are emitted by the Bare metal service. The source of the notifications are IPMI sensors that collect data from the host machine. Alternatively, IPMI meters can be generated by deploying the ceilometer-agent-ipmi on each IPMI-capable node. For further information about the IPMI agent see :ref:`telemetry-ipmi-agent`. .. warning:: To avoid duplication of metering data and unnecessary load on the IPMI interface, do not deploy the IPMI agent on nodes that are managed by the Bare metal service and keep the ``conductor.send_sensor_data`` option set to ``False`` in the ``ironic.conf`` configuration file. 
The following IPMI sensor meters are recorded: +------------------+-------+------+----------+-------------+------------------+ | Name | Type | Unit | Resource | Origin | Note | +==================+=======+======+==========+=============+==================+ | **Meters added in the Mitaka release or earlier** | +------------------+-------+------+----------+-------------+------------------+ | hardware.ipmi.fan| Gauge | RPM | fan | Notificatio\| Fan rounds per | | | | | sensor | n, Pollster | minute (RPM) | +------------------+-------+------+----------+-------------+------------------+ | hardware.ipmi\ | Gauge | C | temper\ | Notificatio\| Temperature read\| | .temperature | | | ature | n, Pollster | ing from sensor | | | | | sensor | | | +------------------+-------+------+----------+-------------+------------------+ | hardware.ipmi\ | Gauge | A | current | Notificatio\| Current reading | | .current | | | sensor | n, Pollster | from sensor | +------------------+-------+------+----------+-------------+------------------+ | hardware.ipmi\ | Gauge | V | voltage | Notificatio\| Voltage reading | | .voltage | | | sensor | n, Pollster | from sensor | +------------------+-------+------+----------+-------------+------------------+ .. note:: The sensor data is not available in the Bare metal service by default. To enable the meters and configure this module to emit notifications about the measured values see the `Installation Guide `__ for the Bare metal service. 
OpenStack Image service ~~~~~~~~~~~~~~~~~~~~~~~ The following meters are collected for OpenStack Image service: +--------------------+--------+------+----------+----------+------------------+ | Name | Type | Unit | Resource | Origin | Note | +====================+========+======+==========+==========+==================+ | **Meters added in the Mitaka release or earlier** | +--------------------+--------+------+----------+----------+------------------+ | image.size | Gauge | B | image ID | Notifica\| Size of the upl\ | | | | | | tion, Po\| oaded image | | | | | | llster | | +--------------------+--------+------+----------+----------+------------------+ | image.download | Delta | B | image ID | Notifica\| Image is downlo\ | | | | | | tion | aded | +--------------------+--------+------+----------+----------+------------------+ | image.serve | Delta | B | image ID | Notifica\| Image is served | | | | | | tion | out | +--------------------+--------+------+----------+----------+------------------+ OpenStack Block Storage ~~~~~~~~~~~~~~~~~~~~~~~ The following meters are collected for OpenStack Block Storage: +--------------------+-------+--------+----------+----------+-----------------+ | Name | Type | Unit | Resource | Origin | Note | +====================+=======+========+==========+==========+=================+ | **Meters added in the Mitaka release or earlier** | +--------------------+-------+--------+----------+----------+-----------------+ | volume.size | Gauge | GB | volume ID| Notifica\| Size of the vol\| | | | | | tion | ume | +--------------------+-------+--------+----------+----------+-----------------+ | snapshot.size | Gauge | GB | snapshot | Notifica\| Size of the sna\| | | | | ID | tion | pshot | +--------------------+-------+--------+----------+----------+-----------------+ | **Meters added in the Queens release** | +--------------------+-------+--------+----------+----------+-----------------+ | volume.provider.ca\| Gauge | GB | hostname | Notifica\| 
Total volume | | pacity.total | | | | tion | capacity on host| +--------------------+-------+--------+----------+----------+-----------------+ | volume.provider.ca\| Gauge | GB | hostname | Notifica\| Free volume | | pacity.free | | | | tion | capacity on host| +--------------------+-------+--------+----------+----------+-----------------+ | volume.provider.ca\| Gauge | GB | hostname | Notifica\| Assigned volume | | pacity.allocated | | | | tion | capacity on host| | | | | | | by Cinder | +--------------------+-------+--------+----------+----------+-----------------+ | volume.provider.ca\| Gauge | GB | hostname | Notifica\| Assigned volume | | pacity.provisioned | | | | tion | capacity on host| +--------------------+-------+--------+----------+----------+-----------------+ | volume.provider.ca\| Gauge | GB | hostname | Notifica\| Virtual free | | pacity.virtual_free| | | | tion | volume capacity | | | | | | | on host | +--------------------+-------+--------+----------+----------+-----------------+ | volume.provider.po\| Gauge | GB | hostname\| Notifica\| Total volume | | ol.capacity.total | | | #pool | tion, Po\| capacity in pool| | | | | | llster | | +--------------------+-------+--------+----------+----------+-----------------+ | volume.provider.po\| Gauge | GB | hostname\| Notifica\| Free volume | | ol.capacity.free | | | #pool | tion, Po\| capacity in pool| | | | | | llster | | +--------------------+-------+--------+----------+----------+-----------------+ | volume.provider.po\| Gauge | GB | hostname\| Notifica\| Assigned volume | | ol.capacity.alloca\| | | #pool | tion, Po\| capacity in pool| | ted | | | | llster | by Cinder | +--------------------+-------+--------+----------+----------+-----------------+ | volume.provider.po\| Gauge | GB | hostname\| Notifica\| Assigned volume | | ol.capacity.provis\| | | #pool | tion, Po\| capacity in pool| | ioned | | | | llster | | +--------------------+-------+--------+----------+----------+-----------------+ | 
volume.provider.po\| Gauge | GB | hostname\| Notifica\| Virtual free | | ol.capacity.virtua\| | | #pool | tion, Po\| volume capacity | | l_free | | | | llster | in pool | +--------------------+-------+--------+----------+----------+-----------------+ OpenStack File Share ~~~~~~~~~~~~~~~~~~~~~~ The following meters are collected for OpenStack File Share: +--------------------+-------+--------+----------+----------+-----------------+ | Name | Type | Unit | Resource | Origin | Note | +====================+=======+========+==========+==========+=================+ | **Meters added in the Pike release** | +--------------------+-------+--------+----------+----------+-----------------+ | manila.share.size | Gauge | GB | share ID | Notifica\| Size of the fil\| | | | | | tion | e share | +--------------------+-------+--------+----------+----------+-----------------+ .. _telemetry-object-storage-meter: OpenStack Object Storage ~~~~~~~~~~~~~~~~~~~~~~~~ The following meters are collected for OpenStack Object Storage: +--------------------+-------+-------+------------+---------+-----------------+ | Name | Type | Unit | Resource | Origin | Note | +====================+=======+=======+============+=========+=================+ | **Meters added in the Mitaka release or earlier** | +--------------------+-------+-------+------------+---------+-----------------+ | storage.objects | Gauge | object| storage ID | Pollster| Number of objec\| | | | | | | ts | +--------------------+-------+-------+------------+---------+-----------------+ | storage.objects.si\| Gauge | B | storage ID | Pollster| Total size of s\| | ze | | | | | tored objects | +--------------------+-------+-------+------------+---------+-----------------+ | storage.objects.co\| Gauge | conta\| storage ID | Pollster| Number of conta\| | ntainers | | iner | | | iners | +--------------------+-------+-------+------------+---------+-----------------+ | storage.objects.in\| Delta | B | storage ID | Notific\| Number of incom\| | 
coming.bytes | | | | ation | ing bytes | +--------------------+-------+-------+------------+---------+-----------------+ | storage.objects.ou\| Delta | B | storage ID | Notific\| Number of outgo\| | tgoing.bytes | | | | ation | ing bytes | +--------------------+-------+-------+------------+---------+-----------------+ | storage.containers\| Gauge | object| storage ID\| Pollster| Number of objec\| | .objects | | | /container | | ts in container | +--------------------+-------+-------+------------+---------+-----------------+ | storage.containers\| Gauge | B | storage ID\| Pollster| Total size of s\| | .objects.size | | | /container | | tored objects i\| | | | | | | n container | +--------------------+-------+-------+------------+---------+-----------------+ Ceph Object Storage ~~~~~~~~~~~~~~~~~~~ In order to gather meters from Ceph, you have to install and configure the Ceph Object Gateway (radosgw) as it is described in the `Installation Manual `__. You also have to enable `usage logging `__ in order to get the related meters from Ceph. You will need an ``admin`` user with ``users``, ``buckets``, ``metadata`` and ``usage`` ``caps`` configured. In order to access Ceph from Telemetry, you need to specify a ``service group`` for ``radosgw`` in the ``ceilometer.conf`` configuration file along with ``access_key`` and ``secret_key`` of the ``admin`` user mentioned above. 
The following meters are collected for Ceph Object Storage: +------------------+------+--------+------------+----------+------------------+ | Name | Type | Unit | Resource | Origin | Note | +==================+======+========+============+==========+==================+ | **Meters added in the Mitaka release or earlier** | +------------------+------+--------+------------+----------+------------------+ | radosgw.objects | Gauge| object | storage ID | Pollster | Number of objects| +------------------+------+--------+------------+----------+------------------+ | radosgw.objects.\| Gauge| B | storage ID | Pollster | Total size of s\ | | size | | | | | tored objects | +------------------+------+--------+------------+----------+------------------+ | radosgw.objects.\| Gauge| contai\| storage ID | Pollster | Number of conta\ | | containers | | ner | | | iners | +------------------+------+--------+------------+----------+------------------+ | radosgw.api.requ\| Gauge| request| storage ID | Pollster | Number of API r\ | | est | | | | | equests against | | | | | | | Ceph Object Ga\ | | | | | | | teway (radosgw) | +------------------+------+--------+------------+----------+------------------+ | radosgw.containe\| Gauge| object | storage ID\| Pollster | Number of objec\ | | rs.objects | | | /container | | ts in container | +------------------+------+--------+------------+----------+------------------+ | radosgw.containe\| Gauge| B | storage ID\| Pollster | Total size of s\ | | rs.objects.size | | | /container | | tored objects in | | | | | | | container | +------------------+------+--------+------------+----------+------------------+ .. note:: The ``usage`` related information may not be updated right after an upload or download, because the Ceph Object Gateway needs time to update the usage properties. For instance, the default configuration needs approximately 30 minutes to generate the usage logs. 
OpenStack Identity ~~~~~~~~~~~~~~~~~~ The following meters are collected for OpenStack Identity: +-------------------+------+--------+-----------+-----------+-----------------+ | Name | Type | Unit | Resource | Origin | Note | +===================+======+========+===========+===========+=================+ | **Meters added in the Mitaka release or earlier** | +-------------------+------+--------+-----------+-----------+-----------------+ | identity.authent\ | Delta| user | user ID | Notifica\ | User successful\| | icate.success | | | | tion | ly authenticated| +-------------------+------+--------+-----------+-----------+-----------------+ | identity.authent\ | Delta| user | user ID | Notifica\ | User pending au\| | icate.pending | | | | tion | thentication | +-------------------+------+--------+-----------+-----------+-----------------+ | identity.authent\ | Delta| user | user ID | Notifica\ | User failed to | | icate.failure | | | | tion | authenticate | +-------------------+------+--------+-----------+-----------+-----------------+ OpenStack Networking ~~~~~~~~~~~~~~~~~~~~ The following meters are collected for OpenStack Networking: +-----------------+-------+--------+-----------+-----------+------------------+ | Name | Type | Unit | Resource | Origin | Note | +=================+=======+========+===========+===========+==================+ | **Meters added in the Mitaka release or earlier** | +-----------------+-------+--------+-----------+-----------+------------------+ | bandwidth | Delta | B | label ID | Notifica\ | Bytes through t\ | | | | | | tion | his l3 metering | | | | | | | label | +-----------------+-------+--------+-----------+-----------+------------------+ VPN-as-a-Service (VPNaaS) ~~~~~~~~~~~~~~~~~~~~~~~~~ The following meters are collected for VPNaaS: +---------------+-------+---------+------------+-----------+------------------+ | Name | Type | Unit | Resource | Origin | Note | 
+===============+=======+=========+============+===========+==================+ | **Meters added in the Mitaka release or earlier** | +---------------+-------+---------+------------+-----------+------------------+ | network.serv\ | Gauge | vpnser\ | vpn ID | Pollster | Existence of a | | ices.vpn | | vice | | | VPN | +---------------+-------+---------+------------+-----------+------------------+ | network.serv\ | Gauge | ipsec\_\| connection | Pollster | Existence of an | | ices.vpn.con\ | | site\_c\| ID | | IPSec connection | | nections | | onnect\ | | | | | | | ion | | | | +---------------+-------+---------+------------+-----------+------------------+ Firewall-as-a-Service (FWaaS) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The following meters are collected for FWaaS: +---------------+-------+---------+------------+-----------+------------------+ | Name | Type | Unit | Resource | Origin | Note | +===============+=======+=========+============+===========+==================+ | **Meters added in the Mitaka release or earlier** | +---------------+-------+---------+------------+-----------+------------------+ | network.serv\ | Gauge | firewall| firewall ID| Pollster | Existence of a | | ices.firewall | | | | | firewall | +---------------+-------+---------+------------+-----------+------------------+ | network.serv\ | Gauge | firewa\ | firewall ID| Pollster | Existence of a | | ices.firewal\ | | ll_pol\ | | | firewall policy | | l.policy | | icy | | | | +---------------+-------+---------+------------+-----------+------------------+ Openstack alarming ~~~~~~~~~~~~~~~~~~ The following meters are collected for Aodh: +---------------+-------+---------+------------+-----------+------------------+ | Name | Type | Unit | Resource | Origin | Note | +===============+=======+=========+============+===========+==================+ | **Meters added in the Flamingo release** | +---------------+-------+---------+------------+-----------+------------------+ | alarm.evalua\ | Gauge | evalua\ | 
alarm ID | Pollster | Total count of | | tion_result | | tion_r\ | | | evaluation | | | | esult\_\| | | results for each | | | | count | | | alarm | +---------------+-------+---------+------------+-----------+------------------+ ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/admin/telemetry-system-architecture.rst0000664000175100017510000000442515033033467025705 0ustar00mylesmyles.. _telemetry-system-architecture: =================== System architecture =================== The Telemetry service uses an agent-based architecture. Several modules combine their responsibilities to collect, normalize, and redirect data to be used for use cases such as metering, monitoring, and alerting. The Telemetry service is built from the following agents: ceilometer-polling Polls for different kinds of meter data by using the polling plug-ins (pollsters) registered in different namespaces. It provides a single polling interface across different namespaces. .. note:: The ``ceilometer-polling`` service provides polling support on any namespace but many distributions continue to provide namespace-scoped agents: ``ceilometer-agent-central``, ``ceilometer-agent-compute``, and ``ceilometer-agent-ipmi``. ceilometer-agent-notification Consumes AMQP messages from other OpenStack services, normalizes messages, and publishes them to configured targets. Except for the ``ceilometer-polling`` agents polling the ``compute`` or ``ipmi`` namespaces, all the other services are placed on one or more controller nodes. The Telemetry architecture depends on the AMQP service both for consuming notifications coming from OpenStack services and internal communication. .. _telemetry-supported-databases: Supported databases ~~~~~~~~~~~~~~~~~~~ The other key external component of Telemetry is the database, where samples, alarm definitions, and alarms are stored. 
Each of the data models have their own storage service and each support various back ends. The list of supported base back ends for measurements: - `gnocchi `__ The list of supported base back ends for alarms: - `aodh `__ .. _telemetry-supported-hypervisors: Supported hypervisors ~~~~~~~~~~~~~~~~~~~~~ The Telemetry service collects information about the virtual machines, which requires close connection to the hypervisor that runs on the compute hosts. The following is a list of supported hypervisors. - `Libvirt supported hypervisors `__ such as KVM and QEMU .. note:: For details about hypervisor support in libvirt please see the `Libvirt API support matrix `__. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/admin/telemetry-troubleshooting-guide.rst0000664000175100017510000000155115033033467026220 0ustar00mylesmylesTroubleshoot Telemetry ~~~~~~~~~~~~~~~~~~~~~~ Logging in Telemetry -------------------- The Telemetry service has similar log settings as the other OpenStack services. Multiple options are available to change the target of logging, the format of the log entries and the log levels. The log settings can be changed in ``ceilometer.conf``. The list of configuration options are listed in the logging configuration options table in the `Telemetry section `__ in the OpenStack Configuration Reference. By default ``stderr`` is used as standard output for the log messages. It can be changed to either a log file or syslog. The ``debug`` and ``verbose`` options are also set to false in the default settings, the default log levels of the corresponding modules can be found in the table referred above. 
././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7999413 ceilometer-24.1.0.dev59/doc/source/cli/0000775000175100017510000000000015033033521016600 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/cli/ceilometer-status.rst0000664000175100017510000000373715033033467023026 0ustar00mylesmyles================= ceilometer-status ================= -------------------------------------------- CLI interface for Ceilometer status commands -------------------------------------------- Synopsis ======== :: ceilometer-status [] Description =========== :program:`ceilometer-status` is a tool that provides routines for checking the status of a Ceilometer deployment. Options ======= The standard pattern for executing a :program:`ceilometer-status` command is:: ceilometer-status [] Run without arguments to see a list of available command categories:: ceilometer-status Categories are: * ``upgrade`` Detailed descriptions are below: You can also run with a category argument such as ``upgrade`` to see a list of all commands in that category:: ceilometer-status upgrade These sections describe the available categories and arguments for :program:`ceilometer-status`. Upgrade ~~~~~~~ .. _ceilometer-status-checks: ``ceilometer-status upgrade check`` Performs a release-specific readiness check before restarting services with new code. For example, missing or changed configuration options, incompatible object states, or other conditions that could lead to failures while upgrading. **Return Codes** .. list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - All upgrade readiness checks passed successfully and there is nothing to do. * - 1 - At least one check encountered an issue and requires further investigation. This is considered a warning but the upgrade may be OK. 
* - 2 - There was an upgrade status check failure that needs to be investigated. This should be considered something that stops an upgrade. * - 255 - An unexpected error occurred. **History of Checks** **12.0.0 (Stein)** * Sample check to be filled in with checks as they are added in Stein. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/cli/index.rst0000664000175100017510000000034015033033467020447 0ustar00mylesmyles============================ Ceilometer CLI Documentation ============================ In this section you will find information on Ceilometer’s command line interface. .. toctree:: :maxdepth: 1 ceilometer-status ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/conf.py0000664000175100017510000002353115033033467017345 0ustar00mylesmyles# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Ceilometer documentation build configuration file, created by # sphinx-quickstart on Thu Oct 27 11:38:59 2011. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. 
import os import sys BASE_DIR = os.path.dirname(os.path.abspath(__file__)) ROOT = os.path.abspath(os.path.join(BASE_DIR, "..", "..")) sys.path.insert(0, ROOT) sys.path.insert(0, BASE_DIR) # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ---------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. # They can be extensions coming with Sphinx (named 'sphinx.ext.*') # or your custom ones. extensions = [ 'openstackdocstheme', 'sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.viewcode', 'oslo_config.sphinxconfiggen', ] config_generator_config_file = os.path.join( ROOT, 'etc/ceilometer/ceilometer-config-generator.conf') sample_config_basename = '_static/ceilometer' todo_include_todos = True # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = 'Ceilometer' copyright = '2012-2015, OpenStack Foundation' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['**/#*', '**~', '**/#*#'] # The reST default role (used for this markup: `text`) # to use for all documents. 
# default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] primary_domain = 'py' nitpicky = False # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme_path = ['.'] # html_theme = '_theme' html_theme = 'openstackdocs' # openstackdocstheme options openstackdocs_repo_name = 'openstack/ceilometer' openstackdocs_pdf_link = True openstackdocs_auto_name = False openstackdocs_bug_project = 'ceilometer' openstackdocs_bug_tag = '' # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". 
# html_static_path = ['_static'] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'Ceilometerdoc' # -- Options for LaTeX output ------------------------------------------------- latex_domain_indices = False latex_elements = { 'makeindex': '', 'printindex': '', 'preamble': r'\setcounter{tocdepth}{3}', 'maxlistdepth': '10', } # Disable usage of xindy https://bugzilla.redhat.com/show_bug.cgi?id=1643664 latex_use_xindy = False # Disable smartquotes, they don't work in latex smartquotes_excludes = {'builders': ['latex']} # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). 
latex_documents = [ ('index', 'doc-ceilometer.tex', 'Ceilometer Documentation', 'OpenStack Foundation', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output ------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'ceilometer', 'Ceilometer Documentation', ['OpenStack'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ----------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'Ceilometer', 'Ceilometer Documentation', 'OpenStack', 'Ceilometer', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # -- Options for Epub output -------------------------------------------------- # Bibliographic Dublin Core info. epub_title = 'Ceilometer' epub_author = 'OpenStack' epub_publisher = 'OpenStack' epub_copyright = '2012-2015, OpenStack' # The language of the text. It defaults to the language option # or en if the language is not set. 
# epub_language = '' # The scheme of the identifier. Typical schemes are ISBN or URL. # epub_scheme = '' # The unique identifier of the text. This can be an ISBN number # or the project homepage. # epub_identifier = '' # A unique identification for the text. # epub_uid = '' # A tuple containing the cover image and cover page html template filenames. # epub_cover = () # HTML files that should be inserted before the pages created by sphinx. # The format is a list of tuples containing the path and title. # epub_pre_files = [] # HTML files shat should be inserted after the pages created by sphinx. # The format is a list of tuples containing the path and title. # epub_post_files = [] # A list of files that should not be packed into the epub file. # epub_exclude_files = [] # The depth of the table of contents in toc.ncx. # epub_tocdepth = 3 # Allow duplicate toc entries. # epub_tocdup = True # NOTE(dhellmann): pbr used to set this option but now that we are # using Sphinx>=1.6.2 it does not so we have to set it ourselves. suppress_warnings = [ 'app.add_directive', 'app.add_role', 'app.add_generic_role', 'app.add_node', 'image.nonlocal_uri', ] ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7999413 ceilometer-24.1.0.dev59/doc/source/configuration/0000775000175100017510000000000015033033521020700 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/configuration/index.rst0000664000175100017510000000160515033033467022554 0ustar00mylesmyles.. _configuring: ================================ Ceilometer Configuration Options ================================ Ceilometer Sample Configuration File ==================================== Configure Ceilometer by editing /etc/ceilometer/ceilometer.conf. No config file is provided with the source code, it will be created during the installation. 
In case where no configuration file was installed, one can be easily created by running:: oslo-config-generator \ --config-file=/etc/ceilometer/ceilometer-config-generator.conf \ --output-file=/etc/ceilometer/ceilometer.conf .. only:: html The following is a sample Ceilometer configuration for adaptation and use. It is auto-generated from Ceilometer when this documentation is built, and can also be viewed in `file form <_static/ceilometer.conf.sample>`_. .. literalinclude:: ../_static/ceilometer.conf.sample ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.8009415 ceilometer-24.1.0.dev59/doc/source/contributor/0000775000175100017510000000000015033033521020403 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/contributor/1-agents.png0000664000175100017510000014157115033033467022552 0ustar00mylesmylesPNG  IHDRa3bKGD pHYs  tIME  > IDATx}xUs<@ PBClIcUs ˠ aH@txf-f(0zZHR@<$xĄH,lלdZ;{<%Y{g{;n޼ySg-H.\# pNWyy9G:r?q97c?w,O߿BУu1r;TG tKV%1$OjnSn5:BcXek$\FrekwRΟK5fl"t Ћ<@|oϮ59yz|lvozK+״?|lW?kjhVYkk;5ZЬݛcKҲJ񱙪8y-#<554*IR &?Z4[wi'<2LG5~ub#Dcz"1M2*-r(=Kֲzwn${XcԖ, SscJ[Dd+ړ]Ł%&_u;V*<2'&pӞL /=kkJ˳Z˪%p|XPE 'pH.@F*O6Q'/trG5wS KcySC^Yvz+̄^g#ӾIבֿf"o={[jjhyƒ>dZm[N=O#%vH4k,zqu]ƃz֪Y?j >;LiݯW`6)oŽQs=%I*K"<ح<-[7Y*3kݠH>2;EJsTCr2s#iymFM[ixۖ0?8u;VjֳkȡwhܴLbKRH ߖ)<2L~NӟLսcGkȡd|yŃzM f?_賮Z6jD)}lm|Wi'^~ MJضDmnlqK ^zV{ǎ֒러6ۜi슶<᚛uc$WQ'M4,?aCѹ'/G X{'1W T;Ko؍v:XaҴK8y=+ֺߦd, yB}$5uq'Me_^x5w- [s I6+lFJIXv7N36$ljhְ5fh])0io&c=uF(;.mpM"xwǜ|(=uT֗>ֺ_ p <حl57)/YH]w[&$2Samc?)mUq>{l.)b)ݾib߱m|ޫWk h/Ii5)m*N^ڜ<]jtiy/l786]rL8'~x)k,a1|5F$6;~ࣃ_ɯ_V#qLā^r`f=ici klBuE#dY ߫XGO/lVg FChV>b$U 2|nw[?:)5!+'/h /e?7FۍƵۜ.v.k;t5s =az6pij}˯|~ncGx#k'R KYضO;w;yk ۭ͟,Sm.Cb?u'9m{ 7@&jXP}^kc )(.]RkT 9T'27Ǜ)M'jm}PaϦwDN9TOd={5wS ߖ ǧ=e:PxPwMJ.]։çTqo1~Gjk{[VڬT576׺+}([F΍uCu5]AcWsCN>0j-?F'>.UڬTMJ^TLaf$r<+1s":bp~QR_Ο7,TXZ k_ʲ:N#FҳZG N|@ ^I&>,oi_yloÿץFAIt:i ćкee^Od=s^wd7bcnr'7 x]oϭ${@bC?k$\FrekwRk:[47tx 
~e*mװ,F?E!qNIF.fҹsTUUEaJNN7o޼I`Y k,;wN~DGG+))Iaaa>,I6mt IҤI( 3 -\FPǏ׎;:څ :rm&kjji^^ xG#n g?(89w*++;o~x<"O?Tׯ_v\xG#nO\FȢGQLܟ=O~K*?xH<# YLܟk1AK6_zHGx@$L4hkj֬!a;_$qАއ #@<#2ÇT_.dD7&\>әw.:ĉJHHɓ)>]r=|q>(:x ~37txLsÍ 01?#HN}&utt4$'O֍7x Gˀ _g+ǣt 7عGx@m9CT6\òH.\# p25H.\# p25H.\# p25H.\# p25H.\# p25H.\# p25H.\# p25H.\# p25H.\# p25H.\"_Uyek:IR┞:z+E0Isڪ:qIң=J๥s3mF),>QߌVV 2[Wܟ+\vN啗UQY#I萮[m3^I]ޙJ۲MOIVg ,E#|gqU60$itlO#vW}cϙNaGΚ ɮ: {Jio$䮕|.՟]ڭv˸Y0ySTdXHgJr}cr7SErv p]a@olѦk_RB^PqV;zk>+RPqi{Jv"NhgM2oSG'FcY }3ZL,'&)=5Y/gg(eG#%>z\z=Tx@,l\\vkK3F0pkeze<\%p;)^6dĄ8͛99OV^Ynֲ{DTrp1S{2ޣdhH 0O˶Zqס1r@5*o =l>IR튏a$dp+2Ӳ{e m{8EGΚ jSt䬊u-ݎ/n#-0ϼ&+oIcf0Ol3YR&xr՟]/!UBolQRRP=_{`]yF.蓖nkVfN[y9#ϙ Ǻn^^=:c>126ptl͜4J}Ƌ%I8[1ݸOEegͧccїze[O;)!Nd,QX|L~w\ K3 9i3mLK7ZG%7gw͝c:*8-3]fN<?"Z՟]Wsڰ43ؘ7sccՂb=.X1?ʽ>ezgY %JAzQaHKf;vN{ioQaJQNf Hslg]<Β>SuՕF|Tv6+ktŏ IDAT]آs7KYYIjWqzوIe]AovমZW }1,-(Z{]Z9Ӯ -Nos%څ9iڴONrNmP0cߞ`I\cSuQXݸWQa0՟ux-J͌۞]k:3i=uTgZF,9鎣ߍ{Sx"&h{k\9S*uDZ3XKJZ Z% jb,}:}?wVxϺd߯ь->XuM项FАAkUxNpEeMmUٖM1>IEeӁJӲ̙b:3`;[rCB#M7̟St䬎{`r7 ԬE%T^yY'w ,=Zj)3Nq`|=4wl5ە2l$zƇ]nyÕq5ⲳ*;ՐA>u,:rVY3$vkFJUu$ :ۮ~:ø.ꕂbs7- Z}ke@'{vM7͛׽VTvVO0۵iSѽ7^vJ:ɚ9lٷ w{ja{Zj=Y奌s[2[LذiӞRI렽=vk(Sjgs(lK;#Eegm{?|kW?uъe19F31!KUu0L:MWsttUm/׫}5qrvNZ V7w}X3͛*X9_=gug.QbBW)ol~eedϓy3'->?3ʭEC")olܬ˨1YG [SGѹئ]l6 D R>ם1ؓlWbBy^'w4GZ7g֨R{Ζ\]}Ζ\s6EyeM:yrbB\H_ڦNm]+%lg8cNYNyNfNZi֕F}_W&%->#d+&um7j8j_qSXrLK7u̯iH d %&}8fXڲtM-m'6:21{vŏ֛XW |s"3,ufx3s5ڼv~(3i7߷+khS{q͌ЎXNζxs"ŏT'{ve+*kF-; ڧC$Viƽ %bA<)ݼyS'wRV1;Fcߩ->>%gdu®ܼ!3ghr{3/GL7\FCYQ un`$ѡ1H*+}uV^ -; M+3JV=gvj딞CoR S\|-gNLӡ->$(z#Luka\+EGκ)/gghL39`w:0w<^wsSQښhZ&~DcM{Kmg1]Y mX 2tj*$ѓў.Q^Y:{23GIcixOlccaiUu>7k;/olm[y0-ؒ65-Z F[ڦAjoO >r*Io_Ș:N'w2P=&'3-`3xkV߳?STXr̬FbBkL|+롹;L6;_{+/'ԯym5ʢ_htlL~* #IW^Oׄ } ?lXi y$!;$rPQѥ&;vҧSblH ۩H֑TF_(?.d*./FgYLǸj}e5o攐fIYƨ(.)z ]<~?]jM7('3Mɶzcih۶oƽǩ->oڔNKwT@-=5vHTdR-h(uWU[0 쬬):{͜C5fN5`uUYL~]q u]N#NKX}Nmte}-o&&%tOGD}m쩝652j? 
x˘:.ŲkdٌZ^m{;$:=w)6;f 18ܸS:'%,F]cMsƽ@^$n̓$:[0#!^kG'Zqݡ;lg<JҖ m+[묎su[1dP?q Z{~O:pLz#WotKݽmsΖ\3QkUU[WW[:ꮨ0KYulWeF Xl Y?_E60`w_ڬ̚dN'Uu7p˚z(5c?Ӌ >yHeTiUb׹emGDkHdXhKqڴԜrfDWRB͜v[)}hMw3uua#ccsYr}eTM$z:&CbBDE BCHbAmg.쌐@7uR{丙P6C"|x\"4&bg|l2~/nh`:tͤHNi]-!NIQ*; Yyv ViqC!,SbVU%y3';b=T\vN($(-TvrWn}}31H)w,ew^<lImEa4LBTآW[:i Z |Ėi]9J#5AGW4d:O ^n٫+{vǤ`g$}^qY]&ټim%GG4c\=;8ltfwz-۸Ø V7bێ(s7땂"WgDE)m΢#gv"gM[,Ӳ^_25$qĽSinx;n-RZ]fr_=yoLqLk|q}}k0a=zګmևm0S}֐uo1Z?_Gm;]}#,l׾A r5ͭ8]2>:=gty7sو 3,9>& {ڭE cm6̔$(we*,9Bjqg$D ҫۊc5hzv5lͳ`{JFv=c܈NW}c^)(nY|1aFR3^3 eZwr}}QHȬ/uTfvݺք@WU[gX⪪N6=&w>{e炶_)(Ra16#"Da}caRlÐA*X5];n{:Dm}cr_P%yF?C=PF]l1^.ևqMu%}pvk}teimk;mg +;-㻻1S@:̤K}czfWoT?uKgꌅyN{5caZ+yҀ{ёf-1!lܷwϰin*{v (=̚'ez:wYn/Ro25ڴوqUiƋ:um4C]WV){o2y-ѱ1loPd^HaJ1:;xN{]&pF׵֑CI#)\nٹ޿bl}M~f6HtMcc|mN:Ղbۤ_- `hs)s7jtLkUdTrޒflXgۦ4hS>q-CϬѲ{}ﭛ6Ռכm4h3vgGxNy/۸ϱk4ű甽zYw9]J}N"|>_zl{|*{v=::09~]TdYgW|>I>mĄ8WK@,9ֶoϙn^]651Ƶl{5,9n&D 2): ڶ9 |,h51!N:66?Xq`1Tvݬ\D Rޒ.yC")O*uGDk'+3tW_6ҍ{t^ L#"+k4 \itۚx+kd~?w"Ôsnf^[! blc\+42SޒZq+kyb|Lh~Qȳ IΒcy/IJx=mLU:bW'(gNLӛy)롹e N-Nki"N.?aٟ&dkxk{_tP 4;aL}Ԣ%Ut䬊5DܬXѦʘ:?賲ҧ(o**kTXrLGx͘`umdzN_\3l7J hSJ hJѦ[6,TRBٟ0+NGD`sa'5yKf`0*2Lo_P#z;odsFM{Kkr>Wa}_u̲-; D5vvbmթmfUYOi׾NLӛיF;Ss!265bA**kly3'ggq1β.$3JEмQ%cGD`|ۍFƨ`|2v;[cn4uC"mS^آz9;#hVY<:vѦ7s465DEU*X9'v[g #cͿMML;~| q*X9_'w o$S/gghHĠflvĮ+y߼%ް4So_P{mTdX@wh ܽ1Fnu-Lkڨ3m#o)9{6,4?C%}j7m3v&Fxom C[r{m 7oޤसX%%%$#5vҘ_x\U[g&xݜun(`/[OY\p7xxHqd}c*>jMZGҦ=ue2`];Sղ <{/ݰWw|9Qh1,lEE䘹f%ߑ8Fw0 qx;N{J>{!d/z#ds)Y3Qߌ?^!oo->61T\vNUuz:wIӨo~ǗغIINf Vg VWdҀSj>0̛Ə):zƫ%}6R7odMH.@/k6dOQ^)(Rye_7jXMwWuevP,  2[󜓑Ww!uvѿIAf*> Rx H<T'{/__̊y>]=E_QxngF嗮RߥEI~Kn}ֿ:KC$5@H<n<|Q_)<8RE nռic͙Q 8$cJScq[q8@a#@<kJiNӶy[e=J3G`! SSov/+-Va!w_^Ŵ&xGpKQC x3oXhۿ TިW w'sH.cJӣߎ54MVyo*d$? 
G#﷮}~hǨeAg0 o$H.\mJSGvYc?Gx$.Uюfd$@<2׌)MFI'ukzCd$Qp##D֨aߧ9獝Mx::ynwֿ 83@<#mɬ Hҏ{G"uJ.i[Fss##@<@w*tM$8֍7@<eȘǦ<8REJ6Sx#6UHps5{^G<#H.p}JӘ7N|6P#ЭJ>h[f!?Xs@ȬSe$яQ] H<ݥj)A1霉/$= :x[xx$2%E^?|pk~:[D=x$ܩ8Mʃ#n<֙G,5H.\# p25H.\# p25H.\# p25H.g<d+6-luc9 E5- W,n $Fr {S$TkyKli,WH\3)Zs'fY<΄Y$2[YR:%VynujYJCwӟ/@لM \F\3iX|1&p!<{c__ގ _z%\yX|:/~rܹ"rFs}@Ey`1]uEE\u췶 zpmq=/K,'L&jjj.rMMMd20/Grɛ ޼n\>n3X}}|rocLo~ ?+\gh>|8Μ9^q1`\2n >NG·7yQO?Oo ލ<1ڸxعsEYszLľ}""{J' .e*++#\㏧`yeUn\dF&',:x`^|y݂رcE\3ḽ=/XaQ,t$`>~xu.020c5Xô96d>U /`>r䈟"BghMg\0r߿^uEE^Y{Rx>`ôodd$ Ē`bSy=`9R J8z;[blfYiMr)/]0 ?[ޕ*g.d6f`9"N;X}#0?3I(0000gX8pi9}'{],캄s6s7,(jܯz.6rB ӓf/bS [嚄Ź=Η-{m!605<{%s{[Fp,i0Cyos'`9qWf5yQϼVy9gNEa^zɇi(t,Rȑ#ާ4ěP/IĤ0&ty$\0Xذ",z|w ذ^4+_1X㉱85|dd(6fdw\0;>ȑ#},(>6w^pZjƿY{LDd2q5dee`cQbڏn`` +"_ԙ ~xa=&~r`Ν;$`9"o-cڗl6orwY\ʧj'ǃ_2߿?jkkɀ###yk߫N'VVWT?_׽,.Scǎ7twwǩSft555gd2zzzbtttJ .|oJrbpp0=b9Fދw 8LCaU5w\ ~˱xcie|Eta+0v?:;;{10Mѣ3./_̪f폯 6/O`Q 'Ƣwֹjiݟ2hdرc3H2q:'+{?9>5_zp2` USZo}rdjٰaC:0`V} _(y^vmM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @[844ynQ__ճr1UUUE&ɻ+=غukTWW\BJs* p*.8p m?p@WWWlٲ%""bq}{hnnl6.okkݻwmc͚5Ste޽166hllPI]ܖZ'֭[>jd?裏Ʈ]"ɔ\|زeK|K]$]fMGWWWZq[jR'(~UUUZTr󥫫+"&MMM*9hr8i0Yrқy&* KRrLk֬ڄ$ڢ+cڵy M~+{thjj >|8{챈~k׮xb͚5ҒkEEE޽;pc쌮%VBg2ƣ?:;;ZxLv횰d1]6ɾ룩)c麥 d&˅}[ZZb޽6abۣ̅#"":^8p@tttġCގ4Xoiqغukz{i`֖VWG]]]uhii~nٲ% psf)""ͦ{I{zz-]? 9*/\-["Ji׮]yM#voo^K?ښ/~x]LhIUnvvv e#&NPm۶5kġCbhh(^ AСC1DXX]oXZ]\]Fnk׮1+ ٓ?66{쉈=Zxll,CXb޹͛^G}4byzbϞ=O8$D}޽{>^{_CҞՅ_2!\Nz('֮]VV L۪8p@^[L&nXbT⽍}/d24έmaښ4POHCb#9mmmiՕf) =9$$m…Atl[[[>̍QUUafImUUUpy*DWWWTUUMh!lcϞ=ioxkV2}',KC}} tҁwq%/$;۩*:ƥƥ%lƖ-[1ZZZ9VW\rDUr&hnn7QX[,NjQj(elٲmD[[[ţ>{.lQ*\.#YTr'+VRVWWGWWW477iiiui-/\.U1VUUU&͋I }' KMWq0Tp航سgO"cll,}x>S䊹JM'#kv튪-[g &6 V5p-*I#&o'(bm\lI cihBhll xȅH㩶H_JR%[<9Wr^cy׮]mmmߟ~@9pXEq/qgggZ \؛9dw>3_'ވ|2b'j #:::СCQ]]D pK[N]H~҅D %~no峝b7dO鎎cPJ. 
fs]6n===E+V Iho=b֭S-_l O[XT%4@1峅E|п.Z IDATbe˖ؽ{wl۶-֭[===ytD2A޶m:rz'ǜcchh(~t[iˎ-[-3nزeK^;bz{{c˖-8 ضm[~b׮]y}sT$T='w-[޽{J[ ,-k֬_kհgϞvhiiI>ٳ'"ȓ6.:WWWGOOOÇߎ ghkk{l}}}<_ӓC|>]]]i{Igd2ёo޼9JVl:GoDWI'}sIUUU477G[[۔';(.OdEIT /0aڵkJ穬9vuuM';::c.:mmmVFkkk>c:9z&=''Pklb{ qm_Be\-0K @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لM @لm!jdd$']gttt;::/Ҥ^:*++$D LHg%<>hoot˗7 ' <Ѳ嫯j*ɓ'$H20%˗/[~x؄yX|9ˋ#nnax 7~f' <.S07.: ụŷ-knnN C[ , yUov}glo``.e`"\|̂ep0 .Nef,Xx s6`&\fL̂ep130 . e`FK,Xt7Y pi.Y p.f*`Ҵ8_73">mϏ`$\f]9wB pA&ŋŋG&WcUE`ҥrAmmmttt)*`NFFFɓQ[[k03yU0 =FGG{wZ`.]CCC,_<""œl6FGG#"[ !\.2s]ww0###9Ä~0Ê0ר\fȑ#/ܣrfHa|qWM>9IݸzAq݂/ spH` o Pgܰ|{vƶY,Xڊ 8sp an:ydX<00틓'O>_0>U[z:hoow2Phootق刈x饗 sp&fwlժ8qժѣGc߾}iEw߸0k*DĻx||E? fp;:;;^qoâҁmũ.P*r ` e#q`9Q8fPE gxW]Qn\/bK fqȑ` ؿ^|qu e`dd$cpp0]vqWf  `s۷o/y&d2ug}}}ttt˷l?p]6"ۼtvvFsss8a9.@~:/XyÔtuuEWWפ'G=}8p b͚5鲞t_I1.>|8,  Gmmm'u++Yzzz""b֭Zt޽{fϹ8 3̄ؼys޺ձyuK 92L477GgggDDLķĽ ̜Ç#"1CCC9#roooHN0c 444׿XxqD|08kepr ߟՓ344f'rlr;_PDmmmر#/`g38-Mm۶XlY[."mV4MY$ [[["_QQݻcݺunݺJ믿>-[2aKKKTTT9N[ (! l:{ߍO~|I Ja+-[P}e^x!f'QUU7_V˶l/B&F~kݻӐ9+:::bllh`.$=/`~{qW(J+ؾ}{>#? wڕW]MMMqhmml67d~njdݽ{F}}}tkkk,[,?]~hkk^KKKݻ7"&S1Eee儀ǃcqFL* o{{{c4X~𵭭-b֭yrDDuuuŖ-[+bq̅o5kDWWW^UtCCCm۶ 쎎*{KO0I8rHD|0qX|YE~yuuuuŬY&:::)}lvtt}L]템&-)( |mM<"@:ͦm;ձy8|0`*++%""/`/Ľ }'ߛree?A]vd2B^崑HytY{+*.v\O&+~!0 L---xxg"".0E rm-&,ͭ".gk֬ɫD.V|_}²B&C Q[[3xtĿ3G7.(0oذ!9TۉHBb=Ip}}F. vZ| shhhB M LOCCC477OW0+7&֛1 ݻ7"QL6_y*ŞSk֬XT pACCC|_ŋGDįό}'38D*c-c͚5i/9Od[̅ϩpv9W6= n#\st5Ď;懎? 
`JSUUU۶mKbݱ{H+CCCE7ӛ9'~{>|8>۶mm۶\A 36v555'`ֻl+"Ʋe믏e˖E[[[TUUšCi>U(e(>^̓m?b8p@466Fccc<裱k׮t=0?fHmmmܹ3cpp0""~<\db׮]f2l6N޷uhjj ōvڼձk׮ I_;1'ډhllL'[vm455tۄ0*++cǎy3s[n{.5Z[[ϤP"WZxף*2̄LeU0h3, Up2dhll,~N%2̂7nL.YIEÇcd~۷o믿>""iMj\Ybtt4J%1~ضm[tttDGGGw߄e&\YdbǎKR2_WWWGx覦&0 `MK/.ik׮I Kpf@`12seaݱ4X_1.-*`uwwGggg^|or{|W]Qw߸(j2̀l6GIo_uEEܻqa,07 k"qa,?AyԩS9K 422188.q+pK `  10=t㢸 $\KD"Һ+2uln;wƩY?Ɏ/Y~׉\{g;p9GCCCDD0`A $\K+O>OoQiS|hɒXje֞?>WuVf2q×d Jhhhf1::>30ߕYX)``~.%'==%|wcڛn˗.O47=tj1gd2رcG?_-=ш/~}{sp.qoE7tAekjkkcǎ~7N:}7"dN!e:}:NƊXjejvMc>""~m7Y>qҥbeK㆏瞋N%VG2կ{zE}}>t>vlwCKLx\M]]\L\KV??g+\._4?ш8[z/{խO-[cccEǮ͉};wF{{{ FxėW20S*++ 2S5uuq×Xt{ojћ u{,Y*˗.p,/fћ,[o-N}QbEԷ4lj}ewMqX28#%/~1>zEȷ,읱~¸D?k?{~%N}gBl=jn4aǎMܱ|qUi$`XD)0'~~iY|y. eÒ෇o4x8xlGݭD͆/ۿ<樻2K+v#_y=D>-s<^gbE}}l9k7m?} Ϧ(&ctҥ%mi8o*Vf2n}4o󨩫K?splڛnL֞?U67_y%>}:Vd2d =&,I{%pqҥyǕ;d2qͧI$+++cΝfȑ#109z&膅vm%O sTRu1鿓K8yƵ3n.~kV[;Wkμ.~/"""ynM]],Jibv} iZ趓1Zɤc[ɤsőd| +s]r?#eowIvjVE7|X-qҥQiS=Dך衼Mw>u-7Oyu͵DDsɱvaQр.u \zխ-ӱ'31^ub_mn^x~]-dժYNrDĩx'#"&Ŀ ߈';;u,6cԛ=ӓ4Nz~+'jqҥ;[t"S}}n:.+K,'=qe'<.+>utъ'瞋ov/ 9G2\r'pw&w!}'jo4UI+b VlӧccLw~5>kLLX|}_/Hfk0%=~_ m$iDcp|hɒXj\lVf2i;++Ϲo>]4]Q_KVe(o>utJ8TGD\Lz.j6ԕ<ĉXreYc2siX×hnM|l9H xս+SHTEpVח][S/Mh.Y*~;fC](zeeۿ4:|nر~[CK=T__S?7M!jkk=FGG 9G 7_].n'ŕܒwI#|1x8QiStS$bseeojkkcǎf$2SSFN&+mdժWf2i<|xG)W_?TR_}| G^ciMbE&n%+WKG\h10߈իW7{LއxɪUi !t$ /o|߇3~/͜ЫdN\tiMbnF+bɪU&xx1sﯷ2VN|PYY;v/˱sN \y& Ik& /?z}{sGDW:_sǟKo մu?ֶ6 +닎Knb IDATZ28~lDWn~O&w흟梏=ח֢LV89馒UoK*ndd$x8x`KqA%Oeg_?L[_n׾~y#xW.+ĩfC]d>~<\Yֶ}.yp6|ȕWƒUͱdx{x8r|xkLdbC7:=5+s[oſ-Yj!}.xL7X1|D Z#"x{x83qҥQ9䱅iXrenڔcxftLjvӦxx=qҥqgLT=㪔pyOOf=|9o'bg㣷ܜ\=<C;?r1;v~[c*>T9b6Gy$j7m+&dNţ[O8r_Zbyĉ&;yɧ =1Rd_Xć,ɛT__?oFFFb߾}K/sp.{zf[OO~x1͖|*ۭKC̷Ng-&#=DZ]J簢>Eۇ_ 1>vlJ:6w1X5]{`.R瞋䷢8մ7{o,[fttT?f.ye75u:ǎߴn}-խ/yl~Lc2n K֩}dժXD1rD鹠x.V\>$"{?+BKH۷/Ë"qa-/qT3p-[l `:%oرHS}}z*1iqqu cD,"y9`:0EmؼnAl^g.]2t`\'\IL ue(\`\&\`  ɓ'B 9嫖Fp~ 92L,^8=&F38C wQ?BfHH;à LR!D U1]w3V%({cnZ L^[),!B a) I4&BA-ẘ$$k5~&|~vV^$?7^ҿ~Wh'>>^WC3.֢E,`Lƻ˘ D @N'c2pN0&`SfѢEJNN%dԜLux>jM- .WLpc2^}U>,e*]ϟWSS2`@#\*zsN t vuIe'I|uezIn\/e'̉ic 0h. ۧ;w}⤟ޠ(|ӧOvO? 
);yx:j !e)T=L?q?᱇pw]ʷ%Wo) FRu@V̉).jNBܖx]S_肶6DŽaNMa6m륇@22Ao߾nImG2>ٷo_@륟8׭6BewtoH^C2ЁMwqȈwJvܩKvW}cV?I^0f$eikkɓ'*RO֧~p_RRLMZV>twtPՒx(;yfN \$J.p!:t(g}Vv"..NgΜC B+WjΝz$FdszEQSE Iҧ~i<;#:=OWxuzjns}tvQs~}kC2$Ÿa=v$iTL'<ƪ˯tTtN__|<))ɜg_EGG+99nիv1/6\Q700 t%7s7M@6fحAG|b~zj@\%;K?q^[m翾:yI:9틷~(5B0,VܹSpaz0%|w?W 2pm1(SO)))ɼȞOt.`PrѢEz'gKZ1m?j Xx\Ct.#v0,^bA0͛żk՞ݞ>}:ntR"\FX*w⢤Oy(`@c,ŠdD2.99Y/bw漴зzouk~ \\F:`&X@Gn;Yetfet:Cv19OmP XEE%:Pe-==]Wɓ)`P!\FDBбx\R .TTTd) `@c,Mn[r!p1b"F 2 bSڹsN>M1xw}pyx<:tЧvN 1PQl :ӧx ƙ:( tÅRׄj c1s:%I:7۞:gRj{e>}u!eő#3ZVZbc)URUU* 1ǧzBzpjLL_n@}cS#Q]m4IHN+^C `vpLJٱ3c7׍cFc.z2U[ty߈Q1rdS.ekp*XM4\c˓fՍT IIj2G`o.GE)F뤔Le>F $ٝ?(}p@jj5l.׾INR~p@j.Zbc믗$C2@?B j2oo 2~|p9&& Џ.跾>ܼ=|H Џ\O 0k߾};v~S[Vvm"Ⱦ}t!IRSS\.Ezۇ(@.`H"`zpC3}r@.`ș$539e Iy@.`"`pC3=."`"u=%sZ X]cW嚑^MָɷW$MIMiDArd̓$#0K8@;Ї6|;}셏hCO)k^𜪎Vh3H\ԥco⭚J3=Q}\Se^}kz^-W0]C }bvԐqE5kO]%=_'?$<5}zzG| D2#}ޞ[+ioV^HdletD_ kwzm=9KVjG\U@18ً1oweFGm\yķD0\^ɷhJZz G*1ż_y[ͭ9r#WFby[DqsܢJ#;WFPo\bUP[kJNMWƽžQyBwoWSc9n '臥鿞6o|XEigkF°at T'jkͮ.ոɷH<JvkJZL>}Zx< pH}M׺v@'d[p*ٶ^vԀǍk65Gy[k)ޢ _:8ٸ~etPx_5ڮ[joQ_@36Ѯu xƚֺP#jku¥xgmXyBG>ܭU XWEequ.y@'koV-yfhlx0`>Pou.y[+I0x7H3}:ޮ_ͥU*<ب{F .w@3xAUxQk1H_.n7^yNj*TFm.2_[흾66oSD|rƟ/K֓윮'^v_G[c4%5]UG+6>;SyBK)1%-]&bnJh(yWt=靀>\9ҙЃARJ+*3l>72Z.1`oQnQoˆ;yol:Z]޴;FR뉲!N}KP=v>bSCfRv5 ;p溺:}>o0aU[QKKbbbt:#>!^uuwokl]ig)MŒ5g/\іm:y[z{~Xm/-⭺{R3m!۳Cngfۭkv999Z|233{uyyy:pfΜ7k͚5vkΡp>Bݑѱ8(h?uJ!-Ao,IOVɶSE'kK˵zkt?ݑao$pͨ15辎jeT>BH#2JJJTRRBFx.5ټX#Ewu:.f~a 9KVj¥SU{^X<9s` vam_uX >-0oذQn[EEEŋtʨX_Y̙3%.,ŕk>.16S Δo5*F;jvz{$ux\bILԤ{fKNyC Ύʶy[tTRg78 ߹CʣjkJSmQ5 _/xNwo׎ߘrY> 5X0x}ڮ6oKpfḋL9NXB/ݯk߯9(nsM[KtVR\!}S=MY=p #;Kq$)ԏ5~KBYy.gA6/` ! c|Eo<B?#LQIH;z-OBhk~VyF_t9zr㹡6ov>5@GIW;tP p;vW.< w˖nַ-vi*I--1wt}7!88Csx] {PH6Twoex幀NT׎H~$t3#) P3CmFSӕnfC nwoW_+D{?cccGfҬY!4k,ux͚5KÆ Ә1c4w\s$G$:^ss 6L]Lsĉ5l0͚5K+VP]]]5j~4/**ܹs5f5Hf XZnwjVi#'׷$>vx&Xt5$lM*^^BtBBjoZO;:Йqhx9WhhTm8VW `=?~mb55kJjpu3qqFܾ9硕Z<`=>oW*;_4Yz;8OySu¥[U^Z[SmQp@&_|E}ȇPǣҥKax𵹹HvwUlllzuuu;wnPxxTRR|߿?VX4vLeeerڿЬgFF@~3gԬYg?o5577kܹa̙3UVVa n$tknn\ۙ5z<:SSsu\qUǣFG | `0Pc1rZty[xJW-I/Etݑ%Odzڮx]P. 
N2LtuQɩsyh~ hx̮io8pnbbZ IDATbTXXGj3g+=xby<hÆ ڿе<OؙP~~ƏB߿?X#TAAʔbP)))Busѻk;~xy<: Qly|,/0j$ܗ_^gݾ Iܹ.#Vtu$%1QnG4^$:vKRgu޿_kpLIKW#7c&O5Cً173M\3EJ=K'.w= f7Vݗdr qoӛޑt% iatFk /:˸wQPntrN/6Zk0PL\.ݸm>0בL8p@EEEZfM󤤤,h999ڱcTWW &tym %BNNN⋠Y%%% wlff_pZux&ݝARcru=YAc.ZQQjKK,R\Jq:=sVsN;=}ekw]$&֟Tf'o?ۿä,MY0? ^~[WfٜNcZc꺷憬mo铷:9K7/[ss^ʷQ%ǣÛ6 UmNns/]#xQ ))abs:ee>KWIkC>{0x n*k\sT (\hKUa/066VsQQ8tss<Oȅ.(URR(\̚5kxZNup:ۋڵkp^#L?~|Xhĉ3N FGn^0qxӦ1j_O;,$KMOҍ)):n]8Cwm66ܩS3iFX,JHu+ Z%GvqZu˥S{.P {Vϵ*!%ED-[&{F~.l`]wܾ>*!A/Mp:u׆+ ٚͯ/bk,6Y{L7߿ 3iYS:l]-[Y_ɎҾF~] Ufg;^[#K)sWGn̜9S%%%WuĄ ccc6LĨE+V_|܀^HNNNudmk;,L0A))){pm.*=cFc=,זO_5Ua8jխy{wʂ.Z$BϮ6=j5;~cm_$ 6 שcQۼ~á^w0[P3{F~޼YV@M/Gvjv qoQrjVy]flo0@sJJJ#:mmyyyf5kfM0!aNk4\6v!=a„~..x:M_*䈃Pڇ/ mmhP3Ϛݛ>{<: |#1WF=Ld^lmա͑nxt/NH7{DԱc*{ـ^z}otd ٚZY6z<|p(ՠ:^zux&#%ܷ G ^,p?بϮ6ǎ>sڳwۢ'/ߵ+?y0'8A7m6t3;kol5s̟a*)biŖߣ ^a'oֆ64K'`β} ݶ6H61/7u8׫Û7kcݵӦM[.S[o_ )Z#jDM7O7߿ lN՗EW Nnʺ oڬ_VWuA}2Y|W[~ʂtFX//Wo>cFZA.uر+ewL$|5-eǒS5%pp:'Li7qbccrӽp ;G(9_G:=ۭ͜9MaIYYJp:ÆEͷ//פ,Z/$F\׫Ϯ_(ͦi˖iڲeA;u7O_UVߠ ^kk`x}TRRdKu#gdԱcڳ<~[` +B>T7mf֤{dΒ#;Kgjj4b Ԫ5rgu{W_&ylόH5 cFvojx_6>SSr5z<--դ߯5:,6{&w(!_0h Q{%%%ڱc&L?K~jk˯Sh 2";vNsՆ fh͚53gvZvҥKΝ;{~KsX1`hĈFi<2VSCySC\䙂 9]/sB~l壡m'0fBG[cڼ_醿z3`0~wI]?;K \A ֆ}[ *Bu wc8~r7qZ@ĕɾcmv]6mvݑ%6:o_SQî)2rT>Do:wA1G "\"x M AȆLJns{Ooz'5_nsw`.2]r"t:駟b`P:yΟ?߭c `0@f}o 9fzm-!uporG*Bv<پEGK5gJy!κ] <>fWU5;7^yNt.֯_ꈏ#XB`lfw:tM :XЕ5fzG>( :מ[Px= \6CF[y"`Ep۸+khKy#> R@x0ofwۢ?_lvVREw/\ *m|pv-j>xx9Ia¥7ihk%fl7#,jjɑK:Q}\vTs ec3{iKuϹ{R=Ep'%+UyBǣ,3A!oٚJxm}1/t:75ԫbг?~dF5FO\ӑv$95]O0  X:G fv.X]延^c/h~9Tetwm7?{ҰalΒJ#K延)1xs9 ?,MIMom \D<2f&ߢowmב}c>`a.]D;wޓ$%A i/ Ze&{%Xԣ`!XzpCNVFke r\C 2,e0$,=C !!::ڼM 04~nޞ3ov 1e'+(ΝiԅfIRTT)  ^A vLZ1>}:E eeNIIa$.0HՔNՙ_/Zz 2З5Ga,\Pk .'ӧOםwIapA⛋S XNJJ8^G Z>wۯfޗB jΝiJ}YsDZ~ppXEͅ:wA/XR6iѢEr: pU.7',**Jwy>&[Ul,ˆSrrN)::e/ۢ #FP>j*I\Nnݙ4Zw=/ɫ#Cӟҳ!g"nGYAw Dpb]Aw N g\Fq4 0~BsDv͢ z|l^/7[_rU~B?_7_/Oh+Wg[h{lz}V}i~b=>nN;C@ ;:\չ^Iҩcg">yI={W9u3O8uhsgifcDᓳ^- =szwn}=eܹqީkK0痺}P rBw=ZVix|\GtPs̵rqс?痔3++o Vw Τqp:vzG&Ա3׳t̀=ׇ`;;MG&G@0NٹXnw>TO^I]ٲSrTQ~@fa2> |r֫~ו:"~d>~&`aǻw mֵMͿ:\ՁChW:58͜k/^Wt~G|H 
^uvt:;e%ݙ4:$qNt^ܗe^Ա3:}.~~IޗGfMQ/jO]wVά,l{*?ЁCz'C&KRrU-XwV@^${ᶋ_O{|G.$;kKۙzF1<`{>W=SǕϯ, `}vzdy,Io^&I?Q Vkǭ׎u za2o:\տ_vƀz۵2/^s!}*?P|M"DE,@3s }=[s\lb { ݻmeTv\Ba+WЩcgUuz>-^'+[eա֕2MdMM=+[uetԐ7_/ӛk|-QɯYAMKU;F-Tɯigm^B өcgd|LRW'`BuKz'Dݧ%t}):u6UuhV?R+KigmV,8;Uo{WGfMSɯש3+KWj?2 ,ʴ'(o.w϶y>UϾ$4( Fpw9Н~=Z[, 3A=|yݣjr|4qlmU?wa>3.%`{>Af_+?8+[Ys]wֽRjrݙ4* ukSquDOdiZ <ON;\/la@])9zĖl>/m5%g2s=j{gs+@+suT;.UK__&wopؔpZέ~ϙ;EO}w nڳ1Lm7ISԱ3g@ώFB2\)km{S`#KGI'f4!1]V,}@&`v+ׁz+y,eː=zG&{}mQ C .:m@),8j?H]y^٥n6j2gُpw4+q6v*w'$қEIʙt=7431k6 l؜'g{:u.~~Iw=Ro >~V4︾SMU+,r̂- $Nۛm9f:rU;֕;;OJZ'{)9#՝39[9hs̩ 컽qZWJ^lԵDȏz8ĭUu#U[Q+skV0A2[rSְ&W0 \Ǖ3=híi'lU)4hnmF<,TC4߹F6\w{px?xv((@w7>Zk%^TCEO^rfu1D=gnA_ڶN)lw}zI{ǥj϶Z?/_ڮ]JZ+!qmU ^,.B9Hpv\jg w/Z>^lm6%gL֩cgv[yT nٰY痬_p0d^ Z,˨({5O5umwώOԦW~i:MحLAGྞs[͡)`EH5wΖY?WسOt\j:vF?_׬75 is=ShǺ[ZlTնuBO?RtKV;`y54nwuWjMn?R8-UwƲs֭fr|x|*'gN5x:V 5{>2  .08%gL/[M`ܳ/>Թ^- M;[=ާzEO,:fնuGkfc,@BϾ[ԕ[Y<ڊzUt|GΞus_N//EѽR] /}}V]=9ylU@ :;7us^w=Z?d}_/la@{s_j{.ި{K+:zฆ)gVWڻw\|N?WUά,M.~~IQmEY9#Нqp|6K>~Foڲ4/%$+uS+?ЗTQ|K^Y_++Kˬ| Z"_{UQmɯԹ?6Aͣufrv+!1s532ߑSƲ U77E.zRWL_F1\ ^+s+njVn]65jPB6[z%ueE왴91˻oA>>Ez+W h铳^]m:=?O'R߾=8. I+v܎Ӡ(>cx'ݾx`ύecA5Aw Dpb]A=sN?CCW;:ڎn^! *2D!11F0&܍¬Y(Kctm~O3@l, 2Gg9#GTFFh@ GJLBcGyAw#GW]pa @ٵkx<7o BAyիiDQͽz7MӈY>3]| ?##n dWFޝQM[W4lǸ #??"EJM'6gu.03@GH ]D.n?G#@G,"jwq ?#@?f@pQ#i`f#v@̷'G^1z;:Kr骆']$͛Gc7_4Qv#@Gbʑ#GfI=gy5Q6ߏ=4M:F?^#pS8p`| l|_^m LzZmhLB.2uT]vM{<}ǽzo!5GI$ (I/_?G=#6l`|]19av9==]4,pf#@@, @ " 1. @ " 1. @ " 1. @ " 1. @ " 1. @ " 1. @ " 1. @ " 1. @ " 1. @ " 1. @ " 1. @ " 1. @ " 1. @ " 1v̟ͦu3_m:; #?G#b]7+ ##nzepS?GG ~f@4USS#IJ^M'g#$iƌͥѯ:;;ң$''+..#1 nqqqJOO!##@@- @ " 1. 
@ &zQ} aÆ)33S#G?C?^|Yv#G*##Cqqqcjjj˗i `---jjjd:@9rDeee40@< ;p2 *33wcc#c!07nB'0@y怋22=DV0Gjiia,Qgggc.0ᭌ]JK'[@&&X@?hnOX ձۢ#FKE6}W"]%ݡ'}AOG`#}/Ee,ǭW#~+wPb]Aw Dpb]Aw Dpb]Aw Dpb]Aw Dpb]Aw Dpb]Aw Dpb]Aw Dpb]Aw Dpb]Aw Dpb]AQ]ߠo_IMJWFZr&@ xJ8"NsmzG$IߟI7/ԤQ|AiWօ?_oT~C4do}p\fkTu}/Ts$)e($꘾ѹZ ~#Iz ٱmse*#}, 4"NwqԤQڱj>^]sDZt=VY`M@gdeh^V[JvGI3]{b|<oM]_{ 7Ң7E} 8pKwjZ /#-Y)/{BXHJpީJ5W}}qM[:hc蝪U5fKC'Pc嗛U@51>Noْn"vŶK =e^'_]5)ݺblg^-5i̘EsG_X/IZJIM飯4J_~>l\koU3Ғ~rՇoo'kV1}ډqZLN}0βt,<&Io&Re%k35gT[\FX_\V͡Fm\6;1gCݟVIOwA 7c=^£V5˞Wka^+ Zi{ =1>N&k t}~MNz<#+M8eܢ(e IDAT(s}zp$Yc+'x1]s$`\6}ҭY]ҿz2Vi%駥uf?i&5y.E}XJk߇Lh6p6,}Z&ԥ޿E^n/ _\V ~gIHˆa|ZJUu *]5u߾?$TsKǼmmygqTUߵod_-93&d~}0.bcDxya,:ᑧr 7S5|AZ-sX|Ĝ4PZ4_>[;`NMeS>?[j}xq-3dA'iRWf^J3R>VZkwTYܬLy*Y}SΪi~̘j>kﴂy z\9a-p#2{[h?xwJ3W5;Hc/߲ưcyxW1ozGUu gX$M>soף na?9}>3.9B|>fܬ ۦ{{~_{>b~r-kz,E:0o1w8J7Wʢ$lqLz M7snXV.SFX%)?! S~_)?!&Ndm+u]ݿ0ʸ},>6 c2ęg{a?3j7D*:nqnTP& PqކEI` π/(_cVs։MYfW~b|&/Za}i}iڿ0 >V,UN'܀}0JU]CGyu-iɹ_?Q4PoYҕ2{2Ij(l|PE =`woRƌt7DrZǻSLn6. t|OeHfeM-J3R)/{}< ,iCJMf[mm ~y>~L WRa}}P+Yo9%2ZyBo-S]b9F^Sd{͢9!q0_{g}iUڧ7>=Ƨu_N Bf|zg#{~^aQ@kBklRFa0As9&&u#O s|f[1w8Y'\5@z5F sƞId&mpř5bU}= Ws3ӭ}sJƭxq *@NnVf`M&?3urrAuܙՁ 9S{SFYl^]s[`HDRZ}h8\ۢ9cNO%LV4Sp1'Uu ٮ^#~anΌK5YAv_Mt.5fϠ\z f^j;hvr2qz}Meʘsao\|ΌQ)JG`WnVqwV:~F3mO~ܬ a)ҜSCPSsMWUu Xn̴/bv^=q~kԫy61=`es`rc-#:mNCp@u~YSNW+3"_1#^52d:Ң2&u8oqyf՟^ !k~|])X._N]/;>?ܭ%p3,gs,No?n%q$k쁬q/V7fI֢nNpŠ5yю5&; \S6ǿ 3a҉qse)0f>w 9YO>>p}7Ǡ|~n ܸlY6,ꗱ} y[_gK}0ؕg3öc`ok.^?nd@n?;5 T0 R",`uwSFF|ԤrxQ^+ en1H]"^+Ru}|ݶ KN[ol}Jz$LHgQNv- Rwk]O6=wo {=Z(6)= :3'^7dPOK;I"<V5DC'=:iSNxtaqUÙ6)a }d腛SЇ]pfs_rϢ\Pm.PUנ+k1qpF[v)Q;u::[|d1>>~ZZi[@=~G2í%$M6rO.v n ZsfLqk2u3jd>A641Vi|=lJ5!#8eO#?᱂&3(/+SI4>-YN&s]ȍp>K1LicuG[*/{^ĜJTqv7:6/=SD2gwc91ȅUz29 Ey=Agh u5mWӞe 7ZV%eH%y" @nmWkٓce%kΌzj;a{z˾*Ij(feQ[Z]{LEWz_>Ƨ% 'q04DW.MX= /1'wL5G &f&[]If O_dSF)-'٩?j@n]BAN+b&wKKVFX՟d=4'Gg~0c֛2n*Rp>/3ckA?ݯF=䨲p3j*^n ?_w_F|p4mR2*#-}up /{u dym2kpjjn߲ԂA,+Гv &У>5Ajjn2o %D=n;{f8q~>Ta~'`N+Kݪk{Iں^!?肐Yp5;|IPx4sokX`0U5XܬLU ;ENv]XJ;N\sft 7o~yaqt`[L@op<_?\ \Mӟ_egQقіHs3{Y=\`i~o>L-&c'?,uCa:x? 
hMēM93Z8j#`=r}8{\ۛ[R3Z{u2W£*] w&X0bXf ;?}->lQfĴgPa{6˂5쭽yYoqs2x{nlSsISVweVv=aw&(+d?鴯d]I]Y8YTfHj^VU\Fbͪi}nz7ưsJT^sEz8+PkS"5˫[eF Si|mO{Ҏ{[\Nn~'d H0tcݧhf,Y靈Ǯ>. wEM%pvG}ik-dfOx|w7z`b^ }.:{wQA΀dgn5s[xv5=pCHM ],ϼu_mȄ OKY\U?ֽyo[5;]WoСiG_{5!WzOxT6^vd&iM-ں@@(sy[ CNqm&U3'6ccy&ҢL&&sK+zG|ZLXY&4sIwZ{)0A&'lY7?,ЧM1'&9I]ng~OK.mᆰf̜do~XMNzlG_XﺈRV|t9KbN9mSs-ߴ/k=zkfƉH%=Ǣ]`39q|;cXw2Cn ٧v|]sqs ԣ/cA;ָĤCuq֘$5ǧ%G#!smg[kvj}:tc}G_XN vw+80buE]*zN #lw8OK!YM -a0mX:OFˆa*[ʚo)cFUWW ei٦}JM \2WM-|uJ^t|F )1>Ni&InXD}#?BXf"=A>QSsgPFZʗw׿ugr氾fd%,`wm\6[ʘ?Qo3OKֻ^B׃k'Z+qQ;5)-}윐`=xd&k ''v߸l#kPU]uc yoЩmW \xJTskIgs:8xMo=/ש%9.>6dN))d+u-Ɯrhٸl2Ғ s&eH=7qyXc͆OG}a.1>N1d 3ם3cuG}hv S93>f~eUu}coLˆaV~9ȩclΌ:z1@9o>@*2wL|0lݟ1#a::?!j/{aR ̪};1蜑>6yI]c{®J /$~J~Cf!{rB `nkp{uܢ]Bfb'ܤ& +zj|Ak6,ZEDFl~dձ=EB%KRƌT &Rw8AlIk$U\@1'žNW n+09̓͜nΌT Ȟ3 ڃgtuyAar|Z>8TӜS] UulOQT֫yJ1,dyw!w,F>VU?Ǧ斀ݰiךֻ_ S=71>.d;tmk[s][qi_1s^]\1ur1ig m.2oXt@ns\Uն 외y9o(SZ"׭6jHҿqS ٣+I1crssovxlb[ٛ1 a[`̕Uok`ɾٟv1mR ťǬ7鏃~\O_P\U?>yK aI_1@~!(ygd㼚0v|nu'b8 ^ѿOoZ|y=6m$I#$~ۗ&lwxtq7cJ_}m漮?K*u>o̖3K*=DY=Cb|ܐ&UϋMzt#@歴`0yUe}bk֠S> ٳSޜ2ؓ^>?{38tmM{ٛ^qn-||{a 20Dj:]sV/7_do 7cD[#s!(/+Ӻ?ZW 4Rc9R|YXfeF n_L+%i$ sɟmmS۴fw|]2f琕;D`HUUfmqt] 譒c ޻eOТ9ں6`!`sfLu\C]1#W ܇4mR^+RSsu{d~aM|ZVQ&qleO:tc-6>-Yiɚ3!!.[z00T&"3ndk"&a,1. @ " 1. @ " 1. @ " 1. @ " 1. @ " 1. @ " 1. @ " 1. 
@ &zr_}NCG}1<Ψ!OFp@p>nӱh@5gi}1Pezhȑ40o!Q?QAL#lԨQ4ud=\6u IDAT@?#6~?̀hTWWFTrO='G^IҌ3K#}$bSSS$)+++ K222/;m)!!!u]]9s]Lf?iWܭ%I;wԼy\?,Iꋄ`ϧb͝;7 (e]vYڵW#%%E2}WZdUvvv[K76##wsΐl[>˒3gj***Rff'мy󔚚3wMdYnEN>1zj-Y$}/m[ 7]Svɒ%z嗵eன0br չm}JII ⦦d@w%,YիWR^10.s7ްILLԬY4w܀̂hŚ9sUbR)))7o vm*..}2UTT)11Qz饗󩬬*s h׫2$KBާRzsu t2zU\\l=w޼y!mloJk?SSS`yf$VV_z%$ܹsC=x_%,YbM?%%lW_}em;s̀m ~/**KK~r~̙ Tfffܹӟ踽$Ν;m8nSTTd$BBB{|Wlko޼;***^:ڙv >$͛x=êSVV>S|>~ܹSRׂqGڼy6ol7%lNqq222駟JHHЮ]k.|CXnp Teeei˖-:,o(!!!d!L5x r*C`)}$ڶNۿ|;wlb=ǾO Wx%KXmܕ^ZV^a޼yVv7O?Ucc,Yb 3YfOvvU6"8 `h*s5òm&(d:JHHLtzLhollT]]Bf,=kedd,Rg#Nm%ɪE,ulٲ%icx㍐ իuA `hp]PMg͚y^ڵKsRzk fffZN#8c8xK?8^pޔ5kVHԕl/^7 CS6SP;8st322: Ź͛-[zᇕ"egg[\3wRh=hU jF Jkp ;m䃛b}߰^ZW͛駟J - )l<6k,}0Znjllܹs~a/5`2?M]{Sp]{ @ڃu[,Rm[&!lɒ%Zz~^z%D2f{p )xq;cn]RFbbvڥO?T/}> MHAeZQQѫlXD̙3eתzVV \,Sqq[WW-[(;;;L p l_v5qbٽ#|*++ X.55U[lڐz keeeV6=hi1&h$^m{t VTTX %&&uuuks*ik-l Vp7wd/`/1oϷ|9e,Kvv$); f>*;'X%}@lu6̸ IhΚ$=c[qZ5mu##Ff ErR%Mr{[EI@ ߯zoj1|V: \YY BPYYVQOOTSSq̵k$I%%%͛q,&Rcٳg{5IJJRQQjkkl2ns1s[,mmmZlTYYsb]V ͛#*nbmN;fnohhPCCpe6\qq222'\x-33Seeefx(aBeeebk;wd`޶4KJJRCC*++#P_ಲ2UWWծڻwcpjhhбc`g2(33S'vҥlcwRQQpHC^ls5Tv- \.WD@ePkhhPffϑ܇lwj$7o[IҼf'>֙~VZb?q(k.4rݑi$aHc͠.@!8D qp.!]Cw @"8D qp.!]Cw @"8D qp.!]Cw @"8D qp.!]Cw @"8D qp.!]Cw @"8D qp.!]Cw @)@}}}ڿ.\yq }GhŊJIIaa"Ew:qĨ… m20іQǠ|yϨC.02w\vӀU]]mk]oNSͩC+W,/~?W涵kR kך>lxX$}s]JǮ?&F.b~~>[啯|/|[.pw1 x vCA$%.d8/.0w1dC x vqǵk׮1 fUWWg̴9D ]U+`'o8]`ls#ֿ`{)^]`|Ж+!!A)))L0w і.ġ_0BJJ =eqh߾}zq套^`"%eC~Iw^/(r7MB+&tu}9u* ܍s/[0D:t&1@[Cw @"8D qp.!]Cw @) թS՛8CA]_y_2|} }GL"& áիW3.`P_L阥>s\pA.\`21a|GZh .?gSa"0)1 .wީsMJ~OTݚjKbwK.u1.ҧfES'ʙcrT&@ ҙK3SiQwM& pԩVn|>0|)JO3_9O{I`"DIL4_[$wޤ$w:9OX< p`6|ʄ0Rbįڭ}cǀlHνԯpf;tL}`b{ァ-Z waHQ;=[zOfώ!;?'Wd7ց^˟X?jء~mYuٳC8 ͪMuYt/=ĉ7z$IgΜfR.|}甩z?$M ?Dl;N:N4zt\3%1䠸/Ы~I=,I7UUaF@pXm8G $T)ju~+hG~M#P^8qE1ĉx޹+5Z:UuqB 5g3yPE 9~JoZޯ3?{Wko1{Ou}1=FOe-jۧ jfCOrzWh(4] ިĝ-htKN4/k}ۍ0|߬}뵗UgRr8\)Y\,\xkhy>tYvDoVm@*м\]_j+1J]~mvW|tJsйS%QʭCמ戚[Z]?*@ejmӮ-?C:>5틺eB ^n_TQQ:BbF(Iy|nRXuMIvjV7ڧ=zx;f ^AktKZޯùPT/Ыm=sxhy=HқŰUf%$}"n3\)mv7"PB{^-5 oڏJk//;C{\$*x]Q ;F9ݞ#wk׏JRIҼ܈ӨEk\sm^1\)Զ 
6"9e^?bm($px}x$酟z"8N7jǴ3w"* jO$I^s=Q^2C.:gTpr@өTWl vuMN&Ō0\)m]\FObog`<4ی_+s,)Adjo77<85ydnM'j65QqŅkaI;ւp 66>zp.TΒ¨kLw(%͡ ]~z.FpTt%СgneJ)٨)˗pUR֦åeۦΟ)HYr7nyܭ`J}\.-i!<fp.ٹ0ΨxuqUF?dȍG5oޣkj!xrXΑUm50ެ1*D]p"Řj+wuၧւ'VHY*n^uhzZnN-ڸANgTbKOW*,ۍ dJU]cXTZ5{)f]GE_xUqx~ȶh# y\N// p{"ŘIuUoߒ=g<*x$)եCՕ@`H * WAu1~\L 0q;cmm UF3- fѽ~\xQ);Hl`hyN֙f١igD-6/F k؆WZ/f=3'c1;~?.E pcꁧiB6n$uԿvDsP#0X9F/Q׌:z56 N@8j1~N߭sSڰs%YR_TMur wc΍`ˍdzM}GR__?d&4=9=v^n/|9gBg0aߪzU U־CRt;#>P_7Zl1 '3Ͼ`9{tOԪmUpj^e?TjZzxj숸7A pcĶJ}MMn=ÂU+ڵfCKrh:m.n}Z7V7(5+Kmm .55T Ѻm vvFnݮE~B끵k򋿏:.ҭ[e u9ڵoy\zz<աp7잌GKrָkئMs~m9 P 4ƹ@Ԥz} ʖ>-[Zmf`gKO_jyFIsr)5+zK IDATKSv-ݺU|R׫6RjVdUgCX (<(YyyZjliΟ ߓj}@$)昇*<751MM.<)6ɓaT]Eo_a}YIIRҜ9Zjma Wy2ze0{\!ϋvRGǀb<{}i.}uwj>M ޾皗{SU^R;~>g^\YIs#B~UΒB=F ٳC[Bռyëkp@ ;~x_uxnnm'5׮$nk썺p  jS[PZ\{(73p0bLojQ{[zf]uHyyD`oljyΨǣ4 w?w_vv#F5?̂'VzolTJ?{u%IyB둲򈰰Շ~[7hNAliJv:uS{f 8ԱNە.ZlvyޜX-=MWA|򩈪cv=8LC(Q ؅z38" >DUIw{_8XY5-ic9/yaa?[q|y.g36ʽsuque~N639J'{<`بO /7۶j0(Iz駣~U>+WmDz Tk  o|@@'CcMv:#PƘNqkMs{3kV_X8^U+T\.:,cc276L<jϧ?>s?Y#[zX}_:y-L>,NӶv3wWM{\:F'N30qbT7nqR|X!_ǻZ>)5 sƃ Cb?Ҿ:OOF̑ec虉|\_xrgz^FwZ -'b [-CNz<#0FծZ׫/Ыsr^۟WjGx|hˀqޞ!ԃn-j( i.>/5Bk=(ySSF/؁{סs<9Ԭ,Mٔ<שN=iYx%@k=^ 4C[gn5;}kؑ'G^]fN}v1=F[TO6g?T Vhz퍹=++KIII6D잞]_Tx;&IPff'`r!Ÿ9mRrէ!gt@+[zxi9;.a/oɜ,XR/77tNAԑrnh>49 , 76ꤧzԃeu/U5~ε3@ν  *MIIъ+eeef:˥bz0icҥjhhֱ^W˖-$=zT6͛UQQ1a \w1n ?-гhsQv^yy.:v>^rmLݝ?uѡ>ttU fh vuc mn.|uJv:5@i.W_j /mvH;~\c33]Ip„Uee=:UnWVcKK|뿢:jUSv-CI?"`i٫lCjc1qy+W_\_8_U';#` 6V믛ϐ#/O 1| |h:h5kV^=c8zhm^WGmmmc= Z`ҥ4*-nVRR9Z2Fq׿=r?Bjfx痷@xYyy1[oJ^ڤvن!Vk7XXIg-= ·a4n]].2g:/b:b,vݧԬ,%;!,Zuzcaݮٳ<שKJ _]|uur,^lُ@voF :竬LŪٳgUSS3.}op['%5;` 0ތ Ŗ^yhKOrAZ󵿱%_ǀ#Iy|}C a T:ף~z}ZĪ_|q}̥v_Ae`^a+c/f"F뼰{YXn=\UszRy9.x$~4Qݡ4_{&oUp+03 ́bjKbւU+*̎4qVJv4-ڰ!j{iVv)ih6lPjX7|uu vuIxiYXVE4{etjW-z[zZTީvWc.rUEt*JR}Cj@DU744ReeeڹsǺc=f644bkGT2jjjE{={ҥK#r󬬬A޽{hٲeZx<*++Skk;6FegHYYeӃntP÷HyeFmm`P4-xbt##e۶rA[7h4nSZ]άtE}]Lt}>)+;ovg=;|uuacw[yzRZroMNeKO`Pm`gj|ʜd3{ vuHYW3wyfkSGXA[Ml\T8M}qNypmtdq%_R<555Xرcx<Ν;uQܹSBm! 
QRg*11Q7oѣGuQݻ7Z\;Jeddo޼$;~$@xvϞ=X-5??_n[jkkӖ-[vG.744AhMMMTkqq\.>UVVƬrgYYylmm渴4"Xn[ْnT*++ټysDm^~`bT$\1]# oG}>Pu@JU@`ؽX0rh5 i.|êl,7< vVbO=+}}3ȗ1B[9/]wKP~[rs!59|I=_덨-**_hg IIII}g3X)`'%%@kn[[lQOO^\,D_FFe˥"FUVUUI zjv\cp瞓ja,֟P`jW ɽs¥ձ[#r匡K &@  O{7xJjM pojcb-XR|~=lO+zE@>Pm{wTO4/;W+`pչlpӯ]~LshfC-o?4gZ{δP_W De?TBxn[ݟ$s\Rwqtk.t%I-yY~pL(wՌ4=9r{/(%˥1]ks\.TX˥Kرczfk[QQѠ-W쎖ᶚ_nc`0wL*'z)6r7nP izj~&*L\-R-_^wh\,gIʈ}^mqIަ._Z3z{'f_^3/V)5Qc8~JoroVmS[|wEƒVϼXuN6k׏Jp^^{^-3JR͞1VQQg%o 﫫_X~Okn MGk7򷺺:3s˥2]vB?waXh&>yzpӚSP IJ[CvOdLFz^z*Ls\)ռC-)%m*jcYiz{`KT}jۧ=jf#\)m~~@~V :ެڤ76)%mryo,0j}^m~i0k/G}оW?Uf;\5ofmm$$$hΝP߯O?4x؝̒xTQQ544\tvA{xwL:N5oۮmەtj.utJ ƌ@rWx~g({Z~7fëiSfms\{z^ wzUD˽w[!nj^c*t䪩ny:~JBm| tKεmZ4$$$h޼yPMs ֣ARhѐ2oCCjjjx+ǣa-6+;Z2S`2UK 3(}tkTY# FL WϦfDW?+F,I|_B˅jmFn?ZFQ׏UͻpMT k!Iza'B=یs9bV 3X7Wj/q+4Ņk܍uG_qvkŐ"3^7##C~ʒUvvi.v&IXK.ձcxРb%%%G555fliiiDF ^YkQP8ڿtKw#zm6C@'oyN# yuyٹ:ڬ]~9yj߭[.-حTSS#m <33SŪ1rbbp\:z:y%%%񨢢B j[59PX8ù[Db܏=`"Lzl=|_ˆp*\5iPoz뵗%I~ q{Rk//ٍV G}6jS3Lk.>ٿ[l3gy`NJѨ:gL zIII1CfffZM<51Vqa5`l&=w^vεeӡ}jyNl3 k?@obfFPjء ]~-~twUk^vnTT+վC_?W?4~zc#DNIsDgZ W޿B~Ct6+6On/`셇k+zTEEb.vb/.`3*Ws*Pޣkf&6SX_ZbެzYA- rf&Kˈӽ@ #W[[jkkezzzTUU_Q k׮]cˎ;.IrH`I0*ڿ=WvIҿ{DiM~<za;@Z|_9K b5z[ǕЎ*)jΒA\:ڬs(6Csr|@en}0"p}?Iڕ`Z|0D===fǩXTM -H@^T&5Q=f Ji{vVYR%þ~=2tCp#^]e6m? CRRTQQ!ǣވ*++rc#LjF ;/;w5po[L|ba&$UVVR^׬u\T-@ n o\g?u w9no0,bbNs-dbWWMMs?H=*w$ C_]7Cˊ+qpLZ]CM#իWk޼yLG &+]ة>;-Z??gL KgNwM&q.}Yn[z5.Ip`}/a-[IDATܹZbL:Sʅ<)@KNNּyh"B].ӣmbڴQ9߅]")tѢEL2̙3D-"ײrnNIIQ .!]Cw 7߼o3 m]Cq(77WӦMc"čd\.&Qtǵk׮1 _8D qp.!]Cw A68IENDB`././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/contributor/2-2-collection-poll.png0000664000175100017510000010021715033033467024520 0ustar00mylesmylesPNG  IHDR/4bKGD pHYs  tIME ! 
iC IDATx}/H:S$h jlfzOQ$zcn0bW LWoXe%GNF#j93՛!uLf bE 11r{Ϟa~|k-r|y9|ȘS4EK @& /L^t&?;v숽{jQ[[!;vCi f555Q__BUUU1s̨osN#sΉ3gm0Ç(# #8Q򗿬!o}+y#䪫K/4;{00B9眸//SY|yE.':SVwwFTnaqF (0Jr\:b1}5 {~z/61jƽOEāC^+/D,4czmZ蝦Zek6[oekc!JkA:eˎ vy$&#n9n~tV~2ͽ.__|dSuB{GUO41KDOxڴ摂ԁB<V.;5ֶUGYE E+2"">|T>t#ۖ?o.(l:٠;st=۞h'1pI l-[vāukԆCK^I3'H:H6ܞ$f|NS{V5ο8V.YPmvwk- ̊ /s%͝].6=1$omO<6uĘ{A]pRlӚM1a\u|dZmLW?x>Uc+o,l;)gMu?+//-U7f5/.z]<N^ZalʟH^+tAL41ckω /xKƉy c֧ٚK^SEE5ƙwmzoNXm\ŕ%RcƏ]ە,ݯuuzQgn}>g|}՗Ϝ0:/j(nٖ6=[ ',^xiS[dA4?`LO {5ǿkω+nXg0( :ݯuƖm8|gw?*zPꎇZ-ۋlI J2QwJ|>ܞXtEmg{Gl槶+Xˉl~j{}gӏ=E.1>tߟsw-^qêц]ks +KgS veZgܱ9 ]Mvߧ?JGN#"AqǑߙgڙۓ~SMblŖmh޲=֭k槶ޮ}+: g[q!.>;OWW5clE-ݰO_u+?>4?=nE %۵o\twɱ5SfLrsfت_͊ekzmE^ꈻ6=a{g{G\twqOyM%5X\Wb}k[lek6Ū%CbrcR}k7>ާ j5&7xt}n?Lϝ:[GW3<6IԤaRp"_w?^XdA7*r\^3Nʼnc¸Xzo07y$L4̛nXcbbƘ1ߕK2ԒGm/jg7~%?&LÇtq{hOmϿn{w۲5^ݰ"?'tuêb}:,57̎t(U?}󱩓&,X'u3s㞥7_s/+ه->CZIRj,AR['8@Y=y]:guϜ{A]td|}טoN$mݴfSoS̽.tu<oՒ6/ᘽڍGWKZ V/]LW7]}y]q} Eq-}I/Uplg{G<~oStF;aIh`<;t(7g\}51ͮywn|">ƸƂvXwdxi/ޓ\rpϖmdS8ak[`눩&7;;F|Ç6ŵ Inr^03sߒ? nrwkU÷ĵ Y8?Lbͮ7ڲ-t,^xY|g̊g%C-Y]v.œnOFhKsm̈́nK^a캂Csys{s6̛l;_fS~|co\L9}:ibW0|{E`UO@_S5B#ݝ?\@_O t));*0L8lA}yX?OV^&=uVoj R_7l֧d_R+?O"]2|.?ݺ-_,dcsgו+MO}R192t GНr_g4 g0:/3/l>Ԡ'L${Avl*]Qym.7Ӄr{3 EJeˎ|.m PIwı`{AϤ)9u oDܞ[`<*LXQPTz,]U߇KkkGk֌?ђ_|Qc\twF`}@кȼuͱv%1' !rא~;„$@&m ƺ[Ә4嬣UK,g0qIZ[w=Ӥd57ҡhۣ+op0_FEu|.emijRc̟SQs+AkA2HOJ+?`6~c .׭./}Zg,큘:nMͽ.ή)gĔI=|lٖxu/ۘ0of@eaˆC-OdžqXc&G̈Սkā_\r캨^塖qc|豕7fbDxIQΊekzND[ݶ;t8ix[.HwbUF"H_R7-,ַŮ}󫓒CfUS(<=eRM=BuP+x7&ǔjbk\%+0Ioq@(x1 L]zlFӮ} {ަ>_9~l88oVZvn?>X>/)zHGl;2@`hٲ=Əj:Jt[ fܺkO7kx OAOfP{T?w )J6=?5f9ZЏ?w:d/!?' RБ T:9+usIfclo9l{Pqb\%d>O@jbت! E1PʦcٚM ާ'z-}S/;5ϤƛlOJ5I,'}T?u|~)Լuq͑Gطl9B˜6^`Rs R}zLcʵϏ)I7/UTz@bmK(M*Y>k'Y]}*SiqVɣm'* JW';1WX;飋]C.aѭĢ[0oBKNz,UMw`._PW0PJز-W.#=qK='~ Ӷl˕$nN~¸l}k[ܞc]s4?=vxᒗOײeG4?=+(Oņﶕ Flkb"RLw۟#=1Hdmǣn!}=O>^k}rM_{$l׿W$!o[o>$;o]EJ]([7r<1TݶÏ:pRT,o7-ew<9 ]m 1ށh#"ݷPN3=P~zNwpq~ R*X\T`R-rPP!$W_79`+2@l9mQy$uت>{]|}c~qU}?LJFCScѭ\l7nW%wvs[}Zȟ|^o+nXgזmϾqt`U &*&#KGIJ=IǏ*{G~kSgaU>|xT&cX;;/3k4fL? 
Jo[XfS|l;?Kֶ|k9.yzqbu{"2u3a\udB~;/1Hpȍd,*kCO_\-[Ǯ}c󹸲7ז/*iv_0/#[q]zwgnZl++׭ߧc@A_7Vd6=ѧ;]e nzٻl2LJ}Kl+2D~Nز5v˶ܠϞ ÒGr)gc+ojr¸xަȠʦc꤉1嬚L+dˌ1mت-_ IDATOm?rMު% mا͎췷k =ަ7H0/Xt1ov]~v#ջ[ێQʧdDRUتfPk[ޑ&[yc~lȷE-;l?Rtتܿ⺸ZgDDlc!AIһ_Vn??fnfLidVwIw\zM%'|Qc}׊K[owֶO3͘^s/+8(1uxlr]q;Kly늎Gwm1a\u|ֿ)Ɨ2~lU;&ċ>s[ɾf dB{GA6a\usJ6̛YMMWoz}ɺŲ5⭷}R0=+vC.y]oS'MKfd>=Е6\k7=go}>_홌{Kw[uc>dN{7KUG>oܨ|K&dzRX\y3 +FuE68ll̟Tw:ib;6=qՒ!)`H3mҋ ȵ S^5ij28=+Vާ׹9xD-F[U9uĂ|.[ycww&YYQ8Njo\~䁮|H#^P߿)nsI5̛'ׅ7`,)Č)3YXN9&V-Y_hLuc݊bت>}ͪ% J5+}~!芻R_~gstО6\_k)ٶV童N*6K߰63/ƚ9c2M8/÷Ī% ~kcՒ÷Ą\I#97G֘ãǍ\.k֬wāts{Wwy>h?o0e[3>ض?ܴTn(V愱U ӵt2^q?~ioDDWu\kpe8oX绕p,])ƌ1皷fQ=,xJHΉ%9YEWIO+ %aެxCq bՒ1eRMLPMigoiˇ3ڷ!kx;Obt[H/?룟-/jΉ\cʹ2C&ƪ% cٚMq;>J[bm嶺/0'i/yi7s91prVMS Km8ή3FYV\2,z`|QOkcg{G3czmgNY7uXt`Dtq՗kceMd$%IK @& /L^$2Ix d$%IK @& -^}(fG+!λb;.&1Pzdu!*~|J\r5Q=¨8Cn ů5Q=Qw5BvҩPF0J:~@=QUc=Gڛ"@=G %Όi"uw=+ zTG ɀ3"~T ѫ۫Pm @& /L^$2Ix d$%IK @& /L^$2Ix diJZWُyn UC'GHvfw3E=CZ/]kE=GֱFԣzKбng 1T٭=QEϾ?}iP2cRUU X]]a`]SSQ`yƬⓟdtttTꫯj 'ŽܪcΜ9Q=zlhhؿ`ԣzd&N]vYfΜQ=2qE΍FDg>;1gG̙3c̙uԣzJ/9,#?l+}5ϛ76g.>#~lP#ޜ]Fvwn|߯{qͅ?qCzTK+-?|9?1ذ'=;QM*9y+v?'c+}BCzapW""bƹWdž'Rq/,ݯwņ?/_x,Ϡ#d'H\lxğaOcYAԣzAygDD/ݗǑ V\8#>'7G/5's8;zٍ 1z:|L_w}riLO46GPl_ot9;ܞ|SoWJ NKI'3Ƅ#!L=7jlP# ]R7 ;jƹ#"⭟A=ICxq#Yt:#"G׏}BeRԣz*X8.?z^ j興q_DDLC@@=Cxqahy=mƏ_錈GPG/ǁ?0.pjHMԟ1b]qWβ7\'#58G8/ɼޏ%K7lI<97^3Ν3Ν_B3P# XEÔ+z{w5'`\=lNJK2͹FW![j٥7jLP# [=~jVC4&G%ǯFDϲR^xe_ĝR ԣz×{Ö{΍qߠWCNVK2-Y?܉/Dzϝ~@=zT0\S>z^-Y}P@S4YV ޒPh#Gns~l΋ @=G /ɬ*|2g#'}ԣz#A'5#?%,s+z-ՀA=GVw¡VjiA=%tw*^ߍ?㭟"n~A=GzLVA'^|`\| gI>à#pd҄ym\T܏=vQ=ztEōoO:i#dL^$2Ix d$%IK @& /L^$2Ix d$%IK @& /L^t&`ۡH3zTGɊy3OZ!@=&dؼy(r&N@=¨8CZ#Th֬YQGFXmmmyUUU\C#1>J;v숎LL/b:t(O&fΜi2IYػwoL>=jjj#q򗿌_|1"">g_TУ;|J(bUUU\uU%eKNxַgs9'.]Q>`DI^tij~{ݻ7""fΜ7pF{bǎO~2,XQ@-IceD޽{cݭq`tttģ>СCz̮_Lˈՠַ4 dO>K""6o\602vѧsQ$䄞˄FOGGG^::T$H03<<Q7uo]ރ>hHxI1AS?ן[ 31KvZvZϿ:t(oKO>d~s7ommm FAwwws='1&~oFƃ>FUǩҢ`8 .?=8{8jLm^</b\xᅙ:Nٛo=]3&~D% ݧ_Sb[㍟ΝQSScYa+¾;?|_~{˿ O}_O-+Q[[Ї40+/9 .Z #;V^g/ .>cLDD}Ƙ>u^D,9+"Ό~3OIGj1gʼnga3O3VG,غa15G>4&#<%'˄FV:.Ke{{CDCocV1U>^`:@ FF.+OM 
J}cz{G}Tm0%ӏ~]፾6G雂8'LgSIp9\m?S\j\T{Jɺ:}L_{r޽3gԐ0@tFOHYu>{cIcŻEsŇ??~Ƅa_Wwߍ#gՌNOmƫ0Ğ|غuk_xj|p)?80+/9 4LX #[åļs W<3OjLdXY&nSxG`s=5{Ut6ӫ[Tr`pziov)1waXyqme L---yljsƇN}{_tT( .{{bxZϿڳSGǯ_h*byWݫ_!r=믿=O)|vi%?vo?g%믿^xF ueB C-{c:%^^[Ϣ CzWYpS9 ;Oc08ǥ .!d?8fе Os 2 >Z\S\C;ۿuf mǦ#Z[[oj)oiiN K;C\&c]6vƧ駎KI*@}cñ*7C_z_;-b6>294$% <{\QxVGG'` /9'gC\'kLdr' C)`ix\.bdG}Զ*0qk8j_3ħC^r\2=(`B[Vap9! .t&ah-3Ol. c_ǭzw#߯;ohd&K2o˄O6NR5 'rco=cT{) fet=5裏jp(`YǾ-Xݻwohh$F+LJWRpui5cF&"f-'9@}ߵ,Tb,P\.o79fH0}]{{{̜9SClÇkwpYΤq6]9K.j(g⿿㳟? 'θ Vrri\zpm{7~ޓFMMM,_\rXfMOM;eH/RUUK.`BJwwwq=)![X7of C4M@VU\FD_=w;::NrҸy^zǞ1?^6(詁{3"9}P$_{7t1.T™U=7#zH>}z̙3VUpL|C÷ʼsO^U{*0DTl̙q 7{'uHgV {KKː=S?Oc.ʿ~S xG.jjjq8)=[TѤq=7?}/ ?f?3ZZZbp ̙3={Yp" wUS N6cǎmyv*+.JW6osq#%1鄡S ӧO*f{qfV_{F`tPJjkkLhkk7|sHWOn% %pB)w3! YxYWWF S4EK @& /L^$2Ix d$%IK @& /L^$2Ix d$%IK @& /L^$2Ix d$%IK @& /L^$2Ix d$%IK @& /L^$2I IDATx d$%IK @& /L^$2Ix d$%IK @& /L^$2Ix d$%IK @& /L^$2Ix d$%IK @& /L^$2Ix d$%IK @& /L^$2Ix d$%IK @& /L^$2Ix d$%IK @& /L^$2Ix d$%IK @& /L^$2Ix d$%IK @& /L^$2Ix d$%IK @& /L^$2Ix d$%IK @& /L^$2Ix d$%IK @& /L^$2Ix d$%IK @& /L^$2Ix d$%IK @& /L^$2Ix d$%IK @& /L^$2Ix d$%Ii-y晲waޫW.555~ ` /ɬ|pԾw{{{s饗Fmm?085mڴ?N81~?*jjjdի  S⣵}C߮99ǫǡ_.x#ʿ]UUK.j$ad%U]]K.s9'ovg)U .=.0dZ;v7Ճcmۻw{\,%7` /9.d)%Ǎ0>*; @FҊJeFs[4MLծ d1FWǤ)t:D $jTQU֬]jo>~%u^Vz3$KNoCx [/a[=`\X%lfLKk#HIp `}`\%"FwLK DD*$KDp&%^"" 0 .+TIp`޽*((PUUh0wܩJK,!DDs*))$'`RExp8D 577kΝfpi/暛j&Q,^}_}O :1Kq`%8a"ּL?yJq>O %դ^Nhi+ZST^1ذlȑ#x<׷FK+5fX8`\ڛK{jϞ=׷3$|euu<m^Źn_w\&8Iʤ"%XXIQZ _q#w l97,Y6M;HK]O,` .$T^TUUv?X&Cx QUU|~9Xw9 .%X@h\6KgGΝ;˕Y3, hЏN<)c~Mp @o#IwEp @;*/͙3GUUU*))$mo]я3+f!fOxx.1brz䚕fUs=gw:{rss*I*..֡C4n8%3#06]ʬhzwy[=v̀$NJo KSbb+IQzK233e˖*++x|YunOU}@ X`V/^a^*p!q. dee}ܥV&&JR땕7}CI ^^k6`1YYYZ~bbb$u'j 0o.]-ŋa_x<;(?^=X!ek!"͞=[V ٳgk…*3-Zd^{|=Əo^ԶYٳv` 9NYFjiiWdB~w:wUYgzsI|Aŋl1B^Wn[O5bĈkfggeQQ-..6ZjlUVVafΞ=[/^ԸqyUVV… *..fc Ax e;vPCC$ҫ6lܪ&O 3m7jTƢ"-<'|R[l1~7$) Ço[@dW\O5O>JTs5|rtY. 
%؀`ܹS%%%̯3$*=Z`7knm|֬YJMM5>|x@yg.+$y] D.\UtN/&l'j[VfE 0:+\wg C/^ ykWcZUmT^ALK^^/Gr+IwBpͮ*CUYZ8yz|C\xU^ -YD_6J+:wbb0Vnת ts4;_3cL) %TVVV@"U&ƏHxigϞ `H/uqjLOI;o|c!Z %XVVz)0!tc… 5n8UVVjkѢE{ {Vcv~O0𴸸X=*++UYY^xAg6WKvCx 67qDYpA(**v5~x͞=[wy>sz烮VVVv7;z뭷$T%!t:f%$$0~֣^.K^WO>cykkrss駟yEeggƹ]9yyy/s);;[O>?Sy n NS6lP~~땕5cժUZjMNjj͵ 3].Wȍtݹ']SSS`|Cvp8a-[L%%%:r^F;3ƦB7S K0G1SNrp01ظUܨ,--Ֆ-[TYY.+:\{NR[[Y… U\\̐m`T^@عsv]j՗Z %Tss˥&`Cڱcc1H'frlJjii18srRL,`#7\L/&9;wʬh⸅` 9rDԔۇEiJ2W[{jϞ=׷OG+!&{Rx ^n_>,J+g V/s΀IQZ!c/bj5 (,Bp @*/"TPPz؏3.'/%`b\.>%f rc~/WIwL `@# p:ZfM@N)&``#p:zW4vX;W/WDx p8f͚m&0^3cG[cWr  `ACyyyVm=B 8/t=_(LCBzvܺ^&5_x0At}Ӓ%Kt:x$/Y3,*{}Y*`c61m %efvÇwU8JK'W]ȴ4ݻe$U{|ޣ¶9YF;vӧ%IG[%`"ej¡~E%X?!=] +Vh¼y*Z@u#n3sK\\ 隻1'+k^ 9PKrr|~ߞLzKk ~%34<59sf<9gΔsL C-}|xgARbMwac펤ύԼm tijnnȐ[Oe_^zU)cKHO׬_$}w*ΘP?ضPQ>3g׾wfO~2`1a23嫭 OY 9vLe,~XRfNFxyWWݧymѪmchh)}|sNgU*.%E.e:|6$ 0(ҔhO? .KwP^:j?#^EIؙwSss{{l͊~|땼^5>?~-I~[OZ4Çb$By>Z: ,߷OU_dM[\k;NzSf^+R+_M|55W}ٹ/W|OjjT^So544>^33`}'//Ox @D έHJV|JbZvćV\+߷O3*m<͛?oѫUtUzWS B7v :h=oUWZVu\vo}>;q))fq'vkV@0ホjmB7:)Ԍ$K !Sv|z*tKr:}..75$) t:L /mhj{{赔zWǶm3KI1.+۳é:v]v64\Ÿ˪ÇyC嫭5yw|~u*t+dx|:%ef*!=]?ڲYeݥ? 
{ѷ={(++KI Օ|WS :qЮqII/\#Fjx|Uz#îMT\J⒓42=]#Cn#ICbE8gj}-79sْy:|Xuқ =gĉr:f2ExiC{rMerBMcc;)99a7ꖸ55[oQihwt7?~X.AfCymu.TTSp7(ɯ]jknԅ򊀖|{ 'jjT/̙9լ4-U~{jϞ=UUUL /k]V%U5[l$w̍4Msoc褠cKI [UyLC]i23mՋ46|߾_SBz&-~\'u儗ܬ:uDALa--ɣ^o8}R>3gU6r˺O;Ɩؒ v&efvƥh X-_udk%Ǫ\" cT9;l54>^~HRڎVۄWScd~Hq))!aJNM].!7By$iJNN~JnnOQ#QVP#5a}74g=/^0=fͯUťxlvk[nImAoJiih||Zw߭||aތ+3/|hUy[λۃ5w7*;מ]W{;|+$͛<q.z&sNحҊ5k5/_M}a{lVCy;ycu U/4ih|=tNfv=mۮY//ת:|XHJV˥djkuBλZ ۬&vIDATy˱]>ZAnެixd2YVؤd3P^C7e/Ivc۶wL[B^wH\jk;aeUUU*((P}}yaQZ9cbD;& `{P}j8}ZVPRffPu ݖpŸI7^V>c۷kjnnsL [OnNnV\JrAWSI,}qcl;Mc>ZZ kV߯?oߡozM7ts\5mr;WV߯۱m52-Mqɚyy!+y zi_Dx [Q{e?93؎{ iGX&@~vWWVv7:vCBl8,$h 4~3~zK0)`EЏ\.rssͯ"U&0^@?땐`;wšjha~%XԆ iQš:Q$Ká'xB>y6r@Fx 3gmn[:ȗ5%9[sRZ.3DsCx evX] m`3F(/6LF~ DK)G5mO}ll.Tyy=՗##;#ӹncR@1p8-^@WAA땛,&`+ݫ={_9r`;AvU]]dl"DjKl^;v5Z!zrՖ#cL[m$*3$I,,,\?Fi]VB_Eo8t [:/U[.u Vzn}Ɣ(MIdl,Y{wHBx rIy<כn5%0^AR LϪTPP@%^@?:y^cT[fS:ج .%9shƌǶ\\UV&0^@?Ӳec;w?^QywL`" p\zWikh 0^E8=!0s0 %XQywǾlWiG4Sp8vZ- s _GhرL"C^8NmذA{޽{%I__vN~cjڵL VhZ`֯_PMiTa~RŎ@x 6eTa>汯H^o]Q(& `k-X\ AtU'j09[UxYa<5oמ8q{ "*p["O p8Zd\.W@&ve5/Nq8vm sƌnV+Y^W<-(կɓRb"{|x ``"KDx /X%K"`IL젹Y{UUUU^711Qs̑!1K؂yyy<>F8l߮W_ҫWM:Q|Ga6H& 6/X%K"`I,%^$KDx /X%K"`I,%^$KDx /X%K"`I,%^$KDx /X%K"`I,%^$KDx /X%Kf U__/---:uu'11QzUPP`1WWW+??h45V;ح/}o̯cbbd8MjmmmeЗ`2wOO{ѕoHj .׬Y# 9YFcǎ5ߕK^_DBIp л/o`\>K+;}N&%@!%!$[ +}b^r`\KXLKEx ^#$KX^_Ax [Z/a`\X%l7LKk"dIp `]0 .u3&%^n$$KAxН^Z[[[Df竺Sݹ"]|oxhMh6G?O>yՍB~^ܰؐoT}v w~_8>o`xSSu ӫ14:q3~2;a+ +*Sݹ󊋏UҘQ!O,׬G?3ôJ55~8 5=jG)į.5`klRݹJ3Jsf+į)'Gݹu=U1;!C̒tJ&Lg+ܼߟ<'Uqj< 2ɳa匿;w^R|M| /:g̙7`,ύsS.5!7y{.sǞy1s^ԄImkWymlԄI76ٰuƌ 8?xh!zSaF:Oǿj Co`3ZuUtX}nۥ%m}oCz{.=3? ̫(4k(9pTR[& W ug&ݠ1_OG%iC"I}Hon*ԔZƠ,*@ ߏvg7,z{ۮ=kX Ռ9m!cτh]jҺt4a8mleKojnfd?3MR<eƹFɳQ7ɳQ<5!ϗ:BV-]X[ۤm܅]jү7vooۥWi{ցڰu&L|Bu9ѷ2dw򙼗Tw~8g:U/m}o&L3eZRg#j5LV=~~I{]|$MAA\k;ym~@bsB]ke5,O?ThwLY*4f\p4.5a>eӥfIҌ94f׹5=zW[ksܰؐuכ %IOLKW,VZF2RԫΝƷɳQ.VZFƌҌx@y̼uᄌq f(7?7.5ڸmYٚMwLYY))0kmնݛ4ct~!30[5ufNΟρ s7 ﳒG ^ /Ci2}rP58fwuΛlض6dœsfr¯sHv&>/*@KM2}r@grt‶Mަ.7q9B^;SwC:SVѷ )_j|c_c[ ώwgr^^|qZ~CcYҾ> a뚰|zmpkX]G?ӁCZجۡڨi|+ 4TPkQ.lsdҘQ8qy3N?3 Q[a]Rݹ*9pT3N2:=u< ? 
~!d]_QViVMf5RT? nBV)jʠhl Bw癇xl qfnFkwLӶЁ݇t`!]T {굶pՌRGGEmAgǹ:]rF;:/uΛU^8]ƶ1pCr9`|~e6y6Նギ㿃q CWՆ'4R{5WvpXaӻ^2T 7Xj݇BrK5,C8wQ޾ˬFjîXQ Kt)ڒΗR_<)-#U%kQn۽)(4Z;0Rz Ν7Xӿ⴫v)'_3h6_U!?sR8yV5?5YS 7'R~G'z_ȼjЄJՙJ;ڞǏnOr|_o*!}o5 ? ՈAo>ed}j՞BΝWlC.s]gt΋z&怒5YOLRG.WK3|_IilUF yPPVQV?Y µ['wd5Y.5U~b%).=p v~zZaõ!RvS. zFܞ~-6MIx^n..|n땶//i(M>٬|\7wQҙjC0~M@sl-̽??#)ZzmPKœ]zye._cFQ}Hml4?sTr^@3LWScJSɁ0i|~6(h3}NL?g2Rö?L徨G|͘3Mz3P}܀xF wz_Yɳ0i.5PtHzc6QZ/- 3>D ?%\jkM­-|bؤ iZbq@wZFVLo1w6ƹɳQukA-K/6CϒG{Bpyejg6?[`\?0.g-ػz&ӽuJ_GlC 4olԺuҬ,0[?ן-yLcC**lwVŚ={ >Hm­鯢RMJӽF.5u(_cLsn{.W'??;f/n9/4k,h;R}Qu7,ۡ{ܗ㿑 b X%K"`I,%uxܦzZ9k]g*t΋ >G8Kjmmm|pժUz>re˖c!Ko$KDx  QZIENDB`././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/contributor/2-accessmodel.png0000664000175100017510000012576615033033467023564 0ustar00mylesmylesPNG  IHDR3TzTXtRaw profile type exifxU @DTYR~ ;hz,AFXGL$>+L^(KsTҫ&3د~?aMˮ%j+7v; }>B ,(.AJ iTXtXML:com.adobe.xmp Γ[sBIT|d IDATxwXWҋ􎨨XX&Ɔ%[,XbMvMޘ(X% %`7*bI |‚K]<gf֝ @DDDDDDD$TXDDDDDDDLfRaADDDDDDDJa)5VU޽{qVQ6|pՋAD -3ʢY DDuY D020;F/^"cc44XDPfQfgy<+IeAD 0>kkk,]ADDDDDJ13d}"""""R:ofVV):$2.[ ++sqaM!fԬDDDDDDDTfRaADDDDDDDJa)DDDDDDDTfRaADDDDDDDJa)DDDDDDDTfRaADDDDDDDJa)DDDDDDDTfRaADDDDDDDJa)DDDDDDDTfRaADDDDDDDJa)DDDDDDDTfRaADDDDDDDJa)DDDDDDDTfRaADDDDDDDJa)DDDDDDDTfRQcرc>+ "CqMܼy=Y)DDDDT0 "bcchQn&DDu8И?>P**v]ر)4 A 2SR|>"&$7.r-jlyx8/zTT|i@MDDTYlADTGZ` z;};; ,^ݠcn5M jh@=zǚZ*q'eE;"_$e617tYhMt+Z */<8kRפJ? upȻWР2ؠ߆_Do/b/_AFR**0tpOP mۢũ[/5ipԨo_h5n[;wV꺩13C}77|0Sغq;p\DDTTxBD, e"h!50Gjl,rr;wpg%. /Ǝh6b8+NNM*15/Rp{.dzV|{7~hا7/<83],~" lAXwcGGd|(#mGGk21mF@@9H<7 mq4ou]]D9 Km/:* G}{ŨQеBzBۻ'O @àke̔D?ۻ9= mL)u}V?Tuuq(^?}&e p&.W=hqp:NNWQQϣз/aƻ0h0xu8r9o޼u \ר_?tZ@jMCCvqkg\Y3W8[6ӦJ-00UjwvJ. =[Π:}%2aذ!L&YkaI&9VܢsR@ 8u8 i8 fϣ ?\DDTD7L8 ]l)޽uqСXu+t]\ߣG\\\₃OKWhԷ/ ;vD>}1o·*Uj*t]\2brss1C>}`޵+MZd=@OO SWWt;!Kz?a֥ ;vt)=~\-uի1oNj`|*Tҙ˗1|\ Îa߫Ϛgܾ2}-0 nn0r$|7oF˗2#e)^|LC80uuEC~ˈ0 ·W~!@]N8- ˗T:̛ {7{ѸWe VlFdwaO~84z oh9z4L7zxL5m-4? 
|0n, VaO_u 9T]n=˭UFeLrNZZhn-_VjOŃx1wP[J+e#r yhiᷕ+!TSÒ$&Fj}dT jjضjԅ·ѣ=y2DHJIs>m6' 3+ ?څ#)%陙͢Y8s2>1g\7oaҒ%uYY"&!{O@c~nmÞ=!)%9"`զM=yr@ǝ;1h </_BWQ ` Q>>p72knEѣP,v KߣGFijgムkfAX燭V~!bA5M&*J4x0Tqc︾WɘiϟݸcTTx TUyoق7QFj\ލ8(ICOΜãGD>IP0կsTyEafzpǏ#7+ rQ BSy|445Q[?HB -<<жEDD 3զys,6 陙bb2tt87kKɑ0 [8|f ` qa,􄚪*D<}*)CVc'=3<-B?wNj}jZf^|ǎUh( oǶ_-mϝCsW87klၼ=z~*~+< B 8e {oIe6@ `q?xI.CX |};ܾ-J??ag#H Aٳ$}Ь,1ed?yy!;v }UoaΝضjJq_ghPMS2TeÀN+\= ]v`aw q+YJW0:u=ǪqˣQ(Ǔ<4e={FϘ ޳mOP[OrQQwq矘9f ~ٳ!7o{vbXܲ?sr 'N,‰_P~~k6UQ)̅_4LD@ xvźk ƍ6Xm;99a%4z4޹S_ɓ1eJߥM^&!Mm<<+gϖ,o`c'">9Ǯ#GоgeIѠr &LnJݺCfF|rr-1JMŔ1|q xOEl{Iy4A:~XX_VV<`nxLvEJt,##1Q"K*c]5uU-uVogm֭<2J?W`өmm&:Z 8zQ2'"<̨ l]FƊaoc??`Ƿ( =`a.\&s|1}R˚5lX$- pbӦ2_/6QB8Uj]MxԲKE}6xFcuJw>Ĭիܹ45lۮeKD:xK3n`m-yX>2ץ/3!((hPݐ 6r@|9Qo#Ғk;Jɠ ev/o'sJsjy Z\~.m%3ܮ?\߲>CDDކ?y{c_cVU9ؔhZéؓ2iJOYU!ɯ^aU$$HƩͫ(ϊα|uRx+-yoZ <=W@Ңkv 5emPWd+9hUypuC*9}cT¨IcuZ0C u]]89EtuefA]WjZZ%֒CMs:+E (5iYDDD 3!]ajdO R@ZF#r_+Ƿ6Occ{dhR|W޺_gj!wK%clf zrm@$8MF~pƕY˖h?w۷_jX7m };[$WӞ'C]#I +'L}5Yg%Yk33Ą⌗W۹Z.j BDD 3޵7")%HJIͼyr^WK s Uxl+cx;KK,1[ԅBB}=ji!5=)_kjMz:33%cYzוlyMLrl=QQ p0.\V!''SzGN5Ku35v~FhhnAkhn+W!hdHQ|ML`ܴT~ M[th7k&7 .-`ب 3f1IuF몪(PUEA5YgeՑxԲ< mW4ןa)7عd )fV{e[:N XoGD='L(CdCdg'*ŋЯ̙ZrzWԅ֭۹F;E_hPe'$Yx4 fT|sF>GF8 5MMD__43->hЫ>cb_ܹEDnnx Ϟ!ܯ1{7F>؅( ߸k89_R%K0&(:+A^PUWdzE^vD:{*B!DD0]Yψ>}0{w޽ѿkW<ҟ^<&p0rD"$x4t(55#2* 9"ⓓABdTdи~} vF/\Դ4G|T(42***u(۶ I/_"#+ 1a"degCV'5UU:r7ldfe!2* >7bPSUiN+ɘhN]Wų8kPue^ÇAlB~-ܺI"Tqb4[ΞEZ|NBzBE"ffŃ fDNZׯ/}N1'Ts {$޺ܬ,䤥KQu&֨h'ArmtaF EN Ӧ ҭľp><ӦWiQ3Np #6k۶S|1iJsrtM`Pl5KE;y,)9m_~'KQ܏;wb296h_o^,\V$ 0ãJ,.n#)EvmM _/bOEJm.4&$ Ur2*sTzUSaa(`2 sF8 [=".w @OGB!,-1f@߹TŖ+A&@===tsq УC|Kʪ&)*kAD5-3"iDDu[f"RaQyB ""z+DDDDHf\h`ADDr+hl߾BDDDD "" Vs 3Hn%g6d;0"DDDDD ""z+DDDDHfQdaADDebADDDDaIٴot]\XD  r=V1 """"00 "DDDDfԲ˷na QNvsC#y3^$zΝₛYփOΝ+~v躸G..uqӧ%\}c aǎhЧ>45H,'ɺϱ2|8o"DDDDD 3J??agaС45Efv6.^}˗c!<_|! 
I!C0(xl?~</3-,LR|f ` G63C|r2>[~@^:Z핖??QūWf N_"=öU0 y8 %oz 2aU'@_W.½[7aوONti '8r F'U֙+WTf2OO,8Q 'ND~AVw;W^ ^bTDsŋtLmu=Ǐcב#X ;(٘8dKIE.OZf\ەגB[SRvٱ(DDDDf2s=+gFdTCB ׮aU#$O2{DZdt) IDAT>z@U),ZZx3gPOOH1 """"e13zxԖ-X em:nMHkpܼ -к)[-, Nx/cADDDDuÌZ4qb8뇫wZ'38x4v :yذj)[,77Wsw풹mKp2>7)0DDDDTW0̨EONƄEpJMEnn.aυSapp(:wwhkjPp07^&`ADDDDu [f(\ڼ"AUlDrD"DDDDu!@H1 """"T[A ""0*E @ Ƚ^SV077q [?B666Oq=^#XKz*FMBOO_>>SܹsRy!"b@a[S޺l[~^~;wʕ+ر#.\PcNLL?W^<:V8v:w/bݺuHLLDll,szF ???ÇYDTԕ Cch{ 3dii={v/ꊨr{ +ꌺd1 "z̈́&Mݻwc׮]:t(>JK.aÆ oK߻w/ꛅ2MB\C^oz=6hܲ^$er0DD uVj SNE.]`l\־|2֮]|̜9VVVHLLh"bݺux"RzQQQRA\>_271۷cƌU/_xzzJ)/V\sΡUVDfͰ}v̞=wAfXDY2D9Y3弉eUf gV 3H)%''ŋBo@Éʒ:yέq[[[hkkFw]<f@tt4222M%?P(ZoVt**0ШsKFN~~|ɑZS J$*lqQVamm JZw(S0ƒ-imbo UUpP]B¹/,| El $Gzn.ssJ$bcc7oJ-wiiڴ$Pidsss#>> XgFDDDDD﷨(DDD ::HIIu꒰BGM :jj0TW{Yr4V HR-<]ZJhܸ1lmmakk&MH-5fF:###t˖-C֭;wܹsw9Ug%붦Gdgg--R2r{""" PO(:tB U)HH`eND"$R2022M4y7j,̸|2`8vdH$1i$رc۷oݻW^={@F3*:U5?7n ::{ԲիWn:#..999@׮]_e˖2Ŏ;w^ܼy^BzѣGO?j_|TUUcƌPSc/A"""Rl2 .45a(LSjj-1~,]2lAzn.H4iMu5>F:tQBo7o+flܸw܁jƺwѣu⡀`رcO`ee??? ̙3ٳh߾1h ܾ}/֭[aee8ܹSNŦMpQM뼽ѧOT/ƚ5k0n8:u X|9̙wb˖-aH!ܸqn#BihP]暚 .F)H, 9' 7BCC pΝ;ɩFtQAALܽݻ֝>| aii5k0l0~m2>}iӦAPWW!q%Yyu+ +++?QC׮]\rssuVCKK &&&5j֯_L,]T5>|8^ÇPWW=.]"44#F(B(֭[8x`Νfff066իwBDDD #99탗|}}?Hڪh&&jcw++ % 8ml:ZK b_Xh|||t3@PfXXFFRwލ ==W^ERRt/-lw5T<(((@LL ?ϟ 6DGG͛7O?PUUŋqIagg͛'`bb ׉lll|r㏸>Ç=ݻ|8{+Wu şW^̙3.^Xjs:+n+(ƍC>}+5ʋ@ ĉK5kT8V@X#,, }.|]t)nȐ!شi,Y"Νz-܁bС8sT:PQQId%ԕKIb1[[[tDDDT.]kbѢE k--t02GVVlccc輷teB!须١Bص+fq]q044޽{Qa^zظq#e˖v%իWm۶, /L܇'66k֬ 6lAV$9s&̰}vԯ_ؼy3OM6 1+W,Cff&UkDgmm gggZCC29;;ǎCAA\]LaooRĴ0uTJ-x=zte3Fj{,YϞ=^o׮S$aÆ 6latDDDT+qQ|رcԀc {HC.lc~hzP[^^^2{jQVDEEÊ+$i6-vAUVefffTm... 8>3pEܧ]%h>>l}AŃVpB? 
4ёtEIII;*jHq@ ̙3Xv-._, *ʒOKByaXVѽ{w?yдiS@nJzУG$rc;t-Y@h@ L9~2EFT0T @B̙3jjj։𤋮,SNRa_U7G%LSSS7nܐq P$~JHH믿ƛ7oqJ{TTď[naРAx) dggKZ?մR!:А-0H` >F##I7qS'52իG^^rss_('?c gqǎaeePPGGGjQX-iYe%gV~:HvȜʭ1tQ1ul.ckݺu[9-\me/dEFT5ڵycǎ033CH###ں 4׮]ÇqF4lׯ3^%ۦORӳcXf ޼ySc+.\+V@UU?'FMGDDD5*99k׮ŁBffmi z& UU4`P#66֭þ}j'HKKݻw֭[KZdb޼yPSSêUdv4hqA~yyyꫯJ=H?}ɰ/ ,#FkpĉR]v-) ;w,!$$T!k~(@Eg(b䯿YFEꤼcgbǏP8FEϹ+*YQkDcHMMEhh(_dKf$'' AjjұcРAhѢJa}$aŒ%Kн{wL6  駟#""qE[N.3Ġ--aZ oooɗ$w1)]@l+WZ'&РAɲ7nH$e/,ѣGeV%]|VpJ}e˖To@޽;{_Pnݺ!((gϞEnݠ'w###$''R-z)YK.9F׮]{%}J:}4/|'pss+544l2lڴI:#>/044c<T"""ꐞ."+гhvwXhb6(9-kq%[gl޼DzeФIcʔ)Xz55k={ŋ m޼9v) oK>XZ ___? 6Ă gϞ<[nг2a/?;v쐫>Ə;;;ȵUɓ'#((iii>̙HKKKL>\<%m ٭j5а.BCCe$Q IDATmܳg`oo6mHYfԎǏQF ݗ""" <[ˎիq)Ġ^zի|||$KMM?ÇHKK5jcΜ9eN ;v`Ϟ=q^~ CCCiƍȑ#K!}aȑe/EHH^zSSSa0# xowhԩN3YN@'G5iw'p#pZ͛7BP兔w9 yy+&УGi15VQ & gϞEƍ%˷l/V^#^Ӱa2[jUjxSyX[[u$>L0&L5𐻬:ȑ#e?to4"""VHIINJ ACUHAttt쬢BÌ3lٲ?:w OOO===ѫW/DIq?,֓srf ,-3;wD͑  _~%444Yi |x}*H2H!$" Ì"***?>ϟ5"""""zo=NM>+jUHrg7""""""x5jYgADDDDDDR|G!IIH.wv3!"""""R.nHOADv^IF[Uqqq066V MLL@ /ⰊJn@\b"X1TR/ 2432`P fڶmzr))8tPQ{equ (͙JMgmWefe*V_WnMMU4m3sgdo~ܷr x|<\\:׍r9\B!t^EhDf&## IK\v~>gCr^Ix_EMŒrҥ ݻ}0ܹ3~`2=4h %Kz̲eP( 4H\PP(/{ŋB!DmeI` @na!޾jb\LNf{d$gq 3$2W0_l2.]D֭Kݞx? 4O>… %˖-ʕ+-,]T[!O$>;2a4"::[[[֭[aaaŠAѣQQQlڴܘ:u* .$(([[[&Oә7oW\Ғ'r!uDGGccc&+SNeѢE˔)S8z֭[ӧqy9r$O// ɓyw%88[nѠA&N6Y"##rmiGUǏONNG!226m0i$> |}}߿~>ٳ̜9_7ߔ7|C100B!61D[X͌ İ;23lx|b~/zCdd$ӧO^|EFɴi})ϟW;sLT?q=֮]XM6QXX_W5}booϼy󰲲RԩSU[mGUso{[ڲa4h fܜǏWv5jg֭9r9sгgOƍWj\ c8w\߼yݻw3p@233eB! 
5v\TCEa\֭[K988Zlc{Z> %U*{饗QXttfoooΝ;WMOHII듔q[mGUso{7nƍ`dd>ŅJVZٳgyw177ޞu֕Ig^^o,]N}6z-L",Bqt,q 'ΎD[{ NM%85}G## q31L__$.;tHJ|VLN61lZPP@hh(z) 21d>Fk״fr4lذ̰EnΝ5߼ySO?t>W&?~<}孷ȑ#ZTdl6UϽ^کS6f͚5$''!]YygٸqFիҥ 2B!e+(!.FWx gVXHDf&gaabrܨaT3/.&'?&nW\2tNM7hxϐdCsN[Ξ=K||<ߐ6o\kQ"ѵkWm۶( ੧Z^|釶J߲eK@O@ӦMK9::ACiƍ_}ׯg<3l޼YNsHEV[}mQ۞F=qOլ#֬YkVק2zju@qF:t l-NDDD{DDD^ lll,BG0'8!ܜ;VVkhnD 2p46S]]e0ApC\v6ieg^k220KM"9JG[b FAVغu+3hѢX*Rxθh888ߚQxGGG\]]Kц]{GFF NNNus\\VT߿?Geر:taÆsNprrR_EV[}mQhk/&&@=Udرc˖-ΎٳgxbMA 5˗3n8 K.Ғ~B˗/WЫi~J @C!#WP՝;Xݹ|860(Hgٻ.X`6Z瓞GlVI99}.^a^A|=Ν ?@ƍ媍?0k[bR۸w#Mm{;vʵqv#(MWAVfɽ:vquuell;U鏶,,,H+jh FzX`ܹ'ңG|||ʬobb/իٻw/{a,}up}duh9YnHc&!qPPQ41&̌ IgBa>::X`XДKz^:H%=?_"e8# LSS̋GfOԫWO]v9J5kVnEW4hyYKLfxyyq%nܸ^6*f|l۶;v 0GGGq7(*2kG[{͚5ܸqC;v/V輢v+,,dРA$&&/`nnuh۶-oǏG>L 2իWyf,1 ¼yԯsg_PadlHNcgg'7_!DR|sQYilBzSQ2Muu1@G_,VИMULRR083tL22ftܙݻw|r}oBCC#77WkPa"K tq1bFM@@+V >>yaggGHHuwޝ+Wwwaa!7n࣏>bڵ駟G8>664*}:ׯ'<< >|8 RwMõkhѢNjګҟr׮]L8WbaaA6m46r-y;9{,ڵQF:ur\ڴiŋ9zGbM_'|Rꋚ'==]cW6>(OqdZgڵ ?^{nWnŊr#DĂ +ؘ<]LL!le qҕ'IJ338#XD;;[7?0C֭[ԭ[???PnH| ɸއP???BCC9uַ\x9waaaԫWBADD&V%А C 30z:4=ZK0bos9rfɷ1tPǡCV\7߼rff&| 퓁{d\k/SSS{\f2UO[޴~KXQ>KŗgԴ@C !>@& jRʿ_V!9Yæ,5m&aCDTTFbժUuVϟO9rؑ#Gҭ[7WqBGM4$B! 2`ӱS5//OOO&MĄ 0,Q͚5kdW!Dy@C !B<$xHttt?~<ǏBd!~!B!TjyNrxjO 2B0C!(d!BH!BP] 2B!$B!nB!wIQAqqq( EP(8 !V] 2B!4IQA.]W<11~̙3jժFB4;А C!$ 3*K.w^uY~~>;wÏ?56NBQS=@C !B$x-[ƥKhݺ#?*!ѣ4$B!(C 3\k4jԈYfqܞ={x簶Ȉ&M0grss5DP`mmMdd$/2XZZ2fJmW...( BBBCP0fx Q(4hР9ʊ_5jQ붴ʊ%L>J߽{7m۶'''ƍGnn.mڴAPpuB@C !Bf?~rrr8riӆI&g۰a?<:ux:vȄ h͛f͚ŗ_~IXX/2K,)@\hӓg -͛7K!K.%55cbeeUb*zQQQDGGckkKݺu+|ᤤԩSYh2e=Zcߺu+}ݝ Ο?OTT#G%߽BZhH!BŒ\z-lmmٰa 4ƆYfannXYYl2077/hի3{l7n}ӦMǗW,SNiI^^233Yx1;VU"czy"##9s&nnnxzzҷo_N8Qc333=z4]Yb6m???B@C !Bflܸ7n0h ...p!u֭[ILLy2RSS5ڽp&&&r7774/oӧK<ʊYѨQ#].[j|ZgeTQ1U}pqqA__}}}u3 ? 
7iD<<<;{mWe=ɽaO?Dhhh2*sS1}ꩧ4hٲe-ޯ?{,M6-1>%BhH!BQ ŒnŊՋ/ pΌ3K.;vLwOzغu+oF*vy-<2d7~g-fr˸q;nFmAeƴxp"jQ#Gdxzz3i$nݺ@6m4ڸpBM5!xXB!:w0&&ggg1۵kW8Ypֶk|TnݺEݺuS BQ3o<"##5%boή]hپ#;ot5b"_ Q` Ce߾}:tooouʕ+JNBN5Cx!A =2 B!2*\N>͐!Cf۶mתk|bԨQʕ+?>۷gȑ+qTBT/FFF.'BU_s IDATPAΝYlׯu0M>;;;~7nL^^L4 &`hh(߱BԔO>DBJcѢE2B!ԪB!B!.3CTGe홡##B!BD !B!B(f!B!F0C!B!5B!B!Q$B!B!D'C BqU^JFF xҭ[7رc?~."f!B<AAAܹk׮`'ֵk$xlBVVchB!Fff 6lνcvܩQf!7E<1Ӓɸ#dee_ !$Bٵk 1\]]y'<!N>pi <@C2B(B! A5Jdd#?g``:0vyߑ"A⡰waaa2(%33BI?% 2JwOc9-[匌=J!x<Qfh'B!ʤ89^BT[Y6ǎ#11QѺBGah',3B!s``b iج-{%'%aB!D%\zkF2BGN e&B!zU FXmYɕ3GAFL-h؂./ CuRz_%(Dɒ$B!"n\Arv>ɒ=A |)1aY4-F|fGpq@!*Ifh'B!6, N"&-SOLL165I b)DݺQk U x2!BQe7.qP3@έ^H,Ig%\:q MğCݬNaa!w9~:Eq.lr~.?|Gȕ@rppcWp5w~~_3kalfN/)їW1kqpNnFbhdw4Md#Xx %'B !B!̜,Bз]N]'I1?_,mX?B d؁93b그yʯF3{K|dp;6 <4iTysm|7m4^Z{g/ٲlɷ8su_NφljFl\8N$4@NV&?_b®Q"/LP ̛7Oc!#LB!Du}:_V""$o)_m[asr3Y6] 7o#w寮6+rm\uWz u74ĔG`bf[gVcS3^:تn#';'cnms='_),,ģA3X65{:Ԝz>-yuTe#J:w0А%'I 33B!D=vcB=6o} : 3P {r;6beqlcB.[?: i9GMD,wi겳Oochd.71͓k( [Nέiׁ-j]i+n^9'JPo2 h׹\ !aB!P(3C43ws.<1yJ 3 @vJj`@^nN0CYcW۷bwP_Z@Cff!Bމw1W'D|K[l\H/Ak9cHNQc~`ݜ mԂw&mLv:^w l W6RH;:v&rsE!dP!BQi~16#>JƦݥ"pwjOcS @s)ʅЪrjFjslZ;Sݬz/NP̏|]}W>S~_!\<)$B!o@Jb<֯{M[ݸ0M$ŗ4@P?oFrz6¯S/ [8ߝ۱ػsصK\p7Ou[]#CeYݨ D-*j.Iu`a՚0C!D4fMqo4(US6dx^:s+[]M˹AnN6c#ٿu5Mэ>/Q7%1t#S~Z2Рdgr9M)#DGW(8_fз]GNv&N"@tuK~m,Roz `ne 3 K3=}6, dgfpャj4c"qײׇhȞB!jV6mߙ;:u epx\ް7alf{}z9aljӱ+>c{&|j/vu|V_&'+gFcuP/k sX;~[5}Wl!4Lo_c0t}lRnvgJO1CC 3BGhF=s?`/a'xwM#8zi3o_wqg#%x5f?޷ϗ5P+p2d P]NJb< k=35^'7\x^zIF(D!DCҭY6&Ь20B G/Q~{ ]ruV e`D!33BZ׌zzjfJu;kA|׫-?/a`bœ&N!5 18oC'؄wO+j֚. '" 囄$f!BW9Qw v#6i7+dyuFMh3DGyu{SS_^\@o1ǻxwS?+5@ؘv?=`Z0៽?3ͧ՝}@Go`($@CPPXXH<& 3BǦA>],,(๩s9y'W/%VFV4}NNÞ/s\?G|Prݰh􍌩Ӧ}9KWw:9;?%ȡ%-Kz|,6=s nz{=zM_?r~ˏDbfD^鱓ln=Hbȵ'}߷<з82g9Y( ud7Q!BQaêN]zw ]'O pӚ־'!o\ / \SM iUǖNDdp=:z0Yw0׾̜SwGFGRGµ 1%6k`М/IB~v6w"nϲx:zz|m quN r^)fK g%d_Qђ!3!FSAQsqMg~Nkn8P{o\)g.aŨu4}ˬjͲeWFm\m3SK=ϥ[' CQf޽T|SSI&/VUk=f!] 
JI&97&hoqA'̡bcdoA>ݏ 0vk%rҔ!j +7e 2۲WגnސϗBݻw?Vbde>>4{"OԩVf 'HQH!F+m#*(WnfFj/yVtͰ<זmpmن>!->#qnReg&fꋡi㙙(&9!^l1N_k> {xx:t ((F0?d"O&iέ_Oykؐ3T-h$aB!Di t ]eVnD3%UmDGVkI78z3?kC5c1m]0#)4f-&iO[h쁵YϬQhc 3ߧUGyc 60CԼ &Key}VZ}{Gu#++ZG;;GFmΞ욵$ݼ @!i;zj?'p}߾ [6 ]Ær'<ݻqyI+zfhk85k y٤pA.nBVrr86mJ7#+KSRx7uL6!O좥"N[Z>*ô7; ^\1'_kwlɥJ{A嵽;F=;wcā@:L>a_D=rM0|X\i߳.]f ܸ7ĺ^=ɑ{3oܸ\l[y}i,YkZXkhW}ڍ3fT<]3'Gt103 we঍:8ha>[=u==mli^v9M]ff!x"_Ϳ>ɞcr ;ڏ76dIZ%ymߙ38#najgO^ͰQ{Bx 3+F(U޳V?xzŅ8pw"0Atf[ʇp/!WՃ^+\"u+X+N} ?=/00ˑ9'o]''+;:t5^oBP0uhwn]Ŋ/Ge4zԜY?g8shNnoRzks12RurcWzы-HNe8z(ܢٵ~ N$)>Cc7x7odoP((?Λ``dL;xl--QyUDH uRnaaa˵IZd<|V-ݠw0up eΛkЀLJFc(׮#pR@R4i'\woHJDxE٥ ;pJ穑%p:.oFz|Iדex,9J{'" 7&/x51' 003'9J>?7xe?ŖeeL{OR|4z>ofS"~Yˎ nvkfݻ 37eζS80u͟f69Avfn4mәkaKeߘ{ [2ϟKrB, #@>܅0b,#Ol#1.٣_څsߎ$3=k{'~[=NcY\=%)>X,msS{V]]h_֐F11$._ ;72p,y7}:3GHP?ŮK}wF )ڨ,MO篩_{"yefs<lVv*ۖ>1.pxLCoQOfR6n܏٫pu̚5FGSGNZ:}&/D߮eDDCjTg׬wߡK~=0C!1~8W.EVJ2a[/z9M#{01pt/?l8V_ߛwL,3CNu/kxi$,mbbn^>p3H&27CF\@w_҆X{},0"#3_:SCx3';eSޥq6Ri+ Lpnkc"?/VU:dl\ppC`}?{ w ??puyNv&c3 z |:U6=fh;*̸uU:^MJOW>X;DsJjQ--33*r\*_B~HW(d*?-ae׮]M~ٙd³rVY׊3S{{R9t)Γun*mY-a̪y2o%Kw,ԡNv??2]}.sѹ2103c3ϒV IDATB!5~`ݜ mԂw&mLv:^wSoXJsrTU>\=M?'?1s%D|K[uk{J8,m~@r~V9)ߧIaT®].Pշ8II K۲s+[l„\2 .RF~φ2TT Mէ{KuN@@I`r6ӨOpw|Pb 3}oT-OO.|+rVZlpsiGɴxuuq'<{zwrf!B{6-)qP]~ixߝ߻MMtz-b̬u?_.X%R~o0Rtv~L beƦ$揺QU]Tt] +7+"ׯ 3RnDŽsDܸ Ciᡧo\}*Ϭh{R +z͛>7؟BhiYz\|z PG=oRj7Q09FſVt4\%‰k9`ƒ'ͷ7qbsi׎N|?srj}m&B!5۱ػ} vINy{;7':?et`[ ¢,^v7TG[}Ej.c_hκ9uzł4^S OOWFuQ=W_WYJɤ"`bf#IݴcSsz핡RIv4V.Iu`/BBǦMi?v,/ 7Wsx n)g6= #+Kt ph҄sԌH 6Ϯ]cQ[Wt0t[yرX׫!δxuZvW L>}:;``fBWsgg}|w7SmyYY֕ne:abgKynL,103!BQ4mDžqu*z}_Mk,\]pa9hϩ%7 =ٳD N._&aB!v'j6~C3h 9Y޾ `yx7oCȕ@?5HIJɽ>M]N)'`ljcOޙ ݱ+>c{&|jo*fm0vz~^?ƾcSsNb; ι`ʙ|debLv;x?4m T*r*.ua9Ͻ^oڊow}|fПT̬lНRu=L97]^V69{~݋^ys2nqV$=.S+Vb]߂rx LJ|CC+Ֆnr33 vIާz$q߹*95:-AW_n1sp@Μ!I 8ǍIy߷[[ H y3s(,,,BkΜ9\~189 "$Ϧx)$WX{N޲Q%_3Gy)~stIOy5JnCƷ]OZuP FX.ىX˞޽GUyrK 3B$$BA,$h$*Rm5j O-^5kh+F-IJRA !Pȅ$e&f$d0$?2g~93`~ 0WRC=v: jGp.#S{?WH'PjӁl$ Œa@ Fq?vXT}ir쌀:NW<'牞dggl6sccz3`aYCu=AFJJ ?AM]&XG0Pm 1nO!fd|􀚏s /_,12>U}}= áT&ڲe Ap 
EuNґE)Vqd~L]999ɡ93u&$L퓦P WWֳ>K!0(ca+ͦ| D @ !0 P3@@i&\gggfq#A}vjr8aH{WǍhڰa$i޽۴tR 0&I 5hRt}cp йA5? |4):V{^u~ #4̠Y* hwh@`PY+_VhARO)1 ނ8IRtL z 24 pf`TOF0n30*dh#3 Í@aF24?f` Í@aF24f eF0m8 7 Xdh#@@ Í@FaHn0r)`ڳgO9=o'%%q!ps/<:]Zt)7 a`L*..ƍU˟ 7 ~L3GɱyASN`xљ222dXTPP nK/(a=y4TS:**JJNN# dHR{#&NY2̃vݷPj0 6d2pp`f,áDLTSS.]w""RJF3:RWٖLfn:Vaaa-'rk$is/kb 1l6+//O*((PSSUkӆ0z[0ײ-QY=Wn &*//غ:\.:82 4n  $%''k͚5***Җ-[қ[7`Npk8Z7?Cmtc`֭SuuK\\)038l6+''GvWFo#z-LyGflUvv6`T#<.W_}U_$I%uZvþ7޺1V\,n 3l6+++ӥQ__Czu#ڥrjw}1rsseٸq`L "l6֮]^xOO)vzⰝK*W˩ I73o]/o.Zd0B.gK_<'ca(..VQQ\.vئ'o.1 233Tш0˔!ݮ'|Rj9u|л4\VewF7@ |`Z?$]So~F.Itc0P-a>[.Kljwz(77WfbE4UPP ˥*_ˮU3f/1ucpP`f0HUPPw}W.h>Ѩ2f0fn]KY{z{vltcDEEitc\aC[/?ޔ=:U#u՞̤3".JIG_zDEE)77W (C+99Yk֬QzzzZf Ae3a`6#ݮ vB f0 1i& fB a(0ƙL&ϟ{= l6 $dd$_h$Y,>*//Waa?N1Z^Bvd(dr:) Fᘩ,Ogb ^{"dffz\3ݦ ~.bd::m)&:35EC)+j:,S) jժUqFfU'VB^wwS Sf5Kd0ua;[jba ֪L.Yӌ}4`ёi1ٲl^fMbHʕ+կcfZWeeOPx+q$X7! mp377 7w8f4$xu,.%!!A&\X,^A"cBkk(dA`8íFmmmS[[#I5e\aa0oGL@d@QQQX,ZX,2LH0 *++0 jPACC$IUUU6NgH@dh4J$TH R3¸ >i)WdJƩQxG;7Aկ<>03qE|Qϔ w[ccuw o=a?Y132+I7?cebtM9)ؾUr':,K"HerVH!F 4!B!H>!teSTϴotHk-ʽSsnI3^u:B 9B( 'ÄH(٦?q^k5ܯt2=wOӊIYw3*k30 tf֬W(&eZiݷcwQ8WMykѼ4~ &|RGvUh3O|Wi(4"'y7j޸~S食s^i~풤/]߼߳{ݎ羚E&L7YKb̓S 5#S |&IҎg~ۯu0^/⽺;?4ɪ+ee*y!m]w[GhgK+Vjv ;UK+GҸqmQӯвWU+cy=] gΨW?կ3ԏں~Vڗig?rEf\TCzz0٪G>>O[ݯ.X&]{ de|uwu5_O9Ըowc_${-=03B>HѬC/}; @@DL$5 oƍgk˃kոJ].=G;OF7N V}Χ/m@[YdA<ŏ,gzwgqǫ䱟hӿqu}kj:m*mIbU+/ lSg[+_<@B=ZE=@ų뽾sz]ħOO8w=lϸv/m;n=s\߮ šrJI2LѧcDh`$ЙXーQRϣO#m3/9>vbRݔvF={} ɒg]%IVr)'j7+i@5 UvlSѤU£\p씹)ѧ4yo9cϯz=Fa Z>8褹+o)eV 2LR̂+bu)1:B=ֳ.-X9]=Ef*8̠t7Ⱦvuwui3v<:mJ&'5cBB=E oCM+t[ V}Niw'$ FL'fP;oڧ&IsM =0X3N3tOi|ҿ?Jx[Jr9=+U[S]j[ыwƟ>ܘܘg̑]|lnmyptMtMzP]\kwwxfF  @;YwPZnҜn42[5.(HG%z{udg.٭wܣiE=z=ѤezGT{v&5/ޫK=E]jWJ;:WE,T)\GYsݫwա> |T&Uɧ}_>̌6I:V^>ۊ[q6S a(L3cJGK)|aSpX/< 1 V"`PL&# 0&)55B`)0NIDAT 0f$''SFB a( fB a(p1M:1k>jLc3u#L3xFPRRR(cĸ3gΜ PЙ a( fG8IENDB`././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 
ceilometer-24.1.0.dev59/doc/source/contributor/3-Pipeline.png0000664000175100017510000006146115033033467023037 0ustar00mylesmylesPNG  IHDR2bKGD pHYs  tIME (7$ IDATx}Xu 3 &( m(TgEK=iin^'ovm;vw:묕GvcvnzӯMIOZAh"ȍ 3В?pp3|\WW|0k>w.^xQk( !JUVVR%@llt>8p@7nB.pBMPsStݜ;5aBmΘqb8,.NIFw}<~}>/鼼vϷ=ltql Uvhb!ݮDY"=smu튞2x֮Ekzu.\s^kٿz}Ţy~SSd,s+<.2~k m~eZ5Twڝ"'G{ lZN~%Iy6"'׸޺ov=2f]\ݰQsrֶ#2,.ΣƮYtjRVkM:.χTI:qS̅*ٷӭ='?r-d}k¿NUj`x,~t:_8bvfժX,/Wťp$KD23^]bZs8Ryc-]Y^8fTʟY۴gmsÒz ,O{i],v1k.̜v[ajcW]%lt\⺖sw:NPӞ}Ƹc*8GJ8ݏ?AP!՛:CϿ`vp @g iOxt GX>\U*,Ԯz#vdU}GhC6=v:&U,V/v4ZRV4ǻ,mus5v/I:cg[jm-vҞyƨmkA31sNy Ve@S)nLny8Mvkil$4gZ'dz=/ժGW*zee^[ӣLi.K OLcZ,1Bǟ0Z64W+bx\I99tF@H)-]b bUav?Ҕ̫VXժy),~hKƱnEWwCk_W}Ѣ%J;Ǩm*Iβ2tqsy}tlwFx -rx!Eڵ-Z: emRnrIbù(zʔ<:5ո٥P\áϿk۔GE㜮=)SDud[lsזƐY;jԻ,՞ǟhuY&5cqcqbJ?ogfGq3gۋڅ"7WX=z9Z>5c<]:;γ+ jws8+nLEN1lst9-:v-v vCvGgڭV乄aԱ;Uo\q̄S]֍l'\"7Wwl1nk]\+rs;4T{̾5 t95UIIF usZS!Uכz6B0wy}cUiZ[7sLBnبnz[~Xn-Ò= ~ux~ VVDxkuܞ毭ݗ*JBH_lYcm>kR!gyywVƶ2FNQtj1 +-<KH躐ZVf?:uJۺj_tHkv[ gmdkenF9ՠkxQ=ѕuwzdFTZkE.ܾ]βuh>okܗ?i-p\7w1X+KIw*޷~E$%] b[=Ɣ+;t &uLm.nLűvV~X\^ZKu8[ظ<ͷ?b]ժ?o%n!ۋǏr/K:q\PyWĽokohKYs MH;8l;h<[xlKL{iU"'Z]m-ݺ~6nB 8PJRw4P ӞyZhQ2E3g(:5UNg!l1 mlxQ_goW)oHMX%/YD}fP ,/{u:QuP%nWltyLVk59od{]s)*8f@rӷP1q !@H @HR RTB*T!B*T!B* !@H @HR RTB*TB*T!B* !@H @HR R RTB*T!B* !@H @HR @HR RTB*T!B* !@H !@H @HR RTB*T!B* !B* !@H @HR z:iHd𕥩N( ژ1cfYjf97kSjT5tBϧul6l61cfQ 7t&HCMG VtMfEJc#)+5n֫UuLCS:#kJJ)R`Z2H6.\i7\Ç6BA4~ť*>Uc?vL999ڼyD+RbQy]NS'SJhMe):ҦɓǮVuUj6 @3.^x/njLl^Z&{Y+ԄarZ 6iq|XZKJa)R:[na%T}bɑ#Uo Tլ~hiS4{{{O3f233iYђ ׫7t*jjBCb H83Q^SFFnfƬhIЫ}9lʇWSf%(#=)f2ғuw'h_>֭[u-\P@bzS92B~~χyӴYPիWk@CK*^iqo Lk/Goy{=kŌUgВ W0~"6짷P3i=3ɡ(ObbTf+%yVd#iuߒƏ_~Y(?zb»Ӕ9/Mf+[;WmPޝF^V4IRvvvϙ!|1^dѲg(>6A5zm*B*j@] ϸexUT'ƌVmP? ϙIytN7nfS||b$ixL\E@/\ЙS9jtJ*9UNգ56yYNYB+]ɤъ1X MZx׵a'~IQR15!! 
UMhHBmX"cFi!e 2kH+:kn}P3@{Pb /ÇVYs ィKVub9/M/oإ[*## »ZY k7چEjHDDDnBLAf㹯IJ$9jL6` ֩ƮaU War{nmIGqRRR~RѤdҗC&4T^fٕ@SȘ8]?^ O5d!=뙊S,UZt\K=ojT(:l֩(%Qڐ#Eڰ?| SԾfaCul"umx];vwC‡+5 DE"G+kMhjBCU5\!5v*:JA l6)s~V{n}QC@΄SekePd k` 2kl$Md/DQco!6b3UVeVii\lִM: >6RGEnZSRZ8:Xd]?݉z{`uԜGYzt vUá2**u+#=b@'pz_^͛IAR{zIe^'B UiOH1=2X 7LU SuP٩Ǎ;V[XW2ƭPen-fdlͩ}Ȑf(MC@JtvH7yˠPM6Cc'Q6 9P~A#cTnkĉSQ1L.CFzjÆ Z|9@HM6Tev{SQxᴓ!u4mr?ܭDK@ƏЈLGU ! @Lp&eOPqq1KP9-}vX4P0_q%O@SPSV}2~*"jX}LeXCCI4)ɣ%I-^Ɲ6ko1q ̚4-] >ڳC?gnWС*9x>lօ`MZ]hX|B&Mѫ85C‡#ڷ=qRC@bbE(:ekrpW6,#?NN""0XaZ$);;b ZY19eP>L 7셯 ]k¤ToWaUD)P/ժǎl- lU~~>dw_o'$k;d 2a׵c'h0V-u6$D~lVC@FR v- IDAT|^kI-X42 ~bxL㲏ew!8XWk!ᬍ !z֪%R}CC@ѽ6,R߷Xc'J3 oLøѪrK_0DGr;DEI)B/njVMhq_|BZ\C‡*c 7L)d5U :6جѣ"(tK)W ?M:]7Ϟ%]Vx{rdNEEQ?j|tѣ"< !M4iZ:W -_SQL Ѝ#m*))WCMh>3cs^d-V|Bq!6}vX5P ?`6RFfjkk)BjOE(z}umxx7ϞT/1 >vख़}4 -l$ƥ ]Y%QM!fe=fuU_BD1[CjS Ԟ ]| }[ SuS<UzUj)H3LAfr66qGP=;FPݫdjPoo!I-&S"\K}P 㣮mP w'S:;Ħ*TuP h/JR~AIɑan/+''Bh֬Y-%QQF@) ݠj1՚ @w"5!a:;M46qNk]?mX V[[k/֚:eL@E̚yyoT_wA@k8tՋQ -[ם߅Ԇ}kli5:2X7Ϟo$&4TeveeݦRG(Ml-J`b!zSPqmx%O{wI*":8]|m]6ͩtS(1Zf&4Y WdҴtǎb|*Eq)H:oZR*"&\A\wHgU_wA/bG)s nI tzeNNiܶs%LAf͸z?_$9V}>LC+NSt [H2")EO8ЋEw2s]CEӛ^st-՛L|'M!S+.7iZ,<ӿNEGQd}(%0nsMUlrZ, 958'$?ЇCjC@J?O!ùbvSߡ@S$2}=ÖA4mW =dx}9lM& ՐZo2L03YzT SeָD7{Bj=¸=r8]?+wNIRM&UlϓspU 1REZHuoEdIǦVlMRH2أ5K<6oEU8ɸMk*GB*Ucd1TTZQܿ45!! 
VT4ZS;&$pK{؆EҊ 4iZϗ%I!!AIZ"1;,Q@]:r@vcϚ5K=r>ђZfNwyu' eP$i%lyٸ}vME11m@{z칮zH5u!8&`ܮ DPT/UY=ǞwtkOH)+Rn$IBC * 6JʪTpLg*)s^ZExA]h=W= 5n_?W|5d#FqI㩇V0m0T[@P畛_C!TV֚hlQ}o*vIA!&4T@IeP(]}&`ʰ0Bj;nq*땝[IPF/]ϮBZ֝oNu gWuLjۘ>*kBp0kv@#Ե1]"=VZ7ǚ}@EwZKjQoLj/̊OHV~^7UTI @PDj $f{oh3UNpZm۰HYCs57`s!*-$Pi"'~RTa}৫/-W)HTo o'?<5{@?j-FJDDQIbQX]Eć'[@0ЍﵮUuW%d2f1q\ #GV O;6`.^HaRsuU_R.DP}ׯrrr( BGHuku564$|8R %%%ڰa@H5B[{ko"cS *{mv6A!Uh Ґ\-q3V U<=>{k{+/]3u}Zy-m^Y+[^}JN/oj⍩>qnP[q mޤ>>LPxn?3!n@ӬҊ+\R.Uo2)KT TT$Af㶿ꨩ֪CK'~Bl:߯ ; |*]g*]oIڭ)EJ~vڦMXjpjd1׍5ij{giqNk⍩?SsxEF(h$)~\JT͹qZQ1xcj:뺿r$Z:F렐%Ieq:;JM)I^$Ѣ +Ӄ+^K*R]&I_ѭSkoWS?esߙ8\l8\7:H%x}w[wΎD_+z/G[R7=]`P_YFWJ-}m9IҚW_7Z5oJ'SMںe)~\ۺRLJjͫ)K>R?i }k騩i!~ڰ \cmkfkkywO-Уﵴꏻvȉ8=سfRFFFχԺ@://Ln-ե8W}ՏHnMZoL=?zX'( :Wj첻G,ާwe靷we鍿h=~ŗ[ ` m~fԆ A}~ 16s umjiʚ䨩S/1^r9k<+k~uk;[6顥)pB[tIvTKx-m1q24Ѐ TKJJS]֭'bԯVʽ]Zz0{GH?CӘ'^hWq6otޭ[6=qUu)}^Kz/OjEƫ/M*uYSQSo_+rΨ_~*..?g}dE0.l}˵iIL~{hc:>-[thS*X o0Ѝﵮ ThSb|GP?VJa = N8B-d %TR*7Ad +l8CAg0|?~qYs葀|qAD6l0|<T2emy֓$A5;; ರN*j(lPܖΦ(:1~5ҷFXu0{e?F<QLPT^x :o z$׏ 7Cƥ^us5TA1Ը ĹJYCsڑ4OVdT&ޘ涥E*;UҒ"EF>R_5%yښw5v)}V͞[l4OiBɒ%[l;omRiIc\ePHHeOQSͅn:PU~RݺA]P݅Z\+zg\?Ai߻M#|T9聻oP@wk5՚xcÊ>AO/?DwvCjSyf%&J!Z%5vUhZzý-f-m65hߙiݦ?mKrTkŪ=ƵcN/g=eS+knyqPҪ*-8@wMO?{wHnhrY -ytuTk<5k5ac4^4V4ix*Ձ3T@A5%Ѧ/JN9y R{{w3%\/!٪UZ\ݦݻim21"F2(Dem8]4oj橻֭yNEJNI$G{6p^Kz`mƸVokQCVWkPʼcsU!KsZ1{WK7ϒZ%'==ҲUIzhczhc1-3Zm2TAt60_ZtЎ#uljg=m *u)A!YM?U_P9T*&S"e%kw1s5*? 3:jQR4?N|vJ5#NHAn(:QȕJW+ .^TTd@PpB$3ETGZ-j@CΟ)pt(f R\SG+Y^|@/5|CqUUz}n RV$IuYG\ r׸Z]CA kk)A*@HzþҸR4g*N]}BK*@P;Bj[ ԉ ފR]3(%5^xԮXW1yB*]}ۓY5ЭA_~Uu|GT_wA4@@TtD}a:9jL) ΕO,׽/T[wO=ST%);ޭiicAF\ :Qp}/xMdJ! 8m".a Z^ZB -@C*tP&AALdR}4-QwVyR>MQРSVMUGD_]+b/+7n§&UVR|0Ң RLXe\>jM|&ziHmTr?*RiM/:N6Ttv@r#55;ߕ)̕UYGrԜD+*T%&SjCСo G+&2WExQR=UVL zA!=~~}[r=zaٲ^WB*!+ZS >ڳhE hhPHu5EtZBT c(jmE:YV&IZkq8ʪU?6oMݿϺy|znjee hh0T{]\S'C4or8֭}f{9Ŝ(2nԩBzyݘ|VC+NS@3j*)]'ztݔ'T}D_=0өA:*Iz7tO犡}DEnjG8AQ^;]1eIcFRǪeN[O3} _7]o[˔69':&zM+nq+S.  
IDAT-WjVZVn|io%.®?npm^~jNIRŢZ=5 A/\̉"}29j=;D ݪB2ײ^/>!vlM 0A icܪ{(^Cz~ٲ6ǿjzmRQiگ֯Kj$d1z~2exοZޘ8y$iڳGVJ9nvK/,[%cУWO[د{[H#v=SӦ6kGHF (+ө(I46YC΀nў%ݾ{DٔyWJ?Q\?Q͹Z>PGVmֆfR8ga?m\-?mݪ_hQv8tC)/?1_jv8t@{Gl=tHEbdO 22gmݽk,.ܶ#dO8yjh5ZY,:|HUfSmp{uż3˝8kz3=QdI@12n՜Jjj[z[4.g)mrG_ծ K,5yF@akݓOz٭w'4B܁M<_F|l"Eb᧞ԉ~cv8)+KںUv%#-M!jNeR7mPM3wá~ZE XآE-J3kwdx_Wk^~~Z5pcK܏xw@P KΠQ^,i\~]ޑCQ^TORwϾhSV+Vh*ꦛ_4eժ-![ע5#-M<hݻ4DFjݥVjC.8 fҿ._jz^}CXp)nj}`ފ{GvؾnB,U;Z۬ ˖!u={pZ 83$Io}CC""5$|8wYb44($EA^ZfFF5}=Q\vJ$IRxݷr?)ipCZ}]9ZqJۧ'vj%^u߿-VK;69ԉ3և9Zlsol\:ekݪ wˍÌTI*l6/6vқv^XEbQi1uAFFei˜1=JHѥ $"$ҫ6lM@n & ع6DG̹;gs{rc*ii)vT QO'ME=,,.aY0?`."c9Ko5|m*)QnvaVQnhL=qM}Ęn[[_?& 7=55{~|jPÃ&]}WoguH 3@VgHK3~KBxgŔ|rJγ7bݡ #bu慩!AUU7E;^w۪TUXY^Y{c˄X|ZDЖnO䱶cC;*ze{IZ5jێCN^zZ/XQ:渵۸7*ͬ0)]2>z(a uLP o|D/nSno2m:7+K\.s: fDž҉0-~0KFkhu?zϹ\c'Qę5!5ihH}F3ywŤu]7UKaDq!#w||ʵY[5w٣mczgۯ&*cA2yM536W *Xa]OUu6o; IƱyE]W͑f{^'_oEYj1S P_۵lNؚbL0jxΝ:qޮUƂ1CjV3OOlhho1P(ܞsoMkM^ܸQ/ܩچ{<谣hƍc4_ԓjd'Ksl|8̒6 ZX8Z[WNm1ݏM]Nq Lc›4ܩwvVwoEh#gH7ż5t+-:|%{;v8V-S}K^򍺨!UP%yce]*zY5ㆪ?[Uf5v{TR2<*zgᣨԐ݉11wƋtt0>?RMLُNvD^&GKkDC c{|:+7m5|zBl-q۪ U{TVZk 13TZc otE\` $7;+I[v35fAQ_W++)|9.ZszTǣzczQTGU W…TIZr^>*&䓖SP]v3ƕ+آ7mxkM3S;]x*!P GHp6}sOsOѥw*hژġF!He]5k$ #-,Ʋ'UiÂoϸ^4q(DrY]=Q^.Mo}/2+to%kD-]kjeV>(/'΅44>k5>^ob\]W.CǍPMY:X $5V]?㽪yskR-~:ZFz߯0o( 7T WlmUӺ3|D58  ~9~VQaLiuGl#h][s4yG:ԄwCa{+|ƿPgB}cM1,TR3ȇu:wܮ:ˤiI[{DP=wyV3!2H89jwolsY ʪ&WCUՍ+,ZShGkFRA@fgGܲsgD(!rZYiJ߯W++#mIVs;#}V[F-2BbnvqNڴIa'Μ_q7zP #=55Fá'#۵^0u7./ydwȓyv1&:& ELBǸ GX0={FL.UU7Sn8s]T3uxgUРgda~=Q^޾>ZYW++ǜ#=5Uwjxvm협yEzGlǣ\O߻Y8xj#lE |̙[W;ʝs&Fm<{U=c ]y߄Sl}Fgީ3׶2G/XG=X){x.PgIV^Z^љf~}q9+bMp'?o 7s;v$+K?ydž+Yf̽Yww5j07?X2;+9ozj6ӈ}jëWG4J$w͙\#~GtgOkѽPMpޛc*|-Gzth{{]gTT^)# eEc45iGEIP}REܚToG J71/Me W7}[L}'3;9nfI[{Ĉc5: y8>yz*|c_Uݨnz6WUE |㸤&OSI](iC+fpÁ8}iJ~u=SH@HPw͵Z֮`r~X.wF5t]#oc|m .?fK {-&uxpxZ}S.hq|hFXt;UUݤk=TVZEB)/xڂڢ|藿> n}RYil+sWf#hR^X Թժ@pcP3֭׼&kPCeeV﵀]F 7lہqϳwgn[斘doZkjxWL{;7(ϲH;8L ߯`T:$ )܈5]=zm%ac5kPU}}3FA6tN9|A ; lnz1ۚVOhj4w-yG^9nT/ f,0ima?^5 7'r=!/XenR}h(e[iyl۸x(\9{o{LJ?_O qׯGPmm$O:;g UI2ߝ'6`+X:;|j]zysy<%zys^_{v~ 
Eި/W&D-zߘqu{){V4ʺsv$yzT]K]W.CN?cQ`23R>P~w%M"%/W4C*ł}8u}P1jK:/gGo_Mg>a}O9~Wkf3(}޿FTIT:_[5>t3*R?Ӈ?7bw3mUlrng 1!/WHv}_yyTIT:-sο]=zi=yEOө_[t,9.V@07ܙh7l O ߎ{գCُN=:/"jАk @Tp!uH2*O =yFGvf%O{Zq}> @Ԋ 1w%gzJ7^tץ< 8c5r=ټ5CqBܒ *a랻% w=wIGzhSUN5`pWA9@@8!utt(eK5$I{ַoK^D|}ϧQӴ!uNJӊsuu%;ُN蓖SL =:y&bSix4]}} *#ihHY2ww=wHK9[YŹ(4 t !bjoА]STؒA-w}ތ uX͛'ix 5e._ENVLJ@Iw랻#֫z[PyiۣS Gt$S@n@@%TD>rֽ6#߿S GǕg]IIӯ꼬LB 9Ԓv-vwY y_ [׌溠OZ>utbY1au08ُN|wn_ W|JPoF|麖3`Ke&Km^ٯJ2j bڰarrrd٦PWVRPhա$u/2;Ӭ@JT@к^Ti58Gu]i( pP,N!Rog`ڕڕ7Oꋴ)BkHZBedj=ٚgJQ_ȜRSdo|-(S@_G0]II`@HŭK*II} b֐3qxB*DaX$I](5P Tf|I38rz|r  7c,G~~f( 'g|B@^YI8c$`Zd4)IfLB*%K!}K&IfbܢL& 1vs1!d2QуV )ɰ@/B*b3\A6tR`t 1=}US}R`Št=^d3mg `6i>fLǚT3fdRݿ3̢/_zT#fl65AQ¼0U[}bq{/۪i?#o_%O E@H[UXX&5o c[_W)L&S}L0+33)0 A5vfQ TG}T^= StSIRqq1@H şTPä@HI3*))Qi0ـz⼼=~v SU\\LUnpA9QAA)B*LUJJv\5rQՇjÆ !KQQ/_Í)Lգ.l6STNv q4 id/B*C~~y?lPiutzUVV  `e,ց Ljo*,,4B*AJJ6ׁ(굷)33S!-''G%%%j>F_{ `N-v^W/I*zJQ$j>ߦ{=Q UJKKe,VF=^  5rK<򈊊(B*n)))xYE_}*ޫWaa6l@ARN琷HȀ|2QTIA~U P-ںE  3ENNNĈ*A@"Ԋ `N)٬_w?Tʣ(Í;q^L@HT;:."?oנmTTmAJ---s(4^{똼=~ lSWWsILIDATjiu6SN)7Iwܩ PTnۧn?J%V3 `V`\-_\[nAB*r8:~̙ T_T ViL&v \t:U]]YfdVl `h:y{*((PYY0p(V̸p2ST QNub=\+sY |N,UTTDq 566p[d[+۪<֭vZ.|_$-_\vS @$ө&577+4eɺ4[֥Yla`R\r})gZ=Ll6=2yoB*Dsst:-I2͟ř6+%egdJ&0h nuygRIX,Z***bS y^9N\.uuuC@ gK( x233e6|YV!t*vtR AD QFI cB*T!B*`fwL]v)Eq̭:1 `(**RKnb$^+'NIENDB`././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/contributor/5-multi-publish.png0000664000175100017510000012337715033033467024077 0ustar00mylesmylesPNG  IHDRH  \zTXtRaw profile type exifxU 03EF1N%R7q*,8I۠1HxsѫN.M),YG}/],tunt5C#wdq]B,!aj iTXtXML:com.adobe.xmp :ysBIT|d IDATxX8 `@Mf7F~'w5=iM=i~iƽkk5!!111m6rc(̨?ƹ`a㺸2? 
R`u$H# G@,X <`y$H# gc{cEEEs: $7477A>OQ %S^^ 0Р׫冞a*..VQQrxF8v*jG]WeXvLK$>O|<<ϠC`QuV.휼2u>%S-i: 9jIsȟ6~aT%%%% %` rСsv묊Ϫ=]7mjIU@ iS&yyyfXw9L@zw^y<8b"c] ()gS-tꮻҢEh @I4776jMWN}EyW;uRԐ6UNJy0t]wiʕ%I}}jjj9N+|)i:1WuBfsav7\t@H3a\Lx t(cnĪDUuuu` U)4#KISmFj/kJPq|>ڻwoF"W$أd ru@R__;w=N02hAI^^*++U\\ ,c\$^Ww {Wŗn"n[֭c ]@rA޽[~|RTo+%jaB  dݪ { |O.8koڍVeev;HJ" zzb>nӺ#Zw{B^/Uĩcyyyڸq\.H:c> x<RJ:ޡj$jb/ {lݺuL$1DRw^#yB^^ cL$1|>رC czVGJ^ZƔ@sӳ>o)5c^V #֌^>44OճÖTii)D@B824OUuRr{3B8uӥG>8KIӳB ^H2'l*<u%%%ˋyB*Ic=@WNx≘OQ'J8=]zB]kΝ;uA I?A0 v#Niǎ#Iӥw*Ivܩ@`8Z[#I"8~a~=jnnfpcڨ$w G*;QqYޅ$bCI5j޽{u!~e;*|w k͐E;v``c֨$1$96myA{e`cR5AWh#og.佮 `LJx@w9߇:0Rע+-a#5 Hݫ o~/se׫;w2(1Ŗ;wk,*S9wK<jkkU^^ )477rtn3 D;RmamZ;%K>n18jkk{nH{$TB޽#o?j & ^AR__C;PU{OISCC<n7/ Nz>?'u]f {@ZjJ }Gӥ 6EEEM#9^Ԃ 'W^s"Sljkkӥ#0)׫IUmjkk i޽!K}meP7LƬZi>%.IƬU0T |= $Y+{OugޯaP7Ĉ׫>OcV Er\U@/ 1$1+-4fŐUE~`( zaGЅV:t*Q@+g=a+|<a aY_DzsEC1(Q3$tG0R. Dv@Z=Rz#+T DV@l64zTz#gҢ+- i  HB?!4l0Zx<vɥ"}l}@" 9 x<$N"UI~0* S `4( 9+}E}H|>H!$ak\WA$U$4!X B{ۄ~C 3m@ ) ill7{O݁)\^W^A$LIhj=]l?${SMM ׫Gz̀(9 XWmm^ݫj`G---zzg I` 5 !jYR`gBU|߇$0zbHBPA@FOLISKyW;9 v #$X! ήVF &jyi6ִۄ$+ȐDH!ż=ǨaԄVɅu|uI4m = I`~0 >hƵ+m1`]$6}:! 
lB$ZhCVwY!fR ?S:jCn.Ъr0t5N854/אaKe0 $a?B&VWMt$BIbpnZڢ)jl_No1[[8Er }Ϗx?d$#OB!ɋC.]2CGyD.8O,6)$cZ:m긬sk>ak q/J,.>{'g/2'IMǤϓ2o{^J*Iƀ9Z[8%k;ٮvmgRUQTm_Vԭ[JHA+HBпi^u?z f&;Ӂ락0I]mamT8#M8$‘珜‘O#7\ SKOjV$Ho+6Hr 9,!IeMʜw0Sj~uZ.^tUw2ۑ\ $%U=ou fJ` y7n3Foq\琦5lfNXoES4#+]~vQ?2Wfș.GFZ/u鋪;R%0rd92NW.\.yU [N5\SD[Ԋ0?KO ֣g;#V8uMm<S8 CI, |=xo^' I0 3ۇO_Ҿk#>J7WvpsN oLY:-[nDϑv=֯}1pf o##t-6I>:ß^~Ak lfNs.'#BÖj0, HF҇oKN*J6m-)m4y :{2_c+ܳ7ycW>3?7y&\ߦ+?0mH lgfɜ8N1i 㢡k޵Nn BdH$my 6sͯ*6AeɚkȑES?WT[[4 Gt\V^Mr 3;]߿ݥ-T=d8?ުjxYvY0@k:zcEjL;/0CJR+G 3eRu_=?ĥ@jؾC~4rW"㞍7}ݢ@P!{_h|t{ seҜEc', l=x皥|W XA32}޾Zq><0Hվ+z'$A 8e [a=ס->[ giz\Ce@“`0q㊾&T;ڢ)fw*6PcO_:S N6X͕|"MG:vS/=Xc_^2Cr 2l hǎ @Hq3h}n_=Wt~hʪa %eǶj-zÑGN~4C)PmbCl+<{6‘cp嫓۷#[k?Ñߖ5pJW<3'6esC Ihܚ8O85H gvY!+2}ZN`%48|:+GwJL {zڱm:?5F>ß] KwAи'ؼ}=P992'{V۷Cy!ACR~@!ZNܾ:P TyF[1zl8??=(n:^1yyAOOovO8/޹|XRIxX[8eD]q>nG_}00+2 *xE3˰λT ./T \YwpLD?a$TIoP@q˲@)P)ׇOSۿLHOgV}OKzt,ߨˏ?~—AN?[({!ܾr??$m~5nHUC`TPrˌa^nj8\?!H|k?a I`xIk  ܲ[*1 Ȝ!g[g@#Z29$Zuȇo_?Yl2{sC?`#$Izc {3bIB8Ӈh!$4 )**2o T1o^|WCeYo@bCnM`~ 14N=w2cbJ1]=Oh޶bIN-"IZ|LS4-(tc*-U3ueM~0I vIu59K&O$rj7vLkheT5Dsz`3W,K zǝ_.V_ \ ƙn s# 3n^NtcˇKMa]^2C,+gvm0}j> `Qva6S07,y&>f%RS0HU!l-ˤb ,$>~pެX5aV İƴp=;Z:}R/[[/uSbZ]z9QAnpZݱDKgNۃO .|aKdK1]A-kfHR63G[|Vu -U 3mf@!I|hUS$#M/a} 7liŽ2W)ECٯo${'6Β" Esgi7Z;-?fYYv-Y0_KWV}Xcda+{Iks s mC};UOKz|' 2'OkN NeY<5K?{2[;_lv`ln_oӱƀB$#367iFViZ5ǡUsޯR~utܫ)~ul_Usrw˰Mm9|B)m~?ުUs*54y\Fعΰpe(՝$ 0Bz$_S?yc[GOםwܦew,QvV}bX)SzK[lcJoywIR`ݚ@uEߪX!f+.>|;44v67>遧+Z^|Ѽ+p~ X7wBy& $;Z]zpNS2r\$noMS ?S433i8:"D>nkSȰ-ᆭ*OF ,o>}m:r~G[Zpy|2uG9rzac/d51@ק^xZa@pR%槪#_ɯDt4Lɞ3^q˲a 98N~jAH24P0ҔaaYuWc E~~Ԡ4ڏ>ᆭ$rijw}-6[y;7kl z2xHȷqXH$G~?Mcz}z_2 e#өIR\gO&Bd(8Kg\ hIƖVlG64j:p$9,/MӮR;h8tGzxtKžԼ91% Ks$ W4w2:r#9덹be|-^T0E>5DhŎ!X9k-;K:;1=y@b%þ4F *r) $`dIƿ_6oi_eesg%%΃5wm kuG]Q"6}{zjۋQS7Β=w9m{q:^ =+|cZ>sN>ߵn׵x?2& ĴM 5 #LA" Xf>5\=;Gя֡MkkVGO?Gut$IKӏ'~>~R +H*c4lnoCT0zHƉSLAE GyCXeII }ƸX|v~o=~SSMF⦂޾(5m8ݱ$ihX2F2(QӔ0~I Wo+ʇT$GƧi"X_M>=s.sKΏ{@L]TJJyC_. q% Tj9$\hV GVF82~}zs$平NCd6Xm\x3WIC…Vt@<pd{;nު~$?c^>tjKvf1ذVx1ҝ#s@bt:#~x#H4‘ [1}ԄH3_g8:7 7 Hr[fŝ%/}UY uE2yȱ,Xv_~YK̗5 w? 
m(!iH+PqYF 91(+‘zxɌoxYo6ç/<ޔUs߮x5?ު'Zvx'#ѧiO)+ӮolX% k/wE%ښVW҆hkt]t]ϵn~мOYvE\֚=Gw?`Iqq^uIg U"!B0#c3;]n.м OsGϧg[XO?I hɂS:}朎HO{ Vmӯ_wiymʴ?yL_̩@ӧOӮ y P.vtjMjɂ۟@_Wi=t1^ _0$tC!L]K034hȍӷ"a)/æy9fȰjI:qEu'5}㭼GGJOT΃̕iM=_ܴӧ7ob`.vt[Xo ܭv~Wש[f%HYz|7ms#⮸ ) ӢE\TQD\5O kvHRc_wonec> ilϛrGŎN=/ ¹3ԧg?Cٙ˿5oGϵtGaYkkּ935}j Tj8IXuG$_֡iɂi}v>=s.ⴝ7kh Ij0 98q6 `_|xrNu\֌t920b`p70([[3"VvG2u'~#1Ʀa$;w i6H&:þ߀"Ig;5#+]a3`w?ßFnt$~S$#3;]wɕ3;Ĵ^Uw]k1`WтLJr=}QO_FVՌt9ᆭ6l|ʕk692שUyə ײt$-"Ö:bzM#a$v=lkZ;H".';!40 0!(2|gvVu Z 悰 [gia~v}t6bPN va\CKO㭪knwk;F q%G vTZZj$Lt njkG0R#ðjkCՆw_oϫ/-U dj\ll3Uwe9^)Q+]&+kz_f;}zG*c0O_oݱ45/אaK3;}!׏ ,4H:ϩ^H `iWHKȰiF~Rߛ.K"=%v@B'{xŰL֜5]{{qf6b ;sP%콲2e|>?H*G`dl#ٹB?$QE(2o'm^^***q`8 HbB¦$a0}z)0Ά@淍6C )FX*u$ӛn'$LƂ$#0r# H" b 8HTWGͯƶSriZC 3 s =dƐ|{F[C8aBHVye3s"Vsr0g2>kȎ4}c~,NuH#xhzꩧ$I iSJ #|)iڛ{Ąpd|kn7-w˰ʙϛ@4:.K6͐^2C-U/c69O9qYGvmP]s\`fK,^GN:o#8:ʕ+KKIcO ;G‘ͯ_̜FHSa!gv>S:|$Ê珜O/\[.$qf0Paa#u{)~p^:yk38VjB `|"IJ`:t~2UQ eP> ‘ 5]q܎w\3;]3ﺦ6_Xh}EvY syKW}v }e)9eR..5|X'lHS^M3uW{`"ċk@b۵n:ܹSRakʻHô3ET\GHrhxyk:zcTU%IV׀ !ёmBe3(G`&⠡z[lb_HC+**h̊aH8o Qriݺu׍by&:m iG2~aaČp`5.]2o@&Zhy:s9ֳ#{jaz衇Čp`e#0:Ry*9I?u"H,jqҦ7n]Čp`e#0zJXO߾#Wf t%XIQQypF-/G{nI~$m*|OrTTT00#;z[j췪!mjㅅz ,6^On¬&illX:5D-q8Z~=icr}Oݻ͕nJHիU^^.Orlr!rݪUmmmĠ (郉NQ|5DL⠧0@~.3`ؒbn#%{[ZzWZ_]mڌ"y&:ÚyA)[2^Tߠ7ԉ:1W2ʸvEO Z0 n Lb@dK %:xk%R ,5Uk+ju7ؐV OSiC)|#@KΟ?` xb\.$*ZZZR577x544 S'!ueָgpLQQi @D`&jiiS}}~H9婸Xv]NSSL! Hb`U\\l6;0z:^^o}=rMq\2 CvN2#4LcS*CXSllrD~t I0!mO@`@r: q0 indn9F0L zرC~az'\rDhhh$~y< 8QNnn{[%I>7a %g_}w7` <`y$H# G@,X <`y$H# G@,X <`y$H# G@,X <`y$H# G@,X <`y$H# G@,X <`y$H# G@,X <`y$H# G@,X <`y$H# G@,X <`y$H# G@,X <`y$H# G@,X <`y$l ƛ&}'Yfi `D rss9s -[b~Ouuu>455i՜9sTVV2͙3Gs֭[|׉b m߾=$DU]]mMMM{6/ֶmޮɓ'kZ|-Z&mٲE/iӦMZb ! 
@T𢲲RsaM{{^}}ݧv-ZHUWW:y<fPbŊC'|R===K/I<xBHQSS^l۶MR bѢEC*=sCV@r @D4{rrr# $''Gk֬4pIඑΡI'Ousrrsx<1M݉ʼM@A@iE08u`IpzͽukUUUŴR6$k" @?%'O65khjooYkphlLjV="%RVV7~)AJ<D@~d1g**}ՙsrrb>X*Mٹs6o,wjH /6m\i9hISS<O!P*HN_;wԊ+¾rss*k֬YC^0 L0̘5kV)&g֢E$֬5tMhX1Xs־BI:'R5ISSDp+ϬYOP=cc*8}O>QJJJ킽E ֬Y;wjϞ=fΝ;5yЪ kʖʰj@29- IDAT b:#BXV458f͚5+f͒4 /_QeefϞTWW7|SfRUUo2`\`UWWk˖-jjjJkӖ-[T]]  x9x<ڼy*++UVV[KfϞ*۷oמ={TWWY&9kx)ǣ;w_Cܡ^?b[Z0Ɖ?HWlڴI۷o$-_ +^z饰GoÎ!Y4{lfթ_`c;yd-_\ooFǛ7o6Ei={7l۶M7o;?ǣ?SM)|3ؘ,0=BW"@<#eDo>izfCWWp8x<-G]tZSwž_^ r|>Aaeta#k:9"""$DDDDs H)j@>3n+:v]' jjvySNvڻrwttt!Hhul^F5SSߗьPH$Ԥ9"'q^otQ6-"p:9nG0̹u`F5s\(m5x<2)$#"b@BDDDDE{1D?"pm 5bה̾􌦽),) H|3ό?_W 3S"#cyz{{Z,k_{<KgGM܇4mSqf뙈s+q """Ys\hoo FQ3]T߹5 5} AM=m\=.\Y3S1ocDݍ޼zӧ!,u۩~}cTg!_\QAz{{oxnG:F"u{-w H栫KNG:F8Svzwn:1[Bx:.ҺP]Z('p]]]pႜZ˅NͨbFq0v`Xf;z" ""\bCDDD4G---FHRD"p:rN# f{<(21p,t" xO5T #G$ ۷Osoż_Lk``` K2gA#ΟX͛t:SNN^#|v5{|$bDQ=;f嘯͖z\3NLMI$]. D%F~XlSSb&l2*b p`'"\ Hf)jvG1DVhE"q]By<#~uY^f6bTt:=o-<b^/^ }ՃnϙRջޘ=;F#ͦc@BDDD4K_SvQwm6b~ܹs^,:9eV/rH:::d[Ŧi۷tb p:H$hmmՔ'6b:ѯ-G[nSo> #4+~?h%"b@BDDD4\.zzzd'uŊhnnFmm-Ngn#v֢N3)jiiA(B:֔R"===. p6 ]]]Xbv܉J mݎh4 Ӊh4JܹSMMMQ@~tZ޿Q=@+H`Νhnn@{{ͣzDd]ܹ;wD0nQCDD4A AWWb 5@1FCA,C,kx< 91̘!+%5 VxT >OxO)hjj2}r\ÈF:@0SѨ χ`0SboۑH$䢮rɲ)QYYP/Ũ|ODDZ+D9Iapt8OOo| CU? RSWx<em0Dww7B'|:u*o@sNq ,v1oਮƷ"%ۿ-8N!"""ATVV.ܙNE_zb Tz$|"""/sE6܌D"=bʹ`L @,ABDDD 5\+v.Gs 1/DD4-q YrH rU+pj a@BDDDDxE]x.:CD <$DDDDDDDTQc@BDDDDDDD% <$D]~'OYDDT?^}UA|"EĀG?9x"+x<ׯ#͢! "b@BDK8 ?C""Zn޼! a@BD $DDT-$D@aHBDD! c@BDr!=~P$"e+_*6n$fHBDy"-wCx! 
"a@BDM7㿼*?(QɰX, I "E2wKnB]Qb,dWYVBhc@BDHVaomeזBhc@BDHDDğ}DD% -pC4GoMo'p~kB1$!";$D>(?3C9 D+hcHBD4w HhY|P|]{Xfxeլ]S*l^~ થlʵ rǐh~q5"Z6'G5OoXh*m^3=҂v_m/gfձ>bctGSY]L*8h[8Qu|*~#0>yc44}Ʊ dmukQm]`OKs*XVb.aTBc|Դl%^n 7f:nSkoЄ:޿ˁkn}=o?oȩmGɜr[m]jj8_UZiT1$!"b@BDĐDUuDc<ۨŽn܎xsZVT\#,ͺuxl^Nű 9&wPm]7U`5Ð{N޸p XV56xk찔T&0ʢʲ Oo{!ԁĭ)}r gЄ 5:$oƨtgo WTXV?oЄTUU5MylTS?,G2Q~f/3i[7o}ۨ♊}ܓGD}r٩ر~ ]|Qn޼qJ I\xYD&!ŋK/ᡇz]־o9اi$M*_[tO˚Odg..Tqo'o㽳W0-Scv_#Cbd_Oa5*/[FӛM[;䫵?Ĵ[SxsSM}Oƛ.VZ CtcsiRU˰wKH*Dz\u8oMO]cw{X#1u럙ə˚f4[N.XVbǺ5JgMˍruǔ I掫ѲI ?n޼KFԫ;jୱlaП#%5Arӵ:bӘ=k+gu߃3saKgJtXFA'L\WqshK|$^>[D=.yFbڴfӎ kda M{.gNEfώEv/\>n%" ym?G!4+|IV I$S?ϰ^ PWih*>c/e0^DU0 9f Y.OO,)FF-sQq{Ȁb͗TyP,C'1P#;u00q3llorMb̵M13W>7ҢYgDV{v/e4gk:D ݿ+8$!"b@R_zAǮ/ؐD.LܞRӝ4ngz-ӑF#ߚ*þjHcsVY{nǦ}&^ ъ@|*MM=Nl[`Yr魽 lF&NJj;LRq&_! "e;3\[*XZw5ra˙NdyOgUVL]&?>yo]h&v_>bĭ;flp}9[pQA,.INޞ"bHBDĀh$;uu?{m8snLhTǚEm;ӨupK\ÀAMd}Wcl,M#fS#?M-s CE4 QaQI/c˧wfK:$= |U[W-T7nc͗cxkNϴhXXqs`bt1ڶGך[cG]E'ܜ #j-,: *{J7n(x-xB**9rCsϳO?5d2d2%[oykYgr|1 !"Z6mތ?؄>cSo*-ؿ!VN6uLܚ*hɻ#Wd冚N.Θ5cȹ[[פSw7:=Z|Fl%n-eJ޸-Ý Ro='xj nrNǩ(]My'4n ҂oBҍ]LːwKVZdrC͒ٽ! 
-hO~FFXL&52\ܛ}-KKnMvlBUy܎څY5D'o㽑+x~٩jjMg'g.T7rmutqNuy&nM'g.bc͗ୱںPcXxIͱ?>s_uRmCۣ0l;x9K97zMqNMh"Rǚ՚]zD3S۵Hm ^\b=?۱5 {k+-\H5Irmv8VǼ\whp2e0~x+ C4>yo~[и N]pKd܅&wQČ[9FN vbI#2x~ss=5s4ﭭoPpz\2쭭BUyYN/e}Ď!\urz,FSY.5#Zk!݉jd~f@BĐd9|g|@1$1 Io-٩>}PHV윢O6 1^ HJ<$/9uWz$Fk(bb4Ӌ:1c)[V@A!ROb 446漧7}x!g'׿=f{~xN/otύ~yFSٳxwe9z.qz`5^Ӑ2 x֑G~C~2 m t _`rrGdzy jbzD=18qṟ ʫrGrBn9oy Z۾90N| ""~X$ZR&wq5B,VP0=J?B9ssl@bB|n;?=O>6D#}Do8  p?9+ÑmeLf&q܈|ZaE}}m#vQ/&{nd'OMñKhT!GчsoݘL :A\J&q#سgrtXV}z~mFFF048h:]T"7ntCJo/>cV=Sw,7nP|*FO}{gGI ^|*]du:>wY"Z{?qD6;tsdrm%Fe3g~<Ѐm8۱t q?۶ߟ.soL#3 zĴ32Gӳ~?B&#9Wk7aZׯ:G%% G-sb'dVDeYܼyQF?~xk;Fⴴv972";䓰ZА({f ζz}Gd2y;mCWʹ5$xcddLup0 HTAY$"F1i=&B-pGAǀ(χBPc?#VϽ=i0l|g#DCԁa Puu&0<᨞=WTTB;zá!١W|PBuSG<ϻ={z0HfTI"Gg}{o]fLeO #c HG?h&o==_k_rЬFQLS|t̍Y[,<:HN?Ai wo[.^䨮={1ⵓ'N h8={4UQQ1Vqi[3=4C"ͣ(?)Ku@&"lv8p8%m>߼^~;Npq.C-՚;yℜRev|/-5ņ˗.C" # r1W"9^.!AvFF}Rؽg>#446 Q9^}vD~K|uzXcɓj^{}gxa3"օ)d -,$DDܺu V?b8Jhd,xm|VMhvɌ#555Ը5q$SwY!DD Gp ѺhhlԌZx75SokGCc숫w=9VѣH_&''xc#148HONaiimQQQN8WG~H_ɜhvɓض΍ՃpLou|q6<7Y og2f.SO7 p8xAʡC?42 |:c`2d5z|Y""^=2N819+ogƥd>}( "K0ѣy[H H_m[=ΝAr|q(z_?^} Ν;SmV+^9tG-ÑEv n.XV[cDZN,Q$,FSY61Yp8d2paVR_G~xTv"W}L&qnddr$tGrהs##d2Fp4bϞ=$%>Skhhz5qr H/048dr5S ᨞. a߻ 5R}}=ZfM{Ϟ;zヒH_.%ǑLzP?_'ǹs#Fk[aDMv[3 !"Z>gk#!}./gୱEΕd2-m v ŸE{F `8w&1xxhꑏרUSs ϱTQ!FYV*#Mv.FS7 Vu5^[ʇµ`dީ=[[T[WOƩj['qs =66اi>D GJz}J "*FK/z,Vju(Z] =4lqSP`5xoiS,e+C V&+v_OU*쭭BUy;{%jjjN ?>syN39R\۱Q&1򒝺_%4<\t=[dq"{n-Uݤ"oC|,xZt?ba8B4OuXV}hY ђϿլ_3YXj2E?MeԂm;x9c8:d|6/gиxN iBi.GI\5eGSYd)1";cc>bNI4\|AF,GT 'Yxp(?жmhhld8B H>+h > G^U*@-v1]SWȎBOކcjUZW 5<3>ׄ!3󛫟6;9&p$uֱ w͢phfv"b@2555xYD gu>q G؄h Og󎨘/oM>ǚ/]krjjT[Wuug|t6o2Uak+TcEd yvZ|E<geТ1::Zq G -;p+_ \|Q ^ʘ. 
03(3roGiv*_#vxc}r}֣TV]m|wȹ_AMM /D "#4u0QbpuYW-j܎>bEL1!"_ HhY|@d8#DDV""~@%#v1 #vXVqS7U ;uW٩uy3x9Mx‰[ӣRWswfl}r pYU[[NLI޸Dus;65uuoLDGD\0 !"~@ӶSwE쭭SaD7W?/FamSF;{U2Qc>B]$5v5`^`47ɵHԡ g "h*OMdཱིW]Oּnt=}9b4**-/qAַ>J~؄iȑ+_Wy6eVu=mEDGD0 !F:;~@,yv"{gQFR,uڵh-d8BDhο[JB6{"b@BD X,|[D""ZW|pha1 !bK/V #DD o% G1!"Z HhIpJ")6D_%><Xv-+'x>#DD -ik׮7 VϽ^{ 7oI"Eƀh Yv-GM} <$DDDDDDDTQc@BDDDDDDD% <$DDDDDDDTQc@BDDDDDDD% <$DDDDDDDTQc@BDDDDDDD% <$DDDDDDDTQc@BDDDDDDD% <$DDDDDDDTQc@BDDDDDDD% 2V}C6Ί z@ܽz@DDD"l%wY DDDDD$N!"V2vZVQ"97u͛ C=Ê ""*q Hرڽ{7+)6DDDDDDDTQc@BDDDDDDD% <$DDDDDDDTQc@BDDDDDDD%U@DDDDD}6/^dEѲÀ vU+N!""""z!V=p,KQ"""""ʫ_Wp5V=0vYW(ڈq <$DDDDDDDTQc@BDDDDDDD% <$DDDDDDDTQc@BDDDDDDD% <$DDDDDDDTQc@BDDDDDDD% <$DDDDDDDTQc@BDDDDDDD%U@DDDDDTD" .tr}!eH8}4"""""Ax^DQ X ^PhÇ|zhoo_秅$DDDDDD#<^4Ekk뢍9eXjkkH$066\sO o( hv  X ͰlHs>_<Ν;|}~ZbCDDDDD41 Q^h#>ѢI$B!qtww#Hn]v>D"!;fn& $ \.x^~a\pPtr˅P(k,éS+q)@?N> ߯9Vj ;tp:Fr ͆`0s?E8{?uww#N:eW Fm/>Çe[>XnoŊG3ݝs9?yx[ZZ5PLyh"""""EDJSSӣ300(v=sv߽^Jrl6exxXq\9ױ9555)EQ]`0(t:&ޮB!ye555ޏ)~Rbx===Jgg@ Bsnwu[Y]sݧ3O<.DyΟJ IجCsÀP\.fi:"P($;n5ןkllL%~_s,nW|>쐦R)ttE!p`EQ+E7֏P($#EQbxx8~DiZ>~M;X1M.(>}#}{R)Y7gD}>} [R)n+6MnpMvzs*1 !""""%NteugҨS.B  qԇꎹ"Wju,}>_k!"yDGv`#cvOuuG1*kG;mr^3a!t:s/}x*u|Q)JIDATA^v+"1:*yΊ-i%""""E#9 Z|>ZZZrJ7 "N.r8N]gĵ9pp= q󵷷ޓ?kb ^Dk2SN%i{΋zݐtuu܏axxx5QeηO`򕷧gVk0l+ݎP(YfѢPFQרoz.X,&;:fljN~UNQĢNtu(NaۑH$d8cT6u.~(c8F:[>a>vezp8}4Z[[r!#՗al\.N:x<.+Nlzl6>-o1G'Ŗf#HhQ|:VcNh:F:f+>(~>1A>:/ʯK5n 3ݏ~DL1囯mkcBl6jkksFDLCee%^/ :::сH$T*ex(|#?fSBίޭf @nwA unj1Dl4`4~Ng:fU~+_'aсD"跶j:Va8Ntvvb``cccPDB֋|6F>Çc-1 !""""LSf Oԣ R:tZv,ki5 q̎1 ":b{]k$_N Qh@ /Nhbi0n{WVZ/Fe\|Fb1tttSQ!$DDDDD4,l0{<9=EvommE",zi^DAl~D s?qBp8\p6R׫:ש9X4ՌP׏̞Bʫ-REvwwkCLٔGBYNDDDDD:;;ymllLuOJ$|xxXx% *v]n%JRP(6M}Ǔs}<zaz(C {~_sϧ~]]}G?(HD @q\n@#^p.l6D[v*===5Q.%zs%SS_}>qxxXlQ܇շωQт % v֛;-:cccpIN rBBSv^BH$8NMٚ4~}C }G\ә~uxxXVEYoucT|rHԳL:S_kllL 0 Ηy϶ΟJg֬^)`fDDDDDDx<5u3d]HX Ͱl9SYjkkJf]ljDtzum(o볐򴴴E Qbhmm@@9k8pT5Hhqqx^PqR'p @ggfnؘfq`0X,X,ƎetͲ Q_vt5f: QbH$H$p\x<0$DDDDDDDTV 1 !""""""ǀJ"""""""*y H1 !""""vaTVVbŊXb^/+Dhnny-N# 
ĀJV,CGG4N'l6v;+g8AQ{{{Q[[t:J*PJU8~wB(Ҍ|JJ*"""""*YO2zsi5N#PN:sx}l6[QO8}4N'\.#oq455zfbZ!M$p]o+ *>z }6-BZ]g<ޏf|ɀBDDDDDHS"bOGG鱑HDq\vPHD"'ZF^/_]ollLޮ ; )(ÊռxT*eX'P\.Dr?<<PT*TA!oVlR)#}].ὤR)%04gqJKKKNیͨ-E=ݏME{l6EQB266&6#Ѧ -x< +PHioo vG B4=Itvv*ө455)MMMJ(n% i!ͦ+HD BfS(---_k^oooWv,&B#)$٩3SekooW<t:vM}v8Q˦^ګ},l6Y{Q븽]ֳ͹83q˕S6uH&Z>zKR2P1h m%7 R,YHƀh|>I1;HDv՝Cѩ[oգQuYzsDzuݦ|>άG;~?l ~_tm6 q?n;^D~u] s7:N}?v}ijo:G)"D\[}Rm~hш5.B0kbA4F90vyh4YBjb ӯ/XA\Oljt=uy:;;skt|>ttt֝s1➝N'LQ_GGGNkwuuu{F"@oo<^l6{. >]]]9Su"Yft~mxޜ4*^`~͂555vo1… XbEzZ߲M󵴴}Ĩ^D;FH$ιmzԛm{͆٩EQ?jgB'n+eD]bwo<^< X h64d@BDDDDDL[Cu!Ϫu`somj4c{=_V=F]}NMMMx׋ViÑ>/o  ib5 ;O8Ķ.KFϙdTFP_x<طo_N z1k@M]WbJ>|Nb@BDDDDD4Կ6܉NcWWWQ#qfǘ$ifF.t|__Sp:벨GFP7C>2 Dԯ^sQuktvvӒZAGfC4):t1*Y࣯k)QOebpݦSJ i%""""E1S TwD0`^E8Fssfm q7hqK9z :FS/Y1uu>.{mm-VX!h=h4^selaXBinpD= h5Wl6|D{AúVFϋQ]GQTVVj| ^MN5G -u((֯ :"<џbE?oE'W[P)PHvng32è,')t4zVuyBqHӚ5e!طobؼ,* яLRwE#l]ѮzTzqbG_"0g!#^u[ZZH$4Jwb'""""ӣPNbٔellLS::: %hSl6@ 266tuu)v]k ǰ,HD4556<<,ѡR)EQ%*G'^BeџSeT(.KPEQRN{qNgZ]]]Z`Pl6exxx òl~_sT*enllL܉Ѭ=JKK4m#B!Rǀ\{{fhNAt(:>XQwp'B'^WwbTlnaBÉBBp:\l{l:rI@F: Q*2sϗsEQgkBc6TqV"""""Zp---F,cZZZL] #"nrL\.B!i@^W^Ofu|{<ӲvB!kqaq$ M3={^yfH$`xLS7Dn~VSo8mr+p@&+U +@Kl*#jc65$: uon$Izy^_F< pP 3󝿏_Q|z=mߛl(ku: +VUUmp~xyy''F#ApVB'4t:RA͍hQA .[;c[3Vm} G8쬁dj=ui*k<ĶmH|ȃ&[b_C9JUO3؂$N,KYkf ?{15 -\<pHc_ӓ$)O1FFa{p p1JDI|g$b9g,KIG>gIR$3g$nSz|T8{EQHPAce0h4R\t1▪*HPe2H(R$*Mz~c4+T$t:=X,ltww eYW$"MӃ&EQI^v^3w(8||^Iav+I;?ATzgz7m[gn[m۝1岞>WA 펭ޟW?֯V7Ϳ^(.˪Ujŗ x#0R"2UUUUv8eՏ?cz뷷*Rqk^38VYfz~~d2t:vUUUZ.4i!\F;MWWWl6;PƥocLFj!Gѫ6L&G- ޹%Tx;gNsPq{{[/cij*`Ǚs/i|Iq(Iү_{$>o)JS3h.1wMK]_W]J`ܟ^'@5=VUvwL[1;# !~cׁ/Ԙfu8sk޾ m; UV4vzGߣI+"ZeY&i?÷;LSc$Lw^mƱTOs^$skA+~$N[$˲߭~ǀU 9;]g|UM,U%߅EY$EQ3arŻ_ Kl{N2yZ8Bpd2QeʲLFIZ<ϵnR^[ 5Gk ?Wϓ$á,x<#Wuv].`UI <*H\34X,Z.Z,Jwww*bi[8v\.l6|>Wד1~oVPs,h:kv~z, -Kz=u:z=-˺k[ULخ9mǛߪ NZV'yv;.~2(Mf$NXF_6fiZ轧ކI5 qQmj{{{+c8{x'Fnj{uuo߾Jy+ C8)kz^TWEӏ1FQi0h4yxpH# \<pMb*IENDB`././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 
ceilometer-24.1.0.dev59/doc/source/contributor/6-storagemodel.png0000664000175100017510000014720115033033467023757 0ustar00mylesmylesPNG  IHDR$DβizTXtRaw profile type exifxU 0Ex8h/XMPد d  ^3t"1R휃w eYQC}&pufmvޅLQYS(WşnA,"jD@ iTXtXML:com.adobe.xmp "sBIT|d IDATxw|Tu7)3IτtJ PQ ZײWeu*kY]4łP$ %^' H>̜s|?9~p!7J\@p9 r$yPw믵j*l6}d҂ 4~x5!rl6mڴBzzHJSc ,UHJeǔHz%l#.G \@p9 r$H#.G \@p9 r$H#.G \@p9 r$H#.G \@p9 r<Ն)??_o*==]6M4tP,)Ţh9HXV*55U)))lgI R\\ 8sI6͛@&9~6< siQJ?ھ}o.d@1n8?; UD^^xb@ E |9hh?/7+C OQy'2vXUaM 7N&Mbx+d_šDx3 Z?,RL?vXGSҦmٲE[lQPP̙ɓ'swp=:HLLTbbb ac'h d2{EXPt8y~cd9|r[N'Oִi͝@=2سgVZkI'NU s?ZGVnՑ*+)RAA֯_M6iƌ4ѣ4ZʩG_\xY &P]#Ot"l6֯_DrРGVUVr`!ك .1474T:C95w\&ig=HNN֒%KdWh1f#=HԢE4{l͙3;'@bʕڲe瘡#t?QsDt=Xvno>#%%E ,Ppp0w"O9+DZZ-[LI^ϙBޚxLŏW/U~N z3 $aIc/v#@FO*>tcӦM+Q [ 7QMxLR֭cK@@"99Y$? Z\>g$Lp٣K+WE:H_^}"1RIH6oL/n84f˖-Zt).Щ@i2Y0+%KնoN(]S#>@ $B 8s$sKµ+$B 8S$m&=_E1CG8]tKtVUIIIqď(I*((О={(H73yyJ@P $&I3=2MG(gCDz=D۷ogrK!3@"//1 /r=H ?3J@;h:`P(*׃%$LBA\P:݁Dヮ%$L~*ÄE$۷btG{7LMMuzE2h($)99Y>+ZVQQ6ćھ}$i…ZЮ@"99ٱFXEsRRR_׎k!kא=P5^4pM^mn hvh? jLfoCE Չ#1. A1[` i_])mkW !I A' oQR8y%u؝/0`~'O{&bDb^Drrϖp*փ5]5==в6,!a$zB hcDo]i|GjEmnKE>ͮ>B pf M{㒒g{o/Uuui߯7^=x0#x}3Yi݃PNj3(//'lmhif;|P/7]nC(vD== 1L+ 7e(n(Q'W(-.Ғ矮Wn WއS`}ʞt3>W^fo%&h2MAZWZj˧j<44 %X榁Ӧ)zTY~%'ȧm[}ޭ&Iz:z%nDkj5㚟(iv|iGgf;FlczzEڲ~ZwtǍ3UQa2~$I+u'T'ڽ!(_8{ri&/^JS֜/Dޡ0n4',L/\?'x/2QrP@ ý_嬞hMM\O+x|M4N\R8[~yS?y@&kusNwOOIRqQ 䭵{/5wNMfmd+%aDdf8Pͷ+;3JGlQ}]?_3ߟq޼>yP\7'5NxO6,"J&_>XB6[֬|S&_(}6V4㚟8~k$I_ll$UWU;?y,зCPQ}}ubn[PッLVUfSSj"sopdF?C` t[mTIRd*!1c%IY~^g6; VVRf@^(K|lUu+ݼE 4X-x ;W!G離R~~x]e'%5۾ ⯿N#FjU))J]V_5<5zim?+țnOx~ Κ7 pUІ Ϩ#vλfS:eKjfѼT|~.> 2yLzkڬPCkN~}bjθw Gw_>ccu!6p6MZ=LJ_JӪ7^.92Iԑ-/?7AYht}In6?@ї^"IJ]VU%%mnŗZ|}̙oկI/*S`.)y?_}kFOu୷לޯh/הG9YL&Tu=w;0@n]%J]ؙ4hyOyoAwM|#"4PԔ)W@߉wDg6t=u&͔2Y?+ןo 7B&t4H'esx^FL2PG$ecF<(I:[:G*͕ŢӦikJ;|!C4wH: Gfl(5_h~jjA0{iOeT䤋4[~Zi[){Ht߽{{tf6yy)kN-8Psjk}J Ӥ>$˰a}J׿Ψyt -nhkH;YgU6Ya:^ľ%ii>Fܵ?חjϫ9(ҁҾᮡgw s]>P{^yE%i骭T:[J`ԯ\su}}uup:*eѷKHBFVktdfV:mm%ImؠM59swׁV5*)QR9'Rͦѣ[NHxMڙS :LOhqW'H:99e rzݣaFNF<z`UW:}qiٴY:nl߿! 
-Fv =Zrxee3x]qwkBǎ$f:wf^OxKkhH@ 'ث}ϴ%Iz\#M2eVeqI=`$իZ'~үT50NyZcO$@; U%SubNB=GUee]~6|jíqݥ5k[F??يz;Sf @p=Y{0]q!3g7!Fi^+tV)I[|?ĉnSWkvEGwmCO@$.yIRkZ|?a%}N]~.j8䪫8d~ǚiGZ{=p !IJdhMlڳ*0xF4_~r7eSu?.spSSW]M1]"#!/KM%?&(yz{wR׬Q]MbFy|#n4/2Rn]\Z]m5:6Wb.Tn2v{Nup:)5eɨбc5?hT\Vδmh=.ldXTXXHdWC}LsK(߀=&kv>We6LuλM+}" NPxTfР3헟SW|옾]&Wuuۗ^TѣV3ic~rBǍ?Q7/[kvGWuwi/_Ѭze5L=;چ)СC*ɒ*ٛ@]ơ£+~z'aDW[1].%>^@s<ߧ?P~JJs;j R]MtlgG4|?b<}TUZV+g߾nQg۸}*(.Nu5*iXRkvVP~ &OY xf;vw;{ ׷QZZ-Z$I?0r.~L4k,%$$th'|R eC4_0ӡCd.)QC\(cRƪOz)hWhL&l6e=D O;s\\\(bmg[{7l|=z;50LN+5#cH7NTV\|*4E3C@bذa?'Izܬ J:5#sHk̘1}\5$\0:ϭ#7q !p\oodu83 $L"$^=I;T]i(4iq8snݡD][Y  F@p 1}tǟJϢ#vX4yd ͬE:HDGG+66V}2EzI=~*>eӠݣ0[gv3g$F/iӧ3 FT O/c6&T҅Z'OVll,at!1ch߾}:+ŏaTʭlJIpkۙepp0EkYVeddHRSS%I={[';+KׯoܔT\{e>]&N;&Il:x`[^11jMZZVZ%Ilbbi@ Dttf͚ 6 'K׼߰.r8yJj=ѦMreffvX%%i[ݝ^ 4_YSzN45-$]l9{ $elϭ$enݪ]o=S]3Z 5${I&i:tHw)_J,zlVmYBՕ{aU g=|HMMUzzӝ#,;["eg,;[ =%%o0@OO)N@낌F]ZYujjT]Wª*UɻX?n`OhL2kNLjPTT=58 IDAT@[{-Z$IZ%{%:iͿ8捸Yn:+%%E'4z2xz^8|<=5<]ڪ*8=]eTffox-""B=Xj[W0::Zv$Ҧ5^|ڼGqEiƌYΝڻwo08]i!! #}|<=%kjpF)KSWU8{ 8)S$IockoG&3 cw>Ill,K|ViiiN?84;ckSBF }vEOسijVUJ޵[j* &F 9V\(;eSxtׁL|7%FDDD{(ܞ={$l6-ocZ4b`PK!$p]n0-_~xر7nmѝOHHP~~o%͙Ϝ-[ƵNaC=$L^^6oެf/>~NޫJ2Vw^ݻW} I&u˜ݸMC &tf+:M`I+%''kN+b4RaDd4'*2eTT8‰۷k ٳS4tW4n…X,ڰa+mz_/s5|܅}fewP}Έ{w0@Xb>3 HYS&?Dd4*h *+RZrU׫kWpUdXoH>_JYً}8y[J{W. 
,tLg~45ߟ0Ц QFU*\Tc3u3<\٠)Sbhɒ%lڿs2sJ:_$x, 䦤ǍUYR 1 3kJuu޿_+1a<:}l7W7&>>^ӟ+Iy%O\̌#_/8ɤG}0@(*RқoDZZF7aԱҎ_TnJ$q6N[?֮]W"qZI9is/[RVnծVk+)88X))JY-_?;vt<iժUTVa4zT]0 ƃ_h痟:0L3g"4=, G#V^-I~8'{[Bܪ};5K4{m0c9OI3f,X@]bٲelڛ"KH~3f\UWkՊT|||y$$$hZrۧJv}{/?u,)I7oƏ K]V #pVxzQV|r=3mѓOZn:L ac.P䠡=+e.%,3gL] p  B^򨫣m۶I 8k|<=u^`v@{i yؐx7 &RR]ԘS5(nYVnу)yNevz ʽT"?Fjy; OIKKSAA0=__.,Tu}RRRzg Ѩ1Ӻu디$ͦB%n\čkAq3Dу}XGnVN; h̘1>}zp77兆*j>ts <=[U6 … eZ$۷OTV\;: Sph+84BI";%*-.TiqNW^vf^"""3f(88;r׉0y8g U`pI_@ "yc(maޚ2eLb8!I9Y*R~}=&ŅNs@NDD&OqB=E//D; Q0H&@R%G)>sHƄ8ʪe=W}TjjRRRBPvm1L԰a(z eFE:zEwBGq@aUcg&sis{sъ֌3%''KRSSU^^ޮ1,X,>W~P t12EFa9璘L]ReTTHƅh׼rA @4p4jȔ3sґ/ԀkVUI ŀ%kwQ$T^k?bp)w #F\0Ӫ˭e`OPemq #bSRelέ  t2SpΫ*)QҲ`rKȗ99aaG]]'pNIqL`3j{mdP55Е M $w89$-EJO`JضMYG:\V>>ݝtx;UU:sFfT$PQЧyʳRF_\EzyiFVU*fSTʓMFAAڝ꯿֔)SZ=>sB~P9ryi~eV<򃂔"٬z)/בrxxhkN)\Oa޾)8 Rib"8 @d0+|d y5kvUէRP KAJ|Uh VaETTSEEC1ZeVT(\6UkMtH *  JHHPOk{2ʬXdRIU";O./}HժEE*i[M_ sJ]@@W$NAZaRHNBrrTܺ:? IQ_UzayP// 1qyz:n5/*Q $ze5= LA@x2ǎ9 4d=JTTZRI  4E!{JUWJzdf*㛝*d2X> @oWi$P .^SUr??ZY+*k)=VEUU*VUUʩtKY'w*)qz()vz@|)@W=,),~.W^fU+֪Z {VTto dpsg~ 4!OO |)CK[599=28W(@7iQ;w9^秐#q*owwy{x]>7niba>lMt^`c [WmeVS+SU yWȫC0:@ɺ:elۦ77f67Z[+km$MGG7Rh_ar*+h0x=aQpV&l:TksklR)۷07Oضg INN / 68uu9 * U m=ӻY t~x&N-2#˽T~r _d-)>@W swzHM+4V%6oo8,B7K2-C6^l6S)Ϛ5KǏXUMAC,Xv^x|>xn VBB2|bW"z9ooo~ u\@p5mjk鶖ig sOhxa h^~*/-_bմhܔ+SVRԤ:))$EŎ{'s<,I:꬜ϱZ69---mt4zzH@+2~ArܲVs<d؋:vHݭڸU}jk}?UʞmŇ佒a]k'Լs~Z =rNx 箴4-ZH-ʕ+eZLۭVRRRT7@ 男ʊ ߿Δ-/ox|Ec2eN֠(`kv1{B{u? ˍU^^~-[#(11O===]/֏Tnf@Cb5o޼6e%IGQh{?? KW^zJ?أtm?t`g*=!YCF^9 +WϮئG}YDmҾmqի:UU+)O۷m,]W]qnst$=rB#++ч?|3e45t; y^uyK((4B|߾Eu<ͻq'r:vHU Eisu_~:?K_Tyy=rh?xXeϫv($"F}/HҽW3o7 &/Mڼ.Z㭗)4j]KHss_)o 6M˗/Wbb,X`B~~BE h4扥$I~}B?أc?|^$E 'S:(WH ԯ4jzaM~m?÷V^X_P^zJӹl𭲄FOlՓ˷(h^XaDN1=`[s{L_@-~[Y{Uֿey07Kv}^|{w&NV[[uIK3|SQzZyWЁ]ۯRMu~ZfwHVϡ(/[Ey t2Ua-U`g<'n{Yu)=l)8,C*4rݕwbjʕ$Wx$osʳht$L'K֐Qh2+z(_OZ~$\=W%h_~$n}YF]$)wǨп?"`< JA=W}}b7?ܡtݷ57u}5hxͿ 5y|t/j@ꆻCm2dGC;zDA! 
Ѹ)W6[.-Mh޶{Hw6~M^>~r/;|v]5k,ERY)|r4)yH@Ő hŬ[Kij*,f.ffxI:~ sZKݯ(nEN2|I֨eo}hä+_0I^^ު4izVkUu{<9?@Fs?@K6jԽLzNnܯ$ͭJNNh9S*,&V釕~ћu勤(_ _?}COk$HHHP֓YNN1cR^>ƪ11 4P,_~}ۯ$⑿j̤8K֣7_>cIҘI3`Ũ}i{:Ƿc$)nE?!GBKKz:Tӝ!@:b%NTki0:^yݒ6JIjy3㞭k7tmsp&9me_n@6\#i>YD^>~jޝ:r]Lyjijm[Z  v0i:k sOgԁi鳿oكiJ RQ#1}SmF}rKݯ|ש_÷QCF4;Ff(qzHn|]'O֦e m s$IA!׮,"vbm]m]ş7Y<[jUuld^~FbO>L6~t<dg7QNMe2w9Ң|4 p->d_pl]plݡww'?=ikUu,noBZAK8eKnϽи_P07K%  ϫY{Hd$}֋#ui+њKv@{ٳ5m4y{3 ʽtTiiim~ 9{<^_ZK-wѾD2_zG,$k !%Iz[Z{ywO+w}~&p:WWcIrssO#'^čPrIwZ󮪣۷]5h{Lt^p 9e䆶 xH{H[;uW3쑷~z=JNN0@ȈGڷ Ӡ\m^$iԄ +4?₋%ҙٙfZE ySvz`6gKLp~{I!#o8w67Me/Zױ -%* *5 Β$z)܋='$a.ּ9&u<=0F˟]aP@3Sh[ܘ6B+lt}R@p s;K+uW8v_,I*(( /^zIV״l6+66VyqaH@ F]>~gOdRٙ%4Rs{پ%{c2Oh^UUVIn{zEn lߪ7yXĆ\U::7ywO~.IϪ(_Ǐ ߀ /aMzi[/fG(,f*~V͸ѹwDG3ƽO;.hHHT>LҎqQaޝ"JHHGҒ?W̸;ֱTcVJ1{$iû͑= ϭ) "%E0l+)v6B 47ȕe?ED!8yG^[ֲa"NUQeeX_aYc0^fHMMm{˔%%%5eddd\IIIϞ4,b[\O{Ƕi^{m{m4r =u{ 1˨_"JHHC&<"~$_oIyY9݂ø󡧹p5)HҨ!Y;]׵w^.\جM 1;wni֭[dwy'Ǐwju So|G;vzf̘AttmcޱJscJscVJ[(pRe k$ G%>>^4Dj~~ \?yM^n'G0ϥ5Y6=enq*Yu|RJ~d-jVM]Ozno ʊRw?qmux}'M&O}RQVNqn.gewhdrNƧW/~+zڐFH\r$m˿'A dBbb_ܲZ 7Sj3?szR3u z!ڀFH'0pOq\s䥦c,&rV| QSBBDDDDD EBY;Kr!E L;|97ϜɭoEX\ftqѓ[jQ7mukrl )DDEz^*d`+Q=]9֊l֤tO01oƏ ۷ݻIݵWV  []޳GmD effr1BDSJ9r*=- 7'q)03S!p `S(+,ȧٵhu5[(-,lQeU]-z̹G9|9'On""""""WعdvZxᇉVp.Ss||y{4h= "֯'O7G03Vb 7OOrrݞѣ*Qj+Pawwn+jmbAdeezˮ3443f') KǷ87S_yXqd&'Ylʼn,0Yu|Zn6H;-?ضnݺˮbW C&Ϭ)39/_-׍Yk߹}-ϏFp滽 pQBBDDDDDdPlZd21}tFpOd 7Zۓ.&"︃fJeF׬ FifO_K6lgfl!?9C9~I ]M‘#5c?v:|=Կw5z ިJHL FntƎ~9oO&?W/ܼɉrN̞=YԆW(brMD~=bb ɜn/Ikp!1Qkވ#ٗN*"rŜ>#[.F|~ N#""""""-tcOĸq㈏Wp:9Ν\KȰp8H3grtrrӏ;0i$kĿG{',RrSN+0mL R,g4= %&s[&wDuxulh{(m[\O[Ŷ-=b۞JSm{^z=.&Br;PF|}}4iYzR:CɡUT i SeV:eA}aXE٦;Co}9}&z+ݢ"u;^Ou}U7y͋m[\O[Ŷ-]bێJSm{w=r:OΕ0֏Ƒq4W""""pah_nFY_327d6(:JsVh=z5]=n͋m[\O[Ŷ-=b۞JSm{w=e%Wne 1:y"DڄSeee "ٴiE Hw3i`ѢE HȻ 1O6\,xHC>IIxtCUG 5ZV#$DDDDDD.LĞOqu3 1*("-YiJi3IjoQtt +;[IU?'v荒|Ϟ$o1->ʲ&gsk%#N"##;y0~xΞíC &ɓ_mIMHǧ"""ir.l6Nzn{--qtGdtl gҤI)'<=[7Cx1Ϸn)蝔"""ig`tQaaa̜9enلGrK0L7xѦ*)!mnvf?9sdzgGkuݢ謔N#`HH;Ց6l'Nb(8WX}Wܿcz D؈$$*caԇgPL|/!IO}a6|e}EiHM7q#S F5>˨(+o8p 1G1(+.&9Nn(b8/dpf%$DDDDKQ~69UC}i<Tc)SN%::ZAiG|#<(}F 
dc޽1='?΅Çm5Tsȑqrqql g&plx.>Gsgתx8OXTlQwŏ~ZǸq-#npp %$DDDDC>t f# r[o\%'nec_<{wbۜ9d;@@d$7?cSO橧ػd){,weQ}QY_ɑO?ec2Wghzm[t~8wRZ &7_gP?x 6^!ҋ|x1G>]-=s@ƍ'gIrNTVN)lX___M>'"Mjr?SSp+$RM?=ern~4Ykqvu܁l{rNl|D>8ўŋ?{2J l){މ|p+y -^o .O;}wt:Ed&{ɷk""P5ie ?%Cr]cbb". >IIx:͹pu2O4xL։UǘMz6~ӽn')#"Kl. Ǿ1ʇ8um'9gfÏJHȵԮ?kF|)?3b9 sq87>7HM7/C5d&'׻=7 PkCKk_ VǓ~iv.Keyy={g=gbHϞ\[rΜ ۚT͌PBzRRRXbdt%tҤ;WKnn/+Zvd$&tC ?9L]#|((1rQBBDDD:"ctdbܸq HgIlĘ1c?~A3rßyKn=7sv~rs(-'v}۬[[eu;&S¢VSy9oޡN1F9rŶ 1zxQRPpMJHHqjzRcƌ#<}:3gT2Ջsb4`KPTVVud u[9gCk~„zv+#Yq9ĿsL_z$Q""{{b!pF+$\f)}j_>_oŃ4gWW?:eS2׿Ʒwo &7<4~wI>'x,z'<d;8CVQVTD1Ŀޡ8 gp[;, G7*+/lDD:iӦ3kH՜##L&=aaa tye?C,]*k_E99uE~;oQg{?gӿ$rXCoE,ykq=IIA_2cPw5k}AAܳt ~~ƧO?ÙslOo48Bb"O,:Ǹi&i rΜԮsF(!"9c|=;,~=&o/NgϞ32Iv-tv-{߷cͨ_[~TsebZ9<$C}n1‘O?:gYqb&O[Fѽ;.F# ?O7}$>@С(+#)m}DEiixFowl}mfΜhYNO#$D-EXmq9#|I,$"rQG!!U M&$4BBDDDlΟ"+Dv-'"Ұh-ZĂ7*))!!"׌ۢ@tEr(+.t>^k"vqI%|_BCCՄ"""]"+ H'H{ HLLdջ;ltEejӜeDDDD:;%$DDq7nd߾}Wm?bfy Ve+sԑg10qDͱ "")))X3b.,TP3Dhhheƭ\oZ&c36g0痶Ky8. I2ݮZDDv;IƗ!$5!ɓ,Ha4{n姟c6xt#"""r-RBBD>];;^DFFRP\α"T""""H DGGԛC9 H+A#iwJHHSBBDDDDDDDڝ"""""""&RCYw(˗/o&6awwn+jm"""ů~<Chh(3f1 tiիWn:?odƧg|b͖!v;%$DDDDDDD)!!"""""""N iwJHt`EXma͚5ddd( ""`HJJ"Ã2gu]+""ҁg k׮UBBDDjZ̝;Q t!i7I&5YV iB kAi>z%V`"""-GCp(Gi=!"""""""N iwJHHSBBDDDDDDDڄݝ34YV i!|~,>ɲZeCDD3y3P  8f9w+ti۷u[Y2+ޞӗ?"P  IDATZ˜9s& ި`HH dffr1BDDD4;%$DDDDDDD)!!"""""""N iwJHHSBBDD9sK_aڴi$&&* ""␒;CrDvwwD:,b{0bĈ&jONn ^fҳ'75YVw;i#Fܛ]'  VPBBDD(u$8Gi=!"""""""N iwJHH(sn~A Nl6;~.5) rՕ9;spp,۷7Y^ N(,, ___\խ!+ shhhʆH dddp2g }ه 1uTy{_`S8*s.5yRi"""-E8bCDDh j`d21ufHT?@n:t(< ;v"mWHݯڵtv}~#~bV0]yX'6kt(!!"(//'##={0o&֭[),,qسgn鴺EG30i֢eE+LJ/[%$DՏl={˙3g:evɽKPPF@n:e F~o. 
.ӿ, F0|A>\y65'Dcz-8x`˗/ """Nhh(111T ]ND y1XO㩧b\V}JHHу~UVڷdnf>cΝ;xcՌ=/-jj2x`̙Ñ#G픖bZ?СCٵk^š5kP@DDYxꩧ1cߏ(һuӨ iP!Xk%"L^^U/3w mێ?xINN'N0k, 3f ))qΝ;tzj:iiitM$$$O~~>۶mcȐ!K/7$$RoaڵJHHEGG3sLf̘abLX(%nn JBƁA1 '#0>x222[t4?<I\3{u!ǣsYMFջ/&&#F\DJH c۩SsٴiV .PZZJYY^MɡCDD:ZMS⋔sYU K~>v; 5DDFbݻ Kll,B is{/~`95wB~~~9y$7|3gΜQEDDDrn:$ǗRy>AUY$hN'=vr" *!C"ơcW[4[Frl 77l|||ڤ^x3gЫW/fϞM\\F ...-D/H3Y,yjjRRGt΀Ad=ً_^Ք燱r=ц<,=nFJ4;x詧gW:f9x K.慨Ξ={HJJ?AU=kMj psrr"L&S}zEDDD.C@@UYV=l?{dMt~>5u-.X\{Cy& =QCy/'wopBCCת4)))ロ^ Ϟ={xwXrecׯ_3<ĉy7^qq1ukCII Fѱӓ|+Wh"l6,ZVӧ3o<>cL«J^O>'77mGEEw/ꫯx뭷pvvO>?~UV1a.~x8۷ϲh"pٱcl6cYr%s sjIff&VNjj*EEYjʧ,i{POѣlù w{!gU`LHZ\VVӻwvww/yŹy5\-f""9T9wygTYYYHԬrGft֭ξ+VPZZZ7p7oۻ޶/Zl<@zBCCIHH7 ˖-sl>~w}u3[GyT***prrQFYLNDp(nvw,}3f Jgv6 Fi[9nKhhhz-""RRR0|8d] '?I Jv87YoEhTz!< QjիYn&Gb$22{_#$D^у8L`ى'ҿyl¹spuu%::ɓ'O;F14ǔ)Sb޼y:u=zkFHH*Gfztwwg֭x㍵Og!Cm6L&Ss'Xp!qqqlذ’%Kx='''?_ݻ7߸;M\g<#,YNWWWV^رc=lڴinj"lvkwW1clnqQyS~N+&M",,Ll"""rM Fwv駟Gȑ#pyΝ ?Yn믿NZZ7t 瓟϶m2dK8t?<瓑fc֭DGGg|&W^a…x|gX,?xINN'N0k, 3f ))NFJ< %%R{n&e,Y ϟf~MqvfWW_~uߖviÿ'>̙3%$Ds)))a,Zh\]]޽;3fdҥ ><<<i'|f3#G?l67üyx7aÆ xyy0|yי={6L&zK/+BYYjW=#33#G2|BCC1 0~g}{ݻc6a޼y9 P]ѣGIݿEQBBDOֻ}ĉ޽۱m;̾z>Z۷n [Xl6[s=|W"::/__ZC{)SjC}y%77o:GiH!&&={ ϟϦMZ\pRɓ۷a~a\\\X~=ݻwԩS6ZYg; Vk\jDDDDDڈ"]gf3 7|3gΜivǻd ,୷ު opGZ1bэ$""""FȆHQ3P_'fg^̙3Ջe˖q))..wgff裏pssfӦM&lFo4W Ūi=%$DÇ׻zeCTٰ~z~zꅇFXåz T@R;wdĉ| Zz'NԻ߾}DDDDDڈ"]ĢEݾ|r)k9V())ql9r$K.sqwwgCnmٳgyGxw=~DFFU12d+Vw… u#I+<w!%%Ek"]``|ΩS;w.ӟ+XDEE/AQQ~-&L //>}j*Gٙŋ3{l.\ng֭w}1bĈFɉ%KЭ[7֮]{Tna6㏙2e ɔpY,XĉINNn#'O_nsOή].k (+.")X\ *kNii)Fooo>CƏ_< SNeŎ/[xNPo~òe۫?F~m^xzϥ_~l۶ ??Z W[v-ƍ͍;wˊ+x_^n`x{{IrFC.֭[W`O>aʔ)RQQ86m1<ݱf̘Qի_9/\M#$DaEEE@Մcǎeƍd"&&{uܔ)SLDD#}vBBBxW0fΜέފ/Ӈ^x;wJF4殻⩧ĉx h4bX2do6 ucժU̞=c2a̘1lذ;ӱZIul1b=Kx!""" !]:1ѣG딱 N^fsI_zDZA#$b6i4BBDD6FjjjKDDD4;.7oY2^}3(_FFcB̈́Bff&YYYc""" oDDDDDiv;iƍ\|3>50Xrrr_7l?x:zرwwwG+i3VBV+v&|\]qm&r x>9pNՉPf3!!!f%,DDDDD=ZOcZ< '(q$ ΟfeݷFF|w3?]\uuu싫ɝ޶: 0`//o\5j,+ӫWUgd\a!k׮`!!!)!!W H>fg.evqшff3:;ׁV (./wJk|dRr G"22"""""z}ǎk׻ϩa1+a0Q? 
jD{ydƣɑ?+,l4G_DD ;]F $)) Պj%33E X\\ X \n..'7U7|oFVVYYYub^"$$P=%""""Rkk{=J ;F5>MQn.:}/Oۧpumq#iݝ{OSelNi).ϳul\F<1Lx7f}%$AFFGjt5'h29F5] zgc++#hɥɊ&(BCC#""""%b>ͦPQQ>DyyOs}̋7է7n]%ľ}] WWM&,FcVgEu&2Պؿ*z9HhhhCD+kpM7osQI^TDvI c+W$88#F(9!""""OcZyWHJ"bVc·f"L&i:aRV+V+mhħW/"#yV,m>-¦MLBt3 XƸjjHPAʕ+ɉx W绔6̾}ےINNfʕDDD0bFՔ@BBBaN٬J]0AUáRZ'Xt)&XƌCXX'""""f~֬YCVVV}NNTTB-n78QP@ja G}Ĉ#3fLPB޴!z8T/((͆"vΝ;`ĉJLHپ};+V3»ǔE}?77RkDQQ7oɓ'+!q5fX,А%i+1b8aVXTeg͚Űa7n+#QݧҘ Fb^JJ號OS^w]%RRRXdI2.. VPZR1q]w1zh1!" *}ɴ0|p%2EDJ I۵mۈhTF.OS\^N CC:1D k֬aڵJDHpuex@k%&֮]޽{:uzg fVBBDDKKJﯠHQkr8^hs9+U,^ؑpurb? Q2BTubb|Ϟt3/̙޽{ iB\u+9‹)$ IDAT/HJJJWB3g;wՕ1*0rx nxpBo߮tpg^ t6_-_~eȬge@+?/dee1gΜ%%|Bb"vqur`Ŋ-*HOcU0DDCXbiii,c>Hh3g7K'$ٲe P525Ӭ[7xŊ 4޽{qF_OO~>MNHlڴ 3bL`"jz3bV199DEDDDDݧ10LWUѧٹs'Me6驑rEyy9رc""""" |^^HTKHHh|MHXVM&9rչ"""""Ҙ3V)Q45 *"""""""6*+M5(ZSZ_܋""5<} ElDDDj^) 14-m+Sz1("ٺ-,[noKьɗEDlͥ"WՁlσnl|@UU\w5޼"qeffr1ϟR0DD6ih1JH\tngDž \75:BDDDD.[yj*F!WO\V-kqJH\ T%%NO\Avaj%#E9y))!xoã>& Y&հ !* ĥhURʽZU֟ m5J\+ "bXDH$ =!md ٷI&y͙3g|rN{|n=MLq1bqzqQ0I@ˌF|VYRAcpjk]'R1$wDDDD7WΜEsGjkqέ llHY_Z =%b_UtXNˑ:? c/\1!"""~r,~CZopy= @ٌ,B툽| (;v5f3>8 Scϊt:ih%A ).ngh@XNXN~~8ȞH^=>4 $ř3(WQS% HR*Y(]s3.(܂?QeDDSCEG֯_dzDs DK'3l  W @=ΨQYm3(%TV*"LԘͨ1!GLqr9{x]'lш2eFc~f3TT """"tUԹ4eF#ʌFP`\QiOSAobĉ0h4(n[ܜ8 ظq#[,`JX(T! lj"P(DB0(?akL&Wq&4u "hz4z38؈H !F\j? 
$zxG#`hT0޹jɆ\ @L0akX Lf 1ۍ7 "  F4Bc7s , dl߾fq0a--Ec` $An"88q Ӕ1=g6#C@U,10u`d2p9ab1%Ro5f3[OֶhB44 """aGh46Nbc5k$05h ӌPE:^D9{ M-u=+łԸ B@:{N8 J$!L,氨a<[,6]L ZC""""򪞦˗xE7@s[kD"4^Dnd2D_ߒ 0  B0pr4BTFO;ZGOD"S!knjuf4XkA:BcшTx($ccztAL!!,0i-׈EcTPWgB'_D!iH ޕ*Y}}aIW*U(\斖v'4I!.BS-:z ͎fCY R A~h8H>/tJ%tJ Ri$0*>{vQ%{?uc++`nw R)tJ%,"v'4Itja3yD"_ܨ3lBg蠷Z;Cg`h v;d""""=M`c־R1N~s GؤIH.@C%%n_:{pgw4>iY}E"drm6t0_۞FBf_22ݷfFFF Vo+*s"__DPy x{`ٺlh AkzXeX, k3dh84WOkoijBّ#hңN_TGsK[OstCw=MD` R`ՠ9l((iH bbM""?dr*L@] \m1چ W!1kBliH }mVD/yyy9)7 )T|˃X3/'""7%%%BEBʆB?Uۮi W !V(!ˠw'\j۞fNH[t%T,Ƙ.Ž3 6ŮX#@pPba;PFD@V3)AqTvrpv4]q^&ґajĠo#l d#/ ASPP<Hu`0W GΠI!fǕ.mB<{PaaP,A g**ߔp 7y:puOSVY6"*:Fsŏ4_GʉޏIg1@|Y"""""""4DDDDDDDq $H c:M~:^|E DDD4b0 ""Ƭf(,,׉ZYtwv]βADDDDDDD  AQZ $z!11: jL, )Zߏ<Ȃa ADD ˹\.Grr2,y%C<y """""""8DDDDDDDq $8 ^,`,Qȃ`kdrj5 BDDnʠU( 5 Y* $hܼy3sEHH C^?޽{IdCrr8ODDnJKKe )q4 D#ntXuHр+))͛a2\jkkHp2Qv]D4m60j% llٲFOaQC!ADbسg@'F[ \a ADGpo΀B":@4dAHHA,i6`F~~!Y"""x "vaě2F 0}]~"/JKKY""" R)*Zt. "굒99E!"꧘irM-FL0bFyB~~>%.t-۳g233YT~(Hk9' /t.GHpz۶msQ LSLDDD4"K 3mQvY׀obQ<V[[[n() ě2m& `޼yPEK:hph(B!jpx(ONN۱m,+1 JJJyfΤA4OBsAD4FJ-x+X!$ $FرF'_k0D^.++ 2 Xzu=oNƑ#Gܹsc%%%vݽwyeeengʕ!tm TmXimӱ2FFÙ8HQW^<5' >2_iMVt~Ɉ1j*{𱐩;N%M9UΩKhܯ}x~}Xy"HhA@:k.|W3iO؉, QSFESeN ѶF]ޣnGԳ}à wMtm TmXimӱcE Bh\ʠ-6֣8rrrf 'e@\~>uz 4 aG||<6nثfffѣ0bIԑ7_#U69Ch4i*p1C]ˆh Ԓe^dz> #dAᘺ #s& ]كg}%%%,Q!ݻqv_ ) `t$bB[YSQ>sWNb\[{}Xt5_II vẮ\洞DJXC@8h1ɮشiӐő#GP b?hݑ$abJ@%Z"tLY wfWA1&wбyQzGEyUyfW:>oya ;|02~" ":- ޳bϞ=Y_ /i{t :GH ~2-B'tN9Fe}ڦ>J%Tz$/_3qgf&>X]uuHip0g_bL85>a_1D""""򠶣%g߂Zr92\o?p0``njBҲ]l9Z-,-6L ({|fjj! ō,說{7r4^r̾}\aD:aqPÁF?_ 4aW(Do04(;_TbP7`Aۊ=S^ɓ!V*apy{}\ۺ~r9:5ꋊU>3zĨTS|}">>bVUNW_}0}|XM>[o說_ØkA̼y(9|W\;68fVZ͛7d24>@uwd""""bO@J‚ #}bښ eD|Bj58y'3w%çL{Ƙ)S!Sj6C[UK̮,>y2^D@sSϜũFʼna9~~(>t>H\(o@$Bo :\'g? PI` sbɋ/ut[fvѳgcW={6J0i?߾/Ӹ?6"$9 B~x< ~28aDo μ 'OT>LLL 6l؀ @mQ.z&O("""!lh]vb]}WcDʽ"h8% P G⭷7Dĉ-;o1H*>3sېFҲe`|bBT ! 
Waʕ $|:łvNe77СW%$`Σ뀖伱.-u>y8V혾f$ 'ÇͥskjԜ=r9ѳg#i28uXk'ݜ/:FYDpLv=MM87111xꩧzS<<0QZoo~ =Ǣ AՓ3aa9{d<_n>|!\9Bs֭s/G @Ύ7tl3-\OJCCq/׺Op23m^*J ^ ͆K6fGb2mc6i.mSLɝpp]˛q23LYɷYJpC8?ZVw"]o*1[8}}by1ײY3[D:9cZv]F9{s3x#}q3lł⃝?"qb\*q 3.[||~l~[Oކ˗;Nɜ߷+z @ t#Zk2'##عs'frݎ8䡡VV+2L77nam˨//#@z\O[ӱOuD ^[xC8BbAحVDϞRff茶A˵?1]TC"M} ?8-gٴ9-sڢ\uADDD4&ⱟnSO=iӦ(g~=< ?*XFحV(ûnci)fe߿x7 pOq{r%a̢ӡ1!徟O&C#h6tHV޻awtz9?MA`i&3͛燋ASXا$,iӂ 7O ĵq }zjδ1: (5W=g/~zH{9٧k[_|sHgz\v- Z[;鮻Z{=ح6$utb1j^n v zǤ~q@|M导eo~r9'OƼ_+zQh,-u=%RȡJLĵ 5j$x]3pq-i*x _M#m)!0C'[o?u@ !l$,y!+T^qBBpM Bʈ}1…{߇dH{9 ! Qa]?o@P !1 t\!V*a3q7>%o1=#3҂[^B}Q, LNDN_X_zЀzwހ[w?/~ZmgyyxSJ8pf<w@;'k(!i-MsDxP_?zH{9L^Wt-UUt4nܴ tDϞ({ ٌ#WJ|sHX K{Ε<|m+ ZfeǾCL<>0μ u.{ɓ!R(anjD8ϷQki_3|ҠAS'T;Vu߬&3 ZT:O?C#}z;6jT>>UFqzjj״_iAPɀ_Q@YH |/3H1e*(##aՠc8o!h8Ə JdSL]c_EX"8q9;l}PR~v/f΄LjE}q1.|N{-ŧbQ&//[lÿ)5ccq؈ T?qAyر& 0f}|GZLBb둜<[elD"ˑ6͜_yeO+7ŇyJMk6mT*kZЪGa5eZP}]KXtQ>cݻ0gY"QQAiK%+ *M1u޼ne Ar_B=w.rOW_ŕYDҲ{ab ADtT*״E?D](=P:Յ'a7v)(:|̤ǕnC$uowz? E\vʰXFk_{?@'v+<{_[O+8=<% V%rrH@+55* ۶mdBTU<(3peŌntK,v[1c + MJpXL>ǏY#4h_g-BXRbzXim˱2a }p7T!02A"\W1 Ae}}=jrak6am"oV-u}&D]{m?;N6.=|]mzIl#!L}x~8VzZr<}m=} y+eM0*;:M.|!,<5 G R)*Z &&u}ZZZZFc\sx7zl޼i`xm(r}ze? 1 6Ksژ3g222 pO[bB[YSQ>sfؿm3g:rlذ;vӧal="Ճ2-(Ps06T-]re2Qq$6{-^5"tLY wfWA1fMY?DPbݺuiA-N JDDD#Ri+“O>9lƍXx2= yf&q>J%Tz$/_3qgf&>X]uu_/tZw}f3yqn DDDmT{4# GH c-6L ({|fjj! ō4 ̓z&-- w$4WE?BŹ, y<=H^DWU= \s bCaPikGlj* aՠ ܁K܏9>zsӗO< _|t&zlLX$b4!Opni§L{Ƙ)S!Sj6C[UK̮,Oɸ{I`MM>s~'NTjl޼& *N JDDD oh =Mz{^ɨ;0hɫǝCŘp$z+-GCџح6]K^_1>y>k-ZOnAU|<&~;޿ 55Ǔ- O9`~.^xgxpPLLf-ʅY߀9-(y KX苧~aOӧ/ÞGd ]:x|<, 5g჌->+C(cκuo]۵m,l9 WiŎc#]wa9qM~i Zl6-Z}gk# gxky:ϙ.'me%䡡k]GG'~8~6/o߆clfCo~XX}%b<&CDDDÚ!)i bOӿ/ÞWk*+HUnѬc3@3:u }>erv|Pu4P|v z~m a-}~-Df&[|`nj,$ĵ+!ݫq앭h*+b'33_︃V/9ualp"""vJs@@t+f"V%aQ HOӗaO#^ᅚoR?9#~H}s:\~aߗ~Mת/.*! |/mXsx}bmDϚ ߻(3^38[D_C ѣGao6HZx7#X """tK.EavARGaDdee"!QeevϑuFzO#3Zf 1scc$b%gς"<Ҡ nD),pycImu+o PsʏGwQ 3>>GGꇌ $&&bΝ7qw;@Uzz:>9A궼4Np'Amm-B,  %>z޾Hi1c-z5($) jw?  
fCLN,k_+V{2S`Pwn?@7mBT PFD`c4.a50>m1Ҟj5|BBTtOp11)<\~+V#<@͵~ϱtR<ӼD=z{Wٌ2yyyزeχN?=Zq|'`jhhX’%H{vuU0Gx-fm۷fv|<|(,:|IKl8go-OOipp8yҵl|ZҞJ^>~Xx7_/""""bO3{2"bD40㡇0]O3<3OZ=z@li_tj2CW]~#}_JVw̍P!ˡ}M 4HI4 M8kϞAHBAA_v)gd؈ogT< EX8Jt '33ݶW]Ö' > -V$D񁮦b3ĕ~pK}Q.~R $ŰY,\|_oS54*b1e=0inD4eQhoAu}=&O!&!ADDDDih4RX~=!3Y: $\.Grr2uzv """""""8DDDDDDDq $H1 """""""DDDDDDGף ZR3m """"""/TZZ^z aIYʱd||dOCCQ\!''uGm DSxА3H0iӦ DDDDԥdWOcIYr h Ĺ?/Xx1)( cÿt 11#HtBcؿIk"XAh-: FJmy|||0!'2N@4D4W0E66HiOscp0~B@qa{L89 NbV__蔊NCDs"--fѰiߏrE"ť=Mc`zAym}P[[}ZVN#V^t1N`DDDD4,{|+yy۽'N ؆4#TBh:D"AJJ RRRtJFc<8m:FW@eNcP /cCC}Jޤ?_j7mOW,˞{^V@AA ??hP__?*kTJc@DDDD4zh4pĉn;fG[Ԥ()mȚXKPr(#~ B!V8BCgNc4q#TII [p5©/x*\prj yiȖ8ur$_~} |^^lkńcrG8"R* ^M6uh^m@کEmmm9/)Hbbb^7$""""]/Rvچ Nw $|Y"""""""4DDDDDDDq $H1 """""""*̾ZIENDB`././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/contributor/architecture.rst0000664000175100017510000001535415033033467023640 0ustar00mylesmyles.. _architecture: =================== System Architecture =================== .. index:: single: agent; architecture double: compute agent; architecture double: data store; architecture double: database; architecture High-Level Architecture ======================= .. The source for the following diagram can be found at: https://docs.google.com/presentation/d/1XiOiaq9zI_DIpxY1tlkysg9VAEw2r8aYob0bjG71pNg/edit?usp=sharing .. figure:: ./ceilo-arch.png :width: 100% :align: center :alt: Architecture summary An overall summary of Ceilometer's logical architecture. Each of Ceilometer's services are designed to scale horizontally. Additional workers and nodes can be added depending on the expected load. Ceilometer offers two core services: 1. polling agent - daemon designed to poll OpenStack services and build Meters. 2. notification agent - daemon designed to listen to notifications on message queue, convert them to Events and Samples, and apply pipeline actions. 
Data normalised and collected by Ceilometer can be sent to various targets. Gnocchi_ was developed to capture measurement data in a time series format to optimise storage and querying. Gnocchi is intended to replace the existing metering database interface. Additionally, Aodh_ is the alarming service which can send alerts when user defined rules are broken. Lastly, Panko_ is the event storage project designed to capture document-oriented data such as logs and system event actions. .. _Gnocchi: https://gnocchi.osci.io/ .. _Aodh: https://docs.openstack.org/aodh/latest/ .. _Panko: https://docs.openstack.org/panko/latest/ Gathering the data ================== How is data collected? ---------------------- .. figure:: ./1-agents.png :width: 100% :align: center :alt: agents This is a representation of how the agents gather data from multiple sources. The Ceilometer project created 2 methods to collect data: 1. :term:`notification agent` which takes messages generated on the notification bus and transforms them into Ceilometer samples or events. 2. :term:`polling agent`, will poll some API or other tool to collect information at a regular interval. The polling approach may impose significant on the API services so should only be used on optimised endpoints. The first method is supported by the ceilometer-notification agent, which monitors the message queues for notifications. Polling agents can be configured either to poll the local hypervisor or remote APIs (public REST APIs exposed by services and host-level IPMI daemons). Notification Agent: Listening for data --------------------------------------- .. index:: double: notifications; architecture .. figure:: ./2-1-collection-notification.png :width: 100% :align: center :alt: Notification agent Notification agent consuming messages from services. 
The heart of the system is the notification daemon (agent-notification) which monitors the message queue for data sent by other OpenStack components such as Nova, Glance, Cinder, Neutron, Swift, Keystone, and Heat, as well as Ceilometer internal communication. The notification daemon loads one or more *listener* plugins, using the namespace ``ceilometer.notification``. Each plugin can listen to any topic, but by default, will listen to ``notifications.info``, ``notifications.sample``, and ``notifications.error``. The listeners grab messages off the configured topics and redistributes them to the appropriate plugins(endpoints) to be processed into Events and Samples. Sample-oriented plugins provide a method to list the event types they're interested in and a callback for processing messages accordingly. The registered name of the callback is used to enable or disable it using the pipeline of the notification daemon. The incoming messages are filtered based on their event type value before being passed to the callback so the plugin only receives events it has expressed an interest in seeing. .. _polling: Polling Agent: Asking for data ------------------------------- .. index:: double: polling; architecture .. figure:: ./2-2-collection-poll.png :width: 100% :align: center :alt: Polling agent Polling agent querying services for data. Polling for compute resources is handled by a polling agent running on the compute node (where communication with the hypervisor is more efficient), often referred to as the compute-agent. Polling via service APIs for non-compute resources is handled by an agent running on a cloud controller node, often referred to the central-agent. A single agent can fulfill both roles in an all-in-one deployment. Conversely, multiple instances of an agent may be deployed, in which case the workload is shared. 
The polling agent daemon is configured to run one or more *pollster* plugins using any combination of ``ceilometer.poll.compute``, ``ceilometer.poll.central``, and ``ceilometer.poll.ipmi`` namespaces The frequency of polling is controlled via the polling configuration. See :ref:`Polling-Configuration` for details. The agent framework then passes the generated samples to the notification agent for processing. Processing the data =================== .. _multi-publisher: Pipeline Manager ---------------- .. figure:: ./3-Pipeline.png :width: 100% :align: center :alt: Ceilometer pipeline The assembly of components making the Ceilometer pipeline. Ceilometer offers the ability to take data gathered by the agents, manipulate it, and publish it in various combinations via multiple pipelines. This functionality is handled by the notification agents. Publishing the data ------------------- .. figure:: ./5-multi-publish.png :width: 100% :align: center :alt: Multi-publish This figure shows how a sample can be published to multiple destinations. Currently, processed data can be published using different transport options: 1. gnocchi, which publishes samples/events to Gnocchi API; 2. notifier, a notification based publisher which pushes samples to a message queue which can be consumed by an external system; 3. udp, which publishes samples using UDP packets; 4. http, which targets a REST interface; 5. file, which publishes samples to a file with specified name and location; 6. zaqar, a multi-tenant cloud messaging and notification service for web and mobile developers; 7. https, which is http over SSL and targets a REST interface; 8. prometheus, which publishes samples to Prometheus Pushgateway; Storing/Accessing the data ========================== Ceilometer is designed solely to generate and normalise cloud data. The data created by Ceilometer can be pushed to any number of target using publishers mentioned in `pipeline-publishers` section. 
The recommended workflow is to push data to Gnocchi_ for efficient time-series storage and resource lifecycle tracking. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/contributor/ceilo-arch.png0000664000175100017510000025363015033033467023141 0ustar00mylesmylesPNG  IHDR┅bKGD pHYs  tIME f IDATxw\SW3!,NDuՂ:ajtNm:ְIHG$I @@z2{9r9 """""""2#KVDDDDDDDdvL81@DDDDDDDfDŽiiiHKK+7D"Qb:R*rcA*Vk LriӦ16e0GԵ5W۔&{}w/V[ٷoeʾ}+VپF(gϞ __*FHHъZtib%KʍヹsVkPǗgʕUa)+9ڦڦ5ݫ-[߾{}ٷoeʾ}+V[ٷo]`фB4$NHJBui&HMK>)1LɩJLmSV sM]k_sMY16u}w/V[ٷoeʾ}+VݾF/\x1~ຬ,df>( H STȫCJ(7P {HˡRʍ#zW99ڦh־jbmZַ^mi_[ٷoeʾ}+V[}t2(3cǁ$F8KU&p """""""c̎ """*W6>hƇu뀈"YDDTbwD89ؘ+@aa!@pp2c8D )))h4ptrO 6 #_x`7nX/> d;kh߾=|}}p "ŋ;{KKNNFrr2<~K;vse|W'OµkJKMIAjJ "Oc׮mm[۸RrܢN&+pJh4prvf-Q9^4 ͚7Ǥɯ 4wXf50qx۸:uSfp5nӦnݻHNJ¡CqY o~!6NsUODDDuEaaa)iH` QP@ܿÞ}_/Z[[[۪T*̛3'?VT[NpwoNm۰vj$&&A"1{\8::W… |:wƤɯg^zyػΟ??WcR&>Ы'dd`Ch֬8~ӷ/-^غe >Rnܼݺwy&e(,,+z)SD֭K}LT֯[/_FVV6mϿW_@]a-Ċ_~!-eIP{κ6s.Y ܼRADD®hٲU f ݻwgwF>Qbc))W۶?v`"??_,-- G#G…?DEFbסhtccb0bСyqH~Y/?yxC؟Xq:t[nZ/>¦-[M,[[lAnnoWUNyzq!=~><Zzv`>̞9So{FR L0,[ < jcG&2dCɤI-]`Tɓ|OŸv*%$$ixcw#9{6_[P(т5z4?+W!,bn|M7˿C T*MuZ\<ţ[صs'B!f͞'NrL,_Ʀ-[JeK>o5"꯳xoߴ -[BlL ~^Yz~Y3:t舕HdR9*[ʴSQRHP`ْxq(=pW^GD{1`}د]l6ʜk-x7pQ\Xlc;""bq׮^43AAؘ+i4<3h&Mƍ@(bР6}$A7oڈ\x]̚3͚5@ ToNôӑmXXX>|=z࣏M>m`n\Ys@$T_,t^/7|M4-b1vϿprn7.`3~<5z˖.7߷oǢo۶ujۛ|U)ERFZ_~͛!ÇC LTӧ ̠F2XsM&KL}s<`kk [[[4o.Ec_ؑDDDx4nPƍMLS@zzSO-+ ` юp_8Բ>>ݒ:v[C,ؾsfB}1t |8 nYQ9VjvkJqx?Z-[a՚F0*J;M{{{{<ݯ4 >ѣh4x_2)ʔeV,}vDDT'p ""&y>eVRS'%%?շ} .oּyeB`h|f߶mjZ$%%8,Ο;w`ڵذn3~c&mӱy&½!#=F]yz{W"Oٳaaa_V*slTUi'CSv8p`?F<|q炃Oe2eIػg7ۋ;w@=L>Gjp "ZхkRRɟw/^b:];n/]c'Oa6}:6m܀k֘'Q&sHJLRF1QT`x%͘>yyyغes۽J;ZtpppS_~*S)C{??ًצL'.GGc+ʤS}6v"DDTLpl d-QСqI7 Թы2~xD"<|I'T͛wgo5 ;wS,\~5{t '''~moggBT.Fz"?>VXcJ;P-4;Ǐó_(w֍ce憹csN:}{sXC?&3!"1j^C e2n߾HDD5A׭,* 7hgkNz 'ODNNN?(GsL`DDT0@DD5,"'ņp-@RΝ;زy3?, ,]nzʒlll;"7mBRJ1v8a}7g6ܹ\زy3f޹SjƉz}W,{sD~~>p5b9f3E_pAw<[,]P˘1}:233!}#77nj%ر+^tٳ;{j5tZfY>3xyIqQ_F_deeaÆAP`ͪUx!{`%U+Sysf#WO,f޽ 
Z<$%&b7m#!"ZŢً/F||<<0z8՘Wb஁!Jjݺ5}ڵ񁍍 >s{Zߵ[7߸={޼h4ko޾Z\Zݷ˰r W4||ץf x{-.aa?wN7mX쎈̪~E UVظe+,ј0n,--uvk׾6j+NQQ9ƾDb1 6o E]mKʖa̟;#EI_WF=ىQzz@xYL8Q?qa\Gc8C4xp.yaտ۷oA"A0g< ~6>.Wٿ %%666hѲ%}9Lxyb*pؾ-/^Dbb"T99 hܤ :v섑?os߇\.G˖-ǟac HNJ3zwgwY3;xuk (J4iśނ}:*3-]-[a`b h;p! DBBTl jT0WXSVEjK :1T|4O<9ڦ69ڦoJ!٘{Cel$yԊqP8KQN6l&4j`bXW`X2Hz4(Va{w_C3Q5&.ODDnZ߻vDddd8Ii?@AXy@.!@}#` Nep/owej#ܤcOO.v}+#ѲۗO;vK < =*R}|YW$fɲ݊-[рxٓ8@dPȊ & '?Fqj~$U6_G\X'z>t4 8ۺYy'#ĀSv󀐷HM4<=xOO IDAT:\јze{ 󚈈j >RAPuymeo{ V`t]w~^$ʀKK`귀K.ro& u=M>ʔ#%U&f~g N׀okyo"""lU*V1@De+~}Jb }&T q`Br[; U1@@vk.fd }jj*!uֆ6D p j K̃mQЍ6VPiYL8Qprq¡e%rjMzUUv.ؕY[oQ暪G""j6U k+w097BF']6i_"J*  uŮ /OLO sQ'X25jgw)%?Bųxh[?ޫ# Wު5UD 7@?wX`ID6hAAt 4gVwJMIZZ8#&!sc!XBo_q;_[Ңԝ5O!5 ,+gq&o{D> Z DN]xRTޝ,L0vgxR?@AAGҎ90b&ptrs<\}t졽0M=]n G ;fꬲe^Αv3djk G-p GrUUq7~ۏTlyT`BWyP2;>YHDD+w^wxz_†7nT*ٳ̅P8CFM(9`dY4m`kc ~ /t*u7Ga뮽PsYa _ȃֆ>E"|}}yjf!ZC,.szտR(U\d RFK]PkuSjp 56X"Dkgƍu@C[ODD $btEg?_|v &!d>d4 ήn %,JHDڱ5G^kߋL 7FHI\}|mHE>3Q/-C}DV́L&Ee4/(B:L gɆD [2@D􄄇#""muHS)ڴĂYSۺ9Z5bkgK9_MdeX! Xsw{f Y͇pndp]le H{yK޻ ůCR?.5%Z@"y8$2Y~ 5Y{6(En5F#!QuǪ?~p0BBBp Ns(A ݲ i;aH.ݺoq8$\zU~RV:58\D\ǂߖU*`g'ĤSlk+OX~;22w#rF[oNvmZ1ױlܸyu8`Ï_E3/wlov;;9bx*;<!\4lم{Sxn.ΰT?JUj|/#Al'4,S"'0vwD΃Zq_}n}>PPP"(e:.Y< s܈ Nզ-ۣ[+tj %,1oxLu K6ڻԻ'C{??sB*w#4wDŽѓO:0@DDO`k- H(Χ%dBnBldf=mmT@w؋qX$Bw:7qҟw-8?{o tTo*ɗv짍G`]z e `m蟺c=b"S OKKCDDbm|bqE;/'x8 (Q&W77tG>_ TH!Þ˓&cy5f z3_`wEv43#񊢤CE`Hp%ٲKok>*%2gps){]{?{ݵyk2BNH\[ԽsDFlPL[q!u9aY<`ƹ&mpH]T=6& %EkHd0#R7@E֭1cp.:cP@,F~Aax 't`ȰBd+r.=QPPPj;-צ6vuDI(1Ȯ'Q rJQKGQnb(-n޸ZCCaPwX[Z#hZfԐgW4s8y6?Ab&̛t`H_i1Mѱ}3lmҴ|UR5UV.^zC_UF,*J0G.s%8)e/<~A=,+ ܝdt%d rQՓL6U;S 6릗s/OCϮ`/_Ÿ"5=CB^PP(z tlT:@`c}J4P$ Su;/maz/xЮx6ct,)xp񲣧֪*V˴nv ~p|:x0qxz8Vce<2@TL6ϝ09 [[}G#}ݯ껊ﳬe[uъǥ?Ēd:6z-'J1{l:s!ZulYY:w;~PVcxsn0@DDDT~:{@_$d>E"|}} ԅ<4i>><9D 6ƊQDDDDDTeήnRj|]p{<1,l 1lX!%0@DDDBdd$v턟+wG!쑃[YD5L{ lU+Dxxx&^HKKÿ(eeP#E>e2VQ RT8vT;6M,jգ=l lY Thر#R̓W^DU.b{VtZf#j B"WXnBRGph;e[/VUNx[h[/pݲߙt "C9L$dtJRrۻ))2@ \Bկ'dV|fj\kJ "*=WD\\$"XYZc٫7#ؿw*; X)F"Ȭjl,+QcưȬRSRpVjYYYHKDdS*D^&m/ɰtR{7BLBF!ñ&L?wԔ\͛7ѭ{wt'OVpy@ @@Pt߇p 
R7+*1S=3""jb\H{7Bu{njűGp56j5NƅxcB!s F<$ + L8a" NKe276Yy]l[{-8B"@X'ؿw ˑ(RQ IUCp """":X6SJ{_4nD@P8CB;cN88H|#6&,!c̈́JD8JJUĄ^RLkjl... Wz{cԘ1HMI*G\Z,U*$dFY,UNdݵzT=1@DDDDDV,N4lQc<ыaDDR1d ptt2>++ ^^HT#2!%!&^؄jpB\D@VUHy 7ww;1G *vD!|_`&!"" -- KȖA B"iVQ ZqRAF6>kqz_GDZҭ>999O~T_}zc;3pQW[WH޶Hy2W>E:{ CDDJT".. ,-X!DL8P]w-.^셋z7(\8ZYDA,C,cwxxm?X#T,[~NN^~0h>~צ~!BFZ*$"Pjy]+WL&Ò%Kb!o&bX>}bCoc}[+ѣZO郎~7(͟$?~;RD6>@ܽ{S] w/|g233>}&:w7g6Rr @6>&' DE!55 !CC?~gVV3 4:}-DFzz|yk+W]ΝU!ޥ3ztQ/<߷m \OXXXQw"??ߤϘ4tN=\'kRYjCwڤxo\n]:c/b=;^{e2zս&O|ǎ)ud|&} 1@դ`eeÇ?v <5:oQ7f4K\PWvʳt|{6m=qC }tbݶwnƫ'n]YND=ګzHy"TT8apjtG[~Zd2L>A} G"cݚ5ڟl3;w=^{} [ׯ_5mtBnjE׮]g>U*9(^zNn@@u&MeΞ-<R9| .8d8j5`ſ=!^^#KZp*4sEEWFhBPo0ClJoݝ;w0l ۻ'\}bdeeBjÆaaHII_wnXZZ`eRW1Ùp 8U-Z(X@թqgrpxxxJ6@g֭k]jvǏdƍVXnn'lD\^swntYowRDDD#LE. Y!DL8PC2g'O_F> ?p@C.G.>ICD7ɸp<1M>}}+<(,,P}?ڷNJ?CPՈrϟ_ڑ/^Bx/? Ev6V,I{/OZO|408bE%ϩxy$xxx+q # /ۮhHJLĬw߅S#ݠGAnnsIDD?z=Xك_b5=tk;^p 2H`Р;g6zaСظy bqC?BhgqcFc3U%,kKKKݦw@ uw0Td}5u&!F bQA+PDquY֮}j.*NA;V "Q0H #Ls]jNN9yBΝ> ͟VC+O?oo~Wл7~p'+˗Cy2NFC`WEt(777?(̻w.&^$`=KaD[Wz}||ZZ;߿BB//[-kSn3#> ICuңH<`4C!f@0d.^@iiCp .Wc]wa9|0bH'!B!`0`4UbUQ/kPBB`0#x7 詧]bB!\_7IP0HqyX?ÆgoƆ_Ci)}B!\ٌo+{}sK= oA`믣 & c)&bp@9rw̜##?9i8t 9Qa]w"i};{>FQC`!j8!nM$Bqf:C oi\Y1.*Vo ^1b$k<<Ϙ対 JJJaow߅`%o<U㏱juz//>}*a zom_/.]; })j-oxu2$@'IPB!B ;#۰A7#<<, by?,| _%jjjZ|] E>}詧o^X,TUUaܹT(XD<B<.]=,lN6J!&_̞u7DRPCA->q kX"5ZG7Xp/" Ktw(5 x7Pi 3)U_|9V5sSSϛM87]Bжl]~ EF#Μ=-.*'FLJ8Bq\SS RS6㣵b___1U9lD7p,T-0-MGޒV'<}} z7zBqccCrnv*: 5хN5@`ؿeeeyM>?u*>h-ߏQG7zoߛןλByy9ك۶߿Ç;̟7S;$  ÆX͂ǂP`E@A48_LÚ}~}RcJePP`햝c.@zػc+L}*\d6n܈ӧS@:>#\.yl!}Y VěUﮄ@ b`d1cً/VUaÇwV:d\gϽŤkOD B!:xjq'bc֭طo/&Mk,~=<ѣGc /;nZ[BbΝPT< o͑^aCnFPPMdfdaȑxH$8zϻ:W~i>) `XsŬٳ#B*ju1t/$^r\&ʘ2}mV^R PT0֗q-@ހsLrfKm:qPHEHMMC'[]Yl-A8p@k>>Dkb[EDFd:ip JRSR  1nX,E"lJMmp(. 
C҅Ez,@!ΦH$5L |}}iFq]%%ֱ~W oݳgv VCҫ9kA˖<ȣxM=1;vOhKXו#PHE`Ժn?fdT(Lh\\D)..FQy3 BCȣOVN?ڨbAB!IZ a ע8ށ-{"#)"deDCxx8%!8 ofC6ՒeYp G_zkeCb{MI#O|]"ߏVSƊMq yyySRcv|՗زylVyN44#"0mhD[ӿK_pq-)̇ʊjK"ۏMgAvQ9{60 cFPBIMMFtLL777[AÏ>ΙϿG} ?{+QxJa_z\qxP^Sѣp?XU0zWcܘXس{7 w##"ȫ۰A7؝^G^^$B8]Ԅ8JBl:8qҾ„1aDeWfbKb-ךZ;jvkLQ- 0{\*"""#!aP\ܾ[lƟ_ׯ;cb0n76%f[i5!"PBȍy8q>p]#FIA".f!XnRSL$1x~O fƬٳQ^V;cطw/v6A#i8!FC*!M!!^|g `0pDX$m5&Od  g${PPo̞3~=H^#phŹ\&z8Bi,~'̿o*++탑lrzMB:K~5lRmݹC)h˞!"&(eNs :o~CŽyVBhZyAZvQ9>0bGPB<qP H Fg'?mmsbtr9RdgC2C.@181kfksSɄ_~[PNP7݄ɓ1!1> z!TB!X, GQ_Eꪪp߽s[+FhZ?v }AeeY ˣ 8g/AE%!BaVzcK>gC-8u p$3 >vn FBB]RA!rk{*v-|k6c6^~)=<<0p@ 8 ^z. BHQB!6$ONJIjeBk6wE=ubGrk<<<֭Ci%z3]Ȥ[X, 5(@!U*U*ONBA~>0L~;P L!̙IOKM w!4%@.`m9уkPB!..^X}(A0Hl۲Wq\d90{.naZ]7r t O[qI|' K1v8@nEfQ F,-wtB..]RJ8B!ttlJAޝH̚3|>p2'_NcwbrT<~DV56$'Ï<|<"Æ܌nk?g z/ g S_S;DycSHOOw㡢B!I 8cK<#&6QCQDtl, Fbbb{L!1݆Wĉ=\.d2zcq{O>_ Vdf vo EP ;y1h.#.z-X,*:x^v KՐiFsJ\Q wg116f8ǁ 50g4:Rj!Bc:6_@_+';ZM8_@©cb4R`4Zo6@RBئ5gNYr9nA`ċb=3q8w܁>x7˯v.@lHفQ:q4T/Ы0M9]DBJ8B!8>" E eWTUѯIK$muk%|RiK;`Zl}xJl2A&pŇ DQ˰i~lڱCC pCA~> L~pQ)(fA1Ggp$+!Bz~!jlgOO1*+kZ[~! h")pϜn#';GhVK&ߟX"L.2$/Nݡ:|ܷl`E mbҥK櫯hnRA"A$ު}Pt{RY3 (/_0v5WQ0sX׏B!%<< ÷-X]Vݛ]Ж`h*2 `4@u}[K4EѴu C,C"rXd'O]߮m+5H{ 1{Y*&MnPx-A"4_x Нfh  zb: ~ )@! 
P ccb1bqDDjuJFcۺ )p+*H \M;A,#"2G\C-ÃRyT%'qRƌ%K~lD79/`L2yse@pP(z-t`}/`XA,@,83FI¢L^^ c J+4%!BHG鈛u";GE"&:{عf Cd7/j;mxsT8Dn FBB֭[GOZQ"SsL v 'Wf -o/- r S TT@`04ٻ)Q\r RCp9IMTB!t Nee% ꒅݦ MBuGbpոPʆe7ÖjQUÆHސziXCL˶B2 ;~| ¢+B֑GQއsB!t"Ko,-Ɇz(ݜt޽nYnv'] op9qp\MDۛưP9WMF#6[&F"z2>ʲ˭obr ,WnM JW۞ Fgkja4[`C N:%+3Z[Cq:~ϥ^^\P0LJL.ϧzܫQ2\N7HI3琴=[+pyԴ\$owcqXSn7:6bi/sl]smQir}rwc{yyuCU@.I)ؼs?@.@&n;g}@)mMhfVI N 'DN<2"#!j0 a"F~~~隈s ]>!>-ݳiF}CC|mQMݵ3LDDF""2^ힽ)˗ ڴĉ۔p aϚQC냾Di5{ӏڒ aA=򻇐K/4p Ρ ?{wMZ=qѱ00"ʹZ-mL =;Y@aE(557nDL9F=0yMr5gcL_RXUx*&")j͘1c%۶e3Nd`-W-ΦVYZ%ڒ3Pnr?M u=|{ӻXr3PB3-VaJTjAj8Es3{ GO?~d[_/biN'Rc1 ۞ WjJC(@`@_^U([d,8'ƖYEs.@ɜ(J̞3~b9~0pƀoX<:Ί%KNp$ZdBmI>*''at  36{O<o/3l߶ WONK@:}56܏{fNj$Ѯ#!{ iζ[l4lpޡ-; uE_R`+nlpP ;62$ߙx!0HA xEl)C.i߉_o1cKպ%&`ݗ_hf_tr{)ۡTF_~BF(@n](ȷvfG7^r՛!QwlP0س{+n`Jݳja`yѯޮ n읶-a4P!-g uPY'M:ܯ(bƊl{R!+31smyx'BqqbFgBXXX/--ƍ r@o7H)1˦`J8j8i(mx J9,i Xf&m7n.i t{z:U*dfdPPqdCsQtvaxz= ;PWWcٖUVVb]vgNC!5a*x6V Tnm:H'|]M?))`:oulIIQw\ft|ې%N0Ѓp#mɽCiBPĠAA2|7"io_cy?|Ʉ1+V؞!=f43NK}` z0|aQ ק tz۹S'H(&V}FB >E独_dԻ`yPW™so/z A'$F\l̮l`E@?l6==TH'ʼn:qfό-W[WFb_x9HLfppZǢ_~q

P] $n xE^=4|AeZl};hjAbrJS("<<Z.8mKBB@!Ì3y\?Y\ JR?v 9?JKK㎙35xMŸ0L#G۞{##P\`@0 sgчaC1וlK@56$'n3HX Ǎa}r***8QXnPaXشc?/>Fjj*%!=֕(zS@zI'c 3foK/!v(B9-^_~ g/<B<Nlu9s&!  isƀZl2{E K-^{vFrƲaZ]7?Xk,\(.ƫ˖aLl Zn#MDBw+Z\"-yK\u˥Ĥ($qTÁB!‘&!a{t:O>oXrx?aqDձ//TVVz\nj+W!C wngbkrBIFS?u*kBhz|<9];wB$aRd̘9 C-zhd^XM&#hKB B!3@ Dٖߏ)HOOCLl,D9kx%bt\o|~5bFWt0-+)Cu%)BBBؘm[?~T&i0} <> soȨؘ؛~ؘxpL ؐ$p BqV*%.~&O={0%>Ŵع{<<<:tXSXS`"Tj+琴w-5l,n=6&` س{oۆ6l]2 R΄C"ڴ~ii)<Tl|ؘ6q3qXhKB B!G `R[Gh¤ɓpw`Ϯ]XkA!3#cnPh4̙3t"o SyuگXI?.njPHEMNpVɇ-cŋO"0϶LYZOM'%!B}}PcENQEL+郛BBz{;nm ěYAC~ӧ_i~~~M!!Ô;"9)ɡ)=+)bXp0=)عc:<==7y2_J%E sOP¡I},`TlW>e;oaPA J͉qݜ?pۨX㶱c1} 6v] DBw,Y<2>J,[Eu%!B(w|}}MkM6 SMk>];j8`,"2?'% *++qA8A0} LuFPJ8B!t9___ "G&i[FDE”t3Rfp L0MB!.qԂ ,p5 o-51j*<%sa=%y=>%3RF.q8Ңo 111qeJ[O87~B7gJN B:’ >ԍ]EӬl&-bwW>0Bm3BIBڮ \k hfZa!*z-o&""#hyA~>m ш^ЙzxE_E^]8DBw{n.p`:8l{rj$N/:@ Qn=;Ѳ[X<"5F:QZ'5jgI2DTcCr("9FɆ{̵}^t%̻;Qӿb>ž_p /`07~ȹA^pc\fb a\oX |g;C?j0He+1rGAg8[Ÿ3€ U& F57Cԛ!ApcP-R*Œ)~:yg+$`~ 58S^ Kƴ_|} fL d(SBuߋt+mF&3rI~xEFAW'nNݳn9NCuqج+/tOrU*MDÁ8-Iy:Fkga&^y<J#:d^L]qW9]H9Sb IDAT s,l*(E :e*ɧAQ;#jȐ!` hbZM<bF\ ǏkXV{tl~ꮛ_X"}ю!Nd'tZW * Кj0qm2w. 
5ݺ #~r=iכkFMP[kk[vF^GBBJK/j,YLdA7B\ñ嗱gnh4TVVb޽xu2 >ap^^BvW%1kdQAD ѣ)FTO JqRmˁ~^J|+o>jRk.Z3Ɇb[A1P`HŽsfc s\|V_jQ^XȅVon1hfVľ?ڴ>0Xf "]T:@` 4M,G*Q3X&G72tM5e 5x-K-Njk% R3/5DłA>ۦ=9'NtDy#.$B%s ϧ8Tm0?O&K-3}WZz pTO%3B, }{A9{Dd$!p =S!ygq\ ÓQYM5=q/Sz2ȅ<,-cCnۻ|Rn 9 v4||z[vT[vֹ ~ B%i3[]P<Г%Z]+\EL*d]jvMݸ4\vLeεBzr[vT[vFS?DG%X,qͨuKhuڵSygzћ ؘ8Z@9эO+o`P8p .P)Δ뱿X~"f˰C5҂0,^ةp ֻPHElPHE(9w_q<.@UZFA 6Z}ǠdN6R!c$խ3LvDp ./1">pc[ǦRTP7ZBWQ(f j&h&0L +_7q=D|7y !!?<] h48F7-(.b{bD B\N^@Bm@U҈~uYHO_ b1ŨJݳf:}5p8,H<-VKǦB!UuTHKmN[FlH*Br>9 gOAPPT*u0k) 8J8B!ĥulZWjX99`[d4 U0-MwiWv___̘1ZGDC*!R, r//hĶ-}cǍL.;)-[jmι˕4]j;)A1o !%!"ӱmNPj Mr3 _ nP Pz:ƶ` X)ߓXȅe¡5:N2!=TD6C%bbTB!% p;gõt/,֡҈7H p6o%,u`:d:m$B0`ZNŪWQ0Zх3DZ> F7\xq<ÁB!U tR3sFFEB.`[8t/)TFI%eza8B:%!B:R]OIF0#"pKT޹b!aP)Fd DbHVY((@!B!Lƞ=urhZdef +3<>  E TJ%glC%n7J{Juh H.c֜9xmfD pB!Bk\QK-|cJTRh0JSTdW= )`NF7#'Ѧsssf#Zecl3bHe#J8B!>Km'bw.$BD0шNha#8(V JBBA`4M>3@7H7`ZLGL9O+) J8B!tJ#*;mr>y7\.X, bNwʡ*FZZ˗a4!19~*r2%paљ6CgFgB B!bK@SˠTpV6 _ d46;`֜9P(|NZ )^)XO&/{jj4`W)?|Jul:sLIp`0[""J8B!*VTC͂؝ 76$^br ,p2_@"";sO Tj)i0E59mX=k[,-JmRAgAjs)Gd!PB!Ġ{|R``4Yf: :4 TTMm87ĚolP1f ,u2mqcTk~A Ep1,`뮀u:zc:ivOkNn[ .hBHHFQ A2ujIr?y} U02aJjtVw`2K\yiasѿwER}^5YVݨ-1@[b\ PPB%NZK=  yqcVDž3z~X[bEP)ҙNIS d2K+ҙ0XÒZ5zlFވ" ΔJr#""F]V1k,hdZ TKxK QmNިl@Z@DD="" qq89rADXpkt߶;@5wvXTj=b@ȑ#ػ;Q?yLArݝ#GajR(е[ͷCr2fy5aa\G%$ܠGUv!r\ٵJJR)-u({a8T~,[#h~سkrssYp 44ڹc ^Ē+pi\Ct@ǰ05o7r """"Â]GPg5?aٹxU4@޽9&:,f%K%rDy˗VNԜ T'6`9j.膛#$4R @DDDuR*1 >èGX]3#‚kw'.I)y۩~=fqwg07Ql\FK%̟y! e1cF?rq$tB&l y2ZfB|G"ibU8j#YUGAOW1p"CeE:~о%baH RkWBeee$Gvc|:F'-2r#Pˑ0< DDDDDd2"##3aV3"Â5BD`tM{NZYf!G[Ce;p aښez=z.P(P(aj./.+KriLW'K +"?z ADXp&:mI\Y@ܔX"ZFDdbbc: 2%]0ɰjkK&^=vh~z=?DTZZgNA!q :RP D^L,YPňnK*Q,O]WPN n\YXz]Zm]+jya [wFc^Wa`,65xhr/RR A.e- T)?oCc?F+ځ\x=@ ڞAfFƕ;d ! gHnVTX̰T>`ԣ?W>2lfS#<0ohEEXV1ϧC|"!) @ TleS* "b;#sEE Xpƹ< 7.> M].tz%z"U{7Kߨ0jH,:ĢCNv6֭Y}:7n nJ c """%Ըb 0c;A|cc ^+EuyV[p @8cXl ""Z@ O0FרȅpibOx X aٹ 98Bě֝ѿGT*, XdMP\Tj/6N +2[lЖ䃓VŲe.zҘ 񉉘W~PVCo[ z=6m[b29 "Tc{=zؗl))Gзd7i ĻYl "",Ć"0YЖҨF#rss!6"WK*:E'%b)D'\9 3 RW^ôiZm})aL'_%""PVZL0G8е(+Rtu^F S@! 
C׷(G|||`T*7cÞ]kn$Fqn""""<,8P-ڂ+ p4?zɰJBttKВEJ ǎه# %?/L ^J SW,Z|maX$?tDN^}qvrJ:j1'0 "/]P1)_6^W0ObϢ%z~E9Ott4z-0A|V6ʫ1}l aҔ б,8(t)q7KHH1Q#FSڽ3߿ڟ4i@JKKq)(d"~I^k6@}"6.>XVvQ+RD8{JK;J,8[hbݻIII,61F p!#rJLB[GV#$îw"T6b(*-gR=$N*0c{a' !';芸 \ 6 RRRXp "j7sZ{Qd?YaܕV#.<r/h~~N5v6n |fDid[a0VxlJRg}bܓzUf@DDQt.(:XL  @(QDD DDoeXl ""wgUDE YV8QtB!t5Rp1QR6*Ԟa."KXl ""OIT UZ%CSǣ]a" GЪ:FrB' /`9;x9XqFDDD"k'xXK,uLk8o޿K2Ξ9R× rw }00L :B@l[ԛB6|11:b!ֳǚ5gtYZZ< zm`9ԍ(X9jC!!.<6%ztF5S`FYmpQbՁPj}n댾i g֢ =S [OFΟCNv6b>@=cѷg,cغ}7߂͙YXZ \ 6 RRRXp j0S9iqECUn clD0%(Wb"ƬGqy0,<!2MUX{|Mr/тaf5O+gg缅Q1$2?X,BҠH4.f ߌ]rP]ٗ@n 5q܄S͸tꏻY͈ #Hñ +x -LU+]TX} Q!x)x%E?k=٨qZs3ukl0u~>':/M9vy㌈f̑C1rpz?FGm;W\5y{v_,3uOH>ަfySzF;bBq{{ElҖDA#B,Dr7C_Wye|Rcka9Oǚ5gDDDj|}}1(/ޘGXؠp? #}}rGͨ.md<'0K>ot_5~>8g0g@'c,5"h)Pf GH'P>-J_b^[ 3yñ㬮c!0rp$GJ36i-غc:Q:׻/ D pwal? |K*Ἡ{]sm?w/m;^ksu<țguk<ΈLR)czv+9|Z}~Z@EŞ?G[3&,8 "WfBVŸ': 5"",VTA:N苊iĆ",VHd mא,Ȩs`-X!e:W;,8[}wG#FIcBaϞڇ"""""" R*{/_WlVhp|R*<:Z-pQeEyD7AJl6}Vc.b#>1#S&jFgj{˜$5bԣb[j?>ƒ]P1m؉Ȯ_ѮmmB+LJ%&5Eff1^^.Bqq=y#^RAD׳%шST7uF`~~~Pj\fQZZ҆׊ L&k6nf:oj}nؾq\J+GfFrhK Z9%1@\H D,H[ @DVv!P*,Da9V/99)))-̛76"##1k֬f ,w[jj*ՆFAzzzm̘1-_gMCm8oܭ7~~~K܎D,1(u2?Dv GdpLk8V߂!V pA ZT"+fDNv8x|q*u2ƒ$tm]rcfa@(f7jϿbm8oۆ367ֿƌ`$''ũ0[l jm=vņ5|NK@A)8XO7.x)[QcXhLi]mC S0"'i FvAˌ9:11FaV7(PЬ6u& _ng0Uhƶጾi goC}s":7T*8,Lc/7˱u>[SRۆ}@LbGdTNbyj %5O ïj;adB$o`jnv\ CeU0Y0Yކ3ƕ}i{d;{}ٵU*qd!,ğyǰQyt\jy YswoGDF2P(r)] IҰp' "!|aEV0dTYqEE^C隹в;࡚gOs%Xh4ȃD0zknTM ŋTM:!$ho)\Wmz" ,O]0dTRx79,O[~}{Ʃϯq̩Fs3@:,憥_ڷrBk3 bPsR ./b(DDDDnT2IL~R uƓNƛzOZ?O9:!ٻ7كb-,;WA bCvl'j 71wTR UX`Ԧ_!PP胇ܓܓ|Vq7@u:,~D,;W1fٹ C5y`a &P1r5?a޸#ȫŸl!8$&ODooƸ8*:\=q',[ ?? 7n&N ܞ 9r̚5QkZ̛7qAK={D*ZRnxBh mK/s1h`$a H<2#TP^jݚ4߻ DDDXRR.\~ĉyz BQAPN6rhK .`uKjנXV_F=aȅs=8}46mPI&a={Dtt4RRR=L >J>Yp """"jä2FGd|8fL+ LJa~~Rt kqaxmly@DDL&VXL;weeeXz5࣏>BLL ΠY'?CcHb? 
Ur fJ{[wpEr)* ))).ZV+25`@@@Fd^**L8w >|E<6/"""t:f̘'Oֹ]`Ŋǀ5-]񟗦K'DNa) ##xf: Q#]=x`"JKK!H0x$` ozyQ<1m DDD:-Zgbڴi0`jMꫯ˽t$tp?4?/{罇>}"eX5@ R>`LJgϜBiiKg\NC>}ӥKfEM2vP 9ڱuņ+.y_`a& o3UVod|B{g\wLzt& j;{$>%*j):uw& M־Oćo,"/zuGϸ9$O5ߍ>jf 'HޥK׮+9)@ꐱ}25ddlf6Kx}\ <)c1t0H$|oW@kNJDD‚gdee!44aQ Ig췵 k?R >[X+폾B18}ft\懿eb6 f{-Fb}T̝9'6A <R 3b, s+bs-ՊOx~܊x'Ylp 00wx6oAհX,ؕ=/M6XV`߰@DDD#!!ok`0Y?xz/ O.M/ǺMf3r/ط]k{ǎDhpw?cIo_c WN7ǎRQ3*of.=ſKuЯgs-{W3Ẍd ='#[n-|4x֮YGzC݁|h=7Ψ„' L;џ[^RADDz!8p?~QQQ5IRa]RsZϻ}]hpmCoZ庶mGE$-o#ʥF3j=B~uci Uіcw#G0|$tEvAFK[nXgn^$R)px-E,cXR%%h4b?nZ|9Ya>P(&8U3\-,8|'߰uVhZTTT:u A0n8D"EMb4!=ikGZ?.ouj)kdik~FyL]}HD7F @ @DDD-F*bɘ~={sc^х }{&4۶am(,B(- 9%G??L?WFii)m7 !!& G[0a-{F:01[gM+7rWֶۘ]췇 ֶ.°XR|Y!JjF-unZL<[e`v{o,Y -=w ԥK=.#TF,Ӝp{EΝObXRfQ ރ"""HP0hvzĆ"KcQ8w>S : <ݪKEE>#Y ҥ+z 101jhw rݝ\V=ȅIRgϜR&fDTJ)QKqXll0LQqa>6n}ߟ^N>J>'[$Wn~ngx%7""""""$"!|Vj p[\lKΠX> IDAT $"!5庵k1i<3Pb}fb W_Eҥ9j4?n"""2|pW&scpH""'P(a /3׿?ϜO?VcοS&Xp """""rF-!/?,Z6*^3I*i}F;tNUtջJJ?w&Fج`ȅ\;Jțh_#G0|$tEvAF˾ {#=W_yoNP(~u~{x0S3``` *++>u rI-nL,8ѵr ŵX "r}B#GG~}T__t=$%%1,7ǂM$ܹsO1t0ԓw\dŠ+sΡ W|Gi"%%m;v#_懆ZM}TXVc{v,OMśf8,6l6fHR~ xa$ct1cN#""g&=S".}8ܧK.0 4vw~w>x+wa0nT ٳ>YYY eXDu`h5쉹P(QiF8ȅ%$$୷B^^ Y[lw} &0,""hҒܹ_}_өը0c*& %(j!H08k5=8~?d(w}^Fur~xȥBmx` ?4A4:X͂`5tS1jP( MO>n V SN4hƍHYSPS*G.Ž4ؿ2*. B ps4mYL)8ٵڅ@*eun+j_pQ gLTʰ!J1ydLFV >F_rΟk^=oD߬6bt"]'Ƌ()<ڬ6TnCR9Gh0 oۆ3Qw_q1jqA0k?XDaa8tGӶm;nto6 яW(lI~Xp r W|s?3,-(hq(ۇXj{Y^^r;v k֬;#vņ@H$ #}\Nv6r6aRϒm#6bqCeeKKl#$4uhcac;ƶጾqԆ3Ɲ i|hv;cRo@s7Ҹ/Z--[@8wʪVRsbҔosDDݤ"?z c5n|r°Z8cXúƗ_~@L6 v PQQ2߿| ,X?У(--EIIS'o/6tIIP(Gøm\>i.WinV7ڿ+~DzZhDnn.21Bhx繰UW3_ǰ0*,@ ˷{DD3Q}G`41giӐ`H}c)d"zU"qڛ y.qrs>}H$Zr"'gY"j^53%KhI% #r1#xH7=17;L0sXz}˺ED`;V_~sAz^˒%KY#~5EH+Ӆ0U*JKJsN|+/,8yж4h P\\ DPe8Ds"?? 
@!aXp9deea͚5x嗽.k Dxӯ!jezgs j4Pжm0,\4Ā **"WuD<\s= b…uGy6ׯL&M4TTT*:T*zVӕ#3#c Tm'ٹ2IF:ؿ@?{S'Xn@DD.\p]!Z6}Hu Im]$tipY|0>bqui G8UV5\RXXyDFj3gļyjtC.Z_ߵ|#-- nYn*WKEBX6Xm.QxP9\Xk.M]EACeS3>poY7_+{&.^\%u, ((}cO|ч dIxz rʴK5 ݳoL,Z Y `<ޥO}["v"~#XҢEXp ""V0|pW&|sc$ HIIq̮-:\>iaѡ %}Z۶ Z7<6ntX~zZR\EX[rҋXԓX,{ڿo7nGEQQ^xskV4'LO+ӱ0 ?%fΚg7xk~,-[8n6x{{㙿?Hvl^>G't^| R}C!$$if!,, 'OY"3S{'OMxh^\ r}7?<@~[PPur]CC7jKT6Xfxw2xgPQQava L*l4'N-JV3ud]GDBwxV8&>_5|}}O ""UUUHNNƇ~իWW^aH6$&&ڒ W J#1~|i~9x^^^LDrcWrF;y R/:JKH` ?az=ك??ܱ..FcX,ؾswEL;vfiG܏ȁ!8 2b-l2LR)1U`pz,j}HϹ'  aZaXXp ""3IIIXd  OOOd2DEEaҥ8ͥlR=d`zS@a=[~^]:w~~7DTbY_sٶ57$ ((ؾmoVY‘ ZnnM _?~5IG.hD =\8mXWp|ip ""2 ols^(**blhs F_q}[vq=W'w ǫQYY~g~;lۖ>0}TSO<ۇҥK2>>u޻!/_ƱcGUk 6 'O@vVoKp埡 ?֯â3%x7{*,(oqcF_śoMo; 8XzK`ZZ@ׯdf% 福"@~@!usO^II |%g|SNm֊6Ą949 P&5؝pY Ry"7+`#gd| }Ͻ Xk|Gx'H$BxD,}1NF /_sZ|Nrk׭DzX,֭Yܶ傇6~C@ŗؙ3=~\V 2*2C#mph .`Ϯ$h4bΝHLHbH$BTt4bFB̨эdc_# 1,v|< ciMUV+[3g͆X,fE =nwnÄ9UO6DFFWǣ Rꛠ)gSUKyKy6 ??pS]tirYInd///$YzUUUB  %0?`scn FCX,?abF;S]tCo(Ԃ}`d̰f[sż \YN8 t;˜p ""=~6=P,].⑚˗v}aܹxo"; `ZwcTP5M=?TNxt"L&i.N!\mKjسk'ү3bgؾx;c>|}}[}N)8r5H,dEdK61xOG2 K`lw܁Ą򯗰Qj\<& YZ-RhC܏H,KVB׭Wkj6z  B.1@DDDi fƍȺb D oQb4RBe >"Ν;iF#2* #cFaȑ󪬴ׯ?, +DDDD"X m+~J: oooL6 SMCA~>݃;v履}`oo% O\)w7<{ӑ3aE\EF#Jt99T|M:diWC=h`SV&C&khc6.r1:fsӦbXaصs9cwJNlKS 5s61@tL&rzdiPTu~Q]J6+ِIS\j}1Iu: Jtضm;z vzՓN[vݻ7nOԋe+* ז_ZK T)s1E X];w //]vâ1cP(k7}UqrVppGefqX]  ]Qn0P3A$D-ĞݻnȞ7~dr9DzFDP`ڴi(--ŤI-۶!Bツ&3G  99Q[ٸa=fTVYqZtН! N$Y^^)S`ʔ) gٌjz|&T&&`hdTe]Zٳ'o֡_H:}> ?ƎtA{ɄÞݻ1mz,?88u$9Y%më ׷*p+\p7w5 TcHINj2ᐟgDFFDQQQN#)46^JDxAz 񈌊fnҁ}q13S+~ň+ȉ ^{q|| J#(m82ηgdoUS{FG[u6v}4v\ e l|/j|.2Lwu  #Ct~!xoKj̈nV{˗#7/> H$0͸|9~9O>>keńcu&jL6ZngdtؾlKQ=TM2qސ{4.n1~P5~hifsq (s oV]߶6MSɆkIRQxu"Igp"7ڄv=amJr2$ço؏W|ۇ!Ahhˉ "Ǧ̼S[/DDm_Ϛ֜ ][h@[0Z$r gmum+UhsK=Cbb"~L0Y*;ɓ[obٻ@2{]ΆGVkgpgnٹط'Q#wF-:ndt4d~۟𗗝paSk474zjfcDmR`Q{vM Y#8$cO`XψE1kF ٍX#G͍ͭ( lڼ_^…(++X"Zư1o2XL89fϙmf&z=b*TN 9ݻغdyEKլ^vfA̜5,@ΝlTwDz=ע `89lߺd5}*9D 7T"KmOoPbu:0( qqqvq>@DDDD&֯`6V &O6l@Eu :R. 
nX>h~!==p1}fLm|9˛ """"jF Vvzz"ǏL&kv؁\}㸢O&an_?;c& \\ر:&&\_[.(PآCUhs٭¿.@~~>]&}>h%}lj#"""""Pn6wuWʗ"-- 2. (.n4jO/} g;N2m{ﵨ|dt4NҡbdÁR mn &MԬZ˗/Gəvɱ ƦVCjVhB$gxԵ+@ÁƐ;-a HR&Pii) cǍgA$ax Ä9ݻغ@ R{`p۶޽[hUcEV\c)Na۳{7MN=5DömxWaph5՜ԒrtBDDD0``r"SN6P5c>kcZш+-*Ȩ&J " [Գ3g8]|;!<7ov-]LATT]{8Q;pVk&9!!mrLNo^?򙄢1Z`MOOO۶"7Wo? EFchęd<2j,@{"4wFv=.$%`HINndC# dR!zpB NwzUUUm0Ͷ/ggãE5 odejz1#L8]/2#{vlUh42@f C`4!`XPUULګ٫WSyy9֬^Y3b1t iě pH j#cNhVONCZVc]w݅܀rK]FZZ*.]_ fʏ?׭s<䤤:ۍF#N ,7zGO6lg]`ʕ92nnlngxg5zt힞իz={?'Ƅffb]0 ubH$ -gj5ƍ\ MömC:IH8 {L$jMɆo^,}Fw9dñG1pР+W`ፖ6 .o xcͨ0ҘRA@A[nh@z9lڰA",K-$"񓧰iÆz'E]1EނbMQD&͞9b}6_dYHs/s_|-$Ǖl=;wl6jjB @#1!Q M:o_h`=#K-իM\7$W {kt***-;0bH/iqo ֲBJ33fãE pŗ21@d j~J%E1 5t>f0P)B_YZ-/_3 `,gAF J[t\DZc-Lr&O3>L&ibĄ DvVV=`"YP|gDD~O@YY>޽8HRuȘ<0g<`5 \) "7"&6llwRDvD4~D]fdsuC\N%& mL x7>5]뵹% :ଙȨIIt_eA۳{h˶d 1@dTNtϽؽk'u'  $V:}:eI ] B,t7Dh|}n>m݂thAppBCC?acP‚K?5_| _=ΤEbeJ,QR Aj&#wFN#Jꦏ`\፷  gucȀL8Qru@&B)h= ]RC狐чz$rdr:uH,J6V#"ܧV/囕Ő2nY}n>.d^Ƒ(7ip*lJ8$y &ubZ&* $'%áThęd\peeeK$P6|8____^@b/W.+ apnL:DtUklރ l{O O? ʠ3VtRs]'].ZċDDDDDNhD L{ ~OQ0yqqBmcΙxP^^ )cB\\mۆ*$u."H剋b3=R:el?2 ^^2!" 
j?\#9C]s p7o݌ S|\xI}|%=#`n=7$>`…;~Bb+zŇО<-(ƻc B( vmNsq:0H.+˳a"""" *k SAq@5=$'`6ѵ.]+BN """"K$M岖NB&aX UnWVZa()Ghh~FWWW7oro 65u&"گU8s{"H0nDΘGMP\m!.]ڬ򩩩Xp!z Z(+-mKd2"٘nZAHX/Ȏ0@DDD@^bq'0 02zdfΞS) 9=&z5 Vk`pDDDDE'z3@Jr2RNp7AD """"!vzRP peL"r@L8S4i/\lstP=" Fb$xpŜvY&#**.· """"GdbO'vxJv3P)h{"K8tn`4QX+u J-BaÄ vj$'3 Nl2aV2C|v5Xlozݰ)II8p1#L89 ł"gN4[Fz:>Om?C}~WWRx39&X,cm 3))U88xlݲf3߀ e`!J CI9*+9&aA)U*Lkh4bL<8䂗 SOoױBVcҥHrbM";gX"HINFb|jd2ǁx nX2 H\9Ǐg%˜p """+1W@,Rj\xDDӱgNPk4 T#Pf B̈́VMY63CE,tYZ-HJ anZnVxRX,h4h0@\nf@L&Kr~nDP*UPkP:~nǏ}Pp1@T?6g^oƞH,ZFVk;L2?zN[" Jsmk'~pN):Nh&0ZBKh4jh:?Mo2 Ԩ-~N8`0@K}F$fd##=j5FE1`&N8xn9JJhM&deeA6z=W,4lؿ'PuH JX ef z2zC0?j IDAT=Cn:ZpV77VƍŜb^S&!2:˿jR n=`0@"%9{vA(K@\C @nZnUJ%TeFdiشaƎHau2%0.*tёT2a9 !HD"d2BFzzJH$F'٫im3lNv5`2Da떦?Wii) *`@p Gr`>[Jp dpPʴc7 yщ$*(÷3H~b0Q۷nFBB]DDDDDg0AO"MP,h$Fe%8]Ä9&Tx9ə F'KCI9\o_cdGp """"L_?eYT,YYZ--[p7,~Ho0ꎩӧ3/ #cF1[noīDDDD& M~hmqb`CsUsN׬L-StD mfWeTP5אfwFT]^;b';xnc1@DDDD& ՚ըwjg)D"Qy)@DB<`9&M-[=P:}[6{}zc1ز{@߈pdeiѳ{(kr c0~h !#p%۾ c0d sԳgyp """"{RQQ)݇UtC/Պ>\N!>l9y0^}ux=S)K/Ia2f_艓O? ˯">W3=ms>QCC5z ҋ&,, VBYo!bjŰ.b?ٷwnTVU;u=< T*!Cd3XzG bcV78D5ߖ"((cg`݆OO<8go """RSSq:V+>Z}x! Ϥ 2*ζۇEӍO}ݟ={6w7w6ʕ+pDdR!<8&)NpMvy~&NDeUVrس{71az˗"r`j(6>^^^/..Gx;w} F܉Gf&'qA9~ML`0QQ~Rv`*DDDDDg0AUUU7-#OsufɽQRR~www yoǏks֭N<٬j!!8w/QbڔD"?sB,7X9sڿ_~d.'-_G~ЩS'$'Eׇbw ɄM7bɶ̜~bflXXAÑY+03 U0FL8 @"hx  iY1*XXψYؙ3-7;Ԓ!`_& x <#=rDC1}ގ۷w?ؾmGGaxT$߰L_?eYT,YҼ-yyyضm~]:6J bg E[/GD`HqcȑE ru@9e%,V"3啼XĺzF~O;A.c_,L_=naС&Q߶{'O40f^8;SYeԂfE\\~0d"&ڀ_9qPk@zC*o2f|tjJR! M}1])xeXW|t q] +FߧUϪ-R  +/uP=d]c=#"""bOs{()GF _lC2TK_&]i9<0wl ?L grp DtB 悿+:Y0:uW%3bT)nr@[dy;QkMճJ+kgDDDDs8C/UO|/izh0WZq<]l2gKQ^eEza@93pUV"KܬsC}yֵ53>w ׯc0 6J,UH/:cyvSN.EζS:?z_Zg[ZA)nIl?gͶZֲ OfDk յԳ53I9?uX8ݻw rHc`:\=!~8Gm:劮XN7v{TT] njbK%˼ZsYk) t0S`4Q T{ &(07n#c1p0fOʄu(=CԻ%ƋUsFO?MUx9"LUc|b'UXψnNJR$& Bツ:.xHܐf7OhdD\VkگA}x#%}4Y*!zBR_}INƑ#G6Fpu};yalW_[#H_Z e2: ~hBWo ~+|@zAY{M9R]c=#"!Be˖aժU Hل,rh`@A^.cdG89Rx\cAR@|w"WRAA0udv;UVO9U!-ybܦ"y&2/{{P{I0  (?mv/G$t++xft? 
S5s/1km_~+&@wtB/O_7|ս1*]* ve#/,ٕJl9}w%Ƭ)!߮>C0k$xQ I oÚ:Ö q#HV][utRnejiLVkvQN(V[VBU\BIHp~PR"zU9rn>p㭩Ur/x9Hu; 0s/BJ1o<|*b#μ ~x^þzLA[09,xp./"J/-_FՈOm"Ǫ4~nyw߉kb̿=~%_OȀTԀ??aػa> K*=v(J7k$8zv 3 ulX""""&ڶ%~:#wٶp8&Ƅc'+emڻ#MI\ 'È5ºACT ¦:8~pX5D7> OF]q\5>F݂3'}07 ͸(9KU@h6*y hA p4q4ϝĥ(S4.gmX=?J8|hpA{>j?7Di^ a/yu߷0FÁ?&=w$@eH8֎twu2@5'#΄Cڮr8B-0F6*yWcop jrf ⯷VWlʐ;4n[y{v۴Qh2CYtbm_b۾Hd-j>g;opm' mKjmo?9?{b v]g6$W|+B#7>( $"R`g[[ΈGd8u^Gnn.TUa{E0t&952E?CpHpEZ2Y:[lJ@>y~*p """2&W:-D`OI"NL8ye|j%yDDD0jP>}SG~:̆@DN2֢oQ&'$&&nΊF~^ix⓵C+g@z!T 1) F1 Æ`0zPx2@DDDeN_0ؖ۹}9 C^>[ *J!kA& DaaG`AHNM:iظa=DxdLua=9cc1@DDDe  4U6dolfplƁܾͶn `xL$.]734j5¥A F+ UU#„2ZqL9z@qQ>\C,||n-rИT:UZmHD""""d"*EbȏO&ju vnfHg0Zv( L>ـں76838_no'lɆWW2$\4uFۼ&D9Á|^ 1Mzov"뛝zM#bchvTB& ô r2?/7|6//` i4$&&2 ''n݊19e@ JEq(0$!#Jd`FF, *U L5f:Gqsm׊hp@ Zhi%Fjih \>iiiL8$ 0 qZ]9teeЕ!w~PTPbRA 2%K(.:m*gdh.A73 H)ƞl 'p "j+!+(hDɀǏ###W~ق&FLIZEE(.*$]{-OȀ9#PKPM%%\ga섅p B te~Cpz[lnD:m?abȤAI! +#6fŧO7ι bmPe-̐p Yxc¡A[ 饄ƱH84. ItlݺR'9 Ufh̍@&/m)}XI_ؕ4]B$#,f3eZ-,h/]bVL&@NWbˡW T1"S-z0 u Pxts=L&4 d@>=l=BE-ϫ26&-j8#.QJAh6ijCoK88JÀtaÇ@n25%S2>tP"hoLo~exwsVDDL8PWս]l r&H4ZsJ"IgrJ*ZW[DMWMT FF?ٗ]{j]I"b1p6d̲t@UjDl&|PE~-7&9'pn""Gd@\Dff&qJχ6hJ(*8y@&=_{3J5F}~ R1XSָ>zC%4/Gw`"H0y4deize Db Dp mџ -PoACu%2mИԻil """&? 3O(.*žݻqrҢ]dHNMHNMŰ#gnZg ߺkGJz n=ƣPYlB8dB lBDL8PKPt44j5te|_QQV?dTd2q]0 (.:ӅЕb08^$,L(C8c ̞R#P1Ft09g~s *dJl8":riiix͏ "&T1|P%èkGcԵ /sWkҥZCBkcR+'z3O H RzDDD>BKtX8O\ĉ-72zrd@ T5Ե[FBBBI(wOo \G T,ԗ߶pGx[m+mRq4"_jsb1JlX$jN$s֕%TJMv q܌ ^ Ta f1d#1A-d;>Sw(띟\A!1إרvs&""" 1 """TRgEXIB||9Goa"""Q\[Hc!燈-*3Y:XfhjȤ(UVeee=g L8xTDM(Bv!=&*N_]y˯Q,V@T~XA/5(>>GqN]ګ߫y%xEJQ ""j^9RADD# g0 ߿%#ۚeHw݈ Dw@?`4׷:a2R[D~"H:U;=V (;mVhg`BVի"'Tb yXe(C=.甶Kˈ 088rbD!_]ñ2uV5`xLdTUZ+l߼NM[em- """""N8w3|M>}کc[2lق[%.\\KB@$)_TytN+((e:Udff[uIðE]ZFw_Fbbbڦ26핑;dddd2xEAR.\hK:'JĂ <""++|ZFMM >4$::cy$0@DDDDDqǏkdǡmei4+bǍønn:KJ쎿Q  DDDDDDNʲرc8v眜kNۢ+'%%!))83#*U ԬG{2oXBÁ+jb I mo >}tj/a>bK=|JCuŌ"Is}˖-غuk]r%ۆ}{u<{8T8<{8Qyl|1{8QX"a z&|DAA~.R<""h? 
'BRADDԋ%Dd "z9 T*e {8QYbeZ`܎ """!J9ju _<""h /ÄPTH&ADDD """"""6qC"沘DDDDDDm,X(p ""zٻ]:G" n`܎ """{.ΉR*`<>D0@DDDDDDfǎ0 D/u۶p "" v@Ga>z0@DD# &L9UUؾmGDDm/Äx- ycCpp`͚5u{-G/DDDDD+ǣDS@L88pر6;]\.osMM 4 ]HVa;Jףa{DD]˖-;}Nxx&OGqP( a bҥh$%%7w{m>ݻme'''cĉ zv\`|rL&ÔS""hnB4J z2h4r!-- *鲤R)RRRlzdÜ9s.P(J{w,oDDDDDm{Z]@ƃek׮tsC0=ry+gp;fdd ##sNN 2DDDDDD`6]aڵPYL6F;6( Jp"*ju _""h[ Z@x&ahl "OW_{ ""SrL&e8…Վ `7ڑ"jdBnnnVq$77On=99saK:0@D%<# GSh"[˗DS@RсUg3d9}qiI!C{sd2̙3l"""նBdo# D䬤$̞=v… ]ZvL&.] Fc|j5>\ /Z0o-_F$""!&z eáBdo# D䪔̞=PSSKvz6mG^!""oc  rauҥL6Xҁ"ꩤ DD䫶oۆJ ra_ }(dTҡ; EEl8""h\ >2SI&'I6( L0ѥj˴Xb<ڧvz"1s >t`:#Ilx- >yH8D"DO:dz;V'NRȳL6Qw&8g'OFxxCwߏ=8uFnyׯRSSS kge%d&SNL&c p踧z tI/]Buqw1=YTZݻ!!!;w.^}U~DuWZWrL q@hhha D"Fֆr\p<nYYY-/1i$(  22&LK.8[?Wh4B$!>>EEE6máT*Stt4(^K,ȑ#!뮻4L:,[(YbʴZ ų>sO>i*bXf z=6n܈]v!99eeec M-{gZ˗#G 77ΝàA>vܻヒ3g99908p׻T?WY,0g9}4 &&'(..:u Я_vo_~/^lFmm-5opz ~hIee%1||ؾ};5,^O=p뭷b;:Wkr]":G*"..rէO|HpzMEo*ˈüy/4jjQ ?[} Ge^1oWꫯXHNN\.X,yϕkv\iK̜9f_|mܸ˗/Gnn.Aӱj*KDtrju 6m`7\g2ۡ'%%L55(єt[o.OD"QPTUU!!!555]!** .\hDXXXǽ hhh'|qٶwf+5 صkWǵ֓"==x"+ذaFD_vϜ9pIR$_wȼyW_}-7e/b5eϟ?qMs4xv:kR*ŶpK.YN~0o<ܹwEDj5>\߅1xkr<<ثz0C?>;ZlpXxIDATmo4Mt=bR`ߣj_|ȇ3sJvv6 55նi5Bf) pX""""""*.*ڲbcZjuHZu+WnHIIAqNիWW^Rq Kꪫ0m4cbb믿/aZ}MGۛnի/cРA?Z~c'N5k+|ITVV gbʕ馛T\fg61L8z(}YGONNƗ_~~?8o#$$Fv\ll,z=~a ?Oۡ;4MsNDDDZbffd@q f"ᤑk֬Annn^pժƮB*".. """Qwyl|1{8QX"a z&|DAA~.R<""h7 2mAaa!DDD4%A{"">"␊뮿z+Ux,]Q"""aYYYP0DDmffd@qxF3p^ vU!""""""$%%a9s`ĉ0LPظq@]R'x$"nl2ٸa=ӡR@R J[։DDD=4Z;t.L&BrreD"D"DGG=zO;иĸnST][3*J,G" n`.%%rT.\lK6,Z9gA"""R^Y޵ ٻw{|b !00zJ={ֶoHIIaS.\䶤Cٳ8d` }(둜ߓd.Lb`mcGcQ]mDR2AAp ""BjK.Ucn-_戈+` H'CI~>l)"""/RpB).ƎRl9""2)))YPպ_֢\O^EDD䥚'J[뼣&:qQit0 N~ Dbb"e8˙L&㜦 .پz?y?&PQwX,.'ezR[W:pgO^}&E"Spa{HLLDXXb1 yyy D,bƥ9jjj ezD}Qc2AﻗppL6sK/S["%%[oVRm6p 8tPœ]uUNO$) ׍ŋezLh7d ɻ}HEW )pa1yyyرc&Md_~L<۷ozzKl{ TVV/   8lTWWC$!!!F<r9b1D"QTTiӦ!<<J騨ϗ_~I&AP 0anڡk?}4`ر-͘1|}Yt:H$$%%Z_D .`ɒ%9r$d2qu??:34b)S@Vѣ[[emmmu$"izOH6p5 ""4MWd2FK7 Rlnn/}vov! 
@8prM&0rHA"YYYBee% CF `XB~s Ǐ}]2LHMMrss*a޼yG֊+”)Sl6 NnV_|ĉ;k,O?ؗ/>aܹoPUU%&MK.me]]-fƍ8`1c\spVcJst:`6b῅cy_ۉJ'AyqFA/?|ᩧ+K,ABPPpw -\nmٳgmnfA* vu)0l0ڵDȪU^Gm'?& mAA*r-l~g}"fW&_ҕ:{fx^> DDDԭlvzU\WWdh4۷cǎUW]# ܦsnj?c7nkw޽{1nܸu---ŀVG}}}tl`` jkkֳ-[l?GF¤Ip=BV .@"^?&&C>}PQQSNuAP,Ŭi(L[+u$""ټy3N8پ*W g}뮻F~!֑SNl=!!pܹ >aY}\NN222?FPP-aiiiqi{?O5 |hΝJ|嗶mv瑙 XxܩSw߅Fi9ˎ֑:gxl ""nQTL:۷oǔ)Sm۶6 hhN,l6Y'gWHOOGll,^u$''&lJ4U:1w\_:Djj*{/k?7bͅ Czz:VZL3ԑpto1tPx}L07x#, 8<__|u^& rK,۱f::|1?!H ɐX{/`رyh\aPlڴ ΛJ%q@RR]<}|i)NDFFB.^@J&3QYY'OŮ)f+>Za̮t~II z=7z :Y0hР a۴^t+fI%G}Ϟ=j]뽲\zۮk{boﭼ{ ﭼ{k:p?P;d2!//U՗+$#-L8ѩL Oh4"mTf|f BCC*U 1md+q2?˗-NCDD$~Euu5>x=l;\t AAA2df̜;V?$ l,XqcSmu1 ?L&d[KbPUUe߾u$d̚bKj hz˴Z-fL&|y3rrrPd2!88*F]{-f̘H[]~eQ^^8"+ vm>Ŧ8w,D"i݆3ѷ_?[=s^}et:GDO>W…  B||{{+5{+MeZtz ! ϠV`I=%""""""*$&o@*⡹s """"""Xp0L8s dCyxٹom9N"""""""O䰇ZGft<׎-/'I""""""1F]3fyLccd2 8c8DDDDDDDvR?VN8L8T*<<F:C*p """""""spؓQ8ڨ{.5"""""""";ɩP 1RADDDDDDDnDŽ]d~ݶÆcᶟdYi~Oe<~VPDEamֵyqؼ]uWֵuTW^oumz;^mv㽕V[yo录V[yo录֮]wu:ek DL>Wm5Xow\BBS^^;yQ}"QYwW'B.Z\.okiqqWֵuTW^oumz;^mv㽕V[yo录V[yo录֮m ȍ8DDDDDDDvL81@DDDDDDDnDŽ4IENDB`././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/contributor/ceilo-gnocchi-arch.png0000664000175100017510000034212315033033467024545 0ustar00mylesmylesPNG  IHDRdKrbKGD pHYs  tIME  &ni IDATxTSg//%@eVJBA8kwJU{PsV=A^S[[Zl۶ [W b ՊZ˪nGUU<9jr0LxG Ƃ,hvdRR 둝 ɄzނVPYYY(,,D\\4 n7\. ݎ7 +++ֆ9)؂ԛnٳgۋV>|lDDDDGtt4 Ҧь c PVV|rr2m۶СClƉ'sΈ .ANCnnnS~Αdgg^000N (n-`zvfDbb݁{=!Mьd$%%7\.|]rr2V^ts}}<a6%WVY76:w9ՊLQ dPZZ ł!M&$ը&;;:N]rdpd2P]]-_}&zFqq1 U5ٳg:)w܉_]ur.^_~9 Ζ=ljuEE \.N< 䠓h?YIt:ݨwB[[>SӧOoLDDDR>0|,^waѴihh; )ܺuK.cɁ[[[ֆ(466rx`q-|pXrO. 
wu:֯_lfn㣏>š5k-ztwwC'N~zh4r niiiax=zwk׮F_ ۍ?0 v\ؿ\^YGۋN|G>Q>=(|f˅?CCCGtt4|Q!L&TTTvC ??r[nl6ϡ$>FCCsx'֭[>믿[nAa˖->dM&>zphllNBY>_~e*xO8@DDD4,X)_Y@DDD3gN2 'zOTXX('vEaaO|a?7C`ZQUU3& z/rPUU  VףX~ZIͲ260طoڠp('SkΝr0!x"N< xXظq#do'Nĉ˵gN}꺰Pn`Y0>ˋJNNɓ'xUyl(Sӟxv_g}m&ɄիWeVljmD1ۍgφ= Q^DDD4MV00..rѳgASaaa@x`f[-srNa۶mx}C0LDII JJJpĉ=q+}RRRаT*rɰ T߸q 8j&"<pkjEmm-:ؽ{7jkkľhXJKK}& >by^^8T m4Mб''s\hjj X?ǽ[q)j_h4"++kL?v!KDDDO9R"Hh4e/֦&6XdWyyyΆllFkkgE}}}1>C9r `9R'0Xoމ cm۶&IOՊj1Už n塶6h._PP րr >0zN塴TV#M&u*jVɓcODDD41%""M*A2lTjJ[[[О'mkkٳgaQZZ:򓓓<* 6YlN;2.,,^qqqظq&/ńTD(j$d2+p''gϪ.rF $?Gnn/T-[|ҟl dhFJNN F)R,T([+&#T/Ӊ$n{߸q6@5L(((u\__&tvv=Aȣ߳ɓa-8vgϞP\"d{"z*{g@HJJw2GWTTСCerr3M;w_~%ь D[[[nI=5Bф=DZp#~8l۶ 286a׃eJ-z;޽[uى8|LmpBь-QVVdTWWr{:n@_YgUXX(pUNz&p{NĶHJJ ;4UzOdBkk+f|^ Q[[:Qc KDDD3R^^ό dE/Ǥ1@?|t8rȔŋe}TTT*oWC9||}AArssVՉ30hjj~PYY)͕;;;޳pRCdt:tvvl6h4IX{+hĶmY=nE 9&/Q$@ww7*++QWW?!"""9qUΞ=+ë`ާ~ qFF}:(Cԅn%&dYY½`C({ M&Q^^PO v)|G]=zd9UUUؽ{7}d2ɺm\" Ťpj˅?ĉؽ{7֭['_o>A^DDD4cL,UݎeJ. >!PiiiȰɓaSUU DOEPRrˏ(K*&yRS[[+KF,+O`},Zxwn3\ [[[v ~ F޽{w. 'N*_mO `\,:v F @]]B}Vsss#nŋLDDD)8,h7nD\\ I+++qYq'Vk0#7j۱{n"++ !bqqq@Obp\(//hW;;;Q[[+̑Eggy 8qBօFAgg':;;e]h49Rrr2dHhCK%M;TeeeO*c1Fkk <KKKQRRۍ_=1m^v;NN'?2$LJJ D,//Oiڊݻwh4"++ nfY֍NΝ;ǵF#e%ΖfgmJjŎ;PXX(Sg$ɓOeEkk\?&""" ьg4Jne>77W5HU#'aU'3/~C&Iu]&k2t:TWWat(--U z5(--Eyy9naeuu5f3|ONNFEEڀ޼b P\\, LC-[QQZ9&C;wT]2]mʒe)ױW+++p\U },..((Q>puyRbb"֬YZ"""Iyyy#NxtYz^GaaaX'ӕ=Dsss2 5s*E1>z=rssG7VۭHNN}EʱKoК-f3geV|:}ED:=aTuN;etA|8OmkgٷHrb_׮Z w$"""LL1e@@ d9DDDDDDDDD4<㈎`@ZZڔ7Y"""""""""S.\EMP, """"""""""!KDDDshDDDDD4m<GaX044 (,ZW ! 
ܺu oV]ŋz72Y&d,eL2Y&d,eL2Y&˜ eNCa&<Ȅ8d,eL2Y&d,eL2Y&d,ex!h 2Y&d,eL2Y&d,eL2Y&d(` Y"n;v < X!DDDDDDDD$* 88rpi`(KDDDDDDDD>999 e#,&$Ec_@DDDDDD4W@?7߰"v?Y^?x|~&+D([XXӧO{EфB?pu"3°=12ӎT )KDDDDDD*ʼn"Y""u\hnnFbb",YoAp {7o͛o~$N5 1%""""""""̟?<|^q1/.^(KuVnY,~ݓ~JLLDII 7 G|A^`6a6\vcc#?{f*&&@Ha0447o?SCCCUG||<,Y@DDDDDD4 %&&"11ׇ -?##<1%RQVV#G/@GG:::&쾾>\|YXd Ǎ@DDDDDD4í[X$cž}dMDDDDDDD1U,X""""""",Q2%"""""""[`BYDDDDDDD W\k֬޽{Y,Q&:eKDDDDDD_"Y0MT(0hb K4 em dFi,X"""""""7nahCYDDDDDDD4Z d#Y1 7eKDDDDDD47aݺu@hF e]Xd +|0%`,X""""""""@hO>$X"""""""" @h(C'|a,ׇ/r/]yyys? fxiii,senڴ }x l64IniA{qGg^ygЦҜ9F*wU`*d.ڶrϼAS>Sng妋gTnM0of;gz9,h; 5~ twY 1w&g2f{ uqWL~;,!DDDc@f%Ǎv Ҍ+o"ՐgʎMŨXNG]s1;(>ttt3YD=diNP653^]66 Dڥ>;6XC]ÓS#jF~_GYYwY@n@Ll<:O. lL_Yv +Ʃ xykxC1ER[NZ +j턭-+5c=61%F|㜲>})ȆU-kD#&~fizޤwhO6KܯvlddЯ, 4^ۉm* F_FM+7sV\+^G˕7UBEЉμ\6ϔ g^E9E{&|biw}>ڳ]G//ch\CJ&i< 0PIP#{[kR 9r#]hSj˅sinj hl> 9שE;u &KG#Րm\7qP'ۋXeҬxq˦(QڂZ;Uozk-.W zGt;}K#"i4ѩHKK xl;w>鼁Ci̋KxUՋ^ѻF\n{b7\>6;,Э,]G|.??3ޟCf&?wGb2rЯ2B`CԫT`-H/IGx{ӛEx==fh aOtd]4E}ڨcLdi_b}>trij~Q1 G3]}<3hWMx>o~|j%gj}ho0|i&ZsA7yoe/p@f>o? C. ?tahѱXϹ}uymfƪ3]C+ذ8Vc]ÙW^{A`?F,rzۮǃo]qt;aixi&n 5燼{!`Q-u4smR<*]2w:lA{]Gv8v+׮ڋfi ]Ųϔ hڂrlh;>CNau(BD4%QQHڲǁP뛠M!?u&m! K(i<0I҈T41"| Tmys,T81\j۠ 8GjK}vh 1f7 5%-j[@o\1f[6݀C+ HׇSNc%11۷oGFF7 M4Ml&h/j#}y)_r>H0*`hY|7B0X ZNP zWM/%uD~D$b|`_ź[7g eAsaC_ e`xU/?a7og. jo ,>f'9ޞTMU[)>̊u%5_ySuV.+>ԦdזSB1'DPܢa(yI^lSlݺ -ؗqk9T,,צRUn`pӱwV_?4U>l2>PebClm?qCEG%h[PGe;m>TuY1M4Se)ܙ;")GϬ.I^4u>7x=ǃ^xoB:½ǃڹW;w.yY@s if&7\X  B93s7fifi6=1 rVƄm{a,NY ICQNtg|]i4 6=cͺoz{_]R 9r|\KC ߁x8=>N$ Fs[y68nz-ahj_͚C۠[eDLln(;?+,{9nQY! 
x*[6 ~9|({{ÎTyg2kXD]ZMAGS 9h;`i7xUܼ'64?*ے* 5o?apgʎ c>)Y~6vb} , 5]>rp-WޔZfe p߃#;K;9TѣFcCٱ~Q~E6hS^jM7g1 +%0 {ƨ(fz'Yjº~X9z, 3ӈ^?|$@tho)_C[PcۂhC9T C?3ϐ+@ptYKOĶUz$O>D51}uddda Tf<|oei^6b ;pvxj{_ Eg!,wL[00%~!u1 21 ߚbQP\{@WrӬ \^$2¸ _ۉGѬ%wq1 d/8 KDڰ.ح9E{|z)8}xm 6珢Z}mڥ2u:lh&R|t6ƥ^oxMZYkwѽcivVt?*{)o|] VQ=AXoEqrz{+a2t,3uLj)L] Jqe^PmUFz'S1@) MH5;{\pfu|\w*h[*:Xc7)x>ڿ-2TmEh.鰩~gv "/q 7<#=HKbDɐ7`uw}?zE+>'pwuk h9c n݊OznbK'E2o b} >rXS 9Jm r_F<s+sxv!\' IDAT  Lj`~0UFygI _i-7֜ax`?X TXt &6^G='Ͱ^5kK3b}`6m`qǒTC uV!C.B"pS֏긓j߽ {+oUψVsHUZZO#zX_"0m-Gᄊi+~1`Q?pڿ~eAY qϘ!~LX08ЏmO]i ݈LdQ.ApQcbM7:TCܟOLlP~{xQ8=Y]GuUF7SrP8koLpHBֆۿrC]k (%v<.u y*1kǃϢpGdbiIQH\$&(C̨9U?ag9֑r!Ҩ(ًvw)x{θ}| i56K=`uD}a[kXYqևw-a(b%xtBFlo{~o4}-,(--CPL̼STҒ _i{?󨆴<<eۓMX{P=Z^D@ E|]I{W:1`M apiZoйu*%yyE \tǨ퍊 @vlp@(ȉ/)F͕bșvZD\*A1ޯG!U+ng:=hǤ?s6 `"1%F1s! r;PL0[ijjl]x{,fO㺴hVN>G鼁Ci̋KbDӤ}11O +퍈hƸ M1F1<'ĒWOo(Tx;L FE!iA ʂG6ENci '$@ `2C~y GE>M7 5{Ak6^JCwx@{<@f-ÆCzKЦbLܼю#-N cI6? *#2l]jy>ШIu%X]'ๆ3Ҍ=.̈́xy@oӯ`g1K4?5P3 ^}>)FA7vbߠ)xl{ܧg\x>7\ajB6kwYeCGK)7Ð mE}ly7nu#g.Yxoխ,t^SrtYd5'he3 ja#X4d?h8ut;CSlT)FE{bAJTx1bb\#H)##StYDloDD3SuV]ml` K762<4\x^9"x'Q9'ƥ0c@LlD]>XDߖܮȿܢذx@D3:, 5B-(g%Ya 7ܣm4V큣")G.w}B#1#f&f=o2lAҒ_ Fm`Y*PHLච{Z oXx(ރ 00 Y}~x m.xM ބ cMN {eႜE]Ofv'1#f>ttt&{TiZWہ5xᯢ[~wPz\AɏY486y+pfӯv9024?uPBlI>$Y ||BeZn=yӗcPkN?ϝs#FIws:J ) DJr"9(D ib}t:z1婢Rl<`4xj n\׊X@^&`4.5 dɫ|a8M/) .^s5WakzZzO!homR^ l_ب1cG}U'׏>l6a2&9sQ%vXm6h'p "ot@,)jKB٬ylg.\.q @7XU b7A@ xnZmeزF+nNkf΁`V"wo*Aɣ?O >KjT_͇ŢvS Y ?ݏbΊMʪWqSyzאEt tdg`Ί:O+y=OH Uz>ے8G/r ;yA"FD*)e (6J YSm6E K@QDu\vr9Ӏq1@Z958*qƠ-@xtkP_qU靨Ǔm|n%p>exΊM U!q/ Wqpnݎ=}+!#i5ڥRw+"`2wF@FZ@A2@qjGk!6 u 'l-dÍm^A\S5[ԙxasBoANAKt ]0'pwǗ#~ff>| t`Ut{/~faqG f>ܸ+ yq9#$|P A}%z Z Ha0mR8n*`_{W㣥L41 aGY SgSY 0y UtOu/ٛ7zL KD^)9HAZ#\ofrr4E,zljAR6A@j4n}ݟFYO?[uG~IYA}#OpЈ8Gp """"!޳L)4I᫱N >L˹/'Z%"*Rp\M%\9`/U=hva&VV#ر,KxY ?>B|@T 4T^SVP<1ӇsPDjj*j_}>41r& r%"_\%s˃]{~)qwwbr3x dizd (T/l-ܸn\H=0$s{?""""VV1|ʕ2Wڤ6E岧ļI|401Pz=R[!~ ^:+:jDBѸ@ƵOsWlB|ƹ؍!#+A4c8wQ#%nѰi.{@)P]NT.޶C n k1pvwAJJ vr;YBUx`oq0DEBUs0sq EB@0'%@t?4!"oDvL: hdBeggN\6/<BD#Fۑ_nf7"""8Fk "A4ʴZ-nh`q#""q- 
FY"""""""""–DDDDDDDDcTnn.`f""@h2L())$p8|Y"j/Bq}8?<8G:tSDdggsP dȻ5a2&q Ubf#f x FDDa KDDDDDDDDD4DhDh4n3q999(-܎ Y Qw^Z:`kD7s@Av-&Nv;Yg4a@:s "oDDD4~ a KDDDDDDDDD4BDDDDDDDDDD#, MHMME"AQjj "aPR]Ph4Ұ69!X5"?+p:̟$sqi\ oGHj,"-čmbs=l_C5lBDD^Dbj\8Yk K4V0~Uo}k#I Yhpߛ㏥~x=S9u{&?*98GpYp9}+iT˜όxo]>iC{ BDcVDh Ғ鴡 -s=e%Ej`ϰ%>C`"//Vi/r8q̖v^>KHko*=o,AEGaظ&~s<2~ڱBL OCEM>56N'9ӣӭ@*o(Q_`^o(^Ot|:Kz%#fS~s ZM E"6_;D#0{#l/.UDD4L&JJJP]{X|*ƇOJTT7 gwO_o"",%&6җVc͖q6k~pc'g!vf!lvǻs!@yur_gvbO΢IyawaF5SJ]Vm~Aѐ8A;Qr‰ 'b޽T9§爎 .=ИU"dj=||-Qq8_Gt8V|_;A,G|y LWn+n—'#:\wȂD .og%l.񾷊D\wv.B!f) IDATEMӀ^ yԫΪ*L@ͅVJp9U4 nrss80ߧyB찢lF#hq#nrj-zTDAyxmg ̙s81.LY4l מƛ֤\t6a.|E\R iM*J.RCs;S_X|j?5>/?7T-[yp M'.ߣ3CT)մIYǰt,]p?5y`QD`|<4 d}\NNJ*a3cBjրDqxJ$ ?nBr 9b޽0t҉ou/9O 4|&N .V=o dv#"" /K7x}oB]Yz>˯j0gZ zT4T桢 ' R({6k!oQ4S9E%GٱP#8G(Y+iM<0iBDDcJEudDt5V/NiϬ'bY'<˧#^րY͗.LǶqqa85t@܈VrybuLG]w8 #8G\{rHvsbBDDcIC`G^͓/ܣ_H wK?~粨WEu*kȂ~GD)N1A9“:rH YyA "1eߛҚrp|;+fKޠm>k|5[>,É*DRpnLH:{Z{Zk@stD4&kk;FJJ?ۻet7q@h0[lظ&S]j٘3#Jߦ5X`rQx"T4ņoMkRQQӄ93\[aj\Y~`QTn\mgYr|*hJ+qhd8+Z9 #8GрZ/>iIQ.wlZ̙zU[ sz= d ^aR~]+,BpK''z^0!X ! o^0[xOg{Djj*j_}|Y""""""m6bGv9q@g1tz\p/<„/R7ǥ'Gm ׯVV(@Ku5"CCѸb KD^-p:̟$6V$"D/<sa5R&q@\*R\t|6C_5 _XEj\/b9Zo; %%;vp!""o?)Qqj "A4`OJ&~hbC8 C@ZmhvVN ⣤3$ĸ PU4 wu=juJT. O} fVHBV/2Zo*k-8z:@h nC#U"wQ L Mp$CMvqNJꏶLϧ)c-q\\%]@u?\zt՗: R"7In^lr=s k\n .#DDcYҚ;8qpAD#Fۑ_nf8jn]+WurJ WCjUKs+WVcP LӆR/Rɉ԰/WPV_b9-oʌ$lȚBD4v1q@5+C9D9hC7Mo4s0m6]wz`b/B#@#_ $=չƧc`QR9@?c@e˱T[K(lY]FO,ED,00JmA09\j54r{J31:ZJ&q\NRGK_4^8xW\KKSYӡHBb,$"@huݩBER6AUMs?]&#-Зӆ[EUgI pAeHOb KDDDDDtD]y}&3 &jMc`4hJPA 2͂*y5`Ѷ-L.#+3|MAnn.`f""@.V :.HGKϙhbz`0EճeQv„n–Ui8jg_,Gdh.L \d2l"@cPkN?!"D%"%9>z;1+a3g Nœ7gsD]u]L`voh8br/ņ#8bĘp&{˴li@DcY"!&3`PQ%vXm6h'촢SiRD,HPN{'$ˀܿR5+yH&`oClC lȚZ UuMkSQxjtlY΍DDDDDDP3n=b=2(xUl&}:"|Q)aUغ*e8p ehliGAY .0%Q @#FqAiY%lvLH͂_ϙ""De޽0t҉oyQjpuNt@7| WN`BɁF,> G q&FE, k1qt8c i-DD#F y&vZq}t՗*GkN UW p(=f;.ՌsYdXKK",Q/z.B96Ի9Ӏ=gm n`(;*k-8_tu51Fų 1Q|h,a KDDDDDAG0vcRP 6=&U͞ΓU`&$? 
/z%s|,#1ˏC@DDDDDJZYa,-]TY-Ewsf h>X8WXv5Y dz(\N@dKdc@JA07۰bן`nHBvprvƮxv+ј@[EeQWC)N.:PJsz.=b̎î pHKI dtݩR.GK=@՜iN鴡m tԦ`-LaA͏ PYkן۷{ҹ9DDDDDDAC&!i{j PAYU@'G+UuM^j5Z-Dps VUht=CDD,p:̟$f)qtNй=umPғ|_l6v6RRRcyCDㅼf9mzMD%Cr#'#8GZ ^>W%9F͋.`3GT6s O`3G\jliǹ76T|8cnaǹ41%tv ]ϰKA 3ts5J{ɳ`˗K٢FMt1q]q;fM~gAY=m~H9sd_ c%E /޳eݫgˆgTZ_N*}ilosV+ǔ8]wKP%8UnMGd+dz̗L ݛܿAَqHâmQQka喗o& PJU~l{Ìo!{Al~׏D-i>W di\/DB0Yt]˰h[BV J/ƞ=*k-xvSBȪ->cp<^8xdy! ddy17.Hpg^)oSxjt\;4qA+_l~ʣ%k|lѹEۮv~/G ו}~g?$ M(,7u|g>9b36U]$5.94vW]Ҡa>{e+br6޽ձJ :[:.K_pUZPP֠!k^*kpu/"1VfOBAYb±xD'Ƅ#ﵵZ]8G ERxV\۵~u{ރb9Ϛ]+-Cˑst \gu}|Ma ,x/<+HKrh,5EcϚlna. +3PYۄ#ye+UuM8WX`yn`jEcۃSb!?cC}3|s]@݈ WKDh ϊna6d B'a_>o l;;().'{ =5]Nl?+s937ҡONVO-r7>>v#k. 5`eFr0}غ*]9@;z溽h}UH[J9s2iGG נXo]r3X<+n{G o.3y 3ڡ\k,59;W~ʌ$,v]]cJŬ%=C(x^0P? ,"#kMK rRDzCR};>$W<`WJo9ٹcElù%EUn]%5ocNEF` J>gZ`rD鼫-9y%XI|d>1ܶL& yN* U7*s`6o|ȡ*w;QSb9Db,4E)<*{6/GeEY^#l8_Q/W7]'=rǡ!<`.ڇs@sq!6>)=d_-Z Ƨ\)oq魞]?ųx=-)JiS{+EPT'8n(Z~"5HbYu(1 ,)ɉm^ _HT->\ĜqȻ*/f" C_][Oe^\,@:E:K@+U. "ڎ%^Kئ`ȒO P ]OOj"#n f#Lfġ 9bĤ%EJykT5!=9jȞܗ>pDpJWEQ d :?/R Y  Vt0 y"^HF̙QZ#OM@U tۢɷ -rmM KLJ/]w㡥P A/l_D,'`уPㄱ0|QģӠ0%UYkr%Elxa\5`:g!k"A+t p,^:$] ?t0{{R1LꭊJ9=wy wX_:+rF>; faBj; (6O, y+`bT55O8>J\0%z k5#` sN|v>Hl?>F-.I#X"HdUG1 F(YҲJ. +N%VI8_,~ v@Ǔ%I>arPYkcNbeF~bn9sݻZ:^:ұTO2p<-6QpqP\5V"B.:â/+gE|Q 6=s-͏n C0 Hr9Ӥ/X'E"=g b&A@ڵ8}]30V 2/܆8_cC eQg}PKk;*PEa. 
W>FdX6kJ+ 8_TS54 +Ƴ`Ů?aXX:#-) 8-k|s 1Jg8WXųx w{uU:}O1\3ϗO1p,wPP0Xa jXJc ͏yſņ=׈ѶB!&?Xɣv栫k\oZę,Rx_Enr,"`yWEFM8j{(at^=x.bCd4Aw_vij 24h}ɩr0[2 9K@jo~ /Eێ#24f{ ų gz*Y}<$"B-6$Ą#g 徑aAٹ{W+짖+o~T~|ص~6_!yt|v7)gR*=;U(lӧrq_UNYSۿ\ٞ )b7ZRQ]W9*z39\Ĝ) IDATi@,ccNCT׷ @]T 8?9{"/yb K>)1&^=E6>ؽȰ 8*k-kBDh ғgna|߂z嶈 UBz;&-)svi6^_@N!"DeC lȚ2jDhPf@ }~쁦sj_۾mZ0fŹ<^ZRh\|-從Z.X2;W#O+ڟZ6gŹ|oosm6yߕ@l3K^Gnn.t:-Z4M 1"%|/ Ui5REEs"ƺ!`=+p\`ثf4B g+Dx~5\uN K~)R؞h7!a KĪ\ւgu[ti-LvyZA4n BdXP698G AU EӶ~2>ooi `w48/m~鏡?T՚L&>|zk׮Ŝ9s>9V)/6ګsG\f"9|Uws'bccaX@ f_#55+WVVJ_>*W/ym 2h!A c#OۃV+P`_mRˁV׾ V +z;sa(r @-*Apko8zm{3* Ý}؄><("Qi`D#,Q9;Wz9[8D9sKbs=N+>J'0 0 J0[RRW_}Xv-4͸א`yersmiMr\Y+L5Wi+ e}Lk51r07iQE+jZGh[x^LJPN)u*W͇MԂ@~Ԋ@%p8WX+ H GZRԘ>8G  @wNjO0!!!tʂ`ӧJJJ -NZ8;ڃB]AC,X']݁3Gey! LfjCir,&&NA6^8xJY˂7Zff&RSSQ꫈,R0VBZ RHk륒֙<8p p=I [xLX>4` UF81- d]8Gv{ {jLf333둗ܖ<,Zk׮(jʋR6@ҁ #!%N9) h4b޽HMMCߡ;v}JyDVCV×;(- BpYQ' KGwI%NWFXd!ng1%"8wO CFsѰn^/<sa5R&y 6mN(X|9-[`vSLs?oF{J׋vj L Ul6h@ ƶ切Mna2Z6}@ߞaZY(:k/ (؟\ !-CWQ mx`ǎ.3%">FHh<Ց8G ! m'%@t?45j?Q\\ ^R@[[z=>cݛ`C۞߾~v|th#5~9m 06,, aaa}7<< #|ɅNJ;+h` "bE}a'k0s_ϝ|Y"""""ꝟ? 6j|UZZ8~8sHpq t:,ZpYصkɓ}_Raٲe8~Tݭ^.U"cq0iH0q(M Q}v[pA%4hӮ;U֐=Vſ"77zo - tazlܸZCiLL ֮];CِU&d+EXE/aw+`1q@5+C9D9hiZh7 ?)@0,330 hkk *RSSr߈zwalCE5wu_UH ғ3rZeTQ|4Q&%aYYYYXh  ̖(MV74ݸ0V6نu? kц!24yEb,/? }(Bq{(b%o+yFWt:ٳ˖-s;wÇa2},g!vXFҸ7TaLe兾Z[[yx]=[lx Ƒ`A%DQtAp?Ygo*ceñЗVZpT^8x>fN:*k-HfX @#x .vVUfnDŽ,?8G{[K'B^GX @@J+330 hmmx\ q}|cV+Jقz'[lH /Yvp /rUxe)+Jz\>8Zgi# 6b֮ng8ElnbBD#FQII L5/2Dmu[jž={܂p(=Zsߍf+JZ m;A.=K O`ϸAǿE06R+ UX{MA4Sqh ''*T瘗6=OOui_i?0%1VXPI`j\Xl+oSˤjpGes .E@}`+E}ciD1%""""<9jĖ1Z[[]YV-keyy;4XPjHq.ΗKKU oe!"4p.*V%x+7$ CȾ4Sqh `6t7׍g0\cBV0%o* * o߾XYX25-(Wn-Lr--)8(Z(b T0FY"""""0!ұRpw d[[[a0ܟ (_/`|kiZlܸ8~8ֱj܌j+1VE7+33E87Q"QU= si0%""""@9-5t:\)))wbsگ7Vff e{2Roh۪앳{j!=bY!"o{C@nHV78 #8Gcg1deeAaǎ.] 
)MNrO]U_7X e2+r57۔ Y]F*p9s[۷{}꜂` `N%8ԓ8 *r@hDXh"Dq+Lp=w dzx=M6bsNT6c7.`eff|M?~|X$\)oK>8jr]'Z6'OZ Z=JK HUKdPR]Py,y0j2T "9/<] 0M_uccPM*E+alם*OJ_x Ú9jM)G?8-m/=Ϊ|>DcwChC]:*ņ+:!& Y9 Įx9+iNֈ"V "8D4Fxy)))ngerW* ous@hD68ߜOD%Cr9G#su#id8W 9~}d`OJ&iڐ0Bt}]0o#ݾ`8X W#!&@\r]vkv[K% -ƒc,r&o sA9Byun429Y(}oRn};U(=+E,TFTWWX> T*!ph\?4|Q ҒxV"\{|ӽR{s0bɗ0q(M "!O,%b&{A^9'۷#܂K H,v_1M͓ha'H;\.~g>Dk#گnq.7 e?S8Xz:[2;'"JE606@*?$#@i4X ܆E=l 4<~q/j9ȗV&7}NՈtc"s4|>)ws;0h(!Dkk#ƒ8 )u" xH,y>At]i6`xHл\vy9sDVBL"di|fVA;F纛k+:?G@*X!4@V>]HU`O S7i1$ƅ1FCX QE<*%@<,t\{Iy7C! >tlwrL#8G8s*7K e /,ؿ|{oNձzN{ ucٱBtB6zi}0鵝4c<ߊ&$ U(0U#=**!QVŒc KoBzX;7.u%o ή TVV^s@(b)& I0_xj&٪j=fxM@QgPr7 ޫy Ks18FA_Ot?/ses䕾A Ų-w^7- {ܸ~n/`9NqGR$ 1 wDln_|"|w5"S/|agP3FQQcq1B%KkoسoqߡiCHPDݏ!rD2,JP˃(--ۋbW?v ƎGp_] V,Bjr~SX +s.?N ߿>ڀ,ϛ͊s)| ]+*yKTWAfٗwl6;ءG ?-^A*8\mߵuMKގGb``` 0;`|mǝox{"j( wDqξ@ YVdCxL'4֯;ozK@  S1j6dLl^1bٰu݀xDk_ OLkqY￵Je X[0 AY:63<ѣGQYYrXQ]2dSk@GXu_Թk.ug#7K4u$~ s~ ֭XS+!m*&z8GKY1QJi]1Q onZiiH:rr^-9g@LL̘mooGhh(QDI,X0E}} knܸ@(bgLu*P(fe-n#*Hbl_Pdti2`U+4AxP|ȳ[}@ hepk W?FA#8FLB0"ފsWx(C*}?osw#_]Gtda\h6vI\bRMhR#pF9R"u}RXl.\wٟbo֏&|iii- XeM!4Zqd@6l,AP>W\H?d]&B:Z@h0f|c>V2Zey{Y #QTTS ,=CP.պ'B,c@\;6 ޥ>5Bxi¬d2;*nxSfF'iH| )ospxud \Wsk@֓e{?|3'mRDvv6```mD0(;aI 7mBhxŀl3 04kMvj IDAT<}e!$ !%at8ް}+!S?yw:Qq 1!t HDwa\P[}s4W3V{ŎL/#dEL[`Vb)oϛMy oԺRq&[m[S8u?o|6 yGMtk[;gP2/.EnFT/esuj$e֋"R0`<9Bu|b"""n:|gߓaw6 vِ/ֶ̆Xo_u  C,a1#8FP%, l%rJg"(6e,Rd^ 9xJWTz;czlyl삹2].u &^c.=xWu ؈w/l>:0Xne[~x0V#&Bf7srr|jrrrM?m_T>tww" ]bz~ L)fϰZ`ƙ{1B쵝3gc1*g:g;Zo>7E%2DX}8 똸˞) 쳘9--2ɇp=859- Z}$0cOnwZ|r˴e]}֥fl:HT]⴮ɶ%Vn #Qnso?/ Ϭڜd6418-ms^Ni9*j$T*i`@v0 Kcܩ/""Ds9>G3:G.(#+'(2=Z6$*`O_+Z*tl>o[^rX񍽫pf@%t `݃ a?k4vCS>B|u@%Wu+1=ՑXB[PuɄ@|KO'r+/9#k651[ 8:ׄ%aPTPTWJ74F8D?iXBD1hŦxl޼yַ'OAE&BqؘBfr,skBٹFmry,:\"u{Ğ={\g@,61Tl "A4G"GJܹmri8|t;77b(` bSI_MRVw26en]YX̆"9ŀ,yE@H t?Ϥj;vC~1DDg|#XJb$d\D03|nhhcU&̅&Cn Z@K9Fp k.k}b9!S"(2BDT! 
Hu8 k4g% ۋ"{nZ#su8Z؁6HǶ,`Ca@6i4+TOP?/ڮl`cӘbS ,gpcѼ#t:0Av<}ujsK!KȜ>: قx6Nuc;~ZV$At/=볐"] a5^nhLw;1dX,"ϰ1h}nMg2iɚ k4gs>|}zk0w[8r[&,^YVED~Y[wjAآ(` E+&zF ,|a6cqᅡul(x8 9J`#@ߠNѵ Y0xWCqgzA#oL&L&TPUUU۷ZZ"}|/뮹Yx\fa@&wwcbCǴ2,y"qqh pȂKv*>fA#8FL5Weːc'?ȳA7D3X]@1^_VCcZ5 3-//Geet{Æ K4O,MNjO,Eኌy3IWEE*++`?"o ҄ 6Pst;D{:ZݻҼ%f6`By0 KcBt{Q# CA0]bS E8#8Fxs.'@L zG&l(,6B'E`[`0hڶ^KP  G4O<ۤ^XȆ%@#P 1 JKKyofYC߬bfo r 1AXz\eRu|QcFMI^yȂ3^Om@EEE0uiCCC{ MM#"""FmM8`0-e-%cthAK ga8Fᘐcԍ} 9v@ʑr!bIg:c|~~ѣR06m4T>օm]ٯci6(9dߍR[l(89͘P |`,y #8FsGAXbX{# ̒5 S~mii)*++۷o*KDϮ%_ayt> "*'HˏiYfֺHn8FA1‰c{̈́t#lj]F~]EEN<)^jDDD0܈Cۥ9 xI$ͬ`čܶ_ap d'GZ Butbغ&R)-O6K nbǎb(`0 Kn3!kѬX)U6">oN͎# ֶb 'S`0XV%""À,uޔYfK|!>3cLja%Ts4;B{s@v(**B__}{n !""I|kr(utdzޖ,H`lhh(^xchF&|hZ|u# L 7b 㥑4;k"=lD#Ph8'?RDh=Ͷ&ig?hn޽_MȚdimS"T*t:M\phx3dɍqLJY;s Y&:OcyZV3mcqA4"ilS!39hhh`/[R<%vs#5zE<Ip\v =mu׸Ѭ =x}dffbϞ=8,6tl "A4yd)Ф/Fqw_S^^J ?%,Ď;| !Q" v"PSuiVCa9ըi@MS=/9y?޹EDs% aMbbS yZ8F%N bS'v-0 (**Wx|5˥ "KW*.U:_QYL$xJb$~I`cќc,N'Ո5h4(..F__ 44/|oHl KYEGE$@ Jh mߏL'D!_͆1JJmYuօp%d/BnF|wfFMMMh`޽*g_l N)(J4AggʕP޷ 3R-%!w1 KDDDDD3BVK555XFf>߱c/C} J YgQ{= zY2cJ(maX TVVDXCfN۷oG^^0E$B_Aܹ=h+wX;!"e[4y& zWDD,Jp^Kcl"A4GlmԗQ̌Tl޼VZ|lBH˶`a0uj`C>XƀPu(GV,d#X~Z'PG×uצm}kќa@l D1hN 0`xUUUy&Rرc_~>Aҧ`h@}9``~P'x0Q*@9+DDDDD4 `,( \Lh0t8d/`lP[{9 N T\A+!S?yw5+v>[#"-FP=g@6YDD#PQQL= Al r g<;nYS'P~O "?[@b@y^Nu@kDWl$nڄЬ, po/k!"A4wz=V BCCa`X1Ss`z"[K {Go43gp&lPF!8u9d c &Y"""""V`,޽eeeY &Az*`hf{pnϜ UڃZ}R0h}h @wDP|&d !MaѤ0 KDDDDDBE۷oFFq 2Yl d)amkbϺtЪ] x 30v _ڃk 1/fє1 KDDDDD^^u nڴ V+?_JwfK;kGm ޾R@jk@Q@gвLૡ͞:Rv({=1L6"M d+C`qQs8/H?гuchK8D)RZU4iGsdW^lPl "9)dhlmt[E$%%<',, jZ C6  gϊu߄V lfvsG_ZIcϪYk@YS}cg  @ '""Y,M %.9-ph^ф(B\.޾*eЊh,pvspvlrXV`$:9ZpcPFz8qχV_G ( 0 KDDDDD&cJ X|VEee%k:8& vz*`y6Lk@Sjsɘkh5{&rT S2J~MRARD@N1.d)""3]"rb烗>"6橐֑#8zr)>3Ğv}eNă4pME(E&ΛAPM#33{qyY"t*6q %.uN}{U08dM&L&T*(ym8eiq?1G$2% (4F N C Moc@&laQ'^[X233^#m\]slb@6m޼ qʄH""Dsi׮]8ԉ٪;H&1-P ]s _=t:l^^b; 8F~ y_oDL߼ 1Jޖi@ _ơjT7vR# <8Fpt:0Avm_Ǒ0Xc•x^wk[cߖz{ _]Cgs}YֿV*F&ާ3t>ndC}xwQXXFDDDN拟 :\se}z[۱wQrxr"ors5R#<6_24WJqD5rUطe9~z۲Q /9,ܛ8xzF4md 
Սnv|t_ֵm._4w9;R~_ֵ>{GѹFty^Ng]mt} ˺k8Fpn%T7v`1O^rHe]ԇFSQ.Ϲ~u@gGgO}fef'ZںP"í4k}}$+WBqM)PDDwQQQ"P ̐yὓHI2ЉJnYW9ks|C'j\j&F{sD{3Yx͓.xmje[~Yz/z>kL1cG'Ԥ(\z{_Ս_'c2G?ѽ#eZöl#[.iG?4=g [ 4=~ÁڡrgY0vvذ"ks9Fp kxFc[nv}?y= WdSEQď]30w[O`൝u}X\cՍxmjHHIKOk;W#51R+6K-Q2egwHo^,\Ԥ(D+,~m?Ѳ&;ggQ!ȁCfX[w(mT 3#7o.@aM8`VIQ(\}A mjR\IMڜqe-m]߼1j6K)#8F=9xe?P"7#G=8TV3K. ^z"1J>yF/;(Yx\&E!&B^)@tBʚ]pX,W9pJ>&^ WfOJz~DuVċpk _=~7Ъeш^v(Ih#""Ā,4{&lXغ^OШ`HpPd^N׶cAt``EzkGeDO,ǻ嗱u}֤2wlWƚ\n<5=':v(;g=I {oPgzɳ8qNJ.Nv;5ًL=.x+,M QeY|洮YDDDBhD$'{% \ii)[`1 X[u/8m;XL-m]8]*e8fU$qcK"9FpAXz\o?GtbJ/!eX}Neoâ|g4upe;NsnFe5uMɚE(Տufh\Oh4O5FDDD. 6A@MrO`@6 Xe-͡ՈW %1%R#anH ]8qp--7;~F(ɓX~=8Fptšd|t儅'ϽQ;vN!;0{ل $uTNeD8ICj5 )R__B7"""7u-m],6}[eli춾x:U:P.n tqu/dݻw{8FpR2k¡1-7;qe$FJYLRs{PQZ` :_mNI3Go|<$Z6۳'nL./!wBZԣ'!Kcc*9瘸peR#q4 .],>fV<8-{ce1L>4,xmr d&s!8TV|OȘv8w{LYDD3/8\ qlmHzaaaí[t:7(* * !l " @шpcudF)8U˸`TjR4E`EٙL&=zԯ>7#h"3aQ>AP46橐;t:TVVF#DSoDD4{nxسg o,61TS^GGG|H r#%dm<hB6j4) [__66L`@IH GE,PfPf,>sF8Oe4DDD4/0 6oތ8VeBPd".&(1hvڵ :qA7re &dBoo/߈(W F&}1d),NAYYRP`0p !:Ŧ13e[ǷӶ̑z7""" x e&QT`:8go l""!~$uZʹSIld""" Tl"'jH>ak3Q9CLD4JEEp@ ( 0 KD`nۇZFAA&Qe PU*,[@Dd2A*3( 0 KD~rg"6X:jw8F,alm ں`P_Dii:DM#"'g_5`2&6D#fbRX%CVl O.CѠ둟φ&7Gn"`,DDTCi)[jef Eܓr%3d\ii)[`1 X[`MDӦlY{$}c9Fͮ"zar6-Y,ٶ{mY/t:ih4_KMiBg@6 3litN.{ʐY/63cl c-Z.AZ7""" $,Y@D4E}f=o[e) "9;Η1JDDDDD@V#44tz`,5 ZVEee%֑%""ƀ,4شiחV矗%""8Uۀ,3dh`""?`,āĨ;^OHWd2(0G DD4NYVSms`""" T !c-E/3dgǮ] \6Q`bcW!Q" vO2CNRAR!MA74`@<-+ңsjFc8FԗdlzId&Ni]ZVZnhh`!(2<Ք͜Gff&MDbS0th!!bmB3SI1{Gݽ{7 4͜c9F"rgR@Vӊh[n\ bSI_MO֐%"7(:) Pu:3cc<#v3Ьht:`L.[pRW\GV "" yf4Ʊ*]LK?>!8F͇1b׮]8ԉ~MPD"lW;6,q;.{B j?]7h4]٩7"""lh4W k _eaߚlxG3MZŦA8F ; F,usAQIw}sقo/סwWujiٛ ٩7"""SD$B/Zh6dZ6"֨ D3oPĵ.q?5\G-;,0LBDDDu\& ;F||udC e"(#8F 誽,,q]VOd`/[M@23GxS'la|hcHQUeA8Fq9E+di q߻u^ej4#Y؋(09x-Γ:IsEEE8Q@`@i h}eӆ\+mmHVlEEŔŹhC䡿Y.QZZꗟ!>>^Z޷dΎ%Ā,IztE+h IŐtYH<;1j69FLj# ]߫z4mZ;E|xqHPb!C%60LSJPx:Dl[VB\\{6/AcbَBpD4q)C,8ʝ-r%K|ҙo18F7ܧ T d=/7O(}|ő@7{iZiY^xA}di WLgW{ŠU_6l`,MI(d㈜MJKKyڂ)L} %UmEk?!4X`#Ӥ|eË֑t˶@bp 1AXz\eү!PgPqȂ!`Jyv/l8;'@ad쯨@AA֣hP]] `jYo6X޾ X-ҥ4) D!(4L3&#8FLjs 
ÀaXo_][WN)[焐(E&B,6eFN&!77W˛S ȗ0 K^ HDPD"D1hbSfvڅg9(9u E%p1 KDDDDD.,, yyy튊)'3sTDDNRA!@4 \Y>g~9\pe,Y@D~MqX8FͥD(TL`ϒuԀ͛'^NsmϚYs3ԑx$- ?Ά:2/&={zȟbSYc!Ŧ@X*5\d2y] M4F#" TƮ.KJtY>DDDDD4c-[lrdQ`߫V/ZD)?{::%%sɒnhhcU&E&Acڵk5u|s7 oT*֭[e &FDDǿ[^z|΁|H ;U*s2 4 *ZD1ht:|;`쎙AlA__T`Xk4Id߈#iix$-  >kn/f~YϘe@Ǩ0)cќa3`0!SUh4֭[CUUKmٻq.q:6~'UUU(..F__Old믿lڴ;7>;>;?_ Ξ~clc@fE^^N< ^d}!ȌuƮM Brmǭ^+&CR_Qpg em팾_(BV{πl+--ECc ,k F!"DsAXz\ BN~~K@v2441cr0JF~ӟJEQɓ!9J Je*+n2;3vuuu]]I 7mBhVAZ`[l"A4z=LMy5OVz7"?VQQ!Յ;&$4Xr,ގ>"Cɽc0v…PGF1Cf7e t:,s!.wp ^VgS(J_hANΜcdhyS@J 7>/Զ-VdYD&Z^ ?_ts Ѭq-uT`YΓb82e?9a{Է|{˾\__pݞ/7P4 ;v,*dd¤Q`9{C5R IDAT̖-u|* 0Cf7e t:*++aF#ֈ Tw?g & -?,3SSvzκ"tzܝ33Q{tN;sHqcض]h-ةRh5U41T05(#$$$@_9c $/ǏpwɆzO&YA(VHRHR< tףoppm[;J|ig10 ^F(FJ1@6r H "#LTA {,f>Cs]oDDDZѳa Y""""" BolT*ŢEߏ~ƀՋ(L~~&vulWl,j\DDDDD63[P( db[^`&ި o ܋=RaKvvD/.EDDDDDa[^aEQ,jnnFuu5)Aq@٩Sjl<lWv d(|* i"Q̲X,0 S)^쪯ǩ /_+"W,[Zbss?, 8_3$\XۡP`BtAÁ]]x gގV%~2j5r ?HDR}{֭[566zal3ls8f~}=Y"""""[Y"hP]]'O;fȑ#얍C[d2S3>];"@"t( 鎎( eggcݨݻY8滐ϨKٌ|`,Ec MMM]w?d({lg6QWW۷cϞ=8r2q/Ds7<5lU_Ouv)sblΎ_5EBZZZh&^.믿 1DDN*B*"%fCSSwX,FAii)#0H+z{;sObRV0\=ى 1܏L&/^܄,E5ύpJ1Kc10JdAx.9 hN#!g:'dTVV qFz:0v" w4ep,4|+N ~rې7Bލ!i,D1~l^GTzYZh!u [VV@vJ%<iP__ :{ /!;]}jjna K4K c]._ ߁hs) 6OFFX?H54=/1bvd#13E1f c].}n=6DEcj $=--- {`0@QbZ ZJ466e kk.޽;n[z}@+cR+]78f3Nuvd{YZ\q,,_9o~bo|1( }_̟hD0෿k.o!DˢEBo {… ]d2=UDD ܹs,B 6o3cl6)>/M q@2T}lJ0Ŗ).A$e ;W"YQq2V,,O[`/h@/GĸjXnq{I. 
&q]w믿F?.\RSS c8Žl䠣#yQ$yofbm؀R)vk0lxK_y<# kN,Z:r1h4E 2ǜ~5$}##8`OzNUi[C[Ͻ?D3D HN>$3IU';$X3M1XeU3e>@DDKW_Ecc#BS~^GGGn D¢1͆zC]&ɠhPZZ O[~߇,Q XCyŸ/@uH.0D4-9HI&8MҨ߅-,Nrz=qAo(ԄZH$8p ";5QWWa[B@VG}&yJn\h6;P}!$B7ֳhDq$Y~_= C2f+I{3w,o8}.*u[8FmmJ֭[߿6 UUU0, fԏK,!QVV'O:jX~sdg4n z>>_YYB,fZgl߾ZrJ{eeeΎ+p U}l@h.pSY$M$fQ<y#gA)aC4GD }IA OPQ3wlA0^I/(=&*PT8&f6~l߾+QZZ'O~h܆O<<OWw>"ǵ|Hеspڻ!H#A$z΅$c6~#=R )3]zDsϥ,AR waN{7|  6Gwl "hTXXB55HSRAyG;>fsTurC}}}@7B@ii)juL,Cmm0x` ;FkÁ]ޑϫT9O,*X" *13D $Wk ;w귍o ˅(IRHRDNnGUSN]7oڌ pիxP(| dB3QT޻ 4x ISR,#ˑ [;[^.w#g07P(]e2+:_|)d2TTT@D7l0o0x,/NuvTg'r%X4plFX͆]:CM 7*QTK\9P*,AF$.]y e qqȎ1%oD ǫ{zW*0Ͱl0l6{.DֹA{l //< ,Eo tlkl= GD44ߜӊ oRNZ ͆p\qjZs *++Y0` JJJqN\ A®"1(***ѕ>|ig1fA*zdoo&i-^R NV V+ ^~ZfBNqt?8\WDcQ)J|9hAb <@ea={ ^14C&yO "ɯC+Q elAZ7w+ȭkH\#a{iU& B b1T*UL/P54M\̑ d(2@ d dj 8/!]мzo4\/j-M2سm5"lݻ!QQTjlAzfrڻa+Q)H,s'Q46>ȶv4gL]h|?{ )ZVhii_a1L&Cvv6 eQTjlLtNXv_:L ;NFz:x;z\LivƼ= czb!6e6e+7)Kx:;Q9b`0Oz/l^F UUUP(..f"Y"ju8OSOD, % ,RG]տHjw2ypUWG뺏qۆ6z]\󨼼|=x 8ٌGthZfo z=VD͜]Z-vu-_YYFoZCf3sqϭ_4oCP 3vĢի.g F#_[9BDs677( #]m"wIݮ,wX7IG[v˿Vz#`qkc\ { &60Ҟ =7vŲI9 g/3 3RjTVV ?pI&D 2{`##j5`0#U*р--xk9ٮ.-.)]h d(-0L됁( pFn]C҅Y|uvLmn& ai ^Rl[! SvبDHGbN>ْ7iU?‚FJ&fX,~߷VJf&1i"H z{;>XN_/QT 6Wc ðc wOWw+C؅$M X箻g1Aq}c؁1U;)KyC% E3b^KϮs|,j˓&VԄƀZRAz>BnxnÆyK0 d([+!m7uݘ?U;}<6c_sh]60l!!] 
Hzq-Y, z_]'@ IDAT>h7/=BEDXldOsnhR6Ѿ}P^^G͛ J:t:޽f55506bO5Y"""""y0~l/ߙ^f w6cc C!l,g%(65[C1t $9gaΕY\붡Y}7e$آRbz= H`ٰo߾mp}'870%"""";rA  DKruw:m3^k5 uMp7(3b69)ہ}F ;0qsH^[D.=>o4\BGW`;.,ۮ{zxCNTB( ܨAz_ܹs|O3?v:d28w2Ex5Y""""" c |XHda޺65<+b£ Ppa4j .8@/?9$ E,,hፆKi[BӺ}?=mq&B*X ؉( T*H$7-{:M(\?@Ʉŋ!.g KDQMx.ܿR-AN ,݅ Yېz---%޵#]8uz:b 1%u@:NcFFn]h@3BlY[𻓟mbԵzo4\N~ m/'7ͨÖBstjݻQQQⅠeee,$҂]]a 7*QTK\9P*,AF$.]y eaA2i힐 :YuAS(h h{Z햵BtNm!Pz?_ik?tߞlﴟz uNJBii)ql޼jB<}Knu?XplF; d(f`ѢE;_Aa0ލq֤5<.>')IӀ{R _-qnBxLq>h7|7wO҇;, dkC?t,j@q*$H@Dqn#_ikx|$}c=\ih a ,$6 i[ ,L2=@@c0$Յ~cj B!X,0 屶OH$8|0gnZ dE_W:a<#n|ՎD}oc|pT2*++Y8 ]z#=/F8FtC;|}nOO-ϵA RXsiƐ4]" 6}cZ/w?_-X#_F~o4Gz|tt:Hm>=]oșr7QuE[`Ν i;^:ף`AccX,fA"YYu Cߝ|9 !I~ 6/FX5xp|r)S ǧ',s @JrBD&O [Yp6 `؁K`D4tàa!CW>d誵=5ЋHp y߷r]8^A#D  u,#c 0p-HqֵI*T'Io;Y`;Ap6 555+ ed2T*.a!K4 $t(Ύ)|A4s eA+_~I"Rpq"1܃`G;6픝H=9=p'h3PY̍uwr3apogWdqnB*//i;^J^GSS9ݎ 0 df/]> `3l!c$ #~,:IA2_j˸o! ѓq,'] {iK/>z>4CX_wG炩GOW,lׅmh8}i#,Ox LRBBFaZB^Gcc#j5!?Xr~I"6[x oGƔOMJ P]_Kt7Gē;@' ;vܹK³@)g6e`c^^(ݱapA!d2  ,, S w|{qq"~(n|>/8O6I'[}0x`”4>Ǚ7 Q{~#)@hz <D. <^%'cĬ _:\hDph3ع/wsKaa! n &"Dh)*&n$Y\0 YvN{70@Ph$.]E?ѿ슍c'ckx@wn@w|i{C9znX _xpwh3inIRHRaEUWW+Y4@Ʉŋ!K@h St9C$w_)ƃy8EA Y@>΂]x@yǏ-3᲻Wrں.5m3nl/ڝ>\ 7E<`4 #\Nm}~h|+ҰbVf ],bVE}}}Hێ_`z 7*QTK\9P*,AF$<R#6&/`,ٛ0 do]՛_>wl8}~YPBf2L7_s}tl{ą{G@mXÿXo;nrxxCs֩bTVVB",AJ_W,\.esgH^dhwWȳGn J {_݀肩gE" .=һmtۮ8mxh G1E&ARMlFWW`߾}\+1q%%%~ '.X *"1(***ѕ>|µ%eoĐ4 j w…/7#=q _&_x]}XZP|R*PTthll^,׋ $},@ ;^j; TM; 4vW5,}h}y`a'uZ<+ܴ7Nl57A5xc޶:ސyM\1&;S"K@DDDDDwl-N[wFyhX`hnz>}^vN܆3Ba31%qA]F@߱ `t%1s҂GB"D DDDDDWr"v{F?Z,@ctfj wn VE" 7Ӝȳܳ^ׅbtmX`0#w}sPVV z=zf X@|pT2*++Y"1(Ln Ne!W@IIId[$9 =G p\0v p\?Xӹ<<F6[o4\ Lm׽cVdq#b1j5 ICX"'V#,VRDaj $'D[0\.w88\#C@/ FVVLNbd?'3ԅ6[?޾4ucyd҅Z/`K>cD&!;;;ef]]]P*At,.8oNDDDDDDw\C@"@ @  cGe,apg12yp~)>r4pqUwh^O_!O?ߙF3ORn3ףbJJJ…A an.? dcܱcq*A$+ bQ0!\ʂaFڽ#eʼn .@* z=RSS^6"6.u~uv}=aUS[ш>b ܿ?kXY;wb3qF/?BDcQx ߆0li1a[r_IqB~~>.j0L K(iU&ξ\vڏ.vE GTk4Q]] ;B)c KDDDDD1O.{7\.7p==QPHR,SGuuuBt(v~z&,bEV6e ],{,Y"""""yK#9:\ |bosNI عs'b1a4r?h_? 
m3u_P利osNdeqI,zݡ@-vtDDDDD/.{p (6cSRRB^JJ v܉Ǐ/xe3m;w"Pz?~}Sj,/Q?8SDDDDD7)m| Hla40.dO_DKKwZo;Πa-;c;ei􂈈 e 'GtKӉblX씽z寸?/ϒypU^ڳۓ݌?o3EkBPFM X씥` KDDDDDq'"_d="GP%`zx c0%Af*hB˧F%Tj @ѯd+rAW1@Ʉŋ d( ݅W`^AD;Qm#/l߸M2%c0&QUU%"ht92rҡTJY "1(I)H\<Rv{rn#//\ .Z.KOoޟ~{¾WdhPm۶ᘸpɒZooӷ焴}XofEa͛lg>]VVy>#̜ ^$́ ef3>b(J> DDDDD<|p ;Bo.}:_47~o=|t% Hy݆ -[zx˜}n}@|tffBΔMIIztd2٬~sfṵ{>BzmTT-,1o $\ݑx |twY )Ky!ksh|w>_&B Ԇ?~o o79bKDmb"cn,+n{UVu߿Y c=:}~h?6|?A%{[{/}91=O0X__^yBYJ5.dN9˗mQq[;۰"G<1NpwzYt6f+}ch]\xpт{>.?a7.o>R˴ܳr.}:v㓷 |Ў0ʊbTWWx&)Xc|K aO_?l }b;;MMM#06!KDDDDD#1>irw9̑ۆ}O)+JMVP`߸~Wƿt N"%Nه7%ux!x, ޶OQ cgl6ѣj~ݙ۳j$.걐3e=] `Wl$a K4\Cpڻ.G¢%,aa݀YH\E!G#5Hg"AE#e+'8#/GVK{k1>\h΂U?o q8P@6Yz>X??B*{XP|vG iѯr87!lhjjB]]]HLRjdgge}|CYc͡[0v.HY5!uwcp. jJJJD^`W)b^} D"odDq222h"/bY乍Auv׺mmƼ ,Y,ĦU(~p՜~HT Ʉؿ?Zy2 j  na(ˮ@h\Cp|r40h(ȭkHy|<a"HO; f؁Hz>bAs`xC fbH߆$},<<'rZ0Zd.Uhtw\Gzo &JQYYj?~`?Ċv>¦wAiuxMhd9溯k : '7[و Ǯ}z/ ò4ۂIg/϶//EUO%`Ż;kDUM zpt.᏿+?X{ofyd+ؓ3վ`*C_%cO1p۩Nw>_j'eپ=DZ Z Ʉjt:ttt`0`0 ENN$I@X%zVBu{c&cuqƔFVz&!!t(//Eii)֬Y]v. c0XVVM9 Ѭ+;y(³NULc {+헥Ţzغγ~O%x'ۍ<=RXY1p<4_\0!VVNJJBqq1jkkq!Fft:t:>}:^7*c:+U%k4I=B@qq1wЙ8Td&/HafcmODM$zhw%1hDo@D07=>f$eoZ`i%c}D]]]hjj>[:ts{5FxD (Ft7`H82'04OtDP# у~Oź[ hV4){y\~}ILLĆ \v26I&AkS ՞iI+W(/?~BB٩ \$_=-1rd,VhDK{C_Ny&#O$e"("n GX  MGܾqY<֢^" ȀѬiR655%%%.ݻ.OGeKE2c2m{-({kёax6=뱝Hlyy_ӡ=f07d2!'' &͝ ao 7{;˅"z@WX,P!DE4 0nH,Ccl[ X Q|1#GxZf^k$#pˌ=ϻV}N۶&sbͅQTTM2`?jشi^fKSNM6 @X&d@H|:"oEpR P$!8~)Ÿ܂0 hF [$cjED#|>gDD\x8~ _\pM޼5(,:o/z+)+JT* P_jj]tصkߏNƚfz NrOhpmOf, @r<ۀ'TrA>-x~*{Vri"gL ameR&c@|xMd/wfb̔8zW8%$$l^dZZ3СCB!Zja4jDxǣ:999Sֶ`T2VСC0N%$$@RW_oNݶa,yIMLRd0s D x^ɏ ;xlL$cEA חVAƊKy}/3LBbp޽Enn.!ؒVzjhuI {[Z0DDDDDDD褬Ď#>0LBUb^?o8zR=zDDDDDD>*#rǻTsڧGΓX(UF\7c)U &*-f|t-- //ELT\7cQ9O#u{=eBWcls8b;M6o"O7Xܧk غNx;n;{]ڽ%nSJ6@=+_ghxo߿z444`3|&$$k֬qZ`JTPTF`;ߓO-G3 Y""""""sb7jOu7ofV;=~b!j8X/Z0\nA#V)YqĀW~nYZP;=S\7wz;TzPUrwQ.9N?*)ZqԖ}ʈSޒVO%wz,r݌#F,LoG8j_uZT1:=$ds^:ނ7sG PSs8|çd'G8U׻bڵKhK`6QVV沍Vu; t:c1Bul||<C3p'"A4}X0xpa~iK\_=or5vnSV|;lͻIJVQ됹Doڂ=;*>$Y?%JGUMފ-\"+?C)瞐u_vۛC[:O =zZ6VlYً=NGزV`g/չ,-ђ0-pe$'ykF:n=aV<T]]a,& b a,6= q 
QmC[/ʺeO:pD*$G{2H @o߽{w6- U5-;=h}zlpT>WBQs/ b=zy?]@$ y%pf1{Yu_vp/.`O98B` ͅBhĮ]p^کZl6̙37n8&dLLTA4}tɌk&ނ $8wLc&է=*Eݗ&].ًN#gyK_ٱ78p9:2F:?/ۻw/6nNwJٜV+,\.w wa\n!*V H,cPs )++Cϭa A ۘ+A]>8:9T@ϱG?w rM8.fN?'<u_vn+vif}p"Z<[ &|~: I&XKHHNCIIPX,ƍ!q^ {EYv&d\[[ZNk!"A4s rjҤ[??cܦL:wc߭[d8pҤZh__Q@H$(//dBmm-, f3f3P(R aL"K8{}.UG fYj­5!k غNqhi7߷˜WtdYdu x' X9lܸCLeK^;>o"ԃU\0QƮ|]@=Ձ\$Q;*olYW~^:zʛu˘ճ>Zh Q^^,&d|LE.ˎ)OȖXT1TSoG j_(Fݗx,b:%{3 /UckAEX&E&Ԟ@)ز1ںN/;PqĈsnFu3^y)w2"/jV&3X,\[03 Y""""""T%a=e[ҝnYEbL3*PqԈTǢD ~bJhsG5 j-K٪F>=ZX&m*Źq9n_(Fx=tGo Ex~͛ƖtZ?v:w2"/`{WNTYY{Enn.:C%"""""AolIǮ}zn?뷮SzuYZ,s~ *~}m4zۊ#$/ܧo].\˯k~fim |K7 jn˒_T2FÅfDDDDDD>-KQqԈj+?ߪ9%XW ۟2O Y""""""¿ U5-x,r%_[-hnƖ J]}*#"J!J'l6_yy9lj ؓJ}c.}툉\.w Y"kamx*9 "D3(H"Ccv)ug/ţɲX|E sC͛Y[iۖv3Vxb歡zX/ёa@^b60 O> gX,fQ'HMMEIIL_ (R8G QH%Ay\d ׃u []>mɘt9|T{D/jB2V,)VZZ ݧAL`,//QDc`B6_g{$1 D9h>|ba08ވ`lݻX/jjoyyC'T*!L8\0)qp IDATE2D9h)J4 f/AFDP,{_%$p<0!KDDDDDDDD^} bwb\PP0:.f,FS Y""""""""z`999Cmm-j52T*S{&ԥzFh40 ||`^o`,h$s@aa!ʰi&BPxt_VmzF^wZԋfD C.% q !w,]60,{LهqQ|@aaϵLl„,6= q QmC[/ߝD?m]V"4%ڻ9&eƶ5Ĉù3Ѭjj'u_dD"ZFQQ:C%"""""r 0ͨjYd^@n"lrۙ pkBbOD#fJYYzn cg@|x?[;cJ3($AгIYw*t'bL)xM4+T vtիtݡC8/D",Yr; m-Mlq 99[HBBBdJSg/[:SНSWتf}|>>g%XT2[PRT^KףjZB@II `0@T2ӌ Yo\a ֳf/x>D!F x~*ba9bXGp nW+blɉ"mcncΓe^8nLDh!(g%MB8/HK+pSWsh2>?pNƏLݻw`0J%gΜի!Hi^X,d0!Kv҅7.3 ܀e!Ghq #ݮ#XHȷ S@+\)Vbv8Ъ¸(l[C_!:2 ёh2 >8~ 2 fzi(|-,,޽{ ***Zf𼤨 B,i0!K?p =G"(F "S䎥 㬂 #GأD8u* `B.#s爉GzFGG駟f"<2w\T*ɓBHAVw3w"m bJ9k5^mUӎXVxfXtd8Dp֐_l( tj4RD]]Ъ^i6a2ض>T* &ux1iLt\pJHRdee!44 feeܹshnn`9l#8Gxh?;%cV%Ș{g(T#8D!{܎_BoYhZm+TGX5z{ HB>O5㗠;y$T*T*G,mH$0`BփO&us#&2rv&di\muMǃ> 3g/ xrO%hsq9b ))))4y¼mOʎC4'f§XId{l."5q^.x^UqVƝ+m=}b,Yg`E m3;ƛ55Bw*zo9il^/HcHP`0T+1;::\3j(t8~T8΄,:2ዟ?GGG###)pB Up#-4Rʠr =GX\a2JА2dqdBZB"5Bwm5&)\Xvz(Jzt:t:H$( ]FFqesf6mr;& %%%PN praR_nwVs… Ls"%%E8-yUD9G(J4 f7zZc|hw̟?R===1s$ @HR˸um]W]A+ lj-w/i]4j11QTV0f5]nKIVP?q85 ]3sN$* 3x*))bP(BF^"X,t:ttt@!>> y&d_#ii<̓GvXGXu9sݗAMBBNw Y_dl}X:qNo. 
_ [퐤fVms^ױ+aE#mux 9|q9%ؼb MOd|R+t:!]PP6ɪT*RRСCg}:2{N9H dV!p asT͝*>>u&X$ TSM{ ڑ1wηm앴gGuAn|(+bclGul`lkEOm7n]u6yIXdl[-a5ڹs'j5%dJoūD"Aii)jP Yr=3w ٣Ϟlu YD#e.UMkmmmLNQHyIem+nNnz wM\޿crV95cw,sl}n|7tDZ'W 7j+@|Id;A1r~Dl[ )ף2ULzի=^"F&d}ηNƉCll}^s爻#嘘 4-Ν{o!7 ց>XK7ݶ<Si{`KeùXlx+~===0m_ON>͝C9KQTA1L_[7Dm/RkD \^(lD4'f{meKI M&I_f =LȒrvaaa M Ǫ/-+<a9GDSƩBvxۆ^ "4AUc5rL:ؒ}}o[+ifuYfw%E3K'J % >c.B5 Cy~d2 ձ999 aB\^Ne%M>F#9sу fⓈ`SRi6QVVRAnn.k.h40P(8fV+,\ڜ wa\n!*Vy=q Aeee5[#<!x#"歱W;p|v s 4^f@`0T* <2\T*zt:SUbX,Ν;i%_Ĝ%K\ngB6 秛"DFDL?_\.\~6=q#nk@RM>~ڻw/ a4a6]*_G-++cubBX,X,x{&b'F":Zv̘@VbBӧ'|*Z Z8)&dhJfa*,\/H ΄f%&dhh1OZhZ{& B`„, mˠyѮ]鄟bӢRfZعs'q)//g^„,-Lv!@8ܯ^_vv6 :5sB2VPmPף F:999LDDDDDDD]}ۄ>k0ڙ&J!JUT*ոLT*TVVzիǭr500Lbƍꫯ]41N3!KD~- xrO%G1D9hId{l."5qB6mE!I&Ak?nDFr,O_Æ_}"R.$-$! """"""@d[z5[&v& %%%Bg~~>jeee a,=0ZZաP*PT00`-Mm%"""""""uUl}}=0 ?_VZZ2TWW 8%aV^R40!KDDDDDDD>~_\0M=4r߼F@<_DR磺FmEBJիW 6L_4+1!KDDDDDDDDܣz=t::::T*ꫯ2x40!KDDDDDDDDLRy]~~>6n܈J D˟Ҭ}vl߾~|;;;~>voߎy&l߾ǎN9sM~X^^m۶ǿ˿`1xPfǛ, 1h&ډD"Z`5 LR;vݻw堷3at-466z sf֭jʕ+&a۶mX,~|ߺu˫WƉ} ;v GQaB^CCd2PSS@|ǣh|5qA)))سgߏbʕ+tJP_<-㧲_BU,ن Y hhhh@FF Mm{qA37 `ǎpM,2 &:19'%%/DD43QTTM2쵕a޽(..M fOdee!;;裏vZ755A š:diZMFF xbFl߾o6N<}^^ ǦMyfa͛7UUUr9&"#țaXrJō*22ZVHԊb?b񤞏۴i˶jB;?@k|0?#TUU IVv;+++X} dee!.. 8z T*T*ED\ptkoGLd$rLR@L&$766$* VCMM yǵX,~&\VBSS&d\.@"gWX,8p6555BA̝;W ȱ_$<(Tq9w)J4 f_>{]eGn{Nd>ebxu;F3>㍈>#~ jhhh@}}= ˣFDS͛ ,CD#8G7ٓ_Xs1ax!n1c:.8qJdDDDDpGegg#22ia{϶mPSSFh4IM6yS5 :;;7SRRpU uh>D9sy[aahЀF466"##c0==]ئUUU裏鲰kXqZ%hZDFF:-•NmkjjPUUv _!22ZV?8nu ""/(++\(0eVZ*TUUxQ^^2Sك۷;%O/^~wvzܼU| g޽eeeñuF@AbXo{mӦMr6;;BL\\pymUA)))XxKLJJ z]Rf­[j*dddj;OS`~6TFFƸs4A%Q<& ?O#8GpvqeڷǍo\\ڵktsjx%9H""""zzz`4I Q@`BC8aſ hll~^J~!D>ꎥ 8ԔX~=BFDDn0!KDa =@Cp\Ġ"f6A  Y""IW\.l ra @.΄l;|0.7_`B+$1(Dsč7؇bXRYskFx>BFDDD(K"d/9K΄lkkkC[murBD~1Gtww q/ 9㍈f/6!םBrBi1<<,\3 |.+'09縚!D!塡!cY>G8&䉱 :q f0 DDDDS Y""is14? 
YQDBDDD4r)% M&B±g(+q8GqDZ*ݩRȋas"""C@)˗.]b@ȫk(z9s{*ŋ)ȫ\"\BD4äR)J%f8VWzL;FO:2~\v .d`hY94{c"1J##sD!IO7تoܸs)w I=oD6`2)R1D^FD:?Djj*JJJqD xJ}"+w `'%!61JA#vR9a1R"<2CyI'?퇈Y[j꽳?w^$iD4KH@C>sє:w:::'ZF#slgDSA E!U<Ǡ8&9࿾~衇0w\aD4 pׯW^CcVF($a?k׮9({•o4sDx:9bk6L(1GSF;}5?x#".b>*ώYKDl'?yIPO 9!A98GxQb%b~ЄܼyS2_t;w.~ ?_b|zX59{NJwn0gzjVw0aqĉJbO0pE^Ps~J$D#sq $gΜ9غu+-[31GCe0*?TftMʯl1i)Mfb5f{z='|!-Sϩ$9Zm7cӭ &]tHt`Fg2 0 0309}]3 /?/[_&ϔqlMF]=Ϩi`}>f gHӌ%_c>7^#rGA'Ç7r/msY\/M9P\B21fY?OU[[+ͦ/,Ζl󕟟/U:{$wHyO 0?5Kɵkvr󕟟T<Y{'IqntfOW/1;E-^6wnKɚ9 o:l;$;e5Fkms鶹_76h?}I=|$I-5[ӌ yι\{60{Ln?g&I,"DP~/ rifZfSo[EN5YiS) }Krw(cZP~9 zJ;-W'tߪ=e56Ӫxj{}C<@"Ĝ7^[p;OU|LŏrH'`rco$ssV{Zq_V/^⻿‡RCOi=gEEu.* n>rJEyʳwWEy*`غ''8!LYCn\k ˈm>oO>1y^#\Aխѫ䎸ӆ?IҞg=Jl<}zE/~ؠm?{YU(I*+Tn :Sh[j}Wz 3fUNwZU ֚cݹ>A{ޣu+uUoF|!--M3)))Ϸy橣oyVjvp< k_MۢOdf7SbQNN퓑rSn:n~ժUOIgRRyOk0[wTnvIֿW$I~!`ڏJԐ/sN{>t&)*svRmmz{{}LΟ?T5OT|2L2LJ/]Rwkf$rիW>'}FsFDymhh@6y[b= v߄mL0Q庈~/'}7XbEmx>Oj$k. dA傤/F %I_7W1~+5{Xm;z&dXQF_ѣW=S"(ū~7>Wsv?ﱲ_wW/D@1BT{ZW~'p/v{YB^嫊ӚU7xR:^֬*TSջTː<[GpJ{ݝ#,!xwhϳ25f/wܠh?5ߥ__ylWwN>y9T_ v'"@3\u7^ <\9lU) 88GUo(Grۿpi4QڣvƢۙ;a۫?Vnn.@#` rh3aDz{{UZZVI҆ dX( b,0J6Mov{}$%%d`J%PQZV{n}dQ:vΜ93~._0 c&c(ݭdN, "vΞ~jY ?2C?RDq89hS&W({EuwwW[[9Y` iV'LIش&knf d e+**ק+VHU]]-IY` N3) Jm3iɷNPBZrgY^^$7ߔD( %H2\&8,d0m2a;u#@0Hz".N)F`X7w(5eZ#~,ÅnyyyZrjjjd۹0j@HFƺEH^}'Ænyyy׹0jPXh e~BB%I .@И20-Z/x'\n]xQ \ ,6^a[$CǏ90֭USEEFD wPBǎ<澒rJUWW`D! MT9e+**T]]+W*++k\t7ߔ$sƅbQNN퓑rS,5a[8BY0P611QׯddR&~I풺[[e5KfqY )P;mF:Y0oΨr٫nz-u3^j0ŋxbT%ה]vd\ 2o&xglfo774JBu:{@6??0d@HWt=?/_|QTeZURRB!4,!MIի) &reee)//bjUuu%s F F]VBgٺuIQ)))vΞ=+IZD9)@i $b8Y@TkCqT54~*{;{|_jS(ڡVU\\|^$].LKl{@-11Q+VЊӃ IDAT+DA,֯_O!@qqJ]Nw8, ^ZǎQ"))IfY999ͥ rl"I!K ld(b% BdtI  566;6M'N,bZyy(‚zedeX|[Vutt|rL& 0Y,m>)0%pno=L&-_\!o>-ZHfgyeeo4.Ijkko~0L&L&%P `.UY6#cǎz={V<̘;Gr}]]]O h$k. 
dҏn@0JNN,qݯɦ:)0 eff`0l6 @N3)n:  lٲENB4]B2%%E6m' :0$$$hɒ%JJJ@XVP) ˕<:VUtϭ/#d2@s]]]vΞ=HrP`J  ʔ.jjjRoo/(D r%߿_6b@bY' .fl6=N bh $=ux]~ mٲENB ,C()@!0Rrr2@X0B(u֐o %%%QHB` IMM͛N`l6SOOgB?D[*///PRR}}}bU__p?'].IROObNN:˷@)$55U7o8qB/Mzq\pAfΜ)}p߲qq>7 kX,$m1KϞ4])~3.Ns%#=k|<,Q\YYY!# b(''GmHH\J-R.:.N9ScKʑ˥63"9R%-5򘑱@(بڶ_c=<Sdd`,G]aܶQA| ߒ/-e1.UY6#` q:jjjRoo/2H.`j$k. dn@,r%dD#@L̔‡[7b,b֭[e(;$BY0Y̖-[t:)||({߭x>$5X磸8\ʍO>;]aoF @ y^JJ6mwOL~g\.F1Ԭp0wMW~IR[\.\2E~'}'` IHHНwީ$jUuu^|EP/瞓Ν ͂;;00w|kn9M3Xx9 .jjjRoo/@XYV0nlnnV^M1e!2! @1-33S!fX,jkk4JXuVn& 2./V+ Fel"I!v A dee<'eJJ6m]S J0$$$hɒ%JJJ@XVP@1\L =~PI@(بn 9E!DrՅԶ_MMM<X,"dB.]fQ ~e2(D@L@h,֯_O!4YĴ-[tR0)ڵKgϞ }KK ()@!0Rrr21ᥗ^jY֭[C)W||222D! 1B)$55U7ol@l6,Q0!-Z$D!"@(بڶk! ObP"` q:jjjRoo/(41JNN,bZff CHmf3aeXF!&1,@Lۺu(,˘޻KZ2bbloo7>|8l"I!v A1Ȗu·cuر!߰ai(++K6-)))ڴif3(S{鄹ϧo| 'z. Zd(VU%%%AO?JKK}9iJ{GwJjY~?{u˵b 8(S^^,s=^=cfY;wTbնkJ^=_3fRs3}v_[\\Jcc)\WW@XL޼'';n~=?F\uuu!WSSS-O<8it"ҥKڿ70d2Qku֐áxedd())Qh%0If̊Tm޼Yffɓ')F {K,a,L^W}ΈS=|Vt\9Ow?ǧnZZ7(بl68q"ZtJ/ v} {2gϞ d2 ;$й_% ܵ7dl16ꋩ:KKKΝ;N2Z|, QjWyyyc"dPv™9=Ne[C>ʺm>ھk1@@O>$ 0! 
ecիW))uW=4cٴ&5F@Z~='`ʎ5+ө&r 1e0O_@]n\s)(9673Qs3|:>'';kkkUTT$I9?,Pqqm6ahCYɥs|.Dh˗bWSS O<4=ٸqjjj"z7nTEE*++ᔔxtO.Zv٩Y{QEEjjj&%|n^wnJ;MKt!رC<Յm3 d2Q K [[[={hϞ=;Њ!s 4ѣ']$9rgZoF3 d۲eTVV6u:uJZv*++'|F e c'\%&&/_1;eX σnPb7uڨ\{Ď޽{f͚kuO sv&???jUhrBPNRUUjkkr 9O=HYYBjK;72̈́{ |سϣx _c#][[kJk׮8p@+VBr8ޮw}W?8?@s?@)$---%h4Tkn(#kuUVVRw`PQQjkk==~[gÆ eCVTT >|X˖-$m߾]G񴩭ƍp8R6l4zװqf_A{5\ۡB3ghƍ2 ڶm>OmL*m߾]@x;-[s^8t?^;wԪUcDDjj6o&6M'OZnUXXm۶4ucl2UUU|BLba dC}3e˖ {c(0lGIYF.K͞c:rjkk <'NÎU j?72 *(({ZPEEE:z_H[\\+ƪ\[hѤ!Tpʂ~;ѫ#Viiϲx@jkko}K7=yqKrr__͝;W:~x:tHOYwGvϟ<>g}N}φ<7xOOq_333uy{u.!rիWCGU}}***TWWG}4wq>3zL-]TK,ѝwɫ qبڶBgV||<d,Ff)P-**RuuOVbŊn/:tиmnmɒ%ѕ+W[oi* IDAT;d=[\oowKHHv]ͦ7n;R#P;4жFgnȏϜ9S׮] pj$ ^wsL̹W/n5gΜqϿۿUuuϲ{9sSNNd"tnxc1h\1ˠq999aߔ/_|ԩSr:ҷ;tqO;sL~Zt/_}{㭧gJ܃ s1f&h7=Xs麨gr ҭO՗k3`_;qs8ϷKy8sHo -YSX@w_7)^OiI"6c9~˻pu}F߈?2pqzp0R&ȸ3 QgMmϻPxGUNW~^_TfBVXJ7bAt3'hf4]rB̩}f ޣ#ai34{1m`ۆ[ϩbß}o16YE O빋d n~o9P:@Y%?M꯿4` $}ߔiAWF ]ɷtϊzz3g~xaz~9CPrIfTҜ!K=h wn|O==dTjWp@qƙ!;q<p1nwV=}_l}:qf5COU1(.7?|g! xNz{קƯ8Yxiܠ4-;}o ʰ@-)_[:Wο>{}N$鯿}LicۄC?gʞi։_{YtQi {{>@yqyh?{+`]}\e/ u_jZ~n?h_gɉ_\?y;ܸC,C2%%E_}J7B@־}n:FzK>N?/϶\^q$ݙ@Р]m+I{?qUIӠs5?=*qvkzn Nw=mS3|N~_O^~:kpk=צmowi =m=צ=~[Atkczug%IPoZNQcM>xմ(-{Ҳ\M$}rͿcz[)VUgt (2-vbe/ԏg:smzPx>bs@~M%XyHCw۾>gLUfݛ~`H>3Z\M;c˥o'K;Ϳyh>nM^=XwEzMU>V ~':l\=?ဏucbm_O~ W?Cg鯿}Ӡ7~Gѯ`R8f~PI,r444h߾}\9/ [hrZ]TߕٷB@uugޞ~}xQ}=Jinwõ;Feߛ>.}^]:.Gw빋!Ͼ}6}i@Yf {Y(׃ws~g'r=>CC94l}CuC ieXFv`l5#d@؜CsgG }iiRc%I;vPnnnض=a,X¡~}6s&9G ml'yZf̺-@B.]~gu|N1=mȦF a2X,׫k)B ;ZzR &͔̕GCn6]ĔDY2 L!@L8Vǹ> A R!/2nm3?WF_?W l6Zj(F pE!lj> @tS}}=d(:飏>ذg\JQ.%>pBG|>@8Ku)5))jնk1^#aٛ]v+Zu@8S":tH3B,Ubb"`r:!υ &11Qf9 8BL "`߾}cG s)$cj;vP[rssþѴ3`|f~A̔`fXo|jkk }RR7"l:%@,ۺul6Qnoe2dXtِ饗twhe͓baB @ٲeN'bпۿrѨ1Ңe-MMMU||<'bŹzk60B/ө<%%%QL`sLYKoovf̘8qBw}|ANF :dX LVJJJ(79^.\lĉ㏕ , BvvYfKC+C/R^^Qe… ga G6Ld!*,,}r߽ҀԴi|HpkllTJJ QŋڿV\9LD@RR}lf0w__n:͛G L"999JKKӛo)IvwwO>!Y" >>^K,Qyy :p%UWFFF0>sNk(g}ODCd]|YR{{mĉz cIʺG׏? 
c#@1ڹsM6a{No~EeeeL8w(ŋG}}'EĹ\.e rl6JKKuM[nܦ/`~ 8}Ngf%&&RHV/^Tww>رcrss)`1B);֏ ~E ,++Q"`HWc#D ݮٳgf͛cꫪJK.%'r{FH/ y/kD`2$ ;c e.\͛7SP l6>cYb˗U[[+I~j%B 9N>}Z7o՜LX搵Z֋/H!@DTZrO(k0tS(2a#dO:R;E2OOOWFFF477ѣr8eFQ? />>^ԍrJX Z|9F;wTo ,HQfjZlY̅޽[{u*ݻwsQRR|=!ܬ#Gg7ITZZC ʺZlohŊ ֈbӦ9gt'FVQQ"0@JOO,UQQ***btȑ#DSlΝ;5m4:tH~xO'LeF ײeTVVFq\[[[;#mc% LQVVbuTVV.mܸQ9B`q6mRBB$ ~ev{$))"LƨH;WQQ 5+**1h4jϞ=ƍ% N0ոC٦&%%%͛'QggL&ϲwjnn[wӦM;% 6^ Sxs*6 Xo*++ә3gtQl6l6tfiŊҗ\ю;> |t>1L)"sȖo>SobJRQQc}vѓ /")..N{opDݫŋkܹZvg_U[[p;EEE:x5p[v/^쳏7niܣcZX۶mӶm<<>T ȑ#ZvΝS^xoZwgΜ$9sƧUUU}ކ;>ϻ۷o׽YgƍC#x @쉏ג%KL \ffY~_|c \\!QSS'##NaagJm/555.8\ݻ==Cq|aW~~GڿݻGUs)..y{\3zhtx/((r]5ޚ5k0u<|ƕԾ\.WSSӐ{_# KT.o@m۶!+++}{]>Awhh0<455>ZlOfddxӂWAA`0x3Nv>|UYY:p+==}಩`089J>lgg`0{ G>]x=îJWggs.+++=lٲeJO[7𾠠s++&65 Mf:Zt3j䡣&m S]&Q,-jLc\q΄̔'dFS 5=;әt*ޯqz>sn]|9sT H+zs,Lqu@ m9=;ϛXg vAo?8g@3lEcǎe%isoݺe |',:f:}vsn|gA33~xgΜI+:UUUOYu-XlgN\|gn;Y,g;&WSx3}ltRgy ӜdgͥK&͌yY ~ J3Y,sds}+<9?OۓfSfK"ΪuV*d-:mȹ9.>f{vS,[,}E̙TJ1s>7S=cY<ܞ5jz(++Sgg6mڤ!=H*++Q^86rWUUbaL%1WV$}Ϟ=󺶰QggFFFtqc8KܬN;vL'mjkkDD|1(++Ӊ'̋>CWߌqo{n^'{NO(eLa WN{lՙ]ݾ}l裏`I2̔Y1S9q℆444 lӃxTWWgyddDpXǏWWW$i``@ׯי3g&=^iLYnnnV8<TUUb}]gQ9O&CsDss hϞ=ڳgTUUe{<dtС'466j .#ݻqIR0ŋgY^^6t˖-ꫯfVVV:{<7buY-_\w5;;;uc_x555޽?<>>v7nӧs^~iݺuKקk׮t^y91zꩧyf]V~ݼy/{=yf577kttT7o6ͫW{ڲey>.\CԩS~"p8Vssz-gg… joo7EhooÇ4k^z|^hQg~3ϒ$}gvvvvSG555/xh6N&dI&dI&dI&de~Zn~d\"Iz&훘0߳N:~Ŋzf7ߤk޽93mׯ_WIIH/***zt=כB}{ٟb:}&&&;﨨(Zf>SIғO>+Vajxxشモ$\R<9ΚSxرcV$UTTեVIWgVbCC$) ͘ݹsz{{%IΝq[;\m̗ꫯʲ,\.8p ӹtGv$8p@:nkN$f;>E@`FcƾRPߺuv86SĞFѡ%o,eYM6,d,2mȚi//FF-37gWWW:ӽgMu_XL5,ijmcV}}}K膇%C챇fh[[$i߾}r\]vͨx\92SOO`W 4|m g]!e/J23RMvO:vΝ i5dtu˲k.3+ kZB sg ccc3qW)555־}$wga0w[iizy,Ӷm&tct\fs%LU{iLӞ:---ڪIE-RLᴢ"L^,2=-vM< eh4j3ג drLR狹&?-RCCYy\r>WmmmYgN s}D"[{ D"TAڙf=ٞ~Sؘ%׫GJvޭx\.%ISv\.DۭB!% mذArJ&kRlIϧjllLXlʙ:p@ɞ霫Ңx<1m۶M>O^WeR;dZaRjkk3Ls)٬3;۱ەH$UYY)k v8>@/bus?kXm۶IRR-]Y [H$F\.YX,h4j~ˡ3V ɤWSDL~橽nM. 
`E#Ȕkg3lRuwwD¬=kW(tͦ&S9W(J8IDAT1k\.566ׅR__6ױX̴+L[e˖-SIII J279rD}}}ڱcǬSv.EQ,BכeYiG kppPeٓS]/ߌE24a3v<8|>Sεg2ccciSO6DBuuup83O:_lQQ$Wkkkھ3gѣٰaӶg(dI&dI&dI&dI&dI|gRTW\۷jժ7oԍ7&xbm;w\֋I&dI&dI&dI&dI&dgfQQ,Y),^BA ,Y(}?;@IENDB`././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/contributor/devstack.rst0000664000175100017510000000202115033033467022745 0ustar00mylesmyles============================== Installing development sandbox ============================== In a development environment created by devstack_, Ceilometer can be tested alongside other OpenStack services. Configuring devstack ==================== 1. Download devstack_. 2. Create a ``local.conf`` file as input to devstack. 3. The ceilometer services are not enabled by default, so they must be enabled in ``local.conf`` but adding the following:: # Enable the Ceilometer devstack plugin enable_plugin ceilometer https://opendev.org/openstack/ceilometer.git By default, all ceilometer services except for ceilometer-ipmi agent will be enabled 4. Enable Gnocchi storage support by including the following in ``local.conf``:: CEILOMETER_BACKEND=gnocchi Optionally, services which extend Ceilometer can be enabled:: enable_plugin aodh https://opendev.org/openstack/aodh These plugins should be added before ceilometer. 5. ``./stack.sh`` .. _devstack: https://docs.openstack.org/devstack/latest/ ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/contributor/events.rst0000664000175100017510000002714515033033467022463 0ustar00mylesmyles.. Copyright 2013 Rackspace Hosting. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _events: =========================== Events and Event Processing =========================== Events vs. Samples ~~~~~~~~~~~~~~~~~~ In addition to Meters, and related Sample data, Ceilometer can also process Events. While a Sample represents a single numeric datapoint, driving a Meter that represents the changes in that value over time, an Event represents the state of an object in an OpenStack service (such as an Instance in Nova, or an Image in Glance) at a point in time when something of interest has occurred. This can include non-numeric data, such as an instance's flavor, or network address. In general, Events let you know when something has changed about an object in an OpenStack system, such as the resize of an instance, or creation of an image. While Samples can be relatively cheap (small), disposable (losing an individual sample datapoint won't matter much), and fast, Events are larger, more informative, and should be handled more consistently (you do not want to lose one). Event Structure ~~~~~~~~~~~~~~~ To facilitate downstream processing (billing and/or aggregation), a `minimum required data set and format ` has been defined for services, however events generally contain the following information: event_type A dotted string defining what event occurred, such as ``compute.instance.resize.start`` message_id A UUID for this event. generated A timestamp of when the event occurred on the source system. traits A flat mapping of key-value pairs. The event's Traits contain most of the details of the event. Traits are typed, and can be strings, ints, floats, or datetimes. 
raw (Optional) Mainly for auditing purpose, the full notification message can be stored (unindexed) for future evaluation. Events from Notifications ~~~~~~~~~~~~~~~~~~~~~~~~~ Events are primarily created via the notifications system in OpenStack. OpenStack systems, such as Nova, Glance, Neutron, etc. will emit notifications in a JSON format to the message queue when some notable action is taken by that system. Ceilometer will consume such notifications from the message queue, and process them. The general philosophy of notifications in OpenStack is to emit any and all data someone might need, and let the consumer filter out what they are not interested in. In order to make processing simpler and more efficient, the notifications are stored and processed within Ceilometer as Events. The notification payload, which can be an arbitrarily complex JSON data structure, is converted to a flat set of key-value pairs known as Traits. This conversion is specified by a config file, so that only the specific fields within the notification that are actually needed for processing the event will have to be stored as Traits. Note that the Event format is meant for efficient processing and querying, there are other means available for archiving notifications (i.e. for audit purposes, etc), possibly to different datastores. Converting Notifications to Events ---------------------------------- In order to make it easier to allow users to extract what they need, the conversion from Notifications to Events is driven by a configuration file (specified by the flag definitions_cfg_file_ in :file:`ceilometer.conf`). This includes descriptions of how to map fields in the notification body to Traits, and optional plugins for doing any programmatic translations (splitting a string, forcing case, etc.) The mapping of notifications to events is defined per event_type, which can be wildcarded. Traits are added to events if the corresponding fields in the notification exist and are non-null. 
(As a special case, an empty string is considered null for non-text traits. This is due to some openstack projects (mostly Nova) using empty string for null dates.) If the definitions file is not present, a warning will be logged, but an empty set of definitions will be assumed. By default, any notifications that do not have a corresponding event definition in the definitions file will be converted to events with a set of minimal, default traits. This can be changed by setting the flag drop_unmatched_notifications_ in the :file:`ceilometer.conf` file. If this is set to True, then any notifications that don't have events defined for them in the file will be dropped. This can be what you want, the notification system is quite chatty by design (notifications philosophy is "tell us everything, we'll ignore what we don't need"), so you may want to ignore the noisier ones if you don't use them. .. _definitions_cfg_file: http://docs.openstack.org/trunk/config-reference/content/ch_configuring-openstack-telemetry.html .. _drop_unmatched_notifications: http://docs.openstack.org/trunk/config-reference/content/ch_configuring-openstack-telemetry.html There is a set of default traits (all are TEXT type) that will be added to all events if the notification has the relevant data: * service: (All notifications should have this) notification's publisher * tenant_id * request_id * project_id * user_id These do not have to be specified in the event definition, they are automatically added, but their definitions can be overridden for a given ``event_type``. Definitions file format ----------------------- The event definitions file is in YAML format. It consists of a list of event definitions, which are mappings. Order is significant, the list of definitions is scanned in *reverse* order (last definition in the file to the first), to find a definition which matches the notification's event_type. That definition will be used to generate the Event. 
The reverse ordering is done because it is common to want to have a more general wildcarded definition (such as ``compute.instance.*``) with a set of traits common to all of those events, with a few more specific event definitions (like ``compute.instance.exists``) afterward that have all of the above traits, plus a few more. This lets you put the general definition first, followed by the specific ones, and use YAML mapping include syntax to avoid copying all of the trait definitions. Event Definitions ----------------- Each event definition is a mapping with two keys (both required): event_type This is a list (or a string, which will be taken as a 1 element list) of event_types this definition will handle. These can be wildcarded with unix shell glob syntax. An exclusion listing (starting with a '!') will exclude any types listed from matching. If ONLY exclusions are listed, the definition will match anything not matching the exclusions. traits This is a mapping, the keys are the trait names, and the values are trait definitions. Trait Definitions ----------------- Each trait definition is a mapping with the following keys: type (optional) The data type for this trait. (as a string). Valid options are: *text*, *int*, *float*, and *datetime*. defaults to *text* if not specified. fields A path specification for the field(s) in the notification you wish to extract for this trait. Specifications can be written to match multiple possible fields, the value for the trait will be derived from the matching fields that exist and have a non-null values in the notification. By default the value will be the first such field. (plugins can alter that, if they wish). This is normally a string, but, for convenience, it can be specified as a list of specifications, which will match the fields for all of them. (See `Field Path Specifications`_ for more info on this syntax.) 
plugin (optional) This is a mapping (For convenience, this value can also be specified as a string, which is interpreted as the name of a plugin to be loaded with no parameters) with the following keys: name (string) name of a plugin to load parameters (optional) Mapping of keyword arguments to pass to the plugin on initialization. (See documentation on each plugin to see what arguments it accepts.) Field Path Specifications ------------------------- The path specifications define which fields in the JSON notification body are extracted to provide the value for a given trait. The paths can be specified with a dot syntax (e.g. ``payload.host``). Square bracket syntax (e.g. ``payload[host]``) is also supported. In either case, if the key for the field you are looking for contains special characters, like '.', it will need to be quoted (with double or single quotes) like so: :: payload.image_meta.'org.openstack__1__architecture' The syntax used for the field specification is a variant of JSONPath, and is fairly flexible. 
(see: https://github.com/kennknowles/python-jsonpath-rw for more info) Example Definitions file ------------------------ :: --- - event_type: compute.instance.* traits: &instance_traits user_id: fields: payload.user_id instance_id: fields: payload.instance_id host: fields: publisher_id plugin: name: split parameters: segment: 1 max_split: 1 service_name: fields: publisher_id plugin: split instance_type_id: type: int fields: payload.instance_type_id os_architecture: fields: payload.image_meta.'org.openstack__1__architecture' launched_at: type: datetime fields: payload.launched_at deleted_at: type: datetime fields: payload.deleted_at - event_type: - compute.instance.exists - compute.instance.update traits: <<: *instance_traits audit_period_beginning: type: datetime fields: payload.audit_period_beginning audit_period_ending: type: datetime fields: payload.audit_period_ending Trait plugins ------------- Trait plugins can be used to do simple programmatic conversions on the value in a notification field, like splitting a string, lowercasing a value, converting a screwball date into ISO format, or the like. They are initialized with the parameters from the trait definition, if any, which can customize their behavior for a given trait. They are called with a list of all matching fields from the notification, so they can derive a value from multiple fields. The plugin will be called even if there are no fields found matching the field path(s), this lets a plugin set a default value, if needed. A plugin can also reject a value by returning *None*, which will cause the trait not to be added. If the plugin returns anything other than *None*, the trait's value will be set to whatever the plugin returned (coerced to the appropriate type for the trait). Building Notifications ~~~~~~~~~~~~~~~~~~~~~~ In general, the payload format OpenStack services emit could be described as the Wild West. 
The payloads are often arbitrary data dumps at the time of the event which is often susceptible to change. To make consumption easier, the Ceilometer team offers: CADF_, an open, cloud standard which helps model cloud events. .. _CADF: https://docs.openstack.org/pycadf/latest/ ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/contributor/gmr.rst0000664000175100017510000000605115033033467021735 0ustar00mylesmyles.. Copyright (c) 2014 OpenStack Foundation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Guru Meditation Reports ======================= Ceilometer contains a mechanism whereby developers and system administrators can generate a report about the state of a running Ceilometer executable. This report is called a *Guru Meditation Report* (*GMR* for short). Generating a GMR ---------------- A *GMR* can be generated by sending the *USR1* signal to any Ceilometer process with support (see below). The *GMR* will then be outputted standard error for that particular process. For example, suppose that ``ceilometer-polling`` has process id ``8675``, and was run with ``2>/var/log/ceilometer/ceilometer-polling.log``. Then, ``kill -USR1 8675`` will trigger the Guru Meditation report to be printed to ``/var/log/ceilometer/ceilometer-polling.log``. Structure of a GMR ------------------ The *GMR* is designed to be extensible; any particular executable may add its own sections. 
However, the base *GMR* consists of several sections: Package Shows information about the package to which this process belongs, including version information Threads Shows stack traces and thread ids for each of the threads within this process Green Threads Shows stack traces for each of the green threads within this process (green threads don't have thread ids) Configuration Lists all the configuration options currently accessible via the CONF object for the current process Adding Support for GMRs to New Executables ------------------------------------------ Adding support for a *GMR* to a given executable is fairly easy. First import the module (currently residing in oslo-incubator), as well as the Ceilometer version module: .. code-block:: python from oslo_reports import guru_meditation_report as gmr from ceilometer import version Then, register any additional sections (optional): .. code-block:: python TextGuruMeditation.register_section('Some Special Section', some_section_generator) Finally (under main), before running the "main loop" of the executable (usually ``service.server(server)`` or something similar), register the *GMR* hook: .. code-block:: python TextGuruMeditation.setup_autorun(version) Extending the GMR ----------------- As mentioned above, additional sections can be added to the GMR for a particular executable. For more information, see the inline documentation about oslo.reports: `oslo.reports `_ ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/contributor/index.rst0000664000175100017510000000227015033033467022256 0ustar00mylesmyles================= Contributor Guide ================= In the Contributor Guide, you will find documented policies for developing with Ceilometer. This includes the processes we use for bugs, contributor onboarding, core reviewer memberships, and other procedural items. Ceilometer follows the same workflow as other OpenStack projects. 
To start contributing to Ceilometer, please follow the workflow found here_. .. _here: https://wiki.openstack.org/wiki/Gerrit_Workflow :Bug tracker: https://bugs.launchpad.net/ceilometer :Mailing list: http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss (prefix subjects with ``[Ceilometer]`` for faster responses) :Wiki: https://wiki.openstack.org/wiki/Ceilometer :Code Hosting: https://opendev.org/openstack/ceilometer/ :Code Review: https://review.opendev.org/#/q/status:open+project:openstack/ceilometer,n,z Overview ======== .. toctree:: :maxdepth: 2 overview architecture Data Types ========== .. toctree:: :maxdepth: 2 measurements events Getting Started =============== .. toctree:: :maxdepth: 2 devstack testing gmr Development =========== .. toctree:: :maxdepth: 2 plugins new_resource_types ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/contributor/measurements.rst0000664000175100017510000000772115033033467023665 0ustar00mylesmyles.. Copyright 2012 New Dream Network (DreamHost) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _measurements: ============ Measurements ============ Existing meters =============== For the list of existing meters see the tables under the `Measurements page`_ of Ceilometer in the Administrator Guide. .. 
_Measurements page: https://docs.openstack.org/ceilometer/latest/admin/telemetry-measurements.html New measurements ================ Ceilometer is designed to collect measurements from OpenStack services and from other external components. If you would like to add new meters to the currently existing ones, you need to follow the guidelines given in this section. .. _meter_types: Types ----- Three type of meters are defined in Ceilometer: .. index:: double: meter; cumulative double: meter; gauge double: meter; delta ========== =================================================================== Type Definition ========== =================================================================== Cumulative Increasing over time (instance hours) Gauge Discrete items (floating IPs, image uploads) and fluctuating values (disk I/O) Delta Changing over time (bandwidth) ========== =================================================================== When you're about to add a new meter choose one type from the above list, which is applicable. Units ----- 1. Whenever a volume is to be measured, SI approved units and their approved symbols or abbreviations should be used. Information units should be expressed in bits ('b') or bytes ('B'). 2. For a given meter, the units should NEVER, EVER be changed. 3. When the measurement does not represent a volume, the unit description should always describe WHAT is measured (ie: apples, disk, routers, floating IPs, etc.). 4. When creating a new meter, if another meter exists measuring something similar, the same units and precision should be used. 5. Meters and samples should always document their units in Ceilometer (API and Documentation) and new sampling code should not be merged without the appropriate documentation. 
============ ======== ============== ======================= Dimension Unit Abbreviations Note ============ ======== ============== ======================= None N/A Dimension-less variable Volume byte B Time seconds s ============ ======== ============== ======================= Naming convention ----------------- If you plan on adding meters, please follow the convention below: 1. Always use '.' as separator and go from least to most discriminant word. For example, do not use ephemeral_disk_size but disk.ephemeral.size 2. When a part of the name is a variable, it should always be at the end and start with a ':'. For example, do not use .image but image:, where type is your variable name. 3. If you have any hesitation, come and ask in #openstack-telemetry Meter definitions ----------------- Meters definitions by default, are stored in separate configuration file, called :file:`ceilometer/data/meters.d/meters.yaml`. This is essentially a replacement for prior approach of writing notification handlers to consume specific topics. A detailed description of how to use meter definition is illustrated in the `admin_guide`_. .. _admin_guide: https://docs.openstack.org/ceilometer/latest/admin/telemetry-data-collection.html#meter-definitions ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/contributor/new_resource_types.rst0000664000175100017510000000574415033033467025104 0ustar00mylesmyles.. Copyright 2017 EasyStack, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. .. _add_new_resource_types: ================================ Ceilometer + Gnocchi Integration ================================ .. warning:: Remember that custom modification may result in conflicts with upstream upgrades. If not intended to be merged with upstream, it's advisable to directly create resource-types via Gnocchi API. .. _resource_types: Managing Resource Types ======================= Resource types in Gnocchi are managed by Ceilometer. The following describes how to add/remove or update Gnocchi resource types to support new Ceilometer data. The modification or creation of Gnocchi resource type definitions are managed `resources_update_operations` of :file:`ceilometer/gnocchi_client.py`. The following operations are supported: 1. Adding a new attribute to a resource type. The following adds `flavor_name` attribute to an existing `instance` resource: .. code:: {"desc": "add flavor_name to instance", "type": "update_attribute_type", "resource_type": "instance", "data": [{ "op": "add", "path": "/attributes/flavor_name", "value": {"type": "string", "min_length": 0, "max_length": 255, "required": True, "options": {'fill': ''}} }]} 2. Remove an existing attribute from a resource type. The following removes `server_group` attribute from `instance` resource: .. code:: {"desc": "remove server_group to instance", "type": "update_attribute_type", "resource_type": "instance", "data": [{ "op": "remove", "path": "/attributes/server_group" }]} 3. Creating a new resource type. The following creates a new resource type named `nova_compute` with a required attribute `host_name`: .. code:: {"desc": "add nova_compute resource type", "type": "create_resource_type", "resource_type": "nova_compute", "data": [{ "attributes": {"host_name": {"type": "string", "min_length": 0, "max_length": 255, "required": True}} }]} .. 
note:: Do not modify the existing change steps when making changes. Each modification requires a new step to be added and for `ceilometer-upgrade` to be run to apply the change to Gnocchi. With accomplishing sections above, don't forget to add a new resource type or attributes of a resource type into the :file:`ceilometer/publisher/data/gnocchi_resources.yaml`. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/contributor/overview.rst0000664000175100017510000000276315033033467023024 0ustar00mylesmyles======== Overview ======== Objectives ========== The Ceilometer project was started in 2012 with one simple goal in mind: to provide an infrastructure to collect any information needed regarding OpenStack projects. It was designed so that rating engines could use this single source to transform events into billable items which we label as "metering". As the project started to come to life, collecting an `increasing number of meters`_ across multiple projects, the OpenStack community started to realize that a secondary goal could be added to Ceilometer: become a standard way to meter, regardless of the purpose of the collection. This data can then be pushed to any set of targets using provided publishers mentioned in `pipeline-publishers` section. .. _increasing number of meters: https://docs.openstack.org/ceilometer/latest/contributor/measurements.html Metering ======== If you divide a billing process into a 3 step process, as is commonly done in the telco industry, the steps are: 1. :term:`metering` 2. :term:`rating` 3. :term:`billing` Ceilometer's initial goal was, and still is, strictly limited to step one. This is a choice made from the beginning not to go into rating or billing, as the variety of possibilities seemed too large for the project to ever deliver a solution that would fit everyone's needs, from private to public clouds. 
This means that if you are looking at this project to solve your billing needs, this is the right way to go, but certainly not the end of the road for you. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/contributor/plugins.rst0000664000175100017510000001207415033033467022633 0ustar00mylesmyles.. Copyright 2012 Nicolas Barcet for Canonical Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ===================== Writing Agent Plugins ===================== This documentation gives you some clues on how to write a new agent or plugin for Ceilometer if you wish to instrument a measurement which has not yet been covered by an existing plugin. Plugin Framework ================ Although we have described a list of the meters Ceilometer should collect, we cannot predict all of the ways deployers will want to measure the resources their customers use. This means that Ceilometer needs to be easy to extend and configure so it can be tuned for each installation. A plugin system based on `setuptools entry points`_ makes it easy to add new monitors in the agents. In particular, Ceilometer now uses Stevedore_, and you should put your entry point definitions in the :file:`entry_points.txt` file of your Ceilometer egg. .. _setuptools entry points: http://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins .. 
_Stevedore: https://docs.openstack.org/stevedore/latest/ Installing a plugin automatically activates it the next time the ceilometer daemon starts. Rather than running and reporting errors or simply consuming cycles for no-ops, plugins may disable themselves at runtime based on configuration settings defined by other components (for example, the plugin for polling libvirt does not run if it sees that the system is configured using some other virtualization tool). Additionally, if no valid resources can be discovered the plugin will be disabled. Polling Agents ============== The polling agent is implemented in :file:`ceilometer/polling/manager.py`. As you will see in the manager, the agent loads all plugins defined in the ``ceilometer.poll.*`` and ``ceilometer.builder.poll.*`` namespaces, then periodically calls their :func:`get_samples` method. Currently we keep separate namespaces - ``ceilometer.poll.compute`` and ``ceilometer.poll.central`` for quick separation of what to poll depending on where is polling agent running. For example, this will load, among others, the :class:`ceilometer.compute.pollsters.instance_stats.CPUPollster` Pollster -------- All pollsters are subclasses of :class:`ceilometer.polling.plugin_base.PollsterBase` class. Pollsters must implement one method: ``get_samples(self, manager, cache, resources)``, which returns a sequence of ``Sample`` objects as defined in the :file:`ceilometer/sample.py` file. Compute plugins are defined as subclasses of the :class:`ceilometer.compute.pollsters.GenericComputePollster` class as defined in the :file:`ceilometer/compute/pollsters/__init__.py` file. For example, in the ``CPUPollster`` plugin, the ``get_samples`` method takes in a given list of resources representing instances on the local host, loops through them and retrieves the `cpu time` details from resource. Similarly, other metrics are built by pulling the appropriate value from the given list of resources. 
Notifications ============= Notifications in OpenStack are consumed by the notification agent and passed through `pipelines` to be normalised and re-published to specified targets. The existing normalisation pipelines are defined in the namespace ``ceilometer.notification.pipeline``. Each normalisation pipeline are defined as subclass of :class:`ceilometer.pipeline.base.PipelineManager` which interprets and builds pipelines based on a given configuration file. Pipelines are required to define `Source` and `Sink` permutations to describe how to process notification. Additionally, it must set ``get_main_endpoints`` which provides endpoints to be added to the main queue listener in the notification agent. This main queue endpoint inherits :class:`ceilometer.pipeline.base.NotificationEndpoint` and defines which notification priorities to listen, normalises the data, and redirects the data for pipeline processing. Notification endpoints should implement: ``event_types`` A sequence of strings defining the event types the endpoint should handle ``process_notifications(self, priority, notifications)`` Receives an event message from the list provided to ``event_types`` and returns a sequence of objects. Using the SampleEndpoint, it should yield ``Sample`` objects as defined in the :file:`ceilometer/sample.py` file. Two pipeline configurations exist and can be found under ``ceilometer.pipeline.*``. The `sample` pipeline loads in multiple endpoints defined in ``ceilometer.sample.endpoint`` namespace. Each of the endpoints normalises a given notification into different samples. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/contributor/testing.rst0000664000175100017510000000331315033033467022623 0ustar00mylesmyles.. Copyright 2012 New Dream Network, LLC (DreamHost) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================= Running the Tests ================= Ceilometer includes an extensive set of automated unit tests which are run through tox_. 1. Install ``tox``:: $ sudo pip install tox 2. Run the unit and code-style tests:: $ cd /opt/stack/ceilometer $ tox -e py27,pep8 As tox is a wrapper around testr, it also accepts the same flags as testr. See the `testr documentation`_ for details about these additional flags. .. _testr documentation: https://testrepository.readthedocs.org/en/latest/MANUAL.html Use a double hyphen to pass options to testr. For example, to run only tests under tests/unit/image:: $ tox -e py27 -- image To debug tests (ie. break into pdb debugger), you can use ''debug'' tox environment. Here's an example, passing the name of a test since you'll normally only want to run the test that hits your breakpoint:: $ tox -e debug ceilometer.tests.unit.test_bin For reference, the ``debug`` tox environment implements the instructions here: https://wiki.openstack.org/wiki/Testr#Debugging_.28pdb.29_Tests .. _tox: https://tox.readthedocs.io/en/latest/ ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/glossary.rst0000664000175100017510000001064215033033467020442 0ustar00mylesmyles.. Copyright 2012 New Dream Network (DreamHost) Copyright 2013 eNovance Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ======== Glossary ======== .. glossary:: agent Software service running on the OpenStack infrastructure measuring usage and sending the results to any number of target using the :term:`publisher`. billing Billing is the process to assemble bill line items into a single per customer bill, emitting the bill to start the payment collection. bus listener agent Bus listener agent which takes events generated on the Oslo notification bus and transforms them into Ceilometer samples. This is the preferred method of data collection. polling agent Software service running either on a central management node within the OpenStack infrastructure or compute node measuring usage and sending the results to a queue. notification agent The different OpenStack services emit several notifications about the various types of events. The notification agent consumes them from respective queues and filters them by the event_type. data store Storage system for recording data collected by ceilometer. meter The measurements tracked for a resource. For example, an instance has a number of meters, such as duration of instance, CPU time used, number of disk io requests, etc. Three types of meters are defined in ceilometer: * Cumulative: Increasing over time (e.g. disk I/O) * Gauge: Discrete items (e.g. floating IPs, image uploads) and fluctuating values (e.g. number of Swift objects) * Delta: Incremental change to a counter over time (e.g. bandwidth delta) metering Metering is the process of collecting information about what, who, when and how much regarding anything that can be billed. 
The result of this is a collection of "tickets" (a.k.a. samples) which are ready to be processed in any way you want. notification A message sent via an external OpenStack system (e.g Nova, Glance, etc) using the Oslo notification mechanism [#]_. These notifications are usually sent to and received by Ceilometer through the notifier RPC driver. non-repudiable "Non-repudiation refers to a state of affairs where the purported maker of a statement will not be able to successfully challenge the validity of the statement or contract. The term is often seen in a legal setting wherein the authenticity of a signature is being challenged. In such an instance, the authenticity is being "repudiated"." (Wikipedia, [#]_) project The OpenStack tenant or project. polling agents The polling agent is collecting measurements by polling some API or other tool at a regular interval. publisher The publisher is publishing samples to a specific target. push agents The push agent is the only solution to fetch data within projects, which do not expose the required data in a remotely usable way. This is not the preferred method as it makes deployment a bit more complex having to add a component to each of the nodes that need to be monitored. rating Rating is the process of analysing a series of tickets, according to business rules defined by marketing, in order to transform them into bill line items with a currency value. resource The OpenStack entity being metered (e.g. instance, volume, image, etc). sample Data sample for a particular meter. source The origin of metering data. This field is set to "openstack" by default. It can be configured to a different value using the sample_source field in the ceilometer.conf file. user An OpenStack user. .. [#] https://opendev.org/openstack/oslo.messaging/src/branch/master/oslo_messaging/notify/notifier.py .. 
[#] http://en.wikipedia.org/wiki/Non-repudiation ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/index.rst0000664000175100017510000000312115033033467017700 0ustar00mylesmyles.. Copyright 2012 Nicolas Barcet for Canonical Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ====================================== Welcome to Ceilometer's documentation! ====================================== The `Ceilometer` project is a data collection service that provides the ability to normalise and transform data across all current OpenStack core components with work underway to support future OpenStack components. Ceilometer is a component of the Telemetry project. Its data can be used to provide customer billing, resource tracking, and alarming capabilities across all OpenStack core components. This documentation offers information on how Ceilometer works and how to contribute to the project. Overview ======== .. toctree:: :maxdepth: 2 install/index contributor/index admin/index configuration/index cli/index Appendix ======== .. toctree:: :maxdepth: 1 releasenotes/index glossary .. update index .. 
only:: html Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.8009415 ceilometer-24.1.0.dev59/doc/source/install/0000775000175100017510000000000015033033521017477 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.8009415 ceilometer-24.1.0.dev59/doc/source/install/cinder/0000775000175100017510000000000015033033521020743 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/install/cinder/install-cinder-config-common.inc0000664000175100017510000000115415033033467027111 0ustar00mylesmyles* Enable periodic usage statistics relating to block storage. To use it, you must run this command in the following format: .. code-block:: console $ cinder-volume-usage-audit --start_time='YYYY-MM-DD HH:MM:SS' \ --end_time='YYYY-MM-DD HH:MM:SS' --send_actions This script outputs what volumes or snapshots were created, deleted, or exists in a given period of time and some information about these volumes or snapshots. Using this script via cron you can get notifications periodically, for example, every 5 minutes:: */5 * * * * /path/to/cinder-volume-usage-audit --send_actions ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/install/cinder/install-cinder-rdo.rst0000664000175100017510000000207615033033467025205 0ustar00mylesmylesEnable Block Storage meters for Red Hat Enterprise Linux and CentOS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Telemetry uses notifications to collect Block Storage service meters. Perform these steps on the controller and Block Storage nodes. .. note:: Your environment must include the Block Storage service. 
Configure Cinder to use Telemetry --------------------------------- Edit the ``/etc/cinder/cinder.conf`` file and complete the following actions: * In the ``[oslo_messaging_notifications]`` section, configure notifications: .. code-block:: ini [oslo_messaging_notifications] ... driver = messagingv2 .. include:: install-cinder-config-common.inc Finalize installation --------------------- #. Restart the Block Storage services on the controller node: .. code-block:: console # systemctl restart openstack-cinder-api.service openstack-cinder-scheduler.service #. Restart the Block Storage services on the storage nodes: .. code-block:: console # systemctl restart openstack-cinder-volume.service ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/install/cinder/install-cinder-ubuntu.rst0000664000175100017510000000174215033033467025742 0ustar00mylesmylesEnable Block Storage meters for Ubuntu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Telemetry uses notifications to collect Block Storage service meters. Perform these steps on the controller and Block Storage nodes. .. note:: Your environment must include the Block Storage service. Configure Cinder to use Telemetry --------------------------------- Edit the ``/etc/cinder/cinder.conf`` file and complete the following actions: * In the ``[oslo_messaging_notifications]`` section, configure notifications: .. code-block:: ini [oslo_messaging_notifications] ... driver = messagingv2 .. include:: install-cinder-config-common.inc Finalize installation --------------------- #. Restart the Block Storage services on the controller node: .. code-block:: console # service cinder-api restart # service cinder-scheduler restart #. Restart the Block Storage services on the storage nodes: .. 
code-block:: console # service cinder-volume restart ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/install/get_started.rst0000664000175100017510000000344415033033467022554 0ustar00mylesmyles========================================== Telemetry Data Collection service overview ========================================== The Telemetry Data Collection services provide the following functions: * Efficiently polls metering data related to OpenStack services. * Collects event and metering data by monitoring notifications sent from services. * Publishes collected data to various targets including data stores and message queues. The Telemetry service consists of the following components: A compute agent (``ceilometer-agent-compute``) Runs on each compute node and polls for resource utilization statistics. This is actually the polling agent ``ceilometer-polling`` running with parameter ``--polling-namespace compute``. A central agent (``ceilometer-agent-central``) Runs on a central management server to poll for resource utilization statistics for resources not tied to instances or compute nodes. Multiple agents can be started to scale service horizontally. This is actually the polling agent ``ceilometer-polling`` running with parameter ``--polling-namespace central``. A notification agent (``ceilometer-agent-notification``) Runs on a central management server(s) and consumes messages from the message queue(s) to build event and metering data. Data is then published to defined targets. By default, data is pushed to Gnocchi_. These services communicate by using the OpenStack messaging bus. Ceilometer data is designed to be published to various endpoints for storage and analysis. .. note:: Ceilometer previously provided a storage and API solution. As of Newton, this functionality is officially deprecated and discouraged. 
For efficient storage and statistical analysis of Ceilometer data, Gnocchi_ is recommended. .. _Gnocchi: https://gnocchi.osci.io ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.8009415 ceilometer-24.1.0.dev59/doc/source/install/glance/0000775000175100017510000000000015033033521020730 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/install/glance/install-glance-rdo.rst0000664000175100017510000000177715033033467025166 0ustar00mylesmylesEnable Image service meters for Red Hat Enterprise Linux and CentOS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Telemetry uses notifications to collect Image service meters. Perform these steps on the controller node. Configure the Image service to use Telemetry -------------------------------------------- * Edit the ``/etc/glance/glance-api.conf`` file and complete the following actions: * In the ``[DEFAULT]``, ``[oslo_messaging_notifications]`` sections, configure notifications and RabbitMQ message broker access: .. code-block:: ini [DEFAULT] ... transport_url = rabbit://openstack:RABBIT_PASS@controller [oslo_messaging_notifications] ... driver = messagingv2 Replace ``RABBIT_PASS`` with the password you chose for the ``openstack`` account in ``RabbitMQ``. Finalize installation --------------------- * Restart the Image service: .. code-block:: console # systemctl restart openstack-glance-api.service ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/install/glance/install-glance-ubuntu.rst0000664000175100017510000000166115033033467025714 0ustar00mylesmylesEnable Image service meters for Ubuntu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Telemetry uses notifications to collect Image service meters. Perform these steps on the controller node. 
Configure the Image service to use Telemetry -------------------------------------------- * Edit the ``/etc/glance/glance-api.conf`` file and complete the following actions: * In the ``[DEFAULT]``, ``[oslo_messaging_notifications]`` sections, configure notifications and RabbitMQ message broker access: .. code-block:: ini [DEFAULT] ... transport_url = rabbit://openstack:RABBIT_PASS@controller [oslo_messaging_notifications] ... driver = messagingv2 Replace ``RABBIT_PASS`` with the password you chose for the ``openstack`` account in ``RabbitMQ``. Finalize installation --------------------- * Restart the Image service: .. code-block:: console # service glance-api restart ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.8019414 ceilometer-24.1.0.dev59/doc/source/install/heat/0000775000175100017510000000000015033033521020420 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/install/heat/install-heat-rdo.rst0000664000175100017510000000153515033033467024336 0ustar00mylesmylesEnable Orchestration service meters for Red Hat Enterprise Linux and CentOS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Telemetry uses notifications to collect Orchestration service meters. Perform these steps on the controller node. Configure the Orchestration service to use Telemetry ---------------------------------------------------- * Edit the ``/etc/heat/heat.conf`` and complete the following actions: * In the ``[oslo_messaging_notifications]`` sections, enable notifications: .. code-block:: ini [oslo_messaging_notifications] ... driver = messagingv2 Finalize installation --------------------- * Restart the Orchestration service: .. 
code-block:: console # systemctl restart openstack-heat-api.service \ openstack-heat-api-cfn.service openstack-heat-engine.service ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/install/heat/install-heat-ubuntu.rst0000664000175100017510000000142015033033467025065 0ustar00mylesmylesEnable Orchestration service meters for Ubuntu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Telemetry uses notifications to collect Orchestration service meters. Perform these steps on the controller node. Configure the Orchestration service to use Telemetry ---------------------------------------------------- * Edit the ``/etc/heat/heat.conf`` and complete the following actions: * In the ``[oslo_messaging_notifications]`` sections, enable notifications: .. code-block:: ini [oslo_messaging_notifications] ... driver = messagingv2 Finalize installation --------------------- * Restart the Orchestration service: .. code-block:: console # service heat-api restart # service heat-api-cfn restart # service heat-engine restart ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/install/index.rst0000664000175100017510000000053115033033467021350 0ustar00mylesmyles================== Installation Guide ================== .. toctree:: :maxdepth: 2 get_started.rst install-controller.rst install-compute.rst verify.rst next-steps.rst This chapter assumes a working setup of OpenStack following the `OpenStack Installation Tutorials and Guides `_. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/install/install-base-config-common.inc0000664000175100017510000000276215033033467025321 0ustar00mylesmyles2. Edit the ``/etc/ceilometer/pipeline.yaml`` file and complete the following section: * Configure Gnocchi connection: .. 
code-block:: yaml publishers: # set address of Gnocchi # + filter out Gnocchi-related activity meters (Swift driver) # + set default archive policy - gnocchi://?filter_project=service&archive_policy=low 3. Edit the ``/etc/ceilometer/ceilometer.conf`` file and complete the following actions: * In the ``[DEFAULT]`` section, configure ``RabbitMQ`` message queue access: .. code-block:: ini [DEFAULT] ... transport_url = rabbit://openstack:RABBIT_PASS@controller Replace ``RABBIT_PASS`` with the password you chose for the ``openstack`` account in ``RabbitMQ``. * In the ``[service_credentials]`` section, configure service credentials: .. code-block:: ini [service_credentials] ... auth_type = password auth_url = http://controller:5000/v3 project_domain_id = default user_domain_id = default project_name = service username = ceilometer password = CEILOMETER_PASS interface = internalURL region_name = RegionOne Replace ``CEILOMETER_PASS`` with the password you chose for the ``ceilometer`` user in the Identity service. 4. Create Ceilometer resources in Gnocchi. Gnocchi should be running by this stage: .. code-block:: console # ceilometer-upgrade ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/install/install-base-prereq-common.inc0000664000175100017510000001375115033033467025352 0ustar00mylesmyles1. Source the ``admin`` credentials to gain access to admin-only CLI commands: .. code-block:: console $ . admin-openrc 2. To create the service credentials, complete these steps: * Create the ``ceilometer`` user: .. 
code-block:: console $ openstack user create --domain default --password-prompt ceilometer User Password: Repeat User Password: +-----------+----------------------------------+ | Field | Value | +-----------+----------------------------------+ | domain_id | e0353a670a9e496da891347c589539e9 | | enabled | True | | id | c859c96f57bd4989a8ea1a0b1d8ff7cd | | name | ceilometer | +-----------+----------------------------------+ * Add the ``admin`` role to the ``ceilometer`` user. .. code-block:: console $ openstack role add --project service --user ceilometer admin .. note:: This command provides no output. * Create the ``ceilometer`` service entity: .. code-block:: console $ openstack service create --name ceilometer \ --description "Telemetry" metering +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | Telemetry | | enabled | True | | id | 5fb7fd1bb2954fddb378d4031c28c0e4 | | name | ceilometer | | type | metering | +-------------+----------------------------------+ 3. Register Gnocchi service in Keystone: * Create the ``gnocchi`` user: .. code-block:: console $ openstack user create --domain default --password-prompt gnocchi User Password: Repeat User Password: +-----------+----------------------------------+ | Field | Value | +-----------+----------------------------------+ | domain_id | e0353a670a9e496da891347c589539e9 | | enabled | True | | id | 8bacd064f6434ef2b6bbfbedb79b0318 | | name | gnocchi | +-----------+----------------------------------+ * Create the ``gnocchi`` service entity: .. 
code-block:: console $ openstack service create --name gnocchi \ --description "Metric Service" metric +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | Metric Service | | enabled | True | | id | 205978b411674e5a9990428f81d69384 | | name | gnocchi | | type | metric | +-------------+----------------------------------+ * Add the ``admin`` role to the ``gnocchi`` user. .. code-block:: console $ openstack role add --project service --user gnocchi admin .. note:: This command provides no output. * Create the Metric service API endpoints: .. code-block:: console $ openstack endpoint create --region RegionOne \ metric public http://controller:8041 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | b808b67b848d443e9eaaa5e5d796970c | | interface | public | | region | RegionOne | | region_id | RegionOne | | service_id | 205978b411674e5a9990428f81d69384 | | service_name | gnocchi | | service_type | metric | | url | http://controller:8041 | +--------------+----------------------------------+ $ openstack endpoint create --region RegionOne \ metric internal http://controller:8041 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | c7009b1c2ee54b71b771fa3d0ae4f948 | | interface | internal | | region | RegionOne | | region_id | RegionOne | | service_id | 205978b411674e5a9990428f81d69384 | | service_name | gnocchi | | service_type | metric | | url | http://controller:8041 | +--------------+----------------------------------+ $ openstack endpoint create --region RegionOne \ metric admin http://controller:8041 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | b2c00566d0604551b5fe1540c699db3d | | interface | admin 
| | region | RegionOne | | region_id | RegionOne | | service_id | 205978b411674e5a9990428f81d69384 | | service_name | gnocchi | | service_type | metric | | url | http://controller:8041 | +--------------+----------------------------------+ ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/install/install-base-rdo.rst0000664000175100017510000000507115033033467023405 0ustar00mylesmyles.. _install_rdo: Install and configure for Red Hat Enterprise Linux and CentOS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Telemetry service, code-named ceilometer, on the controller node. Prerequisites ------------- Before you install and configure the Telemetry service, you must configure a target to send metering data to. The recommended endpoint is Gnocchi_. .. _Gnocchi: https://gnocchi.osci.io .. include:: install-base-prereq-common.inc Install Gnocchi --------------- #. Install the Gnocchi packages. Alternatively, Gnocchi can be install using pip: .. code-block:: console # dnf install openstack-gnocchi-api openstack-gnocchi-metricd \ python3-gnocchiclient .. note:: Depending on your environment size, consider installing Gnocchi separately as it makes extensive use of the cpu. #. Install the uWSGI packages. The following method uses operating system provided packages. Another alternative would be to use pip(or pip3, depending on the distribution); using pip is not described in this doc: .. code-block:: console # dnf install uwsgi-plugin-common uwsgi-plugin-python3 uwsgi .. note:: Since the provided gnocchi-api wraps around uwsgi, you need to make sure that uWSGI is installed if you want to use gnocchi-api to run Gnocchi API. As Gnocchi API tier runs using WSGI, it can also alternatively be run using Apache httpd and mod_wsgi, or any other HTTP daemon. .. 
include:: install-gnocchi.inc Finalize Gnocchi installation ----------------------------- #. Start the Gnocchi services and configure them to start when the system boots: .. code-block:: console # systemctl enable openstack-gnocchi-api.service \ openstack-gnocchi-metricd.service # systemctl start openstack-gnocchi-api.service \ openstack-gnocchi-metricd.service Install and configure components -------------------------------- #. Install the Ceilometer packages: .. code-block:: console # dnf install openstack-ceilometer-notification \ openstack-ceilometer-central .. include:: install-base-config-common.inc Finalize installation --------------------- #. Start the Telemetry services and configure them to start when the system boots: .. code-block:: console # systemctl enable openstack-ceilometer-notification.service \ openstack-ceilometer-central.service # systemctl start openstack-ceilometer-notification.service \ openstack-ceilometer-central.service ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/install/install-base-ubuntu.rst0000664000175100017510000000417315033033467024145 0ustar00mylesmyles.. _install_ubuntu: Install and configure for Ubuntu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Telemetry service, code-named ceilometer, on the controller node. Prerequisites ------------- Before you install and configure the Telemetry service, you must configure a target to send metering data to. The recommended endpoint is Gnocchi_. .. _Gnocchi: https://gnocchi.osci.io .. include:: install-base-prereq-common.inc Install Gnocchi --------------- #. Install the Gnocchi packages. Alternatively, Gnocchi can be installed using pip: .. code-block:: console # apt-get install gnocchi-api gnocchi-metricd python3-gnocchiclient .. note:: Depending on your environment size, consider installing Gnocchi separately as it makes extensive use of the cpu. #. 
Install the uWSGI packages. The following method uses operating system provided packages. Another alternative would be to use pip(or pip3, depending on the distribution); using pip is not described in this doc: .. code-block:: console # apt-get install uwsgi-plugin-python3 uwsgi .. note:: Since the provided gnocchi-api wraps around uwsgi, you need to make sure that uWSGI is installed if you want to use gnocchi-api to run Gnocchi API. As Gnocchi API tier runs using WSGI, it can also alternatively be run using Apache httpd and mod_wsgi, or any other HTTP daemon. .. include:: install-gnocchi.inc Finalize Gnocchi installation ----------------------------- #. Restart the Gnocchi services: .. code-block:: console # service gnocchi-api restart # service gnocchi-metricd restart Install and configure components -------------------------------- #. Install the ceilometer packages: .. code-block:: console # apt-get install ceilometer-agent-notification \ ceilometer-agent-central .. include:: install-base-config-common.inc Finalize installation --------------------- #. Restart the Telemetry services: .. code-block:: console # service ceilometer-agent-central restart # service ceilometer-agent-notification restart ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/install/install-compute-common.inc0000664000175100017510000000367215033033467024621 0ustar00mylesmyles2. Edit the ``/etc/ceilometer/ceilometer.conf`` file and complete the following actions: * In the ``[DEFAULT]`` section, configure ``RabbitMQ`` message queue access: .. code-block:: ini [DEFAULT] ... transport_url = rabbit://openstack:RABBIT_PASS@controller Replace ``RABBIT_PASS`` with the password you chose for the ``openstack`` account in ``RabbitMQ``. * In the ``[service_credentials]`` section, configure service credentials: .. code-block:: ini [service_credentials] ... 
auth_url = http://controller:5000 project_domain_id = default user_domain_id = default auth_type = password username = ceilometer project_name = service password = CEILOMETER_PASS interface = internalURL region_name = RegionOne Replace ``CEILOMETER_PASS`` with the password you chose for the ``ceilometer`` user in the Identity service. Configure Compute to use Telemetry ---------------------------------- * Edit the ``/etc/nova/nova.conf`` file and configure notifications in the ``[DEFAULT]`` section: .. code-block:: ini [DEFAULT] ... instance_usage_audit = True instance_usage_audit_period = hour [notifications] ... notify_on_state_change = vm_and_task_state [oslo_messaging_notifications] ... driver = messagingv2 Configure Compute to poll IPMI meters ------------------------------------- .. note:: To enable IPMI meters, ensure IPMITool is installed and the host supports IPMI. * Edit the ``/etc/sudoers`` file and include: .. code-block:: ini ceilometer ALL = (root) NOPASSWD: /usr/bin/ceilometer-rootwrap /etc/ceilometer/rootwrap.conf * * Edit the ``/etc/ceilometer/polling.yaml`` to include the required meters, for example: .. code-block:: yaml - name: ipmi interval: 300 meters: - hardware.ipmi.temperature ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/install/install-compute-rdo.rst0000664000175100017510000000204615033033467024146 0ustar00mylesmylesEnable Compute service meters for Red Hat Enterprise Linux and CentOS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Telemetry uses a combination of notifications and an agent to collect Compute meters. Perform these steps on each compute node. Install and configure components -------------------------------- #. Install the packages: .. code-block:: console # dnf install openstack-ceilometer-compute # dnf install openstack-ceilometer-ipmi (optional) .. 
include:: install-compute-common.inc Finalize installation --------------------- #. Start the agent and configure it to start when the system boots: .. code-block:: console # systemctl enable openstack-ceilometer-compute.service # systemctl start openstack-ceilometer-compute.service # systemctl enable openstack-ceilometer-ipmi.service (optional) # systemctl start openstack-ceilometer-ipmi.service (optional) #. Restart the Compute service: .. code-block:: console # systemctl restart openstack-nova-compute.service ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/install/install-compute-ubuntu.rst0000664000175100017510000000141615033033467024704 0ustar00mylesmylesEnable Compute service meters for Ubuntu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Telemetry uses a combination of notifications and an agent to collect Compute meters. Perform these steps on each compute node. Install and configure components -------------------------------- #. Install the packages: .. code-block:: console # apt-get install ceilometer-agent-compute # apt-get install ceilometer-agent-ipmi (optional) .. include:: install-compute-common.inc Finalize installation --------------------- #. Restart the agent: .. code-block:: console # service ceilometer-agent-compute restart # service ceilometer-agent-ipmi restart (optional) #. Restart the Compute service: .. code-block:: console # service nova-compute restart ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/install/install-compute.rst0000664000175100017510000000063415033033467023365 0ustar00mylesmyles.. _install_compute: Install and Configure Compute Services ====================================== This section assumes that you already have a working OpenStack environment with at least the following components installed: Compute, Image Service, Identity. 
Note that installation and configuration vary by distribution. .. toctree:: :maxdepth: 1 install-compute-rdo.rst install-compute-ubuntu.rst ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/install/install-controller.rst0000664000175100017510000000270015033033467024070 0ustar00mylesmyles.. _install_controller: Install and Configure Controller Services ========================================= This section assumes that you already have a working OpenStack environment with at least the following components installed: Compute, Image Service, Identity. Note that installation and configuration vary by distribution. Ceilometer ---------- .. toctree:: :maxdepth: 1 install-base-rdo.rst install-base-ubuntu.rst Additional steps are required to configure services to interact with ceilometer: Cinder ------ .. toctree:: :maxdepth: 1 cinder/install-cinder-rdo.rst cinder/install-cinder-ubuntu.rst Glance ------ .. toctree:: :maxdepth: 1 glance/install-glance-rdo.rst glance/install-glance-ubuntu.rst Heat ---- .. toctree:: :maxdepth: 1 heat/install-heat-rdo.rst heat/install-heat-ubuntu.rst Keystone -------- To enable auditing of API requests, Keystone provides middleware which captures API requests to a service and emits data to Ceilometer. Instructions to enable this functionality is available in `Keystone's developer documentation `_. Ceilometer will captures this information as ``audit.http.*`` events. Neutron ------- .. toctree:: :maxdepth: 1 neutron/install-neutron-rdo.rst neutron/install-neutron-ubuntu.rst Swift ----- .. toctree:: :maxdepth: 1 swift/install-swift-rdo.rst swift/install-swift-ubuntu.rst ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/install/install-gnocchi.inc0000664000175100017510000000453615033033467023271 0ustar00mylesmyles3. 
Create the database for Gnocchi's indexer: * Use the database access client to connect to the database server as the ``root`` user: .. code-block:: console $ mysql -u root -p * Create the ``gnocchi`` database: .. code-block:: console CREATE DATABASE gnocchi; * Grant proper access to the ``gnocchi`` database: .. code-block:: console GRANT ALL PRIVILEGES ON gnocchi.* TO 'gnocchi'@'localhost' \ IDENTIFIED BY 'GNOCCHI_DBPASS'; GRANT ALL PRIVILEGES ON gnocchi.* TO 'gnocchi'@'%' \ IDENTIFIED BY 'GNOCCHI_DBPASS'; Replace ``GNOCCHI_DBPASS`` with a suitable password. * Exit the database access client. 4. Edit the ``/etc/gnocchi/gnocchi.conf`` file and add Keystone options: * In the ``[api]`` section, configure gnocchi to use keystone: .. code-block:: ini [api] auth_mode = keystone port = 8041 uwsgi_mode = http-socket * In the ``[keystone_authtoken]`` section, configure keystone authentication: .. code-block:: ini [keystone_authtoken] ... auth_type = password auth_url = http://controller:5000/v3 project_domain_name = Default user_domain_name = Default project_name = service username = gnocchi password = GNOCCHI_PASS interface = internalURL region_name = RegionOne Replace ``GNOCCHI_PASS`` with the password you chose for the ``gnocchi`` user in the Identity service. * In the ``[indexer]`` section, configure database access: .. code-block:: ini [indexer] url = mysql+pymysql://gnocchi:GNOCCHI_DBPASS@controller/gnocchi Replace ``GNOCCHI_DBPASS`` with the password you chose for Gnocchi's indexer database. * In the ``[storage]`` section, configure location to store metric data. In this case, we will store it to the local file system. See Gnocchi documenation for a list of more durable and performant drivers: .. code-block:: ini [storage] # coordination_url is not required but specifying one will improve # performance with better workload division across workers. coordination_url = redis://controller:6379 file_basepath = /var/lib/gnocchi driver = file 5. Initialize Gnocchi: .. 
code-block:: console gnocchi-upgrade ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.8019414 ceilometer-24.1.0.dev59/doc/source/install/neutron/0000775000175100017510000000000015033033521021171 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/install/neutron/install-neutron-rdo.rst0000664000175100017510000000140715033033467025656 0ustar00mylesmylesEnable Networking service meters for Red Hat Enterprise Linux and CentOS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Telemetry uses notifications to collect Networking service meters. Perform these steps on the controller node. Configure the Networking service to use Telemetry ------------------------------------------------- * Edit the ``/etc/neutron/neutron.conf`` and complete the following actions: * In the ``[oslo_messaging_notifications]`` sections, enable notifications: .. code-block:: ini [oslo_messaging_notifications] ... driver = messagingv2 Finalize installation --------------------- * Restart the Networking service: .. code-block:: console # systemctl restart neutron-server.service ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/install/neutron/install-neutron-ubuntu.rst0000664000175100017510000000130315033033467026407 0ustar00mylesmylesEnable Networking service meters for Ubuntu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Telemetry uses notifications to collect Networking service meters. Perform these steps on the controller node. Configure the Networking service to use Telemetry ------------------------------------------------- * Edit the ``/etc/neutron/neutron.conf`` and complete the following actions: * In the ``[oslo_messaging_notifications]`` sections, enable notifications: .. code-block:: ini [oslo_messaging_notifications] ... 
driver = messagingv2 Finalize installation --------------------- * Restart the Networking service: .. code-block:: console # service neutron-server restart ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/install/next-steps.rst0000664000175100017510000000035115033033467022353 0ustar00mylesmyles.. _next-steps: Next steps ~~~~~~~~~~ Your OpenStack environment now includes the ceilometer service. To add additional services, see the `OpenStack Installation Tutorials and Guides `_. ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.8019414 ceilometer-24.1.0.dev59/doc/source/install/swift/0000775000175100017510000000000015033033521020633 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/install/swift/install-swift-config-common.inc0000664000175100017510000000241615033033467026673 0ustar00mylesmylesConfigure Object Storage to use Telemetry ----------------------------------------- Perform these steps on the controller and any other nodes that run the Object Storage proxy service. * Edit the ``/etc/swift/proxy-server.conf`` file and complete the following actions: * In the ``[filter:keystoneauth]`` section, add the ``ResellerAdmin`` role: .. code-block:: ini [filter:keystoneauth] ... operator_roles = admin, user, ResellerAdmin * In the ``[pipeline:main]`` section, add ``ceilometer``: .. code-block:: ini [pipeline:main] pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk ratelimit authtoken keystoneauth container-quotas account-quotas slo dlo versioned_writes proxy-logging ceilometer proxy-server * In the ``[filter:ceilometer]`` section, configure notifications: .. code-block:: ini [filter:ceilometer] paste.filter_factory = ceilometermiddleware.swift:filter_factory ... 
control_exchange = swift url = rabbit://openstack:RABBIT_PASS@controller:5672/ driver = messagingv2 topic = notifications log_level = WARN Replace ``RABBIT_PASS`` with the password you chose for the ``openstack`` account in ``RabbitMQ``. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/install/swift/install-swift-prereq-common.inc0000664000175100017510000000200615033033467026717 0ustar00mylesmylesPrerequisites ------------- The Telemetry service requires access to the Object Storage service using the ``ResellerAdmin`` role. Perform these steps on the controller node. #. Source the ``admin`` credentials to gain access to admin-only CLI commands. .. code-block:: console $ . admin-openrc #. Create the ``ResellerAdmin`` role: .. code-block:: console $ openstack role create ResellerAdmin +-----------+----------------------------------+ | Field | Value | +-----------+----------------------------------+ | domain_id | None | | id | 462fa46c13fd4798a95a3bfbe27b5e54 | | name | ResellerAdmin | +-----------+----------------------------------+ #. Add the ``ResellerAdmin`` role to the ``ceilometer`` user: .. code-block:: console $ openstack role add --project service --user ceilometer ResellerAdmin .. note:: This command provides no output. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/install/swift/install-swift-rdo.rst0000664000175100017510000000130515033033467024757 0ustar00mylesmylesEnable Object Storage meters for Red Hat Enterprise Linux and CentOS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Telemetry uses a combination of polling and notifications to collect Object Storage meters. .. note:: Your environment must include the Object Storage service. .. include:: install-swift-prereq-common.inc Install components ------------------ * Install the packages: .. 
code-block:: console # dnf install python3-ceilometermiddleware .. include:: install-swift-config-common.inc Finalize installation --------------------- * Restart the Object Storage proxy service: .. code-block:: console # systemctl restart openstack-swift-proxy.service ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/install/swift/install-swift-ubuntu.rst0000664000175100017510000000117215033033467025517 0ustar00mylesmylesEnable Object Storage meters for Ubuntu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Telemetry uses a combination of polling and notifications to collect Object Storage meters. .. note:: Your environment must include the Object Storage service. .. include:: install-swift-prereq-common.inc Install components ------------------ * Install the packages: .. code-block:: console # apt-get install python-ceilometermiddleware .. include:: install-swift-config-common.inc Finalize installation --------------------- * Restart the Object Storage proxy service: .. code-block:: console # service swift-proxy restart ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/install/verify.rst0000664000175100017510000001112315033033467021544 0ustar00mylesmyles.. _verify: Verify operation ~~~~~~~~~~~~~~~~ Verify operation of the Telemetry service. These steps only include the Image service meters to reduce clutter. Environments with ceilometer integration for additional services contain more meters. .. note:: Perform these steps on the controller node. .. note:: The following uses Gnocchi to verify data. Alternatively, data can be published to a file backend temporarily by using a ``file://`` publisher. #. Source the ``admin`` credentials to gain access to admin-only CLI commands: .. code-block:: console $ . admin-openrc #. List available resource and its metrics: .. 
code-block:: console $ gnocchi resource list --type image +--------------------------------------+-------+----------------------------------+---------+--------------------------------------+----------------------------------+----------+----------------------------------+--------------+ | id | type | project_id | user_id | original_resource_id | started_at | ended_at | revision_start | revision_end | +--------------------------------------+-------+----------------------------------+---------+--------------------------------------+----------------------------------+----------+----------------------------------+--------------+ | a6b387e1-4276-43db-b17a-e10f649d85a3 | image | 6fd9631226e34531b53814a0f39830a9 | None | a6b387e1-4276-43db-b17a-e10f649d85a3 | 2017-01-25T23:50:14.423584+00:00 | None | 2017-01-25T23:50:14.423601+00:00 | None | +--------------------------------------+-------+----------------------------------+---------+--------------------------------------+----------------------------------+----------+----------------------------------+--------------+ $ gnocchi resource show a6b387e1-4276-43db-b17a-e10f649d85a3 +-----------------------+-------------------------------------------------------------------+ | Field | Value | +-----------------------+-------------------------------------------------------------------+ | created_by_project_id | aca4db3db9904ecc9c1c9bb1763da6a8 | | created_by_user_id | 07b0945689a4407dbd1ea72c3c5b8d2f | | creator | 07b0945689a4407dbd1ea72c3c5b8d2f:aca4db3db9904ecc9c1c9bb1763da6a8 | | ended_at | None | | id | a6b387e1-4276-43db-b17a-e10f649d85a3 | | metrics | image.download: 839afa02-1668-4922-a33e-6b6ea7780715 | | | image.serve: 1132e4a0-9e35-4542-a6ad-d6dc5fb4b835 | | | image.size: 8ecf6c17-98fd-446c-8018-b741dc089a76 | | original_resource_id | a6b387e1-4276-43db-b17a-e10f649d85a3 | | project_id | 6fd9631226e34531b53814a0f39830a9 | | revision_end | None | | revision_start | 2017-01-25T23:50:14.423601+00:00 | | started_at | 
2017-01-25T23:50:14.423584+00:00 | | type | image | | user_id | None | +-----------------------+-------------------------------------------------------------------+ #. Download the CirrOS image from the Image service: .. code-block:: console $ IMAGE_ID=$(glance image-list | grep 'cirros' | awk '{ print $2 }') $ glance image-download $IMAGE_ID > /tmp/cirros.img #. List available meters again to validate detection of the image download: .. code-block:: console $ gnocchi measures show 839afa02-1668-4922-a33e-6b6ea7780715 +---------------------------+-------------+-----------+ | timestamp | granularity | value | +---------------------------+-------------+-----------+ | 2017-01-26T15:35:00+00:00 | 300.0 | 3740163.0 | +---------------------------+-------------+-----------+ #. Remove the previously downloaded image file ``/tmp/cirros.img``: .. code-block:: console $ rm /tmp/cirros.img ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.8019414 ceilometer-24.1.0.dev59/doc/source/releasenotes/0000775000175100017510000000000015033033521020522 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/releasenotes/folsom.rst0000664000175100017510000000503615033033467022570 0ustar00mylesmyles.. Copyright 2012 Nicolas Barcet for Canonical Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. 
_folsom: ==================== Folsom ==================== This is the first release (Version 0.1) of Ceilometer. Please take all appropriate caution in using it, as it is a technology preview at this time. Version of OpenStack It is currently tested to work with OpenStack 2012.2 Folsom. Due to its use of openstack-common, and the modification that were made in term of notification to many other components (glance, cinder, quantum), it will not easily work with any prior version of OpenStack. Components Currently covered components are: Nova, Nova-network, Glance, Cinder and Quantum. Notably, there is no support yet for Swift and it was decided not to support nova-volume in favor of Cinder. A detailed list of meters covered per component can be found at in :ref:`measurements`. Nova with libvirt only Most of the Nova meters will only work with libvirt fronted hypervisors at the moment, and our test coverage was mostly done on KVM. Contributors are welcome to implement other virtualization backends' meters. Quantum delete events Quantum delete notifications do not include the same metadata as the other messages, so we ignore them for now. This isn't ideal, since it may mean we miss charging for some amount of time, but it is better than throwing away the existing metadata for a resource when it is deleted. Database backend The only tested and complete database backend is currently MongoDB, the SQLAlchemy one is still work in progress. Installation The current best source of information on how to deploy this project is found as the devstack implementation but feel free to come to #openstack-metering on OFTC for more info. Volume of data Please note that metering can generate lots of data very quickly. Have a look at the following spreadsheet to evaluate what you will end up with. 
https://wiki.openstack.org/wiki/EfficientMetering#Volume_of_data ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/doc/source/releasenotes/index.rst0000664000175100017510000000274115033033467022400 0ustar00mylesmyles.. Copyright 2012 New Dream Network, LLC (DreamHost) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================ Release Notes ============================ .. toctree:: :hidden: folsom * :ref:`folsom` * `Havana`_ * `Icehouse`_ * `Juno`_ * `Kilo`_ * `Liberty`_ Since Mitaka development cycle, we start to host release notes on `Ceilometer Release Notes`_ .. _Havana: https://wiki.openstack.org/wiki/ReleaseNotes/Havana#OpenStack_Metering_.28Ceilometer.29 .. _IceHouse: https://wiki.openstack.org/wiki/ReleaseNotes/Icehouse#OpenStack_Telemetry_.28Ceilometer.29 .. _Juno: https://wiki.openstack.org/wiki/ReleaseNotes/Juno#OpenStack_Telemetry_.28Ceilometer.29 .. _Kilo: https://wiki.openstack.org/wiki/ReleaseNotes/Kilo#OpenStack_Telemetry_.28Ceilometer.29 .. _Liberty: https://wiki.openstack.org/wiki/ReleaseNotes/Liberty#OpenStack_Telemetry_.28Ceilometer.29 .. 
_Ceilometer Release Notes: https://docs.openstack.org/releasenotes/ceilometer/ ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7899415 ceilometer-24.1.0.dev59/etc/0000775000175100017510000000000015033033521014537 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.8019414 ceilometer-24.1.0.dev59/etc/ceilometer/0000775000175100017510000000000015033033521016667 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/etc/ceilometer/ceilometer-config-generator.conf0000664000175100017510000000043015033033467025123 0ustar00mylesmyles[DEFAULT] output_file = etc/ceilometer/ceilometer.conf wrap_width = 79 namespace = ceilometer namespace = ceilometer-auth namespace = oslo.cache namespace = oslo.concurrency namespace = oslo.log namespace = oslo.messaging namespace = oslo.reports namespace = oslo.service.service ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.8019414 ceilometer-24.1.0.dev59/etc/ceilometer/examples/0000775000175100017510000000000015033033521020505 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/etc/ceilometer/examples/osprofiler_event_definitions.yaml0000664000175100017510000000130215033033467027356 0ustar00mylesmyles--- - event_type: profiler.* traits: project: fields: payload.project service: fields: payload.service name: fields: payload.name base_id: fields: payload.base_id trace_id: fields: payload.trace_id parent_id: fields: payload.parent_id timestamp: fields: payload.timestamp host: fields: payload.info.host path: fields: payload.info.request.path query: fields: payload.info.request.query method: fields: payload.info.request.method scheme: fields: payload.info.request.scheme db.statement: fields: payload.info.db.statement db.params: fields: 
payload.info.db.params ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/etc/ceilometer/polling.yaml0000664000175100017510000000075315033033467021235 0ustar00mylesmyles--- sources: - name: some_pollsters interval: 300 meters: - power.state - cpu - memory.usage - network.incoming.bytes - network.incoming.packets - network.outgoing.bytes - network.outgoing.packets - disk.device.read.bytes - disk.device.read.requests - disk.device.write.bytes - disk.device.write.requests - volume.size - volume.snapshot.size - volume.backup.size ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/etc/ceilometer/polling_all.yaml0000664000175100017510000000012715033033467022060 0ustar00mylesmyles--- sources: - name: all_pollsters interval: 300 meters: - "*" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/etc/ceilometer/rootwrap.conf0000664000175100017510000000172715033033467021433 0ustar00mylesmyles# Configuration for ceilometer-rootwrap # This file should be owned by (and only-writeable by) the root user [DEFAULT] # List of directories to load filter definitions from (separated by ','). # These directories MUST all be only writeable by root ! filters_path=/etc/ceilometer/rootwrap.d,/usr/share/ceilometer/rootwrap # List of directories to search executables in, in case filters do not # explicitely specify a full path (separated by ',') # If not specified, defaults to system PATH environment variable. # These directories MUST all be only writeable by root ! exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/sbin,/usr/local/bin # Enable logging to syslog # Default value is False use_syslog=False # Which syslog facility to use. # Valid values include auth, authpriv, syslog, user0, user1... # Default value is 'syslog' syslog_log_facility=syslog # Which messages to log. 
# INFO means log all usage # ERROR means only log unsuccessful attempts syslog_log_level=ERROR ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.8019414 ceilometer-24.1.0.dev59/etc/ceilometer/rootwrap.d/0000775000175100017510000000000015033033521020766 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/etc/ceilometer/rootwrap.d/ipmi.filters0000664000175100017510000000052115033033467023325 0ustar00mylesmyles# ceilometer-rootwrap command filters for IPMI capable nodes # This file should be owned by (and only-writeable by) the root user [Filters] privsep-rootwrap-sys_admin: RegExpFilter, privsep-helper, root, privsep-helper, --config-file, /etc/(?!\.\.).*, --privsep_context, ceilometer.privsep.sys_admin_pctxt, --privsep_sock_path, /tmp/.* ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/pyproject.toml0000664000175100017510000000013315033033467016706 0ustar00mylesmyles[build-system] requires = ["pbr>=6.0.0", "setuptools>=64.0.0"] build-backend = "pbr.build" ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7899415 ceilometer-24.1.0.dev59/releasenotes/0000775000175100017510000000000015033033521016455 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.8089414 ceilometer-24.1.0.dev59/releasenotes/notes/0000775000175100017510000000000015033033521017605 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/.placeholder0000664000175100017510000000000015033033467022067 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 
ceilometer-24.1.0.dev59/releasenotes/notes/add-aodh-metrics-afbe9b780fd137d6.yaml0000664000175100017510000000017415033033467026175 0ustar00mylesmyles--- features: - | Ceilometer is now able to poll the /metrics endpoint in Aodh to get evaluation results metrics. ././@PaxHeader0000000000000000000000000000020700000000000010214 xustar00113 path=ceilometer-24.1.0.dev59/releasenotes/notes/add-availability_zone-gnocchi-instance-15170e4966a89d63.yaml 22 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/add-availability_zone-gnocchi-instance-15170e4966a89d63.y0000664000175100017510000000064615033033467031473 0ustar00mylesmyles--- features: - | Add availability_zone attribute to gnocchi instance resources. Populates this attribute by consuming instance.create.end events. upgrade: - | To take advantage of this new feature you will need to update your gnocchi_resources.yaml file. See the example file for an example. You will need to ensure all required attributes of an instance are specified in the event_attributes.././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/add-db-legacy-clean-tool-7b3e3714f414c448.yaml0000664000175100017510000000026215033033467027171 0ustar00mylesmyles--- fixes: - > [`bug 1578128 `_] Add a tool that allow users to drop the legacy alarm and alarm_history tables. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/add-disk-latency-metrics-9e5c05108a78c3d9.yaml0000664000175100017510000000023215033033467027424 0ustar00mylesmyles--- features: - | Add `disk.device.read.latency` and `disk.device.write.latency` meters to capture total time used by read or write operations. 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/add-disk-size-pollsters-6b819d067f9cf736.yaml0000664000175100017510000000043115033033467027335 0ustar00mylesmyles--- features: - | The ``disk.ephemeral.size`` meter is now published as a compute pollster, in addition to the existing notification meter. - | The ``disk.root.size`` meter is now published as a compute pollster, in addition to the existing notification meter. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/add-full-snmpv3-usm-support-ab540c902fa89b9d.yaml0000664000175100017510000000022415033033467030226 0ustar00mylesmyles--- fixes: - > [`bug 1597618 `_] Add the full support of snmp v3 user security model. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/add-ipmi-sensor-data-gnocchi-70573728499abe86.yaml0000664000175100017510000000036715033033467030042 0ustar00mylesmyles--- upgrade: - | `ceilometer-upgrade` must be run to build IPMI sensor resource in Gnocchi. fixes: - | Ceilometer previously did not create IPMI sensor data from IPMI agent or Ironic in Gnocchi. This data is now pushed to Gnocchi. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/add-json-output-to-file-publisher-786380cb7e21b56b.yaml0000664000175100017510000000012515033033467031225 0ustar00mylesmyles--- features: - > Add new json output option for the existing file publisher. 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/add-loadbalancer-resource-type-a73c29594b72f012.yaml0000664000175100017510000000036615033033467030524 0ustar00mylesmyles--- fixes: - | [`bug 1848286 `_] Enable load balancer metrics by adding the loadbalancer resource type, allowing Gnocchi to capture measurement data for Octavia load balancers. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/add-magnum-event-4c75ed0bb268d19c.yaml0000664000175100017510000000014415033033467026130 0ustar00mylesmyles--- features: - > Added support for magnum bay CRUD events, event_type is 'magnum.bay.*'. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/add-map-trait-plugin-0d969f5cc7b18175.yaml0000664000175100017510000000057315033033467026600 0ustar00mylesmyles--- features: - | A ``map`` event trait plugin has been added. This allows notification meter attributes to be created by mapping one set of values from an attribute to another set of values defined in the meter definition. Additional options are also available for controlling how to handle edge cases, such as unknown values and case sensitivity. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/add-memory-swap-metric-f1633962ab2cf0f6.yaml0000664000175100017510000000015015033033467027200 0ustar00mylesmyles--- features: - Add memory swap metric for VM, including 'memory.swap.in' and 'memory.swap.out'. 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/add-parameter-for-disabled-projects-381da4543fff071d.yaml0000664000175100017510000000033615033033467031606 0ustar00mylesmyles--- features: - | The ``[polling] ignore_disabled_projects`` option has been added. This option allows polling agent to only parse enabled projects, to reduce procese time in case many projects are disabled. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/add-pool-size-metrics-cdecb979135bba85.yaml0000664000175100017510000000056015033033467027200 0ustar00mylesmyles--- features: - | Added the following meters to the central agent to capture these metrics for each storage pool by API. - `volume.provider.pool.capacity.total` - `volume.provider.pool.capacity.free` - `volume.provider.pool.capacity.provisioned` - `volume.provider.pool.capacity.virtual_free` - `volume.provider.pool.capacity.allocated` ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/add-power-state-metric-cdfbb3098b50a704.yaml0000664000175100017510000000011715033033467027247 0ustar00mylesmyles--- features: - | Added the new power.state metric from virDomainState. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/add-swift-storage_policy-attribute-322fbb5716c5bb10.yaml0000664000175100017510000000216015033033467031603 0ustar00mylesmyles--- features: - | The ``storage_policy`` resource metadata attribute has been added to the ``swift.containers.objects`` and ``swift.containers.objects.size`` meters, populated from already performed Swift account ``GET`` requests. 
This functionality requires using a new version of Swift that adds the ``storage_policy`` attribute when listing containers in an account. Ceilometer is backwards compatible with Swift versions that do not provide this functionality, but ``storage_policy`` will be set to ``None`` in samples and Gnocchi resources. - | An optional ``storage_policy`` attribute has been added to the ``swift_account`` Gnocchi resource type, to store the storage policy for Swift containers in Gnocchi. For Swift accounts, ``storage_policy`` will be set to ``None``. upgrade: - | To publish the ``storage_policy`` attribute for Swift containers, ``gnocchi_resources.yaml`` will need to be updated to the latest version. Swift in the target OpenStack cloud will also need upgrading to add support for providing the storage policy when listing containers. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/add-tenant-name-discovery-668260bb4b2b0e8c.yaml0000664000175100017510000000034215033033467027652 0ustar00mylesmyles--- features: - | Identify user and projects names with the help of their UUIDs in the polled samples. If they are identified, set "project_name" and "user_name" fields in the sample to the corresponding values. ././@PaxHeader0000000000000000000000000000020700000000000010214 xustar00113 path=ceilometer-24.1.0.dev59/releasenotes/notes/add-tool-for-migrating-data-to-gnocchi-cea8d4db68ce03d0.yaml 22 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/add-tool-for-migrating-data-to-gnocchi-cea8d4db68ce03d0.y0000664000175100017510000000034115033033467031644 0ustar00mylesmyles--- upgrade: - > Add a tool for migrating metrics data from Ceilometer's native storage to Gnocchi. Since we have deprecated Ceilometer API and the Gnocchi will be the recommended metrics data storage backend. 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/add-upgrade-check-framework-d78858c54cb85f91.yaml0000664000175100017510000000074715033033467030120 0ustar00mylesmyles--- prelude: > Added new tool ``ceilometer-status upgrade check``. features: - | New framework for ``ceilometer-status upgrade check`` command is added. This framework allows adding various checks which can be run before a Ceilometer upgrade to ensure if the upgrade can be performed safely. upgrade: - | Operator can now use new CLI tool ``ceilometer-status upgrade check`` to check if Ceilometer deployment can be safely upgraded from N-1 to N release. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/add-volume-pollster-metadata-d7b435fed9aac0aa.yaml0000664000175100017510000000067115033033467030670 0ustar00mylesmyles--- features: - > Add volume.volume_type_id and backup.is_incremental metadata for cinder pollsters. Also user_id information is now included for backups with the generated samples. upgrade: - > The cinder api microversion has been increased from Pike to Wallaby version (3.64) for volume/snapshot/backup related pollsters. These might not work until the cinder API has been upgraded up to this microversion. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/add-volume_type_id-attr-f29af86534907941.yaml0000664000175100017510000000125315033033467027242 0ustar00mylesmyles--- features: - | Added the ``volume_type_id`` attribute to ``volume.size`` notification samples, which stores the ID for the volume type of the given volume. - | Added the ``volume_type_id`` attribute to ``volume`` resources in Gnocchi, which stores the ID for the volume type of the given volume. 
upgrade: - | ``meters.yaml`` has been updated with changes to the ``volume.size`` notification meter. If you override this file in your deployment, it needs to be updated. - | ``gnocchi_resources.yaml`` has been updated with changes to the ``volume`` resource type. If you override this file in your deployment, it needs to be updated. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/aggregator-transformer-timeout-e0f42b6c96aa7ada.yaml0000664000175100017510000000025215033033467031276 0ustar00mylesmyles--- fixes: - > [`bug 1531626 `_] Ensure aggregator transformer timeout is honoured if size is not provided. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/always-requeue-7a2df9243987ab67.yaml0000664000175100017510000000112015033033467025620 0ustar00mylesmyles--- critical: - > The previous configuration options default for ``requeue_sample_on_dispatcher_error`` and ``requeue_event_on_dispatcher_error`` allowed to lose data very easily: if the dispatcher failed to send data to the backend (e.g. Gnocchi is down), then the dispatcher raised and the data were lost forever. This was completely unacceptable, and nobody should be able to configure Ceilometer in that way." upgrade: - > The options ``requeue_event_on_dispatcher_error`` and ``requeue_sample_on_dispatcher_error`` have been enabled and removed. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/batch-messaging-d126cc525879d58e.yaml0000664000175100017510000000100615033033467025705 0ustar00mylesmyles--- features: - > Add support for batch processing of messages from queue. This will allow the collector and notification agent to grab multiple messages per thread to enable more efficient processing. 
upgrade: - > batch_size and batch_timeout configuration options are added to both [notification] and [collector] sections of configuration. The batch_size controls the number of messages to grab before processing. Similarly, the batch_timeout defines the wait time before processing. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/bug-1929178-a8243526ce2311f7.yaml0000664000175100017510000000020315033033467024164 0ustar00mylesmyles--- deprecations: - | The ``[coordination] check_watchers`` parameter has been deprecated since it has been ineffective. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/bug-2007108-dba7163b245ad8fd.yaml0000664000175100017510000000027615033033467024454 0ustar00mylesmyles--- fixes: - | [`bug 2007108 `_] The retired metrics dependent on SNMP have been removed from the default ``polling.yaml``. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/bug-2113768-a2db3a59c8e13558.yaml0000664000175100017510000000032715033033467024330 0ustar00mylesmyles--- fixes: - | Fixed `bug #2113768 `__ where the Libvirt inspector did not catch exceptions thrown when calling interfaceStats function on a domain. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/cache-json-parsers-888307f3b6b498a2.yaml0000664000175100017510000000035115033033467026262 0ustar00mylesmyles--- fixes: - > [`bug 1550436 `_] Cache json parsers when building parsing logic to handle event and meter definitions. This will improve agent startup and setup time. 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/ceilometer-api-deprecate-862bfaa54e80fa01.yaml0000664000175100017510000000020415033033467027614 0ustar00mylesmyles--- deprecations: - Ceilometer API is deprecated. Use the APIs from Aodh (alarms), Gnocchi (metrics), and/or Panko (events). ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/ceilometer-api-removal-6bd44d3eab05e593.yaml0000664000175100017510000000010715033033467027335 0ustar00mylesmyles--- upgrade: - | The deprecated Ceilometer API has been removed. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/ceilometer-event-api-removed-49c57835e307b997.yaml0000664000175100017510000000031115033033467030175 0ustar00mylesmyles--- other: - >- The Events API (exposed at /v2/events) which was deprecated has been removed. The Panko project is now responsible for providing this API and can be installed separately. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/cinder-capacity-samples-de94dcfed5540b6c.yaml0000664000175100017510000000025215033033467027644 0ustar00mylesmyles--- features: - | Add support to capture volume capacity usage details from cinder. This data is extracted from notifications sent by Cinder starting in Ocata. 
././@PaxHeader0000000000000000000000000000021400000000000010212 xustar00118 path=ceilometer-24.1.0.dev59/releasenotes/notes/cinder-volume-size-poller-availability_zone-2d20a7527e2341b9.yaml 22 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/cinder-volume-size-poller-availability_zone-2d20a7527e2340000664000175100017510000000017615033033467032015 0ustar00mylesmyles--- features: - | The resource metadata for the Cinder volume size poller now includes the availability zone field. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/compute-discovery-interval-d19f7c9036a8c186.yaml0000664000175100017510000000063515033033467030161 0ustar00mylesmyles--- features: - > To minimise load on Nova API, an additional configuration option was added to control discovery interval vs metric polling interval. If resource_update_interval option is configured in compute section, the compute agent will discover new instances based on defined interval. The agent will continue to poll the discovered instances at the interval defined by pipeline. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/configurable-data-collector-e247aadbffb85243.yaml0000664000175100017510000000054515033033467030414 0ustar00mylesmyles--- features: - > [`bug 1480333 `_] Support ability to configure collector to capture events or meters mutually exclusively, rather than capturing both always. other: - > Configure individual dispatchers by specifying meter_dispatchers and event_dispatchers in configuration file. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/cors-support-70c33ba1f6825a7b.yaml0000664000175100017510000000052415033033467025371 0ustar00mylesmyles--- features: - > Support for CORS is added. 
More information can be found [`here `_] upgrade: - > The api-paste.ini file can be modified to include or exclude the CORs middleware. Additional configurations can be made to middleware as well. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/deprecate-aggregated-disk-metrics-54a395c05e74d685.yaml0000664000175100017510000000035515033033467031214 0ustar00mylesmyles--- deprecations: - | disk.* aggregated metrics for instance are deprecated, in favor of the per disk metrics (disk.device.*). Now, it's up to the backend to provide such aggregation feature. Gnocchi already provides this. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/deprecate-ceilometer-collector-b793b91cd28b9e7f.yaml0000664000175100017510000000070215033033467031063 0ustar00mylesmyles--- features: - | Because of deprecating the collector, the default publishers in pipeline.yaml and event_pipeline.yaml are now changed using database instead of notifier. deprecations: - | Collector is no longer supported in this release. The collector introduces lags in pushing data to backend. To optimize the architecture, Ceilometer push data through dispatchers using publishers in notification agent directly. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/deprecate-contrail-256177299deb6926.yaml0000664000175100017510000000024515033033467026267 0ustar00mylesmyles--- deprecations: - | Support for OpenContrail, which is currently known as Tungsten Fabric, has been deprecated and will be removed in a future release. 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/deprecate-events-6561f4059fa25c02.yaml0000664000175100017510000000020715033033467026011 0ustar00mylesmyles--- deprecations: - | The Ceilometer event subsystem and pipeline is now deprecated and will be removed in a future release. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/deprecate-file-dispatcher-2aff376db7609136.yaml0000664000175100017510000000037315033033467027644 0ustar00mylesmyles--- deprecations: - With collector service being deprecated, we now have to address the duplication between dispatchers and publishers. The file dispatcher is now marked as deprecated. Use the file publisher to push samples into a file. ././@PaxHeader0000000000000000000000000000022000000000000010207 xustar00122 path=ceilometer-24.1.0.dev59/releasenotes/notes/deprecate-generic-hardware-declarative-pollstar-dfa418bf6a5e0459.yaml 22 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/deprecate-generic-hardware-declarative-pollstar-dfa418bf60000664000175100017510000000110415033033467032400 0ustar00mylesmyles--- deprecations: - | ``GenericHardwareDeclarativePollster`` has been deprecated and will be removed in a future release. This pollster was designed to be used in TripleO deployment to gather hardware metrics from overcloud nodes but Telemetry services are no longer deployed in undercloud in current TripleO. - | The ``NodesDiscoveryTripleO`` discovery plugin has been deprecated and will be removed in a future release. This plugin is designed for TripleO deployment but no longer used since Telemetry services were removed from undercloud. 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/deprecate-http-control-exchanges-026a8de6819841f8.yaml0000664000175100017510000000056515033033467031134 0ustar00mylesmyles--- deprecations: - | Allow users to add additional exchanges in ceilometer.conf instead of hardcoding exchanges. Now original http_control_exchanges is being deprecated and renamed notification_control_exchanges. Besides, the new option is integrated with other exchanges in default EXCHANGE_OPTS to make it available to extend additional exchanges. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/deprecate-http-dispatcher-dbbaacee8182b550.yaml0000664000175100017510000000113515033033467030162 0ustar00mylesmyles--- upgrade: - Configuration values can passed in via the querystring of publisher in pipeline. For example, rather than setting target, timeout, verify_ssl, and batch_mode under [dispatcher_http] section of conf, you can specify http:///?verify_ssl=True&batch=True&timeout=10. Use `raw_only=1` if only the raw details of event are required. deprecations: - As the collector service is being deprecated, the duplication of publishers and dispatchers is being addressed. The http dispatcher is now marked as deprecated and the recommended path is to use http publisher. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/deprecate-http_timeout-ce98003e4949f9d9.yaml0000664000175100017510000000016015033033467027342 0ustar00mylesmyles--- deprecations: - | The ``[DEFAULT] http_timeout`` option has been deprecated because it is unused. 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/deprecate-kafka-publisher-17b4f221758e15da.yaml0000664000175100017510000000072515033033467027642 0ustar00mylesmyles--- features: - | Ceilometer supports generic notifier to publish data and allow user to customize parameters such as topic, transport driver and priority. The publisher configuration in pipeline.yaml can be notifer://[notifier_ip]:[notifier_port]?topic=[topic]&driver=driver&max_retry=100 Not only rabbit driver, but also other driver like kafka can be used. deprecations: - | Kafka publisher is deprecated to use generic notifier instead. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/deprecate-neutron-fwaas-e985afe956240c08.yaml0000664000175100017510000000024415033033467027376 0ustar00mylesmyles--- deprecations: - | Support for Neutron FWaaS has been officially deprecated. The feature has been useless since the Neutron FWaaS project was retired. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/deprecate-neutron-lbaas-5a36406cbe44bbe3.yaml0000664000175100017510000000024415033033467027472 0ustar00mylesmyles--- deprecations: - | Support for Neutron LBaaS has been officially deprecated. The feature has been useless since the Neutron LBaaS project was retired. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/deprecate-odl-07e3f59165612566.yaml0000664000175100017510000000016615033033467025143 0ustar00mylesmyles--- deprecations: - | Support for OpenDaylight has been deprecated and will be removed in a future release. 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/deprecate-pollster-list-ccf22b0dea44f043.yaml0000664000175100017510000000024715033033467027612 0ustar00mylesmyles--- deprecations: - | Deprecating support for enabling pollsters via command line. Meter and pollster enablement should be configured via polling.yaml file. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/deprecate-vmware-ae49e07e40e74577.yaml0000664000175100017510000000030315033033467026101 0ustar00mylesmyles--- deprecations: - | Support for VMWare vSphere has been deprecated, because the vmwareapi virt driver in nova has been marked experimental and may be removed in a future release. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/deprecate-windows-support-d784b975ce878864.yaml0000664000175100017510000000033515033033467027755 0ustar00mylesmyles--- deprecations: - | Support for running Ceilometer in Windows operating systems has been deprecated because of retirement of the Winstackers project. Because of this, Hyper-V inspector is also deprecated. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/deprecate-xen-support-27600e2bf7be548c.yaml0000664000175100017510000000020615033033467027153 0ustar00mylesmyles--- deprecations: - | Support for XenServer/Xen Cloud Platform has been deprecated and will be removed in a future release. 
././@PaxHeader0000000000000000000000000000021300000000000010211 xustar00117 path=ceilometer-24.1.0.dev59/releasenotes/notes/deprecated_database_event_dispatcher_panko-607d558c86a90f17.yaml 22 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/deprecated_database_event_dispatcher_panko-607d558c86a90f0000664000175100017510000000025215033033467032204 0ustar00mylesmyles--- deprecations: - The event database dispatcher is now deprecated. It has been moved to a new project, alongside the Ceilometer API for /v2/events, called Panko. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/drop-collector-4c207b35d67b2977.yaml0000664000175100017510000000055315033033467025520 0ustar00mylesmyles--- upgrade: - | The collector service is removed. From Ocata, it's possible to edit the pipeline.yaml and event_pipeline.yaml files and modify the publisher to provide the same functionality as collector dispatcher. You may change publisher to 'gnocchi', 'http', 'panko', or any combination of available publishers listed in documentation. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/drop-image-meter-9c9b6cebd546dae7.yaml0000664000175100017510000000074415033033467026315 0ustar00mylesmyles--- prelude: > In an effort to minimise the noise, Ceilometer will no longer produce meters which have no measurable data associated with it. Image meter only captures state information which is already captured in events and other meters. upgrade: - Any existing commands utilising `image` meter should be switched to `image.size` meter which will provide equivalent functionality deprecations: - The `image` meter is dropped in favour of `image.size` meter. 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/drop-instance-meter-1b657717b21a0f55.yaml0000664000175100017510000000066715033033467026442 0ustar00mylesmyles--- prelude: > Samples are required to measure some aspect of a resource. Samples not measuring anything will be dropped. upgrade: - The `instance` meter no longer will be generated. For equivalent functionality, perform the exact same query on any compute meter such as `cpu`, `disk.read.requests`, `memory.usage`, `network.incoming.bytes`, etc... deprecations: - The `instance` meter no longer will be generated. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/drop-kwapi-b687bc476186d01b.yaml0000664000175100017510000000012015033033467024710 0ustar00mylesmyles--- deprecations: - | Previously deprecated kwapi meters are not removed. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/drop-py-2-7-87352d5763131c13.yaml0000664000175100017510000000031515033033467024311 0ustar00mylesmyles--- upgrade: - | Python 2.7 support has been dropped. Last release of ceilometer to support py2.7 is OpenStack Train. The minimum version of Python now supported by ceilometer is Python 3.6. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/drop-python-3-6-and-3-7-f67097fa6894da52.yaml0000664000175100017510000000020115033033467026503 0ustar00mylesmyles--- upgrade: - | Python 3.6 & 3.7 support has been dropped. The minimum version of Python now supported is Python 3.8. 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/dynamic-pollster-system-6b45c8c973201b2b.yaml0000664000175100017510000000025415033033467027443 0ustar00mylesmyles--- features: - | Add dynamic pollster system. The dynamic pollster system enables operators to gather new metrics on the fly (without needing to code pollsters).././@PaxHeader0000000000000000000000000000021700000000000010215 xustar00121 path=ceilometer-24.1.0.dev59/releasenotes/notes/dynamic-pollster-system-for-non-openstack-apis-4e06694f223f34f3.yaml 22 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/dynamic-pollster-system-for-non-openstack-apis-4e06694f220000664000175100017510000000033615033033467032077 0ustar00mylesmyles--- features: - | Add the support for non-OpenStack APIs in the dynamic pollster system. This extension enables operators to create pollster on the fly to handle metrics from systems such as the RadosGW API. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/dynamic-pollster-url-joins-6cdb01c4015976f7.yaml0000664000175100017510000000135515033033467030051 0ustar00mylesmyles--- upgrade: - | When using dynamic pollsters to query OpenStack APIs, if the endpoint URL returned by Keystone does not have a trailing slash and ``url_path`` is a relative path, the ``url_path`` configured in the dynamic pollster would replace sections of the endpoint URL instead of being appended to the end of the URL. This behaviour has now been changed so that ``url_path`` values that do not start with a ``/`` are always appended to the end of endpoint URLs. This change may require existing dynamic pollsters that rely on this behaviour to be changed, but this allows dynamic pollsters to be added for OpenStack services that append the active project ID to the API endpoint URL (e.g. Trove). 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/enable-promethus-exporter-tls-76e78d4f4a52c6c4.yaml0000664000175100017510000000014615033033467030653 0ustar00mylesmyles--- features: - | Enhanced the Prometheus exporter to support TLS for exposing metrics securely.././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/event-type-race-c295baf7f1661eab.yaml0000664000175100017510000000024515033033467026067 0ustar00mylesmyles--- fixes: - > [`bug 1254800 `_] Add better support to catch race conditions when creating event_types ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/fix-1940660-5226988f2e7ae1bd.yaml0000664000175100017510000000041415033033467024344 0ustar00mylesmyles--- fixes: - > [`bug 1940660 `_] Fixes an issue with the Swift pollster where the ``[service_credentials] cafile`` option was not used. This could prevent communication with TLS-enabled Swift APIs. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/fix-agent-coordination-a7103a78fecaec24.yaml0000664000175100017510000000066415033033467027426 0ustar00mylesmyles--- critical: - > [`bug 1533787 `_] Fix an issue where agents are not properly getting registered to group when multiple notification agents are deployed. This can result in bad transformation as the agents are not coordinated. It is still recommended to set heartbeat_timeout_threshold = 0 in [oslo_messaging_rabbit] section when deploying multiple agents. 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/fix-aggregation-transformer-9472aea189fa8f65.yaml0000664000175100017510000000037415033033467030355 0ustar00mylesmyles--- fixes: - > [`bug 1539163 `_] Add ability to define whether to use first or last timestamps when aggregating samples. This will allow more flexibility when chaining transformers. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/fix-floatingip-pollster-f5172060c626b19e.yaml0000664000175100017510000000061215033033467027335 0ustar00mylesmyles--- fixes: - > [`bug 1536338 `_] Patch was added to fix the broken floatingip pollster that polled data from nova api, but since the nova api filtered the data by tenant, ceilometer was not getting any data back. The fix changes the pollster to use the neutron api instead to get the floating ip info. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/fix-network-lb-bytes-sample-5dec2c6f3a8ae174.yaml0000664000175100017510000000031415033033467030330 0ustar00mylesmyles--- fixes: - > [`bug 1530793 `_] network.services.lb.incoming.bytes meter was previous set to incorrect type. It should be a gauge meter. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/fix-notification-batch-9bb42cbdf817e7f9.yaml0000664000175100017510000000043015033033467027422 0ustar00mylesmyles--- fixes: - | The ``[notification] batch_size`` parameter now takes effect to enable batch processing of notifications. The ``[notification] batch_timeout`` parameter has been restored at the same time to determine how much and how long notifications are kept. 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/fix-radosgw-name-6de6899ddcd7e06d.yaml0000664000175100017510000000103415033033467026251 0ustar00mylesmyles--- upgrade: - | Use `radosgw.*` to enable/disable radosgw meters explicitly rather than `rgw.*` deprecations: - | Previously, to enable/disable radosgw.* meters, you must define entry_point name rather than meter name. This is corrected so you do not need to be aware of entry_point naming. Use `radosgw.*` to enable/disable radosgw meters explicitly rather than `rgw.*`. `rgw.*` support is deprecated and will be removed in Rocky. fixes: - | Fix ability to enable/disable radosgw.* meters explicitly ././@PaxHeader0000000000000000000000000000021200000000000010210 xustar00116 path=ceilometer-24.1.0.dev59/releasenotes/notes/fix-volume-provider-pool-capacity-metrics-7b8b0de29a513cea.yaml 22 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/fix-volume-provider-pool-capacity-metrics-7b8b0de29a513ce0000664000175100017510000000023315033033467032076 0ustar00mylesmyles--- fixes: - | [`bug 2113903 `_] Fix volume provider pool capacity metrics for ceph backend. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/gnocchi-cache-1d8025dfc954f281.yaml0000664000175100017510000000054215033033467025317 0ustar00mylesmyles--- features: - > Support resource caching in Gnocchi dispatcher to improve write performance to avoid additional queries. other: - > A dogpile.cache supported backend is required to enable cache. Additional configuration `options `_ are also required. 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/gnocchi-cache-b9ad4d85a1da8d3f.yaml0000664000175100017510000000031215033033467025607 0ustar00mylesmyles--- fixes: - > [`bug 255569 `_] Fix caching support in Gnocchi dispatcher. Added better locking support to enable smoother cache access. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/gnocchi-client-42cd992075ee53ab.yaml0000664000175100017510000000026715033033467025614 0ustar00mylesmyles--- features: - > Gnocchi dispatcher now uses client rather than direct http requests upgrade: - > gnocchiclient library is now a requirement if using ceilometer+gnocchi. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/gnocchi-host-metrics-829bcb965d8f2533.yaml0000664000175100017510000000032215033033467026702 0ustar00mylesmyles--- features: - > [`bug 1518338 `_] Add support for storing SNMP metrics in Gnocchi.This functionality requires Gnocchi v2.1.0 to be installed. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/gnocchi-no-metric-by-default-b643e09f5ffef2c4.yaml0000664000175100017510000000031415033033467030424 0ustar00mylesmyles--- issues: - | Ceilometer created metrics that could never get measures depending on the polling configuration. Metrics are now created only if Ceilometer gets at least a measure for them. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/gnocchi-orchestration-3497c689268df0d1.yaml0000664000175100017510000000023615033033467027074 0ustar00mylesmyles--- upgrade: - > gnocchi_resources.yaml in Ceilometer should be updated. 
fixes: - > Fix samples from Heat to map to correct Gnocchi resource type ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/gnocchi-udp-collector-00415e6674b5cc0f.yaml0000664000175100017510000000021715033033467027014 0ustar00mylesmyles--- fixes: - > [`bug 1523124 `_] Fix gnocchi dispatcher to support UDP collector ././@PaxHeader0000000000000000000000000000020600000000000010213 xustar00112 path=ceilometer-24.1.0.dev59/releasenotes/notes/handle-malformed-resource-definitions-ad4f69f898ced34d.yaml 22 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/handle-malformed-resource-definitions-ad4f69f898ced34d.ya0000664000175100017510000000054515033033467032113 0ustar00mylesmyles--- fixes: - > [`bug 1542189 `_] Handle malformed resource definitions in gnocchi_resources.yaml gracefully. Currently we raise an exception once we hit a bad resource and skip the rest. Instead the patch skips the bad resource and proceeds with rest of the definitions. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/http-dispatcher-batching-4e17fce46a196b07.yaml0000664000175100017510000000036415033033467027605 0ustar00mylesmyles--- features: - | In the [dispatcher_http] section of ceilometer.conf, batch_mode can be set to True to activate sending meters and events in batches, or False (default value) to send each meter and event with a fresh HTTP call. 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/http-dispatcher-verify-ssl-551d639f37849c6f.yaml0000664000175100017510000000112115033033467030003 0ustar00mylesmyles--- features: - In the [dispatcher_http] section of ceilometer.conf, verify_ssl can be set to True to use system-installed certificates (default value) or False to ignore certificate verification (use in development only!). verify_ssl can also be set to the location of a certificate file e.g. /some/path/cert.crt (use for self-signed certs) or to a directory of certificates. The value is passed as the 'verify' option to the underlying requests method, which is documented at http://docs.python-requests.org/en/master/user/advanced/#ssl-cert-verification ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/http-publisher-authentication-6371c5a9aa8d4c03.yaml0000664000175100017510000000140715033033467030703 0ustar00mylesmyles--- features: - In the 'publishers' section of a meter/event pipeline definition, https:// can now be used in addition to http://. Furthermore, either Basic or client-certificate authentication can be used (obviously, client cert only makes sense in the https case). For Basic authentication, use the form http://username:password@hostname/. For client certificate authentication pass the client certificate's path (and the key file path, if the key is not in the certificate file) using the parameters 'clientcert' and 'clientkey', e.g. https://hostname/path?clientcert=/path/to/cert&clientkey=/path/to/key. Any parameters or credentials used for http(s) publishers are removed from the URL before the actual HTTP request is made. 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/http_proxy_to_wsgi_enabled-616fa123809e1600.yaml0000664000175100017510000000132715033033467030116 0ustar00mylesmyles--- features: - Ceilometer sets up the HTTPProxyToWSGI middleware in front of Ceilometer. The purpose of this middleware is to set up the request URL correctly in case there is a proxy (for instance, a loadbalancer such as HAProxy) in front of Ceilometer. So, for instance, when TLS connections are being terminated in the proxy, and one tries to get the versions from the / resource of Ceilometer, one will notice that the protocol is incorrect; It will show 'http' instead of 'https'. So this middleware handles such cases. Thus helping Keystone discovery work correctly. The HTTPProxyToWSGI is off by default and needs to be enabled via a configuration value. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/improve-events-rbac-support-f216bd7f34b02032.yaml0000664000175100017510000000060015033033467030217 0ustar00mylesmyles--- upgrade: - > To utilize the new policy support. The policy.json file should be updated accordingly. The pre-existing policy.json file will continue to function as it does if policy changes are not required. fixes: - > [`bug 1504495 `_] Configure ceilometer to handle policy.json rules when possible. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/include-monasca-publisher-1f47dde52af50feb.yaml0000664000175100017510000000047615033033467030202 0ustar00mylesmyles--- features: - | Include a publisher for the Monasca API. A ``monasca://`` pipeline sink will send data to a Monasca instance, using credentials configured in ceilometer.conf. 
This functionality was previously available in the Ceilosca project (https://github.com/openstack/monasca-ceilometer). ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/index-events-mongodb-63cb04200b03a093.yaml0000664000175100017510000000033215033033467026557 0ustar00mylesmyles--- upgrade: - > Run db-sync to add new indices. fixes: - > [`bug 1526793 `_] Additional indices were added to better support querying of event data. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/instance-discovery-new-default-7f9b451a515dddf4.yaml0000664000175100017510000000051415033033467031025 0ustar00mylesmyles--- upgrade: - | Ceilometer legacy backends and Ceilometer API are now deprecated. Polling all nova instances from compute agent is no more required with Gnocchi. So we switch the [compute]instance_discovery_method to libvirt_metadata. To switch back to the old deprecated behavior you can set it back to 'naive'. ././@PaxHeader0000000000000000000000000000021100000000000010207 xustar00115 path=ceilometer-24.1.0.dev59/releasenotes/notes/instance-record-launched-created-deleted-d7f44df3bbcf0790.yaml 22 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/instance-record-launched-created-deleted-d7f44df3bbcf07900000664000175100017510000000014315033033467031771 0ustar00mylesmyles--- features: - | `launched_at`/`created_at`/`deleted_at` of Nova instances are now tracked. 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/keystone-v3-fab1e257c5672965.yaml0000664000175100017510000000010315033033467025030 0ustar00mylesmyles--- features: - > Add support for Keystone v3 authentication ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/kwapi_deprecated-c92b9e72c78365f0.yaml0000664000175100017510000000017215033033467026147 0ustar00mylesmyles--- deprecations: - The Kwapi pollsters are deprecated and will be removed in the next major version of Ceilometer. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/less-nova-polling-ac56687da3f8b1a3.yaml0000664000175100017510000000226015033033467026351 0ustar00mylesmyles--- features: - The Ceilometer compute agent can now retrieve some instance metadata from the metadata libvirt API instead of polling the Nova API. Since Mitaka, Nova fills this metadata with some information about the instance. To enable this feature you should set [compute]/instance_discovery_method = libvirt_metadata in the configuration file. The only downside of this method is that user_metadata (and some other instance attributes) are no longer part of the samples created by the agent. But when Gnocchi is used as backend, this is not an issue since Gnocchi doesn't store resource metadata aside of the measurements. And the missing informations are still retrieved through the Nova notifications and will fully update the resource information in Gnocchi. upgrade: - If you are using Gnocchi as backend it's strongly recommended to switch [compute]/instance_discovery_method to libvirt_metadata. This will reduce the load on the Nova API especially if you have many compute nodes. 
deprecations: - The [compute]/workload_partitioning = True is deprecated in favor of [compute]/instance_discovery_method = workload_partitioning ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/lookup-meter-def-vol-correctly-0122ae429275f2a6.yaml0000664000175100017510000000054115033033467030527 0ustar00mylesmyles--- fixes: - > [`bug 1536699 `_] Patch to fix volume field lookup in meter definition file. In case the field is missing in the definition, it raises a keyerror and aborts. Instead we should skip the missing field meter and continue with the rest of the definitions. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/make-instance-host-optional-972fa14405c1e2f6.yaml0000664000175100017510000000046515033033467030157 0ustar00mylesmyles--- upgrade: - | The ``instance`` resource type has been updated to make the ``host`` resource attribute optional. This allows the hypervisor a compute instance is running on to be withheld from Gnocchi's resource metadata, which may be required for security reasons e.g. for public clouds. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/manager-based-ipc-queues-85e3bf59ffdfb0ac.yaml0000664000175100017510000000154115033033467027773 0ustar00mylesmyles--- features: - | Workload partitioning of notification agent is now split into queues based on pipeline type (sample, event, etc...) rather than per individual pipeline. This will save some memory usage specifically for pipeline definitions with many source/sink combinations. upgrade: - | If workload partitioning of the notification agent is enabled, the notification agent should not run alongside pre-Queens agents. Doing so may result in missed samples when leveraging transformations. 
To upgrade without loss of data, set `notification_control_exchanges` option to empty so only existing `ceilometer-pipe-*` queues are processed. Once cleared, reset `notification_control_exchanges` option and launch the new notification agent(s). If `workload_partitioning` is not enabled, no special steps are required. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/memory-bandwidth-meter-f86cf01178573671.yaml0000664000175100017510000000025215033033467027075 0ustar00mylesmyles--- features: - Add two new meters, including memory.bandwidth.total and memory.bandwidth.local, to get memory bandwidth statistics based on Intel CMT feature. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/mongodb-handle-large-numbers-7c235598ca700f2d.yaml0000664000175100017510000000050615033033467030260 0ustar00mylesmyles--- fixes: - > [`bug 1532661 `_] Fix statistics query failures due to large numbers stored in MongoDB. Data from MongoDB is returned as Int64 for big numbers when int and float types are expected. The data is cast to appropriate type to handle large data. ././@PaxHeader0000000000000000000000000000020500000000000010212 xustar00111 path=ceilometer-24.1.0.dev59/releasenotes/notes/network-statistics-from-opendaylight-787df77484d8d751.yaml 22 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/network-statistics-from-opendaylight-787df77484d8d751.yam0000664000175100017510000000042215033033467031743 0ustar00mylesmyles--- prelude: > Network Statistics From OpenDaylight. features: - Add a ceilometer driver to collect network statistics information using REST APIs exposed by network-statistics module in OpenDaylight. 
- Add support for network statistics meters with gnocchi ././@PaxHeader0000000000000000000000000000022000000000000010207 xustar00122 path=ceilometer-24.1.0.dev59/releasenotes/notes/openstack-dynamic-pollsters-metadata-enrichment-703cf5914cf0c578.yaml 22 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/openstack-dynamic-pollsters-metadata-enrichment-703cf59140000664000175100017510000000015115033033467032247 0ustar00mylesmyles--- features: - | OpenStack Dynamic pollsters metadata enrichment with other OpenStack API's data. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/parallel_requests_option-a3f901b6001e26e4.yaml0000664000175100017510000000036615033033467027746 0ustar00mylesmyles--- features: - | A new option named `max_parallel_requests` is available to control the maximum number of parallel requests that can be executed by the agents. This option also replaces the `poolsize` option of the HTTP publisher. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/parallels-virt_type-ee29c4802fdf5c8e.yaml0000664000175100017510000000012415033033467027071 0ustar00mylesmyles--- fixes: - | The ``[DEFAULT] virt_type`` option now supports ``parallels``. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/pecan-debug-removed-dc737efbf911bde7.yaml0000664000175100017510000000007615033033467026763 0ustar00mylesmyles--- upgrade: - The api.pecan_debug option has been removed. 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/perf-events-meter-b06c2a915c33bfaf.yaml0000664000175100017510000000042515033033467026415 0ustar00mylesmyles--- features: - Add four new meters, including perf.cpu.cycles for the number of cpu cycles one instruction needs, perf.instructions for the count of instructions, perf.cache_references for the count of cache hits and cache_misses for the count of caches misses. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/pipeline-fallback-polling-3d962a0fff49ccdd.yaml0000664000175100017510000000030515033033467030147 0ustar00mylesmyles--- upgrade: - | The deprecated support of configure polling in the `pipeline.yaml` file has been removed. Ceilometer now only uses the `polling.yaml` file for polling configuration. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/polling-batch-size-7fe11925df8d1221.yaml0000664000175100017510000000064415033033467026336 0ustar00mylesmyles--- features: - > Add support for configuring the size of samples the poller will send in each batch. upgrade: - > batch_size option added to [polling] section of configuration. Use batch_size=0 to disable batching of samples. deprecations: - > The option batch_polled_samples in the [DEFAULT] section is deprecated. Use batch_size option in [polling] to configure and/or disable batching. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/polling-definition-efffb92e3810e571.yaml0000664000175100017510000000112015033033467026572 0ustar00mylesmyles--- upgrade: - Pipeline processing in polling agents was removed in Liberty cycle. 
A new polling specific definition file is created to handle polling functionality and pipeline definition file is now reserved exclusively for transformations and routing. The polling.yaml file follows the same syntax as the pipeline.yaml but only handles polling attributes such as interval, discovery, resources, meter matching. It is configured by setting cfg_file under the polling section.If no polling definition file is found, it will fallback to reuse pipeline_cfg_file. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/polling-deprecation-4d5b83180893c053.yaml0000664000175100017510000000022715033033467026441 0ustar00mylesmyles--- deprecations: - | Usage of pipeline.yaml for polling configuration is now deprecated. The dedicated polling.yaml should be used instead. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/prometheus-bcb201cfe46d5778.yaml0000664000175100017510000000013715033033467025174 0ustar00mylesmyles--- features: - | A new pulisher have been added to push data to Prometheus Pushgateway. ././@PaxHeader0000000000000000000000000000021500000000000010213 xustar00119 path=ceilometer-24.1.0.dev59/releasenotes/notes/publish-network-resources-with-invalid-state-6693c6fa1fefa097.yaml 22 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/publish-network-resources-with-invalid-state-6693c6fa1fef0000664000175100017510000000154115033033467032331 0ustar00mylesmyles--- upgrade: - | The ``ip.floating`` and ``network.services.vpn`` pollsters now publish samples for all found floating IPs and VPNs, even if they are known to have an unknown state, when they would previously be dropped. The volume of samples for such floating IPs and VPNs will be set to ``-1``. 
This improves visibility of floating IPs and VPNs with unknown states, allowing them to be monitored via samples and the Gnocchi metrics, making it easier to discover such resources for troubleshooting. It also moves some of the "business logic" for downstream rating/billing services such as CloudKitty out of Ceilometer itself. - | The ``network.services.vpn`` now publishes samples for VPNs with status ``ERROR``, when they would previously be dropped. The sample volume for VPNs in ``ERROR`` state is ``7``. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/refresh-legacy-cache-e4dbbd3e2eeca70b.yaml0000664000175100017510000000065415033033467027233 0ustar00mylesmyles--- fixes: - | A local cache is used when polling instance metrics to minimise calls Nova API. A new option is added `resource_cache_expiry` to configure a time to live for cache before it expires. This resolves issue where migrated instances are not removed from cache. This is only relevant when `instance_discovery_method` is set to `naive`. It is recommended to use `libvirt_metadata` if possible. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/remove-alarms-4df3cdb4f1fb5faa.yaml0000664000175100017510000000020515033033467026040 0ustar00mylesmyles--- features: - > Ceilometer alarms code is now fully removed from code base. Equivalent functionality is handled by Aodh. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/remove-batch_polled_samples-b40241c8aad3667d.yaml0000664000175100017510000000011015033033467030336 0ustar00mylesmyles--- upgrade: - | Remove deprecated option `batch_polled_samples`. 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/remove-cadf-http-f8449ced3d2a29d4.yaml0000664000175100017510000000037315033033467026157 0ustar00mylesmyles--- features: - > Support for CADF-only payload in HTTP dispatcher is dropped as audit middleware in pyCADF was dropped in Kilo cycle. upgrade: - > audit middleware in keystonemiddleware library should be used for similar support. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/remove-ceilometer-dbsync-53aa1b529f194f15.yaml0000664000175100017510000000014615033033467027540 0ustar00mylesmyles--- other: - The deprecated ceilometer-dbsync has been removed. Use ceilometer-upgrade instead. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/remove-check_watchers-a7c955703b6d9f57.yaml0000664000175100017510000000013115033033467027121 0ustar00mylesmyles--- upgrade: - | The ``[coordination] check_watchers`` parameter has been removed. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/remove-compute-disk-meters-264e686622886ff0.yaml0000664000175100017510000000016515033033467027716 0ustar00mylesmyles--- upgrade: - | The deprecated `disk.*` meters have been removed. Use the `disk.device.*` meters instead. 
././@PaxHeader0000000000000000000000000000020600000000000010213 xustar00112 path=ceilometer-24.1.0.dev59/releasenotes/notes/remove-compute-rate-deprecated-meters-201893c6b686b04a.yaml 22 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/remove-compute-rate-deprecated-meters-201893c6b686b04a.ya0000664000175100017510000000053315033033467031517 0ustar00mylesmyles--- upgrade: - | The deprecated meter for compute where removed: - disk.read.requests.rate - disk.write.requests.rate - disk.read.bytes.rate - disk.write.bytes.rate - disk.device.read.requests.rate - disk.device.write.requests.rate - disk.device.read.bytes.rate - disk.device.write.bytes.rate ././@PaxHeader0000000000000000000000000000021400000000000010212 xustar00118 path=ceilometer-24.1.0.dev59/releasenotes/notes/remove-compute-workload-partitioning-option-26538bc1e80500e3.yaml 22 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/remove-compute-workload-partitioning-option-26538bc1e80500000664000175100017510000000022315033033467032075 0ustar00mylesmyles--- upgrade: - | The deprecated `compute.workload_partitioning` option has been removed in favor of `compute.instance_discovery_method`. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/remove-direct-publisher-5785ee7edd16c4d9.yaml0000664000175100017510000000012715033033467027561 0ustar00mylesmyles--- upgrade: - | Remove direct publisher and use the explicit publisher instead. 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/remove-eventlet-6738321434b60c78.yaml0000664000175100017510000000012715033033467025543 0ustar00mylesmyles--- features: - > Remove eventlet from Ceilometer in favour of threaded approach ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/remove-exchange-control-options-75ecd49423639068.yaml0000664000175100017510000000012215033033467030734 0ustar00mylesmyles--- upgrade: - | The deprecated control exchange options have been removed. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/remove-file-dispatcher-56ba1066c20d314a.yaml0000664000175100017510000000011015033033467027142 0ustar00mylesmyles--- upgrade: - | The deprecated file dispatcher has been removed. ././@PaxHeader0000000000000000000000000000021500000000000010213 xustar00119 path=ceilometer-24.1.0.dev59/releasenotes/notes/remove-generic-hardware-declarative-pollster-e05c614f273ab149.yaml 22 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/remove-generic-hardware-declarative-pollster-e05c614f273a0000664000175100017510000000036715033033467032124 0ustar00mylesmyles--- upgrade: - | ``GenericHardwareDeclarativePollster`` has been removed. Because of this removal all metrics gathered by SNMP daemon have been removed as well. - | The ``NodesDiscoveryTripleO`` discovery plugin has been removed. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/remove-gnocchi-dispatcher-dd588252976c2abb.yaml0000664000175100017510000000055415033033467027762 0ustar00mylesmyles--- upgrade: - | The Gnocchi dispatcher has been removed and replaced by a native Gnocchi publisher. 
The configuration options from the `[dispatcher_gnocchi]` has been removed and should be passed via the URL in `pipeline.yaml`. The service authentication override can be done by adding specific credentials to a `[gnocchi]` section instead. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/remove-gnocchi-dispatcher-options-4f4ba2a155c1a766.yaml0000664000175100017510000000013215033033467031424 0ustar00mylesmyles--- upgrade: - | The deprecated `gnocchi_dispatcher` option group has been removed. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/remove-http-dispatcher-1afdce1d1dc3158d.yaml0000664000175100017510000000011015033033467027513 0ustar00mylesmyles--- upgrade: - | The deprecated http dispatcher has been removed. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/remove-intel-cmt-perf-meters-15d0fe72b2804f48.yaml0000664000175100017510000000042315033033467030254 0ustar00mylesmyles--- upgrade: - | The following meters were removed. Nova removed support for Intel CMT perf events in 22.0.0, and these meters can no longer be measured since then. - ``cpu_l3_cache_usage`` - ``memory_bandwidth_local`` - ``memory_bandwidth_total`` ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/remove-intel-node-manager-0889de66dede9ab0.yaml0000664000175100017510000000010315033033467030026 0ustar00mylesmyles--- upgrade: - | Support for Intel Node Manager was removed. 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/remove-kafka-broker-publisher-7026b370cfc831db.yaml0000664000175100017510000000014715033033467030537 0ustar00mylesmyles--- upgrade: - | The deprecated kafka publisher has been removed, use NotifierPublisher instead. ././@PaxHeader0000000000000000000000000000021100000000000010207 xustar00115 path=ceilometer-24.1.0.dev59/releasenotes/notes/remove-meter-definitions-cfg-file-config-476596fc86c36a81.yaml 22 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/remove-meter-definitions-cfg-file-config-476596fc86c36a810000664000175100017510000000022115033033467031470 0ustar00mylesmyles--- upgrade: - | Remove deprecated option meter_definitions_cfg_file, use meter_definitions_dirs to configure meter notification file. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/remove-meter-definitions-cfg-file-d57c726d563d805f.yaml0000664000175100017510000000013415033033467031242 0ustar00mylesmyles--- upgrade: - | The deprecated `meter_definitions_cfg_file` option has been removed. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/remove-monasca-d5ceda231839d43d.yaml0000664000175100017510000000011415033033467025706 0ustar00mylesmyles--- upgrade: - | Remove integration with the inactive Monasca project ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/remove-neutron-lbaas-d3d4a5327f6a167a.yaml0000664000175100017510000000011515033033467026756 0ustar00mylesmyles--- upgrade: - | Support for neutron-lbaas resources has been removed. 
././@PaxHeader0000000000000000000000000000021200000000000010210 xustar00116 path=ceilometer-24.1.0.dev59/releasenotes/notes/remove-notification-workload-partitioning-2cef114fb2478e39.yaml 22 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/remove-notification-workload-partitioning-2cef114fb2478e30000664000175100017510000000014515033033467032177 0ustar00mylesmyles--- upgrade: - | The deprecated workload partitioning for notification agent has been removed. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/remove-nova-http-log-option-64e97a511e58da5d.yaml0000664000175100017510000000012515033033467030224 0ustar00mylesmyles--- upgrade: - | The deprecated `nova_http_log_debug` option has been removed. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/remove-opencontrail-88656a9354179299.yaml0000664000175100017510000000025615033033467026366 0ustar00mylesmyles--- upgrade: - | Support for Open Contrail has been removed. Because no SDN is supported after the removal, the mechanism to pull metrics from SDN is also removed. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/remove-opendaylight-c3839bbe9aa2a227.yaml0000664000175100017510000000010215033033467026746 0ustar00mylesmyles--- upgrade: - | Support for OpenDaylight has been removed. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/remove-pollster-list-bda30d747fb87c9e.yaml0000664000175100017510000000011715033033467027176 0ustar00mylesmyles--- upgrade: - | The deprecated `pollster-list` option has been removed. 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/remove-publisher-topic-options-7a40787a3998921d.yaml0000664000175100017510000000031415033033467030612 0ustar00mylesmyles--- upgrade: - | The notifier publisher options `metering_topic` and `event_topic` are deprecated and will be removed. Use the `topic` query parameter in the notifier publisher URL instead. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/remove-py38-80670bdcfd4dd135.yaml0000664000175100017510000000016615033033467025075 0ustar00mylesmyles--- upgrade: - | Python 3.8 support was dropped. The minimum version of Python now supported is Python 3.9. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/remove-py39-8c39f81f856bee9f.yaml0000664000175100017510000000016615033033467025133 0ustar00mylesmyles--- upgrade: - | Support for Python 3.9 has been removed. Now Python 3.10 is the minimum version supported. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/remove-refresh-pipeline-618af089c5435db7.yaml0000664000175100017510000000065115033033467027405 0ustar00mylesmyles--- deprecations: - | The pipeline dynamic refresh code has been removed. Ceilometer relies on the cotyledon library for a few releases which provides reload functionality by sending the SIGHUP signal to the process. This achieves the same feature while making sure the reload is explicit once the file is correctly and entirely written to the disk, avoiding the failing load of half-written files. 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/remove-rpc-collector-d0d0a354140fd107.yaml0000664000175100017510000000051015033033467026652 0ustar00mylesmyles--- features: - > RPC collector support is dropped. The queue-based notifier publisher and collector was added as the recommended alternative as of Icehouse cycle. upgrade: - > Pipeline.yaml files for agents should be updated to notifier:// or udp:// publishers. The rpc:// publisher is no longer supported. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/remove-sahara-9254593d4fb137b9.yaml0000664000175100017510000000042115033033467025321 0ustar00mylesmyles--- upgrade: - | Default value of the ``[notification] notification_control_exchanges`` option has been updated and ``sahara`` is no longer included by default. - | The default event definiton has been updated and no longer includes events for sahara. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/remove-service-type-volume-v2-08c81098dc7c0922.yaml0000664000175100017510000000022415033033467030324 0ustar00mylesmyles--- features: - | The deprecated ``[service_types] cinderv2`` option has been removed. Use the ``[service_types] cinder`` option instead. ././@PaxHeader0000000000000000000000000000021700000000000010215 xustar00121 path=ceilometer-24.1.0.dev59/releasenotes/notes/remove-shuffle_time_before_polling_task-option-05a4d225236c64b1.yaml 22 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/remove-shuffle_time_before_polling_task-option-05a4d225230000664000175100017510000000024415033033467032315 0ustar00mylesmyles--- deprecations: - | The `shuffle_time_before_polling_task` option has been removed. This option never worked in the way it was originally intended too. 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/remove-transformers-14e00a789dedd76b.yaml0000664000175100017510000000013015033033467027014 0ustar00mylesmyles--- upgrade: - | The support for transformers has been removed from the pipeline. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/remove-uml-e86feeabdd16c628.yaml0000664000175100017510000000022215033033467025233 0ustar00mylesmyles--- upgrade: - | The ``[DEFAULT] virt_type`` option no longer supports ``uml``. UML support by nova was removed in nova 23.3.0 release. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/remove-vsphere-support-411c97b66bdcd264.yaml0000664000175100017510000000042415033033467027400 0ustar00mylesmyles--- upgrade: - | Support for VMware vSphere has been removed. deprecations: - | The ``[DEFAULT] hypervisor_inspector`` option has been deprecated, because libvirt is the only supported hypervisor currently. The option will be removed in a future release. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/remove-windows-support-0d280cc7c7fffc61.yaml0000664000175100017510000000025215033033467027556 0ustar00mylesmyles--- upgrade: - | Support for running ceilometer in Windows operating systems has been removed. Because of the removal, Hyper-V inspector has also been removed. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/remove-xen-support-7cb932b7bc621269.yaml0000664000175100017510000000012215033033467026433 0ustar00mylesmyles--- upgrade: - | Support for XenServer/Xen Cloud Platform has been removed. 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/removed-rgw-ae3d80c2eafc9319.yaml0000664000175100017510000000013515033033467025313 0ustar00mylesmyles--- upgrade: - | Deprecated `rgw.*` meters have been removed. Use `radosgw.*` instead. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/rename-ceilometer-dbsync-eb7a1fa503085528.yaml0000664000175100017510000000106615033033467027512 0ustar00mylesmyles--- prelude: > Ceilometer backends are no more only databases but also REST API like Gnocchi. So ceilometer-dbsync binary name doesn't make a lot of sense and have been renamed ceilometer-upgrade. The new binary handles database schema upgrade like ceilometer-dbsync does, but it also handle any changes needed in configured ceilometer backends like Gnocchi. deprecations: - For backward compatibility reason we temporary keep ceilometer-dbsync, at least for one major version to ensure deployer have time update their tooling. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/rename-tenant_name_discovery-1675a236bb51176b.yaml0000664000175100017510000000024515033033467030376 0ustar00mylesmyles--- deprecations: - | The ``[polling] tenant_name_discovery`` option has been deprecated in favor of the new ``[polling] identity_name_discovery`` option. 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922512.0 ceilometer-24.1.0.dev59/releasenotes/notes/reno.cache0000664000175100017510000031122615033033520021541 0ustar00mylesmyles--- dates: - date: 1412085396 version: 2014.2.rc1 - date: 1684746217 version: rocky-eol - date: 1520408763 version: 9.0.5 - date: 1592213503 version: 13.1.1 - date: 1434666174 version: 2014.1.5 - date: 1406205710 version: 2014.2.b2 - date: 1364311213 version: 2013.1.rc1 - date: 1411224293 version: 2013.2.4 - date: 1380741396 version: 2013.2.rc1 - date: 1501075977 version: 8.1.0 - date: 1582807779 version: 12.1.0 - date: 1602669309 version: 15.0.0 - date: 1644490116 version: 17.0.1 - date: 1733851441 version: xena-eol - date: 1429864765 version: 2015.1.0rc2 - date: 1457011981 version: 6.0.0.0b3 - date: 1497943103 version: 7.0.5 - date: 1622211601 version: train-em - date: 1706804101 version: yoga-eom - date: 1438111278 version: 2015.1.1 - date: 1396550187 version: 2013.2.3 - date: 1466533368 version: 6.1.0 - date: 1462886962 version: 2015.1.4 - date: 1469096326 version: 6.1.3 - date: 1396984487 version: 2014.1.rc2 - date: 1358155371 version: grizzly-2 - date: 1515513276 version: 9.0.4 - date: 1694787385 version: 21.0.0.0rc1 - date: 1743590656 version: 24.0.0 - date: 1435069465 version: 5.0.0.0b1 - date: 1605204417 version: stein-em - date: 1714417462 version: zed-eom - date: 1664971523 version: 19.0.0 - date: 1386241369 version: 2014.1.b1 - date: 1473773791 version: 5.0.5 - date: 1412291010 version: 2014.1.3 - date: 1733851390 version: wallaby-eol - date: 1397726707 version: '2014.1' - date: 1394123552 version: 2014.1.b3 - date: 1350051446 version: '0.1' - date: 1412761554 version: 2014.2.rc2 - date: 1402003239 version: 2014.1.1 - date: 1570718191 version: 13.0.0.0rc2 - date: 1520408762 version: 8.1.4 - date: 1627386784 version: 14.1.0 - date: 1444748791 version: 2015.1.2 - date: 1413297757 version: 2014.2.rc3 - date: 1746004856 version: 2023.2-eol - 
date: 1637834597 version: ussuri-em - date: 1528987359 version: 9.0.6 - date: 1505233720 version: 8.1.1 - date: 1378482234 version: 2013.2.b3 - date: 1370547598 version: 2013.1.2 - date: 1495019482 version: 7.0.4 - date: 1387060092 version: 2013.2.1 - date: 1453417145 version: 2015.1.3 - date: 1458482958 version: 6.0.0.0rc1 - date: 1449511727 version: 5.0.1 - date: 1369926527 version: 2013.2.b1 - date: 1449522098 version: juno-eol - date: 1392315969 version: 2013.2.2 - date: 1508891540 version: newton-eol - date: 1612205188 version: 13.1.2 - date: 1396279926 version: 2014.1.rc1 - date: 1708337920 version: train-eol - date: 1505233732 version: 9.0.1 - date: 1428645657 version: 2014.2.3 - date: 1733852996 version: yoga-eol - date: 1643024389 version: 15.1.0 - date: 1745853470 version: 21.0.1 - date: 1726489022 version: 23.0.0.0rc1 - date: 1557524074 version: pike-em - date: 1569489392 version: 13.0.0.0rc1 - date: 1374161439 version: 2013.2.b2 - date: 1444902245 version: 5.0.0 - date: 1443167326 version: 5.0.0.0rc1 - date: 1650904648 version: victoria-em - date: 1426205979 version: 2014.1.4 - date: 1467909130 version: 6.1.1 - date: 1475248706 version: 7.0.0.0rc3 - date: 1663058977 version: 19.0.0.0rc1 - date: 1413462386 version: '2014.2' - date: 1418917773 version: 2015.1.0b1 - date: 1428606096 version: 2015.1.0rc1 - date: 1467909086 version: 5.0.4 - date: 1554898277 version: 12.0.0 - date: 1694094120 version: stein-eol - date: 1685621742 version: 19.1.0 - date: 1375981989 version: 2013.1.3 - date: 1417754940 version: 2014.2.1 - date: 1732213716 version: 2023.1-eom - date: 1510772145 version: 8.1.2 - date: 1587561053 version: 14.0.0.0rc1 - date: 1390488924 version: 2014.1.b2 - date: 1441285422 version: 5.0.0.0b3 - date: 1459241032 version: 6.0.0.0rc2 - date: 1382096929 version: 2013.1.4 - date: 1468598046 version: 7.0.0.0b2 - date: 1517956061 version: 8.1.3 - date: 1412039966 version: havana-eol - date: 1712142047 version: 22.0.0 - date: 1528984124 version: 10.0.1 - 
date: 1667563074 version: wallaby-em - date: 1711474222 version: xena-eom - date: 1528987283 version: 8.1.5 - date: 1555111564 version: ocata-em - date: 1711474072 version: victoria-eom - date: 1727866163 version: 23.0.0 - date: 1409830744 version: 2014.2.b3 - date: 1623943929 version: ocata-eol - date: 1365087346 version: '2013.1' - date: 1696417153 version: 21.0.0 - date: 1646743405 version: 18.0.0.0rc1 - date: 1582807748 version: 11.1.0 - date: 1491847553 version: 6.1.5 - date: 1480904367 version: 7.0.1 - date: 1486057664 version: 8.0.0 - date: 1556083018 version: 9.0.7 - date: 1361523168 version: 2013.1.g3 - date: 1435801216 version: icehouse-eol - date: 1481712529 version: liberty-eol - date: 1572260382 version: queens-em - date: 1423071132 version: 2015.1.0b2 - date: 1742207877 version: 24.0.0.0rc1 - date: 1595238570 version: 12.1.1 - date: 1501075943 version: 7.1.0 - date: 1733853441 version: zed-eol - date: 1402581210 version: 2014.2.b1 - date: 1631671165 version: 17.0.0.0rc1 - date: 1464028257 version: 5.0.3 - date: 1430394796 version: 2015.1.0 - date: 1426772592 version: 2015.1.0b3 - date: 1489179336 version: 8.0.1 - date: 1503342257 version: 9.0.0 - date: 1685621731 version: 18.1.0 - date: 1397592951 version: 2014.1.rc3 - date: 1423154188 version: 2014.2.2 - date: 1681464991 version: xena-em - date: 1407449801 version: 2014.1.2 - date: 1600938815 version: 15.0.0.0rc1 - date: 1468597025 version: 6.1.2 - date: 1462975027 version: kilo-eol - date: 1444135273 version: 5.0.0.0rc2 - date: 1449155783 version: 6.0.0.0b1 - date: 1384884420 version: folsom-eol - date: 1396137345 version: grizzly-eol - date: 1438258228 version: 5.0.0.0b2 - date: 1381738082 version: 2013.2.rc2 - date: 1453737897 version: 5.0.2 - date: 1618397861 version: 16.0.0 - date: 1532973482 version: 11.0.0 - date: 1644488343 version: 16.0.1 - date: 1518056786 version: 10.0.0 - date: 1505233710 version: 7.1.1 - date: 1475759477 version: 7.0.0 - date: 1510858094 version: 9.0.2 - date: 1733815963 
version: victoria-eol - date: 1589366177 version: 14.0.0 - date: 1710512945 version: 22.0.0.0rc1 - date: 1499241125 version: mitaka-eol - date: 1582807807 version: 13.1.0 - date: 1447959150 version: 2014.2.4 - date: 1511900036 version: 9.0.3 - date: 1584635929 version: rocky-em - date: 1648640458 version: 18.0.0 - date: 1474036361 version: 7.0.0.0rc1 - date: 1571229206 version: 13.0.0 - date: 1460013915 version: 6.0.0 - date: 1553163605 version: 12.0.0.0rc1 - date: 1708432532 version: ussuri-eol - date: 1674040590 version: 17.0.2 - date: 1679486939 version: 20.0.0 - date: 1485835234 version: 6.1.4 - date: 1474470801 version: 7.0.0.0rc2 - date: 1490160628 version: 7.0.3 - date: 1633518409 version: 17.0.0 - date: 1453385741 version: 6.0.0.0b2 - date: 1459325815 version: 6.0.0.0rc3 - date: 1500292926 version: 8.0.2 - date: 1368717117 version: 2013.1.1 - date: 1472809176 version: 7.0.0.0b3 - date: 1711474149 version: wallaby-eom - date: 1434579740 version: 5.0.0a0 - date: 1616405239 version: 16.0.0.0rc1 - date: 1488297427 version: 7.0.2 - date: 1669979009 version: queens-eol - date: 1539640766 version: 11.0.1 - date: 1382015954 version: '2013.2' - date: 1677843075 version: 20.0.0.0rc1 - date: 1659345073 version: pike-eol file-contents: releasenotes/notes/add-aodh-metrics-afbe9b780fd137d6.yaml: features: - 'Ceilometer is now able to poll the /metrics endpoint in Aodh to get evaluation results metrics. ' releasenotes/notes/add-availability_zone-gnocchi-instance-15170e4966a89d63.yaml: features: - 'Add availability_zone attribute to gnocchi instance resources. Populates this attribute by consuming instance.create.end events. ' upgrade: - 'To take advantage of this new feature you will need to update your gnocchi_resources.yaml file. See the example file for an example. You will need to ensure all required attributes of an instance are specified in the event_attributes.' 
releasenotes/notes/add-db-legacy-clean-tool-7b3e3714f414c448.yaml: fixes: - '[`bug 1578128 `_] Add a tool that allow users to drop the legacy alarm and alarm_history tables. ' releasenotes/notes/add-disk-latency-metrics-9e5c05108a78c3d9.yaml: features: - 'Add `disk.device.read.latency` and `disk.device.write.latency` meters to capture total time used by read or write operations. ' releasenotes/notes/add-disk-size-pollsters-6b819d067f9cf736.yaml: features: - 'The ``disk.ephemeral.size`` meter is now published as a compute pollster, in addition to the existing notification meter. ' - 'The ``disk.root.size`` meter is now published as a compute pollster, in addition to the existing notification meter. ' releasenotes/notes/add-full-snmpv3-usm-support-ab540c902fa89b9d.yaml: fixes: - '[`bug 1597618 `_] Add the full support of snmp v3 user security model. ' releasenotes/notes/add-ipmi-sensor-data-gnocchi-70573728499abe86.yaml: fixes: - 'Ceilometer previously did not create IPMI sensor data from IPMI agent or Ironic in Gnocchi. This data is now pushed to Gnocchi. ' upgrade: - '`ceilometer-upgrade` must be run to build IPMI sensor resource in Gnocchi. ' releasenotes/notes/add-json-output-to-file-publisher-786380cb7e21b56b.yaml: features: - 'Add new json output option for the existing file publisher. ' releasenotes/notes/add-loadbalancer-resource-type-a73c29594b72f012.yaml: fixes: - '[`bug 1848286 `_] Enable load balancer metrics by adding the loadbalancer resource type, allowing Gnocchi to capture measurement data for Octavia load balancers. ' releasenotes/notes/add-magnum-event-4c75ed0bb268d19c.yaml: features: - 'Added support for magnum bay CRUD events, event_type is ''magnum.bay.*''. ' releasenotes/notes/add-map-trait-plugin-0d969f5cc7b18175.yaml: features: - 'A ``map`` event trait plugin has been added. This allows notification meter attributes to be created by mapping one set of values from an attribute to another set of values defined in the meter definition. 
Additional options are also available for controlling how to handle edge cases, such as unknown values and case sensitivity. ' releasenotes/notes/add-memory-swap-metric-f1633962ab2cf0f6.yaml: features: - Add memory swap metric for VM, including 'memory.swap.in' and 'memory.swap.out'. releasenotes/notes/add-parameter-for-disabled-projects-381da4543fff071d.yaml: features: - 'The ``[polling] ignore_disabled_projects`` option has been added. This option allows polling agent to only parse enabled projects, to reduce procese time in case many projects are disabled. ' releasenotes/notes/add-pool-size-metrics-cdecb979135bba85.yaml: features: - 'Added the following meters to the central agent to capture these metrics for each storage pool by API. - `volume.provider.pool.capacity.total` - `volume.provider.pool.capacity.free` - `volume.provider.pool.capacity.provisioned` - `volume.provider.pool.capacity.virtual_free` - `volume.provider.pool.capacity.allocated` ' releasenotes/notes/add-power-state-metric-cdfbb3098b50a704.yaml: features: - 'Added the new power.state metric from virDomainState. ' releasenotes/notes/add-swift-storage_policy-attribute-322fbb5716c5bb10.yaml: features: - 'The ``storage_policy`` resource metadata attribute has been added to the ``swift.containers.objects`` and ``swift.containers.objects.size`` meters, populated from already performed Swift account ``GET`` requests. This functionality requires using a new version of Swift that adds the ``storage_policy`` attribute when listing containers in an account. Ceilometer is backwards compatible with Swift versions that do not provide this functionality, but ``storage_policy`` will be set to ``None`` in samples and Gnocchi resources. ' - 'An optional ``storage_policy`` attribute has been added to the ``swift_account`` Gnocchi resource type, to store the storage policy for Swift containers in Gnocchi. For Swift accounts, ``storage_policy`` will be set to ``None``. 
' upgrade: - 'To publish the ``storage_policy`` attribute for Swift containers, ``gnocchi_resources.yaml`` will need to be updated to the latest version. Swift in the target OpenStack cloud will also need upgrading to add support for providing the storage policy when listing containers. ' releasenotes/notes/add-tenant-name-discovery-668260bb4b2b0e8c.yaml: features: - "Identify user and projects names with the help of their UUIDs \nin the polled\ \ samples. If they are identified, set \"project_name\" \nand \"user_name\"\ \ fields in the sample to the corresponding values.\n" releasenotes/notes/add-tool-for-migrating-data-to-gnocchi-cea8d4db68ce03d0.yaml: upgrade: - 'Add a tool for migrating metrics data from Ceilometer''s native storage to Gnocchi. Since we have deprecated Ceilometer API and the Gnocchi will be the recommended metrics data storage backend. ' releasenotes/notes/add-upgrade-check-framework-d78858c54cb85f91.yaml: features: - 'New framework for ``ceilometer-status upgrade check`` command is added. This framework allows adding various checks which can be run before a Ceilometer upgrade to ensure if the upgrade can be performed safely. ' prelude: 'Added new tool ``ceilometer-status upgrade check``. ' upgrade: - 'Operator can now use new CLI tool ``ceilometer-status upgrade check`` to check if Ceilometer deployment can be safely upgraded from N-1 to N release. ' releasenotes/notes/add-volume-pollster-metadata-d7b435fed9aac0aa.yaml: features: - 'Add volume.volume_type_id and backup.is_incremental metadata for cinder pollsters. Also user_id information is now included for backups with the generated samples. ' upgrade: - 'The cinder api microversion has been increased from Pike to Wallaby version (3.64) for volume/snapshot/backup related pollsters. These might not work until the cinder API has been upgraded up to this microversion. 
' releasenotes/notes/add-volume_type_id-attr-f29af86534907941.yaml: features: - 'Added the ``volume_type_id`` attribute to ``volume.size`` notification samples, which stores the ID for the volume type of the given volume. ' - 'Added the ``volume_type_id`` attribute to ``volume`` resources in Gnocchi, which stores the ID for the volume type of the given volume. ' upgrade: - '``meters.yaml`` has been updated with changes to the ``volume.size`` notification meter. If you override this file in your deployment, it needs to be updated. ' - '``gnocchi_resources.yaml`` has been updated with changes to the ``volume`` resource type. If you override this file in your deployment, it needs to be updated. ' releasenotes/notes/aggregator-transformer-timeout-e0f42b6c96aa7ada.yaml: fixes: - '[`bug 1531626 `_] Ensure aggregator transformer timeout is honoured if size is not provided. ' releasenotes/notes/always-requeue-7a2df9243987ab67.yaml: critical: - 'The previous configuration options default for `requeue_sample_on_dispatcher_error'' and `requeue_event_on_dispatcher_error'' allowed to lose data very easily: if the dispatcher failed to send data to the backend (e.g. Gnocchi is down), then the dispatcher raised and the data were lost forever. This was completely unacceptable, and nobody should be able to configure Ceilometer in that way." ' upgrade: - 'The options `requeue_event_on_dispatcher_error'' and `requeue_sample_on_dispatcher_error'' have been enabled and removed. ' releasenotes/notes/batch-messaging-d126cc525879d58e.yaml: features: - 'Add support for batch processing of messages from queue. This will allow the collector and notification agent to grab multiple messages per thread to enable more efficient processing. ' upgrade: - 'batch_size and batch_timeout configuration options are added to both [notification] and [collector] sections of configuration. The batch_size controls the number of messages to grab before processing. 
Similarly, the batch_timeout defines the wait time before processing. ' releasenotes/notes/bug-1929178-a8243526ce2311f7.yaml: deprecations: - 'The ``[coordination] check_watchers`` parameter has been deprecated since it has been ineffective. ' releasenotes/notes/bug-2007108-dba7163b245ad8fd.yaml: fixes: - '[`bug 2007108 `_] The retired metrics dependent on SNMP have been removed from the default ``polling.yaml``. ' releasenotes/notes/bug-2113768-a2db3a59c8e13558.yaml: fixes: - 'Fixed `bug #2113768 `__ where the Libvirt inspector did not catch exceptions thrown when calling interfaceStats function on a domain. ' releasenotes/notes/cache-json-parsers-888307f3b6b498a2.yaml: fixes: - '[`bug 1550436 `_] Cache json parsers when building parsing logic to handle event and meter definitions. This will improve agent startup and setup time. ' releasenotes/notes/ceilometer-api-deprecate-862bfaa54e80fa01.yaml: deprecations: - Ceilometer API is deprecated. Use the APIs from Aodh (alarms), Gnocchi (metrics), and/or Panko (events). releasenotes/notes/ceilometer-api-removal-6bd44d3eab05e593.yaml: upgrade: - 'The deprecated Ceilometer API has been removed. ' releasenotes/notes/ceilometer-event-api-removed-49c57835e307b997.yaml: other: - The Events API (exposed at /v2/events) which was deprecated has been removed. The Panko project is now responsible for providing this API and can be installed separately. releasenotes/notes/cinder-capacity-samples-de94dcfed5540b6c.yaml: features: - 'Add support to capture volume capacity usage details from cinder. This data is extracted from notifications sent by Cinder starting in Ocata. ' releasenotes/notes/cinder-volume-size-poller-availability_zone-2d20a7527e2341b9.yaml: features: - 'The resource metadata for the Cinder volume size poller now includes the availability zone field. 
' releasenotes/notes/compute-discovery-interval-d19f7c9036a8c186.yaml: features: - 'To minimise load on Nova API, an additional configuration option was added to control discovery interval vs metric polling interval. If resource_update_interval option is configured in compute section, the compute agent will discover new instances based on defined interval. The agent will continue to poll the discovered instances at the interval defined by pipeline. ' releasenotes/notes/configurable-data-collector-e247aadbffb85243.yaml: features: - '[`bug 1480333 `_] Support ability to configure collector to capture events or meters mutally exclusively, rather than capturing both always. ' other: - 'Configure individual dispatchers by specifying meter_dispatchers and event_dispatchers in configuration file. ' releasenotes/notes/cors-support-70c33ba1f6825a7b.yaml: features: - 'Support for CORS is added. More information can be found [`here `_] ' upgrade: - 'The api-paste.ini file can be modified to include or exclude the CORs middleware. Additional configurations can be made to middleware as well. ' releasenotes/notes/deprecate-aggregated-disk-metrics-54a395c05e74d685.yaml: deprecations: - 'disk.* aggregated metrics for instance are deprecated, in favor of the per disk metrics (disk.device.*). Now, it''s up to the backend to provide such aggregation feature. Gnocchi already provides this. ' releasenotes/notes/deprecate-ceilometer-collector-b793b91cd28b9e7f.yaml: deprecations: - 'Collector is no longer supported in this release. The collector introduces lags in pushing data to backend. To optimize the architecture, Ceilometer push data through dispatchers using publishers in notification agent directly. ' features: - 'Because of deprecating the collector, the default publishers in pipeline.yaml and event_pipeline.yaml are now changed using database instead of notifier. 
' releasenotes/notes/deprecate-contrail-256177299deb6926.yaml: deprecations: - 'Support for OpenContrail, which is currently known as Tungsten Fabric, has been deprecated and will be removed in a future release. ' releasenotes/notes/deprecate-events-6561f4059fa25c02.yaml: deprecations: - 'The Ceilometer event subsystem and pipeline is now deprecated and will be removed in a future release. ' releasenotes/notes/deprecate-file-dispatcher-2aff376db7609136.yaml: deprecations: - With collector service being deprecated, we now have to address the duplication between dispatchers and publishers. The file dispatcher is now marked as deprecated. Use the file publisher to push samples into a file. releasenotes/notes/deprecate-generic-hardware-declarative-pollstar-dfa418bf6a5e0459.yaml: deprecations: - '``GenericHardwareDeclarativePollster`` has been deprecated and will be removed in a future release. This pollster was designed to be used in TripleO deployment to gather hardware metrics from overcloud nodes but Telemetry services are no longer deployed in undercloud in current TripleO. ' - 'The ``NodesDiscoveryTripleO`` discovery plugin has been deprecated and will be removed in a future release. This plugin is designed for TripleO deployment but no longer used since Telemetry services were removed from undercloud. ' releasenotes/notes/deprecate-http-control-exchanges-026a8de6819841f8.yaml: deprecations: - 'Allow users to add additional exchanges in ceilometer.conf instead of hardcoding exchanges. Now original http_control_exchanges is being deprecated and renamed notification_control_exchanges. Besides, the new option is integrated with other exchanges in default EXCHANGE_OPTS to make it available to extend additional exchanges. ' releasenotes/notes/deprecate-http-dispatcher-dbbaacee8182b550.yaml: deprecations: - As the collector service is being deprecated, the duplication of publishers and dispatchers is being addressed. 
The http dispatcher is now marked as deprecated and the recommended path is to use http publisher. upgrade: - Configuration values can passed in via the querystring of publisher in pipeline. For example, rather than setting target, timeout, verify_ssl, and batch_mode under [dispatcher_http] section of conf, you can specify http:///?verify_ssl=True&batch=True&timeout=10. Use `raw_only=1` if only the raw details of event are required. releasenotes/notes/deprecate-http_timeout-ce98003e4949f9d9.yaml: deprecations: - 'The ``[DEFAULT] http_timeout`` option has been deprecated because it is unused. ' releasenotes/notes/deprecate-kafka-publisher-17b4f221758e15da.yaml: deprecations: - 'Kafka publisher is deprecated to use generic notifier instead. ' features: - 'Ceilometer supports generic notifier to publish data and allow user to customize parameters such as topic, transport driver and priority. The publisher configuration in pipeline.yaml can be notifer://[notifier_ip]:[notifier_port]?topic=[topic]&driver=driver&max_retry=100 Not only rabbit driver, but also other driver like kafka can be used. ' releasenotes/notes/deprecate-neutron-fwaas-e985afe956240c08.yaml: deprecations: - 'Support for Neutron FWaaS has been officially deprecated. The feature has been useless since the Neutron FWaaS project was retired. ' releasenotes/notes/deprecate-neutron-lbaas-5a36406cbe44bbe3.yaml: deprecations: - 'Support for Neutron LBaaS has been officially deprecated. The feature has been useless since the Neutron LBaaS project was retired. ' releasenotes/notes/deprecate-odl-07e3f59165612566.yaml: deprecations: - 'Support for OpenDaylight has been deprecated and will be removed in a future release. ' releasenotes/notes/deprecate-pollster-list-ccf22b0dea44f043.yaml: deprecations: - 'Deprecating support for enabling pollsters via command line. Meter and pollster enablement should be configured via polling.yaml file. 
' releasenotes/notes/deprecate-vmware-ae49e07e40e74577.yaml: deprecations: - 'Support for VMWare vSphere has been deprecated, because the vmwareapi virt driver in nova has been marked experimental and may be removed in a future release. ' releasenotes/notes/deprecate-windows-support-d784b975ce878864.yaml: deprecations: - 'Support for running Ceilometer in Windows operating systems has been deprecated because of retirement of the Winstackers project. Because of this, Hyper-V inspector is also deprecated. ' releasenotes/notes/deprecate-xen-support-27600e2bf7be548c.yaml: deprecations: - 'Support for XenServer/Xen Cloud Platform has been deprecated and will be removed in a future release. ' releasenotes/notes/deprecated_database_event_dispatcher_panko-607d558c86a90f17.yaml: deprecations: - The event database dispatcher is now deprecated. It has been moved to a new project, alongside the Ceilometer API for /v2/events, called Panko. releasenotes/notes/drop-collector-4c207b35d67b2977.yaml: upgrade: - 'The collector service is removed. From Ocata, it''s possible to edit the pipeline.yaml and event_pipeline.yaml files and modify the publisher to provide the same functionality as collector dispatcher. You may change publisher to ''gnocchi'', ''http'', ''panko'', or any combination of available publishers listed in documentation. ' releasenotes/notes/drop-image-meter-9c9b6cebd546dae7.yaml: deprecations: - The `image` meter is dropped in favour of `image.size` meter. prelude: 'In an effort to minimise the noise, Ceilometer will no longer produce meters which have no measureable data associated with it. Image meter only captures state information which is already captured in events and other meters. ' upgrade: - Any existing commands utilising `image` meter should be switched to `image.size` meter which will provide equivalent functionality releasenotes/notes/drop-instance-meter-1b657717b21a0f55.yaml: deprecations: - The `instance` meter no longer will be generated. 
prelude: 'Samples are required to measure some aspect of a resource. Samples not measuring anything will be dropped. ' upgrade: - The `instance` meter no longer will be generated. For equivalent functionality, perform the exact same query on any compute meter such as `cpu`, `disk.read.requests`, `memory.usage`, `network.incoming.bytes`, etc... releasenotes/notes/drop-kwapi-b687bc476186d01b.yaml: deprecations: - 'Previously deprecated kwapi meters are not removed. ' releasenotes/notes/drop-py-2-7-87352d5763131c13.yaml: upgrade: - 'Python 2.7 support has been dropped. Last release of ceilometer to support py2.7 is OpenStack Train. The minimum version of Python now supported by ceilometer is Python 3.6. ' releasenotes/notes/drop-python-3-6-and-3-7-f67097fa6894da52.yaml: upgrade: - 'Python 3.6 & 3.7 support has been dropped. The minimum version of Python now supported is Python 3.8. ' releasenotes/notes/dynamic-pollster-system-6b45c8c973201b2b.yaml: features: - 'Add dynamic pollster system. The dynamic pollster system enables operators to gather new metrics on the fly (without needing to code pollsters).' releasenotes/notes/dynamic-pollster-system-for-non-openstack-apis-4e06694f223f34f3.yaml: features: - 'Add the support for non-OpenStack APIs in the dynamic pollster system. This extension enables operators to create pollster on the fly to handle metrics from systems such as the RadosGW API. ' releasenotes/notes/dynamic-pollster-url-joins-6cdb01c4015976f7.yaml: upgrade: - 'When using dynamic pollsters to query OpenStack APIs, if the endpoint URL returned by Keystone does not have a trailing slash and ``url_path`` is a relative path, the ``url_path`` configured in the dynamic pollster would replace sections of the endpoint URL instead of being appended to the end of the URL. This behaviour has now been changed so that ``url_path`` values that do not start with a ``/`` are always appended to the end of endpoint URLs. 
This change may require existing dynamic pollsters that rely on this behaviour to be changed, but this allows dynamic pollsters to be added for OpenStack services that append the active project ID to the API endpoint URL (e.g. Trove). ' releasenotes/notes/enable-promethus-exporter-tls-76e78d4f4a52c6c4.yaml: features: - Enhanced the Prometheus exporter to support TLS for exposing metrics securely. releasenotes/notes/event-type-race-c295baf7f1661eab.yaml: fixes: - '[`bug 1254800 `_] Add better support to catch race conditions when creating event_types ' releasenotes/notes/fix-1940660-5226988f2e7ae1bd.yaml: fixes: - '[`bug 1940660 `_] Fixes an issue with the Swift pollster where the ``[service_credentials] cafile`` option was not used. This could prevent communication with TLS-enabled Swift APIs. ' releasenotes/notes/fix-agent-coordination-a7103a78fecaec24.yaml: critical: - '[`bug 1533787 `_] Fix an issue where agents are not properly getting registered to group when multiple notification agents are deployed. This can result in bad transformation as the agents are not coordinated. It is still recommended to set heartbeat_timeout_threshold = 0 in [oslo_messaging_rabbit] section when deploying multiple agents. ' releasenotes/notes/fix-aggregation-transformer-9472aea189fa8f65.yaml: fixes: - '[`bug 1539163 `_] Add ability to define whether to use first or last timestamps when aggregating samples. This will allow more flexibility when chaining transformers. ' releasenotes/notes/fix-floatingip-pollster-f5172060c626b19e.yaml: fixes: - '[`bug 1536338 `_] Patch was added to fix the broken floatingip pollster that polled data from nova api, but since the nova api filtered the data by tenant, ceilometer was not getting any data back. The fix changes the pollster to use the neutron api instead to get the floating ip info. 
' releasenotes/notes/fix-network-lb-bytes-sample-5dec2c6f3a8ae174.yaml: fixes: - '[`bug 1530793 `_] network.services.lb.incoming.bytes meter was previous set to incorrect type. It should be a gauge meter. ' releasenotes/notes/fix-notification-batch-9bb42cbdf817e7f9.yaml: fixes: - 'The ``[notification] batch_size`` parameter now takes effect to enable batch processing of notifications. The ``[notification] batch_timeout`` parameter has been restored at the same time to determine how much and how long notifications are kept. ' releasenotes/notes/fix-radosgw-name-6de6899ddcd7e06d.yaml: deprecations: - 'Previously, to enable/disable radosgw.* meters, you must define entry_point name rather than meter name. This is corrected so you do not need to be aware of entry_point naming. Use `radosgw.*` to enable/disable radosgw meters explicitly rather than `rgw.*`. `rgw.*` support is deprecated and will be removed in Rocky. ' fixes: - 'Fix ability to enable/disable radosgw.* meters explicitly ' upgrade: - 'Use `radosgw.*` to enable/disable radosgw meters explicitly rather than `rgw.*` ' releasenotes/notes/fix-volume-provider-pool-capacity-metrics-7b8b0de29a513cea.yaml: fixes: - '[`bug 2113903 `_] Fix volume provider pool capacity metrics for ceph backend. ' releasenotes/notes/gnocchi-cache-1d8025dfc954f281.yaml: features: - 'Support resource caching in Gnocchi dispatcher to improve write performance to avoid additional queries. ' other: - 'A dogpile.cache supported backend is required to enable cache. Additional configuration `options `_ are also required. ' releasenotes/notes/gnocchi-cache-b9ad4d85a1da8d3f.yaml: fixes: - '[`bug 255569 `_] Fix caching support in Gnocchi dispatcher. Added better locking support to enable smoother cache access. ' releasenotes/notes/gnocchi-client-42cd992075ee53ab.yaml: features: - 'Gnocchi dispatcher now uses client rather than direct http requests ' upgrade: - 'gnocchiclient library is now a requirement if using ceilometer+gnocchi. 
' releasenotes/notes/gnocchi-host-metrics-829bcb965d8f2533.yaml: features: - '[`bug 1518338 `_] Add support for storing SNMP metrics in Gnocchi.This functionality requires Gnocchi v2.1.0 to be installed. ' releasenotes/notes/gnocchi-no-metric-by-default-b643e09f5ffef2c4.yaml: issues: - 'Ceilometer created metrics that could never get measures depending on the polling configuration. Metrics are now created only if Ceilometer gets at least a measure for them. ' releasenotes/notes/gnocchi-orchestration-3497c689268df0d1.yaml: fixes: - 'Fix samples from Heat to map to correct Gnocchi resource type ' upgrade: - 'gnocchi_resources.yaml in Ceilometer should be updated. ' releasenotes/notes/gnocchi-udp-collector-00415e6674b5cc0f.yaml: fixes: - '[`bug 1523124 `_] Fix gnocchi dispatcher to support UDP collector ' releasenotes/notes/handle-malformed-resource-definitions-ad4f69f898ced34d.yaml: fixes: - '[`bug 1542189 `_] Handle malformed resource definitions in gnocchi_resources.yaml gracefully. Currently we raise an exception once we hit a bad resource and skip the rest. Instead the patch skips the bad resource and proceeds with rest of the definitions. ' releasenotes/notes/http-dispatcher-batching-4e17fce46a196b07.yaml: features: - 'In the [dispatcher_http] section of ceilometer.conf, batch_mode can be set to True to activate sending meters and events in batches, or False (default value) to send each meter and event with a fresh HTTP call. ' releasenotes/notes/http-dispatcher-verify-ssl-551d639f37849c6f.yaml: features: - In the [dispatcher_http] section of ceilometer.conf, verify_ssl can be set to True to use system-installed certificates (default value) or False to ignore certificate verification (use in development only!). verify_ssl can also be set to the location of a certificate file e.g. /some/path/cert.crt (use for self-signed certs) or to a directory of certificates. 
The value is passed as the 'verify' option to the underlying requests method, which is documented at http://docs.python-requests.org/en/master/user/advanced/#ssl-cert-verification releasenotes/notes/http-publisher-authentication-6371c5a9aa8d4c03.yaml: features: - In the 'publishers' section of a meter/event pipeline definition, https:// can now be used in addition to http://. Furthermore, either Basic or client-certificate authentication can be used (obviously, client cert only makes sense in the https case). For Basic authentication, use the form http://username:password@hostname/. For client certificate authentication pass the client certificate's path (and the key file path, if the key is not in the certificate file) using the parameters 'clientcert' and 'clientkey', e.g. https://hostname/path?clientcert=/path/to/cert&clientkey=/path/to/key. Any parameters or credentials used for http(s) publishers are removed from the URL before the actual HTTP request is made. releasenotes/notes/http_proxy_to_wsgi_enabled-616fa123809e1600.yaml: features: - Ceilometer sets up the HTTPProxyToWSGI middleware in front of Ceilometer. The purpose of this middleware is to set up the request URL correctly in case there is a proxy (for instance, a loadbalancer such as HAProxy) in front of Ceilometer. So, for instance, when TLS connections are being terminated in the proxy, and one tries to get the versions from the / resource of Ceilometer, one will notice that the protocol is incorrect; It will show 'http' instead of 'https'. So this middleware handles such cases. Thus helping Keystone discovery work correctly. The HTTPProxyToWSGI is off by default and needs to be enabled via a configuration value. releasenotes/notes/improve-events-rbac-support-f216bd7f34b02032.yaml: fixes: - '[`bug 1504495 `_] Configure ceilometer to handle policy.json rules when possible. ' upgrade: - 'To utilize the new policy support. The policy.json file should be updated accordingly. 
The pre-existing policy.json file will continue to function as it does if policy changes are not required. ' releasenotes/notes/include-monasca-publisher-1f47dde52af50feb.yaml: features: - 'Include a publisher for the Monasca API. A ``monasca://`` pipeline sink will send data to a Monasca instance, using credentials configured in ceilometer.conf. This functionality was previously available in the Ceilosca project (https://github.com/openstack/monasca-ceilometer). ' releasenotes/notes/index-events-mongodb-63cb04200b03a093.yaml: fixes: - '[`bug 1526793 `_] Additional indices were added to better support querying of event data. ' upgrade: - 'Run db-sync to add new indices. ' releasenotes/notes/instance-discovery-new-default-7f9b451a515dddf4.yaml: upgrade: - 'Ceilometer legacy backends and Ceilometer API are now deprecated. Polling all nova instances from compute agent is no more required with Gnocchi. So we switch the [compute]instance_discovery_method to libvirt_metadata. To switch back to the old deprecated behavior you can set it back to ''naive''. ' releasenotes/notes/instance-record-launched-created-deleted-d7f44df3bbcf0790.yaml: features: - '`launched_at`/`created_at`/`deleted_at` of Nova instances are now tracked. ' releasenotes/notes/keystone-v3-fab1e257c5672965.yaml: features: - 'Add support for Keystone v3 authentication ' releasenotes/notes/kwapi_deprecated-c92b9e72c78365f0.yaml: deprecations: - The Kwapi pollsters are deprecated and will be removed in the next major version of Ceilometer. releasenotes/notes/less-nova-polling-ac56687da3f8b1a3.yaml: deprecations: - The [compute]/workload_partitioning = True is deprecated in favor of [compute]/instance_discovery_method = workload_partitioning features: - 'The Ceilometer compute agent can now retrieve some instance metadata from the metadata libvirt API instead of polling the Nova API. Since Mitaka, Nova fills this metadata with some information about the instance. 
To enable this feature you should set [compute]/instance_discovery_method = libvirt_metadata in the configuration file. The only downside of this method is that user_metadata (and some other instance attributes) are no longer part of the samples created by the agent. But when Gnocchi is used as backend, this is not an issue since Gnocchi doesn''t store resource metadata aside of the measurements. And the missing informations are still retrieved through the Nova notifications and will fully update the resource information in Gnocchi.' upgrade: - If you are using Gnocchi as backend it's strongly recommended to switch [compute]/instance_discovery_method to libvirt_metadata. This will reduce the load on the Nova API especially if you have many compute nodes. releasenotes/notes/lookup-meter-def-vol-correctly-0122ae429275f2a6.yaml: fixes: - '[`bug 1536699 `_] Patch to fix volume field lookup in meter definition file. In case the field is missing in the definition, it raises a keyerror and aborts. Instead we should skip the missing field meter and continue with the rest of the definitions. ' releasenotes/notes/make-instance-host-optional-972fa14405c1e2f6.yaml: upgrade: - 'The ``instance`` resource type has been updated to make the ``host`` resource attribute optional. This allows the hypervisor a compute instance is running on to be withheld from Gnocchi''s resource metadata, which may be required for security reasons e.g. for public clouds. ' releasenotes/notes/manager-based-ipc-queues-85e3bf59ffdfb0ac.yaml: features: - 'Workload partitioning of notification agent is now split into queues based on pipeline type (sample, event, etc...) rather than per individual pipeline. This will save some memory usage specifically for pipeline definitions with many source/sink combinations. ' upgrade: - 'If workload partitioning of the notification agent is enabled, the notification agent should not run alongside pre-Queens agents. 
Doing so may result in missed samples when leveraging transformations. To upgrade without loss of data, set `notification_control_exchanges` option to empty so only existing `ceilometer-pipe-*` queues are processed. Once cleared, reset `notification_control_exchanges` option and launch the new notification agent(s). If `workload_partitioning` is not enabled, no special steps are required. ' releasenotes/notes/memory-bandwidth-meter-f86cf01178573671.yaml: features: - Add two new meters, including memory.bandwidth.total and memory.bandwidth.local, to get memory bandwidth statistics based on Intel CMT feature. releasenotes/notes/mongodb-handle-large-numbers-7c235598ca700f2d.yaml: fixes: - '[`bug 1532661 `_] Fix statistics query failures due to large numbers stored in MongoDB. Data from MongoDB is returned as Int64 for big numbers when int and float types are expected. The data is cast to appropriate type to handle large data. ' releasenotes/notes/network-statistics-from-opendaylight-787df77484d8d751.yaml: features: - Add a ceilometer driver to collect network statistics information using REST APIs exposed by network-statistics module in OpenDaylight. - Add support for network statistics meters with gnocchi prelude: 'Network Statistics From OpenDaylight. ' releasenotes/notes/openstack-dynamic-pollsters-metadata-enrichment-703cf5914cf0c578.yaml: features: - 'OpenStack Dynamic pollsters metadata enrichment with other OpenStack API''s data. ' releasenotes/notes/parallel_requests_option-a3f901b6001e26e4.yaml: features: - 'A new option named `max_parallel_requests` is available to control the maximum number of parallel requests that can be executed by the agents. This option also replaces the `poolsize` option of the HTTP publisher. ' releasenotes/notes/parallels-virt_type-ee29c4802fdf5c8e.yaml: fixes: - 'The ``[DEFAULT] virt_type`` option now supports ``parallels``. 
' releasenotes/notes/pecan-debug-removed-dc737efbf911bde7.yaml: upgrade: - The api.pecan_debug option has been removed. releasenotes/notes/perf-events-meter-b06c2a915c33bfaf.yaml: features: - Add four new meters, including perf.cpu.cycles for the number of cpu cycles one instruction needs, perf.instructions for the count of instructions, perf.cache_references for the count of cache hits and cache_misses for the count of caches misses. releasenotes/notes/pipeline-fallback-polling-3d962a0fff49ccdd.yaml: upgrade: - 'The deprecated support of configure polling in the `pipeline.yaml` file has been removed. Ceilometer now only uses the `polling.yaml` file for polling configuration. ' releasenotes/notes/polling-batch-size-7fe11925df8d1221.yaml: deprecations: - 'The option batch_polled_samples in the [DEFAULT] section is deprecated. Use batch_size option in [polling] to configure and/or disable batching. ' features: - 'Add support for configuring the size of samples the poller will send in each batch. ' upgrade: - 'batch_size option added to [polling] section of configuration. Use batch_size=0 to disable batching of samples. ' releasenotes/notes/polling-definition-efffb92e3810e571.yaml: upgrade: - Pipeline processing in polling agents was removed in Liberty cycle. A new polling specific definition file is created to handle polling functionality and pipeline definition file is now reserved exclusively for transformations and routing. The polling.yaml file follows the same syntax as the pipeline.yaml but only handles polling attributes such as interval, discovery, resources, meter matching. It is configured by setting cfg_file under the polling section.If no polling definition file is found, it will fallback to reuse pipeline_cfg_file. releasenotes/notes/polling-deprecation-4d5b83180893c053.yaml: deprecations: - 'Usage of pipeline.yaml for polling configuration is now deprecated. The dedicated polling.yaml should be used instead. 
' releasenotes/notes/prometheus-bcb201cfe46d5778.yaml: features: - 'A new pulisher have been added to push data to Prometheus Pushgateway. ' releasenotes/notes/publish-network-resources-with-invalid-state-6693c6fa1fefa097.yaml: upgrade: - 'The ``ip.floating`` and ``network.services.vpn`` pollsters now publish samples for all found floating IPs and VPNs, even if they are known to have an unknown state, when they would previously be dropped. The volume of samples for such floating IPs and VPNs will be set to ``-1``. This improves visibility of floating IPs and VPNs with unknown states, allowing them to be monitored via samples and the Gnocchi metrics, making it easier to discover such resources for troubleshooting. It also moves some of the "business logic" for downstream rating/billing services such as CloudKitty out of Ceilometer itself. ' - 'The ``network.services.vpn`` now publishes samples for VPNs with status ``ERROR``, when they would previously be dropped. The sample volume for VPNs in ``ERROR`` state is ``7``. ' releasenotes/notes/refresh-legacy-cache-e4dbbd3e2eeca70b.yaml: fixes: - 'A local cache is used when polling instance metrics to minimise calls Nova API. A new option is added `resource_cache_expiry` to configure a time to live for cache before it expires. This resolves issue where migrated instances are not removed from cache. This is only relevant when `instance_discovery_method` is set to `naive`. It is recommended to use `libvirt_metadata` if possible. ' releasenotes/notes/remove-alarms-4df3cdb4f1fb5faa.yaml: features: - "Ceilometer alarms code is now fully removed from code base. Equivalent functionality\ \ is handled by Aodh. \n" releasenotes/notes/remove-batch_polled_samples-b40241c8aad3667d.yaml: upgrade: - 'Remove deprecated option `batch_polled_samples`. ' releasenotes/notes/remove-cadf-http-f8449ced3d2a29d4.yaml: features: - 'Support for CADF-only payload in HTTP dispatcher is dropped as audit middleware in pyCADF was dropped in Kilo cycle. 
' upgrade: - 'audit middleware in keystonemiddleware library should be used for similar support. ' releasenotes/notes/remove-ceilometer-dbsync-53aa1b529f194f15.yaml: other: - The deprecated ceilometer-dbsync has been removed. Use ceilometer-upgrade instead. releasenotes/notes/remove-check_watchers-a7c955703b6d9f57.yaml: upgrade: - 'The ``[coordination] check_watchers`` parameter has been removed. ' releasenotes/notes/remove-compute-disk-meters-264e686622886ff0.yaml: upgrade: - 'The deprecated `disk.*` meters have been removed. Use the `disk.device.*` meters instead. ' releasenotes/notes/remove-compute-rate-deprecated-meters-201893c6b686b04a.yaml: upgrade: - "The deprecated meter for compute where removed:\n - disk.read.requests.rate\n\ \ - disk.write.requests.rate\n - disk.read.bytes.rate\n - disk.write.bytes.rate\n\ \ - disk.device.read.requests.rate\n - disk.device.write.requests.rate\n \ \ - disk.device.read.bytes.rate\n - disk.device.write.bytes.rate\n" releasenotes/notes/remove-compute-workload-partitioning-option-26538bc1e80500e3.yaml: upgrade: - 'The deprecated `compute.workload_partitioning` option has been removed in favor of `compute.instance_discovery_method`. ' releasenotes/notes/remove-direct-publisher-5785ee7edd16c4d9.yaml: upgrade: - 'Remove direct publisher and use the explicit publisher instead. ' releasenotes/notes/remove-eventlet-6738321434b60c78.yaml: features: - 'Remove eventlet from Ceilometer in favour of threaded approach ' releasenotes/notes/remove-exchange-control-options-75ecd49423639068.yaml: upgrade: - 'The deprecated control exchange options have been removed. ' releasenotes/notes/remove-file-dispatcher-56ba1066c20d314a.yaml: upgrade: - 'The deprecated file dispatcher has been removed. ' releasenotes/notes/remove-generic-hardware-declarative-pollster-e05c614f273ab149.yaml: upgrade: - '``GenericHardwareDeclarativePollster`` has been removed. Because of this removal all metrics gathered by SNMP daemon have been removed as well. 
' - 'The ``NodesDiscoveryTripleO`` discovery plugin has been removed. ' releasenotes/notes/remove-gnocchi-dispatcher-dd588252976c2abb.yaml: upgrade: - 'The Gnocchi dispatcher has been removed and replaced by a native Gnocchi publisher. The configuration options from the `[dispatcher_gnocchi]` has been removed and should be passed via the URL in `pipeline.yaml`. The service authentication override can be done by adding specific credentials to a `[gnocchi]` section instead. ' releasenotes/notes/remove-gnocchi-dispatcher-options-4f4ba2a155c1a766.yaml: upgrade: - 'The deprecated `dispatcher_gnocchi` option group has been removed. ' releasenotes/notes/remove-http-dispatcher-1afdce1d1dc3158d.yaml: upgrade: - 'The deprecated http dispatcher has been removed. ' releasenotes/notes/remove-intel-cmt-perf-meters-15d0fe72b2804f48.yaml: upgrade: - 'The following meters were removed. Nova removed support for Intel CMT perf events in 22.0.0, and these meters can no longer be measured since then. - ``cpu_l3_cache_usage`` - ``memory_bandwidth_local`` - ``memory_bandwidth_total`` ' releasenotes/notes/remove-intel-node-manager-0889de66dede9ab0.yaml: upgrade: - 'Support for Intel Node Manager was removed. ' releasenotes/notes/remove-kafka-broker-publisher-7026b370cfc831db.yaml: upgrade: - 'The deprecated kafka publisher has been removed, use NotifierPublisher instead. ' releasenotes/notes/remove-meter-definitions-cfg-file-config-476596fc86c36a81.yaml: upgrade: - 'Remove deprecated option meter_definitions_cfg_file, use meter_definitions_dirs to configure meter notification file. ' releasenotes/notes/remove-meter-definitions-cfg-file-d57c726d563d805f.yaml: upgrade: - 'The deprecated `meter_definitions_cfg_file` option has been removed. ' releasenotes/notes/remove-monasca-d5ceda231839d43d.yaml: upgrade: - 'Remove integration with the inactive Monasca project ' releasenotes/notes/remove-neutron-lbaas-d3d4a5327f6a167a.yaml: upgrade: - 'Support for neutron-lbaas resources has been removed. 
' releasenotes/notes/remove-notification-workload-partitioning-2cef114fb2478e39.yaml: upgrade: - 'The deprecated workload partitioning for notification agent has been removed. ' releasenotes/notes/remove-nova-http-log-option-64e97a511e58da5d.yaml: upgrade: - 'The deprecated `nova_http_log_debug` option has been removed. ' releasenotes/notes/remove-opencontrail-88656a9354179299.yaml: upgrade: - 'Support for Open Contrail has been removed. Because no SDN is supported after the removal, the mechanism to pull metrics from SDN is also removed. ' releasenotes/notes/remove-opendaylight-c3839bbe9aa2a227.yaml: upgrade: - 'Support for OpenDaylight has been removed. ' releasenotes/notes/remove-pollster-list-bda30d747fb87c9e.yaml: upgrade: - 'The deprecated `pollster-list` option has been removed. ' releasenotes/notes/remove-publisher-topic-options-7a40787a3998921d.yaml: upgrade: - 'The notifier publisher options `metering_topic` and `event_topic` are deprecated and will be removed. Use the `topic` query parameter in the notifier publisher URL instead. ' releasenotes/notes/remove-py38-80670bdcfd4dd135.yaml: upgrade: - 'Python 3.8 support was dropped. The minimum version of Python now supported is Python 3.9. ' releasenotes/notes/remove-py39-8c39f81f856bee9f.yaml: upgrade: - 'Support for Python 3.9 has been removed. Now Python 3.10 is the minimum version supported. ' releasenotes/notes/remove-refresh-pipeline-618af089c5435db7.yaml: deprecations: - 'The pipeline dynamic refresh code has been removed. Ceilometer relies on the cotyledon library for a few releases which provides reload functionality by sending the SIGHUP signal to the process. This achieves the same feature while making sure the reload is explicit once the file is correctly and entirely written to the disk, avoiding the failing load of half-written files. ' releasenotes/notes/remove-rpc-collector-d0d0a354140fd107.yaml: features: - 'RPC collector support is dropped. 
The queue-based notifier publisher and collector was added as the recommended alternative as of Icehouse cycle. ' upgrade: - 'Pipeline.yaml files for agents should be updated to notifier:// or udp:// publishers. The rpc:// publisher is no longer supported. ' releasenotes/notes/remove-sahara-9254593d4fb137b9.yaml: upgrade: - 'Default value of the ``[notification] notification_control_exchanges`` option has been updated and ``sahara`` is no longer included by default. ' - 'The default event definiton has been updated and no longer includes events for sahara. ' releasenotes/notes/remove-service-type-volume-v2-08c81098dc7c0922.yaml: features: - 'The deprecated ``[service_types] cinderv2`` option has been removed. Use the ``[service_types] cinder`` option instead. ' releasenotes/notes/remove-shuffle_time_before_polling_task-option-05a4d225236c64b1.yaml: deprecations: - 'The `shuffle_time_before_polling_task` option has been removed. This option never worked in the way it was originally intended too. ' releasenotes/notes/remove-transformers-14e00a789dedd76b.yaml: upgrade: - 'The support for transformers has been removed from the pipeline. ' releasenotes/notes/remove-uml-e86feeabdd16c628.yaml: upgrade: - 'The ``[DEFAULT] virt_type`` option no longer supports ``uml``. UML support by nova was removed in nova 23.3.0 release. ' releasenotes/notes/remove-vsphere-support-411c97b66bdcd264.yaml: deprecations: - 'The ``[DEFAULT] hypervisor_inspector`` option has been deprecated, because libvirt is the only supported hypervisor currently. The option will be removed in a future release. ' upgrade: - 'Support for VMware vSphere has been removed. ' releasenotes/notes/remove-windows-support-0d280cc7c7fffc61.yaml: upgrade: - 'Support for running ceilometer in Windows operating systems has been removed. Because of the removal, Hyper-V inspector has also been removed. 
' releasenotes/notes/remove-xen-support-7cb932b7bc621269.yaml: upgrade: - 'Support for XenServer/Xen Cloud Platform has been removed. ' releasenotes/notes/removed-rgw-ae3d80c2eafc9319.yaml: upgrade: - 'Deprecated `rgw.*` meters have been removed. Use `radosgw.*` instead. ' releasenotes/notes/rename-ceilometer-dbsync-eb7a1fa503085528.yaml: deprecations: - For backward compatibility reason we temporary keep ceilometer-dbsync, at least for one major version to ensure deployer have time update their tooling. prelude: 'Ceilometer backends are no more only databases but also REST API like Gnocchi. So ceilometer-dbsync binary name doesn''t make a lot of sense and have been renamed ceilometer-upgrade. The new binary handles database schema upgrade like ceilometer-dbsync does, but it also handle any changes needed in configured ceilometer backends like Gnocchi. ' releasenotes/notes/rename-tenant_name_discovery-1675a236bb51176b.yaml: deprecations: - 'The ``[polling] tenant_name_discovery`` option has been deprecated in favor of the new ``[polling] identity_name_discovery`` option. ' releasenotes/notes/save-rate-in-gnocchi-66244262bc4b7842.yaml: deprecations: - 'cpu_util and \*.rate meters are deprecated and will be removed in future release in favor of the Gnocchi rate calculation equivalent. ' features: - 'Archive policies can now be configured per metrics in gnocchi_resources.yaml. A default list of archive policies is now created by Ceilometer. They are called "ceilometer-low-rate" for all IOs metrics and "ceilometer-low" for others. ' upgrade: - 'Ceilometer now creates it own archive policies in Gnocchi and use them to create metrics in Gnocchi. Old metrics kept their current archive policies and will not be updated with ceilometer-upgrade. Only newly created metrics will be impacted. Archive policy can still be overridden with the publisher url (e.g: gnocchi://archive_policy=high). 
' releasenotes/notes/scan-domains-for-tenants-8f8c9edcb74cc173.yaml: features: - The tenant (project) discovery code in the polling agent now scans for tenants in all available domains. releasenotes/notes/selective-pipeline-notification-47e8a390b1c7dcc4.yaml: features: - 'The notification-agent can now be configured to either build meters or events. By default, the notification agent will continue to load both pipelines and build both data models. To selectively enable a pipeline, configure the `pipelines` option under the `[notification]` section. Addition pipelines can be created following the format used by existing pipelines. ' releasenotes/notes/ship-yaml-files-33aa5852bedba7f0.yaml: other: - 'Ship YAML files to ceilometer/pipeline/data/ make it convenient to update all the files on upgrade. Users can copy yaml files from /usr/share/ceilometer and customise their own files located in /etc/ceilometer/. ' releasenotes/notes/single-thread-pipelines-f9e6ac4b062747fe.yaml: fixes: - Fix to improve handling messages in environments heavily backed up. Previously, notification handlers greedily grabbed messages from queues which could cause ordering issues. A fix was applied to sequentially process messages in a single thread to prevent ordering issues. upgrade: - Batching is enabled by default now when coordinated workers are enabled. Depending on load, it is recommended to scale out the number of `pipeline_processing_queues` to improve distribution. `batch_size` should also be configured accordingly. releasenotes/notes/skip-duplicate-meter-def-0420164f6a95c50c.yaml: fixes: - '[`bug 1536498 `_] Patch to fix duplicate meter definitions causing duplicate samples. If a duplicate is found, log a warning and skip the meter definition. Note that the first occurance of a meter will be used and any following duplicates will be skipped from processing. 
' releasenotes/notes/snmp-cpu-util-055cd7704056c1ce.yaml: deprecations: - 'metrics hardware.cpu.util and hardware.system_stats.cpu.idle are now deprecated. Other hardware.cpu.* metrics should be used instead. ' features: - 'new metrics are available for snmp polling hardware.cpu.user, hardware.cpu.nice, hardware.cpu.system, hardware.cpu.idle, hardware.cpu.wait, hardware.cpu.kernel, hardware.cpu.interrupt. They replace deprecated hardware.cpu.util and hardware.system_stats.cpu.idle. ' releasenotes/notes/snmp-diskio-samples-fc4b5ed5f19c096c.yaml: features: - 'Add hardware.disk.read.* and hardware.disk.write.* metrics to capture diskio details. ' releasenotes/notes/sql-query-optimisation-ebb2233f7a9b5d06.yaml: fixes: - '[`bug 1506738 `_] [`bug 1509677 `_] Optimise SQL backend queries to minimise query load ' releasenotes/notes/start-using-reno-9ffb7d0035846b4b.yaml: other: - Start using reno to manage release notes. releasenotes/notes/support-None-query-45abaae45f08eda4.yaml: fixes: - '[`bug 1388680 `_] Suppose ability to query for None value when using SQL backend. ' releasenotes/notes/support-cinder-volume-snapshot-backup-metering-d0a93b86bd53e803.yaml: features: - Add support of metering the size of cinder volume/snapshot/backup. Like other meters, these are useful for billing system. releasenotes/notes/support-lbaasv2-polling-c830dd49bcf25f64.yaml: features: - 'Support for polling Neutron''s LBaaS v2 API was added as v1 API in Neutron is deprecated. The same metrics are available between v1 and v2. ' issues: - 'Neutron API is not designed to be polled against. When polling against Neutron is enabled, Ceilometer''s polling agents may generage a significant load against the Neutron API. It is recommended that a dedicated API be enabled for polling while Neutron''s API is improved to handle polling. ' upgrade: - 'By default, Ceilometer will poll the v2 API. To poll legacy v1 API, add neutron_lbaas_version=v1 option to configuration file. 
' releasenotes/notes/support-meter-batch-recording-mongo-6c2bdf4fbb9764eb.yaml: features: - Add support of batch recording metering data to mongodb backend, since the pymongo support *insert_many* interface which can be used to batch record items, in "big-data" scenarios, this change can improve the performance of metering data recording. releasenotes/notes/support-multiple-meter-definition-files-e3ce1fa73ef2e1de.yaml: features: - 'Support loading multiple meter definition files and allow users to add their own meter definitions into several files according to different types of metrics under the directory of /etc/ceilometer/meters.d.' releasenotes/notes/support-snmp-cpu-util-5c1c7afb713c1acd.yaml: features: - '[`bug 1513731 `_] Add support for hardware cpu_util in snmp.yaml ' releasenotes/notes/support-unique-meter-query-221c6e0c1dc1b726.yaml: features: - '[`bug 1506959 `_] Add support to query unique set of meter names rather than meters associated with each resource. The list is available by adding unique=True option to request. ' releasenotes/notes/switch-to-oslo-privsep-b58f20a279f31bc0.yaml: security: - 'Privsep transitions. Ceilometer is transitioning from using the older style rootwrap privilege escalation path to the new style Oslo privsep path. This should improve performance and security of Ceilometer in the long term. ' - 'Privsep daemons are now started by Ceilometer when required. These daemons can be started via rootwrap if required. rootwrap configs therefore need to be updated to include new privsep daemon invocations. ' upgrade: - 'The following commands are no longer required to be listed in your rootwrap configuration: ipmitool. ' releasenotes/notes/thread-safe-matching-4a635fc4965c5d4c.yaml: critical: - '[`bug 1519767 `_] fnmatch functionality in python <= 2.7.9 is not threadsafe. this issue and its potential race conditions are now patched. 
' releasenotes/notes/threeads-process-pollsters-cbd22cca6f2effc4.yaml: features: - 'Introduce ``threads_to_process_pollsters`` to enable operators to define the number of pollsters that can be executed in parallel inside a polling task. ' releasenotes/notes/tooz-coordination-system-d1054b9d1a5ddf32.yaml: upgrade: - 'Ceilometer now leverages the latest distribution mechanism provided by the tooz library. Therefore the options `coordination.retry_backoff` and `coordination.max_retry_interval` do not exist anymore. ' releasenotes/notes/transformer-ed4b1ea7d1752576.yaml: deprecations: - 'Usage of transformers in Ceilometer pipelines is deprecated. Transformers in Ceilometer have never computed samples correctly when you have multiple workers. This functionality can be done by the storage backend easily without all issues that Ceilometer has. For example, the rating is already computed in Gnocchi today. ' - 'Pipeline Partitioning is also deprecated. This was only useful to workaround of some issues that tranformers has. ' releasenotes/notes/unify-timestamp-of-polled-data-fbfcff43cd2d04bc.yaml: fixes: - '[`bug 1491509 `_] Patch to unify timestamp in samples polled by pollsters. Set the time point polling starts as timestamp of samples, and drop timetamping in pollsters. ' releasenotes/notes/use-glance-v2-in-image-pollsters-137a315577d5dc4c.yaml: features: - Since the Glance v1 APIs won't be maintained any more, this change add the support of glance v2 in images pollsters. upgrade: - 'The option `glance_page_size'' has been removed because it''s not actually needed. ' releasenotes/notes/use-notification-transport-url-489f3d31dc66c4d2.yaml: fixes: - The transport_url defined in [oslo_messaging_notifications] was never used, which contradicts the oslo_messaging documentation. This is now fixed. releasenotes/notes/use-usable-metric-if-available-970ee58e8fdeece6.yaml: features: - use memory usable metric from libvirt memoryStats if available. 
releasenotes/notes/volume-metrics-01ddde0180bc21cb.yaml: upgrade: - 'The default ``polling.yaml`` file has been updated and now it enables meters related to cinder by default. ' releasenotes/notes/zaqar-publisher-f7efa030b71731f4.yaml: features: - Add a new publisher for pushing samples or events to a Zaqar queue. notes: - files: - - releasenotes/notes/add-aodh-metrics-afbe9b780fd137d6.yaml - !!binary | Y2QwZGI4Yjc2NjYxNTA3ZGQ5OWQ3YmRiYmRjMWJiZWQwM2ZkODAyZA== - - releasenotes/notes/add-map-trait-plugin-0d969f5cc7b18175.yaml - !!binary | ZDFiYTkwYjNjMzU5OWZmMGExZTI2ODM3ZjQzZTEzN2QwYzEzZTEwOA== - - releasenotes/notes/add-pool-size-metrics-cdecb979135bba85.yaml - !!binary | MzkyMmRiNGYzZDRmMGRmNTg2YTg5ODk0MzYzMTQ4YTZlZTg0YjI4ZQ== - - releasenotes/notes/bug-2113768-a2db3a59c8e13558.yaml - !!binary | NzBhNTZiYTY3MDE1N2JmNzJkMTUxM2I4YzE3N2M5Mjk0MDcxZTJlYg== - - releasenotes/notes/deprecate-http_timeout-ce98003e4949f9d9.yaml - !!binary | ZTM5NTBhNTVjMmU0NWFkNDI2MTYzN2YzNDcxZjE3ODU5MDNkNGE3MQ== - - releasenotes/notes/enable-promethus-exporter-tls-76e78d4f4a52c6c4.yaml - !!binary | ZTc2OWE4MGI2Y2E4OTZiYWNhZGVmYTY2YmJjMzc0YTliZTNiMzlmNw== - - releasenotes/notes/fix-volume-provider-pool-capacity-metrics-7b8b0de29a513cea.yaml - !!binary | MGUxNmMxZGQ5MWI2YWZmZWY2M2UxNTNkZmVlNzVkNTRkNzc1ODQ5ZA== - - releasenotes/notes/make-instance-host-optional-972fa14405c1e2f6.yaml - !!binary | NzAzYWRhMmMwNTYyMzVlMTVkODQzY2I5Y2Q5OWU3NWI3YTM2MmM2ZA== - - releasenotes/notes/publish-network-resources-with-invalid-state-6693c6fa1fefa097.yaml - !!binary | NzA0MGU4ZTRlM2ZlNjQ0M2UyZTNiZTE4YmMwMGIyN2U4NTZhMzFmNA== - - releasenotes/notes/remove-intel-node-manager-0889de66dede9ab0.yaml - !!binary | NGZhMmUyM2U0YTE3YTFiYzNmNTZjZDljOTg1ZjE3ZGUyOWRjMmU4Mw== - - releasenotes/notes/remove-py39-8c39f81f856bee9f.yaml - !!binary | YTIwMzI5MmM1NDE0YzM2NWExNTk5MmFlODkzZWU3MzMzYTkwNzY2Zg== - - releasenotes/notes/remove-service-type-volume-v2-08c81098dc7c0922.yaml - !!binary | 
YmJjNTQzNmI1ZTViNzg3YTE1NWM4Yzk5OWNmZTRiMTkyYWMwZWRkNw== - - releasenotes/notes/threeads-process-pollsters-cbd22cca6f2effc4.yaml - !!binary | NDkyOTc0ZGQwYjRjNTY2NmRlZmUyMzk4ZTk1ZDgwYWEzMjVlM2QwYw== version: 24.0.0-37 - files: - - releasenotes/notes/add-disk-size-pollsters-6b819d067f9cf736.yaml - !!binary | MzZiNDBlZDdkZWVjNmZjZjgxZjhhOWJmZWVhZDAzYmRiNDgzNjJhOA== - - releasenotes/notes/add-parameter-for-disabled-projects-381da4543fff071d.yaml - !!binary | MjYyNGQ3Y2EyZmFhM2M5ODcyMmNiZDM4OGNmMjVkOWJmMmViNzhhMw== - - releasenotes/notes/add-power-state-metric-cdfbb3098b50a704.yaml - !!binary | ZGFiOTYzMDU4OGU1YmYzMzQ0NDlhMzc5NDg0NGM2ODZlM2FjYjczNA== - - releasenotes/notes/add-swift-storage_policy-attribute-322fbb5716c5bb10.yaml - !!binary | NTZjZTc1ZDg5N2U1ZTgyNjFlZWM3OWQzNTUyYjk1YTYwZWQ4NjYzYw== - - releasenotes/notes/add-volume_type_id-attr-f29af86534907941.yaml - !!binary | Y2UzYWI5M2NiODhjY2M3NTFkMmVmYmI1NjgxZThmNGM0NzA0NTM5Zg== - - releasenotes/notes/dynamic-pollster-url-joins-6cdb01c4015976f7.yaml - !!binary | MDQ2ODEyNjE4MmUxNzZlOTIyZGIxYzYzN2U2NGEwNGM1NmZjNDA0MA== - - releasenotes/notes/remove-intel-cmt-perf-meters-15d0fe72b2804f48.yaml - !!binary | OWZlMWQxOWRlMjJkZTBkY2VjZjJiZmVkMDg2Yjc1YTNhOGE3MDZhMA== - - releasenotes/notes/remove-opencontrail-88656a9354179299.yaml - !!binary | ZjQ3NDljNzI1MTgyNjk3MGQ2YzZhMWYwYWJkYTFhZWY3MjQ3ODM0YQ== - - releasenotes/notes/remove-py38-80670bdcfd4dd135.yaml - !!binary | ZWJjYWVlOWI2YzFlOGYzYThhNzI1MWVjMTcyOTUwNjYwNzMyMTE4MA== - - releasenotes/notes/remove-vsphere-support-411c97b66bdcd264.yaml - !!binary | ZTM3ZTJmM2ZmNzc4OWYwOWI5MWQwNjk3OTIxNDkxM2Y1NmQ5MjQ3MQ== - - releasenotes/notes/rename-tenant_name_discovery-1675a236bb51176b.yaml - !!binary | ZGQ5ZmY5OWRkMTY3YWE4NThiZTc4YTFhODliZjg5ZDllYzVlOWZkNA== version: 24.0.0 - files: - - releasenotes/notes/fix-agent-coordination-a7103a78fecaec24.yaml - !!binary | NjdlNDdjZGE4ZTdlMGQyNjQ5ZmVmMzM0YTZlMGRiMjgyNmQ1ZmJkMQ== - - 
releasenotes/notes/handle-malformed-resource-definitions-ad4f69f898ced34d.yaml - !!binary | OTlhNTZlNzA3YTFiYzkwNDliMTY3YjMwMzUwMWNjZWI3ZDM3ZTY1ZQ== - - releasenotes/notes/lookup-meter-def-vol-correctly-0122ae429275f2a6.yaml - !!binary | M2ExZmNhZjcxMmE3Y2RjZmZhZmUyYWUzM2JjNGMyNTA4YmFiOGE0OQ== - - releasenotes/notes/skip-duplicate-meter-def-0420164f6a95c50c.yaml - !!binary | NGE0ZGY3Mzc1OWNkNzgxMDVjZWIxMTVhYzBmNGVjMDk4MDI4NWQ4MA== version: 5.0.3 - files: - - releasenotes/notes/aggregator-transformer-timeout-e0f42b6c96aa7ada.yaml - !!binary | M2IzNWM0MDg3NTE5OTgxYmE3ZGQwNjJlZWM5ODhlOWVlNWRkZjA3Ng== - - releasenotes/notes/thread-safe-matching-4a635fc4965c5d4c.yaml - !!binary | M2IzNWM0MDg3NTE5OTgxYmE3ZGQwNjJlZWM5ODhlOWVlNWRkZjA3Ng== version: 5.0.2 - files: - - releasenotes/notes/start-using-reno-9ffb7d0035846b4b.yaml - !!binary | OTQ5YzM0M2QxMmJiYjI3ODgxY2UxMWIxMWEyZDBiZDk4M2ZkNjYyMg== version: 5.0.1 - files: - - releasenotes/notes/aggregator-transformer-timeout-e0f42b6c96aa7ada.yaml - !!binary | MTY4OWU3MDUzZjRlNzU4N2EyYjgzNjAzNWNkZmE0ZmRhNTY2NjdmYw== - - releasenotes/notes/always-requeue-7a2df9243987ab67.yaml - !!binary | MjQ0NDM5OTc5ZmQyOGVjYjBjNzZkMTMyZjBiZTc4NGM5ODhiNTRjOA== - - releasenotes/notes/batch-messaging-d126cc525879d58e.yaml - !!binary | YzU4OTVkMmM2ZWZjNjY3NjY3OWU2OTczYzA2Yjg1YzBjM2ExMDU4NQ== - - releasenotes/notes/cache-json-parsers-888307f3b6b498a2.yaml - !!binary | ZTZmYTBhODRkMWY3YTMyNjg4MWYzNTg3NzE4ZjFkZjc0M2I4NTg1Zg== - - releasenotes/notes/compute-discovery-interval-d19f7c9036a8c186.yaml - !!binary | ZTZmYTBhODRkMWY3YTMyNjg4MWYzNTg3NzE4ZjFkZjc0M2I4NTg1Zg== - - releasenotes/notes/configurable-data-collector-e247aadbffb85243.yaml - !!binary | ZjI0ZWE0NDQwMWI4OTQ1YzljYjhhMzRiMmFlZGViYmEzYzA0MDY5MQ== - - releasenotes/notes/cors-support-70c33ba1f6825a7b.yaml - !!binary | YzU4OTVkMmM2ZWZjNjY3NjY3OWU2OTczYzA2Yjg1YzBjM2ExMDU4NQ== - - releasenotes/notes/event-type-race-c295baf7f1661eab.yaml - !!binary | 
MGUzYWU4YTY2N2Q5YjlkNmUxOWE3NTE1ODU0ZWIxNzAzZmMwNTAxMw== - - releasenotes/notes/fix-agent-coordination-a7103a78fecaec24.yaml - !!binary | ZTg0YTEwODgyYTliNjgyZmY0MWM4NGU4YmY0ZWUyNDk3ZTdlN2EzMQ== - - releasenotes/notes/fix-aggregation-transformer-9472aea189fa8f65.yaml - !!binary | ZTZmYTBhODRkMWY3YTMyNjg4MWYzNTg3NzE4ZjFkZjc0M2I4NTg1Zg== - - releasenotes/notes/fix-floatingip-pollster-f5172060c626b19e.yaml - !!binary | MWY5ZjRlMTA3MmE1ZTUwMzdiOTM3MzRiYWZjYzY1ZTQyMTFlYjE5Zg== - - releasenotes/notes/fix-network-lb-bytes-sample-5dec2c6f3a8ae174.yaml - !!binary | MTY4OWU3MDUzZjRlNzU4N2EyYjgzNjAzNWNkZmE0ZmRhNTY2NjdmYw== - - releasenotes/notes/gnocchi-cache-1d8025dfc954f281.yaml - !!binary | ZjI0ZWE0NDQwMWI4OTQ1YzljYjhhMzRiMmFlZGViYmEzYzA0MDY5MQ== - - releasenotes/notes/gnocchi-cache-b9ad4d85a1da8d3f.yaml - !!binary | MTY4OWU3MDUzZjRlNzU4N2EyYjgzNjAzNWNkZmE0ZmRhNTY2NjdmYw== - - releasenotes/notes/gnocchi-client-42cd992075ee53ab.yaml - !!binary | MTY4OWU3MDUzZjRlNzU4N2EyYjgzNjAzNWNkZmE0ZmRhNTY2NjdmYw== - - releasenotes/notes/gnocchi-host-metrics-829bcb965d8f2533.yaml - !!binary | ZTZmYTBhODRkMWY3YTMyNjg4MWYzNTg3NzE4ZjFkZjc0M2I4NTg1Zg== - - releasenotes/notes/gnocchi-orchestration-3497c689268df0d1.yaml - !!binary | MTY4OWU3MDUzZjRlNzU4N2EyYjgzNjAzNWNkZmE0ZmRhNTY2NjdmYw== - - releasenotes/notes/gnocchi-udp-collector-00415e6674b5cc0f.yaml - !!binary | MTY4OWU3MDUzZjRlNzU4N2EyYjgzNjAzNWNkZmE0ZmRhNTY2NjdmYw== - - releasenotes/notes/handle-malformed-resource-definitions-ad4f69f898ced34d.yaml - !!binary | MDJiMWUxMzk5YmY4ODVkMDMxMTNhMWNjMTI1YjFmOTdlZDU1NDBiOQ== - - releasenotes/notes/improve-events-rbac-support-f216bd7f34b02032.yaml - !!binary | ZTZmYTBhODRkMWY3YTMyNjg4MWYzNTg3NzE4ZjFkZjc0M2I4NTg1Zg== - - releasenotes/notes/index-events-mongodb-63cb04200b03a093.yaml - !!binary | MTY4OWU3MDUzZjRlNzU4N2EyYjgzNjAzNWNkZmE0ZmRhNTY2NjdmYw== - - releasenotes/notes/keystone-v3-fab1e257c5672965.yaml - !!binary | MTY4OWU3MDUzZjRlNzU4N2EyYjgzNjAzNWNkZmE0ZmRhNTY2NjdmYw== - - 
releasenotes/notes/lookup-meter-def-vol-correctly-0122ae429275f2a6.yaml - !!binary | OTAzYTBhNTI3Y2IyNDBjZmQ5NDYyYjdmNTZkMzQ2M2RiNzEyODk5Mw== - - releasenotes/notes/mongodb-handle-large-numbers-7c235598ca700f2d.yaml - !!binary | ZTZmYTBhODRkMWY3YTMyNjg4MWYzNTg3NzE4ZjFkZjc0M2I4NTg1Zg== - - releasenotes/notes/remove-alarms-4df3cdb4f1fb5faa.yaml - !!binary | ZjI0ZWE0NDQwMWI4OTQ1YzljYjhhMzRiMmFlZGViYmEzYzA0MDY5MQ== - - releasenotes/notes/remove-cadf-http-f8449ced3d2a29d4.yaml - !!binary | MTY4OWU3MDUzZjRlNzU4N2EyYjgzNjAzNWNkZmE0ZmRhNTY2NjdmYw== - - releasenotes/notes/remove-eventlet-6738321434b60c78.yaml - !!binary | ZjI0ZWE0NDQwMWI4OTQ1YzljYjhhMzRiMmFlZGViYmEzYzA0MDY5MQ== - - releasenotes/notes/remove-rpc-collector-d0d0a354140fd107.yaml - !!binary | MTY4OWU3MDUzZjRlNzU4N2EyYjgzNjAzNWNkZmE0ZmRhNTY2NjdmYw== - - releasenotes/notes/skip-duplicate-meter-def-0420164f6a95c50c.yaml - !!binary | MGM2ZjExY2Y4OGJmMWExM2E3MjM4NzlkZTQ2ZWM2MTY2NzhkMmUwYg== - - releasenotes/notes/sql-query-optimisation-ebb2233f7a9b5d06.yaml - !!binary | ZjI0ZWE0NDQwMWI4OTQ1YzljYjhhMzRiMmFlZGViYmEzYzA0MDY5MQ== - - releasenotes/notes/support-None-query-45abaae45f08eda4.yaml - !!binary | ZTZmYTBhODRkMWY3YTMyNjg4MWYzNTg3NzE4ZjFkZjc0M2I4NTg1Zg== - - releasenotes/notes/support-lbaasv2-polling-c830dd49bcf25f64.yaml - !!binary | ZTZmYTBhODRkMWY3YTMyNjg4MWYzNTg3NzE4ZjFkZjc0M2I4NTg1Zg== - - releasenotes/notes/support-snmp-cpu-util-5c1c7afb713c1acd.yaml - !!binary | ZjI0ZWE0NDQwMWI4OTQ1YzljYjhhMzRiMmFlZGViYmEzYzA0MDY5MQ== - - releasenotes/notes/support-unique-meter-query-221c6e0c1dc1b726.yaml - !!binary | ZTZmYTBhODRkMWY3YTMyNjg4MWYzNTg3NzE4ZjFkZjc0M2I4NTg1Zg== - - releasenotes/notes/thread-safe-matching-4a635fc4965c5d4c.yaml - !!binary | ZjI0ZWE0NDQwMWI4OTQ1YzljYjhhMzRiMmFlZGViYmEzYzA0MDY5MQ== version: 6.0.0 - files: - - releasenotes/notes/refresh-legacy-cache-e4dbbd3e2eeca70b.yaml - !!binary | NjZkZDhhYjY1ZTJkOTM1MmRlODZlNDcwNTZkZWEwYjcwMWUyMWExNQ== version: 7.0.5 - files: - - 
releasenotes/notes/http_proxy_to_wsgi_enabled-616fa123809e1600.yaml - !!binary | MDMyMDMyNjQyYWQ0OWUwMWQ3MDZmMTlmNTFkNjcyZmNmZjQwMzQ0Mg== version: 7.0.1 - files: - - releasenotes/notes/add-db-legacy-clean-tool-7b3e3714f414c448.yaml - !!binary | ODAwMDM0ZGMwYmJiOTUwMjg5M2RlZGQ5YmNkZTdjMTcwNzgwYzM3NQ== - - releasenotes/notes/add-full-snmpv3-usm-support-ab540c902fa89b9d.yaml - !!binary | ZGMyNTRlMmY3OGE0YmI0MmIwZGY2NTU2ZGY4MzQ3YzcxMzdhYjViMg== - - releasenotes/notes/add-magnum-event-4c75ed0bb268d19c.yaml - !!binary | Y2YzZjdjOTkyZTBkMjllMDZhN2JmZjZjMWRmMmYwMTQ0NDE4ZDgwZg== - - releasenotes/notes/always-requeue-7a2df9243987ab67.yaml - !!binary | NDA2ODRkYWZhZTc2ZWFiNzdiNjZiYjFkYTdlMTQzYTNkN2UyYzljOA== - - releasenotes/notes/deprecated_database_event_dispatcher_panko-607d558c86a90f17.yaml - !!binary | MzY4NWRjZjQxNzU0M2RiMGJiNzA4YjM0N2U5OTZkODgzODVjOGM1Yg== - - releasenotes/notes/http-dispatcher-verify-ssl-551d639f37849c6f.yaml - !!binary | MmZjYTdlYmQ3YzZhNGQyOWM4YTMyMGZmZmQwMzVlZDk4MTRlODI5Mw== - - releasenotes/notes/kwapi_deprecated-c92b9e72c78365f0.yaml - !!binary | MmJiODFkNDFmMWM1MDg2YjY4YjEyOTAzNjJjNzI5NjZjMWUzMzcwMg== - - releasenotes/notes/memory-bandwidth-meter-f86cf01178573671.yaml - !!binary | ZWQ3YjZkYmM5NTJlNDljYTY5ZGU5YTk0YTAxMzk4YjEwNmFlY2U0Yg== - - releasenotes/notes/perf-events-meter-b06c2a915c33bfaf.yaml - !!binary | YWFlZGJiZTBlYjAyYWQxZjg2Mzk1YTVhNDkwNDk1YjY0Y2UyNjc3Nw== - - releasenotes/notes/rename-ceilometer-dbsync-eb7a1fa503085528.yaml - !!binary | MThjMTgxZjBiM2NlMDdhMGNkNTUyYTkwNjBkZDA5YTk1Y2MyNjA3OA== - - releasenotes/notes/single-thread-pipelines-f9e6ac4b062747fe.yaml - !!binary | NTc1MGZkZGYyODhjNzQ5Y2FjZmM4MjU3NTM5MjhmNjZlNzU1NzU4ZA== - - releasenotes/notes/support-meter-batch-recording-mongo-6c2bdf4fbb9764eb.yaml - !!binary | YTJhMDRlNWQyMzRiYTM1OGMyNWQ1NDFmMzFmOGNhMWE2MWJmZDVkOA== - - releasenotes/notes/unify-timestamp-of-polled-data-fbfcff43cd2d04bc.yaml - !!binary | OGRkODIxYTAzZGNmZjQ1MjU4MjUxYmViZmQyYmViODZjMDdkOTRmNw== - - 
releasenotes/notes/use-glance-v2-in-image-pollsters-137a315577d5dc4c.yaml - !!binary | Zjg5MzNmNGFiZGE0ZWNmYzA3ZWU0MWY4NGZkNWZkOGY2NjY3ZTk1YQ== version: 7.0.0 - files: - - releasenotes/notes/ceilometer-api-deprecate-862bfaa54e80fa01.yaml - !!binary | NjYxNmE3MTQwMDlhODBjNzQ4NGZhMjI5MmMyMzMxODY4NjE3Y2I5Yw== - - releasenotes/notes/ceilometer-event-api-removed-49c57835e307b997.yaml - !!binary | OGQyM2Y0MzFhYjBiZDYzOGVkYmYyMTk3ZTU2YmVhNjhkN2IwNmEyMQ== - - releasenotes/notes/deprecate-file-dispatcher-2aff376db7609136.yaml - !!binary | MGFhYTE2MDNkNGQ3N2QxNDY1YjYwMzllNTU2ZjY4ZDQ0MjUxMjJlNA== - - releasenotes/notes/deprecate-http-dispatcher-dbbaacee8182b550.yaml - !!binary | ZTJlNzQ4OTJkYWE3OTM4OGFjY2I2ZGY0ZDQ2NDJhNDYzMjMzMjkxMQ== - - releasenotes/notes/drop-image-meter-9c9b6cebd546dae7.yaml - !!binary | NGQ4YmMxMDk1NjI3NTc0OTE5ZDJkNjk5ZTgyZjU1MjI2OThjZGU1ZQ== - - releasenotes/notes/drop-instance-meter-1b657717b21a0f55.yaml - !!binary | YTkxMjUzYTgxMDRiM2Y1NmE4NDdlYzY0ZGNiZTE3MjBiMDZmMjBlOQ== - - releasenotes/notes/http-dispatcher-batching-4e17fce46a196b07.yaml - !!binary | NTRlNGNjNzdlMDg4MDBkMmU5YjlmOGQyYzY3MjkwMTE4YmVhZTZhYw== - - releasenotes/notes/http_proxy_to_wsgi_enabled-616fa123809e1600.yaml - !!binary | NzUyYWYyMDhlMGU0MjI2NGIzMTQwYjBmZDY4NmM2NDkwMjU5YzEzNA== - - releasenotes/notes/instance-discovery-new-default-7f9b451a515dddf4.yaml - !!binary | MTNhZWJhNDEwYjAxOTVhOTBkMWMzNTlkNTljNWJmYjA4MjBhYWU5Yg== - - releasenotes/notes/less-nova-polling-ac56687da3f8b1a3.yaml - !!binary | YjY5MmQzYTZhNjFmYmJlNGMzZGI4YzE0YTdhZTU0MjM1NTliNTE0NA== - - releasenotes/notes/pecan-debug-removed-dc737efbf911bde7.yaml - !!binary | M2U5NWNjMTJmZDI0YjA4OGZmYmUwMjgxNDVmYjM5ZjU5MGYzZTczNA== - - releasenotes/notes/polling-definition-efffb92e3810e571.yaml - !!binary | ODk5OTUyODA5MjdkNDk1MDQyZjM0MmU4YTBhNTIwZmE0Nzc1YzUxNQ== - - releasenotes/notes/refresh-legacy-cache-e4dbbd3e2eeca70b.yaml - !!binary | M2FlOTk3ZTNkNmM5NjQ4MjkyOTY5MzY2ZDI1MzYyMjk3NDc0YjgxNQ== - - 
releasenotes/notes/remove-ceilometer-dbsync-53aa1b529f194f15.yaml - !!binary | Nzc5NjczNTM0ZmVmYzM5NjMzYWVkMThiYjAyMDg4NWJkNjAyMGUwNg== - - releasenotes/notes/support-cinder-volume-snapshot-backup-metering-d0a93b86bd53e803.yaml - !!binary | YjczNDRkZDZjODIzMjIxNGIxOWMzOTY3NzJlNTQ0YzkyOTYyNzQ2OA== version: 8.0.0 - files: - - releasenotes/notes/add-memory-swap-metric-f1633962ab2cf0f6.yaml - !!binary | ZjhjMjQzZjQ0OGRiYjAyMzA3Y2NhZGM5NmQ5ZTU4NTA5MWM2MjA2MA== - - releasenotes/notes/add-tool-for-migrating-data-to-gnocchi-cea8d4db68ce03d0.yaml - !!binary | YzE1ZDhiYzYzMzBkZTYzMzE3MDg5Y2E2MTRmODY2NjdjYzQwZWQ3YQ== - - releasenotes/notes/deprecate-ceilometer-collector-b793b91cd28b9e7f.yaml - !!binary | MTExOTFhNDYxMmU0MjRjMDJhNWQ5MGExMzM3MTQxYzI2Zjc5YzA5OA== - - releasenotes/notes/deprecate-http-control-exchanges-026a8de6819841f8.yaml - !!binary | NWJlZWRjODFlMWU1NjkwNTk2ODZiNjRmYjYzMmNmZmRkODY5ODViNQ== - - releasenotes/notes/deprecate-kafka-publisher-17b4f221758e15da.yaml - !!binary | YWYyM2I2ZWVhZmNiN2FkYzc2ZjYwYmZjYjA0YWVlNjk5Yzk3NWUzMQ== - - releasenotes/notes/deprecate-pollster-list-ccf22b0dea44f043.yaml - !!binary | MzJhN2M0ZGZiZDlhZDc5NGEzYzVkMzAwYzQ5OTZjNjU4NmZjMDYyNg== - - releasenotes/notes/drop-kwapi-b687bc476186d01b.yaml - !!binary | NDdhZTE4MmI0ZGJhZGExYmViNGUxYjkwMTdmYWQxMDJkOTU0OWFlYw== - - releasenotes/notes/http-publisher-authentication-6371c5a9aa8d4c03.yaml - !!binary | MTkxNzQ4YTQwM2UxZTM4ZDZjZjY0M2QyMTBhN2ZkOWRlM2E3ZmMxMQ== - - releasenotes/notes/network-statistics-from-opendaylight-787df77484d8d751.yaml - !!binary | YWUwNzE2YzZkMDYxYjg3ODRhZmVlZGY4ODIzZWJjOTQ3MTdhOWFlYg== - - releasenotes/notes/parallel_requests_option-a3f901b6001e26e4.yaml - !!binary | Yzg0YzExM2MwYTQwMjIxNmM2YTZmMDllZDQ2MjJhODE2M2U0YWFlYg== - - releasenotes/notes/polling-deprecation-4d5b83180893c053.yaml - !!binary | MWRkODA2NjRhOGIyMTlkZGIxMDkyYmU1NDEzYTMxMzA3M2UyNzg1Yg== - - releasenotes/notes/remove-refresh-pipeline-618af089c5435db7.yaml - !!binary | 
MWRiZDMwN2EzYTZhYjQ1YTc5N2Q0Zjg4ZTM0MmIyZjUzN2UzYmI2Zg== - - releasenotes/notes/scan-domains-for-tenants-8f8c9edcb74cc173.yaml - !!binary | ZmY1ODIyZDJiNGJlNDgwZTNjMzVlYTJhNzAwYmY3ZTY0M2YyMDVhYQ== - - releasenotes/notes/ship-yaml-files-33aa5852bedba7f0.yaml - !!binary | ZDljMTFiYjBhYmYxNWRlMTQ4N2EyODdiZGNkZjQyYzZmZmIyZDk0ZA== - - releasenotes/notes/support-multiple-meter-definition-files-e3ce1fa73ef2e1de.yaml - !!binary | ZjA1OTM5ZDc0MjIzM2I0NDI0MDAzNmUxMTUxYTA3YmI3MWI0MTU5ZA== - - releasenotes/notes/tooz-coordination-system-d1054b9d1a5ddf32.yaml - !!binary | Mjc2MDRhYmQ0NjFkN2RiZjgwOThjN2NjNzk0ZGZjYzI2ODZjNDUyNw== - - releasenotes/notes/use-notification-transport-url-489f3d31dc66c4d2.yaml - !!binary | Mzc5ZjEwZmM3MzkzNThiODViMDMxNzYxZmFkN2Q5Y2QyNjU4YWY3Nw== - - releasenotes/notes/zaqar-publisher-f7efa030b71731f4.yaml - !!binary | YzFjNTZkNmFhZWFjZDFlODFiNmQ3NGI3ZWY5YzJlZWFlMjJjZWZiYw== version: 9.0.0 - files: - - releasenotes/notes/instance-record-launched-created-deleted-d7f44df3bbcf0790.yaml - !!binary | NjU4MmU4ZTRlMjBjNTExYWNhYjllMTZjZmQzYzBmOGU1NjlmNzA1YQ== version: 10.0.1-14 - files: - - releasenotes/notes/add-disk-latency-metrics-9e5c05108a78c3d9.yaml - !!binary | ZjRiNThhZTAxZThkZGZjNTE1ZTZmMTRhMGQxOWQ3MjYzNzBmNDg3MA== version: 10.0.1 - files: - - releasenotes/notes/ceilometer-api-removal-6bd44d3eab05e593.yaml - !!binary | ZDg4MWRkNTIyODlkNDUzYjlmOWQ5NGM3YzMyYzA2NzJhNzBhODA2NA== - - releasenotes/notes/cinder-capacity-samples-de94dcfed5540b6c.yaml - !!binary | YjEwMDc2ZDAzYzYzNTk1YmI1ODI5MTQ5ODY4YjFmYzVlNTI1ZjIxNg== - - releasenotes/notes/deprecate-aggregated-disk-metrics-54a395c05e74d685.yaml - !!binary | MWU2NzNhNjRiOWE5MDg4MGNiODM4OTVhMmI1NGY5MWRlNWI4NWI0ZA== - - releasenotes/notes/drop-collector-4c207b35d67b2977.yaml - !!binary | ZmFkNjllOTYwM2UyMGMwNzYxNzU3MzMzNzQ5OTNmZDMxZGY0OWE0Yw== - - releasenotes/notes/fix-radosgw-name-6de6899ddcd7e06d.yaml - !!binary | ZmRjNTQ0ODc5MjJjZDI3ZmVlY2YzMTZmMzM0YTY0MWYyYTU2NzMyMQ== - - 
releasenotes/notes/manager-based-ipc-queues-85e3bf59ffdfb0ac.yaml - !!binary | OTExYjk3M2Q3MDU2YjhiYmJmZmVmMWY0ZDM2YmQ2NjIxNzNhNGY5MQ== - - releasenotes/notes/pipeline-fallback-polling-3d962a0fff49ccdd.yaml - !!binary | YmI1NzMxNzdjZjM1Mjk5MjQyNWE2ZDU4MDFlZDc0NDBhZDhkMTU4Yg== - - releasenotes/notes/remove-compute-workload-partitioning-option-26538bc1e80500e3.yaml - !!binary | NjBmMTFjYjdhZjVhYmEwNjE0MjlmNGNiODZhN2NhNGExNzVhYWRiMg== - - releasenotes/notes/remove-direct-publisher-5785ee7edd16c4d9.yaml - !!binary | ZmZjODdjMGI0YzU4OTY2MzJkMThhYWEzNmZkYzdmMzFkOWQ3MWU5OQ== - - releasenotes/notes/remove-exchange-control-options-75ecd49423639068.yaml - !!binary | NTA4ZmFkMTA5ZGFhM2JkNTAxMWRhZDlkOWNjYzJlOGJhY2NmYzNkZg== - - releasenotes/notes/remove-file-dispatcher-56ba1066c20d314a.yaml - !!binary | MWNiNzEzZjNlMjM4ZmExZDUzNGU4Y2E5ZjViYWFlNGM1NGFmMDYwOQ== - - releasenotes/notes/remove-gnocchi-dispatcher-dd588252976c2abb.yaml - !!binary | ODNmZmFmZmNiMmNlZTZhMGIxOTYwMWJjN2NlZmQ4NjM2ODU2MDFlMg== - - releasenotes/notes/remove-http-dispatcher-1afdce1d1dc3158d.yaml - !!binary | MWNiNzEzZjNlMjM4ZmExZDUzNGU4Y2E5ZjViYWFlNGM1NGFmMDYwOQ== - - releasenotes/notes/remove-kafka-broker-publisher-7026b370cfc831db.yaml - !!binary | MjAwMjM3MzAxNzRiZGEwMDViM2ZlNjk3OGFkMmU5OGVmZTA2MGI3NQ== - - releasenotes/notes/remove-nova-http-log-option-64e97a511e58da5d.yaml - !!binary | ZDU2M2UxMzQ4YjJiYWE1OGRlZTc3MmIzN2QwY2RkMDhkNjdhZGI4Zg== - - releasenotes/notes/remove-pollster-list-bda30d747fb87c9e.yaml - !!binary | MzJjMTI5YWFiZDA1NjMzZWU4YTJkYmU1YjkxYzAxZDJmZjkxMDg4Mg== - - releasenotes/notes/remove-shuffle_time_before_polling_task-option-05a4d225236c64b1.yaml - !!binary | ZmFhYzAzMWE5YjY4OTM5NjMzNzU2NzRmMDMxZTI4YThjNDg2YzJhOA== - - releasenotes/notes/selective-pipeline-notification-47e8a390b1c7dcc4.yaml - !!binary | NjBkOWI4N2E4MDhjNzVjMWNkMTExYTdmYzc4OGY1OGUwN2IzNGU5OQ== - - releasenotes/notes/snmp-diskio-samples-fc4b5ed5f19c096c.yaml - !!binary | 
OWY3ODc4ZWVkMDRiZmEwZjQ1NTQyZDZlMDA1ZTc3ZjZkODllNWI4Nw== version: 10.0.0 - files: - - releasenotes/notes/add-availability_zone-gnocchi-instance-15170e4966a89d63.yaml - !!binary | MGQzZDU1YWY3MDRkM2YwNjg5ODA1ZjJiMDNjNTM5N2NlMDc2NjdhYQ== version: 11.1.0-6 - files: - - releasenotes/notes/add-loadbalancer-resource-type-a73c29594b72f012.yaml - !!binary | ZjU2NDA3ZDc5NTNmZTYzNWJiNzAzZGE1NDNmMjgwN2E4ZDMyMzY5Zg== version: 11.1.0 - files: - - releasenotes/notes/add-disk-latency-metrics-9e5c05108a78c3d9.yaml - !!binary | OGZkZDE5ZTc4YTIwNTMyODU1NjljZGEwNWNkYzQ4NzViNzE2MTkwYw== - - releasenotes/notes/add-ipmi-sensor-data-gnocchi-70573728499abe86.yaml - !!binary | NjYzYzUyMzMyODY5MGRmY2MzMGMxYWQ5ODZiYTU3ZTU2NmJkMTk0Yw== - - releasenotes/notes/gnocchi-no-metric-by-default-b643e09f5ffef2c4.yaml - !!binary | ODI2YmEzNWM2ZWI5OTAwYmIwYTU1N2Y2ZTRmMDZmN2QxYjliZDM5NA== - - releasenotes/notes/instance-record-launched-created-deleted-d7f44df3bbcf0790.yaml - !!binary | MzY0MTRlMWNlYmUzYTQzZDk2MmY4ZDJhZGZlN2NjMzQ3NDJlOTA1Nw== - - releasenotes/notes/polling-batch-size-7fe11925df8d1221.yaml - !!binary | MmRjMjFhNWYwNWVlNjcwMjkyYThhN2Y5Nzk1MmQzOTQyYzMyZjVjZg== - - releasenotes/notes/prometheus-bcb201cfe46d5778.yaml - !!binary | MmI4MDUyMDUyZDg2MWI4NTZiMzUyMmE4ZDdmODU3NzM1NzkzZjAxYg== - - releasenotes/notes/remove-gnocchi-dispatcher-options-4f4ba2a155c1a766.yaml - !!binary | NWVmYzAyODFmYWFiMmYxN2FjYWI4ZDM4NGJlYWYzYzg3YjA4N2U1OA== - - releasenotes/notes/removed-rgw-ae3d80c2eafc9319.yaml - !!binary | ZGQxYjdhYmYzMjk3NTVjODM3Nzg2MjMyOGY3NzBlMGI3OTc0ZjVjMg== - - releasenotes/notes/save-rate-in-gnocchi-66244262bc4b7842.yaml - !!binary | ZTkwNmJjZGE4MjkxOGFmZjAwMGFiNzZmMDY3YTJkYzQ5NjYwZDBiNA== - - releasenotes/notes/transformer-ed4b1ea7d1752576.yaml - !!binary | MWRjYmQ2MDdkZjA2OTYxMDFiNDBmNzdkNzcyMTQ4OTY3OWViZTBiYQ== - - releasenotes/notes/use-usable-metric-if-available-970ee58e8fdeece6.yaml - !!binary | MmRlZTQ4NWRhN2E2ZjJjZGY5NjUyNWZhYmMxOGE4YzI3YzhiZTU3MA== version: 11.0.0 - files: - - 
releasenotes/notes/add-loadbalancer-resource-type-a73c29594b72f012.yaml - !!binary | YjU5MWZlMTFkN2RmNGQyZWRhM2VmNWRhODU0MDM1YjhlZjc4MGViZg== - - releasenotes/notes/add-upgrade-check-framework-d78858c54cb85f91.yaml - !!binary | MmUzOTVlMDVjMzA3MzE1NWQ2YmRiM2Y4ZGJhMTc0NWMxODAxOTIxZg== version: 12.1.0 - files: - - releasenotes/notes/add-json-output-to-file-publisher-786380cb7e21b56b.yaml - !!binary | MDYzYWY0Mzc0NGJiMGMyOWNlODY2YmJhNmQzYzQwZTM1MDhhMjAxZg== - - releasenotes/notes/deprecate-events-6561f4059fa25c02.yaml - !!binary | OGEwMjQ1YTViM2UxMzU3ZDM1YWQ2NjUzYmUzN2NhMDExNzY1NzdlNA== - - releasenotes/notes/remove-batch_polled_samples-b40241c8aad3667d.yaml - !!binary | NDEzYTlhOTQ0NmE0MmYxOTZlOGVhOGZlNmYwNTc1NDI2YjA4MjA1ZQ== - - releasenotes/notes/remove-compute-disk-meters-264e686622886ff0.yaml - !!binary | ZjdiMTIxOGI4ZTI2YTBmOWE1OTI0YTE1YjAyOWE5NjE0MTNjYzQwYQ== - - releasenotes/notes/remove-compute-rate-deprecated-meters-201893c6b686b04a.yaml - !!binary | ZjdiMTIxOGI4ZTI2YTBmOWE1OTI0YTE1YjAyOWE5NjE0MTNjYzQwYQ== - - releasenotes/notes/remove-meter-definitions-cfg-file-d57c726d563d805f.yaml - !!binary | NDEwNjA3OTE4MGJkZTJlOTg5NTExZDE3ODFmYjdlYTMxOTA5ODMxMA== - - releasenotes/notes/remove-notification-workload-partitioning-2cef114fb2478e39.yaml - !!binary | OWQ5MGNlOGQzN2MwMDIwMDc3ZTQ0MjlmNDFjMWVhOTM3YzFiM2MxZQ== - - releasenotes/notes/remove-publisher-topic-options-7a40787a3998921d.yaml - !!binary | NTYwNjNmMzI2MjcyNmRhZjU5MDZiYWY5MGE5ODFmOWQ4OGVhYTllZA== - - releasenotes/notes/remove-transformers-14e00a789dedd76b.yaml - !!binary | OWRiNWM2YzliZmM2NjAxOGFlYjc4YzRhMjYyZTFiZmE5YjMyNjc5OA== - - releasenotes/notes/snmp-cpu-util-055cd7704056c1ce.yaml - !!binary | OGZkNjgzOTZhZjU5NWEyYmEwMDk1OTFjOWU1M2ExYjU1NTU3YjMxMQ== version: 12.0.0 - files: - - releasenotes/notes/add-loadbalancer-resource-type-a73c29594b72f012.yaml - !!binary | MWFlNWRjYmIzMmE1NGNlOWVkZjUzOTZlMTE0YWFiYjUyN2ZkYzg1Mw== version: 13.1.0 - files: - - 
releasenotes/notes/add-availability_zone-gnocchi-instance-15170e4966a89d63.yaml - !!binary | MjFhODEwZmVjNmJiZjE5NTg0Y2FkN2FhNWFmZjU5ZWNlYTgyNjkwNw== - - releasenotes/notes/add-upgrade-check-framework-d78858c54cb85f91.yaml - !!binary | NTMzMjFjMWE3MjQwOTY3YTUwNzZmMDU1ZTYwMDkyNzg2ZjY2YjJhMA== - - releasenotes/notes/remove-meter-definitions-cfg-file-config-476596fc86c36a81.yaml - !!binary | MWY4ZWMzZmFjZGE5OGEzOTU5NzVjMzMwYTdlMTk3MjI4ZmQwNDdmMQ== - - releasenotes/notes/switch-to-oslo-privsep-b58f20a279f31bc0.yaml - !!binary | YmQwZDVhOGEyN2IyOTQ1NWUxOWFkMDYyZjQ0ZGQxZmZiOGFmMWFiZg== version: 13.0.0 - files: - - releasenotes/notes/fix-1940660-5226988f2e7ae1bd.yaml - !!binary | ZmFlNjc0YWQ0YWZlOTJmZjRiYjMwMzIyZWRkZTY1ZTBkZmE5ZDE5MQ== version: 14.1.0-4 - files: - - releasenotes/notes/add-loadbalancer-resource-type-a73c29594b72f012.yaml - !!binary | MTYxN2FhMzA5OWJmYTVmN2IzOGMxMWM1MWIyM2QyYjljZjliNWMyYg== - - releasenotes/notes/drop-py-2-7-87352d5763131c13.yaml - !!binary | YzYzOTU3ODIxNzIwMDdjYzg2OGIxY2ZkMGYxMWM4YmFhMzU2N2ViNA== - - releasenotes/notes/dynamic-pollster-system-6b45c8c973201b2b.yaml - !!binary | N2JmZjQ2OTIxZTZhNWY5YzhlY2FlOTdhYTM3NTZkOGM1NzBmMjNjOA== - - releasenotes/notes/dynamic-pollster-system-for-non-openstack-apis-4e06694f223f34f3.yaml - !!binary | N2NiYTI3N2Q3OThjMDc0MTBiOWI0MWJlZjk0NWI4M2U4YzRhMTZlNQ== - - releasenotes/notes/include-monasca-publisher-1f47dde52af50feb.yaml - !!binary | MTI2MzUwYzBhZTYwOWM1ZDM1ZDU0NTU2ODgzZGEyNDc2ZTgxZTMwZQ== version: 14.0.0 - files: - - releasenotes/notes/fix-1940660-5226988f2e7ae1bd.yaml - !!binary | M2Y3M2Q4YmFlZWM4NTcwM2FjMzA3ODMwNzcyZDNiMzM3MmZiZTEwZg== version: 15.1.0 - files: - - releasenotes/notes/add-tenant-name-discovery-668260bb4b2b0e8c.yaml - !!binary | ZjdhYWIwN2JjNTdiZWNiNjA5NzBiYTI5NDMwZjNjNDJhOWQ1NDQ0ZQ== version: 16.0.1-12 - files: - - releasenotes/notes/fix-1940660-5226988f2e7ae1bd.yaml - !!binary | OGZkNTE1NzczYmM5ODMwOWRjOWM5YTViMWYxMzYzZjJmOThhODVmOA== version: 16.0.1 - files: - - 
releasenotes/notes/cinder-volume-size-poller-availability_zone-2d20a7527e2341b9.yaml - !!binary | NzdiNTE2YWNhYzVjNzEzMDE3N2U1ZGU1YTcyMGNmNDFlNjJmM2I4Yg== - - releasenotes/notes/deprecate-xen-support-27600e2bf7be548c.yaml - !!binary | ZmQwYTU2MWJlYTk1NmYxYjYyZjZjYTVhMjdlNzYyY2I3NmFkOWE5MA== version: 16.0.0 - files: - - releasenotes/notes/add-tenant-name-discovery-668260bb4b2b0e8c.yaml - !!binary | YTQzYWZhZDMwZjlmYTk2NDE3Zjk1Y2MyNTU4MGQ2ODdiOGFjZGYyZQ== version: 17.0.2-6 - files: - - releasenotes/notes/bug-1929178-a8243526ce2311f7.yaml - !!binary | NDZhZDI3ODZhZTQ4ZjQxYTk3YTAxNGZkMTQwZjAyMmVlM2VlM2MxYw== - - releasenotes/notes/fix-1940660-5226988f2e7ae1bd.yaml - !!binary | ZWQ0MDRjNWY2NmU4NzQ3NzlkNThkM2FjODFmMjhhZTIyYzU1Y2YwOQ== - - releasenotes/notes/remove-xen-support-7cb932b7bc621269.yaml - !!binary | OTliZGQ3OGQ5NTA5NzdiMTFlZmZiYTA4Y2YzYTc2ZTViODdiZjliNA== version: 17.0.0 - files: - - releasenotes/notes/add-tenant-name-discovery-668260bb4b2b0e8c.yaml - !!binary | YzZmMDAyNTdkNDU4NmVlYmZjYWY0YTIwODgyNGY1YmUzYTYzYzNmNA== version: 18.1.0 - files: - - releasenotes/notes/deprecate-generic-hardware-declarative-pollstar-dfa418bf6a5e0459.yaml - !!binary | ZDEwZjZjYTMyZmQ5ZGZlNTEwMGNiNTQ1MzFhZGI0YmJhODY2ZWRmOA== - - releasenotes/notes/deprecate-neutron-fwaas-e985afe956240c08.yaml - !!binary | ZGM1ZGQ4OTE1MmExYjIxNjdlNjBhMmMxNGM5ZGYxYjljZjAyN2Q4Nw== - - releasenotes/notes/deprecate-neutron-lbaas-5a36406cbe44bbe3.yaml - !!binary | ODkxN2M3Mzk2NGViNzY0YzNjNGJlYjY1ZDM3MTNiMzY5MzgxODFkZA== - - releasenotes/notes/fix-notification-batch-9bb42cbdf817e7f9.yaml - !!binary | ZWM1YjkwZTk4ZjYwNTYyNmVjNzY2ZTNmOGI4MjMwNDRlMzRkZGMyZA== - - releasenotes/notes/openstack-dynamic-pollsters-metadata-enrichment-703cf5914cf0c578.yaml - !!binary | ZmJiNGI2ZDI2NGM5ZDI0ZTNmODVmODkxZWY3NjU1MDdiZTFmODk5YQ== version: 18.0.0 - files: - - releasenotes/notes/add-tenant-name-discovery-668260bb4b2b0e8c.yaml - !!binary | MWRhMGUxNGJlZjk2OWM3Y2NmZjU3OTEwZjFiNDQwODIzNGQ1MGY1Yw== - - 
releasenotes/notes/bug-2007108-dba7163b245ad8fd.yaml - !!binary | MjMzMDhiZmE3ZjA4MTZlYjUzYjg0MzExMWYyMDU0YjY4NjhiZWUyZA== version: 19.1.0 - files: - - releasenotes/notes/drop-python-3-6-and-3-7-f67097fa6894da52.yaml - !!binary | YmYyNjNiMTExODFmNGU0NDg1MGU5OTEyODI3NjZiM2JkZjRmNDFlMQ== - - releasenotes/notes/remove-check_watchers-a7c955703b6d9f57.yaml - !!binary | MzQwMGFkMTM0YjgwNTEyZmQzYTA2ZmY4NzYwZTgwYjBhYmVhYWZiNg== - - releasenotes/notes/remove-generic-hardware-declarative-pollster-e05c614f273ab149.yaml - !!binary | YTI4Y2VmNzAzNmVkYzJlY2IwZjYwYjVkMjdhOTc3MzU0ODJlN2Y5OA== - - releasenotes/notes/remove-neutron-lbaas-d3d4a5327f6a167a.yaml - !!binary | MzE4YzU0NjQ4YzJjODVkNGY0ZjU0MjVjNWZmYzVlNWYzZGRhODZmMg== version: 19.0.0 - files: - - releasenotes/notes/add-tenant-name-discovery-668260bb4b2b0e8c.yaml - !!binary | Nzk0NTRkNmIyMjc4NzYyN2FlNjIzOWFhN2IyNzA3MTAxYmEzMDIxMg== - - releasenotes/notes/bug-2007108-dba7163b245ad8fd.yaml - !!binary | MDExODc0MmJkMzk2YjE3ODNhMWUzNjkxYTIxZDNkMzc1YmNhYTlhMQ== version: 20.0.0 - files: - - releasenotes/notes/deprecate-vmware-ae49e07e40e74577.yaml - !!binary | Mjk3MDg5YTYyMmMyNTc5NWY5N2Q2NDdkNTRjNjI0MGFiMGUxMmIxYw== - - releasenotes/notes/deprecate-windows-support-d784b975ce878864.yaml - !!binary | NzY2MDMzOWI0YTRkNTc4ZjI2MTVmZDZjMjdlOGM5NjI3YWIyOWQzNw== - - releasenotes/notes/volume-metrics-01ddde0180bc21cb.yaml - !!binary | YWJlYzliM2VhYTVhZWQxMWI3MzA5MGM1OGMxYzc3ZGI2MGFlYWQwMg== version: 21.0.0 - files: - - releasenotes/notes/deprecate-contrail-256177299deb6926.yaml - !!binary | MzZlNDdkNzQzMjhkMGFmZjliYTIwZjNmZTQwZGJhZjM2Y2VmM2NjOA== - - releasenotes/notes/deprecate-odl-07e3f59165612566.yaml - !!binary | YTkyZDc3YjE4MjNmN2Q5Nzg5ZDA3ZmFjYTc4NTI5OTYzN2NhYmFhNg== - - releasenotes/notes/remove-monasca-d5ceda231839d43d.yaml - !!binary | ZDJlMjQ3Y2YzODVmMjhhMzFmYTk2NWE4YzEzYjQ4YTlhZTYzY2M1Zg== version: 22.0.0 - files: - - releasenotes/notes/add-volume-pollster-metadata-d7b435fed9aac0aa.yaml - !!binary | 
MjFmNDQ4NDgyNjllNTdiM2YwZmMwMzUxMWRiODljZDhmM2I2YjY3Mg== - - releasenotes/notes/parallels-virt_type-ee29c4802fdf5c8e.yaml - !!binary | NmQzYWZkODNmYWY3NmI0NGI4YzYyMDAzNDg0NmNmOWM1OWNiYzc1YQ== - - releasenotes/notes/remove-opendaylight-c3839bbe9aa2a227.yaml - !!binary | YzJkZTZhMTA0YTFjNmU4NTcwMWJmYzFiNmNhZmM0M2VjODk5OThmMg== - - releasenotes/notes/remove-sahara-9254593d4fb137b9.yaml - !!binary | ODAzNmQ0OTEzZThhYWViNGYxODYwN2Q5ZDljZmJmODM2Yzc3YjAyMA== - - releasenotes/notes/remove-uml-e86feeabdd16c628.yaml - !!binary | YzlkNzFkOTIyODM3ODk0OTZmY2UxZTcxM2UwNmZiMDU4ODk4MzlmNQ== - - releasenotes/notes/remove-windows-support-0d280cc7c7fffc61.yaml - !!binary | M2I4YWRhZmJiMDVmZjBiYTExNGNkZDk1MDY2ZWE3OTBhOThhNTFjNg== version: 23.0.0 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/save-rate-in-gnocchi-66244262bc4b7842.yaml0000664000175100017510000000145215033033467026404 0ustar00mylesmyles--- features: - | Archive policies can now be configured per metrics in gnocchi_resources.yaml. A default list of archive policies is now created by Ceilometer. They are called "ceilometer-low-rate" for all IOs metrics and "ceilometer-low" for others. upgrade: - | Ceilometer now creates it own archive policies in Gnocchi and use them to create metrics in Gnocchi. Old metrics kept their current archive policies and will not be updated with ceilometer-upgrade. Only newly created metrics will be impacted. Archive policy can still be overridden with the publisher url (e.g: gnocchi://archive_policy=high). deprecations: - | cpu_util and \*.rate meters are deprecated and will be removed in future release in favor of the Gnocchi rate calculation equivalent. 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/scan-domains-for-tenants-8f8c9edcb74cc173.yaml0000664000175100017510000000017715033033467027712 0ustar00mylesmyles--- features: - The tenant (project) discovery code in the polling agent now scans for tenants in all available domains. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/selective-pipeline-notification-47e8a390b1c7dcc4.yaml0000664000175100017510000000063215033033467031252 0ustar00mylesmyles--- features: - | The notification-agent can now be configured to either build meters or events. By default, the notification agent will continue to load both pipelines and build both data models. To selectively enable a pipeline, configure the `pipelines` option under the `[notification]` section. Addition pipelines can be created following the format used by existing pipelines. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/ship-yaml-files-33aa5852bedba7f0.yaml0000664000175100017510000000036515033033467026055 0ustar00mylesmyles--- other: - | Ship YAML files to ceilometer/pipeline/data/ make it convenient to update all the files on upgrade. Users can copy yaml files from /usr/share/ceilometer and customise their own files located in /etc/ceilometer/. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/single-thread-pipelines-f9e6ac4b062747fe.yaml0000664000175100017510000000107015033033467027522 0ustar00mylesmyles--- upgrade: - Batching is enabled by default now when coordinated workers are enabled. Depending on load, it is recommended to scale out the number of `pipeline_processing_queues` to improve distribution. `batch_size` should also be configured accordingly. 
fixes: - Fix to improve handling messages in environments heavily backed up. Previously, notification handlers greedily grabbed messages from queues which could cause ordering issues. A fix was applied to sequentially process messages in a single thread to prevent ordering issues. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/skip-duplicate-meter-def-0420164f6a95c50c.yaml0000664000175100017510000000055415033033467027336 0ustar00mylesmyles --- fixes: - > [`bug 1536498 `_] Patch to fix duplicate meter definitions causing duplicate samples. If a duplicate is found, log a warning and skip the meter definition. Note that the first occurrence of a meter will be used and any following duplicates will be skipped from processing. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/snmp-cpu-util-055cd7704056c1ce.yaml0000664000175100017510000000072315033033467025350 0ustar00mylesmyles--- features: - | new metrics are available for snmp polling hardware.cpu.user, hardware.cpu.nice, hardware.cpu.system, hardware.cpu.idle, hardware.cpu.wait, hardware.cpu.kernel, hardware.cpu.interrupt. They replace deprecated hardware.cpu.util and hardware.system_stats.cpu.idle. deprecations: - | metrics hardware.cpu.util and hardware.system_stats.cpu.idle are now deprecated. Other hardware.cpu.* metrics should be used instead. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/snmp-diskio-samples-fc4b5ed5f19c096c.yaml0000664000175100017510000000016215033033467026764 0ustar00mylesmyles--- features: - | Add hardware.disk.read.* and hardware.disk.write.* metrics to capture diskio details. 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/sql-query-optimisation-ebb2233f7a9b5d06.yaml0000664000175100017510000000033515033033467027452 0ustar00mylesmyles--- fixes: - > [`bug 1506738 `_] [`bug 1509677 `_] Optimise SQL backend queries to minimise query load ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/support-None-query-45abaae45f08eda4.yaml0000664000175100017510000000023715033033467026705 0ustar00mylesmyles--- fixes: - > [`bug 1388680 `_] Suppose ability to query for None value when using SQL backend. ././@PaxHeader0000000000000000000000000000021700000000000010215 xustar00121 path=ceilometer-24.1.0.dev59/releasenotes/notes/support-cinder-volume-snapshot-backup-metering-d0a93b86bd53e803.yaml 22 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/support-cinder-volume-snapshot-backup-metering-d0a93b86bd0000664000175100017510000000022115033033467032373 0ustar00mylesmyles--- features: - Add support of metering the size of cinder volume/snapshot/backup. Like other meters, these are useful for billing system. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/support-lbaasv2-polling-c830dd49bcf25f64.yaml0000664000175100017510000000117415033033467027515 0ustar00mylesmyles--- features: - > Support for polling Neutron's LBaaS v2 API was added as v1 API in Neutron is deprecated. The same metrics are available between v1 and v2. issues: - > Neutron API is not designed to be polled against. When polling against Neutron is enabled, Ceilometer's polling agents may generate a significant load against the Neutron API. It is recommended that a dedicated API be enabled for polling while Neutron's API is improved to handle polling. upgrade: - > By default, Ceilometer will poll the v2 API. 
To poll legacy v1 API, add neutron_lbaas_version=v1 option to configuration file. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/support-meter-batch-recording-mongo-6c2bdf4fbb9764eb.yaml0000664000175100017510000000042715033033467032144 0ustar00mylesmyles--- features: - Add support of batch recording metering data to mongodb backend, since the pymongo support *insert_many* interface which can be used to batch record items, in "big-data" scenarios, this change can improve the performance of metering data recording. ././@PaxHeader0000000000000000000000000000021000000000000010206 xustar00114 path=ceilometer-24.1.0.dev59/releasenotes/notes/support-multiple-meter-definition-files-e3ce1fa73ef2e1de.yaml 22 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/support-multiple-meter-definition-files-e3ce1fa73ef2e1de.0000664000175100017510000000036215033033467032240 0ustar00mylesmyles--- features: - | Support loading multiple meter definition files and allow users to add their own meter definitions into several files according to different types of metrics under the directory of /etc/ceilometer/meters.d.././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/support-snmp-cpu-util-5c1c7afb713c1acd.yaml0000664000175100017510000000022115033033467027344 0ustar00mylesmyles--- features: - > [`bug 1513731 `_] Add support for hardware cpu_util in snmp.yaml ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/support-unique-meter-query-221c6e0c1dc1b726.yaml0000664000175100017510000000041215033033467030171 0ustar00mylesmyles--- features: - > [`bug 1506959 `_] Add support to query unique set of meter names rather than meters associated with each resource. The list is available by adding unique=True option to request. 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/switch-to-oslo-privsep-b58f20a279f31bc0.yaml0000664000175100017510000000113715033033467027273 0ustar00mylesmyles--- security: - | Privsep transitions. Ceilometer is transitioning from using the older style rootwrap privilege escalation path to the new style Oslo privsep path. This should improve performance and security of Ceilometer in the long term. - | Privsep daemons are now started by Ceilometer when required. These daemons can be started via rootwrap if required. rootwrap configs therefore need to be updated to include new privsep daemon invocations. upgrade: - | The following commands are no longer required to be listed in your rootwrap configuration: ipmitool. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/thread-safe-matching-4a635fc4965c5d4c.yaml0000664000175100017510000000034015033033467026676 0ustar00mylesmyles--- critical: - > [`bug 1519767 `_] fnmatch functionality in python <= 2.7.9 is not threadsafe. this issue and its potential race conditions are now patched. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/threeads-process-pollsters-cbd22cca6f2effc4.yaml0000664000175100017510000000027115033033467030571 0ustar00mylesmyles--- features: - | Introduce ``threads_to_process_pollsters`` to enable operators to define the number of pollsters that can be executed in parallel inside a polling task. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/tooz-coordination-system-d1054b9d1a5ddf32.yaml0000664000175100017510000000034715033033467027775 0ustar00mylesmyles--- upgrade: - | Ceilometer now leverages the latest distribution mechanism provided by the tooz library. 
Therefore the options `coordination.retry_backoff` and `coordination.max_retry_interval` do not exist anymore. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/transformer-ed4b1ea7d1752576.yaml0000664000175100017510000000075115033033467025266 0ustar00mylesmyles--- deprecations: - | Usage of transformers in Ceilometer pipelines is deprecated. Transformers in Ceilometer have never computed samples correctly when you have multiple workers. This functionality can be done by the storage backend easily without all issues that Ceilometer has. For example, the rating is already computed in Gnocchi today. - | Pipeline Partitioning is also deprecated. This was only useful to workaround of some issues that tranformers has. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/unify-timestamp-of-polled-data-fbfcff43cd2d04bc.yaml0000664000175100017510000000037615033033467031220 0ustar00mylesmyles--- fixes: - > [`bug 1491509 `_] Patch to unify timestamp in samples polled by pollsters. Set the time point polling starts as timestamp of samples, and drop timetamping in pollsters. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/use-glance-v2-in-image-pollsters-137a315577d5dc4c.yaml0000664000175100017510000000036215033033467030720 0ustar00mylesmyles--- features: - Since the Glance v1 APIs won't be maintained any more, this change add the support of glance v2 in images pollsters. upgrade: - > The option ``glance_page_size`` has been removed because it's not actually needed. 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/use-notification-transport-url-489f3d31dc66c4d2.yaml0000664000175100017510000000024515033033467031040 0ustar00mylesmyles--- fixes: - The transport_url defined in [oslo_messaging_notifications] was never used, which contradicts the oslo_messaging documentation. This is now fixed.././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/use-usable-metric-if-available-970ee58e8fdeece6.yaml0000664000175100017510000000012215033033467031020 0ustar00mylesmyles--- features: - use memory usable metric from libvirt memoryStats if available. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/volume-metrics-01ddde0180bc21cb.yaml0000664000175100017510000000020615033033467026000 0ustar00mylesmyles--- upgrade: - | The default ``polling.yaml`` file has been updated and now it enables meters related to cinder by default. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/notes/zaqar-publisher-f7efa030b71731f4.yaml0000664000175100017510000000012615033033467026022 0ustar00mylesmyles--- features: - Add a new publisher for pushing samples or events to a Zaqar queue. ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.8099413 ceilometer-24.1.0.dev59/releasenotes/source/0000775000175100017510000000000015033033521017755 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/source/2023.1.rst0000664000175100017510000000021015033033467021236 0ustar00mylesmyles=========================== 2023.1 Series Release Notes =========================== .. 
release-notes:: :branch: unmaintained/2023.1 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/source/2023.2.rst0000664000175100017510000000020215033033467021240 0ustar00mylesmyles=========================== 2023.2 Series Release Notes =========================== .. release-notes:: :branch: stable/2023.2 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/source/2024.1.rst0000664000175100017510000000020215033033467021240 0ustar00mylesmyles=========================== 2024.1 Series Release Notes =========================== .. release-notes:: :branch: stable/2024.1 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/source/2024.2.rst0000664000175100017510000000020215033033467021241 0ustar00mylesmyles=========================== 2024.2 Series Release Notes =========================== .. release-notes:: :branch: stable/2024.2 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/source/2025.1.rst0000664000175100017510000000020215033033467021241 0ustar00mylesmyles=========================== 2025.1 Series Release Notes =========================== .. 
release-notes:: :branch: stable/2025.1 ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.8099413 ceilometer-24.1.0.dev59/releasenotes/source/_static/0000775000175100017510000000000015033033521021403 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/source/_static/.placeholder0000664000175100017510000000000015033033467023665 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/source/conf.py0000664000175100017510000002145715033033467021276 0ustar00mylesmyles# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # Ceilometer Release Notes documentation build configuration file, created by # sphinx-quickstart on Tue Nov 3 17:40:50 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. 
# sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'openstackdocstheme', 'reno.sphinxext', ] # Add any paths that contain templates here, relative to this directory. # templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = 'Ceilometer Release Notes' copyright = '2015, Ceilometer Developers' # Release notes do not need a version number in the title, they # cover multiple releases. # The full version, including alpha/beta/rc tags. release = '' # The short X.Y version. version = '' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. 
# show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'openstackdocs' # openstackdocstheme options openstackdocs_repo_name = 'openstack/ceilometer' openstackdocs_auto_name = False openstackdocs_bug_project = 'ceilometer' openstackdocs_bug_tag = '' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. 
# html_extra_path = [] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'CeilometerReleaseNotesdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). 
latex_documents = [ ('index', 'CeilometerReleaseNotes.tex', 'Ceilometer Release Notes Documentation', 'Ceilometer Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'ceilometerreleasenotes', 'Ceilometer Release Notes Documentation', ['Ceilometer Developers'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'CeilometerReleaseNotes', 'Ceilometer Release Notes Documentation', 'Ceilometer Developers', 'CeilometerReleaseNotes', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. 
# texinfo_no_detailmenu = False # -- Options for Internationalization output ------------------------------ locale_dirs = ['locale/'] ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/source/index.rst0000664000175100017510000000047215033033467021632 0ustar00mylesmyles========================= Ceilometer Release Notes ========================= .. toctree:: :maxdepth: 1 unreleased 2025.1 2024.2 2024.1 2023.2 2023.1 zed yoga xena wallaby victoria ussuri train stein rocky queens pike ocata newton mitaka liberty ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/source/liberty.rst0000664000175100017510000000022015033033467022164 0ustar00mylesmyles============================= Liberty Series Release Notes ============================= .. release-notes:: :branch: origin/stable/liberty ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7899415 ceilometer-24.1.0.dev59/releasenotes/source/locale/0000775000175100017510000000000015033033521021214 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7899415 ceilometer-24.1.0.dev59/releasenotes/source/locale/en_GB/0000775000175100017510000000000015033033521022166 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.8099413 ceilometer-24.1.0.dev59/releasenotes/source/locale/en_GB/LC_MESSAGES/0000775000175100017510000000000015033033521023753 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po0000664000175100017510000025077515033033467027035 0ustar00mylesmyles# Andi Chandler , 2017. #zanata # Andi Chandler , 2018. #zanata # Andi Chandler , 2019. #zanata # Andi Chandler , 2020. 
#zanata # Andi Chandler , 2021. #zanata # Andi Chandler , 2022. #zanata # Andi Chandler , 2023. #zanata # Andi Chandler , 2024. #zanata # Andi Chandler , 2025. #zanata msgid "" msgstr "" "Project-Id-Version: Ceilometer Release Notes\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2025-02-06 09:15+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2025-02-07 12:11+0000\n" "Last-Translator: Andi Chandler \n" "Language-Team: English (United Kingdom)\n" "Language: en_GB\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" msgid "10.0.0" msgstr "10.0.0" msgid "10.0.1" msgstr "10.0.1" msgid "10.0.1-14" msgstr "10.0.1-14" msgid "11.0.0" msgstr "11.0.0" msgid "11.1.0" msgstr "11.1.0" msgid "11.1.0-6" msgstr "11.1.0-6" msgid "12.0.0" msgstr "12.0.0" msgid "12.1.0" msgstr "12.1.0" msgid "13.0.0" msgstr "13.0.0" msgid "13.0.0.0rc1" msgstr "13.0.0.0rc1" msgid "13.1.0" msgstr "13.1.0" msgid "14.0.0" msgstr "14.0.0" msgid "14.1.0-4" msgstr "14.1.0-4" msgid "15.1.0" msgstr "15.1.0" msgid "16.0.0" msgstr "16.0.0" msgid "16.0.1" msgstr "16.0.1" msgid "16.0.1-12" msgstr "16.0.1-12" msgid "17.0.0" msgstr "17.0.0" msgid "17.0.2-6" msgstr "17.0.2-6" msgid "18.0.0" msgstr "18.0.0" msgid "18.1.0" msgstr "18.1.0" msgid "19.0.0" msgstr "19.0.0" msgid "19.1.0" msgstr "19.1.0" msgid "20.0.0" msgstr "20.0.0" msgid "2023.1 Series Release Notes" msgstr "2023.1 Series Release Notes" msgid "2023.2 Series Release Notes" msgstr "2023.2 Series Release Notes" msgid "2024.1 Series Release Notes" msgstr "2024.1 Series Release Notes" msgid "2024.2 Series Release Notes" msgstr "2024.2 Series Release Notes" msgid "21.0.0" msgstr "21.0.0" msgid "22.0.0" msgstr "22.0.0" msgid "23.0.0" msgstr "23.0.0" msgid "23.0.0-52" msgstr "23.0.0-52" msgid "5.0.1" msgstr "5.0.1" msgid "5.0.2" msgstr "5.0.2" msgid "5.0.3" msgstr "5.0.3" msgid "6.0.0" msgstr "6.0.0" msgid "7.0.0" msgstr "7.0.0" msgid "7.0.0.0b2" msgstr 
"7.0.0.0b2" msgid "7.0.0.0b3" msgstr "7.0.0.0b3" msgid "7.0.0.0rc1" msgstr "7.0.0.0rc1" msgid "7.0.1" msgstr "7.0.1" msgid "7.0.5" msgstr "7.0.5" msgid "8.0.0" msgstr "8.0.0" msgid "9.0.0" msgstr "9.0.0" msgid "" "A dogpile.cache supported backend is required to enable cache. Additional " "configuration `options `_ are also required." msgstr "" "A dogpile.cache supported backend is required to enable cache. Additional " "configuration `options `_ are also required." msgid "" "A local cache is used when polling instance metrics to minimise calls Nova " "API. A new option is added `resource_cache_expiry` to configure a time to " "live for cache before it expires. This resolves issue where migrated " "instances are not removed from cache." msgstr "" "A local cache is used when polling instance metrics to minimise calls Nova " "API. A new option is added `resource_cache_expiry` to configure a time to " "live for cache before it expires. This resolves issue where migrated " "instances are not removed from cache." msgid "" "A local cache is used when polling instance metrics to minimise calls Nova " "API. A new option is added `resource_cache_expiry` to configure a time to " "live for cache before it expires. This resolves issue where migrated " "instances are not removed from cache. This is only relevant when " "`instance_discovery_method` is set to `naive`. It is recommended to use " "`libvirt_metadata` if possible." msgstr "" "A local cache is used when polling instance metrics to minimise calls Nova " "API. A new option is added `resource_cache_expiry` to configure a time to " "live for cache before it expires. This resolves issue where migrated " "instances are not removed from cache. This is only relevant when " "`instance_discovery_method` is set to `naive`. It is recommended to use " "`libvirt_metadata` if possible." 
msgid "" "A new option named `max_parallel_requests` is available to control the " "maximum number of parallel requests that can be executed by the agents. This " "option also replaces the `poolsize` option of the HTTP publisher." msgstr "" "A new option named `max_parallel_requests` is available to control the " "maximum number of parallel requests that can be executed by the agents. This " "option also replaces the `poolsize` option of the HTTP publisher." msgid "A new pulisher have been added to push data to Prometheus Pushgateway." msgstr "" "A new publisher have been added to push data to Prometheus Pushgateway." msgid "" "Add `disk.device.read.latency` and `disk.device.write.latency` meters to " "capture total time used by read or write operations." msgstr "" "Add `disk.device.read.latency` and `disk.device.write.latency` meters to " "capture total time used by read or write operations." msgid "" "Add a ceilometer driver to collect network statistics information using REST " "APIs exposed by network-statistics module in OpenDaylight." msgstr "" "Add a Ceilometer driver to collect network statistics information using REST " "APIs exposed by network-statistics module in OpenDaylight." msgid "Add a new publisher for pushing samples or events to a Zaqar queue." msgstr "Add a new publisher for pushing samples or events to a Zaqar queue." msgid "" "Add a tool for migrating metrics data from Ceilometer's native storage to " "Gnocchi. Since we have deprecated Ceilometer API and the Gnocchi will be the " "recommended metrics data storage backend." msgstr "" "Add a tool for migrating metrics data from Ceilometer's native storage to " "Gnocchi. Since we have deprecated Ceilometer API and the Gnocchi will be the " "recommended metrics data storage backend." msgid "" "Add availability_zone attribute to gnocchi instance resources. Populates " "this attribute by consuming instance.create.end events." msgstr "" "Add availability_zone attribute to gnocchi instance resources. 
Populates " "this attribute by consuming instance.create.end events." msgid "" "Add dynamic pollster system. The dynamic pollster system enables operators " "to gather new metrics on the fly (without needing to code pollsters)." msgstr "" "Add dynamic pollster system. The dynamic pollster system enables operators " "to gather new metrics on the fly (without needing to code pollsters)." msgid "" "Add four new meters, including perf.cpu.cycles for the number of cpu cycles " "one instruction needs, perf.instructions for the count of instructions, perf." "cache_references for the count of cache hits and cache_misses for the count " "of caches misses." msgstr "" "Add four new meters, including perf.cpu.cycles for the number of cpu cycles " "one instruction needs, perf.instructions for the count of instructions, perf." "cache_references for the count of cache hits and cache_misses for the count " "of caches misses." msgid "" "Add hardware.disk.read.* and hardware.disk.write.* metrics to capture diskio " "details." msgstr "" "Add hardware.disk.read.* and hardware.disk.write.* metrics to capture diskio " "details." msgid "" "Add memory swap metric for VM, including 'memory.swap.in' and 'memory.swap." "out'." msgstr "" "Add memory swap metric for VM, including 'memory.swap.in' and 'memory.swap." "out'." msgid "Add new json output option for the existing file publisher." msgstr "Add new JSON output option for the existing file publisher." msgid "Add support for Keystone v3 authentication" msgstr "Add support for Keystone v3 authentication" msgid "" "Add support for batch processing of messages from queue. This will allow the " "collector and notification agent to grab multiple messages per thread to " "enable more efficient processing." msgstr "" "Add support for batch processing of messages from queue. This will allow the " "collector and notification agent to grab multiple messages per thread to " "enable more efficient processing." 
msgid "" "Add support for configuring the size of samples the poller will send in each " "batch." msgstr "" "Add support for configuring the size of samples the poller will send in each " "batch." msgid "Add support for network statistics meters with gnocchi" msgstr "Add support for network statistics meters with Gnocchi" msgid "" "Add support of batch recording metering data to mongodb backend, since the " "pymongo support *insert_many* interface which can be used to batch record " "items, in \"big-data\" scenarios, this change can improve the performance of " "metering data recording." msgstr "" "Add support of batch recording metering data to MongoDB backend, since the " "pymongo support *insert_many* interface which can be used to batch record " "items, in \"big-data\" scenarios, this change can improve the performance of " "metering data recording." msgid "" "Add support of metering the size of cinder volume/snapshot/backup. Like " "other meters, these are useful for billing system." msgstr "" "Add support of metering the size of Cinder volume/snapshot/backup. Like " "other meters, these are useful for billing system." msgid "" "Add support to capture volume capacity usage details from cinder. This data " "is extracted from notifications sent by Cinder starting in Ocata." msgstr "" "Add support to capture volume capacity usage details from Cinder. This data " "is extracted from notifications sent by Cinder starting in Ocata." msgid "" "Add the support for non-OpenStack APIs in the dynamic pollster system. This " "extension enables operators to create pollster on the fly to handle metrics " "from systems such as the RadosGW API." msgstr "" "Add the support for non-OpenStack APIs in the dynamic pollster system. This " "extension enables operators to create pollster on the fly to handle metrics " "from systems such as the RadosGW API." msgid "" "Add two new meters, including memory.bandwidth.total and memory.bandwidth." 
"local, to get memory bandwidth statistics based on Intel CMT feature." msgstr "" "Add two new meters, including memory.bandwidth.total and memory.bandwidth." "local, to get memory bandwidth statistics based on Intel CMT feature." msgid "" "Add volume.volume_type_id and backup.is_incremental metadata for cinder " "pollsters. Also user_id information is now included for backups with the " "generated samples." msgstr "" "Add volume.volume_type_id and backup.is_incremental metadata for Cinder " "pollsters. Also user_id information is now included for backups with the " "generated samples." msgid "Added new tool ``ceilometer-status upgrade check``." msgstr "Added new tool ``ceilometer-status upgrade check``." msgid "Added support for magnum bay CRUD events, event_type is 'magnum.bay.*'." msgstr "" "Added support for Magnum bay CRUD events, event_type is 'magnum.bay.*'." msgid "" "Added the ``volume_type_id`` attribute to ``volume.size`` notification " "samples, which stores the ID for the volume type of the given volume." msgstr "" "Added the ``volume_type_id`` attribute to ``volume.size`` notification " "samples, which stores the ID for the volume type of the given volume." msgid "" "Added the ``volume_type_id`` attribute to ``volume`` resources in Gnocchi, " "which stores the ID for the volume type of the given volume." msgstr "" "Added the ``volume_type_id`` attribute to ``volume`` resources in Gnocchi, " "which stores the ID for the volume type of the given volume." msgid "Added the new power.state metric from virDomainState." msgstr "Added the new power.state metric from virDomainState." msgid "" "Addition pipelines can be created following the format used by existing " "pipelines." msgstr "" "Addition pipelines can be created following the format used by existing " "pipelines." msgid "" "Allow users to add additional exchanges in ceilometer.conf instead of " "hardcoding exchanges. 
Now original http_control_exchanges is being " "deprecated and renamed notification_control_exchanges. Besides, the new " "option is integrated with other exchanges in default EXCHANGE_OPTS to make " "it available to extend additional exchanges." msgstr "" "Allow users to add additional exchanges in ceilometer.conf instead of " "hardcoding exchanges. Now original http_control_exchanges is being " "deprecated and renamed notification_control_exchanges. Besides, the new " "option is integrated with other exchanges in default EXCHANGE_OPTS to make " "it available to extend additional exchanges." msgid "" "Any existing commands utilising `image` meter should be switched to `image." "size` meter which will provide equivalent functionality" msgstr "" "Any existing commands utilising `image` meter should be switched to `image." "size` meter which will provide equivalent functionality" msgid "" "Archive policies can now be configured per metrics in gnocchi_resources." "yaml. A default list of archive policies is now created by Ceilometer. They " "are called \"ceilometer-low-rate\" for all IOs metrics and \"ceilometer-low" "\" for others." msgstr "" "Archive policies can now be configured per metrics in gnocchi_resources." "yaml. A default list of archive policies is now created by Ceilometer. They " "are called \"ceilometer-low-rate\" for all IO metrics and \"ceilometer-low\" " "for others." msgid "" "As the collector service is being deprecated, the duplication of publishers " "and dispatchers is being addressed. The http dispatcher is now marked as " "deprecated and the recommended path is to use http publisher." msgstr "" "As the collector service is being deprecated, the duplication of publishers " "and dispatchers is being addressed. The http dispatcher is now marked as " "deprecated and the recommended path is to use http publisher." msgid "" "Batching is enabled by default now when coordinated workers are enabled. 
" "Depending on load, it is recommended to scale out the number of " "`pipeline_processing_queues` to improve distribution. `batch_size` should " "also be configured accordingly." msgstr "" "Batching is enabled by default now when coordinated workers are enabled. " "Depending on load, it is recommended to scale out the number of " "`pipeline_processing_queues` to improve distribution. `batch_size` should " "also be configured accordingly." msgid "" "Because of deprecating the collector, the default publishers in pipeline." "yaml and event_pipeline.yaml are now changed using database instead of " "notifier." msgstr "" "Because of deprecating the collector, the default publishers in pipeline." "yaml and event_pipeline.yaml are now changed using database instead of " "notifier." msgid "Bug Fixes" msgstr "Bug Fixes" msgid "" "By default, Ceilometer will poll the v2 API. To poll legacy v1 API, add " "neutron_lbaas_version=v1 option to configuration file." msgstr "" "By default, Ceilometer will poll the v2 API. To poll legacy v1 API, add " "neutron_lbaas_version=v1 option to configuration file." msgid "" "Ceilometer API is deprecated. Use the APIs from Aodh (alarms), Gnocchi " "(metrics), and/or Panko (events)." msgstr "" "Ceilometer API is deprecated. Use the APIs from Aodh (alarms), Gnocchi " "(metrics), and/or Panko (events)." msgid "Ceilometer Release Notes" msgstr "Ceilometer Release Notes" msgid "" "Ceilometer alarms code is now fully removed from code base. Equivalent " "functionality is handled by Aodh." msgstr "" "Ceilometer alarms code is now fully removed from code base. Equivalent " "functionality is handled by Aodh." msgid "" "Ceilometer backends are no more only databases but also REST API like " "Gnocchi. So ceilometer-dbsync binary name doesn't make a lot of sense and " "have been renamed ceilometer-upgrade. 
The new binary handles database schema " "upgrade like ceilometer-dbsync does, but it also handle any changes needed " "in configured ceilometer backends like Gnocchi." msgstr "" "Ceilometer backends are no more only databases but also REST API like " "Gnocchi. So ceilometer-dbsync binary name doesn't make a lot of sense and " "have been renamed ceilometer-upgrade. The new binary handles database schema " "upgrade like ceilometer-dbsync does, but it also handle any changes needed " "in configured Ceilometer backends like Gnocchi." msgid "" "Ceilometer created metrics that could never get measures depending on the " "polling configuration. Metrics are now created only if Ceilometer gets at " "least a measure for them." msgstr "" "Ceilometer created metrics that could never get measures depending on the " "polling configuration. Metrics are now created only if Ceilometer gets at " "least a measure for them." msgid "" "Ceilometer legacy backends and Ceilometer API are now deprecated. Polling " "all nova instances from compute agent is no more required with Gnocchi. So " "we switch the [compute]instance_discovery_method to libvirt_metadata. To " "switch back to the old deprecated behavior you can set it back to 'naive'." msgstr "" "Ceilometer legacy backends and Ceilometer API are now deprecated. Polling " "all nova instances from compute agent is no more required with Gnocchi. So " "we switch the [compute]instance_discovery_method to libvirt_metadata. To " "switch back to the old deprecated behaviour you can set it back to 'naive'." msgid "" "Ceilometer now creates it own archive policies in Gnocchi and use them to " "create metrics in Gnocchi. Old metrics kept their current archive policies " "and will not be updated with ceilometer-upgrade. Only newly created metrics " "will be impacted. Archive policy can still be overridden with the publisher " "url (e.g: gnocchi://archive_policy=high)." 
msgstr "" "Ceilometer now creates it own archive policies in Gnocchi and uses them to " "create metrics in Gnocchi. Old metrics keep their current archive policies " "and will not be updated with ceilometer-upgrade. Only newly created metrics " "will be impacted. Archive policy can still be overridden with the publisher " "URL (e.g: gnocchi://archive_policy=high)." msgid "" "Ceilometer now leverages the latest distribution mechanism provided by the " "tooz library. Therefore the options `coordination.retry_backoff` and " "`coordination.max_retry_interval` do not exist anymore." msgstr "" "Ceilometer now leverages the latest distribution mechanism provided by the " "tooz library. Therefore the options `coordination.retry_backoff` and " "`coordination.max_retry_interval` do not exist any more." msgid "" "Ceilometer previously did not create IPMI sensor data from IPMI agent or " "Ironic in Gnocchi. This data is now pushed to Gnocchi." msgstr "" "Ceilometer previously did not create IPMI sensor data from IPMI agent or " "Ironic in Gnocchi. This data is now pushed to Gnocchi." msgid "" "Ceilometer sets up the HTTPProxyToWSGI middleware in front of Ceilometer. " "The purpose of this middleware is to set up the request URL correctly in " "case there is a proxy (for instance, a loadbalancer such as HAProxy) in " "front of Ceilometer. So, for instance, when TLS connections are being " "terminated in the proxy, and one tries to get the versions from the / " "resource of Ceilometer, one will notice that the protocol is incorrect; It " "will show 'http' instead of 'https'. So this middleware handles such cases. " "Thus helping Keystone discovery work correctly. The HTTPProxyToWSGI is off " "by default and needs to be enabled via a configuration value." msgstr "" "Ceilometer sets up the HTTPProxyToWSGI middleware in front of Ceilometer. 
" "The purpose of this middleware is to set up the request URL correctly in " "case there is a proxy (for instance, a load balancer such as HAProxy) in " "front of Ceilometer. So, for instance, when TLS connections are being " "terminated in the proxy, and one tries to get the versions from the / " "resource of Ceilometer, one will notice that the protocol is incorrect; It " "will show 'http' instead of 'https'. So this middleware handles such cases. " "Thus helping Keystone discovery work correctly. The HTTPProxyToWSGI is off " "by default and needs to be enabled via a configuration value." msgid "" "Ceilometer supports generic notifier to publish data and allow user to " "customize parameters such as topic, transport driver and priority. The " "publisher configuration in pipeline.yaml can be notifer://[notifier_ip]:" "[notifier_port]?topic=[topic]&driver=driver&max_retry=100 Not only rabbit " "driver, but also other driver like kafka can be used." msgstr "" "Ceilometer supports generic notifier to publish data and allow user to " "customise parameters such as topic, transport driver and priority. The " "publisher configuration in pipeline.yaml can be notifer://[notifier_ip]:" "[notifier_port]?topic=[topic]&driver=driver&max_retry=100 Not only rabbit " "driver, but also other driver like Kafka can be used." msgid "" "Collector is no longer supported in this release. The collector introduces " "lags in pushing data to backend. To optimize the architecture, Ceilometer " "push data through dispatchers using publishers in notification agent " "directly." msgstr "" "Collector is no longer supported in this release. The collector introduces " "lags in pushing data to backend. To optimise the architecture, Ceilometer " "pushes data through dispatchers using publishers in notification agent " "directly." msgid "" "Configuration values can passed in via the querystring of publisher in " "pipeline. 
For example, rather than setting target, timeout, verify_ssl, and " "batch_mode under [dispatcher_http] section of conf, you can specify http://" "/?verify_ssl=True&batch=True&timeout=10. Use `raw_only=1` if only " "the raw details of event are required." msgstr "" "Configuration values can passed in via the querystring of publisher in " "pipeline. For example, rather than setting target, timeout, verify_ssl, and " "batch_mode under [dispatcher_http] section of conf, you can specify http://" "/?verify_ssl=True&batch=True&timeout=10. Use `raw_only=1` if only " "the raw details of event are required." msgid "" "Configure individual dispatchers by specifying meter_dispatchers and " "event_dispatchers in configuration file." msgstr "" "Configure individual dispatchers by specifying meter_dispatchers and " "event_dispatchers in configuration file." msgid "Critical Issues" msgstr "Critical Issues" msgid "Current Series Release Notes" msgstr "Current Series Release Notes" msgid "" "Default value of the ``[notification] notification_control_exchanges`` " "option has been updated and ``sahara`` is no longer included by default." msgstr "" "Default value of the ``[notification] notification_control_exchanges`` " "option has been updated and ``sahara`` is no longer included by default." msgid "Deprecated `rgw.*` meters have been removed. Use `radosgw.*` instead." msgstr "Deprecated `rgw.*` meters have been removed. Use `radosgw.*` instead." msgid "" "Deprecating support for enabling pollsters via command line. Meter and " "pollster enablement should be configured via polling.yaml file." msgstr "" "Deprecating support for enabling pollsters via command line. Meter and " "pollster enablement should be configured via polling.yaml file." 
msgid "Deprecation Notes" msgstr "Deprecation Notes" msgid "Fix ability to enable/disable radosgw.* meters explicitly" msgstr "Fix ability to enable/disable radosgw.* meters explicitly" msgid "Fix samples from Heat to map to correct Gnocchi resource type" msgstr "Fix samples from Heat to map to correct Gnocchi resource type" msgid "" "Fix to improve handling messages in environments heavily backed up. " "Previously, notification handlers greedily grabbed messages from queues " "which could cause ordering issues. A fix was applied to sequentially process " "messages in a single thread to prevent ordering issues." msgstr "" "Fix to improve handling messages in environments heavily backed up. " "Previously, notification handlers greedily grabbed messages from queues " "which could cause ordering issues. A fix was applied to sequentially process " "messages in a single thread to prevent ordering issues." msgid "" "For backward compatibility reason we temporary keep ceilometer-dbsync, at " "least for one major version to ensure deployer have time update their " "tooling." msgstr "" "For backward compatibility reason we temporary keep ceilometer-dbsync, at " "least for one major version to ensure deployers have time update their " "tooling." msgid "Gnocchi dispatcher now uses client rather than direct http requests" msgstr "Gnocchi dispatcher now uses client rather than direct HTTP requests" msgid "" "Identify user and projects names with the help of their UUIDs in the polled " "samples. If they are identified, set \"project_name\" and \"user_name\" " "fields in the sample to the corresponding values." msgstr "" "Identify user and projects names with the help of their UUIDs in the polled " "samples. If they are identified, set \"project_name\" and \"user_name\" " "fields in the sample to the corresponding values." msgid "" "If workload partitioning of the notification agent is enabled, the " "notification agent should not run alongside pre-Queens agents. 
Doing so may " "result in missed samples when leveraging transformations. To upgrade without " "loss of data, set `notification_control_exchanges` option to empty so only " "existing `ceilometer-pipe-*` queues are processed. Once cleared, reset " "`notification_control_exchanges` option and launch the new notification " "agent(s). If `workload_partitioning` is not enabled, no special steps are " "required." msgstr "" "If workload partitioning of the notification agent is enabled, the " "notification agent should not run alongside pre-Queens agents. Doing so may " "result in missed samples when leveraging transformations. To upgrade without " "loss of data, set `notification_control_exchanges` option to empty so only " "existing `ceilometer-pipe-*` queues are processed. Once cleared, reset " "`notification_control_exchanges` option and launch the new notification " "agent(s). If `workload_partitioning` is not enabled, no special steps are " "required." msgid "" "If you are using Gnocchi as backend it's strongly recommended to switch " "[compute]/instance_discovery_method to libvirt_metadata. This will reduce " "the load on the Nova API especially if you have many compute nodes." msgstr "" "If you are using Gnocchi as backend it's strongly recommended to switch " "[compute]/instance_discovery_method to libvirt_metadata. This will reduce " "the load on the Nova API especially if you have many compute nodes." msgid "" "In an effort to minimise the noise, Ceilometer will no longer produce meters " "which have no measurable data associated with it. Image meter only captures " "state information which is already captured in events and other meters." msgstr "" "In an effort to minimise the noise, Ceilometer will no longer produce meters " "which have no measurable data associated with it. Image meter only captures " "state information which is already captured in events and other meters." 
msgid "" "In an effort to minimise the noise, Ceilometer will no longer produce meters " "which have no measureable data associated with it. Image meter only captures " "state information which is already captured in events and other meters." msgstr "" "In an effort to minimise the noise, Ceilometer will no longer produce meters " "which have no measurable data associated with it. Image meter only captures " "state information which is already captured in events and other meters." msgid "" "In the 'publishers' section of a meter/event pipeline definition, https:// " "can now be used in addition to http://. Furthermore, either Basic or client-" "certificate authentication can be used (obviously, client cert only makes " "sense in the https case). For Basic authentication, use the form http://" "username:password@hostname/. For client certificate authentication pass the " "client certificate's path (and the key file path, if the key is not in the " "certificate file) using the parameters 'clientcert' and 'clientkey', e.g. " "https://hostname/path?clientcert=/path/to/cert&clientkey=/path/to/key. Any " "parameters or credentials used for http(s) publishers are removed from the " "URL before the actual HTTP request is made." msgstr "" "In the 'publishers' section of a meter/event pipeline definition, https:// " "can now be used in addition to http://. Furthermore, either Basic or client-" "certificate authentication can be used (obviously, client cert only makes " "sense in the https case). For Basic authentication, use the form http://" "username:password@hostname/. For client certificate authentication pass the " "client certificate's path (and the key file path, if the key is not in the " "certificate file) using the parameters 'clientcert' and 'clientkey', e.g. " "https://hostname/path?clientcert=/path/to/cert&clientkey=/path/to/key. Any " "parameters or credentials used for http(s) publishers are removed from the " "URL before the actual HTTP request is made." 
msgid "" "In the [dispatcher_http] section of ceilometer.conf, batch_mode can be set " "to True to activate sending meters and events in batches, or False (default " "value) to send each meter and event with a fresh HTTP call." msgstr "" "In the [dispatcher_http] section of ceilometer.conf, batch_mode can be set " "to True to activate sending meters and events in batches, or False (default " "value) to send each meter and event with a fresh HTTP call." msgid "" "In the [dispatcher_http] section of ceilometer.conf, verify_ssl can be set " "to True to use system-installed certificates (default value) or False to " "ignore certificate verification (use in development only!). verify_ssl can " "also be set to the location of a certificate file e.g. /some/path/cert.crt " "(use for self-signed certs) or to a directory of certificates. The value is " "passed as the 'verify' option to the underlying requests method, which is " "documented at http://docs.python-requests.org/en/master/user/advanced/#ssl-" "cert-verification" msgstr "" "In the [dispatcher_http] section of ceilometer.conf, verify_ssl can be set " "to True to use system-installed certificates (default value) or False to " "ignore certificate verification (use in development only!). verify_ssl can " "also be set to the location of a certificate file e.g. /some/path/cert.crt " "(use for self-signed certs) or to a directory of certificates. The value is " "passed as the 'verify' option to the underlying requests method, which is " "documented at http://docs.python-requests.org/en/master/user/advanced/#ssl-" "cert-verification" msgid "" "Include a publisher for the Monasca API. A ``monasca://`` pipeline sink will " "send data to a Monasca instance, using credentials configured in ceilometer." "conf. This functionality was previously available in the Ceilosca project " "(https://github.com/openstack/monasca-ceilometer)." msgstr "" "Include a publisher for the Monasca API. 
A ``monasca://`` pipeline sink will " "send data to a Monasca instance, using credentials configured in ceilometer." "conf. This functionality was previously available in the Ceilosca project " "(https://github.com/openstack/monasca-ceilometer)." msgid "Kafka publisher is deprecated to use generic notifier instead." msgstr "Kafka publisher is deprecated to use generic notifier instead." msgid "Known Issues" msgstr "Known Issues" msgid "Liberty Series Release Notes" msgstr "Liberty Series Release Notes" msgid "Mitaka Release Notes" msgstr "Mitaka Release Notes" msgid "Network Statistics From OpenDaylight." msgstr "Network Statistics From OpenDaylight." msgid "" "Neutron API is not designed to be polled against. When polling against " "Neutron is enabled, Ceilometer's polling agents may generage a significant " "load against the Neutron API. It is recommended that a dedicated API be " "enabled for polling while Neutron's API is improved to handle polling." msgstr "" "Neutron API is not designed to be polled against. When polling against " "Neutron is enabled, Ceilometer's polling agents may generate a significant " "load against the Neutron API. It is recommended that a dedicated API be " "enabled for polling while Neutron's API is improved to handle polling." msgid "New Features" msgstr "New Features" msgid "" "New framework for ``ceilometer-status upgrade check`` command is added. This " "framework allows adding various checks which can be run before a Ceilometer " "upgrade to ensure if the upgrade can be performed safely." msgstr "" "New framework for ``ceilometer-status upgrade check`` command is added. This " "framework allows adding various checks which can be run before a Ceilometer " "upgrade to ensure if the upgrade can be performed safely." 
msgid "Newton Release Notes" msgstr "Newton Release Notes" msgid "Ocata Series Release Notes" msgstr "Ocata Series Release Notes" msgid "" "OpenStack Dynamic pollsters metadata enrichment with other OpenStack API's " "data." msgstr "" "OpenStack Dynamic pollsters metadata enrichment with other OpenStack API's " "data." msgid "" "Operator can now use new CLI tool ``ceilometer-status upgrade check`` to " "check if Ceilometer deployment can be safely upgraded from N-1 to N release." msgstr "" "Operator can now use new CLI tool ``ceilometer-status upgrade check`` to " "check if Ceilometer deployment can be safely upgraded from N-1 to N release." msgid "Other Notes" msgstr "Other Notes" msgid "Pike Series Release Notes" msgstr "Pike Series Release Notes" msgid "" "Pipeline Partitioning is also deprecated. This was only useful to workaround " "of some issues that tranformers has." msgstr "" "Pipeline Partitioning is also deprecated. This was only useful to workaround " "some issues that transformers had." msgid "" "Pipeline processing in polling agents was removed in Liberty cycle. A new " "polling specific definition file is created to handle polling functionality " "and pipeline definition file is now reserved exclusively for transformations " "and routing. The polling.yaml file follows the same syntax as the pipeline." "yaml but only handles polling attributes such as interval, discovery, " "resources, meter matching. It is configured by setting cfg_file under the " "polling section.If no polling definition file is found, it will fallback to " "reuse pipeline_cfg_file." msgstr "" "Pipeline processing in polling agents was removed in Liberty cycle. A new " "polling specific definition file is created to handle polling functionality " "and pipeline definition file is now reserved exclusively for transformations " "and routing. The polling.yaml file follows the same syntax as the pipeline." 
"yaml but only handles polling attributes such as interval, discovery, " "resources, meter matching. It is configured by setting cfg_file under the " "polling section.If no polling definition file is found, it will fallback to " "reuse pipeline_cfg_file." msgid "" "Pipeline.yaml files for agents should be updated to notifier:// or udp:// " "publishers. The rpc:// publisher is no longer supported." msgstr "" "Pipeline.yaml files for agents should be updated to notifier:// or udp:// " "publishers. The rpc:// publisher is no longer supported." msgid "Prelude" msgstr "Prelude" msgid "Previously deprecated kwapi meters are not removed." msgstr "Previously deprecated Kwapi meters are not removed." msgid "" "Previously, to enable/disable radosgw.* meters, you must define entry_point " "name rather than meter name. This is corrected so you do not need to be " "aware of entry_point naming. Use `radosgw.*` to enable/disable radosgw " "meters explicitly rather than `rgw.*`. `rgw.*` support is deprecated and " "will be removed in Rocky." msgstr "" "Previously, to enable/disable radosgw.* meters, you must define entry_point " "name rather than meter name. This is corrected so you do not need to be " "aware of entry_point naming. Use `radosgw.*` to enable/disable radosgw " "meters explicitly rather than `rgw.*`. `rgw.*` support is deprecated and " "will be removed in Rocky." msgid "" "Privsep daemons are now started by Ceilometer when required. These daemons " "can be started via rootwrap if required. rootwrap configs therefore need to " "be updated to include new privsep daemon invocations." msgstr "" "Privsep daemons are now started by Ceilometer when required. These daemons " "can be started via rootwrap if required. rootwrap configs therefore need to " "be updated to include new privsep daemon invocations." msgid "" "Privsep transitions. Ceilometer is transitioning from using the older style " "rootwrap privilege escalation path to the new style Oslo privsep path. 
This " "should improve performance and security of Ceilometer in the long term." msgstr "" "Privsep transitions. Ceilometer is transitioning from using the older style " "rootwrap privilege escalation path to the new style Oslo privsep path. This " "should improve performance and security of Ceilometer in the long term." msgid "" "Python 2.7 support has been dropped. Last release of ceilometer to support " "py2.7 is OpenStack Train. The minimum version of Python now supported by " "ceilometer is Python 3.6." msgstr "" "Python 2.7 support has been dropped. Last release of Ceilometer to support " "py2.7 is OpenStack Train. The minimum version of Python now supported by " "Ceilometer is Python 3.6." msgid "" "Python 3.6 & 3.7 support has been dropped. The minimum version of Python now " "supported is Python 3.8." msgstr "" "Python 3.6 & 3.7 support has been dropped. The minimum version of Python now " "supported is Python 3.8." msgid "" "Python 3.8 support was dropped. The minimum version of Python now supported " "is Python 3.9." msgstr "" "Python 3.8 support was dropped. The minimum version of Python now supported " "is Python 3.9." msgid "Queens Series Release Notes" msgstr "Queens Series Release Notes" msgid "" "RPC collector support is dropped. The queue-based notifier publisher and " "collector was added as the recommended alternative as of Icehouse cycle." msgstr "" "RPC collector support is dropped. The queue-based notifier publisher and " "collector was added as the recommended alternative as of Icehouse cycle." msgid "Remove deprecated option `batch_polled_samples`." msgstr "Remove deprecated option `batch_polled_samples`." msgid "" "Remove deprecated option meter_definitions_cfg_file, use " "meter_definitions_dirs to configure meter notification file." msgstr "" "Remove deprecated option meter_definitions_cfg_file, use " "meter_definitions_dirs to configure meter notification file." msgid "Remove direct publisher and use the explicit publisher instead." 
msgstr "Remove direct publisher and use the explicit publisher instead." msgid "Remove eventlet from Ceilometer in favour of threaded approach" msgstr "Remove eventlet from Ceilometer in favour of threaded approach" msgid "Remove integration with the inactive Monasca project" msgstr "Remove integration with the inactive Monasca project" msgid "Rocky Series Release Notes" msgstr "Rocky Series Release Notes" msgid "Run db-sync to add new indices." msgstr "Run db-sync to add new indices." msgid "" "Samples are required to measure some aspect of a resource. Samples not " "measuring anything will be dropped." msgstr "" "Samples are required to measure some aspect of a resource. Samples not " "measuring anything will be dropped." msgid "Security Issues" msgstr "Security Issues" msgid "" "Ship YAML files to ceilometer/pipeline/data/ make it convenient to update " "all the files on upgrade. Users can copy yaml files from /usr/share/" "ceilometer and customise their own files located in /etc/ceilometer/." msgstr "" "Ship YAML files to ceilometer/pipeline/data/ make it convenient to update " "all the files on upgrade. Users can copy yaml files from /usr/share/" "ceilometer and customise their own files located in /etc/ceilometer/." msgid "" "Since the Glance v1 APIs won't be maintained any more, this change add the " "support of glance v2 in images pollsters." msgstr "" "Since the Glance v1 APIs won't be maintained any more, this change add the " "support of glance v2 in images pollsters." msgid "Start using reno to manage release notes." msgstr "Start using Reno to manage release notes." msgid "Stein Series Release Notes" msgstr "Stein Series Release Notes" msgid "" "Support for CADF-only payload in HTTP dispatcher is dropped as audit " "middleware in pyCADF was dropped in Kilo cycle." msgstr "" "Support for CADF-only payload in HTTP dispatcher is dropped as audit " "middleware in pyCADF was dropped in Kilo cycle." msgid "" "Support for CORS is added. 
More information can be found [`here `_]" msgstr "" "Support for CORS is added. More information can be found [`here `_]" msgid "" "Support for Neutron FWaaS has been officially deprecated. The feature has " "been useless since the Neutron FWaaS project was retired." msgstr "" "Support for Neutron FWaaS has been officially deprecated. The feature has " "been useless since the Neutron FWaaS project was retired." msgid "" "Support for Neutron LBaaS has been officially deprecated. The feature has " "been useless since the Neutron LBaaS project was retired." msgstr "" "Support for Neutron LBaaS has been officially deprecated. The feature has " "been useless since the Neutron LBaaS project was retired." msgid "" "Support for Open Contrail has been removed. Because no SDN is supported " "after the removal, the mechanism to pull metrics from SDN is also removed." msgstr "" "Support for Open Contrail has been removed. Because no SDN is supported " "after the removal, the mechanism to pull metrics from SDN is also removed." msgid "" "Support for OpenContrail, which is currently known as Tungsten Fabric, has " "been deprecated and will be removed in a future release." msgstr "" "Support for OpenContrail, which is currently known as Tungsten Fabric, has " "been deprecated and will be removed in a future release." msgid "" "Support for OpenDaylight has been deprecated and will be removed in a future " "release." msgstr "" "Support for OpenDaylight has been deprecated and will be removed in a future " "release." msgid "Support for OpenDaylight has been removed." msgstr "Support for OpenDaylight has been removed." msgid "" "Support for VMWare vSphere has been deprecated, because the vmwareapi virt " "driver in nova has been marked experimental and may be removed in a future " "release." msgstr "" "Support for VMWare vSphere has been deprecated because the vmwareapi virt " "driver in Nova has been marked experimental and may be removed in a future " "release." 
msgid "Support for VMware vSphere has been removed." msgstr "Support for VMware vSphere has been removed." msgid "" "Support for XenServer/Xen Cloud Platform has been deprecated and will be " "removed in a future release." msgstr "" "Support for XenServer/Xen Cloud Platform has been deprecated and will be " "removed in a future release." msgid "Support for XenServer/Xen Cloud Platform has been removed." msgstr "Support for XenServer/Xen Cloud Platform has been removed." msgid "Support for neutron-lbaas resources has been removed." msgstr "Support for neutron-lbaas resources has been removed." msgid "" "Support for polling Neutron's LBaaS v2 API was added as v1 API in Neutron is " "deprecated. The same metrics are available between v1 and v2." msgstr "" "Support for polling Neutron's LBaaS v2 API was added as v1 API in Neutron is " "deprecated. The same metrics are available between v1 and v2." msgid "" "Support for running Ceilometer in Windows operating systems has been " "deprecated because of retirement of the Winstackers project. Because of " "this, Hyper-V inspector is also deprecated." msgstr "" "Support for running Ceilometer in Windows operating systems has been " "deprecated because of retirement of the Winstackers project. Because of " "this, Hyper-V inspector is also deprecated." msgid "" "Support for running ceilometer in Windows operating systems has been " "removed. Because of the removal, Hyper-V inspector has also been removed." msgstr "" "Support for running Ceilometer in Windows operating systems has been " "removed. Because of the removal, Hyper-V inspector has also been removed." msgid "" "Support loading multiple meter definition files and allow users to add their " "own meter definitions into several files according to different types of " "metrics under the directory of /etc/ceilometer/meters.d." 
msgstr "" "Support loading multiple meter definition files and allow users to add their " "own meter definitions into several files according to different types of " "metrics under the directory of /etc/ceilometer/meters.d." msgid "" "Support resource caching in Gnocchi dispatcher to improve write performance " "to avoid additional queries." msgstr "" "Support resource caching in Gnocchi dispatcher to improve write performance " "to avoid additional queries." msgid "" "The Ceilometer compute agent can now retrieve some instance metadata from " "the metadata libvirt API instead of polling the Nova API. Since Mitaka, Nova " "fills this metadata with some information about the instance. To enable this " "feature you should set [compute]/instance_discovery_method = " "libvirt_metadata in the configuration file. The only downside of this method " "is that user_metadata (and some other instance attributes) are no longer " "part of the samples created by the agent. But when Gnocchi is used as " "backend, this is not an issue since Gnocchi doesn't store resource metadata " "aside of the measurements. And the missing informations are still retrieved " "through the Nova notifications and will fully update the resource " "information in Gnocchi." msgstr "" "The Ceilometer compute agent can now retrieve some instance metadata from " "the metadata libvirt API instead of polling the Nova API. Since Mitaka, Nova " "fills this metadata with some information about the instance. To enable this " "feature you should set [compute]/instance_discovery_method = " "libvirt_metadata in the configuration file. The only downside of this method " "is that user_metadata (and some other instance attributes) are no longer " "part of the samples created by the agent. But when Gnocchi is used as " "backend, this is not an issue since Gnocchi doesn't store resource metadata " "aside of the measurements. 
And the missing information is still retrieved " "through the Nova notifications and will fully update the resource " "information in Gnocchi." msgid "" "The Ceilometer event subsystem and pipeline is now deprecated and will be " "removed in a future release." msgstr "" "The Ceilometer event subsystem and pipeline is now deprecated and will be " "removed in a future release." msgid "" "The Events API (exposed at /v2/events) which was deprecated has been " "removed. The Panko project is now responsible for providing this API and can " "be installed separately." msgstr "" "The Events API (exposed at /v2/events) which was deprecated has been " "removed. The Panko project is now responsible for providing this API and can " "be installed separately." msgid "" "The Gnocchi dispatcher has been removed and replaced by a native Gnocchi " "publisher. The configuration options from the `[dispatcher_gnocchi]` has " "been removed and should be passed via the URL in `pipeline.yaml`. The " "service authentication override can be done by adding specific credentials " "to a `[gnocchi]` section instead." msgstr "" "The Gnocchi dispatcher has been removed and replaced by a native Gnocchi " "publisher. The configuration options from the `[dispatcher_gnocchi]` has " "been removed and should be passed via the URL in `pipeline.yaml`. The " "service authentication override can be done by adding specific credentials " "to a `[gnocchi]` section instead." msgid "" "The Kwapi pollsters are deprecated and will be removed in the next major " "version of Ceilometer." msgstr "" "The Kwapi pollsters are deprecated and will be removed in the next major " "version of Ceilometer." 
msgid "" "The [compute]/workload_partitioning = True is deprecated in favor of " "[compute]/instance_discovery_method = workload_partitioning" msgstr "" "The [compute]/workload_partitioning = True is deprecated in favour of " "[compute]/instance_discovery_method = workload_partitioning" msgid "" "The ``NodesDiscoveryTripleO`` discovery plugin has been deprecated and will " "be removed in a future release. This plugin is designed for TripleO " "deployment but no longer used since Telemetry services were removed from " "undercloud." msgstr "" "The ``NodesDiscoveryTripleO`` discovery plugin has been deprecated and will " "be removed in a future release. This plugin is designed for TripleO " "deployment but is no longer used since Telemetry services were removed from " "undercloud." msgid "The ``NodesDiscoveryTripleO`` discovery plugin has been removed." msgstr "The ``NodesDiscoveryTripleO`` discovery plugin has been removed." msgid "" "The ``[DEFAULT] hypervisor_inspector`` option has been deprecated, because " "libvirt is the only supported hypervisor currently. The option will be " "removed in a future release." msgstr "" "The ``[DEFAULT] hypervisor_inspector`` option has been deprecated, because " "libvirt is the only supported hypervisor currently. The option will be " "removed in a future release." msgid "" "The ``[DEFAULT] virt_type`` option no longer supports ``uml``. UML support " "by nova was removed in nova 23.3.0 release." msgstr "" "The ``[DEFAULT] virt_type`` option no longer supports ``uml``. UML support " "by Nova was removed in Nova 23.3.0 release." msgid "The ``[DEFAULT] virt_type`` option now supports ``parallels``." msgstr "The ``[DEFAULT] virt_type`` option now supports ``parallels``." msgid "" "The ``[coordination] check_watchers`` parameter has been deprecated since it " "has been ineffective." msgstr "" "The ``[coordination] check_watchers`` parameter has been deprecated since it " "has been ineffective." 
msgid "The ``[coordination] check_watchers`` parameter has been removed." msgstr "The ``[coordination] check_watchers`` parameter has been removed." msgid "" "The ``[notification] batch_size`` parameter now takes effect to enable batch " "processing of notifications. The ``[notification] batch_timeout`` parameter " "has been restored at the same time to determine how much and how long " "notifications are kept." msgstr "" "The ``[notification] batch_size`` parameter now takes effect to enable batch " "processing of notifications. The ``[notification] batch_timeout`` parameter " "has been restored at the same time to determine how much and how long " "notifications are kept." msgid "The `image` meter is dropped in favour of `image.size` meter." msgstr "The `image` meter is dropped in favour of `image.size` meter." msgid "The `instance` meter no longer will be generated." msgstr "The `instance` meter no longer will be generated." msgid "" "The `instance` meter no longer will be generated. For equivalent " "functionality, perform the exact same query on any compute meter such as " "`cpu`, `disk.read.requests`, `memory.usage`, `network.incoming.bytes`, etc..." msgstr "" "The `instance` meter no longer will be generated. For equivalent " "functionality, perform the exact same query on any compute meter such as " "`cpu`, `disk.read.requests`, `memory.usage`, `network.incoming.bytes`, etc..." msgid "" "The `shuffle_time_before_polling_task` option has been removed. This option " "never worked in the way it was originally intended too." msgstr "" "The `shuffle_time_before_polling_task` option has been removed. This option " "never worked in the way it was originally intended to." msgid "" "The api-paste.ini file can be modified to include or exclude the CORs " "middleware. Additional configurations can be made to middleware as well." msgstr "" "The api-paste.ini file can be modified to include or exclude the CORs " "middleware. 
Additional configurations can be made to middleware as well." msgid "The api.pecan_debug option has been removed." msgstr "The api.pecan_debug option has been removed." msgid "" "The cinder api microversion has been increased from Pike to Wallaby version " "(3.64) for volume/snapshot/backup related pollsters. These might not work " "until the cinder API has been upgraded up to this microversion." msgstr "" "The Cinder API microversion has been increased from Pike to Wallaby version " "(3.64) for volume/snapshot/backup related pollsters. These might not work " "until the Cinder API has been upgraded up to this microversion." msgid "" "The collector service is removed. From Ocata, it's possible to edit the " "pipeline.yaml and event_pipeline.yaml files and modify the publisher to " "provide the same functionality as collector dispatcher. You may change " "publisher to 'gnocchi', 'http', 'panko', or any combination of available " "publishers listed in documentation." msgstr "" "The collector service is removed. From Ocata, it's possible to edit the " "pipeline.yaml and event_pipeline.yaml files and modify the publisher to " "provide the same functionality as collector dispatcher. You may change " "publisher to 'gnocchi', 'http', 'panko', or any combination of available " "publishers listed in documentation." msgid "" "The default ``polling.yaml`` file has been updated and now it enables meters " "related to cinder by default." msgstr "" "The default ``polling.yaml`` file has been updated and now it enables meters " "related to cinder by default." msgid "" "The default event definiton has been updated and no longer includes events " "for sahara." msgstr "" "The default event definition has been updated and no longer includes events " "for Sahara." msgid "The deprecated Ceilometer API has been removed." msgstr "The deprecated Ceilometer API has been removed." 
msgid "" "The deprecated `compute.workload_partitioning` option has been removed in " "favor of `compute.instance_discovery_method`." msgstr "" "The deprecated `compute.workload_partitioning` option has been removed in " "favour of `compute.instance_discovery_method`." msgid "" "The deprecated `disk.*` meters have been removed. Use the `disk.device.*` " "meters instead." msgstr "" "The deprecated `disk.*` meters have been removed. Use the `disk.device.*` " "meters instead." msgid "The deprecated `dispatcher_gnocchi` option group has been removed." msgstr "The deprecated `dispatcher_gnocchi` option group has been removed." msgid "The deprecated `gnocchi_dispatcher` option group has been removed." msgstr "The deprecated `gnocchi_dispatcher` option group has been removed." msgid "The deprecated `meter_definitions_cfg_file` option has been removed." msgstr "The deprecated `meter_definitions_cfg_file` option has been removed." msgid "The deprecated `nova_http_log_debug` option has been removed." msgstr "The deprecated `nova_http_log_debug` option has been removed." msgid "The deprecated `pollster-list` option has been removed." msgstr "The deprecated `pollster-list` option has been removed." msgid "" "The deprecated ceilometer-dbsync has been removed. Use ceilometer-upgrade " "instead." msgstr "" "The deprecated ceilometer-dbsync has been removed. Use ceilometer-upgrade " "instead." msgid "The deprecated control exchange options have been removed." msgstr "The deprecated control exchange options have been removed." msgid "The deprecated file dispatcher has been removed." msgstr "The deprecated file dispatcher has been removed." msgid "The deprecated http dispatcher has been removed." msgstr "The deprecated http dispatcher has been removed." msgid "" "The deprecated kafka publisher has been removed, use NotifierPublisher " "instead." msgstr "" "The deprecated Kafka publisher has been removed, use NotifierPublisher " "instead." 
msgid "The deprecated meter for compute where removed:" msgstr "The deprecated meters for compute were removed:"
By default, the notification agent will continue to load both " "pipelines and build both data models. To selectively enable a pipeline, " "configure the `pipelines` option under the `[notification]` section." msgid "" "The notifier publisher options `metering_topic` and `event_topic` are " "deprecated and will be removed. Use the `topic` query parameter in the " "notifier publisher URL instead." msgstr "" "The notifier publisher options `metering_topic` and `event_topic` are " "deprecated and will be removed. Use the `topic` query parameter in the " "notifier publisher URL instead." msgid "" "The option 'glance_page_size' has been removed because it's not actually " "needed." msgstr "" "The option 'glance_page_size' has been removed because it's not actually " "needed." msgid "" "The option ``glance_page_size`` has been removed because it's not actually " "needed." msgstr "" "The option ``glance_page_size`` has been removed because it's not actually " "needed." msgid "" "The option batch_polled_samples in the [DEFAULT] section is deprecated. Use " "batch_size option in [polling] to configure and/or disable batching." msgstr "" "The option batch_polled_samples in the [DEFAULT] section is deprecated. Use " "batch_size option in [polling] to configure and/or disable batching." msgid "" "The options 'requeue_event_on_dispatcher_error' and " "'requeue_sample_on_dispatcher_error' have been enabled and removed." msgstr "" "The options 'requeue_event_on_dispatcher_error' and " "'requeue_sample_on_dispatcher_error' have been enabled and removed." msgid "" "The options ``requeue_event_on_dispatcher_error`` and " "``requeue_sample_on_dispatcher_error`` have been enabled and removed." msgstr "" "The options ``requeue_event_on_dispatcher_error`` and " "``requeue_sample_on_dispatcher_error`` have been enabled and removed." msgid "" "The pipeline dynamic refresh code has been removed. 
Ceilometer relies on the " "cotyledon library for a few releases which provides reload functionality by " "sending the SIGHUP signal to the process. This achieves the same feature " "while making sure the reload is explicit once the file is correctly and " "entirely written to the disk, avoiding the failing load of half-written " "files." msgstr "" "The pipeline dynamic refresh code has been removed. Ceilometer relies on the " "cotyledon library for a few releases which provides reload functionality by " "sending the SIGHUP signal to the process. This achieves the same feature " "while making sure the reload is explicit once the file is correctly and " "entirely written to the disk, avoiding the failing load of half-written " "files." msgid "" "The previous configuration options default for " "'requeue_sample_on_dispatcher_error' and 'requeue_event_on_dispatcher_error' " "allowed to lose data very easily: if the dispatcher failed to send data to " "the backend (e.g. Gnocchi is down), then the dispatcher raised and the data " "were lost forever. This was completely unacceptable, and nobody should be " "able to configure Ceilometer in that way.\"" msgstr "" "The previous configuration options default for " "'requeue_sample_on_dispatcher_error' and 'requeue_event_on_dispatcher_error' " "allowed to lose data very easily: if the dispatcher failed to send data to " "the backend (e.g. Gnocchi is down), then the dispatcher raised and the data " "were lost forever. This was completely unacceptable, and nobody should be " "able to configure Ceilometer in that way.\"" msgid "" "The previous configuration options default for " "``requeue_sample_on_dispatcher_error`` and " "``requeue_event_on_dispatcher_error`` allowed to lose data very easily: if " "the dispatcher failed to send data to the backend (e.g. Gnocchi is down), " "then the dispatcher raised and the data were lost forever. 
This was " "completely unacceptable, and nobody should be able to configure Ceilometer " "in that way.\"" msgstr "" "The previous configuration options default for " "``requeue_sample_on_dispatcher_error`` and " "``requeue_event_on_dispatcher_error`` allowed to lose data very easily: if " "the dispatcher failed to send data to the backend (e.g. Gnocchi is down), " "then the dispatcher raised and the data were lost forever. This was " "completely unacceptable, and nobody should be able to configure Ceilometer " "in that way.\"" msgid "" "The resource metadata for the Cinder volume size poller now includes the " "availability zone field." msgstr "" "The resource metadata for the Cinder volume size poller now includes the " "availability zone field." msgid "The support for transformers has been removed from the pipeline." msgstr "The support for transformers has been removed from the pipeline." msgid "" "The tenant (project) discovery code in the polling agent now scans for " "tenants in all available domains." msgstr "" "The tenant (project) discovery code in the polling agent now scans for " "tenants in all available domains." msgid "" "The transport_url defined in [oslo_messaging_notifications] was never used, " "which contradicts the oslo_messaging documentation. This is now fixed." msgstr "" "The transport_url defined in [oslo_messaging_notifications] was never used, " "which contradicts the oslo_messaging documentation. This is now fixed." msgid "" "To minimise load on Nova API, an additional configuration option was added " "to control discovery interval vs metric polling interval. If " "resource_update_interval option is configured in compute section, the " "compute agent will discover new instances based on defined interval. The " "agent will continue to poll the discovered instances at the interval defined " "by pipeline." 
msgstr "" "To minimise load on Nova API, an additional configuration option was added " "to control discovery interval vs metric polling interval. If " "resource_update_interval option is configured in compute section, the " "compute agent will discover new instances based on defined interval. The " "agent will continue to poll the discovered instances at the interval defined " "by pipeline." msgid "" "To take advantage of this new feature you will need to update your " "gnocchi_resources.yaml file. See the example file for an example. You will " "need to ensure all required attributes of an instance are specified in the " "event_attributes." msgstr "" "To take advantage of this new feature you will need to update your " "gnocchi_resources.yaml file. See the example file for an example. You will " "need to ensure all required attributes of an instance are specified in the " "event_attributes." msgid "" "To utilize the new policy support. The policy.json file should be updated " "accordingly. The pre-existing policy.json file will continue to function as " "it does if policy changes are not required." msgstr "" "To utilize the new policy support. The policy.json file should be updated " "accordingly. The pre-existing policy.json file will continue to function as " "it does if policy changes are not required." msgid "Train Series Release Notes" msgstr "Train Series Release Notes" msgid "Upgrade Notes" msgstr "Upgrade Notes" msgid "" "Usage of pipeline.yaml for polling configuration is now deprecated. The " "dedicated polling.yaml should be used instead." msgstr "" "Usage of pipeline.yaml for polling configuration is now deprecated. The " "dedicated polling.yaml should be used instead." msgid "" "Usage of transformers in Ceilometer pipelines is deprecated. Transformers in " "Ceilometer have never computed samples correctly when you have multiple " "workers. This functionality can be done by the storage backend easily " "without all issues that Ceilometer has. 
For example, the rating is already " "computed in Gnocchi today." msgstr "" "Usage of transformers in Ceilometer pipelines is deprecated. Transformers in " "Ceilometer have never computed samples correctly when you have multiple " "workers. This functionality can be done by the storage backend easily " "without all issues that Ceilometer has. For example, the rating is already " "computed in Gnocchi today." msgid "" "Use `radosgw.*` to enable/disable radosgw meters explicitly rather than `rgw." "*`" msgstr "" "Use `radosgw.*` to enable/disable radosgw meters explicitly rather than `rgw." "*`" msgid "Ussuri Series Release Notes" msgstr "Ussuri Series Release Notes" msgid "Victoria Series Release Notes" msgstr "Victoria Series Release Notes" msgid "Wallaby Series Release Notes" msgstr "Wallaby Series Release Notes" msgid "" "With collector service being deprecated, we now have to address the " "duplication between dispatchers and publishers. The file dispatcher is now " "marked as deprecated. Use the file publisher to push samples into a file." msgstr "" "With collector service being deprecated, we now have to address the " "duplication between dispatchers and publishers. The file dispatcher is now " "marked as deprecated. Use the file publisher to push samples into a file." msgid "" "Workload partitioning of notification agent is now split into queues based " "on pipeline type (sample, event, etc...) rather than per individual " "pipeline. This will save some memory usage specifically for pipeline " "definitions with many source/sink combinations." msgstr "" "Workload partitioning of notification agent is now split into queues based " "on pipeline type (sample, event, etc...) rather than per individual " "pipeline. This will save some memory usage specifically for pipeline " "definitions with many source/sink combinations." 
msgid "Xena Series Release Notes" msgstr "Xena Series Release Notes" msgid "Yoga Series Release Notes" msgstr "Yoga Series Release Notes" msgid "Zed Series Release Notes" msgstr "Zed Series Release Notes" msgid "" "[`bug 1254800 `_] Add " "better support to catch race conditions when creating event_types" msgstr "" "[`bug 1254800 `_] Add " "better support to catch race conditions when creating event_types" msgid "" "[`bug 1388680 `_] " "Suppose ability to query for None value when using SQL backend." msgstr "" "[`bug 1388680 `_] " "Suppose ability to query for None value when using SQL backend." msgid "" "[`bug 1480333 `_] " "Support ability to configure collector to capture events or meters mutally " "exclusively, rather than capturing both always." msgstr "" "[`bug 1480333 `_] " "Support ability to configure collector to capture events or meters mutally " "exclusively, rather than capturing both always." msgid "" "[`bug 1491509 `_] Patch " "to unify timestamp in samples polled by pollsters. Set the time point " "polling starts as timestamp of samples, and drop timetamping in pollsters." msgstr "" "[`bug 1491509 `_] Patch " "to unify timestamp in samples polled by pollsters. Set the time point " "polling starts as timestamp of samples, and drop timestamping in pollsters." msgid "" "[`bug 1504495 `_] " "Configure ceilometer to handle policy.json rules when possible." msgstr "" "[`bug 1504495 `_] " "Configure Ceilometer to handle policy.json rules when possible." msgid "" "[`bug 1506738 `_] [`bug " "1509677 `_] Optimise SQL " "backend queries to minimise query load" msgstr "" "[`bug 1506738 `_] [`bug " "1509677 `_] Optimise SQL " "backend queries to minimise query load" msgid "" "[`bug 1506959 `_] Add " "support to query unique set of meter names rather than meters associated " "with each resource. The list is available by adding unique=True option to " "request." 
msgstr "" "[`bug 1506959 `_] Add " "support to query unique set of meter names rather than meters associated " "with each resource. The list is available by adding unique=True option to " "request." msgid "" "[`bug 1513731 `_] Add " "support for hardware cpu_util in snmp.yaml" msgstr "" "[`bug 1513731 `_] Add " "support for hardware cpu_util in snmp.yaml" msgid "" "[`bug 1518338 `_] Add " "support for storing SNMP metrics in Gnocchi.This functionality requires " "Gnocchi v2.1.0 to be installed." msgstr "" "[`bug 1518338 `_] Add " "support for storing SNMP metrics in Gnocchi.This functionality requires " "Gnocchi v2.1.0 to be installed." msgid "" "[`bug 1519767 `_] " "fnmatch functionality in python <= 2.7.9 is not threadsafe. this issue and " "its potential race conditions are now patched." msgstr "" "[`bug 1519767 `_] " "fnmatch functionality in python <= 2.7.9 is not thread-safe. this issue and " "its potential race conditions are now patched." msgid "" "[`bug 1523124 `_] Fix " "gnocchi dispatcher to support UDP collector" msgstr "" "[`bug 1523124 `_] Fix " "Gnocchi dispatcher to support UDP collector" msgid "" "[`bug 1526793 `_] " "Additional indices were added to better support querying of event data." msgstr "" "[`bug 1526793 `_] " "Additional indices were added to better support querying of event data." msgid "" "[`bug 1530793 `_] " "network.services.lb.incoming.bytes meter was previous set to incorrect type. " "It should be a gauge meter." msgstr "" "[`bug 1530793 `_] " "network.services.lb.incoming.bytes meter was previous set to incorrect type. " "It should be a gauge meter." msgid "" "[`bug 1531626 `_] Ensure " "aggregator transformer timeout is honoured if size is not provided." msgstr "" "[`bug 1531626 `_] Ensure " "aggregator transformer timeout is honoured if size is not provided." msgid "" "[`bug 1532661 `_] Fix " "statistics query failures due to large numbers stored in MongoDB. 
Data from " "MongoDB is returned as Int64 for big numbers when int and float types are " "expected. The data is cast to appropriate type to handle large data." msgstr "" "[`bug 1532661 `_] Fix " "statistics query failures due to large numbers stored in MongoDB. Data from " "MongoDB is returned as Int64 for big numbers when int and float types are " "expected. The data is cast to appropriate type to handle large data." msgid "" "[`bug 1533787 `_] Fix an " "issue where agents are not properly getting registered to group when " "multiple notification agents are deployed. This can result in bad " "transformation as the agents are not coordinated. It is still recommended to " "set heartbeat_timeout_threshold = 0 in [oslo_messaging_rabbit] section when " "deploying multiple agents." msgstr "" "[`bug 1533787 `_] Fix an " "issue where agents are not properly getting registered to group when " "multiple notification agents are deployed. This can result in bad " "transformation as the agents are not coordinated. It is still recommended to " "set heartbeat_timeout_threshold = 0 in [oslo_messaging_rabbit] section when " "deploying multiple agents." msgid "" "[`bug 1536338 `_] Patch " "was added to fix the broken floatingip pollster that polled data from nova " "api, but since the nova api filtered the data by tenant, ceilometer was not " "getting any data back. The fix changes the pollster to use the neutron api " "instead to get the floating ip info." msgstr "" "[`bug 1536338 `_] Patch " "was added to fix the broken floatingip pollster that polled data from Nova " "API, but since the Nova API filtered the data by tenant, Ceilometer was not " "getting any data back. The fix changes the pollster to use the Neutron API " "instead to get the floating IP info." msgid "" "[`bug 1536498 `_] Patch " "to fix duplicate meter definitions causing duplicate samples. If a duplicate " "is found, log a warning and skip the meter definition. 
Note that the first " "occurance of a meter will be used and any following duplicates will be " "skipped from processing." msgstr "" "[`bug 1536498 `_] Patch " "to fix duplicate meter definitions causing duplicate samples. If a duplicate " "is found, log a warning and skip the meter definition. Note that the first " "occurrence of a meter will be used and any following duplicates will be " "skipped from processing." msgid "" "[`bug 1536699 `_] Patch " "to fix volume field lookup in meter definition file. In case the field is " "missing in the definition, it raises a keyerror and aborts. Instead we " "should skip the missing field meter and continue with the rest of the " "definitions." msgstr "" "[`bug 1536699 `_] Patch " "to fix volume field lookup in meter definition file. In case the field is " "missing in the definition, it raises a key error and aborts. Instead we " "should skip the missing field meter and continue with the rest of the " "definitions." msgid "" "[`bug 1539163 `_] Add " "ability to define whether to use first or last timestamps when aggregating " "samples. This will allow more flexibility when chaining transformers." msgstr "" "[`bug 1539163 `_] Add " "ability to define whether to use first or last timestamps when aggregating " "samples. This will allow more flexibility when chaining transformers." msgid "" "[`bug 1542189 `_] Handle " "malformed resource definitions in gnocchi_resources.yaml gracefully. " "Currently we raise an exception once we hit a bad resource and skip the " "rest. Instead the patch skips the bad resource and proceeds with rest of the " "definitions." msgstr "" "[`bug 1542189 `_] Handle " "malformed resource definitions in gnocchi_resources.yaml gracefully. " "Currently we raise an exception once we hit a bad resource and skip the " "rest. Instead the patch skips the bad resource and proceeds with rest of the " "definitions." 
msgid "" "[`bug 1550436 `_] Cache " "json parsers when building parsing logic to handle event and meter " "definitions. This will improve agent startup and setup time." msgstr "" "[`bug 1550436 `_] Cache " "json parsers when building parsing logic to handle event and meter " "definitions. This will improve agent startup and setup time." msgid "" "[`bug 1578128 `_] Add a " "tool that allow users to drop the legacy alarm and alarm_history tables." msgstr "" "[`bug 1578128 `_] Add a " "tool that allow users to drop the legacy alarm and alarm_history tables." msgid "" "[`bug 1597618 `_] Add " "the full support of snmp v3 user security model." msgstr "" "[`bug 1597618 `_] Add " "the full support of SNMP v3 user security model." msgid "" "[`bug 1848286 `_] " "Enable load balancer metrics by adding the loadbalancer resource type, " "allowing Gnocchi to capture measurement data for Octavia load balancers." msgstr "" "[`bug 1848286 `_] " "Enable load balancer metrics by adding the loadbalancer resource type, " "allowing Gnocchi to capture measurement data for Octavia load balancers." msgid "" "[`bug 1940660 `_] Fixes " "an issue with the Swift pollster where the ``[service_credentials] cafile`` " "option was not used. This could prevent communication with TLS-enabled Swift " "APIs." msgstr "" "[`bug 1940660 `_] Fixes " "an issue with the Swift pollster where the ``[service_credentials] cafile`` " "option was not used. This could prevent communication with TLS-enabled Swift " "APIs." msgid "" "[`bug 2007108 `_] The " "retired metrics dependent on SNMP have been removed from the default " "``polling.yaml``." msgstr "" "[`bug 2007108 `_] The " "retired metrics dependent on SNMP have been removed from the default " "``polling.yaml``." msgid "" "[`bug 255569 `_] Fix " "caching support in Gnocchi dispatcher. Added better locking support to " "enable smoother cache access." msgstr "" "[`bug 255569 `_] Fix " "caching support in Gnocchi dispatcher. 
Added better locking support to " "enable smoother cache access." msgid "" "``GenericHardwareDeclarativePollster`` has been deprecated and will be " "removed in a future release. This pollster was designed to be used in " "TripleO deployment to gather hardware metrics from overcloud nodes but " "Telemetry services are no longer deployed in undercloud in current TripleO." msgstr "" "``GenericHardwareDeclarativePollster`` has been deprecated and will be " "removed in a future release. This pollster was designed to be used in " "TripleO deployment to gather hardware metrics from overcloud nodes but " "Telemetry services are no longer deployed in undercloud in the current " "TripleO." msgid "" "``GenericHardwareDeclarativePollster`` has been removed. Because of this " "removal all metrics gathered by SNMP daemon have been removed as well." msgstr "" "``GenericHardwareDeclarativePollster`` has been removed. Because of this " "removal, all metrics gathered by the SNMP daemon have also been removed." msgid "``cpu_l3_cache_usage``" msgstr "``cpu_l3_cache_usage``" msgid "" "``gnocchi_resources.yaml`` has been updated with changes to the ``volume`` " "resource type. If you override this file in your deployment, it needs to be " "updated." msgstr "" "``gnocchi_resources.yaml`` has been updated with changes to the ``volume`` " "resource type. If you override this file in your deployment, it needs to be " "updated." msgid "``memory_bandwidth_local``" msgstr "``memory_bandwidth_local``" msgid "``memory_bandwidth_total``" msgstr "``memory_bandwidth_total``" msgid "" "``meters.yaml`` has been updated with changes to the ``volume.size`` " "notification meter. If you override this file in your deployment, it needs " "to be updated." msgstr "" "``meters.yaml`` has been updated with changes to the ``volume.size`` " "notification meter. If you override this file in your deployment, it needs " "to be updated." 
msgid "" "`ceilometer-upgrade` must be run to build IPMI sensor resource in Gnocchi." msgstr "" "`ceilometer-upgrade` must be run to build IPMI sensor resource in Gnocchi." msgid "" "`launched_at`/`created_at`/`deleted_at` of Nova instances are now tracked." msgstr "" "`launched_at`/`created_at`/`deleted_at` of Nova instances are now tracked." msgid "" "audit middleware in keystonemiddleware library should be used for similar " "support." msgstr "" "audit middleware in keystonemiddleware library should be used for similar " "support." msgid "" "batch_size and batch_timeout configuration options are added to both " "[notification] and [collector] sections of configuration. The batch_size " "controls the number of messages to grab before processing. Similarly, the " "batch_timeout defines the wait time before processing." msgstr "" "batch_size and batch_timeout configuration options are added to both " "[notification] and [collector] sections of configuration. The batch_size " "controls the number of messages to grab before processing. Similarly, the " "batch_timeout defines the wait time before processing." msgid "" "batch_size option added to [polling] section of configuration. Use " "batch_size=0 to disable batching of samples." msgstr "" "batch_size option added to [polling] section of configuration. Use " "batch_size=0 to disable batching of samples." msgid "" "cpu_util and \\*.rate meters are deprecated and will be removed in future " "release in favor of the Gnocchi rate calculation equivalent." msgstr "" "cpu_util and \\*.rate meters are deprecated and will be removed in future " "release in favour of the Gnocchi rate calculation equivalent." msgid "" "disk.* aggregated metrics for instance are deprecated, in favor of the per " "disk metrics (disk.device.*). Now, it's up to the backend to provide such " "aggregation feature. Gnocchi already provides this." 
msgstr "" "disk.* aggregated metrics for instance are deprecated, in favour of the per " "disk metrics (disk.device.*). Now, it's up to the backend to provide such an " "aggregation feature. Gnocchi already provides this." msgid "disk.device.read.bytes.rate" msgstr "disk.device.read.bytes.rate" msgid "disk.device.read.requests.rate" msgstr "disk.device.read.requests.rate" msgid "disk.device.write.bytes.rate" msgstr "disk.device.write.bytes.rate" msgid "disk.device.write.requests.rate" msgstr "disk.device.write.requests.rate" msgid "disk.read.bytes.rate" msgstr "disk.read.bytes.rate" msgid "disk.read.requests.rate" msgstr "disk.read.requests.rate" msgid "disk.write.bytes.rate" msgstr "disk.write.bytes.rate" msgid "disk.write.requests.rate" msgstr "disk.write.requests.rate" msgid "gnocchi_resources.yaml in Ceilometer should be updated." msgstr "gnocchi_resources.yaml in Ceilometer should be updated." msgid "gnocchiclient library is now a requirement if using ceilometer+gnocchi." msgstr "" "gnocchiclient library is now a requirement if using ceilometer+gnocchi." msgid "" "metrics hardware.cpu.util and hardware.system_stats.cpu.idle are now " "deprecated. Other hardware.cpu.* metrics should be used instead." msgstr "" "metrics hardware.cpu.util and hardware.system_stats.cpu.idle are now " "deprecated. Other hardware.cpu.* metrics should be used instead." msgid "" "new metrics are available for snmp polling hardware.cpu.user, hardware.cpu." "nice, hardware.cpu.system, hardware.cpu.idle, hardware.cpu.wait, hardware." "cpu.kernel, hardware.cpu.interrupt. They replace deprecated hardware.cpu." "util and hardware.system_stats.cpu.idle." msgstr "" "new metrics are available for snmp polling hardware.cpu.user, hardware.cpu." "nice, hardware.cpu.system, hardware.cpu.idle, hardware.cpu.wait, hardware." "cpu.kernel, hardware.cpu.interrupt. They replace deprecated hardware.cpu." "util and hardware.system_stats.cpu.idle." 
msgid "use memory usable metric from libvirt memoryStats if available." msgstr "use memory usable metric from libvirt memoryStats if available." ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.7899415 ceilometer-24.1.0.dev59/releasenotes/source/locale/fr/0000775000175100017510000000000015033033521021623 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.8099413 ceilometer-24.1.0.dev59/releasenotes/source/locale/fr/LC_MESSAGES/0000775000175100017510000000000015033033521023410 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po0000664000175100017510000000272215033033467026455 0ustar00mylesmyles# Gérald LONLAS , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: Ceilometer Release Notes\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2024-03-28 06:27+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-10-22 05:24+0000\n" "Last-Translator: Gérald LONLAS \n" "Language-Team: French\n" "Language: fr\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n > 1)\n" msgid "5.0.1" msgstr "5.0.1" msgid "5.0.2" msgstr "5.0.2" msgid "5.0.3" msgstr "5.0.3" msgid "6.0.0" msgstr "6.0.0" msgid "7.0.0" msgstr "7.0.0" msgid "7.0.0.0b2" msgstr "7.0.0.0b2" msgid "7.0.0.0b3" msgstr "7.0.0.0b3" msgid "7.0.0.0rc1" msgstr "7.0.0.0rc1" msgid "Bug Fixes" msgstr "Corrections de bugs" msgid "Ceilometer Release Notes" msgstr "Note de release de Ceilometer" msgid "Critical Issues" msgstr "Erreurs critiques" msgid "Current Series Release Notes" msgstr "Note de la release actuelle" msgid "Deprecation Notes" msgstr "Notes dépréciées " msgid "Known Issues" msgstr "Problèmes connus" msgid "Liberty Series Release Notes" msgstr "Note de release pour Liberty" msgid 
"New Features" msgstr "Nouvelles fonctionnalités" msgid "Other Notes" msgstr "Autres notes" msgid "Start using reno to manage release notes." msgstr "Commence à utiliser reno pour la gestion des notes de release" msgid "Upgrade Notes" msgstr "Notes de mises à jours" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/source/mitaka.rst0000664000175100017510000003332015033033467021767 0ustar00mylesmyles==================== Mitaka Release Notes ==================== 6.0.0 ===== New Features ------------ .. releasenotes/notes/batch-messaging-d126cc525879d58e.yaml @ c5895d2c6efc6676679e6973c06b85c0c3a10585 - Add support for batch processing of messages from queue. This will allow the collector and notification agent to grab multiple messages per thread to enable more efficient processing. .. releasenotes/notes/compute-discovery-interval-d19f7c9036a8c186.yaml @ e6fa0a84d1f7a326881f3587718f1df743b8585f - To minimise load on Nova API, an additional configuration option was added to control discovery interval vs metric polling interval. If resource_update_interval option is configured in compute section, the compute agent will discover new instances based on defined interval. The agent will continue to poll the discovered instances at the interval defined by pipeline. .. releasenotes/notes/configurable-data-collector-e247aadbffb85243.yaml @ f24ea44401b8945c9cb8a34b2aedebba3c040691 - [`bug 1480333 `_] Support ability to configure collector to capture events or meters mutally exclusively, rather than capturing both always. .. releasenotes/notes/cors-support-70c33ba1f6825a7b.yaml @ c5895d2c6efc6676679e6973c06b85c0c3a10585 - Support for CORS is added. More information can be found [`here `_] .. releasenotes/notes/gnocchi-cache-1d8025dfc954f281.yaml @ f24ea44401b8945c9cb8a34b2aedebba3c040691 - Support resource caching in Gnocchi dispatcher to improve write performance to avoid additional queries. .. 
releasenotes/notes/gnocchi-client-42cd992075ee53ab.yaml @ 1689e7053f4e7587a2b836035cdfa4fda56667fc - Gnocchi dispatcher now uses client rather than direct http requests .. releasenotes/notes/gnocchi-host-metrics-829bcb965d8f2533.yaml @ e6fa0a84d1f7a326881f3587718f1df743b8585f - [`bug 1518338 `_] Add support for storing SNMP metrics in Gnocchi.This functionality requires Gnocchi v2.1.0 to be installed. .. releasenotes/notes/keystone-v3-fab1e257c5672965.yaml @ 1689e7053f4e7587a2b836035cdfa4fda56667fc - Add support for Keystone v3 authentication .. releasenotes/notes/remove-alarms-4df3cdb4f1fb5faa.yaml @ f24ea44401b8945c9cb8a34b2aedebba3c040691 - Ceilometer alarms code is now fully removed from code base. Equivalent functionality is handled by Aodh. .. releasenotes/notes/remove-cadf-http-f8449ced3d2a29d4.yaml @ 1689e7053f4e7587a2b836035cdfa4fda56667fc - Support for CADF-only payload in HTTP dispatcher is dropped as audit middleware in pyCADF was dropped in Kilo cycle. .. releasenotes/notes/remove-eventlet-6738321434b60c78.yaml @ f24ea44401b8945c9cb8a34b2aedebba3c040691 - Remove eventlet from Ceilometer in favour of threaded approach .. releasenotes/notes/remove-rpc-collector-d0d0a354140fd107.yaml @ 1689e7053f4e7587a2b836035cdfa4fda56667fc - RPC collector support is dropped. The queue-based notifier publisher and collector was added as the recommended alternative as of Icehouse cycle. .. releasenotes/notes/support-lbaasv2-polling-c830dd49bcf25f64.yaml @ e6fa0a84d1f7a326881f3587718f1df743b8585f - Support for polling Neutron's LBaaS v2 API was added as v1 API in Neutron is deprecated. The same metrics are available between v1 and v2. .. releasenotes/notes/support-snmp-cpu-util-5c1c7afb713c1acd.yaml @ f24ea44401b8945c9cb8a34b2aedebba3c040691 - [`bug 1513731 `_] Add support for hardware cpu_util in snmp.yaml .. 
releasenotes/notes/support-unique-meter-query-221c6e0c1dc1b726.yaml @ e6fa0a84d1f7a326881f3587718f1df743b8585f - [`bug 1506959 `_] Add support to query unique set of meter names rather than meters associated with each resource. The list is available by adding unique=True option to request. Known Issues ------------ .. releasenotes/notes/support-lbaasv2-polling-c830dd49bcf25f64.yaml @ e6fa0a84d1f7a326881f3587718f1df743b8585f - Neutron API is not designed to be polled against. When polling against Neutron is enabled, Ceilometer's polling agents may generage a significant load against the Neutron API. It is recommended that a dedicated API be enabled for polling while Neutron's API is improved to handle polling. Upgrade Notes ------------- .. releasenotes/notes/always-requeue-7a2df9243987ab67.yaml @ 244439979fd28ecb0c76d132f0be784c988b54c8 - The options 'requeue_event_on_dispatcher_error' and 'requeue_sample_on_dispatcher_error' have been enabled and removed. .. releasenotes/notes/batch-messaging-d126cc525879d58e.yaml @ c5895d2c6efc6676679e6973c06b85c0c3a10585 - batch_size and batch_timeout configuration options are added to both [notification] and [collector] sections of configuration. The batch_size controls the number of messages to grab before processing. Similarly, the batch_timeout defines the wait time before processing. .. releasenotes/notes/cors-support-70c33ba1f6825a7b.yaml @ c5895d2c6efc6676679e6973c06b85c0c3a10585 - The api-paste.ini file can be modified to include or exclude the CORs middleware. Additional configurations can be made to middleware as well. .. releasenotes/notes/gnocchi-client-42cd992075ee53ab.yaml @ 1689e7053f4e7587a2b836035cdfa4fda56667fc - gnocchiclient library is now a requirement if using ceilometer+gnocchi. .. releasenotes/notes/gnocchi-orchestration-3497c689268df0d1.yaml @ 1689e7053f4e7587a2b836035cdfa4fda56667fc - gnocchi_resources.yaml in Ceilometer should be updated. .. 
releasenotes/notes/improve-events-rbac-support-f216bd7f34b02032.yaml @ e6fa0a84d1f7a326881f3587718f1df743b8585f - To utilize the new policy support. The policy.json file should be updated accordingly. The pre-existing policy.json file will continue to function as it does if policy changes are not required. .. releasenotes/notes/index-events-mongodb-63cb04200b03a093.yaml @ 1689e7053f4e7587a2b836035cdfa4fda56667fc - Run db-sync to add new indices. .. releasenotes/notes/remove-cadf-http-f8449ced3d2a29d4.yaml @ 1689e7053f4e7587a2b836035cdfa4fda56667fc - audit middleware in keystonemiddleware library should be used for similar support. .. releasenotes/notes/remove-rpc-collector-d0d0a354140fd107.yaml @ 1689e7053f4e7587a2b836035cdfa4fda56667fc - Pipeline.yaml files for agents should be updated to notifier:// or udp:// publishers. The rpc:// publisher is no longer supported. .. releasenotes/notes/support-lbaasv2-polling-c830dd49bcf25f64.yaml @ e6fa0a84d1f7a326881f3587718f1df743b8585f - By default, Ceilometer will poll the v2 API. To poll legacy v1 API, add neutron_lbaas_version=v1 option to configuration file. Critical Issues --------------- .. releasenotes/notes/always-requeue-7a2df9243987ab67.yaml @ 244439979fd28ecb0c76d132f0be784c988b54c8 - The previous configuration options default for 'requeue_sample_on_dispatcher_error' and 'requeue_event_on_dispatcher_error' allowed to lose data very easily: if the dispatcher failed to send data to the backend (e.g. Gnocchi is down), then the dispatcher raised and the data were lost forever. This was completely unacceptable, and nobody should be able to configure Ceilometer in that way." .. releasenotes/notes/fix-agent-coordination-a7103a78fecaec24.yaml @ e84a10882a9b682ff41c84e8bf4ee2497e7e7a31 - [`bug 1533787 `_] Fix an issue where agents are not properly getting registered to group when multiple notification agents are deployed. This can result in bad transformation as the agents are not coordinated. 
It is still recommended to set heartbeat_timeout_threshold = 0 in [oslo_messaging_rabbit] section when deploying multiple agents. .. releasenotes/notes/thread-safe-matching-4a635fc4965c5d4c.yaml @ f24ea44401b8945c9cb8a34b2aedebba3c040691 - [`bug 1519767 `_] fnmatch functionality in python <= 2.7.9 is not threadsafe. this issue and its potential race conditions are now patched. Bug Fixes --------- .. releasenotes/notes/aggregator-transformer-timeout-e0f42b6c96aa7ada.yaml @ 1689e7053f4e7587a2b836035cdfa4fda56667fc - [`bug 1531626 `_] Ensure aggregator transformer timeout is honoured if size is not provided. .. releasenotes/notes/cache-json-parsers-888307f3b6b498a2.yaml @ e6fa0a84d1f7a326881f3587718f1df743b8585f - [`bug 1550436 `_] Cache json parsers when building parsing logic to handle event and meter definitions. This will improve agent startup and setup time. .. releasenotes/notes/event-type-race-c295baf7f1661eab.yaml @ 0e3ae8a667d9b9d6e19a7515854eb1703fc05013 - [`bug 1254800 `_] Add better support to catch race conditions when creating event_types .. releasenotes/notes/fix-aggregation-transformer-9472aea189fa8f65.yaml @ e6fa0a84d1f7a326881f3587718f1df743b8585f - [`bug 1539163 `_] Add ability to define whether to use first or last timestamps when aggregating samples. This will allow more flexibility when chaining transformers. .. releasenotes/notes/fix-floatingip-pollster-f5172060c626b19e.yaml @ 1f9f4e1072a5e5037b93734bafcc65e4211eb19f - [`bug 1536338 `_] Patch was added to fix the broken floatingip pollster that polled data from nova api, but since the nova api filtered the data by tenant, ceilometer was not getting any data back. The fix changes the pollster to use the neutron api instead to get the floating ip info. .. releasenotes/notes/fix-network-lb-bytes-sample-5dec2c6f3a8ae174.yaml @ 1689e7053f4e7587a2b836035cdfa4fda56667fc - [`bug 1530793 `_] network.services.lb.incoming.bytes meter was previous set to incorrect type. It should be a gauge meter. .. 
releasenotes/notes/gnocchi-cache-b9ad4d85a1da8d3f.yaml @ 1689e7053f4e7587a2b836035cdfa4fda56667fc - [`bug 255569 `_] Fix caching support in Gnocchi dispatcher. Added better locking support to enable smoother cache access. .. releasenotes/notes/gnocchi-orchestration-3497c689268df0d1.yaml @ 1689e7053f4e7587a2b836035cdfa4fda56667fc - Fix samples from Heat to map to correct Gnocchi resource type .. releasenotes/notes/gnocchi-udp-collector-00415e6674b5cc0f.yaml @ 1689e7053f4e7587a2b836035cdfa4fda56667fc - [`bug 1523124 `_] Fix gnocchi dispatcher to support UDP collector .. releasenotes/notes/handle-malformed-resource-definitions-ad4f69f898ced34d.yaml @ 02b1e1399bf885d03113a1cc125b1f97ed5540b9 - [`bug 1542189 `_] Handle malformed resource definitions in gnocchi_resources.yaml gracefully. Currently we raise an exception once we hit a bad resource and skip the rest. Instead the patch skips the bad resource and proceeds with rest of the definitions. .. releasenotes/notes/improve-events-rbac-support-f216bd7f34b02032.yaml @ e6fa0a84d1f7a326881f3587718f1df743b8585f - [`bug 1504495 `_] Configure ceilometer to handle policy.json rules when possible. .. releasenotes/notes/index-events-mongodb-63cb04200b03a093.yaml @ 1689e7053f4e7587a2b836035cdfa4fda56667fc - [`bug 1526793 `_] Additional indices were added to better support querying of event data. .. releasenotes/notes/lookup-meter-def-vol-correctly-0122ae429275f2a6.yaml @ 903a0a527cb240cfd9462b7f56d3463db7128993 - [`bug 1536699 `_] Patch to fix volume field lookup in meter definition file. In case the field is missing in the definition, it raises a keyerror and aborts. Instead we should skip the missing field meter and continue with the rest of the definitions. .. releasenotes/notes/mongodb-handle-large-numbers-7c235598ca700f2d.yaml @ e6fa0a84d1f7a326881f3587718f1df743b8585f - [`bug 1532661 `_] Fix statistics query failures due to large numbers stored in MongoDB. 
Data from MongoDB is returned as Int64 for big numbers when int and float types are expected. The data is cast to appropriate type to handle large data. .. releasenotes/notes/skip-duplicate-meter-def-0420164f6a95c50c.yaml @ 0c6f11cf88bf1a13a723879de46ec616678d2e0b - [`bug 1536498 `_] Patch to fix duplicate meter definitions causing duplicate samples. If a duplicate is found, log a warning and skip the meter definition. Note that the first occurance of a meter will be used and any following duplicates will be skipped from processing. .. releasenotes/notes/sql-query-optimisation-ebb2233f7a9b5d06.yaml @ f24ea44401b8945c9cb8a34b2aedebba3c040691 - [`bug 1506738 `_] [`bug 1509677 `_] Optimise SQL backend queries to minimise query load .. releasenotes/notes/support-None-query-45abaae45f08eda4.yaml @ e6fa0a84d1f7a326881f3587718f1df743b8585f - [`bug 1388680 `_] Suppose ability to query for None value when using SQL backend. Other Notes ----------- .. releasenotes/notes/configurable-data-collector-e247aadbffb85243.yaml @ f24ea44401b8945c9cb8a34b2aedebba3c040691 - Configure individual dispatchers by specifying meter_dispatchers and event_dispatchers in configuration file. .. releasenotes/notes/gnocchi-cache-1d8025dfc954f281.yaml @ f24ea44401b8945c9cb8a34b2aedebba3c040691 - A dogpile.cache supported backend is required to enable cache. Additional configuration `options `_ are also required. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/source/newton.rst0000664000175100017510000001614215033033467022036 0ustar00mylesmyles==================== Newton Release Notes ==================== 7.0.5 ===== Bug Fixes --------- .. releasenotes/notes/refresh-legacy-cache-e4dbbd3e2eeca70b.yaml @ 66dd8ab65e2d9352de86e47056dea0b701e21a15 - A local cache is used when polling instance metrics to minimise calls Nova API. 
A new option is added `resource_cache_expiry` to configure a time to live for cache before it expires. This resolves issue where migrated instances are not removed from cache. 7.0.1 ===== New Features ------------ .. releasenotes/notes/http_proxy_to_wsgi_enabled-616fa123809e1600.yaml @ 032032642ad49e01d706f19f51d672fcff403442 - Ceilometer sets up the HTTPProxyToWSGI middleware in front of Ceilometer. The purpose of this middleware is to set up the request URL correctly in case there is a proxy (for instance, a loadbalancer such as HAProxy) in front of Ceilometer. So, for instance, when TLS connections are being terminated in the proxy, and one tries to get the versions from the / resource of Ceilometer, one will notice that the protocol is incorrect; It will show 'http' instead of 'https'. So this middleware handles such cases. Thus helping Keystone discovery work correctly. The HTTPProxyToWSGI is off by default and needs to be enabled via a configuration value. 7.0.0 ===== Prelude ------- .. releasenotes/notes/rename-ceilometer-dbsync-eb7a1fa503085528.yaml @ 18c181f0b3ce07a0cd552a9060dd09a95cc26078 Ceilometer backends are no more only databases but also REST API like Gnocchi. So ceilometer-dbsync binary name doesn't make a lot of sense and have been renamed ceilometer-upgrade. The new binary handles database schema upgrade like ceilometer-dbsync does, but it also handle any changes needed in configured ceilometer backends like Gnocchi. New Features ------------ .. releasenotes/notes/add-magnum-event-4c75ed0bb268d19c.yaml @ cf3f7c992e0d29e06a7bff6c1df2f0144418d80f - Added support for magnum bay CRUD events, event_type is 'magnum.bay.*'. .. releasenotes/notes/http-dispatcher-verify-ssl-551d639f37849c6f.yaml @ 2fca7ebd7c6a4d29c8a320fffd035ed9814e8293 - In the [dispatcher_http] section of ceilometer.conf, verify_ssl can be set to True to use system-installed certificates (default value) or False to ignore certificate verification (use in development only!). 
verify_ssl can also be set to the location of a certificate file e.g. /some/path/cert.crt (use for self-signed certs) or to a directory of certificates. The value is passed as the 'verify' option to the underlying requests method, which is documented at http://docs.python-requests.org/en/master/user/advanced/#ssl-cert-verification .. releasenotes/notes/memory-bandwidth-meter-f86cf01178573671.yaml @ ed7b6dbc952e49ca69de9a94a01398b106aece4b - Add two new meters, including memory.bandwidth.total and memory.bandwidth.local, to get memory bandwidth statistics based on Intel CMT feature. .. releasenotes/notes/perf-events-meter-b06c2a915c33bfaf.yaml @ aaedbbe0eb02ad1f86395a5a490495b64ce26777 - Add four new meters, including perf.cpu.cycles for the number of cpu cycles one instruction needs, perf.instructions for the count of instructions, perf.cache_references for the count of cache hits and cache_misses for the count of caches misses. .. releasenotes/notes/support-meter-batch-recording-mongo-6c2bdf4fbb9764eb.yaml @ a2a04e5d234ba358c25d541f31f8ca1a61bfd5d8 - Add support of batch recording metering data to mongodb backend, since the pymongo support *insert_many* interface which can be used to batch record items, in "big-data" scenarios, this change can improve the performance of metering data recording. .. releasenotes/notes/use-glance-v2-in-image-pollsters-137a315577d5dc4c.yaml @ f8933f4abda4ecfc07ee41f84fd5fd8f6667e95a - Since the Glance v1 APIs won't be maintained any more, this change add the support of glance v2 in images pollsters. Upgrade Notes ------------- .. releasenotes/notes/always-requeue-7a2df9243987ab67.yaml @ 40684dafae76eab77b66bb1da7e143a3d7e2c9c8 - The options 'requeue_event_on_dispatcher_error' and 'requeue_sample_on_dispatcher_error' have been enabled and removed. .. releasenotes/notes/single-thread-pipelines-f9e6ac4b062747fe.yaml @ 5750fddf288c749cacfc825753928f66e755758d - Batching is enabled by default now when coordinated workers are enabled. 
Depending on load, it is recommended to scale out the number of `pipeline_processing_queues` to improve distribution. `batch_size` should also be configured accordingly. .. releasenotes/notes/use-glance-v2-in-image-pollsters-137a315577d5dc4c.yaml @ f8933f4abda4ecfc07ee41f84fd5fd8f6667e95a - The option 'glance_page_size' has been removed because it's not actually needed. Deprecation Notes ----------------- .. releasenotes/notes/deprecated_database_event_dispatcher_panko-607d558c86a90f17.yaml @ 3685dcf417543db0bb708b347e996d88385c8c5b - The event database dispatcher is now deprecated. It has been moved to a new project, alongside the Ceilometer API for /v2/events, called Panko. .. releasenotes/notes/kwapi_deprecated-c92b9e72c78365f0.yaml @ 2bb81d41f1c5086b68b1290362c72966c1e33702 - The Kwapi pollsters are deprecated and will be removed in the next major version of Ceilometer. .. releasenotes/notes/rename-ceilometer-dbsync-eb7a1fa503085528.yaml @ 18c181f0b3ce07a0cd552a9060dd09a95cc26078 - For backward compatibility reason we temporary keep ceilometer-dbsync, at least for one major version to ensure deployer have time update their tooling. Critical Issues --------------- .. releasenotes/notes/always-requeue-7a2df9243987ab67.yaml @ 40684dafae76eab77b66bb1da7e143a3d7e2c9c8 - The previous configuration options default for 'requeue_sample_on_dispatcher_error' and 'requeue_event_on_dispatcher_error' allowed to lose data very easily: if the dispatcher failed to send data to the backend (e.g. Gnocchi is down), then the dispatcher raised and the data were lost forever. This was completely unacceptable, and nobody should be able to configure Ceilometer in that way." Bug Fixes --------- .. releasenotes/notes/add-db-legacy-clean-tool-7b3e3714f414c448.yaml @ 800034dc0bbb9502893dedd9bcde7c170780c375 - [`bug 1578128 `_] Add a tool that allow users to drop the legacy alarm and alarm_history tables. .. 
releasenotes/notes/add-full-snmpv3-usm-support-ab540c902fa89b9d.yaml @ dc254e2f78a4bb42b0df6556df8347c7137ab5b2 - [`bug 1597618 `_] Add the full support of snmp v3 user security model. .. releasenotes/notes/single-thread-pipelines-f9e6ac4b062747fe.yaml @ 5750fddf288c749cacfc825753928f66e755758d - Fix to improve handling messages in environments heavily backed up. Previously, notification handlers greedily grabbed messages from queues which could cause ordering issues. A fix was applied to sequentially process messages in a single thread to prevent ordering issues. .. releasenotes/notes/unify-timestamp-of-polled-data-fbfcff43cd2d04bc.yaml @ 8dd821a03dcff45258251bebfd2beb86c07d94f7 - [`bug 1491509 `_] Patch to unify timestamp in samples polled by pollsters. Set the time point polling starts as timestamp of samples, and drop timetamping in pollsters. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/source/ocata.rst0000664000175100017510000000021015033033467021600 0ustar00mylesmyles=========================== Ocata Series Release Notes =========================== .. release-notes:: :branch: origin/stable/ocata ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/source/pike.rst0000664000175100017510000000021715033033467021450 0ustar00mylesmyles=================================== Pike Series Release Notes =================================== .. release-notes:: :branch: stable/pike ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/source/queens.rst0000664000175100017510000000022315033033467022015 0ustar00mylesmyles=================================== Queens Series Release Notes =================================== .. 
release-notes:: :branch: stable/queens ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/source/rocky.rst0000664000175100017510000000022115033033467021642 0ustar00mylesmyles=================================== Rocky Series Release Notes =================================== .. release-notes:: :branch: stable/rocky ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/source/stein.rst0000664000175100017510000000022115033033467021635 0ustar00mylesmyles=================================== Stein Series Release Notes =================================== .. release-notes:: :branch: stable/stein ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/source/train.rst0000664000175100017510000000017615033033467021641 0ustar00mylesmyles========================== Train Series Release Notes ========================== .. release-notes:: :branch: stable/train ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/source/unreleased.rst0000664000175100017510000000015615033033467022651 0ustar00mylesmyles============================= Current Series Release Notes ============================= .. release-notes:: ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/source/ussuri.rst0000664000175100017510000000020215033033467022044 0ustar00mylesmyles=========================== Ussuri Series Release Notes =========================== .. 
release-notes:: :branch: stable/ussuri ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/source/victoria.rst0000664000175100017510000000022015033033467022332 0ustar00mylesmyles============================= Victoria Series Release Notes ============================= .. release-notes:: :branch: unmaintained/victoria ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/source/wallaby.rst0000664000175100017510000000021415033033467022150 0ustar00mylesmyles============================ Wallaby Series Release Notes ============================ .. release-notes:: :branch: unmaintained/wallaby ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/source/xena.rst0000664000175100017510000000020015033033467021443 0ustar00mylesmyles========================= Xena Series Release Notes ========================= .. release-notes:: :branch: unmaintained/xena ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/source/yoga.rst0000664000175100017510000000020015033033467021447 0ustar00mylesmyles========================= Yoga Series Release Notes ========================= .. release-notes:: :branch: unmaintained/yoga ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/releasenotes/source/zed.rst0000664000175100017510000000017415033033467021304 0ustar00mylesmyles======================== Zed Series Release Notes ======================== .. 
release-notes:: :branch: unmaintained/zed ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/reno.yaml0000664000175100017510000000023015033033467015617 0ustar00mylesmyles--- # Ignore the kilo-eol tag because that branch does not work with reno # and contains no release notes. closed_branch_tag_re: "(.+)(?=0.13.0 # MIT License cachetools>=2.1.0 # MIT License cotyledon>=1.3.0 #Apache-2.0 futurist>=1.8.0 # Apache-2.0 jsonpath-rw-ext>=1.1.3 # Apache-2.0 lxml>=4.5.1 # BSD msgpack>=0.5.2 # Apache-2.0 oslo.concurrency>=3.29.0 # Apache-2.0 oslo.config>=8.6.0 # Apache-2.0 oslo.i18n>=3.15.3 # Apache-2.0 oslo.log>=3.36.0 # Apache-2.0 oslo.reports>=1.18.0 # Apache-2.0 oslo.rootwrap>=2.0.0 # Apache-2.0 pbr>=2.0.0 # Apache-2.0 oslo.messaging>=10.3.0 # Apache-2.0 oslo.upgradecheck>=0.1.1 # Apache-2.0 oslo.utils>=4.7.0 # Apache-2.0 oslo.privsep>=1.32.0 # Apache-2.0 python-glanceclient>=2.8.0 # Apache-2.0 python-keystoneclient>=3.18.0 # Apache-2.0 keystoneauth1>=3.18.0 # Apache-2.0 python-neutronclient>=6.7.0 # Apache-2.0 python-novaclient>=9.1.0 # Apache-2.0 python-swiftclient>=3.2.0 # Apache-2.0 python-cinderclient>=3.3.0 # Apache-2.0 PyYAML>=5.1 # MIT requests>=2.25.1 # Apache-2.0 stevedore>=1.20.0 # Apache-2.0 tenacity>=6.3.1 # Apache-2.0 tooz>=1.47.0 # Apache-2.0 oslo.cache>=1.26.0 # Apache-2.0 gnocchiclient>=7.0.0 # Apache-2.0 python-zaqarclient>=1.3.0 # Apache-2.0 prometheus_client>=0.20.0 # Apache-2.0 requests-aws>=0.1.4 # BSD License (3 clause) aodhclient>=3.8.0 # Apache-2.0 ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.8099413 ceilometer-24.1.0.dev59/setup.cfg0000664000175100017510000002405215033033521015610 0ustar00mylesmyles[metadata] name = ceilometer url = http://launchpad.net/ceilometer summary = OpenStack Telemetry description_file = README.rst author = OpenStack author_email = openstack-discuss@lists.openstack.org home_page = 
https://docs.openstack.org/ceilometer/latest/ python_requires = >=3.10 classifier = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: Implementation :: CPython Programming Language :: Python :: 3 :: Only Programming Language :: Python :: 3 Programming Language :: Python :: 3.10 Programming Language :: Python :: 3.11 Programming Language :: Python :: 3.12 Topic :: System :: Monitoring [files] packages = ceilometer data_files = etc/ceilometer = etc/ceilometer/* [entry_points] ceilometer.notification.pipeline = meter = ceilometer.pipeline.sample:SamplePipelineManager event = ceilometer.pipeline.event:EventPipelineManager ceilometer.sample.endpoint = http.request = ceilometer.middleware:HTTPRequest http.response = ceilometer.middleware:HTTPResponse hardware.ipmi.temperature = ceilometer.ipmi.notifications.ironic:TemperatureSensorNotification hardware.ipmi.voltage = ceilometer.ipmi.notifications.ironic:VoltageSensorNotification hardware.ipmi.current = ceilometer.ipmi.notifications.ironic:CurrentSensorNotification hardware.ipmi.fan = ceilometer.ipmi.notifications.ironic:FanSensorNotification _sample = ceilometer.telemetry.notifications:TelemetryIpc meter = ceilometer.meter.notifications:ProcessMeterNotifications ceilometer.discover.compute = local_instances = ceilometer.compute.discovery:InstanceDiscovery local_node = ceilometer.polling.discovery.localnode:LocalNodeDiscovery ceilometer.discover.central = barbican = ceilometer.polling.discovery.non_openstack_credentials_discovery:NonOpenStackCredentialsDiscovery endpoint = ceilometer.polling.discovery.endpoint:EndpointDiscovery tenant = ceilometer.polling.discovery.tenant:TenantDiscovery vpn_services = ceilometer.network.services.discovery:VPNServicesDiscovery ipsec_connections = 
ceilometer.network.services.discovery:IPSecConnectionsDiscovery fw_services = ceilometer.network.services.discovery:FirewallDiscovery fw_policy = ceilometer.network.services.discovery:FirewallPolicyDiscovery fip_services = ceilometer.network.services.discovery:FloatingIPDiscovery images = ceilometer.image.discovery:ImagesDiscovery volumes = ceilometer.volume.discovery:VolumeDiscovery volume_pools = ceilometer.volume.discovery:VolumePoolsDiscovery volume_snapshots = ceilometer.volume.discovery:VolumeSnapshotsDiscovery volume_backups = ceilometer.volume.discovery:VolumeBackupsDiscovery alarm = ceilometer.alarm.discovery:AlarmDiscovery ceilometer.discover.ipmi = local_node = ceilometer.polling.discovery.localnode:LocalNodeDiscovery ceilometer.poll.compute = disk.device.read.requests = ceilometer.compute.pollsters.disk:PerDeviceReadRequestsPollster disk.device.write.requests = ceilometer.compute.pollsters.disk:PerDeviceWriteRequestsPollster disk.device.read.bytes = ceilometer.compute.pollsters.disk:PerDeviceReadBytesPollster disk.device.write.bytes = ceilometer.compute.pollsters.disk:PerDeviceWriteBytesPollster disk.device.read.latency = ceilometer.compute.pollsters.disk:PerDeviceDiskReadLatencyPollster disk.device.write.latency = ceilometer.compute.pollsters.disk:PerDeviceDiskWriteLatencyPollster power.state = ceilometer.compute.pollsters.instance_stats:PowerStatePollster cpu = ceilometer.compute.pollsters.instance_stats:CPUPollster network.incoming.bytes = ceilometer.compute.pollsters.net:IncomingBytesPollster network.incoming.packets = ceilometer.compute.pollsters.net:IncomingPacketsPollster network.outgoing.bytes = ceilometer.compute.pollsters.net:OutgoingBytesPollster network.outgoing.packets = ceilometer.compute.pollsters.net:OutgoingPacketsPollster network.incoming.bytes.rate = ceilometer.compute.pollsters.net:IncomingBytesRatePollster network.outgoing.bytes.rate = ceilometer.compute.pollsters.net:OutgoingBytesRatePollster network.incoming.bytes.delta = 
ceilometer.compute.pollsters.net:IncomingBytesDeltaPollster network.outgoing.bytes.delta = ceilometer.compute.pollsters.net:OutgoingBytesDeltaPollster network.incoming.packets.drop = ceilometer.compute.pollsters.net:IncomingDropPollster network.outgoing.packets.drop = ceilometer.compute.pollsters.net:OutgoingDropPollster network.incoming.packets.error = ceilometer.compute.pollsters.net:IncomingErrorsPollster network.outgoing.packets.error = ceilometer.compute.pollsters.net:OutgoingErrorsPollster memory.usage = ceilometer.compute.pollsters.instance_stats:MemoryUsagePollster memory.resident = ceilometer.compute.pollsters.instance_stats:MemoryResidentPollster memory.swap.in = ceilometer.compute.pollsters.instance_stats:MemorySwapInPollster memory.swap.out = ceilometer.compute.pollsters.instance_stats:MemorySwapOutPollster disk.device.capacity = ceilometer.compute.pollsters.disk:PerDeviceCapacityPollster disk.device.allocation = ceilometer.compute.pollsters.disk:PerDeviceAllocationPollster disk.device.usage = ceilometer.compute.pollsters.disk:PerDevicePhysicalPollster disk.ephemeral.size = ceilometer.compute.pollsters.disk:EphemeralSizePollster disk.root.size = ceilometer.compute.pollsters.disk:RootSizePollster perf.cpu.cycles = ceilometer.compute.pollsters.instance_stats:PerfCPUCyclesPollster perf.instructions = ceilometer.compute.pollsters.instance_stats:PerfInstructionsPollster perf.cache.references = ceilometer.compute.pollsters.instance_stats:PerfCacheReferencesPollster perf.cache.misses = ceilometer.compute.pollsters.instance_stats:PerfCacheMissesPollster ceilometer.poll.ipmi = hardware.ipmi.temperature = ceilometer.ipmi.pollsters.sensor:TemperatureSensorPollster hardware.ipmi.voltage = ceilometer.ipmi.pollsters.sensor:VoltageSensorPollster hardware.ipmi.current = ceilometer.ipmi.pollsters.sensor:CurrentSensorPollster hardware.ipmi.fan = ceilometer.ipmi.pollsters.sensor:FanSensorPollster hardware.ipmi.power = ceilometer.ipmi.pollsters.sensor:PowerSensorPollster 
ceilometer.poll.central = alarm.evaluation_result = ceilometer.alarm.aodh:EvaluationResultPollster ip.floating = ceilometer.network.floatingip:FloatingIPPollster image.size = ceilometer.image.glance:ImageSizePollster radosgw.containers.objects = ceilometer.objectstore.rgw:ContainersObjectsPollster radosgw.containers.objects.size = ceilometer.objectstore.rgw:ContainersSizePollster radosgw.objects = ceilometer.objectstore.rgw:ObjectsPollster radosgw.objects.size = ceilometer.objectstore.rgw:ObjectsSizePollster radosgw.objects.containers = ceilometer.objectstore.rgw:ObjectsContainersPollster radosgw.usage = ceilometer.objectstore.rgw:UsagePollster storage.containers.objects = ceilometer.objectstore.swift:ContainersObjectsPollster storage.containers.objects.size = ceilometer.objectstore.swift:ContainersSizePollster storage.objects = ceilometer.objectstore.swift:ObjectsPollster storage.objects.size = ceilometer.objectstore.swift:ObjectsSizePollster storage.objects.containers = ceilometer.objectstore.swift:ObjectsContainersPollster network.services.vpn = ceilometer.network.services.vpnaas:VPNServicesPollster network.services.vpn.connections = ceilometer.network.services.vpnaas:IPSecConnectionsPollster network.services.firewall = ceilometer.network.services.fwaas:FirewallPollster network.services.firewall.policy = ceilometer.network.services.fwaas:FirewallPolicyPollster volume.size = ceilometer.volume.cinder:VolumeSizePollster volume.snapshot.size = ceilometer.volume.cinder:VolumeSnapshotSize volume.backup.size = ceilometer.volume.cinder:VolumeBackupSize volume.provider.pool.capacity.total = ceilometer.volume.cinder:VolumeProviderPoolCapacityTotal volume.provider.pool.capacity.free = ceilometer.volume.cinder:VolumeProviderPoolCapacityFree volume.provider.pool.capacity.provisioned = ceilometer.volume.cinder:VolumeProviderPoolCapacityProvisioned volume.provider.pool.capacity.virtual_free = ceilometer.volume.cinder:VolumeProviderPoolCapacityVirtualFree 
volume.provider.pool.capacity.allocated = ceilometer.volume.cinder:VolumeProviderPoolCapacityAllocated ceilometer.compute.virt = libvirt = ceilometer.compute.virt.libvirt.inspector:LibvirtInspector ceilometer.sample.publisher = test = ceilometer.publisher.test:TestPublisher notifier = ceilometer.publisher.messaging:SampleNotifierPublisher udp = ceilometer.publisher.udp:UDPPublisher tcp = ceilometer.publisher.tcp:TCPPublisher file = ceilometer.publisher.file:FilePublisher http = ceilometer.publisher.http:HttpPublisher prometheus = ceilometer.publisher.prometheus:PrometheusPublisher https = ceilometer.publisher.http:HttpPublisher gnocchi = ceilometer.publisher.gnocchi:GnocchiPublisher zaqar = ceilometer.publisher.zaqar:ZaqarPublisher opentelemetryhttp = ceilometer.publisher.opentelemetry_http:OpentelemetryHttpPublisher ceilometer.event.publisher = test = ceilometer.publisher.test:TestPublisher notifier = ceilometer.publisher.messaging:EventNotifierPublisher http = ceilometer.publisher.http:HttpPublisher https = ceilometer.publisher.http:HttpPublisher gnocchi = ceilometer.publisher.gnocchi:GnocchiPublisher zaqar = ceilometer.publisher.zaqar:ZaqarPublisher file = ceilometer.publisher.file:FilePublisher ceilometer.event.trait_plugin = split = ceilometer.event.trait_plugins:SplitterTraitPlugin bitfield = ceilometer.event.trait_plugins:BitfieldTraitPlugin timedelta = ceilometer.event.trait_plugins:TimedeltaPlugin map = ceilometer.event.trait_plugins:MapTraitPlugin console_scripts = ceilometer-polling = ceilometer.cmd.polling:main ceilometer-agent-notification = ceilometer.cmd.agent_notification:main ceilometer-send-sample = ceilometer.cmd.sample:send_sample ceilometer-upgrade = ceilometer.cmd.storage:upgrade ceilometer-rootwrap = oslo_rootwrap.cmd:main ceilometer-status = ceilometer.cmd.status:main oslo.config.opts = ceilometer = ceilometer.opts:list_opts ceilometer-auth = ceilometer.opts:list_keystoneauth_opts [egg_info] tag_build = tag_date = 0 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/setup.py0000664000175100017510000000127115033033467015510 0ustar00mylesmyles# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import setuptools setuptools.setup( setup_requires=['pbr>=2.0.0'], pbr=True) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/test-requirements.txt0000664000175100017510000000043515033033467020240 0ustar00mylesmylescoverage>=4.4.1 # Apache-2.0 fixtures>=3.0.0 # Apache-2.0/BSD oslo.messaging[kafka]>=8.0.0 # Apache-2.0 oslotest>=3.8.0 # Apache-2.0 testscenarios>=0.4 # Apache-2.0/BSD testtools>=2.2.0 # MIT stestr>=2.0.0 # Apache-2.0 testresources>=2.0.1 # Apache-2.0 libvirt-python>=6.0.0 # LGPLv2+ ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1751922512.8099413 ceilometer-24.1.0.dev59/tools/0000775000175100017510000000000015033033521015124 5ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/tools/__init__.py0000664000175100017510000000000015033033467017234 0ustar00mylesmyles././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 ceilometer-24.1.0.dev59/tools/send_test_data.py0000775000175100017510000001110215033033467020466 0ustar00mylesmyles#!/usr/bin/env 
python3 # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Command line tool for sending test data for Ceilometer via oslo.messaging. Usage: Send messages with samples generated by make_test_data source .tox/py27/bin/activate ./tools/send_test_data.py --count 1000 --resources_count 10 --topic metering """ import argparse import datetime import functools import json import random import uuid import make_test_data import oslo_messaging from oslo_utils import timeutils from ceilometer import messaging from ceilometer.publisher import utils from ceilometer import service def send_batch_notifier(notifier, topic, batch): notifier.sample({}, event_type=topic, payload=batch) def get_notifier(conf): return oslo_messaging.Notifier( messaging.get_transport(conf), driver='messagingv2', publisher_id='telemetry.publisher.test', topics=['metering'], ) def generate_data(conf, send_batch, make_data_args, samples_count, batch_size, resources_count, topic): make_data_args.interval = 1 make_data_args.start = (timeutils.utcnow() - datetime.timedelta(minutes=samples_count)) make_data_args.end = timeutils.utcnow() make_data_args.resource_id = None resources_list = [str(uuid.uuid4()) for _ in range(resources_count)] resource_samples = {resource: 0 for resource in resources_list} batch = [] count = 0 for sample in make_test_data.make_test_data(conf, **make_data_args.__dict__): count += 1 resource = resources_list[random.randint(0, len(resources_list) - 1)] resource_samples[resource] += 1 
sample['resource_id'] = resource # need to change the timestamp from datetime.datetime type to iso # format (unicode type), because collector will change iso format # timestamp to datetime.datetime type before recording to db. sample['timestamp'] = sample['timestamp'].isoformat() # need to recalculate signature because of the resource_id change sig = utils.compute_signature(sample, conf.publisher.telemetry_secret) sample['message_signature'] = sig batch.append(sample) if len(batch) == batch_size: send_batch(topic, batch) batch = [] if count == samples_count: send_batch(topic, batch) return resource_samples send_batch(topic, batch) return resource_samples def get_parser(): parser = argparse.ArgumentParser() parser.add_argument( '--batch-size', dest='batch_size', type=int, default=100 ) parser.add_argument( '--config-file', default='/etc/ceilometer/ceilometer.conf' ) parser.add_argument( '--topic', default='perfmetering' ) parser.add_argument( '--samples-count', dest='samples_count', type=int, default=1000 ) parser.add_argument( '--resources-count', dest='resources_count', type=int, default=100 ) parser.add_argument( '--result-directory', dest='result_dir', default='/tmp' ) return parser def main(): args = get_parser().parse_known_args()[0] make_data_args = make_test_data.get_parser().parse_known_args()[0] conf = service.prepare_service(argv=['/', '--config-file', args.config_file]) notifier = get_notifier(conf) send_batch = functools.partial(send_batch_notifier, notifier) result_dir = args.result_dir del args.config_file del args.result_dir resource_writes = generate_data(conf, send_batch, make_data_args, **args.__dict__) result_file = "{}/sample-by-resource-{}".format(result_dir, random.getrandbits(32)) with open(result_file, 'w') as f: f.write(json.dumps(resource_writes)) return result_file if __name__ == '__main__': main() ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1751922487.0 
ceilometer-24.1.0.dev59/tox.ini0000664000175100017510000000476615033033467015325 0ustar00mylesmyles[tox] minversion = 3.18.0 envlist = py3{9,12},pep8 ignore_basepython_conflict=true [testenv] basepython = python3 deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt usedevelop = True setenv = CEILOMETER_TEST_BACKEND={env:CEILOMETER_TEST_BACKEND:none} passenv = OS_TEST_TIMEOUT OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_LOG_CAPTURE CEILOMETER_* commands = stestr run {posargs} oslo-config-generator --config-file=etc/ceilometer/ceilometer-config-generator.conf allowlist_externals = bash [testenv:cover] setenv = PYTHON=coverage run --source ceilometer --parallel-mode commands = stestr run {posargs} coverage combine coverage html -d cover coverage xml -o cover/coverage.xml [testenv:pep8] skip_install = true deps = pre-commit commands = pre-commit run -a [testenv:releasenotes] deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/doc/requirements.txt commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html [testenv:genconfig] commands = oslo-config-generator --config-file=etc/ceilometer/ceilometer-config-generator.conf [testenv:docs] deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/requirements.txt -r{toxinidir}/doc/requirements.txt commands = sphinx-build --keep-going -b html -j auto doc/source doc/build/html setenv = PYTHONHASHSEED=0 [testenv:pdf-docs] deps = {[testenv:docs]deps} allowlist_externals = make commands = sphinx-build -W -b latex doc/source doc/build/pdf make -C doc/build/pdf [testenv:debug] allowlist_externals = find commands = find . 
-type f -name "*.pyc" -delete oslo_debug_helper {posargs} [testenv:venv] commands = {posargs} setenv = PYTHONHASHSEED=0 [doc8] ignore = D000 ignore-path = .venv,.git,.tox,*ceilometer/locale*,*lib/python*,ceilometer.egg*,doc/build,doc/source/api,releasenotes/* [flake8] # E123 closing bracket does not match indentation of opening bracket's line # W503 line break before binary operator # W504 line break after binary operator ignore = E123,W503,W504 exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,install-guide show-source = True [hacking] import_exceptions = ceilometer.i18n [flake8:local-plugins] extension = C301 = checks:no_log_warn C302 = checks:no_os_popen paths = ./ceilometer/hacking