pax_global_header 0000666 0000000 0000000 00000000064 15026774211 0014517 g ustar 00root root 0000000 0000000 52 comment=43b276224e5ddf639670aef50113292fe0e022e7
django-prometheus-2.4.1/ 0000775 0000000 0000000 00000000000 15026774211 0015156 5 ustar 00root root 0000000 0000000 django-prometheus-2.4.1/.github/ 0000775 0000000 0000000 00000000000 15026774211 0016516 5 ustar 00root root 0000000 0000000 django-prometheus-2.4.1/.github/workflows/ 0000775 0000000 0000000 00000000000 15026774211 0020553 5 ustar 00root root 0000000 0000000 django-prometheus-2.4.1/.github/workflows/ci.yml 0000664 0000000 0000000 00000004344 15026774211 0021676 0 ustar 00root root 0000000 0000000 name: CI
on:
push:
branches:
- "*"
pull_request:
branches:
- master
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}
cancel-in-progress: true
jobs:
test:
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
os: [ubuntu-24.04]
runs-on: ${{ matrix.os }}
name: "${{ matrix.os }} Python: ${{ matrix.python-version }}"
services:
redis:
image: redis:8.0-alpine
ports:
- 6379:6379
memcached:
image: memcached:1.6-alpine
ports:
- 11211:11211
mysql:
image: mysql:9.3.0
env:
MYSQL_ALLOW_EMPTY_PASSWORD: yes
ports:
- 3306:3306
postgresql:
image: postgis/postgis:17-3.5-alpine
env:
POSTGRES_HOST_AUTH_METHOD: trust
ports:
- 5432:5432
steps:
- name: Install OS Packages
run: |
sudo apt-get update
sudo apt-get install binutils libproj-dev gdal-bin libmemcached-dev libsqlite3-mod-spatialite
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
pip install -U "pip>=23.1.1"
pip install -U "tox-gh-actions==3.3.0" coverage
- name: Log versions
run: |
python --version
pip --version
psql -V
mysql -V
- name: prep DB
env:
MYSQL_TCP_PORT: 3306
MYSQL_HOST: localhost
PGHOST: localhost
PGPORT: 5432
run: |
psql -U postgres -c 'CREATE DATABASE postgis'
psql -U postgres postgis -c 'CREATE EXTENSION IF NOT EXISTS postgis;'
mysql --protocol=TCP --user=root -e 'create database django_prometheus_1;'
- name: Run test and linters via Tox
run: tox
- name: Process code coverage
run: |
coverage combine .coverage django_prometheus/tests/end2end/.coverage
coverage xml
django-prometheus-2.4.1/.github/workflows/pre-release.yml 0000664 0000000 0000000 00000002336 15026774211 0023506 0 ustar 00root root 0000000 0000000 name: Pre-Release
on:
push:
branches:
- "master"
jobs:
pre-release-django-prometheus-job:
runs-on: ubuntu-latest
name: pre-release django-prometheus
if: ${{ github.repository_owner == 'django-commons' }}
permissions:
id-token: write
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Check if version is dev
run: |
if ! grep -q "dev" django_prometheus/__init__.py; then
echo "Version does not contain 'dev', skipping pre-release"
exit 1
else
echo "Version contains 'dev', proceeding with pre-release"
fi
- name: Set up Python 3.9
uses: actions/setup-python@v5
with:
python-version: 3.9
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install wheel setuptools packaging twine build --upgrade
- name: Set version number
run: python update_version_from_git.py
- name: Build
run: python -m build
- name: Publish to PyPI
uses: pypa/gh-action-pypi-publish@v1.12.4
with:
skip-existing: true
verbose: true
print-hash: true
django-prometheus-2.4.1/.github/workflows/release.yml 0000664 0000000 0000000 00000003500 15026774211 0022714 0 ustar 00root root 0000000 0000000 name: Release To PyPI
on:
push:
tags:
- v[0-9]+.[0-9]+.[0-9]+
jobs:
org-check:
name: Check GitHub Organization
if: ${{ github.repository_owner == 'django-commons' }}
runs-on: ubuntu-latest
steps:
- name: Noop
run: "true"
determine-tag:
name: Determine the release tag to operate against.
needs: org-check
runs-on: ubuntu-latest
outputs:
release-tag: ${{ steps.determine-tag.outputs.release-tag }}
release-version: ${{ steps.determine-tag.outputs.release-version }}
steps:
- name: Determine Tag
id: determine-tag
run: |
RELEASE_TAG=${GITHUB_REF#refs/tags/}
echo "Release tag: ${RELEASE_TAG}"
if [[ "${RELEASE_TAG}" =~ ^v[0-9]+.[0-9]+.[0-9]+$ ]]; then
echo "release-tag=${RELEASE_TAG}" >> $GITHUB_OUTPUT
echo "release-version=${RELEASE_TAG#v}" >> $GITHUB_OUTPUT
else
echo "::error::Release tag '${RELEASE_TAG}' must match 'v\d+.\d+.\d+'."
exit 1
fi
release-django-prometheus-job:
runs-on: ubuntu-latest
name: Release Django-Promethues
needs: determine-tag
permissions:
id-token: write
steps:
- uses: actions/checkout@v4
with:
ref: ${{ needs.determine-tag.outputs.release-tag }}
fetch-depth: 0
- name: Set up Python 3.9
uses: actions/setup-python@v5
with:
python-version: 3.9
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install wheel setuptools packaging twine build --upgrade
- name: Build
run: python -m build
- name: Publish to PyPI
uses: pypa/gh-action-pypi-publish@v1.12.4
with:
skip-existing: true
verbose: true
print-hash: true
django-prometheus-2.4.1/.gitignore 0000664 0000000 0000000 00000002146 15026774211 0017151 0 ustar 00root root 0000000 0000000 # Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
# C extensions
*.so
# Distribution / packaging
.Python
env*/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*,cover
# Translations
*.mo
*.pot
# Django stuff:
*.log
*.sqlite3
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# VSCode
.vscode/
### Emacs ###
# -*- mode: gitignore; -*-
*~
\#*\#
/.emacs.desktop
/.emacs.desktop.lock
*.elc
auto-save-list
tramp
.\#*
# Org-mode
.org-id-locations
*_archive
# flymake-mode
*_flymake.*
# eshell files
/eshell/history
/eshell/lastdir
# elpa packages
/elpa/
# reftex files
*.rel
# AUCTeX auto folder
/auto/
# cask packages
.cask/
# venv
venv/
### Prometheus ###
examples/prometheus/data
django-prometheus-2.4.1/.pre-commit-config.yaml 0000664 0000000 0000000 00000001424 15026774211 0021440 0 ustar 00root root 0000000 0000000 repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v5.0.0
hooks:
- id: check-case-conflict
- id: check-merge-conflict
- id: check-yaml
- id: end-of-file-fixer
- id: trailing-whitespace
- repo: https://github.com/adamchainz/django-upgrade
rev: '1.25.0'
hooks:
- id: django-upgrade
args: [--target-version, '4.2']
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.11.13
hooks:
- id: ruff-check
name: ruff check
args: ['--fix']
- id: ruff-format
name: ruff format
- repo: https://github.com/asottile/pyupgrade
rev: v3.20.0
hooks:
- id: pyupgrade
- repo: https://github.com/google/yamlfmt
rev: v0.17.0
hooks:
- id: yamlfmt
django-prometheus-2.4.1/CHANGELOG.md 0000664 0000000 0000000 00000004051 15026774211 0016767 0 ustar 00root root 0000000 0000000 # Changelog
## v2.4.1 - June 25th, 2025
* Add Django version to install requirements.
## v2.4.0 - June 18th, 2025
* Add support for Django 5.0 and Python 3.12.
* Replace black, flake8 and isort with Ruff
* Drop support for Django 3.2 (Python 3.7), 4.0 and 4.1
* Project moved to the [Django Commons](https://github.com/django-commons) GitHub organization.
* Add pyupgrade and yamlfmt pre-commit hooks
## v2.3.1 - May 2nd, 2023
* Fix postgresql provider import, Thanks [@wilsonehusin](https://github.com/korfuri/django-prometheus/pull/402)
## v2.3.0 - May 2nd, 2023
* Remove support for Python 3.6, Django versions older tha than 3.2
* Fix two latency metrics not using PROMETHEUS_LATENCY_BUCKETS setting, Thanks [@AleksaC](https://github.com/korfuri/django-prometheus/pull/343)
* Support new cache backend names in newer Django versions, Thanks [@tneuct](https://github.com/korfuri/django-prometheus/pull/329)
* Make export of migrations False by default, Thanks [@kaypee90](https://github.com/korfuri/django-prometheus/pull/313)
* Add support for Django 4.1, Python 3.11
* Add support for Django 4.2 and Psycopg 3
## v2.2.0 - December 19, 2021
* Switch to Github Actions CI, remove travis-ci.
* Add support for Django 3.2 & 4.0 and Python 3.9 & 3.10
## v2.1.0 - August 22, 2020
* Remove support for older django and python versions
* Add support for Django 3.0 and Django 3.1
* Add support for [PostGIS](https://github.com/korfuri/django-prometheus/pull/221), Thanks [@EverWinter23](https://github.com/EverWinter23)
## v2.0.0 - Jan 20, 2020
* Added support for newer Django and Python versions
* Added an extensibility that applications to add their own labels to middleware (request/response) metrics
* Allow overriding and setting custom bucket values for request/response latency histogram metric
* Internal improvements:
* use tox
* Use pytest
* use Black
* Automate pre-releases on every commit ot master
* Fix flaky tests.
## v1.1.0 - Sep 28, 2019
* maintenance release that updates this library to support recent and supported version of python & Django
django-prometheus-2.4.1/CODE_OF_CONDUCT.md 0000664 0000000 0000000 00000012147 15026774211 0017762 0 ustar 00root root 0000000 0000000
# Django Commons Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, caste, color, religion, or sexual
identity and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the overall
community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or advances of
any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email address,
without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official email address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
[django-commons-coc@googlegroups.com](mailto:django-commons-coc@googlegroups.com).
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Warning
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or permanent
ban.
### 2. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 3. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within the
community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.1, available at
[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
Community Impact Guidelines were inspired by
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
For answers to common questions about this code of conduct, see the FAQ at
[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
[https://www.contributor-covenant.org/translations][translations].
[homepage]: https://www.contributor-covenant.org
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
[Mozilla CoC]: https://github.com/mozilla/diversity
[FAQ]: https://www.contributor-covenant.org/faq
[translations]: https://www.contributor-covenant.org/translations
django-prometheus-2.4.1/CONTRIBUTING.md 0000664 0000000 0000000 00000003563 15026774211 0017416 0 ustar 00root root 0000000 0000000 # Contributing
## Git
Feel free to send pull requests, even for the tiniest things. Watch
for Travis' opinion on them ([](https://travis-ci.org/korfuri/django-prometheus)).
Travis will also make sure your code is pep8 compliant, and it's a
good idea to run flake8 as well (on django_prometheus/ and on
tests/). The code contains "unused" imports on purpose so flake8 isn't
run automatically.
## Tests
Please write unit tests for your change. There are two kinds of tests:
* Regular unit tests that test the code directly, without loading
Django. This is limited to pieces of the code that don't depend on
Django, since a lot of the Django code will require a full Django
environment (anything that interacts with models, for instance,
needs a full database configuration).
* End-to-end tests are Django unit tests in a test application. The
test application doubles as an easy way to interactively test your
changes. It uses most of the basic Django features and a few
advanced features, so you can test things for yourself.
### Running all tests
```shell
python setup.py test
cd tests/end2end/ && PYTHONPATH=../.. ./manage.py test
```
The former runs the regular unit tests, the latter runs the Django
unit test.
To avoid setting PYTHONPATH every time, you can also run `python
setup.py install`.
### Running the test Django app
```shell
cd tests/end2end/ && PYTHONPATH=../.. ./manage.py runserver
```
By default, this will start serving on http://localhost:8000/. Metrics
are available at `/metrics`.
## Running Prometheus
See for instructions on installing
Prometheus. Once you have Prometheus installed, you can use the
example rules and dashboard in `examples/prometheus/`. See
`examples/prometheus/README.md` to run Prometheus and view the example
dashboard.
django-prometheus-2.4.1/LICENSE 0000664 0000000 0000000 00000026135 15026774211 0016172 0 ustar 00root root 0000000 0000000 Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
django-prometheus-2.4.1/MANIFEST.in 0000664 0000000 0000000 00000000042 15026774211 0016710 0 ustar 00root root 0000000 0000000 include LICENSE
include README.md
django-prometheus-2.4.1/README.md 0000664 0000000 0000000 00000017547 15026774211 0016453 0 ustar 00root root 0000000 0000000 # django-prometheus
Export Django monitoring metrics for Prometheus.io
[](https://gitter.im/django-prometheus/community?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[](http://badge.fury.io/py/django-prometheus)
[](https://github.com/korfuri/django-prometheus/actions/workflows/ci.yml)
[](https://coveralls.io/github/korfuri/django-prometheus?branch=master)
[](https://pypi.python.org/pypi/django-prometheus)
## Features
This library provides Prometheus metrics for Django related operations:
* Requests & Responses
* Database access done via [Django ORM](https://docs.djangoproject.com/en/3.2/topics/db/)
* Cache access done via [Django Cache framework](https://docs.djangoproject.com/en/3.2/topics/cache/)
## Usage
### Requirements
* Django >= 4.2
* Python 3.9 and above.
### Installation
Install with:
```shell
pip install django-prometheus
```
Or, if you're using a development version cloned from this repository:
```shell
python path-to-where-you-cloned-django-prometheus/setup.py install
```
This will install [prometheus_client](https://github.com/prometheus/client_python) as a dependency.
### Quickstart
In your settings.py:
```python
INSTALLED_APPS = [
...
'django_prometheus',
...
]
MIDDLEWARE = [
'django_prometheus.middleware.PrometheusBeforeMiddleware',
# All your other middlewares go here, including the default
# middlewares like SessionMiddleware, CommonMiddleware,
# CsrfViewmiddleware, SecurityMiddleware, etc.
'django_prometheus.middleware.PrometheusAfterMiddleware',
]
```
In your urls.py:
```python
urlpatterns = [
...
path('', include('django_prometheus.urls')),
]
```
### Configuration
Prometheus uses Histogram based grouping for monitoring latencies. The default
buckets are:
```python
PROMETHEUS_LATENCY_BUCKETS = (0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1.0, 2.5, 5.0, 7.5, 10.0, 25.0, 50.0, 75.0, float("inf"),)
```
You can define custom buckets for latency, adding more buckets decreases performance but
increases accuracy:
```python
PROMETHEUS_LATENCY_BUCKETS = (.1, .2, .5, .6, .8, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.5, 9.0, 12.0, 15.0, 20.0, 30.0, float("inf"))
```
---
You can have a custom namespace for your metrics:
```python
PROMETHEUS_METRIC_NAMESPACE = "project"
```
This will prefix all metrics with `project_` word like this:
```text
project_django_http_requests_total_by_method_total{method="GET"} 1.0
```
### Monitoring your databases
SQLite, MySQL, and PostgreSQL databases can be monitored. Just
replace the `ENGINE` property of your database, replacing
`django.db.backends` with `django_prometheus.db.backends`.
```python
DATABASES = {
'default': {
'ENGINE': 'django_prometheus.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
},
}
```
### Monitoring your caches
Filebased, memcached, redis caches can be monitored. Just replace
the cache backend to use the one provided by django_prometheus
`django.core.cache.backends` with `django_prometheus.cache.backends`.
```python
CACHES = {
'default': {
'BACKEND': 'django_prometheus.cache.backends.filebased.FileBasedCache',
'LOCATION': '/var/tmp/django_cache',
}
}
```
### Monitoring your models
You may want to monitor the creation/deletion/update rate for your
model. This can be done by adding a mixin to them. This is safe to do
on existing models (it does not require a migration).
If your model is:
```python
class Dog(models.Model):
name = models.CharField(max_length=100, unique=True)
breed = models.CharField(max_length=100, blank=True, null=True)
age = models.PositiveIntegerField(blank=True, null=True)
```
Just add the `ExportModelOperationsMixin` as such:
```python
from django_prometheus.models import ExportModelOperationsMixin
class Dog(ExportModelOperationsMixin('dog'), models.Model):
name = models.CharField(max_length=100, unique=True)
breed = models.CharField(max_length=100, blank=True, null=True)
age = models.PositiveIntegerField(blank=True, null=True)
```
This will export 3 metrics, `django_model_inserts_total{model="dog"}`,
`django_model_updates_total{model="dog"}` and
`django_model_deletes_total{model="dog"}`.
Note that the exported metrics are counters of creations,
modifications and deletions done in the current process. They are not
gauges of the number of objects in the model.
Starting with Django 1.7, migrations are also monitored. Two gauges
are exported, `django_migrations_applied_by_connection` and
`django_migrations_unapplied_by_connection`. You may want to alert if
there are unapplied migrations.
If you want to disable the Django migration metrics, set the
`PROMETHEUS_EXPORT_MIGRATIONS` setting to False.
### Monitoring and aggregating the metrics
Prometheus is quite easy to set up. An example prometheus.conf to
scrape `127.0.0.1:8001` can be found in `examples/prometheus`.
Here's an example of a PromDash displaying some of the metrics
collected by django-prometheus:

## Adding your own metrics
You can add application-level metrics in your code by using
[prometheus_client](https://github.com/prometheus/client_python)
directly. The exporter is global and will pick up your metrics.
To add metrics to the Django internals, the easiest way is to extend
django-prometheus' classes. Please consider contributing your metrics,
pull requests are welcome. Make sure to read the Prometheus best
practices on
[instrumentation](http://prometheus.io/docs/practices/instrumentation/)
and [naming](http://prometheus.io/docs/practices/naming/).
## Importing Django Prometheus using only local settings
If you wish to use Django Prometheus but are not able to change
the code base, it's possible to have all the default metrics by
modifying only the settings.
First step is to inject prometheus' middlewares and to add
django_prometheus in INSTALLED_APPS
```python
MIDDLEWARE = \
['django_prometheus.middleware.PrometheusBeforeMiddleware'] + \
MIDDLEWARE + \
['django_prometheus.middleware.PrometheusAfterMiddleware']
INSTALLED_APPS += ['django_prometheus']
```
Second step is to create the /metrics end point, for that we need
another file (called urls_prometheus_wrapper.py in this example) that
will wrap the app's URLs and add one on top:
```python
from django.urls import include, path
urlpatterns = []
urlpatterns.append(path('prometheus/', include('django_prometheus.urls')))
urlpatterns.append(path('', include('myapp.urls')))
```
This file will add a "/prometheus/metrics" end point to the URLs of django
that will export the metrics (replace myapp by your project name).
Then we inject the wrapper in settings:
```python
ROOT_URLCONF = "graphite.urls_prometheus_wrapper"
```
## Adding custom labels to middleware (request/response) metrics
You can add application specific labels to metrics reported by the django-prometheus middleware.
This involves extending the classes defined in middleware.py.
* Extend the Metrics class and override the `register_metric` method to add the application specific labels.
* Extend middleware classes, set the metrics_cls class attribute to the extended metric class and override the label_metric method to attach custom metrics.
See implementation example in [the test app](django_prometheus/tests/end2end/testapp/test_middleware_custom_labels.py#L19-L46)
django-prometheus-2.4.1/django_prometheus/ 0000775 0000000 0000000 00000000000 15026774211 0020673 5 ustar 00root root 0000000 0000000 django-prometheus-2.4.1/django_prometheus/__init__.py 0000664 0000000 0000000 00000001022 15026774211 0022777 0 ustar 00root root 0000000 0000000 """Django-Prometheus
https://github.com/korfuri/django-prometheus
"""
# Import all files that define metrics. This has the effect that
# `import django_prometheus` will always instantiate all metric
# objects right away.
from django_prometheus import middleware, models
__all__ = ["middleware", "models", "pip_prometheus"]
__version__ = "2.4.1"
# Import pip_prometheus to export the pip metrics automatically.
try:
import pip_prometheus
except ImportError:
# If people don't have pip, don't export anything.
pass
django-prometheus-2.4.1/django_prometheus/apps.py 0000664 0000000 0000000 00000001653 15026774211 0022215 0 ustar 00root root 0000000 0000000 from django.apps import AppConfig
from django.conf import settings
import django_prometheus
from django_prometheus.exports import SetupPrometheusExportsFromConfig
from django_prometheus.migrations import ExportMigrations
class DjangoPrometheusConfig(AppConfig):
    # The app name must match the package so Django can import it.
    name = django_prometheus.__name__
    verbose_name = "Django-Prometheus"

    def ready(self):
        """Initializes the Prometheus exports if they are enabled in the config.

        Note that this is called even for other management commands
        than `runserver`. As such, it is possible to scrape the
        metrics of a running `manage.py test` or of another command,
        which shouldn't be done for real monitoring (since these jobs
        are usually short-lived), but can be useful for debugging.
        """
        # Opens the standalone exporter port(s) if configured in settings.
        SetupPrometheusExportsFromConfig()
        # Migration gauges are opt-in via PROMETHEUS_EXPORT_MIGRATIONS
        # (disabled by default).
        if getattr(settings, "PROMETHEUS_EXPORT_MIGRATIONS", False):
            ExportMigrations()
django-prometheus-2.4.1/django_prometheus/cache/ 0000775 0000000 0000000 00000000000 15026774211 0021736 5 ustar 00root root 0000000 0000000 django-prometheus-2.4.1/django_prometheus/cache/__init__.py 0000664 0000000 0000000 00000000000 15026774211 0024035 0 ustar 00root root 0000000 0000000 django-prometheus-2.4.1/django_prometheus/cache/backends/ 0000775 0000000 0000000 00000000000 15026774211 0023510 5 ustar 00root root 0000000 0000000 django-prometheus-2.4.1/django_prometheus/cache/backends/__init__.py 0000664 0000000 0000000 00000000000 15026774211 0025607 0 ustar 00root root 0000000 0000000 django-prometheus-2.4.1/django_prometheus/cache/backends/django_memcached_consul.py 0000664 0000000 0000000 00000001364 15026774211 0030701 0 ustar 00root root 0000000 0000000 from django_memcached_consul import memcached
from django_prometheus.cache.metrics import (
django_cache_get_total,
django_cache_hits_total,
django_cache_misses_total,
)
class MemcachedCache(memcached.MemcachedCache):
    """Inherit django_memcached_consul to add metrics about hit/miss ratio.

    get() counts every lookup, then a hit or a miss, and returns the
    cached value unchanged. A stored None is indistinguishable from a
    miss (inherent to probing with default=None).
    """

    def get(self, key, default=None, version=None):
        django_cache_get_total.labels(backend="django_memcached_consul").inc()
        # Probe with default=None so we can tell a hit from a miss.
        cached = super().get(key, default=None, version=version)
        if cached is not None:
            django_cache_hits_total.labels(backend="django_memcached_consul").inc()
            # Return the cached value itself: the previous
            # `return cached or default` discarded falsy hits (0, "", False)
            # and wrongly returned `default` for them.
            return cached
        django_cache_misses_total.labels(backend="django_memcached_consul").inc()
        return default
django-prometheus-2.4.1/django_prometheus/cache/backends/filebased.py 0000664 0000000 0000000 00000001305 15026774211 0025777 0 ustar 00root root 0000000 0000000 from django.core.cache.backends import filebased
from django_prometheus.cache.metrics import (
django_cache_get_total,
django_cache_hits_total,
django_cache_misses_total,
)
class FileBasedCache(filebased.FileBasedCache):
    """Inherit filebased cache to add metrics about hit/miss ratio.

    get() counts every lookup, then a hit or a miss, and returns the
    cached value unchanged. A stored None is indistinguishable from a
    miss (inherent to probing with default=None).
    """

    def get(self, key, default=None, version=None):
        django_cache_get_total.labels(backend="filebased").inc()
        # Probe with default=None so we can tell a hit from a miss.
        cached = super().get(key, default=None, version=version)
        if cached is not None:
            django_cache_hits_total.labels(backend="filebased").inc()
            # Return the cached value itself: the previous
            # `return cached or default` discarded falsy hits (0, "", False)
            # and wrongly returned `default` for them.
            return cached
        django_cache_misses_total.labels(backend="filebased").inc()
        return default
django-prometheus-2.4.1/django_prometheus/cache/backends/locmem.py 0000664 0000000 0000000 00000001260 15026774211 0025335 0 ustar 00root root 0000000 0000000 from django.core.cache.backends import locmem
from django_prometheus.cache.metrics import (
django_cache_get_total,
django_cache_hits_total,
django_cache_misses_total,
)
class LocMemCache(locmem.LocMemCache):
    """Inherit the local-memory cache to add metrics about hit/miss ratio.

    get() counts every lookup, then a hit or a miss, and returns the
    cached value unchanged. A stored None is indistinguishable from a
    miss (inherent to probing with default=None).
    """

    def get(self, key, default=None, version=None):
        django_cache_get_total.labels(backend="locmem").inc()
        # Probe with default=None so we can tell a hit from a miss.
        cached = super().get(key, default=None, version=version)
        if cached is not None:
            django_cache_hits_total.labels(backend="locmem").inc()
            # Return the cached value itself: the previous
            # `return cached or default` discarded falsy hits (0, "", False)
            # and wrongly returned `default` for them.
            return cached
        django_cache_misses_total.labels(backend="locmem").inc()
        return default
django-prometheus-2.4.1/django_prometheus/cache/backends/memcached.py 0000664 0000000 0000000 00000001622 15026774211 0025771 0 ustar 00root root 0000000 0000000 from django.core.cache.backends import memcached
from django_prometheus.cache.metrics import (
django_cache_get_total,
django_cache_hits_total,
django_cache_misses_total,
)
class MemcachedPrometheusCacheMixin:
    """Mixin that instruments memcached backends with hit/miss counters.

    Every lookup increments the "get" counter, then exactly one of the
    hit or miss counters, before returning the value (or `default` on a
    miss).
    """

    def get(self, key, default=None, version=None):
        django_cache_get_total.labels(backend="memcached").inc()
        # Probe with default=None so a miss is observable as None.
        value = super().get(key, default=None, version=version)
        if value is None:
            django_cache_misses_total.labels(backend="memcached").inc()
            return default
        django_cache_hits_total.labels(backend="memcached").inc()
        return value


class PyLibMCCache(MemcachedPrometheusCacheMixin, memcached.PyLibMCCache):
    """Inherit memcached to add metrics about hit/miss ratio"""


class PyMemcacheCache(MemcachedPrometheusCacheMixin, memcached.PyMemcacheCache):
    """Inherit memcached to add metrics about hit/miss ratio"""
django-prometheus-2.4.1/django_prometheus/cache/backends/redis.py 0000664 0000000 0000000 00000003411 15026774211 0025167 0 ustar 00root root 0000000 0000000 from django.core.cache.backends.redis import RedisCache as DjangoRedisCache
from django_redis import cache, exceptions
from django_prometheus.cache.metrics import (
django_cache_get_fail_total,
django_cache_get_total,
django_cache_hits_total,
django_cache_misses_total,
)
class RedisCache(cache.RedisCache):
    """Inherit redis to add metrics about hit/miss/interruption ratio"""

    # omit_exception is django_redis's decorator that swallows backend
    # exceptions when the cache is configured to ignore them; the explicit
    # try/except below still runs first so failures are counted.
    @cache.omit_exception
    def get(self, key, default=None, version=None, client=None):
        try:
            # Count the lookup before performing it, then probe with
            # default=None so a miss is observable as None below.
            django_cache_get_total.labels(backend="redis").inc()
            cached = self.client.get(key, default=None, version=version, client=client)
        except exceptions.ConnectionInterrupted as e:
            # Connection failure: count it, then honor the backend's
            # ignore/log configuration before deciding to re-raise.
            django_cache_get_fail_total.labels(backend="redis").inc()
            if self._ignore_exceptions:
                if self._log_ignored_exceptions:
                    self.logger.error(str(e))
                return default
            raise
        else:
            # No exception: classify the result as a hit or a miss.
            # NOTE(review): `return cached` on a falsy hit is correct here,
            # unlike the `cached or default` pattern elsewhere in this tree.
            if cached is not None:
                django_cache_hits_total.labels(backend="redis").inc()
                return cached
            django_cache_misses_total.labels(backend="redis").inc()
            return default
class NativeRedisCache(DjangoRedisCache):
    """Instrument Django's built-in Redis backend with get/hit/miss/failure counters."""

    def get(self, key, default=None, version=None):
        django_cache_get_total.labels(backend="native_redis").inc()
        try:
            # Probe with default=None so a miss is observable as None.
            value = super().get(key, default=None, version=version)
        except Exception:
            # Count the failure, then let the caller see the original error.
            django_cache_get_fail_total.labels(backend="native_redis").inc()
            raise
        if value is None:
            django_cache_misses_total.labels(backend="native_redis").inc()
            return default
        django_cache_hits_total.labels(backend="native_redis").inc()
        return value
django-prometheus-2.4.1/django_prometheus/cache/metrics.py 0000664 0000000 0000000 00000001245 15026774211 0023760 0 ustar 00root root 0000000 0000000 from prometheus_client import Counter
from django_prometheus.conf import NAMESPACE
# Counters shared by all instrumented cache backends; each one is
# partitioned by the backend name via the "backend" label.

# Incremented once per get() call, hit or miss.
django_cache_get_total = Counter(
    "django_cache_get_total",
    "Total get requests on cache",
    ["backend"],
    namespace=NAMESPACE,
)
# Incremented when a get() finds a value.
# (Exported name is "django_cache_get_hits_total" even though the
# variable is django_cache_hits_total.)
django_cache_hits_total = Counter(
    "django_cache_get_hits_total",
    "Total hits on cache",
    ["backend"],
    namespace=NAMESPACE,
)
# Incremented when a get() finds nothing.
django_cache_misses_total = Counter(
    "django_cache_get_misses_total",
    "Total misses on cache",
    ["backend"],
    namespace=NAMESPACE,
)
# Incremented when a get() raises (used by the redis backends).
django_cache_get_fail_total = Counter(
    "django_cache_get_fail_total",
    "Total get request failures by cache",
    ["backend"],
    namespace=NAMESPACE,
)
django-prometheus-2.4.1/django_prometheus/conf/ 0000775 0000000 0000000 00000000000 15026774211 0021620 5 ustar 00root root 0000000 0000000 django-prometheus-2.4.1/django_prometheus/conf/__init__.py 0000664 0000000 0000000 00000000723 15026774211 0023733 0 ustar 00root root 0000000 0000000 from django.conf import settings
# Prefix applied to every exported metric name; empty string means no prefix.
NAMESPACE = ""

# Default latency histogram buckets, in seconds, used by the middleware
# and database histograms.
PROMETHEUS_LATENCY_BUCKETS = (
    0.01,
    0.025,
    0.05,
    0.075,
    0.1,
    0.25,
    0.5,
    0.75,
    1.0,
    2.5,
    5.0,
    7.5,
    10.0,
    25.0,
    50.0,
    75.0,
    float("inf"),
)

# Allow projects to override the defaults through Django settings.
# Guarded by settings.configured so importing this module without a
# configured Django settings module still works.
if settings.configured:
    NAMESPACE = getattr(settings, "PROMETHEUS_METRIC_NAMESPACE", NAMESPACE)
    PROMETHEUS_LATENCY_BUCKETS = getattr(settings, "PROMETHEUS_LATENCY_BUCKETS", PROMETHEUS_LATENCY_BUCKETS)
django-prometheus-2.4.1/django_prometheus/db/ 0000775 0000000 0000000 00000000000 15026774211 0021260 5 ustar 00root root 0000000 0000000 django-prometheus-2.4.1/django_prometheus/db/__init__.py 0000664 0000000 0000000 00000000623 15026774211 0023372 0 ustar 00root root 0000000 0000000 # Import all metrics
from django_prometheus.db.metrics import (
Counter,
connection_errors_total,
connections_total,
errors_total,
execute_many_total,
execute_total,
query_duration_seconds,
)
__all__ = [
"Counter",
"connection_errors_total",
"connections_total",
"errors_total",
"execute_many_total",
"execute_total",
"query_duration_seconds",
]
django-prometheus-2.4.1/django_prometheus/db/backends/ 0000775 0000000 0000000 00000000000 15026774211 0023032 5 ustar 00root root 0000000 0000000 django-prometheus-2.4.1/django_prometheus/db/backends/README.md 0000664 0000000 0000000 00000002733 15026774211 0024316 0 ustar 00root root 0000000 0000000 # Adding new database wrapper types
Unfortunately, I don't have the resources to create wrappers for all
database vendors. Doing so should be straightforward, but testing that
it works and maintaining it is a lot of busywork, or is impossible for
me for commercial databases.
This document should be enough for people who wish to implement a new
database wrapper.
## Structure
A database engine in Django requires 3 classes (it really requires 2,
but the 3rd one is required for our purposes):
* A DatabaseFeatures class, which describes what features the database
supports. For our usage, we can simply extend the existing
DatabaseFeatures class without any changes.
* A DatabaseWrapper class, which abstracts the interface to the
database.
* A CursorWrapper class, which abstracts the interface to a cursor. A
cursor is the object that can execute SQL statements via an open
connection.
An easy example can be found in the sqlite3 module. Here are a few tips:
* The `self.alias` and `self.vendor` properties are present in all
DatabaseWrappers.
* The CursorWrapper doesn't have access to the alias and vendor, so we
generate the class in a function that accepts them as arguments.
* Most methods you override should just increment a counter, forward
all arguments to the original method and return the
result. `execute` and `execute_many` should also wrap the call to
the parent method in a `try...except` block to increment the
`errors_total` counter as appropriate.
django-prometheus-2.4.1/django_prometheus/db/backends/__init__.py 0000664 0000000 0000000 00000000000 15026774211 0025131 0 ustar 00root root 0000000 0000000 django-prometheus-2.4.1/django_prometheus/db/backends/mysql/ 0000775 0000000 0000000 00000000000 15026774211 0024177 5 ustar 00root root 0000000 0000000 django-prometheus-2.4.1/django_prometheus/db/backends/mysql/__init__.py 0000664 0000000 0000000 00000000000 15026774211 0026276 0 ustar 00root root 0000000 0000000 django-prometheus-2.4.1/django_prometheus/db/backends/mysql/base.py 0000664 0000000 0000000 00000001063 15026774211 0025463 0 ustar 00root root 0000000 0000000 from django.db.backends.mysql import base
from django_prometheus.db.common import DatabaseWrapperMixin, ExportingCursorWrapper
class DatabaseFeatures(base.DatabaseFeatures):
    """Our database has the exact same features as the base one."""


class DatabaseWrapper(DatabaseWrapperMixin, base.DatabaseWrapper):
    # MySQL's connection.cursor() takes no factory argument, so this
    # wrapper builds the exporting cursor class itself instead of relying
    # on the mixin's create_cursor().
    CURSOR_CLASS = base.CursorWrapper

    def create_cursor(self, name=None):
        wrapper_cls = ExportingCursorWrapper(self.CURSOR_CLASS, self.alias, self.vendor)
        return wrapper_cls(self.connection.cursor())
django-prometheus-2.4.1/django_prometheus/db/backends/postgis/ 0000775 0000000 0000000 00000000000 15026774211 0024522 5 ustar 00root root 0000000 0000000 django-prometheus-2.4.1/django_prometheus/db/backends/postgis/__init__.py 0000664 0000000 0000000 00000000000 15026774211 0026621 0 ustar 00root root 0000000 0000000 django-prometheus-2.4.1/django_prometheus/db/backends/postgis/base.py 0000664 0000000 0000000 00000001376 15026774211 0026015 0 ustar 00root root 0000000 0000000 from django.contrib.gis.db.backends.postgis import base
from django.db.backends.postgresql.base import Cursor
from django_prometheus.db.common import DatabaseWrapperMixin, ExportingCursorWrapper
class DatabaseWrapper(DatabaseWrapperMixin, base.DatabaseWrapper):
    """PostGIS wrapper that exports query metrics via a cursor factory."""

    def get_new_connection(self, *args, **kwargs):
        conn = super().get_new_connection(*args, **kwargs)
        # ExportingCursorWrapper subclasses its first argument
        # (`class CursorWrapper(cursor_class)`), so it must receive the
        # cursor *class*. Passing an instance (`Cursor()`) would raise a
        # TypeError when the wrapper class is created.
        conn.cursor_factory = ExportingCursorWrapper(
            conn.cursor_factory or Cursor,
            "postgis",
            self.vendor,
        )
        return conn

    def create_cursor(self, name=None):
        # cursor_factory is a kwarg to connect() so restore create_cursor()'s
        # default behavior
        return base.DatabaseWrapper.create_cursor(self, name=name)
django-prometheus-2.4.1/django_prometheus/db/backends/postgresql/ 0000775 0000000 0000000 00000000000 15026774211 0025235 5 ustar 00root root 0000000 0000000 django-prometheus-2.4.1/django_prometheus/db/backends/postgresql/__init__.py 0000664 0000000 0000000 00000000000 15026774211 0027334 0 ustar 00root root 0000000 0000000 django-prometheus-2.4.1/django_prometheus/db/backends/postgresql/base.py 0000664 0000000 0000000 00000001366 15026774211 0026527 0 ustar 00root root 0000000 0000000 from django.db.backends.postgresql import base
from django.db.backends.postgresql.base import Cursor
from django_prometheus.db.common import DatabaseWrapperMixin, ExportingCursorWrapper
class DatabaseWrapper(DatabaseWrapperMixin, base.DatabaseWrapper):
    """PostgreSQL wrapper that exports query metrics via a cursor factory."""

    def get_new_connection(self, *args, **kwargs):
        conn = super().get_new_connection(*args, **kwargs)
        # ExportingCursorWrapper subclasses its first argument
        # (`class CursorWrapper(cursor_class)`), so it must receive the
        # cursor *class*. Passing an instance (`Cursor()`) would raise a
        # TypeError when the wrapper class is created.
        conn.cursor_factory = ExportingCursorWrapper(
            conn.cursor_factory or Cursor,
            self.alias,
            self.vendor,
        )
        return conn

    def create_cursor(self, name=None):
        # cursor_factory is a kwarg to connect() so restore create_cursor()'s
        # default behavior
        return base.DatabaseWrapper.create_cursor(self, name=name)
django-prometheus-2.4.1/django_prometheus/db/backends/spatialite/ 0000775 0000000 0000000 00000000000 15026774211 0025171 5 ustar 00root root 0000000 0000000 django-prometheus-2.4.1/django_prometheus/db/backends/spatialite/__init__.py 0000664 0000000 0000000 00000000000 15026774211 0027270 0 ustar 00root root 0000000 0000000 django-prometheus-2.4.1/django_prometheus/db/backends/spatialite/base.py 0000664 0000000 0000000 00000000657 15026774211 0026465 0 ustar 00root root 0000000 0000000 from django.contrib.gis.db.backends.spatialite import base, features
from django.db.backends.sqlite3 import base as sqlite_base
from django_prometheus.db.common import DatabaseWrapperMixin
class DatabaseFeatures(features.DatabaseFeatures):
    """Our database has the exact same features as the base one."""


class DatabaseWrapper(DatabaseWrapperMixin, base.DatabaseWrapper):
    # Reuse the plain sqlite3 cursor wrapper; the mixin's create_cursor()
    # installs it as the cursor factory.
    CURSOR_CLASS = sqlite_base.SQLiteCursorWrapper
django-prometheus-2.4.1/django_prometheus/db/backends/sqlite3/ 0000775 0000000 0000000 00000000000 15026774211 0024416 5 ustar 00root root 0000000 0000000 django-prometheus-2.4.1/django_prometheus/db/backends/sqlite3/__init__.py 0000664 0000000 0000000 00000000000 15026774211 0026515 0 ustar 00root root 0000000 0000000 django-prometheus-2.4.1/django_prometheus/db/backends/sqlite3/base.py 0000664 0000000 0000000 00000000520 15026774211 0025677 0 ustar 00root root 0000000 0000000 from django.db.backends.sqlite3 import base
from django_prometheus.db.common import DatabaseWrapperMixin
class DatabaseFeatures(base.DatabaseFeatures):
    """Our database has the exact same features as the base one."""


class DatabaseWrapper(DatabaseWrapperMixin, base.DatabaseWrapper):
    # Cursor class handed to ExportingCursorWrapper by the mixin's
    # create_cursor().
    CURSOR_CLASS = base.SQLiteCursorWrapper
django-prometheus-2.4.1/django_prometheus/db/common.py 0000664 0000000 0000000 00000005316 15026774211 0023127 0 ustar 00root root 0000000 0000000 from django_prometheus.db import (
connection_errors_total,
connections_total,
errors_total,
execute_many_total,
execute_total,
query_duration_seconds,
)
class ExceptionCounterByType:
    """A context manager that counts exceptions by type.

    Exceptions increment the provided counter, whose last label's name
    must match the `type_label` argument.

    In other words:

    c = Counter('http_request_exceptions_total', 'Counter of exceptions',
                ['method', 'type'])
    with ExceptionCounterByType(c, extra_labels={'method': 'GET'}):
        handle_get_request()
    """

    def __init__(self, counter, type_label="type", extra_labels=None):
        self._counter = counter
        self._type_label = type_label
        # Copy the caller's labels since __exit__ mutates them; the
        # original `dict(extra_labels)` raised TypeError when the
        # documented default of None was used.
        self._labels = dict(extra_labels) if extra_labels else {}

    def __enter__(self):
        pass

    def __exit__(self, typ, value, traceback):
        # Only count when an exception escaped the body; returning None
        # (falsy) lets it propagate to the caller.
        if typ is not None:
            self._labels.update({self._type_label: typ.__name__})
            self._counter.labels(**self._labels).inc()
class DatabaseWrapperMixin:
    """Extends the DatabaseWrapper to count connections and cursors."""

    def get_new_connection(self, *args, **kwargs):
        # Count every connection attempt; on failure also count the error
        # before re-raising so callers still see the original exception.
        connections_total.labels(self.alias, self.vendor).inc()
        try:
            return super().get_new_connection(*args, **kwargs)
        except Exception:
            connection_errors_total.labels(self.alias, self.vendor).inc()
            raise

    def create_cursor(self, name=None):
        # Installs the metric-exporting wrapper as the cursor factory.
        # NOTE(review): the `factory` kwarg matches sqlite3's
        # Connection.cursor() signature; backends whose cursor() API
        # differs (e.g. the MySQL wrapper in this tree) override
        # create_cursor() instead.
        return self.connection.cursor(factory=ExportingCursorWrapper(self.CURSOR_CLASS, self.alias, self.vendor))
def ExportingCursorWrapper(cursor_class, alias, vendor):
    """Returns a CursorWrapper class that knows its database's alias and
    vendor name.

    `cursor_class` must be a class (it is subclassed below), not an
    instance.
    """
    # Shared label dict for the duration histogram and error counter.
    labels = {"alias": alias, "vendor": vendor}

    class CursorWrapper(cursor_class):
        """Extends the base CursorWrapper to count events."""

        def execute(self, *args, **kwargs):
            # One statement: count it, time it, and classify any exception
            # by type via ExceptionCounterByType.
            execute_total.labels(alias, vendor).inc()
            with (
                query_duration_seconds.labels(**labels).time(),
                ExceptionCounterByType(errors_total, extra_labels=labels),
            ):
                return super().execute(*args, **kwargs)

        def executemany(self, query, param_list, *args, **kwargs):
            # Bulk execution counts once per parameter set, in both the
            # overall counter and the bulk-specific counter.
            execute_total.labels(alias, vendor).inc(len(param_list))
            execute_many_total.labels(alias, vendor).inc(len(param_list))
            with (
                query_duration_seconds.labels(**labels).time(),
                ExceptionCounterByType(errors_total, extra_labels=labels),
            ):
                return super().executemany(query, param_list, *args, **kwargs)

    return CursorWrapper
django-prometheus-2.4.1/django_prometheus/db/metrics.py 0000664 0000000 0000000 00000002473 15026774211 0023306 0 ustar 00root root 0000000 0000000 from prometheus_client import Counter, Histogram
from django_prometheus.conf import NAMESPACE, PROMETHEUS_LATENCY_BUCKETS
connections_total = Counter(
"django_db_new_connections_total",
"Counter of created connections by database and by vendor.",
["alias", "vendor"],
namespace=NAMESPACE,
)
connection_errors_total = Counter(
"django_db_new_connection_errors_total",
"Counter of connection failures by database and by vendor.",
["alias", "vendor"],
namespace=NAMESPACE,
)
execute_total = Counter(
"django_db_execute_total",
("Counter of executed statements by database and by vendor, including bulk executions."),
["alias", "vendor"],
namespace=NAMESPACE,
)
execute_many_total = Counter(
"django_db_execute_many_total",
("Counter of executed statements in bulk operations by database and by vendor."),
["alias", "vendor"],
namespace=NAMESPACE,
)
errors_total = Counter(
"django_db_errors_total",
("Counter of execution errors by database, vendor and exception type."),
["alias", "vendor", "type"],
namespace=NAMESPACE,
)
query_duration_seconds = Histogram(
"django_db_query_duration_seconds",
("Histogram of query duration by database and vendor."),
["alias", "vendor"],
buckets=PROMETHEUS_LATENCY_BUCKETS,
namespace=NAMESPACE,
)
django-prometheus-2.4.1/django_prometheus/exports.py 0000664 0000000 0000000 00000011250 15026774211 0022750 0 ustar 00root root 0000000 0000000 import logging
import os
import threading
import prometheus_client
from django.conf import settings
from django.http import HttpResponse
from prometheus_client import multiprocess
# This project is Python 3 only (the code below uses f-strings), so the
# legacy Python 2 BaseHTTPServer fallback is dead code.
from http.server import HTTPServer
logger = logging.getLogger(__name__)
def SetupPrometheusEndpointOnPort(port, addr=""):
    """Exports Prometheus metrics on an HTTPServer running in its own thread.

    The server runs on the given port and is by default listening on
    all interfaces. This HTTPServer is fully independent of Django and
    its stack. This offers the advantage that even if Django becomes
    unable to respond, the HTTPServer will continue to function and
    export metrics. However, this also means that the features
    offered by Django (like middlewares or WSGI) can't be used.

    Now here's the really weird part. When Django runs with the
    auto-reloader enabled (which is the default, you can disable it
    with `manage.py runserver --noreload`), it forks and executes
    manage.py twice. That's wasteful but usually OK. It starts being a
    problem when you try to open a port, like we do. We can detect
    that we're running under an autoreloader through the presence of
    the RUN_MAIN environment variable, so we abort if we're trying to
    export under an autoreloader and trying to open a port.
    """
    # Refuse to run under the autoreloader: parent and child would both
    # try to bind the same port.
    assert os.environ.get("RUN_MAIN") != "true", (
        "The thread-based exporter can't be safely used when django's "
        "autoreloader is active. Use the URL exporter, or start django "
        "with --noreload. See documentation/exports.md."
    )
    # prometheus_client starts its own daemon thread serving /metrics.
    prometheus_client.start_http_server(port, addr=addr)
class PrometheusEndpointServer(threading.Thread):
    """A thread class that holds an HTTP server and makes it serve_forever()."""

    def __init__(self, httpd, *args, **kwargs):
        # The HTTPServer instance to run; serving starts when the thread is
        # started by the caller.
        self.httpd = httpd
        super().__init__(*args, **kwargs)

    def run(self):
        self.httpd.serve_forever()
def SetupPrometheusEndpointOnPortRange(port_range, addr=""):
    """Like SetupPrometheusEndpointOnPort, but tries several ports.

    This is useful when you're running Django as a WSGI application
    with multiple processes and you want Prometheus to discover all
    workers. Each worker will grab a port and you can use Prometheus
    to aggregate across workers.

    port_range may be any iterable object that contains a list of
    ports. Typically this would be a `range` of contiguous ports.

    As soon as one port is found that can serve, use this one and stop
    trying.

    Returns the port chosen (an `int`), or `None` if no port in the
    supplied range was available.

    The same caveats regarding autoreload apply. Do not use this when
    Django's autoreloader is active.
    """
    assert os.environ.get("RUN_MAIN") != "true", (
        "The thread-based exporter can't be safely used when django's "
        "autoreloader is active. Use the URL exporter, or start django "
        "with --noreload. See documentation/exports.md."
    )
    for port in port_range:
        try:
            httpd = HTTPServer((addr, port), prometheus_client.MetricsHandler)
        except OSError:
            # Port unavailable (in use, permission denied, ...);
            # socket.error has been an alias of OSError since Python 3.3.
            continue  # Try next port
        # Serve on a daemon thread so it never blocks interpreter exit.
        thread = PrometheusEndpointServer(httpd)
        thread.daemon = True
        thread.start()
        logger.info(f"Exporting Prometheus /metrics/ on port {port}")
        return port  # Stop trying ports at this point
    logger.warning("Cannot export Prometheus /metrics/ - no available ports in supplied range")
    return None
def SetupPrometheusExportsFromConfig():
    """Exports metrics so Prometheus can collect them.

    Reads PROMETHEUS_METRICS_EXPORT_PORT(_RANGE) and
    PROMETHEUS_METRICS_EXPORT_ADDRESS from Django settings; a configured
    port range takes precedence over a single port. With neither set,
    this is a no-op.
    """
    address = getattr(settings, "PROMETHEUS_METRICS_EXPORT_ADDRESS", "")
    port_range = getattr(settings, "PROMETHEUS_METRICS_EXPORT_PORT_RANGE", None)
    single_port = getattr(settings, "PROMETHEUS_METRICS_EXPORT_PORT", None)
    if port_range:
        SetupPrometheusEndpointOnPortRange(port_range, address)
    elif single_port:
        SetupPrometheusEndpointOnPort(single_port, address)
def ExportToDjangoView(request):
    """Exports /metrics as a Django view.

    You can use django_prometheus.urls to map /metrics to this view.
    """
    # In multiprocess mode (either spelling of the env var), metrics must
    # be aggregated across worker processes through a fresh registry.
    if "PROMETHEUS_MULTIPROC_DIR" in os.environ or "prometheus_multiproc_dir" in os.environ:
        registry = prometheus_client.CollectorRegistry()
        multiprocess.MultiProcessCollector(registry)
    else:
        registry = prometheus_client.REGISTRY
    metrics_page = prometheus_client.generate_latest(registry)
    return HttpResponse(metrics_page, content_type=prometheus_client.CONTENT_TYPE_LATEST)
django-prometheus-2.4.1/django_prometheus/middleware.py 0000664 0000000 0000000 00000027313 15026774211 0023370 0 ustar 00root root 0000000 0000000 from django.utils.deprecation import MiddlewareMixin
from prometheus_client import Counter, Histogram
from django_prometheus.conf import NAMESPACE, PROMETHEUS_LATENCY_BUCKETS
from django_prometheus.utils import PowersOf, Time, TimeSince
class Metrics:
    """Registry of every metric used by the Prometheus middlewares.

    Instantiated once per process via get_instance(); both middlewares
    share the same instance. Subclasses can override register_metric()
    to add custom labels (see test_middleware_custom_labels in the
    test app).
    """

    # Lazily-created singleton instance.
    _instance = None

    @classmethod
    def get_instance(cls):
        # NOTE(review): `cls._instance` is looked up through the MRO, so if
        # the base class is instantiated before a subclass, the subclass
        # would reuse the base instance — confirm when subclassing.
        if not cls._instance:
            cls._instance = cls()
        return cls._instance

    def register_metric(self, metric_cls, name, documentation, labelnames=(), **kwargs):
        """Hook for subclasses to customize metric creation (extra labels, etc.)."""
        return metric_cls(name, documentation, labelnames=labelnames, **kwargs)

    def __init__(self, *args, **kwargs):
        self.register()

    def register(self):
        """Creates all middleware metrics and stores them as attributes."""
        self.requests_total = self.register_metric(
            Counter,
            "django_http_requests_before_middlewares_total",
            "Total count of requests before middlewares run.",
            namespace=NAMESPACE,
        )
        self.responses_total = self.register_metric(
            Counter,
            "django_http_responses_before_middlewares_total",
            "Total count of responses before middlewares run.",
            namespace=NAMESPACE,
        )
        self.requests_latency_before = self.register_metric(
            Histogram,
            "django_http_requests_latency_including_middlewares_seconds",
            ("Histogram of requests processing time (including middleware processing time)."),
            buckets=PROMETHEUS_LATENCY_BUCKETS,
            namespace=NAMESPACE,
        )
        self.requests_unknown_latency_before = self.register_metric(
            Counter,
            "django_http_requests_unknown_latency_including_middlewares_total",
            (
                "Count of requests for which the latency was unknown (when computing "
                "django_http_requests_latency_including_middlewares_seconds)."
            ),
            namespace=NAMESPACE,
        )
        self.requests_latency_by_view_method = self.register_metric(
            Histogram,
            "django_http_requests_latency_seconds_by_view_method",
            "Histogram of request processing time labelled by view.",
            ["view", "method"],
            buckets=PROMETHEUS_LATENCY_BUCKETS,
            namespace=NAMESPACE,
        )
        self.requests_unknown_latency = self.register_metric(
            Counter,
            "django_http_requests_unknown_latency_total",
            "Count of requests for which the latency was unknown.",
            namespace=NAMESPACE,
        )
        # Set in process_request
        self.requests_ajax = self.register_metric(
            Counter,
            "django_http_ajax_requests_total",
            "Count of AJAX requests.",
            namespace=NAMESPACE,
        )
        self.requests_by_method = self.register_metric(
            Counter,
            "django_http_requests_total_by_method",
            "Count of requests by method.",
            ["method"],
            namespace=NAMESPACE,
        )
        self.requests_by_transport = self.register_metric(
            Counter,
            "django_http_requests_total_by_transport",
            "Count of requests by transport.",
            ["transport"],
            namespace=NAMESPACE,
        )
        # Set in process_view
        self.requests_by_view_transport_method = self.register_metric(
            Counter,
            "django_http_requests_total_by_view_transport_method",
            "Count of requests by view, transport, method.",
            ["view", "transport", "method"],
            namespace=NAMESPACE,
        )
        self.requests_body_bytes = self.register_metric(
            Histogram,
            "django_http_requests_body_total_bytes",
            "Histogram of requests by body size.",
            buckets=PowersOf(2, 30),
            namespace=NAMESPACE,
        )
        # Set in process_template_response
        self.responses_by_templatename = self.register_metric(
            Counter,
            "django_http_responses_total_by_templatename",
            "Count of responses by template name.",
            ["templatename"],
            namespace=NAMESPACE,
        )
        # Set in process_response
        self.responses_by_status = self.register_metric(
            Counter,
            "django_http_responses_total_by_status",
            "Count of responses by status.",
            ["status"],
            namespace=NAMESPACE,
        )
        self.responses_by_status_view_method = self.register_metric(
            Counter,
            "django_http_responses_total_by_status_view_method",
            "Count of responses by status, view, method.",
            ["status", "view", "method"],
            namespace=NAMESPACE,
        )
        self.responses_body_bytes = self.register_metric(
            Histogram,
            "django_http_responses_body_total_bytes",
            "Histogram of responses by body size.",
            buckets=PowersOf(2, 30),
            namespace=NAMESPACE,
        )
        self.responses_by_charset = self.register_metric(
            Counter,
            "django_http_responses_total_by_charset",
            "Count of responses by charset.",
            ["charset"],
            namespace=NAMESPACE,
        )
        self.responses_streaming = self.register_metric(
            Counter,
            "django_http_responses_streaming_total",
            "Count of streaming responses.",
            namespace=NAMESPACE,
        )
        # Set in process_exception
        self.exceptions_by_type = self.register_metric(
            Counter,
            "django_http_exceptions_total_by_type",
            "Count of exceptions by object type.",
            ["type"],
            namespace=NAMESPACE,
        )
        self.exceptions_by_view = self.register_metric(
            Counter,
            "django_http_exceptions_total_by_view",
            "Count of exceptions by view.",
            ["view"],
            namespace=NAMESPACE,
        )
class PrometheusBeforeMiddleware(MiddlewareMixin):
    """Monitoring middleware that should run before other middlewares.

    Counts every request/response and timestamps the request so the
    total middleware-inclusive latency can be observed on the way out.
    """

    # Subclasses may point this at a Metrics subclass to customize metrics.
    metrics_cls = Metrics

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.metrics = self.metrics_cls.get_instance()

    def process_request(self, request):
        """Count the incoming request and stamp its arrival time."""
        self.metrics.requests_total.inc()
        request.prometheus_before_middleware_event = Time()

    def process_response(self, request, response):
        """Count the response; observe latency if the request was stamped."""
        self.metrics.responses_total.inc()
        started = getattr(request, "prometheus_before_middleware_event", None)
        if started is None:
            # Request never went through process_request (e.g. short-circuited).
            self.metrics.requests_unknown_latency_before.inc()
        else:
            self.metrics.requests_latency_before.observe(TimeSince(started))
        return response
class PrometheusAfterMiddleware(MiddlewareMixin):
    """Monitoring middleware that should run after other middlewares.

    Exports per-request counters and latency histograms labeled by
    method, transport, view name and response status.
    """

    # Subclasses may point this at a Metrics subclass to customize metrics.
    metrics_cls = Metrics

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.metrics = self.metrics_cls.get_instance()

    def _transport(self, request):
        """Label value for the transport dimension."""
        return "https" if request.is_secure() else "http"

    def _method(self, request):
        """Return the request method, collapsed to "" for unknown verbs.

        Restricting the label to known HTTP verbs keeps the metric's
        label cardinality bounded even for garbage methods.
        """
        m = request.method
        if m not in (
            "GET",
            "HEAD",
            "POST",
            "PUT",
            "DELETE",
            "TRACE",
            "OPTIONS",
            "CONNECT",
            "PATCH",
        ):
            return ""
        return m

    def label_metric(self, metric, request, response=None, **labels):
        """Hook for subclasses to inject extra labels per request/response."""
        return metric.labels(**labels) if labels else metric

    def process_request(self, request):
        """Count request by method/transport, observe body size, stamp time."""
        transport = self._transport(request)
        method = self._method(request)
        self.label_metric(self.metrics.requests_by_method, request, method=method).inc()
        self.label_metric(self.metrics.requests_by_transport, request, transport=transport).inc()
        # Mimic the behaviour of the deprecated "Request.is_ajax()" method.
        if request.headers.get("x-requested-with") == "XMLHttpRequest":
            self.label_metric(self.metrics.requests_ajax, request).inc()
        content_length = int(request.headers.get("content-length") or 0)
        self.label_metric(self.metrics.requests_body_bytes, request).observe(content_length)
        request.prometheus_after_middleware_event = Time()

    def _get_view_name(self, request):
        """Resolved view name, or "" if resolution has not/cannot happen."""
        view_name = ""
        if hasattr(request, "resolver_match"):
            if request.resolver_match is not None:
                if request.resolver_match.view_name is not None:
                    view_name = request.resolver_match.view_name
        return view_name

    def process_view(self, request, view_func, *view_args, **view_kwargs):
        """Count the request by (view, transport, method) once resolved."""
        transport = self._transport(request)
        method = self._method(request)
        if hasattr(request, "resolver_match"):
            # _get_view_name() also guards against a None resolver_match,
            # which the previous direct attribute access did not.
            name = self._get_view_name(request)
            self.label_metric(
                self.metrics.requests_by_view_transport_method,
                request,
                view=name,
                transport=transport,
                method=method,
            ).inc()

    def process_template_response(self, request, response):
        """Count template responses by template name."""
        if hasattr(response, "template_name"):
            self.label_metric(
                self.metrics.responses_by_templatename,
                request,
                response=response,
                templatename=str(response.template_name),
            ).inc()
        return response

    def process_response(self, request, response):
        """Count the response by status/charset/size and observe latency."""
        method = self._method(request)
        name = self._get_view_name(request)
        status = str(response.status_code)
        self.label_metric(self.metrics.responses_by_status, request, response, status=status).inc()
        self.label_metric(
            self.metrics.responses_by_status_view_method,
            request,
            response,
            status=status,
            view=name,
            method=method,
        ).inc()
        if hasattr(response, "charset"):
            self.label_metric(
                self.metrics.responses_by_charset,
                request,
                response,
                charset=str(response.charset),
            ).inc()
        if hasattr(response, "streaming") and response.streaming:
            self.label_metric(self.metrics.responses_streaming, request, response).inc()
        if hasattr(response, "content"):
            self.label_metric(self.metrics.responses_body_bytes, request, response).observe(len(response.content))
        if hasattr(request, "prometheus_after_middleware_event"):
            # Use the sanitized method label here, consistent with every
            # other metric in this middleware; the raw request.method that
            # was used before is an unbounded label-cardinality risk.
            self.label_metric(
                self.metrics.requests_latency_by_view_method,
                request,
                response,
                view=name,
                method=method,
            ).observe(TimeSince(request.prometheus_after_middleware_event))
        else:
            self.label_metric(self.metrics.requests_unknown_latency, request, response).inc()
        return response

    def process_exception(self, request, exception):
        """Count the exception by type and view; observe latency if stamped."""
        self.label_metric(self.metrics.exceptions_by_type, request, type=type(exception).__name__).inc()
        if hasattr(request, "resolver_match"):
            # None-safe view-name lookup (see _get_view_name).
            name = self._get_view_name(request)
            self.label_metric(self.metrics.exceptions_by_view, request, view=name).inc()
        if hasattr(request, "prometheus_after_middleware_event"):
            self.label_metric(
                self.metrics.requests_latency_by_view_method,
                request,
                view=self._get_view_name(request),
                # Sanitized method label, consistent with process_response.
                method=self._method(request),
            ).observe(TimeSince(request.prometheus_after_middleware_event))
        else:
            self.label_metric(self.metrics.requests_unknown_latency, request).inc()
django-prometheus-2.4.1/django_prometheus/migrations.py 0000664 0000000 0000000 00000003550 15026774211 0023424 0 ustar 00root root 0000000 0000000 from django.db import connections
from django.db.backends.dummy.base import DatabaseWrapper
from prometheus_client import Gauge
from django_prometheus.conf import NAMESPACE
# Gauge of migrations the executor still has to run, per connection alias.
unapplied_migrations = Gauge(
    "django_migrations_unapplied_total",
    "Count of unapplied migrations by database connection",
    ["connection"],
    namespace=NAMESPACE,
)

# Gauge of migrations already recorded as applied, per connection alias.
applied_migrations = Gauge(
    "django_migrations_applied_total",
    "Count of applied migrations by database connection",
    ["connection"],
    namespace=NAMESPACE,
)
def ExportMigrationsForDatabase(alias, executor):
    """Update the applied/unapplied migration gauges for one database alias."""
    pending = executor.migration_plan(executor.loader.graph.leaf_nodes())
    unapplied_migrations.labels(alias).set(len(pending))
    applied_migrations.labels(alias).set(len(executor.loader.applied_migrations))
def ExportMigrations():
    """Exports counts of unapplied migrations.

    This is meant to be called during app startup, ideally by
    django_prometheus.apps.AppConfig.
    """
    # Import MigrationExecutor lazily: it asserts at import time that the
    # apps are ready, which is not yet true when django_prometheus itself
    # is imported. Calling this from AppConfig.ready() is safe.
    from django.db.migrations.executor import MigrationExecutor

    # With DATABASES = {} Django injects a dummy backend that raises on any
    # use; exporting stats would then crash the app on startup, so bail out.
    default = connections["default"] if "default" in connections else None
    if isinstance(default, DatabaseWrapper):
        return
    for alias in connections.databases:
        ExportMigrationsForDatabase(alias, MigrationExecutor(connections[alias]))
django-prometheus-2.4.1/django_prometheus/models.py 0000664 0000000 0000000 00000003015 15026774211 0022527 0 ustar 00root root 0000000 0000000 from prometheus_client import Counter
from django_prometheus.conf import NAMESPACE
# Counter of Model._do_insert() calls, labeled by model name.
model_inserts = Counter(
    "django_model_inserts_total",
    "Number of insert operations by model.",
    ["model"],
    namespace=NAMESPACE,
)

# Counter of Model._do_update() calls, labeled by model name.
model_updates = Counter(
    "django_model_updates_total",
    "Number of update operations by model.",
    ["model"],
    namespace=NAMESPACE,
)

# Counter of Model.delete() calls, labeled by model name.
model_deletes = Counter(
    "django_model_deletes_total",
    "Number of delete operations by model.",
    ["model"],
    namespace=NAMESPACE,
)
def ExportModelOperationsMixin(model_name):
    """Returns a mixin for models to export counters for lifecycle operations.

    Usage:
      class User(ExportModelOperationsMixin('user'), Model):
          ...
    """
    # Pre-create the label values so the time series exist (at zero) even
    # before the first operation; avoids gaps in aggregated data.
    for counter in (model_inserts, model_updates, model_deletes):
        counter.labels(model_name)

    class Mixin:
        def _do_insert(self, *args, **kwargs):
            model_inserts.labels(model_name).inc()
            return super()._do_insert(*args, **kwargs)

        def _do_update(self, *args, **kwargs):
            model_updates.labels(model_name).inc()
            return super()._do_update(*args, **kwargs)

        def delete(self, *args, **kwargs):
            model_deletes.labels(model_name).inc()
            return super().delete(*args, **kwargs)

    # Make tracebacks/repr show which model the mixin was built for.
    Mixin.__qualname__ = f"ExportModelOperationsMixin('{model_name}')"
    return Mixin
django-prometheus-2.4.1/django_prometheus/tests/ 0000775 0000000 0000000 00000000000 15026774211 0022035 5 ustar 00root root 0000000 0000000 django-prometheus-2.4.1/django_prometheus/tests/__init__.py 0000664 0000000 0000000 00000000000 15026774211 0024134 0 ustar 00root root 0000000 0000000 django-prometheus-2.4.1/django_prometheus/tests/end2end/ 0000775 0000000 0000000 00000000000 15026774211 0023354 5 ustar 00root root 0000000 0000000 django-prometheus-2.4.1/django_prometheus/tests/end2end/manage.py 0000775 0000000 0000000 00000000372 15026774211 0025163 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
import os
import sys

if __name__ == "__main__":
    # Point Django at the test application's settings before importing
    # anything that requires a configured settings module.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "testapp.settings")
    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)
# The stock Django middleware stack used by the end-to-end test app.
DJANGO_MIDDLEWARES = [
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
    "django.middleware.security.SecurityMiddleware",
]


def get_middleware(before, after):
    """Return the stock middleware list sandwiched between two extras.

    `before` becomes the first middleware and `after` the last, so the
    pair brackets everything Django does.
    """
    return [before, *DJANGO_MIDDLEWARES, after]
django-prometheus-2.4.1/django_prometheus/tests/end2end/testapp/models.py 0000664 0000000 0000000 00000000703 15026774211 0026671 0 ustar 00root root 0000000 0000000 from django.db.models import CharField, Model, PositiveIntegerField
from django_prometheus.models import ExportModelOperationsMixin
class Dog(ExportModelOperationsMixin("dog"), Model):
    """Test model whose insert/update/delete operations export "dog" metrics."""

    name = CharField(max_length=100, unique=True)
    breed = CharField(max_length=100, blank=True, null=True)
    age = PositiveIntegerField(blank=True, null=True)
class Lawn(ExportModelOperationsMixin("lawn"), Model):
    """Minimal test model exporting "lawn" lifecycle metrics."""

    location = CharField(max_length=100)
django-prometheus-2.4.1/django_prometheus/tests/end2end/testapp/settings.py 0000664 0000000 0000000 00000011055 15026774211 0027250 0 ustar 00root root 0000000 0000000 import os
import tempfile
from testapp.helpers import get_middleware
# SECURITY WARNING: keep the secret key used in production secret!
# (This is a throwaway key for the end-to-end test app only.)
SECRET_KEY = ")0-t%mc5y1^fn8e7i**^^v166@5iu(&-2%9#kxud0&4ap#k!_k"

DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = (
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "django_prometheus",
    "testapp",
)

# Sandwich the stock middleware between the Prometheus before/after pair so
# the exported latency covers every other middleware.
MIDDLEWARE = get_middleware(
    "django_prometheus.middleware.PrometheusBeforeMiddleware",
    "django_prometheus.middleware.PrometheusAfterMiddleware",
)

ROOT_URLCONF = "testapp.urls"

TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ],
        },
    },
]
WSGI_APPLICATION = "testapp.wsgi.application"

# Every entry uses a django_prometheus.db.backends.* engine so query
# metrics are exercised for each database vendor under test.
DATABASES = {
    "default": {
        "ENGINE": "django_prometheus.db.backends.sqlite3",
        "NAME": "db.sqlite3",
    },
    # Comment this to not test django_prometheus.db.backends.postgres.
    "postgresql": {
        "ENGINE": "django_prometheus.db.backends.postgresql",
        "NAME": "postgres",
        "USER": "postgres",
        "PASSWORD": "",
        "HOST": "localhost",
        "PORT": "5432",
    },
    # Comment this to not test django_prometheus.db.backends.postgis.
    "postgis": {
        "ENGINE": "django_prometheus.db.backends.postgis",
        "NAME": "postgis",
        "USER": "postgres",
        "PASSWORD": "",
        "HOST": "localhost",
        "PORT": "5432",
    },
    # Comment this to not test django_prometheus.db.backends.mysql.
    "mysql": {
        "ENGINE": "django_prometheus.db.backends.mysql",
        "NAME": "django_prometheus_1",
        "USER": "root",
        "PASSWORD": "",
        "HOST": "127.0.0.1",
        "PORT": "3306",
    },
    "spatialite": {
        "ENGINE": "django_prometheus.db.backends.spatialite",
        "NAME": "db_spatialite.sqlite3",
    },
    # The following databases are used by test_db.py only
    "test_db_1": {
        "ENGINE": "django_prometheus.db.backends.sqlite3",
        "NAME": "test_db_1.sqlite3",
    },
    "test_db_2": {
        "ENGINE": "django_prometheus.db.backends.sqlite3",
        "NAME": "test_db_2.sqlite3",
    },
}
# Caches

# File-backed caches live under a throwaway temp dir created per run.
_tmp_cache_dir = tempfile.mkdtemp()

# Every entry uses a django_prometheus.cache.backends.* backend so cache
# metrics are exercised for each supported backend. Requires memcached on
# :11211 and redis on :6379 to be running.
CACHES = {
    "default": {
        "BACKEND": "django_prometheus.cache.backends.memcached.PyLibMCCache",
        "LOCATION": "localhost:11211",
    },
    "memcached.PyLibMCCache": {
        "BACKEND": "django_prometheus.cache.backends.memcached.PyLibMCCache",
        "LOCATION": "localhost:11211",
    },
    "memcached.PyMemcacheCache": {
        "BACKEND": "django_prometheus.cache.backends.memcached.PyMemcacheCache",
        "LOCATION": "localhost:11211",
    },
    "filebased": {
        "BACKEND": "django_prometheus.cache.backends.filebased.FileBasedCache",
        "LOCATION": os.path.join(_tmp_cache_dir, "django_cache"),
    },
    "locmem": {
        "BACKEND": "django_prometheus.cache.backends.locmem.LocMemCache",
        "LOCATION": os.path.join(_tmp_cache_dir, "locmem_cache"),
    },
    "native_redis": {
        "BACKEND": "django_prometheus.cache.backends.redis.NativeRedisCache",
        "LOCATION": "redis://127.0.0.1:6379/0",
    },
    "redis": {
        "BACKEND": "django_prometheus.cache.backends.redis.RedisCache",
        "LOCATION": "redis://127.0.0.1:6379/1",
    },
    # Fake redis config emulated stopped service (port 6666 has no server).
    "stopped_redis": {
        "BACKEND": "django_prometheus.cache.backends.redis.RedisCache",
        "LOCATION": "redis://127.0.0.1:6666/1",
    },
    "stopped_redis_ignore_exception": {
        "BACKEND": "django_prometheus.cache.backends.redis.RedisCache",
        "LOCATION": "redis://127.0.0.1:6666/1",
        "OPTIONS": {"IGNORE_EXCEPTIONS": True},
    },
}

# Internationalization
LANGUAGE_CODE = "en-us"

TIME_ZONE = "UTC"

USE_I18N = True

USE_TZ = False

# Static files (CSS, JavaScript, Images)
STATIC_URL = "/static/"

# Log everything at INFO and above to the console.
LOGGING = {
    "version": 1,
    "disable_existing_loggers": False,
    "handlers": {"console": {"class": "logging.StreamHandler"}},
    "root": {"handlers": ["console"], "level": "INFO"},
    "loggers": {"django": {"handlers": ["console"], "level": "INFO"}},
}
django-prometheus-2.4.1/django_prometheus/tests/end2end/testapp/templates/ 0000775 0000000 0000000 00000000000 15026774211 0027032 5 ustar 00root root 0000000 0000000 django-prometheus-2.4.1/django_prometheus/tests/end2end/testapp/templates/help.html 0000664 0000000 0000000 00000004126 15026774211 0030653 0 ustar 00root root 0000000 0000000 Can't Help Falling in Love
Remembering Helps Me to Forget
Helplessly, Hopelessly
Love Helps Those
I Need a Little Help
For a while We Helped Each Other Out
Give Me a Helping Hand
I Can't Help You, I'm Falling Too
How Can I Help You Say Goodbye?
Time Hasn't Helped
Jukebox, Help Me Find My Baby
I Just Can't Help Myself
Help Me, Girl
I Can't Help it
Help Somebody
Help, Help
I Can't Help How I Feel
No Help From Me
I Can Help
Somebody Help Me
Please Help Me I'm Falling in Love With You
Help Yourself
Outside Help
Helping Hand
Help Me, Rhonda
Can't Help Feeling So Blue
We All Agreed to Help
Help Pour Out the Rain (Lacey's Song)
Sleep Won't Help Me
I Can't Help Myself (Sugarpie, Honeybunch)
Cry for Help
She's Helping Me Get Over You
Mama Help Me
Help Yourself to Me
Can't Help But Wonder
Heaven Help the Working Girl
Help Me Pick Up the Pieces
Crying Won't Help Now
I Couldn't Help Myself
So Help Me, Girl
Heaven Help the Fool
Help Wanted
Help Me Get Over You
Helpless
Help
Can't Help it
Can't Help Calling Your Name
If She Just Helps Me Get Over You
Helpless Heart
No Help Wanted
It Didn't Help Much
Help Me Make it Through the Night
Help Me Understand
I Just Can't Help Believing
Can't Help Thinking About Me
How Could I Help But Love You?
Heaven Help My Heart
I Can't Help Remembering You
Help Me Hold on
Helping Me Get Over You
I Can't Help it if I'm Still in Love with You
Girl Can't Help it, The
I Can't Help it, I'm Falling in Love
With a Little Help from My Friends
Heaven Help the Child
Help Me
Can't Help But Love You
Help is on the Way
I Got Some Help I Don't Need
Heaven Help Us All
Heaven Help Me
Helplessly Hoping
django-prometheus-2.4.1/django_prometheus/tests/end2end/testapp/templates/index.html 0000664 0000000 0000000 00000000023 15026774211 0031022 0 ustar 00root root 0000000 0000000 This is the index.
django-prometheus-2.4.1/django_prometheus/tests/end2end/testapp/templates/lawn.html 0000664 0000000 0000000 00000000105 15026774211 0030655 0 ustar 00root root 0000000 0000000 Aaah, {{ lawn.location }}, the best place on Earth, probably.
django-prometheus-2.4.1/django_prometheus/tests/end2end/testapp/templates/slow.html 0000664 0000000 0000000 00000004252 15026774211 0030707 0 ustar 00root root 0000000 0000000
_.---"'"""""'`--.._
_,.-' `-._
_,." -.
.-"" ___...---------.._ `.
`---'"" `-. `.
`. \
`. \
\ \
. \
| .
| |
_________ | |
_,.-'" `"'-.._ : |
_,-' `-._.' |
_.' `. '
_.-. _,+......__ `. .
.' `-"' `"-.,-""--._ \ /
/ ,' | __ \ \ /
` .. +" ) \ \ /
`.' \ ,-"`-.. | | \ /
/ " | .' \ '. _.' .'
|,.."--"""--..| " | `""`. |
," `-._ | | |
.' `-._+ | |
/ `. / |
| ` ' | / |
`-.....--.__ | | / |
`./ "| / `-.........--.- ' | ,' '
/| || `.' ,' .' |_,-+ /
/ ' '.`. _,' ,' `. | ' _,.. /
/ `. `"'"'""'" _,^--------"`. | `.'_ _/
/... _.`:.________,.' `._,.-..| "'
`.__.' `._ /
"' mh
Art by Maija Haavisto
django-prometheus-2.4.1/django_prometheus/tests/end2end/testapp/templates/sql.html 0000664 0000000 0000000 00000001244 15026774211 0030520 0 ustar 00root root 0000000 0000000
Execute some SQL here, for fun and profit!
Note that this is a very bad vulnerability: it gives anyone direct
access to your whole database. This only exists to test that
django_prometheus is working.
{% if query %}
Your query was:
{{ query }}
Your results were:
{{ rows }}
{% endif %}
django-prometheus-2.4.1/django_prometheus/tests/end2end/testapp/test_caches.py 0000664 0000000 0000000 00000006623 15026774211 0027702 0 ustar 00root root 0000000 0000000 import pytest
from django.core.cache import caches
from redis import RedisError
from django_prometheus.testutils import assert_metric_equal, get_metric
# Cache aliases from testapp.settings that expose the standard
# django_cache_* metrics; requires live memcached and redis servers.
_SUPPORTED_CACHES = [
    "memcached.PyLibMCCache",
    "memcached.PyMemcacheCache",
    "filebased",
    "locmem",
    "native_redis",
    "redis",
]
class TestCachesMetrics:
    """Test django_prometheus.caches metrics."""

    @pytest.mark.parametrize("supported_cache", _SUPPORTED_CACHES)
    def test_counters(self, supported_cache):
        # Note: those tests require a memcached server running
        tested_cache = caches[supported_cache]
        # The backend label is the family name, e.g. "memcached" for
        # "memcached.PyLibMCCache".
        backend = supported_cache.split(".")[0]
        # Capture baselines: the registry is shared across tests.
        total_before = get_metric("django_cache_get_total", backend=backend) or 0
        hit_before = get_metric("django_cache_get_hits_total", backend=backend) or 0
        miss_before = get_metric("django_cache_get_misses_total", backend=backend) or 0
        tested_cache.set("foo1", "bar")
        tested_cache.get("foo1")
        tested_cache.get("foo1")
        tested_cache.get("foofoo")
        result = tested_cache.get("foofoo", default="default")
        assert result == "default"
        # 4 gets total: 2 hits, 2 misses (a defaulted get still counts as
        # a miss).
        assert_metric_equal(total_before + 4, "django_cache_get_total", backend=backend)
        assert_metric_equal(hit_before + 2, "django_cache_get_hits_total", backend=backend)
        assert_metric_equal(
            miss_before + 2,
            "django_cache_get_misses_total",
            backend=backend,
        )

    def test_redis_cache_fail(self):
        # Note: test use fake service config (like if server was stopped)
        supported_cache = "redis"
        total_before = get_metric("django_cache_get_total", backend=supported_cache) or 0
        fail_before = get_metric("django_cache_get_fail_total", backend=supported_cache) or 0
        hit_before = get_metric("django_cache_get_hits_total", backend=supported_cache) or 0
        miss_before = get_metric("django_cache_get_misses_total", backend=supported_cache) or 0
        # With IGNORE_EXCEPTIONS the failed get is swallowed but still
        # counted as a total + a failure; hits/misses stay unchanged.
        tested_cache = caches["stopped_redis_ignore_exception"]
        tested_cache.get("foo1")
        assert_metric_equal(hit_before, "django_cache_get_hits_total", backend=supported_cache)
        assert_metric_equal(miss_before, "django_cache_get_misses_total", backend=supported_cache)
        assert_metric_equal(total_before + 1, "django_cache_get_total", backend=supported_cache)
        assert_metric_equal(fail_before + 1, "django_cache_get_fail_total", backend=supported_cache)
        # Without IGNORE_EXCEPTIONS the error propagates but is still
        # counted the same way.
        tested_cache = caches["stopped_redis"]
        with pytest.raises(RedisError):
            tested_cache.get("foo1")
        assert_metric_equal(hit_before, "django_cache_get_hits_total", backend=supported_cache)
        assert_metric_equal(miss_before, "django_cache_get_misses_total", backend=supported_cache)
        assert_metric_equal(total_before + 2, "django_cache_get_total", backend=supported_cache)
        assert_metric_equal(fail_before + 2, "django_cache_get_fail_total", backend=supported_cache)

    @pytest.mark.parametrize("supported_cache", _SUPPORTED_CACHES)
    def test_cache_version_support(self, supported_cache):
        # Note: those tests require a memcached server running
        tested_cache = caches[supported_cache]
        tested_cache.set("foo1", "bar v.1", version=1)
        tested_cache.set("foo1", "bar v.2", version=2)
        assert tested_cache.get("foo1", version=1) == "bar v.1"
        assert tested_cache.get("foo1", version=2) == "bar v.2"
from django.conf import settings
from django.db import connections
from django_prometheus.testutils import (
assert_metric_compare,
assert_metric_diff,
assert_metric_equal,
get_metric,
save_registry,
)
# @pytest.fixture(autouse=True)
# def enable_db_access_for_all_tests(db):
# pass
# Marker base class: grants DB access to every configured alias so each
# backend's metrics get exercised by the subclasses below.
@pytest.mark.django_db(databases=list(settings.DATABASES.keys()))
class BaseDBTest:
    pass
@pytest.mark.skipif(connections["test_db_1"].vendor != "sqlite", reason="Skipped unless test_db_1 uses sqlite")
class TestDbMetrics(BaseDBTest):
    """Test django_prometheus.db metrics.

    Note regarding the values of metrics: many tests interact with the
    database, and the test runner itself does. As such, tests that
    require that a metric has a specific value are at best very
    fragile. Consider asserting that the value exceeds a certain
    threshold, or check by how much it increased during the test.
    """

    def test_config_has_expected_databases(self):
        """Not a real unit test: ensures that testapp.settings contains the
        databases this test expects.
        """
        assert "default" in connections.databases.keys()
        assert "test_db_1" in connections.databases.keys()
        assert "test_db_2" in connections.databases.keys()

    def test_counters(self):
        cursor_db1 = connections["test_db_1"].cursor()
        cursor_db2 = connections["test_db_2"].cursor()
        cursor_db1.execute("SELECT 1")
        for _ in range(200):
            cursor_db2.execute("SELECT 2")
        cursor_db1.execute("SELECT 3")
        # Deliberately broken SQL: only the error counter should move.
        try:
            cursor_db1.execute("this is clearly not valid SQL")
        except Exception:
            pass
        assert_metric_equal(
            1,
            "django_db_errors_total",
            alias="test_db_1",
            vendor="sqlite",
            type="OperationalError",
        )
        # Lower bounds only: the harness itself also issues queries.
        assert get_metric("django_db_execute_total", alias="test_db_1", vendor="sqlite") > 0
        assert get_metric("django_db_execute_total", alias="test_db_2", vendor="sqlite") >= 200

    def test_histograms(self):
        cursor_db1 = connections["test_db_1"].cursor()
        cursor_db2 = connections["test_db_2"].cursor()
        cursor_db1.execute("SELECT 1")
        for _ in range(200):
            cursor_db2.execute("SELECT 2")
        assert (
            get_metric(
                "django_db_query_duration_seconds_count",
                alias="test_db_1",
                vendor="sqlite",
            )
            > 0
        )
        assert (
            get_metric(
                "django_db_query_duration_seconds_count",
                alias="test_db_2",
                vendor="sqlite",
            )
            >= 200
        )

    def test_execute_many(self):
        # executemany with 4 parameter tuples must bump the counter by 4.
        registry = save_registry()
        cursor_db1 = connections["test_db_1"].cursor()
        cursor_db1.executemany(
            "INSERT INTO testapp_lawn(location) VALUES (?)",
            [("Paris",), ("New York",), ("Berlin",), ("San Francisco",)],
        )
        assert_metric_diff(
            registry,
            4,
            "django_db_execute_many_total",
            alias="test_db_1",
            vendor="sqlite",
        )
@pytest.mark.skipif("postgresql" not in connections, reason="Skipped unless postgresql database is enabled")
class TestPostgresDbMetrics(BaseDBTest):
    """Test django_prometheus.db metrics for the PostgreSQL backend.

    Note regarding the values of metrics: many tests interact with the
    database, and the test runner itself does. As such, tests that
    require that a metric has a specific value are at best very
    fragile. Consider asserting that the value exceeds a certain
    threshold, or check by how much it increased during the test.
    """

    def test_counters(self):
        registry = save_registry()
        cursor = connections["postgresql"].cursor()
        for _ in range(20):
            cursor.execute("SELECT 1")
        # Range check: at least our 20 queries, plus a small allowance for
        # harness-issued queries.
        assert_metric_compare(
            registry,
            lambda a, b: a + 20 <= b < a + 25,
            "django_db_execute_total",
            alias="postgresql",
            vendor="postgresql",
        )
@pytest.mark.skipif("mysql" not in connections, reason="Skipped unless mysql database is enabled")
class TestMysDbMetrics(BaseDBTest):
    """Test django_prometheus.db metrics for the MySQL backend.

    Note regarding the values of metrics: many tests interact with the
    database, and the test runner itself does. As such, tests that
    require that a metric has a specific value are at best very
    fragile. Consider asserting that the value exceeds a certain
    threshold, or check by how much it increased during the test.
    """

    def test_counters(self):
        registry = save_registry()
        cursor = connections["mysql"].cursor()
        for _ in range(20):
            cursor.execute("SELECT 1")
        # Range check: at least our 20 queries, plus a small allowance for
        # harness-issued queries.
        assert_metric_compare(
            registry,
            lambda a, b: a + 20 <= b < a + 25,
            "django_db_execute_total",
            alias="mysql",
            vendor="mysql",
        )
@pytest.mark.skipif("postgis" not in connections, reason="Skipped unless postgis database is enabled")
class TestPostgisDbMetrics(BaseDBTest):
    """Test django_prometheus.db metrics for the PostGIS backend.

    Note regarding the values of metrics: many tests interact with the
    database, and the test runner itself does. As such, tests that
    require that a metric has a specific value are at best very
    fragile. Consider asserting that the value exceeds a certain
    threshold, or check by how much it increased during the test.
    """

    def test_counters(self):
        r = save_registry()
        cursor = connections["postgis"].cursor()
        for _ in range(20):
            cursor.execute("SELECT 1")
        # Vendor label is "postgresql": PostGIS rides on the postgres driver.
        assert_metric_compare(
            r,
            lambda a, b: a + 20 <= b < a + 25,
            "django_db_execute_total",
            alias="postgis",
            vendor="postgresql",
        )
@pytest.mark.skipif("spatialite" not in connections, reason="Skipped unless spatialite database is enabled")
class TestSpatialiteDbMetrics(BaseDBTest):
    """Test django_prometheus.db metrics for the SpatiaLite backend.

    Note regarding the values of metrics: many tests interact with the
    database, and the test runner itself does. As such, tests that
    require that a metric has a specific value are at best very
    fragile. Consider asserting that the value exceeds a certain
    threshold, or check by how much it increased during the test.
    """

    def test_counters(self):
        r = save_registry()
        connection = connections["spatialite"]
        # Make sure the extension is loaded and geospatial tables are created
        connection.prepare_database()
        cursor = connection.cursor()
        for _ in range(20):
            cursor.execute("SELECT 1")
        # Vendor label is "sqlite": SpatiaLite rides on the sqlite driver.
        assert_metric_compare(
            r,
            lambda a, b: a + 20 <= b < a + 25,
            "django_db_execute_total",
            alias="spatialite",
            vendor="sqlite",
        )
django-prometheus-2.4.1/django_prometheus/tests/end2end/testapp/test_middleware.py 0000664 0000000 0000000 00000011605 15026774211 0030565 0 ustar 00root root 0000000 0000000 import pytest
from django_prometheus.testutils import (
assert_metric_diff,
assert_metric_equal,
save_registry,
)
from testapp.views import ObjectionException
def M(metric_name):
    """Expand a short metric name to the full django_http_* name.

    This is just intended to help keep the lines shorter in test cases.
    """
    return "django_http_" + metric_name


def T(metric_name):
    """Like M(), but with the '_total' suffix counters carry."""
    return M(metric_name) + "_total"
class TestMiddlewareMetrics:
    """Test django_prometheus.middleware.

    Note that counters related to exceptions can't be tested as
    Django's test Client only simulates requests and the exception
    handling flow is very different in that simulation.
    """

    @pytest.fixture(autouse=True)
    def _setup(self, settings):
        # Widen the latency buckets so the timing-based tests below are
        # not flaky.
        settings.PROMETHEUS_LATENCY_BUCKETS = (0.05, 1.0, 2.0, 4.0, 5.0, 10.0, float("inf"))

    def test_request_counters(self, client):
        registry = save_registry()
        client.get("/")
        client.get("/")
        client.get("/help")
        client.post("/", {"test": "data"})
        assert_metric_diff(registry, 4, M("requests_before_middlewares_total"))
        assert_metric_diff(registry, 4, M("responses_before_middlewares_total"))
        assert_metric_diff(registry, 3, T("requests_total_by_method"), method="GET")
        assert_metric_diff(registry, 1, T("requests_total_by_method"), method="POST")
        assert_metric_diff(registry, 4, T("requests_total_by_transport"), transport="http")
        assert_metric_diff(
            registry,
            2,
            T("requests_total_by_view_transport_method"),
            view="testapp.views.index",
            transport="http",
            method="GET",
        )
        assert_metric_diff(
            registry,
            1,
            T("requests_total_by_view_transport_method"),
            view="testapp.views.help",
            transport="http",
            method="GET",
        )
        assert_metric_diff(
            registry,
            1,
            T("requests_total_by_view_transport_method"),
            view="testapp.views.index",
            transport="http",
            method="POST",
        )
        # We have 3 requests with no post body, and one with a few
        # bytes, but buckets are cumulative so that is 4 requests with
        # <=128 bytes bodies.
        assert_metric_diff(registry, 3, M("requests_body_total_bytes_bucket"), le="0.0")
        assert_metric_diff(registry, 4, M("requests_body_total_bytes_bucket"), le="128.0")
        assert_metric_equal(None, M("responses_total_by_templatename"), templatename="help.html")
        assert_metric_diff(registry, 3, T("responses_total_by_templatename"), templatename="index.html")
        assert_metric_diff(registry, 4, T("responses_total_by_status"), status="200")
        assert_metric_diff(registry, 0, M("responses_body_total_bytes_bucket"), le="0.0")
        assert_metric_diff(registry, 3, M("responses_body_total_bytes_bucket"), le="128.0")
        assert_metric_diff(registry, 4, M("responses_body_total_bytes_bucket"), le="8192.0")
        assert_metric_diff(registry, 4, T("responses_total_by_charset"), charset="utf-8")
        assert_metric_diff(registry, 0, M("responses_streaming_total"))

    def test_latency_histograms(self, client):
        # Caution: this test is timing-based. This is not ideal. It
        # runs slowly (each request to /slow takes at least .1 seconds
        # to complete), to eliminate flakiness we adjust the buckets used
        # in the test suite.
        registry = save_registry()
        # This always takes more than .1 second, so checking the lower
        # buckets is fine.
        client.get("/slow")
        assert_metric_diff(
            registry,
            0,
            M("requests_latency_seconds_by_view_method_bucket"),
            le="0.05",
            view="slow",
            method="GET",
        )
        assert_metric_diff(
            registry,
            1,
            M("requests_latency_seconds_by_view_method_bucket"),
            le="5.0",
            view="slow",
            method="GET",
        )

    def test_exception_latency_histograms(self, client):
        registry = save_registry()
        try:
            client.get("/objection")
        except ObjectionException:
            pass
        # Diff of 2: latency is observed in both process_exception and
        # process_response for the same request.
        # NOTE(review): le="2.5" is not one of the buckets configured in
        # _setup ((0.05, 1.0, 2.0, 4.0, 5.0, 10.0, inf)) — confirm this
        # matches a real bucket boundary.
        assert_metric_diff(
            registry,
            2,
            M("requests_latency_seconds_by_view_method_bucket"),
            le="2.5",
            view="testapp.views.objection",
            method="GET",
        )

    def test_streaming_responses(self, client):
        registry = save_registry()
        client.get("/")
        client.get("/file")
        # Only "/file" streams; streamed bodies have no content attribute,
        # so the body-size histogram only counts "/".
        assert_metric_diff(registry, 1, M("responses_streaming_total"))
        assert_metric_diff(registry, 1, M("responses_body_total_bytes_bucket"), le="+Inf")
django-prometheus-2.4.1/django_prometheus/tests/end2end/testapp/test_middleware_custom_labels.py 0000664 0000000 0000000 00000007364 15026774211 0033510 0 ustar 00root root 0000000 0000000 import pytest
from prometheus_client import REGISTRY
from prometheus_client.metrics import MetricWrapperBase
from django_prometheus.middleware import (
Metrics,
PrometheusAfterMiddleware,
PrometheusBeforeMiddleware,
)
from django_prometheus.testutils import assert_metric_diff, save_registry
from testapp.helpers import get_middleware
from testapp.test_middleware import M, T
EXTENDED_METRICS = [
M("requests_latency_seconds_by_view_method"),
M("responses_total_by_status_view_method"),
]
class CustomMetrics(Metrics):
    """Metrics subclass that registers the EXTENDED_METRICS with two
    extra label names (view_type, user_agent_type)."""

    def register_metric(self, metric_cls, name, documentation, labelnames=(), **kwargs):
        # Build a new list instead of calling labelnames.extend(...):
        # the default value is a tuple (which has no .extend, so the
        # default case would crash), and extending a caller-owned list
        # in place would leak the extra labels back into caller state.
        if name in EXTENDED_METRICS:
            labelnames = [*labelnames, "view_type", "user_agent_type"]
        return super().register_metric(metric_cls, name, documentation, labelnames=labelnames, **kwargs)
class AppMetricsBeforeMiddleware(PrometheusBeforeMiddleware):
"""Before-middleware wired to CustomMetrics so both middlewares share the same metric definitions."""
metrics_cls = CustomMetrics
class AppMetricsAfterMiddleware(PrometheusAfterMiddleware):
    """After-middleware that fills in the custom labels for the
    extended metrics registered by CustomMetrics."""

    metrics_cls = CustomMetrics

    def label_metric(self, metric, request, response=None, **labels):
        # Non-extended metrics keep their labels untouched.
        if metric._name not in EXTENDED_METRICS:
            return super().label_metric(metric, request, response=response, **labels)
        # Fixed values for the custom labels; explicit labels win.
        merged = {"view_type": "foo", "user_agent_type": "browser", **labels}
        return super().label_metric(metric, request, response=response, **merged)
class TestMiddlewareMetricsWithCustomLabels:
"""End-to-end checks that the custom-labelled middlewares record the
expected counters, including the extra view_type/user_agent_type labels."""
@pytest.fixture(autouse=True)
def _setup(self, settings):
settings.MIDDLEWARE = get_middleware(
"testapp.test_middleware_custom_labels.AppMetricsBeforeMiddleware",
"testapp.test_middleware_custom_labels.AppMetricsAfterMiddleware",
)
# Allow CustomMetrics to be used
# Unregister the previously-registered metrics and drop the
# singleton so they get re-registered with the extra labels.
for metric in Metrics._instance.__dict__.values():
if isinstance(metric, MetricWrapperBase):
REGISTRY.unregister(metric)
Metrics._instance = None
def test_request_counters(self, client):
"""Counters increment per request/response, split by label values."""
registry = save_registry()
client.get("/")
client.get("/")
client.get("/help")
client.post("/", {"test": "data"})
# 4 requests total: 3 GET + 1 POST.
assert_metric_diff(registry, 4, M("requests_before_middlewares_total"))
assert_metric_diff(registry, 4, M("responses_before_middlewares_total"))
assert_metric_diff(registry, 3, T("requests_total_by_method"), method="GET")
assert_metric_diff(registry, 1, T("requests_total_by_method"), method="POST")
assert_metric_diff(registry, 4, T("requests_total_by_transport"), transport="http")
assert_metric_diff(
registry,
2,
T("requests_total_by_view_transport_method"),
view="testapp.views.index",
transport="http",
method="GET",
)
assert_metric_diff(
registry,
1,
T("requests_total_by_view_transport_method"),
view="testapp.views.help",
transport="http",
method="GET",
)
assert_metric_diff(
registry,
1,
T("requests_total_by_view_transport_method"),
view="testapp.views.index",
transport="http",
method="POST",
)
# The extended metrics carry the custom labels added by
# AppMetricsAfterMiddleware.label_metric.
assert_metric_diff(
registry,
2.0,
T("responses_total_by_status_view_method"),
status="200",
view="testapp.views.index",
method="GET",
view_type="foo",
user_agent_type="browser",
)
assert_metric_diff(
registry,
1.0,
T("responses_total_by_status_view_method"),
status="200",
view="testapp.views.help",
method="GET",
view_type="foo",
user_agent_type="browser",
)
django-prometheus-2.4.1/django_prometheus/tests/end2end/testapp/test_migrations.py 0000664 0000000 0000000 00000002443 15026774211 0030624 0 ustar 00root root 0000000 0000000 from unittest.mock import MagicMock
import pytest
from django_prometheus.migrations import ExportMigrationsForDatabase
from django_prometheus.testutils import assert_metric_equal
def M(metric_name):
    """Expand a short metric name to the full django_migrations_* name.

    This is just intended to help keep the lines shorter in test
    cases.
    """
    return "django_migrations_" + metric_name
@pytest.mark.django_db
class TestMigrations:
"""Test migration counters."""
def test_counters(self):
# First database: three applied migrations, none unapplied.
executor = MagicMock()
executor.migration_plan = MagicMock()
executor.migration_plan.return_value = set()
executor.loader.applied_migrations = {"a", "b", "c"}
ExportMigrationsForDatabase("fakedb1", executor)
assert executor.migration_plan.call_count == 1
# Second database: two applied migrations, one unapplied.
executor.migration_plan = MagicMock()
executor.migration_plan.return_value = {"a"}
executor.loader.applied_migrations = {"b", "c"}
ExportMigrationsForDatabase("fakedb2", executor)
# Gauges are labelled by connection name.
assert_metric_equal(3, M("applied_total"), connection="fakedb1")
assert_metric_equal(0, M("unapplied_total"), connection="fakedb1")
assert_metric_equal(2, M("applied_total"), connection="fakedb2")
assert_metric_equal(1, M("unapplied_total"), connection="fakedb2")
django-prometheus-2.4.1/django_prometheus/tests/end2end/testapp/test_models.py 0000664 0000000 0000000 00000003171 15026774211 0027732 0 ustar 00root root 0000000 0000000 import pytest
from django_prometheus.testutils import assert_metric_diff, save_registry
from testapp.models import Dog, Lawn
def M(metric_name):
    """Expand a short metric name to the full django_model_* name.

    This is just intended to help keep the lines shorter in test
    cases.
    """
    return "django_model_" + metric_name
@pytest.mark.django_db
class TestModelMetrics:
"""Test django_prometheus.models."""
def test_counters(self):
registry = save_registry()
cool = Dog()
cool.name = "Cool"
cool.save()
assert_metric_diff(registry, 1, M("inserts_total"), model="dog")
elysees = Lawn()
elysees.location = "Champs Elysees, Paris"
elysees.save()
# Inserts are counted per model; the dog counter is unaffected.
assert_metric_diff(registry, 1, M("inserts_total"), model="lawn")
assert_metric_diff(registry, 1, M("inserts_total"), model="dog")
galli = Dog()
galli.name = "Galli"
galli.save()
assert_metric_diff(registry, 2, M("inserts_total"), model="dog")
# Assigning a field without save() must not count anything.
cool.breed = "Wolfhound"
assert_metric_diff(registry, 2, M("inserts_total"), model="dog")
cool.save()
# Saving an existing instance counts as an update, not an insert.
assert_metric_diff(registry, 2, M("inserts_total"), model="dog")
assert_metric_diff(registry, 1, M("updates_total"), model="dog")
cool.age = 9
cool.save()
assert_metric_diff(registry, 2, M("updates_total"), model="dog")
cool.delete() # :(
assert_metric_diff(registry, 2, M("inserts_total"), model="dog")
assert_metric_diff(registry, 2, M("updates_total"), model="dog")
assert_metric_diff(registry, 1, M("deletes_total"), model="dog")
django-prometheus-2.4.1/django_prometheus/tests/end2end/testapp/urls.py 0000664 0000000 0000000 00000000704 15026774211 0026374 0 ustar 00root root 0000000 0000000 from django.contrib import admin
from django.urls import include, path
from testapp import views
# URL routes for the end-to-end test application.
urlpatterns = [
    path("", views.index),
    path("help", views.help),
    path("slow", views.slow, name="slow"),
    path("objection", views.objection),
    path("sql", views.sql),
    # views.newlawn(request, location) requires a `location` argument,
    # so the route must capture it as a path parameter; a bare
    # "newlawn/" route would call the view with a missing argument
    # (TypeError on every request).
    path("newlawn/<str:location>", views.newlawn),
    path("file", views.file),
    # Exposes /metrics via django_prometheus.
    path("", include("django_prometheus.urls")),
    path("admin/", admin.site.urls),
]
django-prometheus-2.4.1/django_prometheus/tests/end2end/testapp/views.py 0000664 0000000 0000000 00000003043 15026774211 0026543 0 ustar 00root root 0000000 0000000 import os
import time
from django.db import connections
from django.http import FileResponse
from django.shortcuts import render
from django.template.response import TemplateResponse
from testapp.models import Lawn
def index(request):
"""Renders the index template via TemplateResponse."""
return TemplateResponse(request, "index.html", {})
def help(request):
"""Renders the help template with django.shortcuts.render.

Note: shadows the builtin help(); referenced by name from testapp.urls.
"""
# render does not instantiate a TemplateResponse, so it does not
# increment the "by_templatename" counters.
return render(request, "help.html", {})
def slow(request):
"""This view takes .1s to load, on purpose."""
# The latency-histogram tests rely on this delay.
time.sleep(0.1)
return TemplateResponse(request, "slow.html", {})
def newlawn(request, location):
"""This view creates a new Lawn instance in the database."""
# NOTE(review): requires a `location` URL kwarg, so the route in
# testapp/urls.py must capture it (e.g. "newlawn/<str:location>").
lawn = Lawn()
lawn.location = location
lawn.save()
return TemplateResponse(request, "lawn.html", {"lawn": lawn})
class ObjectionException(Exception):
"""Raised unconditionally by the objection view to exercise the
exception-handling metrics in the test suite."""
pass
def objection(request):
"""Always raises ObjectionException, to test error paths."""
raise ObjectionException("Objection!")
def sql(request):
"""Runs a SQL query taken from the query string against the selected
database and renders the resulting rows.

SECURITY NOTE: the query is executed verbatim from user input
(cursor.execute(query, [])); acceptable only because this is a
test-only application.
"""
databases = connections.databases.keys()
query = request.GET.get("query")
db = request.GET.get("database")
if query and db:
cursor = connections[db].cursor()
cursor.execute(query, [])
results = cursor.fetchall()
return TemplateResponse(
request,
"sql.html",
{"query": query, "rows": results, "databases": databases},
)
# No query/database supplied: render the empty form.
return TemplateResponse(request, "sql.html", {"query": None, "rows": None, "databases": databases})
def file(request):
"""Serves os.devnull as a file download; the test suite uses this
view as its streaming-response case."""
return FileResponse(open(os.devnull, "rb"))
django-prometheus-2.4.1/django_prometheus/tests/end2end/testapp/wsgi.py 0000664 0000000 0000000 00000000606 15026774211 0026361 0 ustar 00root root 0000000 0000000 """WSGI config for testapp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the test app settings unless the caller already set it.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "testapp.settings")
application = get_wsgi_application()
django-prometheus-2.4.1/django_prometheus/tests/test_django_prometheus.py 0000664 0000000 0000000 00000000653 15026774211 0027167 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
from django_prometheus.utils import PowersOf
class TestDjangoPrometheus:
"""Unit tests for django_prometheus.utils."""
def testPowersOf(self):
"""Tests utils.PowersOf."""
assert PowersOf(2, 4) == [0, 1, 2, 4, 8]
assert PowersOf(3, 5, lower=1) == [0, 3, 9, 27, 81, 243]
assert PowersOf(2, 4, include_zero=False) == [1, 2, 4, 8]
assert PowersOf(2, 6, lower=2, include_zero=False) == [4, 8, 16, 32, 64, 128]
django-prometheus-2.4.1/django_prometheus/tests/test_exports.py 0000664 0000000 0000000 00000002122 15026774211 0025147 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
import socket
from unittest.mock import ANY, MagicMock, call, patch
from django_prometheus.exports import SetupPrometheusEndpointOnPortRange
@patch("django_prometheus.exports.HTTPServer")
def test_port_range_available(httpserver_mock):
"""Test port range setup with an available port."""
# First bind attempt raises socket.error (port busy), second succeeds.
httpserver_mock.side_effect = [socket.error, MagicMock()]
port_range = [8000, 8001]
port_chosen = SetupPrometheusEndpointOnPortRange(port_range)
assert port_chosen in port_range
# Both ports must have been tried, in order.
expected_calls = [call(("", 8000), ANY), call(("", 8001), ANY)]
assert httpserver_mock.mock_calls == expected_calls
@patch("django_prometheus.exports.HTTPServer")
def test_port_range_unavailable(httpserver_mock):
"""Test port range setup with no available ports."""
# Every bind attempt fails.
httpserver_mock.side_effect = [socket.error, socket.error]
port_range = [8000, 8001]
port_chosen = SetupPrometheusEndpointOnPortRange(port_range)
expected_calls = [call(("", 8000), ANY), call(("", 8001), ANY)]
assert httpserver_mock.mock_calls == expected_calls
# When no port can be bound, the function reports None.
assert port_chosen is None
django-prometheus-2.4.1/django_prometheus/tests/test_testutils.py 0000664 0000000 0000000 00000011506 15026774211 0025511 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
from operator import itemgetter
import prometheus_client
import pytest
from django_prometheus.testutils import (
assert_metric_diff,
assert_metric_equal,
assert_metric_no_diff,
assert_metric_not_equal,
get_metric,
get_metric_from_frozen_registry,
get_metrics_vector,
save_registry,
)
class TestPrometheusTestCaseMixin:
"""Tests the helpers in django_prometheus.testutils, using a private
CollectorRegistry so the global registry is not polluted."""
@pytest.fixture
def registry(self):
return prometheus_client.CollectorRegistry()
@pytest.fixture(autouse=True)
def some_gauge(self, registry):
# A scalar (unlabelled) gauge fixed at 42.
some_gauge = prometheus_client.Gauge("some_gauge", "Some gauge.", registry=registry)
some_gauge.set(42)
return some_gauge
@pytest.fixture(autouse=True)
def some_labelled_gauge(self, registry):
# A gauge with two labels and four known label combinations.
some_labelled_gauge = prometheus_client.Gauge(
"some_labelled_gauge",
"Some labelled gauge.",
["labelred", "labelblue"],
registry=registry,
)
some_labelled_gauge.labels("pink", "indigo").set(1)
some_labelled_gauge.labels("pink", "royal").set(2)
some_labelled_gauge.labels("carmin", "indigo").set(3)
some_labelled_gauge.labels("carmin", "royal").set(4)
return some_labelled_gauge
def test_get_metric(self, registry):
"""Tests get_metric."""
assert get_metric("some_gauge", registry=registry) == 42
assert (
get_metric(
"some_labelled_gauge",
registry=registry,
labelred="pink",
labelblue="indigo",
)
== 1
)
def test_get_metrics_vector(self, registry):
"""Tests get_metrics_vector."""
vector = get_metrics_vector("some_nonexistent_gauge", registry=registry)
assert vector == []
vector = get_metrics_vector("some_gauge", registry=registry)
assert vector == [({}, 42)]
vector = get_metrics_vector("some_labelled_gauge", registry=registry)
# Sort both sides by value: sample order is not guaranteed.
assert sorted(
[
({"labelred": "pink", "labelblue": "indigo"}, 1),
({"labelred": "pink", "labelblue": "royal"}, 2),
({"labelred": "carmin", "labelblue": "indigo"}, 3),
({"labelred": "carmin", "labelblue": "royal"}, 4),
],
key=itemgetter(1),
) == sorted(vector, key=itemgetter(1))
def test_assert_metric_equal(self, registry):
"""Tests assert_metric_equal."""
# First we test that a scalar metric can be tested.
assert_metric_equal(42, "some_gauge", registry=registry)
assert_metric_not_equal(43, "some_gauge", registry=registry)
# Here we test that assert_metric_equal fails on nonexistent gauges.
assert_metric_not_equal(42, "some_nonexistent_gauge", registry=registry)
# Here we test that labelled metrics can be tested.
assert_metric_equal(
1,
"some_labelled_gauge",
registry=registry,
labelred="pink",
labelblue="indigo",
)
assert_metric_not_equal(
1,
"some_labelled_gauge",
registry=registry,
labelred="tomato",
labelblue="sky",
)
def test_registry_saving(self, registry, some_gauge, some_labelled_gauge):
"""Tests save_registry and frozen registries operations."""
frozen_registry = save_registry(registry=registry)
# Test that we can manipulate a frozen scalar metric.
assert get_metric_from_frozen_registry("some_gauge", frozen_registry) == 42
some_gauge.set(99)
# The frozen copy keeps seeing the old value after a live update...
assert get_metric_from_frozen_registry("some_gauge", frozen_registry) == 42
# ...and the diff helpers compare frozen vs. live values.
assert_metric_diff(frozen_registry, 99 - 42, "some_gauge", registry=registry)
assert_metric_no_diff(frozen_registry, 1, "some_gauge", registry=registry)
# Now test the same thing with a labelled metric.
assert (
get_metric_from_frozen_registry(
"some_labelled_gauge",
frozen_registry,
labelred="pink",
labelblue="indigo",
)
== 1
)
some_labelled_gauge.labels("pink", "indigo").set(5)
assert (
get_metric_from_frozen_registry(
"some_labelled_gauge",
frozen_registry,
labelred="pink",
labelblue="indigo",
)
== 1
)
assert_metric_diff(
frozen_registry,
5 - 1,
"some_labelled_gauge",
registry=registry,
labelred="pink",
labelblue="indigo",
)
assert_metric_no_diff(
frozen_registry,
1,
"some_labelled_gauge",
registry=registry,
labelred="pink",
labelblue="indigo",
)
django-prometheus-2.4.1/django_prometheus/testutils.py 0000664 0000000 0000000 00000014545 15026774211 0023316 0 ustar 00root root 0000000 0000000 import copy
from prometheus_client import REGISTRY
# Templates for the failure messages produced by the assert_metric_*
# helpers below. They are filled in with the old-style % operator, so
# the argument order in each helper must match the placeholders here.
METRIC_EQUALS_ERR_EXPLANATION = """
%s%s = %s, expected %s.
The values for %s are:
%s"""
METRIC_DIFF_ERR_EXPLANATION = """
%s%s changed by %f, expected %f.
Value before: %s
Value after: %s
"""
METRIC_COMPARE_ERR_EXPLANATION = """
The change in value of %s%s didn't match the predicate.
Value before: %s
Value after: %s
"""
METRIC_DIFF_ERR_NONE_EXPLANATION = """
%s%s was None after.
Value before: %s
Value after: %s
"""
# NOTE(review): this string is a no-op expression rather than the
# module docstring, because it appears after other statements;
# consider moving it to the top of the file.
"""A collection of utilities that make it easier to write test cases
that interact with metrics.
"""
def assert_metric_equal(expected_value, metric_name, registry=REGISTRY, **labels):
    """Asserts that metric_name{**labels} == expected_value."""
    actual = get_metric(metric_name, registry=registry, **labels)
    # Built unconditionally so the failure message lists every value
    # currently recorded for the metric.
    failure_message = METRIC_EQUALS_ERR_EXPLANATION % (
        metric_name,
        format_labels(labels),
        actual,
        expected_value,
        metric_name,
        format_vector(get_metrics_vector(metric_name)),
    )
    assert expected_value == actual, failure_message
def assert_metric_diff(frozen_registry, expected_diff, metric_name, registry=REGISTRY, **labels):
"""Asserts that metric_name{**labels} changed by expected_diff between
the frozen registry and now. A frozen registry can be obtained
by calling save_registry, typically at the beginning of a test
case.
"""
saved_value = get_metric_from_frozen_registry(metric_name, frozen_registry, **labels)
current_value = get_metric(metric_name, registry=registry, **labels)
# A missing current value means the metric was never observed at all.
assert current_value is not None, METRIC_DIFF_ERR_NONE_EXPLANATION % (
metric_name,
format_labels(labels),
saved_value,
current_value,
)
# A metric absent from the frozen registry counts as starting at 0.
diff = current_value - (saved_value or 0.0)
assert_err = METRIC_DIFF_ERR_EXPLANATION % (
metric_name,
format_labels(labels),
diff,
expected_diff,
saved_value,
current_value,
)
assert expected_diff == diff, assert_err
def assert_metric_no_diff(frozen_registry, expected_diff, metric_name, registry=REGISTRY, **labels):
"""Asserts that metric_name{**labels} isn't changed by expected_diff between
the frozen registry and now. A frozen registry can be obtained
by calling save_registry, typically at the beginning of a test
case.
"""
saved_value = get_metric_from_frozen_registry(metric_name, frozen_registry, **labels)
current_value = get_metric(metric_name, registry=registry, **labels)
# The metric must exist; only its change is being negated here.
assert current_value is not None, METRIC_DIFF_ERR_NONE_EXPLANATION % (
metric_name,
format_labels(labels),
saved_value,
current_value,
)
# A metric absent from the frozen registry counts as starting at 0.
diff = current_value - (saved_value or 0.0)
assert_err = METRIC_DIFF_ERR_EXPLANATION % (
metric_name,
format_labels(labels),
diff,
expected_diff,
saved_value,
current_value,
)
assert expected_diff != diff, assert_err
def assert_metric_not_equal(expected_value, metric_name, registry=REGISTRY, **labels):
"""Asserts that metric_name{**labels} != expected_value."""
value = get_metric(metric_name, registry=registry, **labels)
assert_err = METRIC_EQUALS_ERR_EXPLANATION % (
metric_name,
format_labels(labels),
value,
expected_value,
metric_name,
format_vector(get_metrics_vector(metric_name)),
)
assert expected_value != value, assert_err
def assert_metric_compare(frozen_registry, predicate, metric_name, registry=REGISTRY, **labels):
"""Asserts that metric_name{**labels} changed according to a provided
predicate function between the frozen registry and now. A
frozen registry can be obtained by calling save_registry,
typically at the beginning of a test case.
"""
saved_value = get_metric_from_frozen_registry(metric_name, frozen_registry, **labels)
current_value = get_metric(metric_name, registry=registry, **labels)
# The metric must exist before the predicate can be evaluated.
assert current_value is not None, METRIC_DIFF_ERR_NONE_EXPLANATION % (
metric_name,
format_labels(labels),
saved_value,
current_value,
)
# The predicate must return True (not merely a truthy value).
assert predicate(saved_value, current_value) is True, METRIC_COMPARE_ERR_EXPLANATION % (
metric_name,
format_labels(labels),
saved_value,
current_value,
)
def save_registry(registry=REGISTRY):
    """Freezes a registry. This lets a user test changes to a metric
    instead of testing the absolute value. A typical use case looks like:

        registry = save_registry()
        doStuff()
        assert_metric_diff(registry, 1, 'stuff_done_total')
    """
    # Deep-copy the collected snapshot so later metric updates cannot
    # mutate the frozen samples.
    snapshot = list(registry.collect())
    return copy.deepcopy(snapshot)
def get_metric(metric_name, registry=REGISTRY, **labels):
"""Gets a single metric value for the exact (name, labels) pair, or
None when no such sample exists."""
return get_metric_from_frozen_registry(metric_name, registry.collect(), **labels)
def get_metrics_vector(metric_name, registry=REGISTRY):
"""Returns the values for all labels of a given metric.

The result is returned as a list of (labels, value) tuples,
where `labels` is a dict.

This is quite a hack since it relies on the internal
representation of the prometheus_client, and it should
probably be provided as a function there instead.
"""
return get_metric_vector_from_frozen_registry(metric_name, registry.collect())
def get_metric_vector_from_frozen_registry(metric_name, frozen_registry):
    """Like get_metrics_vector, but from a frozen registry.

    Returns a list of (labels, value) tuples for every sample whose
    name matches metric_name.
    """
    return [
        (sample[1], sample[2])
        for metric in frozen_registry
        for sample in metric.samples
        if sample[0] == metric_name
    ]
def get_metric_from_frozen_registry(metric_name, frozen_registry, **labels):
    """Gets a single metric from a frozen registry.

    Returns the sample value for the exact (name, labels) match, or
    None when no such sample exists.
    """
    wanted = (metric_name, labels)
    for metric in frozen_registry:
        for sample in metric.samples:
            if (sample[0], sample[1]) == wanted:
                return sample[2]
    return None
def format_labels(labels):
    """Format a set of labels to Prometheus representation.

    In:
      {'method': 'GET', 'port': '80'}
    Out:
      '{method="GET",port="80"}'
    """
    inner = ",".join(f'{key}="{value}"' for key, value in labels.items())
    return "{" + inner + "}"
def format_vector(vector):
    """Formats a list of (labels, value) where labels is a dict into a
    human-readable representation, one sample per line."""
    lines = [f"{format_labels(labels)} = {value}" for labels, value in vector]
    return "\n".join(lines)
django-prometheus-2.4.1/django_prometheus/urls.py 0000664 0000000 0000000 00000000243 15026774211 0022231 0 ustar 00root root 0000000 0000000 from django.urls import path
from django_prometheus import exports
# Exposes the Prometheus text endpoint at /metrics.
urlpatterns = [path("metrics", exports.ExportToDjangoView, name="prometheus-django-metrics")]
django-prometheus-2.4.1/django_prometheus/utils.py 0000664 0000000 0000000 00000001503 15026774211 0022404 0 ustar 00root root 0000000 0000000 from timeit import default_timer
def Time():
    """Returns some representation of the current time.

    This wrapper is meant to take advantage of a higher time
    resolution when available. Thus, its return value should be
    treated as an opaque object. It can be compared to the current
    time with TimeSince().
    """
    now = default_timer()
    return now
def TimeSince(t):
    """Compares a value returned by Time() to the current time.

    Returns:
      the time since t, in fractional seconds.
    """
    elapsed = default_timer() - t
    return elapsed
def PowersOf(logbase, count, lower=0, include_zero=True):
    """Returns a list of count powers of logbase (from logbase**lower),
    optionally prefixed with a literal 0."""
    powers = [logbase**exponent for exponent in range(lower, lower + count)]
    if include_zero:
        return [0] + powers
    return powers
django-prometheus-2.4.1/documentation/ 0000775 0000000 0000000 00000000000 15026774211 0020027 5 ustar 00root root 0000000 0000000 django-prometheus-2.4.1/documentation/exports.md 0000664 0000000 0000000 00000011723 15026774211 0022061 0 ustar 00root root 0000000 0000000 # Exports
## Default: exporting /metrics as a Django view
/metrics can be exported as a Django view very easily. Simply
include('django_prometheus.urls') with no prefix like so:
```python
urlpatterns = [
...
path('', include('django_prometheus.urls')),
]
```
This will reserve the /metrics path on your server. This may be a
problem for you, so you can use a prefix. For instance, the following
will export the metrics at `/monitoring/metrics` instead. You will
need to configure Prometheus to use that path instead of the default.
```python
urlpatterns = [
...
path('monitoring/', include('django_prometheus.urls')),
]
```
## Exporting /metrics in a dedicated thread
To ensure that issues in your Django app do not affect the monitoring,
it is recommended to export /metrics in an HTTPServer running in a
daemon thread. This ensures that problems such as thread
starvation or low-level bugs in Django do not affect the export of
your metrics, which may be needed more than ever when such problems
occur.
It can be enabled by adding the following line in your `settings.py`:
```python
PROMETHEUS_METRICS_EXPORT_PORT = 8001
PROMETHEUS_METRICS_EXPORT_ADDRESS = '' # all addresses
```
However, by default this mechanism is disabled, because it is not
compatible with Django's autoreloader. The autoreloader is the feature
that allows you to edit your code and see the changes
immediately. This works by forking multiple processes of Django, which
would compete for the port. As such, this code will assert-fail if the
autoreloader is active.
You can run Django without the autoreloader by passing `--noreload` to
`manage.py`. If you decide to enable the thread-based exporter in
production, you may wish to modify your manage.py to ensure that this
option is always active:
```python
execute_from_command_line(sys.argv + ['--noreload'])
```
## Exporting /metrics in a WSGI application with multiple processes per host
If you're using WSGI (e.g. with uwsgi or with gunicorn) and multiple
Django processes, using either option above won't work, as requests
using the Django view would just go to an inconsistent backend each
time, and exporting on a single port doesn't work.
The following settings can be used instead:
```python
PROMETHEUS_METRICS_EXPORT_PORT_RANGE = range(8001, 8050)
```
This will make Django-Prometheus try to export /metrics on port
8001. If this fails (i.e. the port is in use), it will try 8002, then
8003, etc.
You can then configure Prometheus to collect metrics on as many
targets as you have workers, using each port separately.
This approach requires the application to be loaded into each child process.
uWSGI and Gunicorn typically load the application into the master process before forking the child processes.
Set the [lazy-apps option](https://uwsgi-docs.readthedocs.io/en/latest/Options.html#lazy-apps) to `true` (uWSGI)
or the [preload-app option](https://docs.gunicorn.org/en/stable/settings.html#preload-app) to `false` (Gunicorn)
to change this behaviour.
## Exporting /metrics in a WSGI application with multiple processes globally
In some WSGI applications, workers are short lived (less than a minute), so some
are never scraped by prometheus by default. Prometheus client already provides
a nice system to aggregate them using the env variable: `PROMETHEUS_MULTIPROC_DIR`
which will configure the directory where metrics will be stored as files per process.
Configuration in uwsgi would look like:
```ini
env = PROMETHEUS_MULTIPROC_DIR=/path/to/django_metrics
```
You can also set this environment variable elsewhere such as in a kubernetes manifest.
Setting this will create four files (one for counters, one for summaries, ...etc)
for each pid used. In uwsgi, the number of different pids used can be quite large
(the pid change every time a worker respawn). To prevent having thousand of files
created, it's possible to create file using worker ids rather than pids.
You can change the function used for identifying the process to use the uwsgi worker_id.
Modify this in settings before any metrics are created:
```python
try:
import prometheus_client
import uwsgi
prometheus_client.values.ValueClass = prometheus_client.values.MultiProcessValue(
process_identifier=uwsgi.worker_id)
except ImportError:
pass # not running in uwsgi
```
Note that this code uses internal interfaces of prometheus_client.
The underlying implementation may change.
The number of resulting files will be:
number of processes * 4 (counter, histogram, gauge, summary)
Be aware that by default this will generate a large amount of file descriptors:
Each worker will keep 3 file descriptors for each files it created.
Since these files will be written often, you should consider mounting this directory
as a `tmpfs` or using a subdir of an existing one such as `/run/` or `/var/run/`.
If uwsgi is not using lazy-apps (lazy-apps = false, the default), there will be a
file descriptors leak (tens to hundreds of fds on a single file) due
to the way uwsgi forks processes to create workers.
django-prometheus-2.4.1/examples/ 0000775 0000000 0000000 00000000000 15026774211 0016774 5 ustar 00root root 0000000 0000000 django-prometheus-2.4.1/examples/django-promdash.png 0000664 0000000 0000000 00000455427 15026774211 0022600 0 ustar 00root root 0000000 0000000 PNG
IHDR /b\
sBITO tEXtSoftware gnome-screenshot> IDATxX?`IQDKHՆ!^AZq\#%3~FU\l
|Zc О%+P\+ 4a%0$|E?$wrϼ`d290>"