pax_global_header 0000666 0000000 0000000 00000000064 14770573620 0014525 g ustar 00root root 0000000 0000000 52 comment=ac1a759d95e1953549bc9af46bf46285bec99b70
ansible-runner-2.4.1/ 0000775 0000000 0000000 00000000000 14770573620 0014455 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/.cherry_picker.toml 0000664 0000000 0000000 00000000321 14770573620 0020255 0 ustar 00root root 0000000 0000000 team = "ansible"
repo = "ansible-runner"
check_sha = "cd5e00236a3ddcd8a2d7790ec409cb8355e20721" # the very first commit in repo
fix_commit_msg = false # Don't replace "#" with "GH-"
default_branch = "devel"
ansible-runner-2.4.1/.coveragerc 0000664 0000000 0000000 00000000437 14770573620 0016602 0 ustar 00root root 0000000 0000000 [run]
branch = True
data_file = test/coverage/data/coverage
source =
ansible_runner
[report]
skip_covered = True
skip_empty = True
[html]
directory = test/coverage/reports/html
[xml]
output = test/coverage/reports/coverage.xml
[json]
output = test/coverage/reports/coverage.json
ansible-runner-2.4.1/.dockerignore 0000664 0000000 0000000 00000000005 14770573620 0017124 0 ustar 00root root 0000000 0000000 .tox
ansible-runner-2.4.1/.git_archival.txt 0000664 0000000 0000000 00000000215 14770573620 0017726 0 ustar 00root root 0000000 0000000 node: ac1a759d95e1953549bc9af46bf46285bec99b70
node-date: 2025-03-25T14:36:00-04:00
describe-name: 2.4.1
ref-names: tag: 2.4.1, release_2.4
ansible-runner-2.4.1/.gitattributes 0000664 0000000 0000000 00000000041 14770573620 0017343 0 ustar 00root root 0000000 0000000 .git_archival.txt export-subst
ansible-runner-2.4.1/.github/ 0000775 0000000 0000000 00000000000 14770573620 0016015 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/.github/CODE_OF_CONDUCT.md 0000664 0000000 0000000 00000000242 14770573620 0020612 0 ustar 00root root 0000000 0000000 # Community Code of Conduct
Please see the official [Ansible Community Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).
ansible-runner-2.4.1/.github/ISSUE_TEMPLATE/ 0000775 0000000 0000000 00000000000 14770573620 0020200 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/.github/ISSUE_TEMPLATE/bug_report.yml 0000664 0000000 0000000 00000003635 14770573620 0023102 0 ustar 00root root 0000000 0000000 name: Bug report
description: Report a problem so it can be fixed
labels:
- needs_triage
body:
- type: markdown
attributes:
value: |
**Thank you for reporting a bug in Ansible Runner.**
If you are looking for community support, please visit
the [Community guide](https://ansible.readthedocs.io/projects/runner/en/latest/community/)
for information on how to get in touch.
- type: input
label: Ansible Runner version
description: Output from `ansible-runner --version`
render: console
validations:
required: true
- type: textarea
label: Python version
description: Output from `python -VV`
render: console
validations:
required: true
- type: textarea
label: Operating System and Environment
description: Provide relevant information about the operating system and environment
render: console
placeholder: RHEL 8, Debian 10, output from `/etc/os-release`, any other relevant information
validations:
required: true
- type: input
label: Container Runtime
description: Output from `podman|docker --version`
render: console
placeholder: podman --version
validations:
required: true
- type: textarea
attributes:
label: Summary
description: Briefly explain the problem
validations:
required: true
- type: textarea
attributes:
label: Steps to reproduce
description: Describe in detail how to reproduce the problem
value: |
1.
2.
3.
validations:
required: true
- type: textarea
attributes:
label: Expected Results
description: What was expected to happen when running the steps above
validations:
required: true
- type: textarea
attributes:
label: Actual Results
description: What actually happened when running the steps above
validations:
required: true
ansible-runner-2.4.1/.github/ISSUE_TEMPLATE/config.yml 0000664 0000000 0000000 00000000033 14770573620 0022164 0 ustar 00root root 0000000 0000000 blank_issue_enabled: false
ansible-runner-2.4.1/.github/ISSUE_TEMPLATE/documentation_report.yml 0000664 0000000 0000000 00000001154 14770573620 0025170 0 ustar 00root root 0000000 0000000 name: Documentation Report
description: Report an issue with documentation
labels:
- needs_triage
- docs
body:
- type: markdown
attributes:
value: |
**Thank you reporting an issue with Ansible Runner documentation.**
If you are looking for community support, please visit
the [Community guide](https://ansible.readthedocs.io/projects/runner/en/latest/community/)
for information on how to get in touch.
- type:
attributes:
label: Summary
description: Describe the problem or suggestion with the documentation
validations:
required: true
ansible-runner-2.4.1/.github/ISSUE_TEMPLATE/feature_request.yml 0000664 0000000 0000000 00000001125 14770573620 0024125 0 ustar 00root root 0000000 0000000 name: Feature Request
description: Suggest a new feature
labels:
- needs_triage
- enhancement
body:
- type: markdown
attributes:
value: |
**Thank you for suggesting a new feature for Ansible Runner.**
If you are looking for community support, please visit
the [Community guide](https://ansible.readthedocs.io/projects/runner/en/latest/community/)
for information on how to get in touch.
- type:
attributes:
label: Summary
description: Describe the new feature and how it would be used.
validations:
required: true
ansible-runner-2.4.1/.github/issue_labeler.yml 0000664 0000000 0000000 00000000027 14770573620 0021355 0 ustar 00root root 0000000 0000000 needs_triage:
- '.*'
ansible-runner-2.4.1/.github/patchback.yml 0000664 0000000 0000000 00000000165 14770573620 0020462 0 ustar 00root root 0000000 0000000 ---
backport_branch_prefix: patchback/backports/
backport_label_prefix: backport-
target_branch_prefix: release_
...
ansible-runner-2.4.1/.github/pr_labeler_existing.yml 0000664 0000000 0000000 00000000045 14770573620 0022560 0 ustar 00root root 0000000 0000000 docs:
- docs/*
test:
- 'test/*'
ansible-runner-2.4.1/.github/pr_labeler_new.yml 0000664 0000000 0000000 00000000132 14770573620 0021514 0 ustar 00root root 0000000 0000000 needs_triage:
- '.*'
- '.*/*'
- '*'
- '*/*'
docs:
- docs/*
test:
- 'test/*'
ansible-runner-2.4.1/.github/workflows/ 0000775 0000000 0000000 00000000000 14770573620 0020052 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/.github/workflows/ci.yml 0000664 0000000 0000000 00000006405 14770573620 0021175 0 ustar 00root root 0000000 0000000 name: CI
on:
pull_request:
push:
jobs:
sanity:
name: ${{ matrix.test.name }}
runs-on: ubuntu-22.04
env:
TOXENV: ${{ matrix.test.tox_env }}
PY_COLORS: 1
strategy:
fail-fast: false
matrix:
test:
- name: Lint
tox_env: linters-py310
- name: Docs
tox_env: docs
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0 # this is not ideal, but we need tags available to generate versions in tests
- name: Install tox
run: |
python3 -m pip install --upgrade pip
python3 -m pip install "tox==4.11.3"
- name: Create tox environment
run: tox --notest
- name: Run tests
run: tox
integration:
runs-on: ubuntu-22.04
name: Integration - ${{ matrix.py_version.name }}
env:
TOXENV: ${{ matrix.py_version.tox_env }}
PY_COLORS: 1
strategy:
fail-fast: false
matrix:
py_version:
- name: '3.9'
tox_env: integration-py39
- name: '3.10'
tox_env: integration-py310
- name: '3.11'
tox_env: integration-py311
- name: '3.12'
tox_env: integration-py312
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Install Python ${{ matrix.py_version.name }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.py_version.name }}
- name: Install tox
run: |
python3 -m pip install --upgrade pip
python3 -m pip install "tox==4.11.3" build
- name: Prepare runner test container
run: |
TMPDIR=$(mktemp -d)
cp test/integration/Dockerfile $TMPDIR
pyproject-build -w -o $TMPDIR
pushd $TMPDIR
docker build --build-arg WHEEL=$(ls -1 ansible_runner*.whl) --rm=true -t ansible-runner-gha${{ github.run_id }}-event-test .
podman build --build-arg WHEEL=$(ls -1 ansible_runner*.whl) --rm=true -t ansible-runner-gha${{ github.run_id }}-event-test .
popd
rm -r $TMPDIR
- name: Create tox environment
run: |
tox --notest
- name: Run integration tests
run: |
RUNNER_TEST_IMAGE_NAME=ansible-runner-gha${{ github.run_id }}-event-test tox
unit:
name: Unit - ${{ matrix.py_version.name}}
runs-on: ubuntu-22.04
env:
TOXENV: ${{ matrix.py_version.tox_env }}
PY_COLORS: 1
strategy:
fail-fast: false
matrix:
py_version:
- name: '3.9'
tox_env: unit-py39
- name: '3.10'
tox_env: unit-py310
- name: '3.11'
tox_env: unit-py311
- name: '3.12'
tox_env: unit-py312
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Install Python ${{ matrix.py_version.name }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.py_version.name }}
- name: Install tox
run: |
python3 -m pip install --upgrade pip
python3 -m pip install "tox==4.11.3"
- name: Create tox environment
run: tox --notest
- name: Run tests
run: tox
ansible-runner-2.4.1/.github/workflows/triage_existing.yml 0000664 0000000 0000000 00000001373 14770573620 0023766 0 ustar 00root root 0000000 0000000 name: Triage
on:
issues:
types:
- reopened
pull_request_target:
types:
- reopened
- synchronize
jobs:
triage:
runs-on: ubuntu-latest
name: Label
steps:
- name: Label pull requests
uses: actions/labeler@v3
with:
repo-token: "${{ secrets.GITHUB_TOKEN }}"
configuration-path: .github/pr_labeler_existing.yml
if: github.event_name == 'pull_request_target'
- name: Label issues
uses: github/issue-labeler@v2.4.1
with:
repo-token: "${{ secrets.GITHUB_TOKEN }}"
not-before: 2021-12-07T07:00:00Z
configuration-path: .github/issue_labeler.yml
enable-versioned-regex: 0
if: github.event_name == 'issues'
ansible-runner-2.4.1/.github/workflows/triage_new.yml 0000664 0000000 0000000 00000001336 14770573620 0022724 0 ustar 00root root 0000000 0000000 name: Triage
on:
issues:
types:
- opened
pull_request_target:
types:
- opened
jobs:
triage:
runs-on: ubuntu-latest
name: Label
steps:
- name: Label pull requests
uses: actions/labeler@v3
with:
repo-token: "${{ secrets.GITHUB_TOKEN }}"
configuration-path: .github/pr_labeler_new.yml
if: github.event_name == 'pull_request_target'
- name: Label issues
uses: github/issue-labeler@v2.4.1
with:
repo-token: "${{ secrets.GITHUB_TOKEN }}"
not-before: 2021-12-07T07:00:00Z
configuration-path: .github/issue_labeler.yml
enable-versioned-regex: 0
if: github.event_name == 'issues'
ansible-runner-2.4.1/.gitignore 0000664 0000000 0000000 00000000547 14770573620 0016453 0 ustar 00root root 0000000 0000000 # Test artifacts
*.py,cover
.pytest_cache/
docs/_build/
docs/build/
pytestdebug.log
test/coverage/
# Byte-complied files
*.py[cod]
__pycache__/
# Distribution / packaging
*.egg
*.egg-info/
.eggs/
AUTHORS
Changelog
build/
deb-build/
dist/
eggs/
rpm-build/
# Environments
.env
.python-version
.tox/
.venv
venv/
# Demo files
/demo/artifacts
/demo/daemon.log
ansible-runner-2.4.1/.pre-commit-config.yaml 0000664 0000000 0000000 00000000162 14770573620 0020735 0 ustar 00root root 0000000 0000000 ---
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v2.0.0
hooks:
- id: flake8
ansible-runner-2.4.1/.readthedocs.yaml 0000664 0000000 0000000 00000000516 14770573620 0017706 0 ustar 00root root 0000000 0000000 version: 2
formats:
- epub
- pdf
build:
os: ubuntu-20.04
tools:
python: '3.10'
sphinx:
builder: dirhtml
configuration: docs/conf.py
fail_on_warning: true
python:
install:
# Installation is needed in order to get the version string
- method: pip
path: .
- requirements: docs/requirements.txt
ansible-runner-2.4.1/.yamllint 0000664 0000000 0000000 00000000736 14770573620 0016315 0 ustar 00root root 0000000 0000000 extends: default
ignore-from-file: .gitignore
rules:
braces:
max-spaces-inside: 1
level: error
brackets:
max-spaces-inside: 1
level: error
comments:
min-spaces-from-content: 1 # prettier compatibility
line-length: disable
document-start: disable
truthy:
allowed-values:
- 'True'
- 'true'
- 'Yes'
- 'yes'
- 'On'
- 'on'
- 'False'
- 'false'
- 'No'
- 'no'
- 'Off'
- 'off'
ansible-runner-2.4.1/CODEOWNERS 0000664 0000000 0000000 00000000462 14770573620 0016052 0 ustar 00root root 0000000 0000000 # For more info on CODEOWNERS file, see:
# - https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/creating-a-repository-on-github/about-code-owners
# - https://github.blog/2017-07-06-introducing-code-owners/
# Default owners for everything in the repo.
* @ansible/executor
ansible-runner-2.4.1/CONTRIBUTING.md 0000664 0000000 0000000 00000002757 14770573620 0016721 0 ustar 00root root 0000000 0000000 # Ansible Runner Contributing Guidelines
Hi there! We're excited to have you as a contributor.
If you have questions about this document or anything not covered here?
See the [Community section](https://ansible.readthedocs.io/projects/runner/en/latest/community/) of the docs for information about getting in touch.
## Things to know prior to submitting code
- All code and doc submissions are done through pull requests against the `devel` branch.
- Take care to make sure no merge commits are in the submission, and use `git rebase` vs `git merge` for this reason.
- We ask all of our community members and contributors to adhere to the [Ansible code of conduct]. If you have questions, or need assistance, please reach out to our community team at [codeofconduct@ansible.com].
## Setting up your development environment
In this example we are using [virtualenvwrapper](https://virtualenvwrapper.readthedocs.io/en/latest/), but any virtual environment will do.
```bash
(host)$ pip install virtualenvwrapper
(host)$ mkvirtualenv ansible-runner
(host)$ pip install -e .
```
When done making changes, run:
```
(host)$ deactivate
```
To reactivate the virtual environment:
```
(host)$ workon ansible-runner
```
## Linting and Unit Tests
`tox` is used to run linters (`flake8` and `yamllint`) and tests.
```
(host)$ pip install tox
(host)$ tox
```
[Ansible code of conduct]: http://docs.ansible.com/ansible/latest/community/code_of_conduct.html
[codeofconduct@ansible.com]: mailto:codeofconduct@ansible.com
ansible-runner-2.4.1/LICENSE.md 0000664 0000000 0000000 00000022130 14770573620 0016057 0 ustar 00root root 0000000 0000000 Apache License
==============
_Version 2.0, January 2004_
_<>_
### Terms and Conditions for use, reproduction, and distribution
#### 1. Definitions
“License” shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.
“Licensor” shall mean the copyright owner or entity authorized by the copyright
owner that is granting the License.
“Legal Entity” shall mean the union of the acting entity and all other entities
that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, “control” means **(i)** the power, direct or
indirect, to cause the direction or management of such entity, whether by
contract or otherwise, or **(ii)** ownership of fifty percent (50%) or more of the
outstanding shares, or **(iii)** beneficial ownership of such entity.
“You” (or “Your”) shall mean an individual or Legal Entity exercising
permissions granted by this License.
“Source” form shall mean the preferred form for making modifications, including
but not limited to software source code, documentation source, and configuration
files.
“Object” form shall mean any form resulting from mechanical transformation or
translation of a Source form, including but not limited to compiled object code,
generated documentation, and conversions to other media types.
“Work” shall mean the work of authorship, whether in Source or Object form, made
available under the License, as indicated by a copyright notice that is included
in or attached to the work (an example is provided in the Appendix below).
“Derivative Works” shall mean any work, whether in Source or Object form, that
is based on (or derived from) the Work and for which the editorial revisions,
annotations, elaborations, or other modifications represent, as a whole, an
original work of authorship. For the purposes of this License, Derivative Works
shall not include works that remain separable from, or merely link (or bind by
name) to the interfaces of, the Work and Derivative Works thereof.
“Contribution” shall mean any work of authorship, including the original version
of the Work and any modifications or additions to that Work or Derivative Works
thereof, that is intentionally submitted to Licensor for inclusion in the Work
by the copyright owner or by an individual or Legal Entity authorized to submit
on behalf of the copyright owner. For the purposes of this definition,
“submitted” means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems, and
issue tracking systems that are managed by, or on behalf of, the Licensor for
the purpose of discussing and improving the Work, but excluding communication
that is conspicuously marked or otherwise designated in writing by the copyright
owner as “Not a Contribution.”
“Contributor” shall mean Licensor and any individual or Legal Entity on behalf
of whom a Contribution has been received by Licensor and subsequently
incorporated within the Work.
#### 2. Grant of Copyright License
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the Work and such
Derivative Works in Source or Object form.
#### 3. Grant of Patent License
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to make, have
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
such license applies only to those patent claims licensable by such Contributor
that are necessarily infringed by their Contribution(s) alone or by combination
of their Contribution(s) with the Work to which such Contribution(s) was
submitted. If You institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
Contribution incorporated within the Work constitutes direct or contributory
patent infringement, then any patent licenses granted to You under this License
for that Work shall terminate as of the date such litigation is filed.
#### 4. Redistribution
You may reproduce and distribute copies of the Work or Derivative Works thereof
in any medium, with or without modifications, and in Source or Object form,
provided that You meet the following conditions:
* **(a)** You must give any other recipients of the Work or Derivative Works a copy of
this License; and
* **(b)** You must cause any modified files to carry prominent notices stating that You
changed the files; and
* **(c)** You must retain, in the Source form of any Derivative Works that You distribute,
all copyright, patent, trademark, and attribution notices from the Source form
of the Work, excluding those notices that do not pertain to any part of the
Derivative Works; and
* **(d)** If the Work includes a “NOTICE” text file as part of its distribution, then any
Derivative Works that You distribute must include a readable copy of the
attribution notices contained within such NOTICE file, excluding those notices
that do not pertain to any part of the Derivative Works, in at least one of the
following places: within a NOTICE text file distributed as part of the
Derivative Works; within the Source form or documentation, if provided along
with the Derivative Works; or, within a display generated by the Derivative
Works, if and wherever such third-party notices normally appear. The contents of
the NOTICE file are for informational purposes only and do not modify the
License. You may add Your own attribution notices within Derivative Works that
You distribute, alongside or as an addendum to the NOTICE text from the Work,
provided that such additional attribution notices cannot be construed as
modifying the License.
You may add Your own copyright statement to Your modifications and may provide
additional or different license terms and conditions for use, reproduction, or
distribution of Your modifications, or for any such Derivative Works as a whole,
provided Your use, reproduction, and distribution of the Work otherwise complies
with the conditions stated in this License.
#### 5. Submission of Contributions
Unless You explicitly state otherwise, any Contribution intentionally submitted
for inclusion in the Work by You to the Licensor shall be under the terms and
conditions of this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify the terms of
any separate license agreement you may have executed with Licensor regarding
such Contributions.
#### 6. Trademarks
This License does not grant permission to use the trade names, trademarks,
service marks, or product names of the Licensor, except as required for
reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.
#### 7. Disclaimer of Warranty
Unless required by applicable law or agreed to in writing, Licensor provides the
Work (and each Contributor provides its Contributions) on an “AS IS” BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
including, without limitation, any warranties or conditions of TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
solely responsible for determining the appropriateness of using or
redistributing the Work and assume any risks associated with Your exercise of
permissions under this License.
#### 8. Limitation of Liability
In no event and under no legal theory, whether in tort (including negligence),
contract, or otherwise, unless required by applicable law (such as deliberate
and grossly negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special, incidental,
or consequential damages of any character arising as a result of this License or
out of the use or inability to use the Work (including but not limited to
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
any and all other commercial damages or losses), even if such Contributor has
been advised of the possibility of such damages.
#### 9. Accepting Warranty or Additional Liability
While redistributing the Work or Derivative Works thereof, You may choose to
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
other liability obligations and/or rights consistent with this License. However,
in accepting such obligations, You may act only on Your own behalf and on Your
sole responsibility, not on behalf of any other Contributor, and only if You
agree to indemnify, defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason of your
accepting any such warranty or additional liability.
ansible-runner-2.4.1/MANIFEST.in 0000664 0000000 0000000 00000000046 14770573620 0016213 0 ustar 00root root 0000000 0000000
include README.md
include LICENSE.md
ansible-runner-2.4.1/README.md 0000664 0000000 0000000 00000003413 14770573620 0015735 0 ustar 00root root 0000000 0000000 Ansible Runner
==============
[](https://pypi.org/project/ansible-runner/)
[](https://ansible-runner.readthedocs.io/en/latest/)
[](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html)
[](https://codecov.io/gh/ansible/ansible-runner)
Ansible Runner is a tool and Python library that helps when interfacing with Ansible directly or as part of another system. Ansible Runner works as a standalone tool, a container image interface, or a Python module that can be imported. The goal is to provide a stable and consistent interface abstraction to Ansible.
See the [latest documentation] for usage details.
Get Involved
------------
* [GitHub issues] to track bug report and feature ideas
* [GitHub Milestones] to track what's for the next release
* Want to contribute? Please check out our [contributing guide]
* Visit the [Community] section of the docs.
* See the [Ansible communication guide] for complete information about getting in touch.
[GitHub issues]: https://github.com/ansible/ansible-runner/issues
[GitHub Milestones]: https://github.com/ansible/ansible-runner/milestones
[contributing guide]: https://github.com/ansible/ansible-runner/blob/devel/CONTRIBUTING.md
[Community]: https://ansible.readthedocs.io/projects/runner/en/latest/community/
[Ansible communication guide]: https://docs.ansible.com/ansible/devel/community/communication.html
[latest documentation]: https://ansible.readthedocs.io/projects/runner/en/latest/
ansible-runner-2.4.1/SECURITY.md 0000664 0000000 0000000 00000000471 14770573620 0016250 0 ustar 00root root 0000000 0000000 # Security Policy
## Reporting a Vulnerability
For all security related bugs, email security@ansible.com instead of using this issue tracker and you will receive a prompt response.
For more information on the Ansible community's practices regarding responsible disclosure, see https://www.ansible.com/security
ansible-runner-2.4.1/demo/ 0000775 0000000 0000000 00000000000 14770573620 0015401 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/demo/env/ 0000775 0000000 0000000 00000000000 14770573620 0016171 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/demo/env/envvars 0000664 0000000 0000000 00000000022 14770573620 0017572 0 ustar 00root root 0000000 0000000 ---
TESTVAR: aval
ansible-runner-2.4.1/demo/env/extravars 0000664 0000000 0000000 00000000050 14770573620 0020126 0 ustar 00root root 0000000 0000000 ---
ansible_connection: local
test: val
ansible-runner-2.4.1/demo/env/passwords 0000664 0000000 0000000 00000000047 14770573620 0020142 0 ustar 00root root 0000000 0000000 ---
"Password:\\s*?$": "some_password"
ansible-runner-2.4.1/demo/env/settings 0000664 0000000 0000000 00000000074 14770573620 0017755 0 ustar 00root root 0000000 0000000 ---
idle_timeout: 600
job_timeout: 3600
pexpect_timeout: 10
ansible-runner-2.4.1/demo/env/ssh_key 0000664 0000000 0000000 00000000000 14770573620 0017547 0 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/demo/inventory/ 0000775 0000000 0000000 00000000000 14770573620 0017436 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/demo/inventory/hosts 0000664 0000000 0000000 00000000136 14770573620 0020521 0 ustar 00root root 0000000 0000000 localhost ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
ansible-runner-2.4.1/demo/project/ 0000775 0000000 0000000 00000000000 14770573620 0017047 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/demo/project/roles/ 0000775 0000000 0000000 00000000000 14770573620 0020173 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/demo/project/roles/testrole/ 0000775 0000000 0000000 00000000000 14770573620 0022034 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/demo/project/roles/testrole/README.md 0000664 0000000 0000000 00000002460 14770573620 0023315 0 ustar 00root root 0000000 0000000 Role Name
=========
A brief description of the role goes here.
Requirements
------------
Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
Role Variables
--------------
A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
Dependencies
------------
A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
Example Playbook
----------------
Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
- hosts: servers
roles:
- { role: username.rolename, x: 42 }
License
-------
BSD
Author Information
------------------
An optional section for the role authors to include contact information, or a website (HTML is not allowed).
ansible-runner-2.4.1/demo/project/roles/testrole/defaults/ 0000775 0000000 0000000 00000000000 14770573620 0023643 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/demo/project/roles/testrole/defaults/main.yml 0000664 0000000 0000000 00000000041 14770573620 0025305 0 ustar 00root root 0000000 0000000 ---
# defaults file for testrole
ansible-runner-2.4.1/demo/project/roles/testrole/handlers/ 0000775 0000000 0000000 00000000000 14770573620 0023634 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/demo/project/roles/testrole/handlers/main.yml 0000664 0000000 0000000 00000000041 14770573620 0025276 0 ustar 00root root 0000000 0000000 ---
# handlers file for testrole
ansible-runner-2.4.1/demo/project/roles/testrole/meta/ 0000775 0000000 0000000 00000000000 14770573620 0022762 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/demo/project/roles/testrole/meta/main.yml 0000664 0000000 0000000 00000003347 14770573620 0024440 0 ustar 00root root 0000000 0000000 ---
galaxy_info:
author: your name
description: your description
company: your company (optional)
# If the issue tracker for your role is not on github, uncomment the
# next line and provide a value
# issue_tracker_url: http://example.com/issue/tracker
# Some suggested licenses:
# - BSD (default)
# - MIT
# - GPLv2
# - GPLv3
# - Apache
# - CC-BY
license: license (GPLv2, CC-BY, etc)
min_ansible_version: 1.2
# If this a Container Enabled role, provide the minimum Ansible Container version.
# min_ansible_container_version:
# Optionally specify the branch Galaxy will use when accessing the GitHub
# repo for this role. During role install, if no tags are available,
# Galaxy will use this branch. During import Galaxy will access files on
# this branch. If Travis integration is configured, only notifications for this
# branch will be accepted. Otherwise, in all cases, the repo's default branch
# (usually master) will be used.
# github_branch:
#
# platforms is a list of platforms, and each platform has a name and a list of versions.
#
# platforms:
# - name: Fedora
# versions:
# - all
# - 25
# - name: SomePlatform
# versions:
# - all
# - 1.0
# - 7
# - 99.99
galaxy_tags: []
# List tags for your role here, one per line. A tag is a keyword that describes
# and categorizes the role. Users find roles by searching for tags. Be sure to
# remove the '[]' above, if you add tags to this list.
#
# NOTE: A tag is limited to a single word comprised of alphanumeric characters.
# Maximum 20 tags per role.
dependencies: []
# List your role dependencies here, one per line. Be sure to remove the '[]' above,
# if you add dependencies to this list.
ansible-runner-2.4.1/demo/project/roles/testrole/tasks/ 0000775 0000000 0000000 00000000000 14770573620 0023161 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/demo/project/roles/testrole/tasks/main.yml 0000664 0000000 0000000 00000000201 14770573620 0024621 0 ustar 00root root 0000000 0000000 ---
# tasks file for testrole
- name: just print a message to stdout
debug:
msg: "hello from the ansible-runner testrole!"
ansible-runner-2.4.1/demo/project/roles/testrole/tests/ 0000775 0000000 0000000 00000000000 14770573620 0023176 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/demo/project/roles/testrole/tests/inventory 0000664 0000000 0000000 00000000013 14770573620 0025150 0 ustar 00root root 0000000 0000000 localhost
ansible-runner-2.4.1/demo/project/roles/testrole/tests/test.yml 0000664 0000000 0000000 00000000103 14770573620 0024672 0 ustar 00root root 0000000 0000000 ---
- hosts: localhost
remote_user: root
roles:
- testrole
ansible-runner-2.4.1/demo/project/roles/testrole/vars/ 0000775 0000000 0000000 00000000000 14770573620 0023007 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/demo/project/roles/testrole/vars/main.yml 0000664 0000000 0000000 00000000035 14770573620 0024454 0 ustar 00root root 0000000 0000000 ---
# vars file for testrole
ansible-runner-2.4.1/demo/project/test.yml 0000664 0000000 0000000 00000000063 14770573620 0020550 0 ustar 00root root 0000000 0000000 ---
- hosts: all
tasks:
- debug: msg="Test!"
ansible-runner-2.4.1/docs/ 0000775 0000000 0000000 00000000000 14770573620 0015405 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/docs/Makefile 0000664 0000000 0000000 00000001143 14770573620 0017044 0 ustar 00root root 0000000 0000000 # Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
SPHINXPROJ = ansible-runner
SOURCEDIR = .
BUILDDIR = _build
# Put it first so that "make" without argument is like "make help".
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
.PHONY: help Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) ansible-runner-2.4.1/docs/ansible_runner.config.rst 0000664 0000000 0000000 00000000341 14770573620 0022407 0 ustar 00root root 0000000 0000000 ansible_runner.config package
================================
Submodules
----------
ansible_runner.config.runner module
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autoclass:: ansible_runner.config.runner.RunnerConfig
ansible-runner-2.4.1/docs/ansible_runner.display_callback.rst 0000664 0000000 0000000 00000000675 14770573620 0024435 0 ustar 00root root 0000000 0000000 ansible_runner.display_callback package
=======================================
Submodules
----------
ansible_runner.display_callback.callback.awx_display module
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. automodule:: ansible_runner.display_callback.callback.awx_display
:members:
:undoc-members:
Module contents
---------------
.. automodule:: ansible_runner.display_callback
:members:
:undoc-members:
ansible-runner-2.4.1/docs/ansible_runner.rst 0000664 0000000 0000000 00000002166 14770573620 0021152 0 ustar 00root root 0000000 0000000 ansible_runner package
======================
Subpackages
-----------
.. toctree::
ansible_runner.config
ansible_runner.display_callback
Submodules
----------
ansible_runner.exceptions module
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. automodule:: ansible_runner.exceptions
:members:
:undoc-members:
:show-inheritance:
ansible_runner.interface module
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. automodule:: ansible_runner.interface
:members:
:undoc-members:
:show-inheritance:
ansible_runner.loader module
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. automodule:: ansible_runner.loader
:members:
:undoc-members:
:show-inheritance:
ansible_runner.runner module
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. automodule:: ansible_runner.runner
:members:
:undoc-members:
:show-inheritance:
ansible_runner.runner\_config module
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. automodule:: ansible_runner.runner_config
:members:
:undoc-members:
:show-inheritance:
ansible_runner.utils module
^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. automodule:: ansible_runner.utils
:members:
:undoc-members:
:show-inheritance:
ansible-runner-2.4.1/docs/community.rst 0000664 0000000 0000000 00000003375 14770573620 0020173 0 ustar 00root root 0000000 0000000 .. _community:
Community
=========
We welcome your feedback, questions and ideas.
Here's how to reach the community.
Code of Conduct
---------------
All communication and interactions in the Ansible community are governed by the `Ansible code of conduct `_.
Please read and abide by it!
Reach out to our community team at `codeofconduct@ansible.com `_ if you have any questions or need assistance.
Ansible Forum
-------------
Join the `Ansible Forum `_, the default communication platform for questions and help, development discussions, events, and much more.
`Register `_ to join the community.
Search by categories and tags to find interesting topics or start a new one; subscribe only to topics you need!
* `Get Help `_: get help or help others. Please add appropriate tags if you start new discussions, for example ``ansible-runner``, ``playbook``, and ``awx``.
* `Posts tagged with 'ansible-runner' `_: subscribe to participate in project-related conversations.
* `Bullhorn newsletter `_: used to announce releases and important changes.
* `Social Spaces `_: gather and interact with fellow enthusiasts.
* `News & Announcements `_: track project-wide announcements including social events.
See `Navigating the Ansible forum `_ for some practical advice on finding your way around.
ansible-runner-2.4.1/docs/conf.py 0000664 0000000 0000000 00000013256 14770573620 0016713 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import datetime
from ansible_runner.__main__ import VERSION
# -- Project information -----------------------------------------------------
def _get_version():
version_parts = VERSION.split('.', 3)[:3]
return '.'.join(version_parts)
nitpicky = True
default_role = 'any' # This catches single backticks (incorrectly) used for inline code formatting
project = 'ansible-runner'
copyright = f'2018-{datetime.datetime.today().year}, Red Hat, Inc'
author = 'Red Hat, Inc.'
version = _get_version()
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
"sphinx_ansible_theme",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_ansible_theme'
html_title = "Ansible Runner Documentation"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'display_version': False,
'titles_only': False,
'documentation_home_url': 'https://ansible-runner.readthedocs.io/en/stable/',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'ansible-runnerdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ansible-runner.tex', 'ansible-runner Documentation',
'Red Hat Ansible', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'ansible-runner', 'ansible-runner Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ansible-runner', 'ansible-runner Documentation',
author, 'ansible-runner', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('https://docs.python.org/3', None)}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
ansible-runner-2.4.1/docs/execution_environments.rst 0000664 0000000 0000000 00000012325 14770573620 0022754 0 ustar 00root root 0000000 0000000 .. _execution_environments:
Using Runner with Execution Environments
========================================
.. note::
For an Execution Environments general technology overview and to learn how get started using it in a few easy steps, see the `Getting started with Execution Environments guide `_.
**Execution Environments** are meant to be a consistent, reproducible, portable,
and shareable method to run Ansible Automation jobs in the exact same way on
your laptop as they are executed in `Ansible AWX `_.
This aids in the development of automation jobs and Ansible Content that is
meant to be run in **Ansible AWX**,
or via `Red Hat Ansible Automation Platform `_
in a predictable way.
More specifically, the term **Execution Environments** within the context of
**Ansible Runner** refers to the container runtime execution of **Ansible** via
**Ansible Runner** within an `OCI Compliant Container Runtime
`_ using an `OCI Compliant
Container Image `_ that
appropriately bundles `Ansible Base `_,
`Ansible Collection Content `_,
and the runtime dependencies required to support these contents.
The build tooling provided by `Ansible Builder `_
aids in the creation of these images.
All aspects of running **Ansible Runner** in standalone mode (see: :ref:`standalone`)
are true here with the exception that the process isolation is inherently a
container runtime (`podman `_ by default).
Using Execution Environments from Protected Registries
------------------------------------------------------
When a job is run that uses an execution environment container image from a private/protected registry,
you will first need to authenticate to the registry.
If you are running the job manually via ``ansible-runner run``, logging in on the command line via
``podman login`` first is a method of authentication. Alternatively, creating a ``container_auth_data``
dictionary with the keys ``host``, ``username``, and ``password`` and putting that in the job's ``env/settings``
file is another way to ensure a successful pull of a protected execution environment container image.
Note that this involves listing sensitive information in a file which will not automatically get cleaned
up after the job run is complete.
When running a job remotely via AWX, Ansible Runner can pick up the authentication
information from the Container Registry Credential that was provided by the user. The ``host``,
``username``, ``password``, and ``verify_ssl`` inputs from the credential are passed into Ansible Runner via the ``container_auth_data``
dictionary as key word arguments into a ``json`` file which gets deleted at the end of the job run (even if
the job was cancelled/interrupted), enabling the bypassing of sensitive information from any potentially
persistent job-related files.
Notes and Considerations
------------------------
There are some differences between using Ansible Runner and running Ansible directly from the
command line that have to do with configuration, content locality, and secret data.
Secrets
^^^^^^^
Typically with Ansible you are able to provide secret data via a series of
mechanisms, many of which are pluggable and configurable. When using
Ansible Runner, however, certain considerations need to be made; these are analogous to
how Ansible AWX manage this information.
See :ref:`inputdir` for more information
Container Names
^^^^^^^^^^^^^^^
Like all ansible-runner jobs, each job has an identifier associated with it
which is also the name of the artifacts subfolder where results are saved to.
When a container for job isolation is launched, it will be given a name
of ``ansible_runner_``. Some characters from the job
identifier may be replaced with underscores for compatibility with
names that Podman and Docker allow.
This name is used internally if a command needs to be ran against the container
at a later time (e.g., to stop the container when the job is canceled).
~/.ssh/ symlinks
^^^^^^^^^^^^^^^^
In order to make the ``run`` container execution of Ansible
easier, Ansible Runner will automatically bind mount your local ssh agent
UNIX-domain socket (``SSH_AUTH_SOCK``) into the container runtime. However, this
does not work if files in your ``~/.ssh/`` directory happen to be symlinked to
another directory that is also not mounted into the container runtime. The Ansible
Runner ``run`` subcommand provides the ``--container-volume-mount``
option to address this, among other things.
Here is an example of an ssh config file that is a symlink:
::
$ $ ls -l ~/.ssh/config
lrwxrwxrwx. 1 myuser myuser 34 Jul 15 19:27 /home/myuser/.ssh/config -> /home/myuser/dotfiles/ssh_config
$ ansible-runner run \
--container-volume-mount /home/myuser/dotfiles/:/home/myuser/dotfiles/ \
--process-isolation --process-isolation-executable podman \
/tmp/private --playbook my_playbook.yml -i my_inventory.ini
ansible-runner-2.4.1/docs/external_interface.rst 0000664 0000000 0000000 00000004446 14770573620 0022011 0 ustar 00root root 0000000 0000000 .. _externalintf:
Sending Runner Status and Events to External Systems
====================================================
**Runner** can store event and status data locally for retrieval, it can also emit this information via callbacks provided to the module interface.
Alternatively **Runner** can be configured to send events to an external system via installable plugins. Currently, there are two example plugins are available.
* HTTP Status/Event Emitter Plugin - `ansible-runner-http GitHub repo `_
* ZeroMQ Status/Event Emitter Plugin - `ansible-runner-zeromq GitHub repo `_
Please refer to respective repos to configure these plugins.
.. _plugineventstructure:
Event Structure
---------------
There are two types of events that are emitted via plugins:
* status events:
These are sent whenever Runner's status changes (see :ref:`runnerstatushandler`) for example::
{"status": "running", "runner_ident": "XXXX" }
* ansible events:
These are sent during playbook execution for every event received from **Ansible** (see :ref:`Playbook and Host Events`) for example::
{"runner_ident": "XXXX", }
Writing your own Plugin
-----------------------
In order to write your own plugin interface and have it be picked up and used by **Runner** there are a few things that you'll need to do.
* Declare the module as a Runner entrypoint in your setup file
(`ansible-runner-http has a good example of this `_)::
entry_points=('ansible_runner.plugins': 'modname = your_python_package_name'),
* Implement the ``status_handler()`` and ``event_handler()`` functions at the top of your package, for example see
`ansible-runner-http events.py `_ and the ``__init__``
import `at the top of the module package `_
After installing this, **Runner** will see the plugin and invoke the functions when status and events are sent. If there are any errors in your plugin
they will be raised immediately and **Runner** will fail.
ansible-runner-2.4.1/docs/index.rst 0000664 0000000 0000000 00000004107 14770573620 0017250 0 ustar 00root root 0000000 0000000 .. ansible-runner documentation master file, created by
sphinx-quickstart on Tue May 1 10:47:37 2018.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Ansible Runner
==============
Ansible Runner is a tool and python library that helps when interfacing with Ansible directly or as part of another system
whether that be through a container image interface, as a standalone tool, or as a Python module that can be imported. The goal
is to provide a stable and consistent interface abstraction to Ansible. This allows **Ansible** to be embedded into other systems that don't
want to manage the complexities of the interface on their own (such as CI/CD platforms, Jenkins, or other automated tooling).
**Ansible Runner** represents the modularization of the part of `Ansible AWX `_ that is responsible
for running ``ansible`` and ``ansible-playbook`` tasks and gathers the output from it. It does this by presenting a common interface that doesn't
change, even as **Ansible** itself grows and evolves.
Part of what makes this tooling useful is that it can gather its inputs in a flexible way (See :ref:`intro`:). It also has a system for storing the
output (stdout) and artifacts (host-level event data, fact data, etc) of the playbook run.
There are 3 primary ways of interacting with **Runner**
* A standalone command line tool (``ansible-runner``) that can be started in the foreground or run in the background asynchronously
* A python module - library interface
**Ansible Runner** can also be configured to send status and event data to other systems using a plugin interface, see :ref:`externalintf`.
Examples of this could include:
* Sending status to Ansible AWX
* Sending events to an external logging service
.. toctree::
:maxdepth: 1
:caption: Contents:
intro
install
community
external_interface
standalone
python_interface
execution_environments
remote_jobs
modules
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
ansible-runner-2.4.1/docs/install.rst 0000664 0000000 0000000 00000002151 14770573620 0017604 0 ustar 00root root 0000000 0000000 .. _install:
Installing Ansible Runner
=========================
Ansible Runner requires Python >= 3.9 and is provided from several different locations depending on how you want to use it.
Using pip
---------
To install the latest version from the Python Package Index::
$ pip install ansible-runner
Fedora
------
To install from the Fedora repositories::
$ dnf install python3-ansible-runner
From source
-----------
Check out the source code from `github `_::
$ git clone git://github.com/ansible/ansible-runner
Or download from the `releases page `_
Create a virtual environment using Python and activate it::
$ virtualenv env
$ source env/bin/activate
Then install::
$ cd ansible-runner
$ pip install -e .
.. _builddist:
Build the distribution
----------------------
To produce both wheel and sdist::
$ python3 -m pip install build
$ python3 -m build
To only produce an installable ``wheel``::
$ python3 -m build --wheel
To produce a distribution tarball::
$ python3 -m build --sdist
ansible-runner-2.4.1/docs/intro.rst 0000664 0000000 0000000 00000044745 14770573620 0017310 0 ustar 00root root 0000000 0000000 .. _intro:
Introduction to Ansible Runner
==============================
**Runner** is intended to be most useful as part of automation and tooling that needs to invoke Ansible and consume its results.
Most of the parameterization of the **Ansible** command line is also available on the **Runner** command line but **Runner** also
can rely on an input interface that is mapped onto a directory structure, an example of which can be seen in `the source tree `_.
Further sections in this document refer to the configuration and layout of that hierarchy. This isn't the only way to interface with **Runner**
itself. The Python module interface allows supplying these details as direct module parameters in many forms, and the command line interface allows
supplying them directly as arguments, mimicking the behavior of ``ansible-playbook``. Having the directory structure **does** allow gathering the inputs
from elsewhere and preparing them for consumption by **Runner**, then the tooling can come along and inspect the results after the run.
This is best seen in the way Ansible **AWX** uses **Runner** where most of the content comes from the database (and other content-management components) but
ultimately needs to be brought together in a single place when launching the **Ansible** task.
.. _inputdir:
Runner Input Directory Hierarchy
--------------------------------
This directory contains all necessary inputs. Here's a view of the `demo directory `_ showing
an active configuration.
Note that not everything is required. Defaults will be used or values will be omitted if they are not provided.
.. code-block:: none
.
├── env
│ ├── envvars
│ ├── extravars
│ ├── passwords
│ ├── cmdline
│ ├── settings
│ └── ssh_key
├── inventory
│ └── hosts
└── project
├── test.yml
└── roles
└── testrole
├── defaults
├── handlers
├── meta
├── README.md
├── tasks
├── tests
└── vars
The ``env`` directory
---------------------
The **env** directory contains settings and sensitive files that inform certain aspects of the invocation of the **Ansible** process, an example of which can
be found in `the demo env directory `_. Each of these files can also be represented by a named
pipe providing a bit of an extra layer of security. The formatting and expectation of these files differs slightly depending on what they are representing.
``env/envvars``
---------------
.. note::
For an example see `the demo envvars `_.
**Ansible Runner** will inherit the environment of the launching shell. This file (which can be in json or yaml format) represents
the environment variables that will be added to the environment at run-time::
---
TESTVAR: exampleval
``env/extravars``
-----------------
.. note::
For an example see `the demo extravars `_.
**Ansible Runner** gathers the extra vars provided here and supplies them to the **Ansible Process** itself. This file can be in either json or yaml format::
---
ansible_connection: local
test: val
``env/passwords``
-----------------
.. note::
For an example see `the demo passwords `_.
.. warning::
We expect this interface to change/simplify in the future but will guarantee backwards compatibility. The goal is for the user of **Runner** to not
have to worry about the format of certain prompts emitted from **Ansible** itself. In particular, vault passwords need to become more flexible.
**Ansible** itself is set up to emit passwords to certain prompts, these prompts can be requested (``-k`` for example to prompt for the connection password).
Likewise, prompts can be emitted via `vars_prompt `_ and also
`Ansible Vault `_.
In order for **Runner** to respond with the correct password, it needs to be able to match the prompt and provide the correct password. This is currently supported
by providing a yaml or json formatted file with a regular expression and a value to emit, for example::
---
"^SSH password:\\s*?$": "some_password"
"^BECOME password.*:\\s*?$": "become_password"
``env/cmdline``
---------------
.. warning::
Current **Ansible Runner** does not validate the command line arguments passed using this method so it is up to the playbook writer to provide a valid set of options.
The command line options provided by this method are lower priority than the ones set by **Ansible Runner**. For instance, this will not override ``inventory`` or ``limit`` values.
**Ansible Runner** gathers command line options provided here as a string and supplies them to the **Ansible Process** itself. This file should contain the arguments to be added, for example::
--tags one,two --skip-tags three -u ansible --become
``env/ssh_key``
---------------
.. note::
Currently only a single ssh key can be provided via this mechanism but this is set to `change soon `_.
This file should contain the ssh private key used to connect to the host(s). **Runner** detects when a private key is provided and will wrap the call to
**Ansible** in ssh-agent.
.. _runnersettings:
``env/settings`` - Settings for Runner itself
---------------------------------------------
The **settings** file is a little different than the other files provided in this section in that its contents are meant to control **Runner** directly.
* ``idle_timeout``: ``600`` If no output is detected from ansible in this number of seconds the execution will be terminated.
* ``job_timeout``: ``3600`` The maximum amount of time to allow the job to run for, exceeding this and the execution will be terminated.
* ``pexpect_timeout``: ``10`` Number of seconds for the internal pexpect command to wait to block on input before continuing
* ``pexpect_use_poll``: ``True`` Use ``poll()`` function for communication with child processes instead of ``select()``. ``select()`` is used when the value is set to ``False``. ``select()`` has a known limitation of using only up to 1024 file descriptors.
* ``suppress_output_file``: ``False`` Allow output from ansible to not be streamed to the ``stdout`` or ``stderr`` files inside of the artifacts directory.
* ``suppress_ansible_output``: ``False`` Allow output from ansible to not be printed to the screen.
* ``fact_cache``: ``'fact_cache'`` The directory relative to ``artifacts`` where ``jsonfile`` fact caching will be stored. Defaults to ``fact_cache``. This is ignored if ``fact_cache_type`` is different than ``jsonfile``.
* ``fact_cache_type``: ``'jsonfile'`` The type of fact cache to use. Defaults to ``jsonfile``.
Process Isolation Settings for Runner
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The process isolation settings are meant to control the process isolation feature of **Runner**.
* ``process_isolation``: ``False`` Enable limiting what directories on the filesystem the playbook run has access to.
* ``process_isolation_executable``: ``bwrap`` Path to the executable that will be used to provide filesystem isolation.
* ``process_isolation_path``: ``/tmp`` Path that an isolated playbook run will use for staging.
* ``process_isolation_hide_paths``: ``None`` Path or list of paths on the system that should be hidden from the playbook run.
* ``process_isolation_show_paths``: ``None`` Path or list of paths on the system that should be exposed to the playbook run.
* ``process_isolation_ro_paths``: ``None`` Path or list of paths on the system that should be exposed to the playbook run as read-only.
These settings instruct **Runner** to execute **Ansible** tasks inside a container environment.
For information about building execution environments, see `ansible-builder `_.
To execute **Runner** with an execution environment:
``ansible-runner run --container-image my-execution-environment:latest --process-isolation -p playbook.yml .``
See ``ansible-runner -h`` for other container-related options.
Inventory
---------
The **Runner** ``inventory`` location under the private data dir has the same expectations as inventory provided directly to ansible itself. It can
be either a single file or script or a directory containing static inventory files or scripts. This inventory is automatically loaded and provided to
**Ansible** when invoked and can be further overridden on the command line or via the ``ANSIBLE_INVENTORY`` environment variable to specify the hosts directly.
Giving an absolute path for the inventory location is best practice, because relative paths are interpreted relative to the ``current working directory``
which defaults to the ``project`` directory.
Project
--------
The **Runner** ``project`` directory is the playbook root containing playbooks and roles that those playbooks can consume directly. This is also the
directory that will be set as the ``current working directory`` when launching the **Ansible** process.
Modules
-------
**Runner** has the ability to execute modules directly using Ansible ad-hoc mode.
Roles
-----
**Runner** has the ability to execute `Roles `_ directly without first needing
a playbook to reference them. This directory holds roles used for that. Behind the scenes, **Runner** will generate a playbook and invoke the ``Role``.
.. _artifactdir:
Runner Artifacts Directory Hierarchy
------------------------------------
This directory will contain the results of **Runner** invocation grouped under an ``identifier`` directory. This identifier can be supplied to **Runner** directly
and if not given, an identifier will be generated as a `UUID `_. This is how the directory structure looks
from the top level::
.
├── artifacts
│ └── identifier
├── env
├── inventory
├── profiling_data
├── project
└── roles
The artifact directory itself contains a particular structure that provides a lot of extra detail from a running or previously-run invocation of Ansible/Runner::
.
├── artifacts
│ └── 37f639a3-1f4f-4acb-abee-ea1898013a25
│ ├── fact_cache
│ │ └── localhost
│ ├── job_events
│ │ ├── 1-34437b34-addd-45ae-819a-4d8c9711e191.json
│ │ ├── 2-8c164553-8573-b1e0-76e1-000000000006.json
│ │ ├── 3-8c164553-8573-b1e0-76e1-00000000000d.json
│ │ ├── 4-f16be0cd-99e1-4568-a599-546ab80b2799.json
│ │ ├── 5-8c164553-8573-b1e0-76e1-000000000008.json
│ │ ├── 6-981fd563-ec25-45cb-84f6-e9dc4e6449cb.json
│ │ └── 7-01c7090a-e202-4fb4-9ac7-079965729c86.json
│ ├── rc
│ ├── status
│ └── stdout
The **rc** file contains the actual return code from the **Ansible** process.
The **status** file contains one of three statuses suitable for displaying:
* success: The **Ansible** process finished successfully
* failed: The **Ansible** process failed
* timeout: The **Runner** timeout was reached (see :ref:`runnersettings`)
The **stdout** file contains the actual stdout as it appears at that moment.
.. _artifactevents:
Runner Artifact Job Events (Host and Playbook Events)
-----------------------------------------------------
**Runner** gathers the individual task and playbook events that are emitted as part of the **Ansible** run. This is extremely helpful if you don't want
to process or read the stdout returned from **Ansible** as it contains much more detail and status than just the plain stdout.
It does some of the heavy lifting of assigning order to the events and stores them in json format under the ``job_events`` artifact directory.
It also takes it a step further than normal **Ansible** callback plugins in that it will store the ``stdout`` associated with the event alongside the raw
event data (along with stdout line numbers). It also generates dummy events for stdout that didn't have corresponding host event data::
{
"uuid": "8c164553-8573-b1e0-76e1-000000000008",
"parent_uuid": "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
"counter": 5,
"stdout": "\r\nTASK [debug] *******************************************************************",
"start_line": 5,
"end_line": 7,
"event": "playbook_on_task_start",
"event_data": {
"playbook": "test.yml",
"playbook_uuid": "34437b34-addd-45ae-819a-4d8c9711e191",
"play": "all",
"play_uuid": "8c164553-8573-b1e0-76e1-000000000006",
"play_pattern": "all",
"task": "debug",
"task_uuid": "8c164553-8573-b1e0-76e1-000000000008",
"task_action": "debug",
"task_path": "\/home\/mjones\/ansible\/ansible-runner\/demo\/project\/test.yml:3",
"task_args": "msg=Test!",
"name": "debug",
"is_conditional": false,
"pid": 10640
},
"pid": 10640,
"created": "2018-06-07T14:54:58.410605"
}
If the playbook runs to completion without getting killed, the last event will always be the ``stats`` event::
{
"uuid": "01c7090a-e202-4fb4-9ac7-079965729c86",
"counter": 7,
"stdout": "\r\nPLAY RECAP *********************************************************************\r\n\u001b[0;32mlocalhost,\u001b[0m : \u001b[0;32mok=2 \u001b[0m changed=0 unreachable=0 failed=0 \r\n",
"start_line": 10,
"end_line": 14,
"event": "playbook_on_stats",
"event_data": {
"playbook": "test.yml",
"playbook_uuid": "34437b34-addd-45ae-819a-4d8c9711e191",
"changed": {
},
"dark": {
},
"failures": {
},
"ok": {
"localhost,": 2
},
"processed": {
"localhost,": 1
},
"skipped": {
},
"artifact_data": {
},
"pid": 10640
},
"pid": 10640,
"created": "2018-06-07T14:54:58.424603"
}
.. note::
The **Runner module interface** presents a programmatic interface to these events that allows getting the final status and performing host filtering of task events.
Runner Profiling Data Directory
-------------------------------
If resource profiling is enabled for **Runner** the ``profiling_data`` directory will be populated with a set of files containing the profiling data::
.
├── profiling_data
│ ├── 0-34437b34-addd-45ae-819a-4d8c9711e191-cpu.json
│ ├── 0-34437b34-addd-45ae-819a-4d8c9711e191-memory.json
│ ├── 0-34437b34-addd-45ae-819a-4d8c9711e191-pids.json
│ ├── 1-8c164553-8573-b1e0-76e1-000000000006-cpu.json
│ ├── 1-8c164553-8573-b1e0-76e1-000000000006-memory.json
│ └── 1-8c164553-8573-b1e0-76e1-000000000006-pids.json
Each file is in `JSON text format <https://tools.ietf.org/html/rfc7464>`_. Each line of the file will begin with a record separator (RS), continue with a JSON dictionary, and conclude with a line feed (LF) character. The following provides an example of what the resource files may look like. Note that since the RS and LF are control characters, they are not actually printed below::
==> 0-525400c9-c704-29a6-4107-00000000000c-cpu.json <==
{"timestamp": 1568977988.6844425, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 97.12799768097156}
{"timestamp": 1568977988.9394386, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 94.17538298892688}
{"timestamp": 1568977989.1901696, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 64.38272588006255}
{"timestamp": 1568977989.4594045, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 83.77387744259856}
==> 0-525400c9-c704-29a6-4107-00000000000c-memory.json <==
{"timestamp": 1568977988.4281094, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 36.21484375}
{"timestamp": 1568977988.6842303, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 57.87109375}
{"timestamp": 1568977988.939303, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 66.60546875}
{"timestamp": 1568977989.1900482, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 71.4609375}
{"timestamp": 1568977989.4592078, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 38.25390625}
==> 0-525400c9-c704-29a6-4107-00000000000c-pids.json <==
{"timestamp": 1568977988.4284189, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 5}
{"timestamp": 1568977988.6845856, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 6}
{"timestamp": 1568977988.939547, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 8}
{"timestamp": 1568977989.1902773, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 13}
{"timestamp": 1568977989.4593227, "task_name": "Gathering Facts", "task_uuid": "525400c9-c704-29a6-4107-00000000000c", "value": 6}
* Resource profiling data is grouped by playbook task.
* For each task, there will be three files, corresponding to cpu, memory and pid count data.
* Each file contains a set of data points collected over the course of a playbook task.
* If a task executes quickly and the polling rate for a given metric is large enough, it is possible that no profiling data may be collected during the task's execution. If this is the case, no data file will be created.
ansible-runner-2.4.1/docs/make.bat 0000664 0000000 0000000 00000001462 14770573620 0017015 0 ustar 00root root 0000000 0000000 @ECHO OFF
pushd %~dp0
REM Command file for Sphinx documentation
REM Allow the caller to point SPHINXBUILD at a specific sphinx-build
REM binary; otherwise fall back to the one found on PATH.
if "%SPHINXBUILD%" == "" (
set SPHINXBUILD=sphinx-build
)
REM Source and output locations for the documentation build.
set SOURCEDIR=.
set BUILDDIR=_build
set SPHINXPROJ=ansible-runner
REM No build target supplied: show the Sphinx help text instead.
if "%1" == "" goto help
REM Probe for sphinx-build; errorlevel 9009 means the command was not found.
%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
echo.
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
echo.installed, then set the SPHINXBUILD environment variable to point
echo.to the full path of the 'sphinx-build' executable. Alternatively you
echo.may add the Sphinx directory to PATH.
echo.
echo.If you don't have Sphinx installed, grab it from
echo.http://sphinx-doc.org/
exit /b 1
)
REM Delegate to Sphinx's make-mode with the requested target (e.g. html).
%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
goto end
:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
:end
popd
ansible-runner-2.4.1/docs/modules.rst 0000664 0000000 0000000 00000000141 14770573620 0017603 0 ustar 00root root 0000000 0000000 Developer Documentation
=======================
.. toctree::
:maxdepth: 3
ansible_runner
ansible-runner-2.4.1/docs/python_interface.rst 0000664 0000000 0000000 00000043122 14770573620 0021502 0 ustar 00root root 0000000 0000000 .. _python_interface:
Using Runner as a Python Module Interface to Ansible
====================================================
**Ansible Runner** is intended to provide a directly importable and usable API for interfacing with **Ansible** itself and exposes a few helper interfaces.
The modules center around the :class:`Runner ` object. The helper methods will either return an instance of this object which provides an
interface to the results of executing the **Ansible** command or a tuple the actual output and error response based on the interface.
**Ansible Runner** itself is a wrapper around **Ansible** execution and so adds plugins and interfaces to the system in order to gather extra information and
process/store it for use later.
Helper Interfaces
-----------------
The helper :mod:`interfaces ` provides a quick way of supplying the recommended inputs in order to launch a **Runner** process. These interfaces also allow overriding and providing inputs beyond the scope of what the standalone or container interfaces
support. You can see a full list of the inputs in the linked module documentation.
``run()`` helper function
-------------------------
:meth:`ansible_runner.interface.run`
When called, this function will take the inputs (either provided as direct inputs to the function or from the :ref:`inputdir`), and execute **Ansible**. It will run in the
foreground and return the :class:`Runner ` object when finished.
``run_async()`` helper function
-------------------------------
:meth:`ansible_runner.interface.run_async`
Takes the same arguments as :meth:`ansible_runner.interface.run` but will launch **Ansible** asynchronously and return a tuple containing
the ``thread`` object and a :class:`Runner ` object. The **Runner** object can be inspected during execution.
``run_command()`` helper function
---------------------------------
:meth:`ansible_runner.interface.run_command`
When called, this function will take the inputs (either provided as direct inputs to the function or from the :ref:`inputdir`), and execute the command passed either
locally or within a container based on the parameters passed. It will run in the foreground and return a tuple of output and error response when finished. While running
the command within a container image, the current local working directory will be volume mounted within the container. In addition, for any of the ansible command line
utilities, the inventory, vault-password-file and private-key file paths will be volume mounted if provided in the ``cmdline_args`` parameters.
``run_command_async()`` helper function
---------------------------------------
:meth:`ansible_runner.interface.run_command_async`
Takes the same arguments as :meth:`ansible_runner.interface.run_command` but will launch asynchronously and return a tuple containing
the ``thread`` object and a :class:`Runner ` object. The **Runner** object can be inspected during execution.
``get_plugin_docs()`` helper function
-------------------------------------
:meth:`ansible_runner.interface.get_plugin_docs`
When called, this function will take the inputs, and execute the ansible-doc command to return either the plugin docs or playbook snippet for the passed
list of plugin names. The plugin docs can be fetched either from locally installed plugins or from within a container image based on the parameters passed.
It will run in the foreground and return a tuple of output and error response when finished. While running the command within the container the current local
working directory will be volume mounted within the container.
``get_plugin_docs_async()`` helper function
-------------------------------------------
:meth:`ansible_runner.interface.get_plugin_docs_async`
Takes the same arguments as :meth:`ansible_runner.interface.get_plugin_docs` but will launch asynchronously and return a tuple containing
the ``thread`` object and a :class:`Runner ` object. The **Runner** object can be inspected during execution.
``get_plugin_list()`` helper function
-------------------------------------
:meth:`ansible_runner.interface.get_plugin_list`
When called, this function will take the inputs, and execute the ansible-doc command to return the list of installed plugins. The installed plugin can be fetched
either from the local environment or from within a container image based on the parameters passed. It will run in the foreground and return a tuple of output and error
response when finished. While running the command within the container the current local working directory will be volume mounted within the container.
``get_inventory()`` helper function
-----------------------------------
:meth:`ansible_runner.interface.get_inventory`
When called, this function will take the inputs, and execute the ansible-inventory command to return the inventory related information based on the action.
If ``action`` is ``list`` it will return all the hosts related information including the host and group variables, for ``host`` action it will return information
of a single host and for ``graph`` action it will return the inventory. The execution will be in the foreground and return a tuple of output and error
response when finished. While running the command within the container the current local working directory will be volume mounted within the container.
``get_ansible_config()`` helper function
----------------------------------------
:meth:`ansible_runner.interface.get_ansible_config`
When called, this function will take the inputs, and execute the ansible-config command to return the Ansible configuration related information based on the action.
If ``action`` is ``list`` it will return all the applicable configuration options for ansible, for ``dump`` action it will return the entire active configuration
and it can be customized to return only the changed configuration value by setting the ``only_changed`` boolean parameter to ``True``. For ``view`` action it will return the
view of the active configuration file. The execution will be in the foreground and return a tuple of output and error response when finished.
While running the command within the container the current local working directory will be volume mounted within the container.
``get_role_list()`` helper function
-----------------------------------
:meth:`ansible_runner.interface.get_role_list`
*Version added: 2.2*
This function will execute the ``ansible-doc`` command to return the list of installed roles
that have an argument specification defined. This data can be fetched from either the local
environment or from within a container image based on the parameters passed. It will run in
the foreground and return a tuple of output and error response when finished. Successful output
will be in JSON format as returned from ``ansible-doc``.
``get_role_argspec()`` helper function
--------------------------------------
:meth:`ansible_runner.interface.get_role_argspec`
*Version added: 2.2*
This function will execute the ``ansible-doc`` command to return a role argument specification.
This data can be fetched from either the local environment or from within a container image
based on the parameters passed. It will run in the foreground and return a tuple of output
and error response when finished. Successful output will be in JSON format as returned from
``ansible-doc``.
The ``Runner`` object
---------------------
The :class:`Runner ` object is returned as part of the execution of **Ansible** itself. Since it wraps both execution and output
it has some helper methods for inspecting the results. Other than the methods and indirect properties, the instance of the object itself contains two direct
properties:
* ``rc`` will represent the actual return code of the **Ansible** process
* ``status`` will represent the state and can be one of:
* ``unstarted``: This is a very brief state where the Runner task has been created but hasn't actually started yet.
* ``successful``: The ``ansible`` process finished successfully.
* ``failed``: The ``ansible`` process failed.
``Runner.stdout``
-----------------
The :class:`Runner ` object contains a property :attr:`ansible_runner.runner.Runner.stdout` which will return an open file
handle containing the `stdout` of the **Ansible** process.
``Runner.stderr``
-----------------
When the ``runner_mode`` is set to ``subprocess`` the :class:`Runner ` object uses a property :attr:`ansible_runner.runner.Runner.stderr` which
will return an open file handle containing the ``stderr`` of the **Ansible** process.
``Runner.events``
-----------------
:attr:`ansible_runner.runner.Runner.events` is a ``generator`` that will return the :ref:`Playbook and Host Events` as Python ``dict`` objects.
``Runner.stats``
----------------
:attr:`ansible_runner.runner.Runner.stats` is a property that will return the final ``playbook stats`` event from **Ansible** in the form of a Python ``dict``
``Runner.host_events``
----------------------
:meth:`ansible_runner.runner.Runner.host_events` is a method that, given a hostname, will return a list of only **Ansible** event data executed on that Host.
``Runner.get_fact_cache``
-------------------------
:meth:`ansible_runner.runner.Runner.get_fact_cache` is a method that, given a hostname, will return a dictionary containing the `Facts `_ stored for that host during execution.
``Runner.event_handler``
------------------------
A function passed to ``__init__`` of :class:`Runner `, this is invoked every time an Ansible event is received. You can use this to
inspect/process/handle events as they come out of Ansible. This function should return ``True`` to keep the event, otherwise it will be discarded.
``Runner.cancel_callback``
--------------------------
A function passed to ``__init__`` of :class:`Runner `, and to the :meth:`ansible_runner.interface.run` interface functions.
This function will be called for every iteration of the :meth:`ansible_runner.interface.run` event loop and should return `True`
to inform **Runner** cancel and shutdown the **Ansible** process or `False` to allow it to continue.
``Runner.finished_callback``
----------------------------
A function passed to ``__init__`` of :class:`Runner `, and to the :meth:`ansible_runner.interface.run` interface functions.
This function will be called immediately before the **Runner** event loop finishes once **Ansible** has been shut down.
.. _runnerstatushandler:
``Runner.status_handler``
-------------------------
A function passed to ``__init__`` of :class:`Runner ` and to the :meth:`ansible_runner.interface.run` interface functions.
This function will be called any time the ``status`` changes, expected values are:
* ``starting``: Preparing to start but hasn't started running yet
* ``running``: The **Ansible** task is running
* ``canceled``: The task was manually canceled either via callback or the cli
* ``timeout``: The timeout configured in Runner Settings was reached (see :ref:`runnersettings`)
* ``failed``: The **Ansible** process failed
* ``successful``: The **Ansible** process succeeded
Usage examples
--------------
.. code-block:: python
import ansible_runner
r = ansible_runner.run(private_data_dir='/tmp/demo', playbook='test.yml')
print("{}: {}".format(r.status, r.rc))
# successful: 0
for each_host_event in r.events:
print(each_host_event['event'])
print("Final status:")
print(r.stats)
.. code-block:: python
import ansible_runner
def my_artifacts_handler(artifacts_dir):
# Do something here
print(artifacts_dir)
# Do something with artifact directory after the run is complete
r = ansible_runner.run(private_data_dir='/tmp/demo', playbook='test.yml', artifacts_handler=my_artifacts_handler)
.. code-block:: python
import ansible_runner
def my_status_handler(data, runner_config):
# Do something here
print(data)
r = ansible_runner.run(private_data_dir='/tmp/demo', playbook='test.yml', status_handler=my_status_handler)
.. code-block:: python
import ansible_runner
def my_event_handler(data):
# Do something here
print(data)
r = ansible_runner.run(private_data_dir='/tmp/demo', playbook='test.yml', event_handler=my_event_handler)
.. code-block:: python
import ansible_runner
r = ansible_runner.run(private_data_dir='/tmp/demo', host_pattern='localhost', module='shell', module_args='whoami')
print("{}: {}".format(r.status, r.rc))
# successful: 0
for each_host_event in r.events:
print(each_host_event['event'])
print("Final status:")
print(r.stats)
.. code-block:: python
from ansible_runner import Runner, RunnerConfig
# Using tag using RunnerConfig
rc = RunnerConfig(
private_data_dir="project",
playbook="main.yml",
tags='my_tag',
)
rc.prepare()
r = Runner(config=rc)
r.run()
.. code-block:: python
# run the role named 'myrole' contained in the '/project/roles' directory
r = ansible_runner.run(private_data_dir='/tmp/demo', role='myrole')
print("{}: {}".format(r.status, r.rc))
print(r.stats)
.. code-block:: python
# run ansible/generic commands in interactive mode within container
out, err, rc = ansible_runner.run_command(
executable_cmd='ansible-playbook',
cmdline_args=['gather.yaml', '-i', 'inventory', '-vvvv', '-k'],
input_fd=sys.stdin,
output_fd=sys.stdout,
error_fd=sys.stderr,
host_cwd='/home/demo',
process_isolation=True,
container_image='network-ee'
)
print("rc: {}".format(rc))
print("out: {}".format(out))
print("err: {}".format(err))
.. code-block:: python
# run ansible/generic commands in interactive mode locally
out, err, rc = ansible_runner.run_command(
executable_cmd='ansible-playbook',
cmdline_args=['gather.yaml', '-i', 'inventory', '-vvvv', '-k'],
input_fd=sys.stdin,
output_fd=sys.stdout,
error_fd=sys.stderr,
)
print("rc: {}".format(rc))
print("out: {}".format(out))
print("err: {}".format(err))
.. code-block:: python
# get plugin docs from within container
out, err = ansible_runner.get_plugin_docs(
plugin_names=['vyos.vyos.vyos_command'],
plugin_type='module',
response_format='json',
process_isolation=True,
container_image='network-ee'
)
print("out: {}".format(out))
print("err: {}".format(err))
.. code-block:: python
# get plugin docs from within container in async mode
thread_obj, runner_obj = ansible_runner.get_plugin_docs_async(
plugin_names=['ansible.netcommon.cli_config', 'ansible.netcommon.cli_command'],
plugin_type='module',
response_format='json',
process_isolation=True,
container_image='network-ee'
)
while runner_obj.status not in ['canceled', 'successful', 'timeout', 'failed']:
time.sleep(0.01)
continue
print("out: {}".format(runner_obj.stdout.read()))
print("err: {}".format(runner_obj.stderr.read()))
.. code-block:: python
# get plugin list installed on local system
out, err = ansible_runner.get_plugin_list()
print("out: {}".format(out))
print("err: {}".format(err))
.. code-block:: python
# get plugins with file list from within container
out, err = ansible_runner.get_plugin_list(list_files=True, process_isolation=True, container_image='network-ee')
print("out: {}".format(out))
print("err: {}".format(err))
.. code-block:: python
# get list of changed ansible configuration values
out, err = ansible_runner.get_ansible_config(action='dump', config_file='/home/demo/ansible.cfg', only_changed=True)
print("out: {}".format(out))
print("err: {}".format(err))
# get ansible inventory information
out, err = ansible_runner.get_inventory(
action='list',
inventories=['/home/demo/inventory1', '/home/demo/inventory2'],
response_format='json',
process_isolation=True,
container_image='network-ee'
)
print("out: {}".format(out))
print("err: {}".format(err))
.. code-block:: python
# get all roles with an arg spec installed locally
out, err = ansible_runner.get_role_list()
print("out: {}".format(out))
print("err: {}".format(err))
.. code-block:: python
# get roles with an arg spec from the `foo.bar` collection in a container
out, err = ansible_runner.get_role_list(collection='foo.bar', process_isolation=True, container_image='network-ee')
print("out: {}".format(out))
print("err: {}".format(err))
.. code-block:: python
# get the arg spec for role `baz` from the locally installed `foo.bar` collection
out, err = ansible_runner.get_role_argspec('baz', collection='foo.bar')
print("out: {}".format(out))
print("err: {}".format(err))
.. code-block:: python
# get the arg spec for role `baz` from the `foo.bar` collection installed in a container
out, err = ansible_runner.get_role_argspec('baz', collection='foo.bar', process_isolation=True, container_image='network-ee')
print("out: {}".format(out))
print("err: {}".format(err))
Providing custom behavior and inputs
------------------------------------
**TODO**
The helper methods are just one possible entrypoint, extending the classes used by these helper methods can allow a lot more custom behavior and functionality.
Show:
* How :class:`Runner Config ` is used and how overriding the methods and behavior can work
* Show how custom cancel and status callbacks can be supplied.
ansible-runner-2.4.1/docs/remote_jobs.rst 0000664 0000000 0000000 00000012033 14770573620 0020446 0 ustar 00root root 0000000 0000000 .. _remote_jobs:
Remote job execution
====================
Ansible Runner supports the concept that a job run may be requested on one host but executed on another.
This capability is primarily intended to be used by `Receptor `_.
Support for this in Runner involves a three phase process.
- **Transmit**: Convert the job to a binary format that can be sent to the worker node.
- **Worker**: Actually execute the job.
- **Process**: Receive job results and process them.
The following command illustrates how the three phases work together::
$ ansible-runner transmit ./demo -p test.yml | ansible-runner worker | ansible-runner process ./demo
In this example, the ``ansible-runner transmit`` command is given a private data directory of ``./demo`` and told to select
the ``test.yml`` playbook from it. Instead of executing the playbook as ``ansible-runner run`` would do, the data dir
and command line parameters are converted to a compressed binary stream that is emitted as stdout. The ``transmit``
command generally takes the same command line parameters as the ``run`` command.
The ``ansible-runner worker`` command accepts this stream, runs the playbook, and generates a new compressed binary
stream of the resulting job events and artifacts.
This command optionally accepts the ``--private-data-dir`` option.
If provided, it will extract the contents sent from ``ansible-runner transmit`` into that directory.
The ``ansible-runner process`` command accepts the result stream from the worker, and fires all the normal callbacks
and does job event processing. In the command above, this results in printing the playbook output and saving
artifacts to the data dir. The ``process`` command takes a data dir as a parameter, to know where to save artifacts.
Using Receptor as the remote executor
-------------------------------------
A full expansion on how Receptor works is out of the scope of this document. We can set up a basic receptor node with a simple configuration file::
---
- node:
id: primary
- log-level:
level: Debug
- local-only:
- control-service:
service: control
filename: ./control.sock
- work-command:
worktype: ansible-runner
command: ansible-runner
params: worker
allowruntimeparams: true
We can then start that local receptor node::
$ receptor --config ./receptor.yml
Now we can repeat the ``transmit``/``worker``/``process`` example above, but instead of piping the output of ``transmit`` to ``worker``, we can use the ``receptorctl`` command to send it to the receptor node we just started::
$ ansible-runner transmit ./demo -p test.yml | receptorctl --socket ./control.sock work submit -f --node primary -p - ansible-runner | ansible-runner process ./demo
Cleanup of Resources Used by Jobs
---------------------------------
The transmit and process commands do not offer any automatic deletion of the
private data directory or artifacts, because these are how the user interacts with runner.
When running ``ansible-runner worker``, if no ``--private-data-dir`` is given,
it will extract the contents to a temporary directory which is deleted at the end of execution.
If the ``--private-data-dir`` option is given, then the directory will persist after the run finishes
unless the ``--delete`` flag is also set. In that case, the private data directory will be deleted before execution if it exists and also removed after execution.
The following command offers out-of-band cleanup ::
$ ansible-runner worker cleanup --file-pattern=/tmp/foo_*
This would assure that old directories that fit the file glob ``/tmp/foo_*`` are deleted,
which could be used to assure cleanup of paths created by commands like
``ansible-runner worker --private-data-dir=/tmp/foo_3``, for example.
NOTE: see the ``--grace-period`` option, which sets the time window.
This command also takes a ``--remove-images`` option to run the podman or docker ``rmi`` command.
There is otherwise no automatic cleanup of images used by a run,
even if ``container_auth_data`` is used to pull from a private container registry.
To be sure that layers are deleted as well, the ``--image-prune`` flag is necessary.
Artifact Directory Specification
--------------------------------
The ``worker`` command does not write artifacts, these are streamed instead, and
the ``process`` command is what ultimately writes the artifacts folder contents.
With the default behavior, ``ansible-runner process ./demo`` would write artifacts to ``./demo/artifacts``.
If you wish to better align with normal ansible-runner use, you can pass the
``--ident`` option to save to a subfolder, so ``ansible-runner process ./demo --ident=43``
would extract artifacts to the folder ``./demo/artifacts/43``.
Python API
----------
Python code importing Ansible Runner can make use of these facilities by setting the ``streamer`` parameter to
``ansible_runner.interface.run``. This parameter can be set to ``transmit``, ``worker`` or ``process`` to invoke
each of the three stages. Other parameters are as normal in the CLI.
ansible-runner-2.4.1/docs/requirements.in 0000664 0000000 0000000 00000000051 14770573620 0020454 0 ustar 00root root 0000000 0000000 ansible-core
sphinx
sphinx-ansible-theme
ansible-runner-2.4.1/docs/requirements.txt 0000664 0000000 0000000 00000003367 14770573620 0020702 0 ustar 00root root 0000000 0000000 #
# This file is autogenerated by pip-compile with Python 3.9
# by the following command:
#
# pip-compile requirements.in
#
alabaster==0.7.16
# via sphinx
ansible-core==2.15.12
# via -r requirements.in
ansible-pygments==0.1.1
# via sphinx-ansible-theme
babel==2.16.0
# via sphinx
certifi==2024.8.30
# via requests
cffi==1.17.1
# via cryptography
charset-normalizer==3.3.2
# via requests
cryptography==44.0.1
# via ansible-core
docutils==0.20.1
# via
# sphinx
# sphinx-rtd-theme
idna==3.8
# via requests
imagesize==1.4.1
# via sphinx
importlib-metadata==8.4.0
# via sphinx
importlib-resources==5.0.7
# via ansible-core
jinja2==3.1.5
# via
# ansible-core
# sphinx
markupsafe==2.1.5
# via jinja2
packaging==24.1
# via
# ansible-core
# sphinx
pycparser==2.22
# via cffi
pygments==2.18.0
# via
# ansible-pygments
# sphinx
pyyaml==6.0.2
# via ansible-core
requests==2.32.3
# via sphinx
resolvelib==1.0.1
# via ansible-core
snowballstemmer==2.2.0
# via sphinx
sphinx==7.4.7
# via
# -r requirements.in
# sphinx-ansible-theme
# sphinx-rtd-theme
# sphinxcontrib-jquery
sphinx-ansible-theme==0.10.3
# via -r requirements.in
sphinx-rtd-theme==2.0.0
# via sphinx-ansible-theme
sphinxcontrib-applehelp==2.0.0
# via sphinx
sphinxcontrib-devhelp==2.0.0
# via sphinx
sphinxcontrib-htmlhelp==2.1.0
# via sphinx
sphinxcontrib-jquery==4.1
# via sphinx-rtd-theme
sphinxcontrib-jsmath==1.0.1
# via sphinx
sphinxcontrib-qthelp==2.0.0
# via sphinx
sphinxcontrib-serializinghtml==2.0.0
# via sphinx
tomli==2.0.1
# via sphinx
urllib3==2.2.2
# via requests
zipp==3.20.1
# via importlib-metadata
ansible-runner-2.4.1/docs/standalone.rst 0000664 0000000 0000000 00000015573 14770573620 0020302 0 ustar 00root root 0000000 0000000 .. _standalone:
Using Runner as a standalone command line tool
==============================================
The **Ansible Runner** command line tool can be used as a standard command line interface to **Ansible** itself but is primarily intended
to fit into automation and pipeline workflows. Because of this, it has a bit of a different workflow than **Ansible** itself because you can select between a few different modes to launch the command.
While you can launch **Runner** and provide it all of the inputs as arguments to the command line (as you do with **Ansible** itself),
there is another interface where inputs are gathered into a single location referred to in the command line parameters as ``private_data_dir``.
(see :ref:`inputdir`)
To view the parameters accepted by ``ansible-runner``::
$ ansible-runner --help
An example invocation of the standalone ``ansible-runner`` utility::
$ ansible-runner run /tmp/private -p playbook.yml
Where playbook.yml is the playbook from the ``/tmp/private/projects`` directory, and ``run`` is the command mode you want to invoke **Runner** with.
The different **commands** that runner accepts are:
* ``run`` starts ``ansible-runner`` in the foreground and waits until the underlying **Ansible** process completes before returning
* ``start`` starts ``ansible-runner`` as a background daemon process and generates a pid file
* ``stop`` terminates an ``ansible-runner`` process that was launched in the background with ``start``
* ``is-alive`` checks the status of an ``ansible-runner`` process that was started in the background with ``start``
While **Runner** is running it creates an ``artifacts`` directory (see :ref:`artifactdir`) regardless of what mode it was started
in. The resulting output and status from **Ansible** will be located here. You can control the exact location underneath the ``artifacts`` directory
with the ``-i IDENT`` argument to ``ansible-runner``, otherwise a random UUID will be generated.
Executing **Runner** in the foreground
--------------------------------------
When launching **Runner** with the ``run`` command, as above, the program will stay in the foreground and you'll see output just as you expect from a normal
**Ansible** process. **Runner** will still populate the ``artifacts`` directory, as mentioned in the previous section, to preserve the output and allow processing
of the artifacts after exit.
Executing **Runner** in the background
--------------------------------------
When launching **Runner** with the ``start`` command, the program will generate a pid file and move to the background. You can check its status with the
``is-alive`` command, or terminate it with the ``stop`` command. You can find the stdout, status, and return code in the ``artifacts`` directory.
Running Playbooks
-----------------
An example invocation using ``demo`` as private directory::
$ ansible-runner run demo --playbook test.yml
Running Modules Directly
------------------------
An example invocation of the ``debug`` module with ``demo`` as a private directory::
$ ansible-runner run demo -m debug --hosts localhost -a msg=hello
Running Roles Directly
----------------------
An example invocation using ``demo`` as private directory and ``localhost`` as target::
$ ansible-runner run demo --role testrole --hosts localhost
Ansible roles directory can be provided with ``--roles-path`` option. Role variables can be passed with ``--role-vars`` at runtime.
.. _outputjson:
Running with Process Isolation
------------------------------
**Runner** supports process isolation. Process isolation creates a new mount namespace where the root is on a tmpfs that is invisible from the host
and is automatically cleaned up when the last process exits. You can enable process isolation by providing the ``--process-isolation`` argument on
the command line. **Runner** as of version 2.0 defaults to using ``podman`` as the process isolation executable, but supports
using any executable that is compatible with the ``bubblewrap`` CLI arguments by passing in the ``--process-isolation-executable`` argument::
$ ansible-runner --process-isolation ...
**Runner** supports various process isolation arguments that allow you to provide configuration details to the process isolation executable. To view the complete
list of arguments accepted by ``ansible-runner``::
$ ansible-runner --help
Running with Directory Isolation
--------------------------------
If you need to be able to execute multiple tasks in parallel that might conflict with each other or if you want to make sure a single invocation of
Ansible/Runner doesn't pollute or overwrite the playbook content you can give a base path::
$ ansible-runner --directory-isolation-base-path /tmp/runner
**Runner** will copy the project directory to a temporary directory created under that path, set it as the working directory, and execute from that location.
After running that temp directory will be cleaned up and removed.
Specifying an Alternate Inventory
---------------------------------
The default inventory, if not specified, will be ``<private_data_dir>/inventory/``.
All files within this subdirectory of the private data directory will be processed as
potential inventory host files. You may specify a different inventory using the ``--inventory``
option. This value may be one of:
- A file name located within ``<private_data_dir>/inventory/``.
- An absolute or relative path to an alternate inventory file or directory.
This path is not required to be inside of the private data directory.
Examples::
# Use inventory /inventory/hosts.backup
$ ansible-runner run demo -p test.yml --inventory hosts.backup
# Use inventory in the /path/to/alternate-inventory directory (outside of private_data_dir)
$ ansible-runner run demo -p test.yml --inventory /path/to/alternate-inventory
# Use inventory in the inventory2 subdirectory, relative to current directory
$ ansible-runner run demo -p test.yml --inventory inventory2
.. note:: This option has no effect when using process isolation.
Outputting json (raw event data) to the console instead of normal output
------------------------------------------------------------------------
**Runner** supports outputting json event data structure directly to the console (and stdout file) instead of the standard **Ansible** output, thus
mimicking the behavior of the ``json`` output plugin. This is in addition to the event data that's already present in the artifact directory. All that is needed
is to supply the ``-j`` argument on the command line::
$ ansible-runner ... -j ...
Cleaning up artifact directories
--------------------------------
Using the command line argument ``--rotate-artifacts`` allows you to control the number of artifact directories that are present. Given a number as the parameter
for this argument will cause **Runner** to clean up old artifact directories. The default value of ``0`` disables artifact directory cleanup.
ansible-runner-2.4.1/pyproject.toml 0000664 0000000 0000000 00000002160 14770573620 0017370 0 ustar 00root root 0000000 0000000 [build-system]
requires = ["setuptools>=45, <=70.0.0", "setuptools-scm[toml]>=6.2, <=8.1.0"] # pin max versions of build deps and update as needed
build-backend = "setuptools.build_meta"
[tool.setuptools_scm]
[[tool.mypy.overrides]]
module = [
"ansible.*",
"daemon.*",
"pexpect",
]
ignore_missing_imports = true
[tool.pylint.main]
output-format = "colorized"
disable = [
# Some codes we will leave disabled
"C0103", # invalid-name
"C0114", # missing-module-docstring
"C0115", # missing-class-docstring
"C0116", # missing-function-docstring
"C0301", # line-too-long
"R0401", # cyclic-import
"R0801", # duplicate-code
"R0902", # too-many-instance-attributes
"R0903", # too-few-public-methods
"R0904", # too-many-public-methods
"R0911", # too-many-return-statements
"R0912", # too-many-branches
"R0913", # too-many-arguments
"R0914", # too-many-locals
"R0915", # too-many-statements
"W0221", # arguments-differ
"W0511", # fixme
"W0603", # global-statement
"W0718", # broad-exception-caught
"W0719", # broad-exception-raised
"W1514", # unspecified-encoding
]
ansible-runner-2.4.1/pytest.ini 0000664 0000000 0000000 00000000374 14770573620 0016512 0 ustar 00root root 0000000 0000000 [pytest]
markers =
test_all_runtimes: Generate a test for each supported container runtime
testpaths = test
addopts =
-r a
--color yes
--showlocals
--verbose
--durations 10
--durations-min 1
--strict-markers
-Werror
ansible-runner-2.4.1/setup.cfg 0000664 0000000 0000000 00000003203 14770573620 0016274 0 ustar 00root root 0000000 0000000 [metadata]
name = ansible-runner
author = Ansible, Inc.
author_email = info@ansible.com
description = "Consistent Ansible Python API and CLI with container and process isolation runtime capabilities"
url = https://ansible-runner.readthedocs.io
project_urls =
Source = https://github.com/ansible/ansible-runner
long_description = file: README.md
long_description_content_type = text/markdown
license = Apache Software License, Version 2.0
classifiers =
Development Status :: 5 - Production/Stable
Environment :: Console
Intended Audience :: Developers
Intended Audience :: Information Technology
Intended Audience :: System Administrators
License :: OSI Approved :: Apache Software License
Natural Language :: English
Operating System :: POSIX
Programming Language :: Python :: 3.9
Programming Language :: Python :: 3.10
Programming Language :: Python :: 3.11
Programming Language :: Python :: 3.12
Programming Language :: Python :: 3 :: Only
Topic :: Software Development :: Libraries :: Python Modules
Topic :: System :: Systems Administration
Topic :: Utilities
[options]
python_requires = >=3.9
install_requires =
pexpect>=4.5
packaging
python-daemon
pyyaml
# enable `groups` arg to entry_points missing in 3.9 stdlib importlib.metadata
importlib-metadata>= 4.6,< 6.3; python_version<'3.10'
[options.entry_points]
console_scripts =
ansible-runner = ansible_runner.__main__:main
[flake8]
# W503 - Line break occurred before a binary operator
ignore = W503
max-line-length = 160
per-file-ignores =
src/ansible_runner/display_callback/callback/awx_display.py:E402
ansible-runner-2.4.1/src/ 0000775 0000000 0000000 00000000000 14770573620 0015244 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/src/ansible_runner/ 0000775 0000000 0000000 00000000000 14770573620 0020252 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/src/ansible_runner/__init__.py 0000664 0000000 0000000 00000001304 14770573620 0022361 0 ustar 00root root 0000000 0000000 from .utils.importlib_compat import importlib_metadata
from .interface import run, run_async, \
run_command, run_command_async, \
get_plugin_docs, get_plugin_docs_async, get_plugin_list, \
get_role_list, get_role_argspec, \
get_inventory, \
get_ansible_config # noqa
from .exceptions import AnsibleRunnerException, ConfigurationError, CallbackError # noqa
from .runner_config import RunnerConfig # noqa
from .runner import Runner # noqa
# Discover and load any third-party plugins registered under the
# 'ansible_runner.plugins' entry-point group, keyed by entry-point name.
plugins = {
    entry_point.name: entry_point.load()
    for entry_point
    in importlib_metadata.entry_points(group='ansible_runner.plugins')
}
ansible-runner-2.4.1/src/ansible_runner/__main__.py 0000664 0000000 0000000 00000107347 14770573620 0022360 0 ustar 00root root 0000000 0000000 #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import annotations
import ast
import threading
import traceback
import argparse
import logging
import signal
import sys
import errno
import json
import stat
import os
import shutil
import textwrap
import tempfile
from contextlib import contextmanager
from pathlib import Path
from uuid import uuid4
import daemon
from daemon.pidfile import TimeoutPIDLockFile
from yaml import safe_dump, safe_load
from ansible_runner import run
from ansible_runner import output
from ansible_runner import cleanup
from ansible_runner.utils import dump_artifact, Bunch, register_for_cleanup
from ansible_runner.utils.capacity import get_cpu_count, get_mem_in_bytes, ensure_uuid
from ansible_runner.utils.importlib_compat import importlib_metadata
from ansible_runner.runner import Runner
VERSION = importlib_metadata.version("ansible_runner")
DEFAULT_ROLES_PATH = os.getenv('ANSIBLE_ROLES_PATH', None)
DEFAULT_RUNNER_BINARY = os.getenv('RUNNER_BINARY', None)
DEFAULT_RUNNER_PLAYBOOK = os.getenv('RUNNER_PLAYBOOK', None)
DEFAULT_RUNNER_ROLE = os.getenv('RUNNER_ROLE', None)
DEFAULT_RUNNER_MODULE = os.getenv('RUNNER_MODULE', None)
DEFAULT_UUID = uuid4()
DEFAULT_CLI_ARGS = {
"positional_args": (
(
('private_data_dir',),
{
"help": "base directory containing the ansible-runner metadata "
"(project, inventory, env, etc)"
},
),
),
"generic_args": (
(
('--version',),
{
"action": "version",
"version": VERSION
},
),
(
("--debug",),
{
"action": "store_true",
"help": "enable ansible-runner debug output logging (default=False)"
},
),
(
("--logfile",),
{
"help": "log output messages to a file (default=None)"
},
),
),
"mutually_exclusive_group": (
(
("-p", "--playbook",),
{
"default": DEFAULT_RUNNER_PLAYBOOK,
"help": "invoke an Ansible playbook from the ansible-runner project "
"(See Ansible Playbook Options below)"
},
),
(
("-m", "--module",),
{
"default": DEFAULT_RUNNER_MODULE,
"help": "invoke an Ansible module directly without a playbook "
"(See Ansible Module Options below)"
},
),
(
("-r", "--role",),
{
"default": DEFAULT_RUNNER_ROLE,
"help": "invoke an Ansible role directly without a playbook "
"(See Ansible Role Options below)"
},
),
),
"ansible_group": (
(
("--limit",),
{
"help": "matches Ansible's ```--limit``` parameter to further constrain "
"the inventory to be used (default=None)"
},
),
(
("--cmdline",),
{
"help": "command line options to pass to ansible-playbook at "
"execution time (default=None)"
},
),
(
("--hosts",),
{
"help": "define the set of hosts to execute against (default=None) "
"Note: this parameter only works with -m or -r"
},
),
(
("--forks",),
{
"help": "matches Ansible's ```--forks``` parameter to set the number "
"of concurrent processes (default=None)"
},
),
),
"runner_group": (
# ansible-runner options
(
("-b", "--binary",),
{
"default": DEFAULT_RUNNER_BINARY,
"help": "specifies the full path pointing to the Ansible binaries "
f"(default={DEFAULT_RUNNER_BINARY})"
},
),
(
("-i", "--ident",),
{
"default": DEFAULT_UUID,
"help": "an identifier that will be used when generating the artifacts "
"directory and can be used to uniquely identify a playbook run "
f"(default={DEFAULT_UUID})"
},
),
(
("--rotate-artifacts",),
{
"default": 0,
"type": int,
"help": "automatically clean up old artifact directories after a given "
"number have been created (default=0, disabled)"
},
),
(
("--artifact-dir",),
{
"help": "optional path for the artifact root directory "
"(default=/artifacts)"
},
),
(
("--project-dir",),
{
"help": "optional path for the location of the playbook content directory "
"(default=/project)"
},
),
(
("--inventory",),
{
"help": "optional path for the location of the inventory content directory "
"(default=/inventory)"
},
),
(
("-j", "--json",),
{
"action": "store_true",
"help": "output the JSON event structure to stdout instead of "
"Ansible output (default=False)"
},
),
(
("--omit-event-data",),
{
"action": "store_true",
"help": "Omits including extra event data in the callback payloads "
"or the Runner payload data files "
"(status and stdout still included)"
},
),
(
("--only-failed-event-data",),
{
"action": "store_true",
"help": "Only adds extra event data for failed tasks in the callback "
"payloads or the Runner payload data files "
"(status and stdout still included for other events)"
},
),
(
("--omit-env-files",),
{
"action": "store_true",
"dest": "suppress_env_files",
"help": "Add flag to prevent the writing of the env directory"
},
),
(
("-q", "--quiet",),
{
"action": "store_true",
"help": "disable all messages sent to stdout/stderr (default=False)"
},
),
(
("-v",),
{
"action": "count",
"help": "increase the verbosity with multiple v's (up to 5) of the "
"ansible-playbook output (default=None)"
},
),
),
"roles_group": (
(
("--roles-path",),
{
"default": DEFAULT_ROLES_PATH,
"help": "path used to locate the role to be executed (default=None)"
},
),
(
("--role-vars",),
{
"help": "set of variables to be passed to the role at run time in the "
"form of 'key1=value1 key2=value2 keyN=valueN'(default=None)"
},
),
(
("--role-skip-facts",),
{
"action": "store_true",
"default": False,
"help": "disable fact collection when the role is executed (default=False)"
},
)
),
"playbook_group": (
(
("--process-isolation",),
{
"dest": "process_isolation",
"action": "store_true",
"help": "Isolate execution. Two methods are supported: (1) using a container engine (e.g. podman or docker) "
"to execute **Ansible**. (2) using a sandbox (e.g. bwrap) which will by default restrict access to /tmp "
"(default=False)"
},
),
(
("--process-isolation-executable",),
{
"dest": "process_isolation_executable",
"default": "podman",
"help": "Process isolation executable or container engine used to isolate execution. (default=podman)"
}
),
(
("--process-isolation-path",),
{
"dest": "process_isolation_path",
"default": "/tmp",
"help": "path that an isolated playbook run will use for staging. "
"(default=/tmp)"
}
),
(
("--process-isolation-hide-paths",),
{
"dest": "process_isolation_hide_paths",
"nargs": "*",
"help": "list of paths on the system that should be hidden from the "
"playbook run (default=None)"
}
),
(
("--process-isolation-show-paths",),
{
"dest": "process_isolation_show_paths",
"nargs": "*",
"help": "list of paths on the system that should be exposed to the "
"playbook run (default=None)"
}
),
(
("--process-isolation-ro-paths",),
{
"dest": "process_isolation_ro_paths",
"nargs": "*",
"help": "list of paths on the system that should be exposed to the "
"playbook run as read-only (default=None)"
}
),
(
("--directory-isolation-base-path",),
{
"dest": "directory_isolation_base_path",
"help": "copies the project directory to a location in this directory "
"to prevent multiple simultaneous executions from conflicting "
"(default=None)"
}
)
),
"modules_group": (
(
("-a", "--args",),
{
"dest": "module_args",
"help": "set of arguments to be passed to the module at run time in the "
"form of 'key1=value1 key2=value2 keyN=valueN'(default=None)"
}
),
),
"container_group": (
(
("--container-image",),
{
"dest": "container_image",
"help": "Container image to use when running an ansible task"
}
),
(
("--container-volume-mount",),
{
"dest": "container_volume_mounts",
"action": "append",
"help": "Bind mounts (in the form 'host_dir:/container_dir)'. "
"Can be used more than once to create multiple bind mounts."
}
),
(
("--container-option",),
{
"dest": "container_options",
"action": "append",
"help": "Container options to pass to execution engine. "
"Can be used more than once to send multiple options."
}
),
),
}
logger = logging.getLogger('ansible-runner')
class AnsibleRunnerArgumentParser(argparse.ArgumentParser):
    """Argument parser that shows common usage examples when no subcommand is given."""

    def error(self, message):
        # argparse reports a missing required subcommand with a message
        # containing "required: command"; in that case, show the friendly
        # usage summary before delegating to the standard error-and-exit.
        missing_subcommand = 'required: command' in message.lower()
        if missing_subcommand:
            print_common_usage()
        super().error(message)
@contextmanager
def role_manager(vargs):
    """Prepare (and afterwards clean up) the inputs needed to run a role directly.

    When ``-r/--role`` was given, synthesize a one-play playbook that applies the
    role, write it into the project directory, and build a ``Bunch`` of keyword
    arguments suitable for ``ansible_runner.run``. On exit, remove any files or
    folders this function created and restore any env/envvars content it modified.
    When no role was requested, yield ``vargs`` unchanged.

    :param dict vargs: Parsed command-line arguments as a dict.
    """
    if vargs.get('role'):
        role = {'name': vargs.get('role')}
        if vargs.get('role_vars'):
            role_vars = {}
            for item in vargs['role_vars'].split():
                key, value = item.split('=')
                try:
                    # Prefer typed values (ints, bools, lists) when the literal parses.
                    role_vars[key] = ast.literal_eval(value)
                except Exception:
                    # Fall back to the raw string for anything literal_eval rejects.
                    role_vars[key] = value
            role['vars'] = role_vars

        kwargs = Bunch(**vargs)
        kwargs.update(private_data_dir=vargs.get('private_data_dir'),
                      json_mode=vargs.get('json'),
                      ignore_logging=False,
                      project_dir=vargs.get('project_dir'),
                      rotate_artifacts=vargs.get('rotate_artifacts'))

        if vargs.get('artifact_dir'):
            kwargs.artifact_dir = vargs.get('artifact_dir')

        if vargs.get('project_dir'):
            project_path = kwargs.project_dir = vargs.get('project_dir')
        else:
            project_path = os.path.join(vargs.get('private_data_dir'), 'project')

        # Record which paths existed BEFORE we run, so teardown only removes
        # what this function dynamically created.
        project_exists = os.path.exists(project_path)

        env_path = os.path.join(vargs.get('private_data_dir'), 'env')
        env_exists = os.path.exists(env_path)

        envvars_path = os.path.join(vargs.get('private_data_dir'), 'env/envvars')
        envvars_exists = os.path.exists(envvars_path)

        if vargs.get('cmdline'):
            kwargs.cmdline = vargs.get('cmdline')

        playbook = None
        tmpvars = None

        # Synthetic play applying the requested role; hosts defaults to "all".
        play = [{'hosts': vargs.get('hosts') if vargs.get('hosts') is not None else "all",
                 'gather_facts': not vargs.get('role_skip_facts'),
                 'roles': [role]}]

        filename = str(uuid4().hex)

        playbook = dump_artifact(json.dumps(play), project_path, filename)
        kwargs.playbook = playbook
        output.debug(f"using playbook file {playbook}")

        if vargs.get('inventory'):
            kwargs.inventory = vargs.get('inventory')
            output.debug(f"using inventory file {kwargs.inventory}")

        roles_path = vargs.get('roles_path') or os.path.join(vargs.get('private_data_dir'), 'roles')
        roles_path = os.path.abspath(roles_path)
        output.debug(f"setting ANSIBLE_ROLES_PATH to {roles_path}")

        # Merge ANSIBLE_ROLES_PATH into any pre-existing envvars file content;
        # the raw bytes are kept in tmpvars so the file can be restored later.
        envvars = {}
        if envvars_exists:
            with open(envvars_path, 'rb') as f:
                tmpvars = f.read()
                new_envvars = safe_load(tmpvars)
                if new_envvars:
                    envvars = new_envvars

        envvars['ANSIBLE_ROLES_PATH'] = roles_path
        kwargs.envvars = envvars
    else:
        kwargs = vargs

    yield kwargs

    # Teardown: undo only what this invocation created or modified.
    if vargs.get('role'):
        if not project_exists and os.path.exists(project_path):
            logger.debug('removing dynamically generated project folder')
            shutil.rmtree(project_path)
        elif playbook and os.path.isfile(playbook):
            logger.debug('removing dynamically generated playbook')
            os.remove(playbook)

        # if a previous envvars existed in the private_data_dir,
        # restore the original file contents
        if tmpvars:
            with open(envvars_path, 'wb') as f:
                f.write(tmpvars)
        elif not envvars_exists and os.path.exists(envvars_path):
            logger.debug('removing dynamically generated envvars folder')
            os.remove(envvars_path)

        # since ansible-runner created the env folder, remove it
        if not env_exists and os.path.exists(env_path):
            logger.debug('removing dynamically generated env folder')
            shutil.rmtree(env_path)
def print_common_usage():
    """Print a short summary of the most common ansible-runner invocations."""
    usage_text = textwrap.dedent("""
            These are common Ansible Runner commands:

                execute a playbook contained in an ansible-runner directory:

                    ansible-runner run /tmp/private -p playbook.yml
                    ansible-runner start /tmp/private -p playbook.yml
                    ansible-runner stop /tmp/private
                    ansible-runner is-alive /tmp/private

                directly execute ansible primitives:

                    ansible-runner run . -r role_name --hosts myhost
                    ansible-runner run . -m command -a "ls -l" --hosts myhost

            `ansible-runner --help` list of optional command line arguments
        """)
    print(usage_text)
def add_args_to_parser(parser, args):
    """Register a collection of argument definitions on a parser.

    :param argparse.ArgumentParser parser: Instance of a parser, subparser, or argument group
    :param tuple args: Tuple of tuples, format ((arg1, arg2), {'kwarg1':'val1'},)
    :returns: None
    """
    for flags, options in args:
        parser.add_argument(*flags, **options)
def valid_inventory(private_data_dir: str, inventory: str) -> str | None:
    """
    Validate the --inventory value is an actual file or directory.

    The supplied value may either be an absolute or relative path to an
    existing file or directory (even outside of private_data_dir), or the
    name of an entry inside ``<private_data_dir>/inventory/``. Since ansible
    itself accepts a file or directory for the inventory, we check for either.

    :return: Absolute path to the valid inventory, or None otherwise.
    """
    # First preference: the value taken literally as an absolute/relative path.
    candidate = Path(inventory)
    if candidate.exists() and (candidate.is_file() or candidate.is_dir()):
        return str(candidate.absolute())

    # Second preference: a relative name resolved under the private data
    # directory's inventory subfolder.
    if not candidate.is_absolute():
        fallback = Path(private_data_dir, 'inventory', candidate)
        if fallback.exists() and (fallback.is_file() or fallback.is_dir()):
            return str(fallback.absolute())

    return None
def main(sys_args=None):
"""Main entry point for ansible-runner executable
When the ```ansible-runner``` command is executed, this function
is the main entry point that is called and executed.
:param list sys_args: List of arguments to be parsed by the parser
:returns: an instance of SystemExit
:rtype: SystemExit
"""
parser = AnsibleRunnerArgumentParser(
prog='ansible-runner',
description="Use 'ansible-runner' (with no arguments) to see basic usage"
)
subparser = parser.add_subparsers(
help="Command to invoke",
dest='command',
description="COMMAND PRIVATE_DATA_DIR [ARGS]"
)
add_args_to_parser(parser, DEFAULT_CLI_ARGS['generic_args'])
subparser.required = True
# positional options
run_subparser = subparser.add_parser(
'run',
help="Run ansible-runner in the foreground"
)
add_args_to_parser(run_subparser, DEFAULT_CLI_ARGS['positional_args'])
add_args_to_parser(run_subparser, DEFAULT_CLI_ARGS['playbook_group'])
start_subparser = subparser.add_parser(
'start',
help="Start an ansible-runner process in the background"
)
add_args_to_parser(start_subparser, DEFAULT_CLI_ARGS['positional_args'])
add_args_to_parser(start_subparser, DEFAULT_CLI_ARGS['playbook_group'])
stop_subparser = subparser.add_parser(
'stop',
help="Stop an ansible-runner process that's running in the background"
)
add_args_to_parser(stop_subparser, DEFAULT_CLI_ARGS['positional_args'])
isalive_subparser = subparser.add_parser(
'is-alive',
help="Check if a an ansible-runner process in the background is still running."
)
add_args_to_parser(isalive_subparser, DEFAULT_CLI_ARGS['positional_args'])
# streaming commands
transmit_subparser = subparser.add_parser(
'transmit',
help="Send a job to a remote ansible-runner process"
)
add_args_to_parser(transmit_subparser, DEFAULT_CLI_ARGS['positional_args'])
worker_subparser = subparser.add_parser(
'worker',
help="Execute work streamed from a controlling instance"
)
worker_subcommands = worker_subparser.add_subparsers(
help="Sub-sub command to invoke",
dest='worker_subcommand',
description="ansible-runner worker [sub-sub-command]",
)
cleanup_command = worker_subcommands.add_parser(
'cleanup',
help="Cleanup private_data_dir patterns from prior jobs and supporting temporary folders.",
)
cleanup.add_cleanup_args(cleanup_command)
worker_subparser.add_argument(
"--private-data-dir",
help="base directory containing the ansible-runner metadata "
"(project, inventory, env, etc)",
)
worker_subparser.add_argument(
"--worker-info",
dest="worker_info",
action="store_true",
help="show the execution node's Ansible Runner version along with its memory and CPU capacities"
)
worker_subparser.add_argument(
"--delete",
dest="delete_directory",
action="store_true",
default=False,
help=(
"Delete existing folder (and everything in it) in the location specified by --private-data-dir. "
"The directory will be re-populated when the streamed data is unpacked. "
"Using this will also assure that the directory is deleted when the job finishes."
)
)
worker_subparser.add_argument(
"--keepalive-seconds",
dest="keepalive_seconds",
default=None,
type=int,
help=(
"Emit a synthetic keepalive event every N seconds of idle. (default=0, disabled)"
)
)
process_subparser = subparser.add_parser(
'process',
help="Receive the output of remote ansible-runner work and distribute the results"
)
add_args_to_parser(process_subparser, DEFAULT_CLI_ARGS['positional_args'])
process_subparser.add_argument(
"-i", "--ident",
default=None,
help=(
"An identifier to use as a subdirectory when saving artifacts. "
"Generally intended to match the --ident passed to the transmit command."
)
)
# generic args for all subparsers
add_args_to_parser(run_subparser, DEFAULT_CLI_ARGS['generic_args'])
add_args_to_parser(start_subparser, DEFAULT_CLI_ARGS['generic_args'])
add_args_to_parser(stop_subparser, DEFAULT_CLI_ARGS['generic_args'])
add_args_to_parser(isalive_subparser, DEFAULT_CLI_ARGS['generic_args'])
add_args_to_parser(transmit_subparser, DEFAULT_CLI_ARGS['generic_args'])
add_args_to_parser(worker_subparser, DEFAULT_CLI_ARGS['generic_args'])
add_args_to_parser(process_subparser, DEFAULT_CLI_ARGS['generic_args'])
# runner group
ansible_runner_group_options = (
"Ansible Runner Options",
"configuration options for controlling the ansible-runner "
"runtime environment.",
)
base_runner_group = parser.add_argument_group(*ansible_runner_group_options)
run_runner_group = run_subparser.add_argument_group(*ansible_runner_group_options)
start_runner_group = start_subparser.add_argument_group(*ansible_runner_group_options)
stop_runner_group = stop_subparser.add_argument_group(*ansible_runner_group_options)
isalive_runner_group = isalive_subparser.add_argument_group(*ansible_runner_group_options)
transmit_runner_group = transmit_subparser.add_argument_group(*ansible_runner_group_options)
add_args_to_parser(base_runner_group, DEFAULT_CLI_ARGS['runner_group'])
add_args_to_parser(run_runner_group, DEFAULT_CLI_ARGS['runner_group'])
add_args_to_parser(start_runner_group, DEFAULT_CLI_ARGS['runner_group'])
add_args_to_parser(stop_runner_group, DEFAULT_CLI_ARGS['runner_group'])
add_args_to_parser(isalive_runner_group, DEFAULT_CLI_ARGS['runner_group'])
add_args_to_parser(transmit_runner_group, DEFAULT_CLI_ARGS['runner_group'])
# mutually exclusive group
run_mutually_exclusive_group = run_subparser.add_mutually_exclusive_group()
start_mutually_exclusive_group = start_subparser.add_mutually_exclusive_group()
stop_mutually_exclusive_group = stop_subparser.add_mutually_exclusive_group()
isalive_mutually_exclusive_group = isalive_subparser.add_mutually_exclusive_group()
transmit_mutually_exclusive_group = transmit_subparser.add_mutually_exclusive_group()
add_args_to_parser(run_mutually_exclusive_group, DEFAULT_CLI_ARGS['mutually_exclusive_group'])
add_args_to_parser(start_mutually_exclusive_group, DEFAULT_CLI_ARGS['mutually_exclusive_group'])
add_args_to_parser(stop_mutually_exclusive_group, DEFAULT_CLI_ARGS['mutually_exclusive_group'])
add_args_to_parser(isalive_mutually_exclusive_group, DEFAULT_CLI_ARGS['mutually_exclusive_group'])
add_args_to_parser(transmit_mutually_exclusive_group, DEFAULT_CLI_ARGS['mutually_exclusive_group'])
# ansible options
ansible_options = (
"Ansible Options",
"control the ansible[-playbook] execution environment",
)
run_ansible_group = run_subparser.add_argument_group(*ansible_options)
start_ansible_group = start_subparser.add_argument_group(*ansible_options)
stop_ansible_group = stop_subparser.add_argument_group(*ansible_options)
isalive_ansible_group = isalive_subparser.add_argument_group(*ansible_options)
transmit_ansible_group = transmit_subparser.add_argument_group(*ansible_options)
add_args_to_parser(run_ansible_group, DEFAULT_CLI_ARGS['ansible_group'])
add_args_to_parser(start_ansible_group, DEFAULT_CLI_ARGS['ansible_group'])
add_args_to_parser(stop_ansible_group, DEFAULT_CLI_ARGS['ansible_group'])
add_args_to_parser(isalive_ansible_group, DEFAULT_CLI_ARGS['ansible_group'])
add_args_to_parser(transmit_ansible_group, DEFAULT_CLI_ARGS['ansible_group'])
# roles group
roles_group_options = (
"Ansible Role Options",
"configuration options for directly executing Ansible roles",
)
run_roles_group = run_subparser.add_argument_group(*roles_group_options)
start_roles_group = start_subparser.add_argument_group(*roles_group_options)
stop_roles_group = stop_subparser.add_argument_group(*roles_group_options)
isalive_roles_group = isalive_subparser.add_argument_group(*roles_group_options)
transmit_roles_group = transmit_subparser.add_argument_group(*roles_group_options)
add_args_to_parser(run_roles_group, DEFAULT_CLI_ARGS['roles_group'])
add_args_to_parser(start_roles_group, DEFAULT_CLI_ARGS['roles_group'])
add_args_to_parser(stop_roles_group, DEFAULT_CLI_ARGS['roles_group'])
add_args_to_parser(isalive_roles_group, DEFAULT_CLI_ARGS['roles_group'])
add_args_to_parser(transmit_roles_group, DEFAULT_CLI_ARGS['roles_group'])
# modules groups
modules_group_options = (
"Ansible Module Options",
"configuration options for directly executing Ansible modules",
)
run_modules_group = run_subparser.add_argument_group(*modules_group_options)
start_modules_group = start_subparser.add_argument_group(*modules_group_options)
stop_modules_group = stop_subparser.add_argument_group(*modules_group_options)
isalive_modules_group = isalive_subparser.add_argument_group(*modules_group_options)
transmit_modules_group = transmit_subparser.add_argument_group(*modules_group_options)
add_args_to_parser(run_modules_group, DEFAULT_CLI_ARGS['modules_group'])
add_args_to_parser(start_modules_group, DEFAULT_CLI_ARGS['modules_group'])
add_args_to_parser(stop_modules_group, DEFAULT_CLI_ARGS['modules_group'])
add_args_to_parser(isalive_modules_group, DEFAULT_CLI_ARGS['modules_group'])
add_args_to_parser(transmit_modules_group, DEFAULT_CLI_ARGS['modules_group'])
# container group
container_group_options = (
"Ansible Container Options",
"configuration options for executing Ansible playbooks",
)
run_container_group = run_subparser.add_argument_group(*container_group_options)
start_container_group = start_subparser.add_argument_group(*container_group_options)
stop_container_group = stop_subparser.add_argument_group(*container_group_options)
isalive_container_group = isalive_subparser.add_argument_group(*container_group_options)
transmit_container_group = transmit_subparser.add_argument_group(*container_group_options)
add_args_to_parser(run_container_group, DEFAULT_CLI_ARGS['container_group'])
add_args_to_parser(start_container_group, DEFAULT_CLI_ARGS['container_group'])
add_args_to_parser(stop_container_group, DEFAULT_CLI_ARGS['container_group'])
add_args_to_parser(isalive_container_group, DEFAULT_CLI_ARGS['container_group'])
add_args_to_parser(transmit_container_group, DEFAULT_CLI_ARGS['container_group'])
args = parser.parse_args(sys_args)
vargs = vars(args)
if vargs.get('command') == 'worker':
if vargs.get('worker_subcommand') == 'cleanup':
cleanup.run_cleanup(vargs)
parser.exit(0)
if vargs.get('worker_info'):
cpu = get_cpu_count()
mem = get_mem_in_bytes()
errors = []
uuid = ensure_uuid()
if not isinstance(mem, int):
errors.append(mem)
mem = None
if "Could not find" in uuid:
errors.append(uuid)
uuid = None
info = {'errors': errors,
'mem_in_bytes': mem,
'cpu_count': cpu,
'runner_version': VERSION,
'uuid': uuid,
}
print(safe_dump(info, default_flow_style=True))
parser.exit(0)
private_data_dir = vargs.get('private_data_dir')
delete_directory = vargs.get('delete_directory', False)
if private_data_dir and delete_directory:
shutil.rmtree(private_data_dir, ignore_errors=True)
register_for_cleanup(private_data_dir)
elif private_data_dir is None:
temp_private_dir = tempfile.mkdtemp()
vargs['private_data_dir'] = temp_private_dir
register_for_cleanup(temp_private_dir)
if vargs.get('command') == 'process':
# the process command is the final destination of artifacts, user expects private_data_dir to not be cleaned up
if not vargs.get('private_data_dir'):
temp_private_dir = tempfile.mkdtemp()
vargs['private_data_dir'] = temp_private_dir
if vargs.get('command') in ('start', 'run', 'transmit'):
if vargs.get('hosts') and not (vargs.get('module') or vargs.get('role')):
parser.exit(status=1, message="The --hosts option can only be used with -m or -r\n")
if not (vargs.get('module') or vargs.get('role')) and not vargs.get('playbook'):
parser.exit(status=1, message="The -p option must be specified when not using -m or -r\n")
if vargs.get('inventory'):
if not (abs_inv := valid_inventory(vargs['private_data_dir'], vargs.get('inventory'))):
parser.exit(status=1, message="Value for --inventory does not appear to be a valid path.\n")
else:
vargs['inventory'] = abs_inv
output.configure()
# enable or disable debug mode
output.set_debug('enable' if vargs.get('debug') else 'disable')
# set the output logfile
if ('logfile' in args) and vargs.get('logfile'):
output.set_logfile(vargs.get('logfile'))
output.debug('starting debug logging')
# get the absolute path for start since it is a daemon
vargs['private_data_dir'] = os.path.abspath(vargs.get('private_data_dir'))
pidfile = os.path.join(vargs.get('private_data_dir'), 'pid')
try:
os.makedirs(vargs.get('private_data_dir'), mode=0o700)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(vargs.get('private_data_dir')):
pass
else:
raise
stderr_path = None
context = None
if vargs.get('command') not in ('run', 'transmit', 'worker'):
stderr_path = os.path.join(vargs.get('private_data_dir'), 'daemon.log')
if not os.path.exists(stderr_path):
os.close(os.open(stderr_path, os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR))
if vargs.get('command') in ('start', 'run', 'transmit', 'worker', 'process'):
if vargs.get('command') == 'start':
context = daemon.DaemonContext(pidfile=TimeoutPIDLockFile(pidfile))
else:
context = threading.Lock()
streamer = None
if vargs.get('command') in ('transmit', 'worker', 'process'):
streamer = vargs.get('command')
with context:
with role_manager(vargs) as vargs:
run_options = {
"private_data_dir": vargs.get('private_data_dir'),
"ident": vargs.get('ident'),
"binary": vargs.get('binary'),
"playbook": vargs.get('playbook'),
"module": vargs.get('module'),
"module_args": vargs.get('module_args'),
"host_pattern": vargs.get('hosts'),
"verbosity": vargs.get('v'),
"quiet": vargs.get('quiet'),
"rotate_artifacts": vargs.get('rotate_artifacts'),
"ignore_logging": False,
"json_mode": vargs.get('json'),
"omit_event_data": vargs.get('omit_event_data'),
"only_failed_event_data": vargs.get('only_failed_event_data'),
"inventory": vargs.get('inventory'),
"forks": vargs.get('forks'),
"project_dir": vargs.get('project_dir'),
"artifact_dir": vargs.get('artifact_dir'),
"roles_path": [vargs.get('roles_path')] if vargs.get('roles_path') else None,
"process_isolation": vargs.get('process_isolation'),
"process_isolation_executable": vargs.get('process_isolation_executable'),
"process_isolation_path": vargs.get('process_isolation_path'),
"process_isolation_hide_paths": vargs.get('process_isolation_hide_paths'),
"process_isolation_show_paths": vargs.get('process_isolation_show_paths'),
"process_isolation_ro_paths": vargs.get('process_isolation_ro_paths'),
"container_image": vargs.get('container_image'),
"container_volume_mounts": vargs.get('container_volume_mounts'),
"container_options": vargs.get('container_options'),
"directory_isolation_base_path": vargs.get('directory_isolation_base_path'),
"cmdline": vargs.get('cmdline'),
"limit": vargs.get('limit'),
"streamer": streamer,
"suppress_env_files": vargs.get("suppress_env_files"),
"keepalive_seconds": vargs.get("keepalive_seconds"),
}
try:
res = run(**run_options)
except Exception:
e = traceback.format_exc()
if stderr_path:
with open(stderr_path, 'w+') as ep:
ep.write(e)
else:
sys.stderr.write(e)
return 1
return res.rc
try:
with open(pidfile, 'r') as f:
pid = int(f.readline())
except IOError:
return 1
if vargs.get('command') == 'stop':
Runner.handle_termination(pid, pidfile=pidfile)
return 0
if vargs.get('command') == 'is-alive':
try:
os.kill(pid, signal.SIG_DFL)
return 0
except OSError:
return 1
return 0
# Script entry point: delegate to main() and use its return value as the exit status.
if __name__ == '__main__':
    sys.exit(main())
ansible-runner-2.4.1/src/ansible_runner/cleanup.py 0000664 0000000 0000000 00000016712 14770573620 0022262 0 ustar 00root root 0000000 0000000 from __future__ import annotations
import argparse
import datetime
import glob
import os
import signal
import subprocess
import sys
from pathlib import Path
from tempfile import gettempdir
from ansible_runner.defaults import (
GRACE_PERIOD_DEFAULT,
registry_auth_prefix,
default_process_isolation_executable
)
from ansible_runner.utils import cleanup_folder
__all__ = ['add_cleanup_args', 'run_cleanup']
def add_cleanup_args(command: argparse.ArgumentParser) -> None:
    """Register the ``cleanup`` sub-command options on the given parser.

    :param command: The argparse parser (or sub-parser) to extend in place.
    """
    # (flag, kwargs) pairs; kept as data so the full option set is easy to scan.
    cleanup_options = (
        (
            "--file-pattern",
            {
                "help": "A file glob pattern to find private_data_dir folders to remove. "
                        "Example: --file-pattern=/tmp/.ansible-runner-*",
            },
        ),
        (
            "--exclude-strings",
            {
                "nargs": '*',
                "help": "A comma separated list of keywords in directory name or path to avoid deleting.",
            },
        ),
        (
            "--remove-images",
            {
                "nargs": '*',
                "help": "A comma separated list of podman or docker tags to delete. "
                        "This may not remove the corresponding layers, use the image-prune option to assure full deletion. "
                        "Example: --remove-images=quay.io/user/image:devel quay.io/user/builder:latest",
            },
        ),
        (
            "--grace-period",
            {
                "default": GRACE_PERIOD_DEFAULT,
                "type": int,
                "help": "Time (in minutes) after last modification to exclude a folder from deletion for. "
                        "This is to avoid deleting folders that were recently created, or folders not started via the start command. "
                        "Value of 0 indicates that no folders will be excluded based on modified time.",
            },
        ),
        (
            "--image-prune",
            {
                "action": "store_true",
                "help": "If specified, will run docker / podman image prune --force. "
                        "This will only run after untagging.",
            },
        ),
        (
            "--process-isolation-executable",
            {
                "default": "podman",
                "help": "The container image to clean up images for (default=podman)",
            },
        ),
    )
    for flag, kwargs in cleanup_options:
        command.add_argument(flag, **kwargs)
def run_command(cmd: list) -> str:
    '''Run ``cmd`` and return its stripped standard output, expecting success.

    :param cmd: The command and its arguments as a list of strings.
    :raises RuntimeError: If the command exits non-zero; the command line,
        its stdout and its stderr are printed first for diagnosis.
    :return: The command's standard output with surrounding whitespace stripped.
    '''
    process = subprocess.run(cmd, capture_output=True, check=False)
    stdout = str(process.stdout, encoding='utf-8')
    if process.returncode != 0:
        print('Error running command:')
        print(' '.join(cmd))
        print('Stdout:')
        print(stdout)
        # stderr is where most CLI tools report failures; without it the
        # diagnostics above were frequently empty
        print('Stderr:')
        print(str(process.stderr, encoding='utf-8'))
        raise RuntimeError('Error running command')
    return stdout.strip()
def is_alive(directory: str) -> int:
    """Return 1 when the project in ``directory`` has a live process, else 0.

    A started project records its pid in the ``pid`` file inside its
    private_data_dir; signal 0 is used to probe whether that process exists.

    :param directory: A private_data_dir to check.
    :return: 1 if the recorded process is still running, 0 otherwise.
    """
    pidfile = os.path.join(directory, 'pid')

    try:
        with open(pidfile, 'r') as f:
            pid = int(f.readline())
    except (IOError, ValueError):
        # no pid file (nothing was started here) or an unreadable pid:
        # treat as not running so cleanup can proceed
        return 0

    try:
        # signal.SIG_DFL has integer value 0; kill(pid, 0) only probes existence.
        # Success means the process exists, so the project IS alive. The
        # previous return values here were inverted, which made cleanup_dirs
        # delete running projects and permanently skip dead ones.
        os.kill(pid, signal.SIG_DFL)
        return 1
    except OSError:
        return 0
def project_idents(directory: str) -> list:
    """Return the run idents for which ``directory`` holds artifacts.

    :param directory: A private_data_dir; its ``artifacts`` subfolder contains
        one entry per run ident.
    :return: The list of ident names; empty when there is no artifacts folder
        (or when something non-directory sits in its place).
    """
    artifacts_path = os.path.join(directory, 'artifacts')
    try:
        entries = os.listdir(artifacts_path)
    except (FileNotFoundError, NotADirectoryError):
        return []
    return entries
def delete_associated_folders(directory: str) -> None:
    """Delete the tmp folders a completed job in ``directory`` left behind.

    For every ident with artifacts under ``directory`` (a private_data_dir),
    the matching registry-auth scratch dirs in the system tmp dir are removed.
    """
    for ident in project_idents(directory):
        # registry auth dirs are created as <tmp>/<prefix><ident>_<random>
        registry_auth_pattern = f'{gettempdir()}/{registry_auth_prefix}{ident}_*'
        for auth_dir in glob.glob(registry_auth_pattern):
            if cleanup_folder(auth_dir):
                print(f'Removed associated registry auth dir {auth_dir}')
def validate_pattern(pattern: str) -> None:
    """Refuse glob patterns that would match critical system directories.

    :param pattern: The file glob the user asked to delete.
    :raises RuntimeError: If any match resolves to a protected system path.
    """
    # do not let user shoot themselves in foot by deleting these important linux folders
    protected = (
        '/', '/bin', '/dev', '/home', '/lib', '/mnt', '/proc',
        '/run', '/sys', '/usr', '/boot', '/etc', '/opt', '/sbin', gettempdir(), '/var'
    )
    # compare against both the literal and the fully-resolved (symlink-free) forms
    off_limits = {Path(p) for p in protected} | {Path(p).resolve() for p in protected}
    bad_paths = [candidate for candidate in glob.glob(pattern) if Path(candidate).resolve() in off_limits]
    if bad_paths:
        raise RuntimeError(
            f'Provided pattern could result in deleting system folders:\n{" ".join(bad_paths)}\n'
            'Refusing to continue for user system safety.'
        )
def cleanup_dirs(pattern: str, exclude_strings: list | None = None, grace_period: int = GRACE_PERIOD_DEFAULT) -> int:
    """Delete private_data_dir folders matching ``pattern``.

    :param pattern: File glob selecting candidate directories (validated so
        system folders can never be matched; exits the process on violation).
    :param exclude_strings: Substrings; a path containing any of them is kept.
    :param grace_period: Minutes since last modification below which a folder
        is kept; 0 disables the age check.
    :return: The number of folders actually removed.
    """
    excludes = exclude_strings if exclude_strings is not None else []

    try:
        validate_pattern(pattern)
    except RuntimeError as e:
        sys.exit(str(e))

    removed = 0
    now_time = datetime.datetime.now()
    for candidate in glob.glob(pattern):
        if any(str(token) in candidate for token in excludes):
            continue
        if grace_period:
            # skip anything modified too recently — it may be a brand-new run,
            # or a folder that was never started via the start command
            modified = datetime.datetime.fromtimestamp(os.stat(candidate).st_mtime)
            if modified > now_time - datetime.timedelta(minutes=grace_period):
                continue
        if is_alive(candidate):
            print(f'Excluding running project {candidate} from cleanup')
            continue
        # remove auxiliary tmp dirs first, then the project dir itself
        delete_associated_folders(candidate)
        if cleanup_folder(candidate):
            removed += 1
    return removed
def cleanup_images(images: list, runtime: str) -> int:
    """
    Untag the requested container images; returns a count of untag operations.

    ``docker rmi`` will just untag while ``podman rmi`` will untag AND remove
    layers, causing running containers to be killed; for podman we use
    ``untag`` to achieve the same (docker-like) behavior.
    NOTE: this only untags the image and does not delete it; prune_images
    needs to be called to actually delete the layers.
    """
    rm_ct = 0
    for image_tag in images:
        # list every local repository:tag matching this image reference;
        # the quotes in --format end up in the output and are stripped below
        stdout = run_command([runtime, 'images', '--format="{{.Repository}}:{{.Tag}}"', image_tag])
        if not stdout:
            continue
        for discovered_tag in stdout.split('\n'):
            if runtime == 'podman':
                try:
                    # NOTE(review): this untags `image_tag` (the user-supplied
                    # reference) on every iteration rather than `discovered_tag`
                    # — confirm whether per-discovered-tag untagging was
                    # intended here, as in the docker branch below
                    stdout = run_command([runtime, 'untag', image_tag])
                    if not stdout:
                        rm_ct += 1
                except Exception:
                    pass  # best effort untag
            else:
                # docker: force-remove by the discovered tag (strip format quotes)
                stdout = run_command([runtime, 'rmi', discovered_tag.strip().strip('"'), '-f'])
                rm_ct += stdout.count('Untagged:')
    return rm_ct
def prune_images(runtime: str) -> bool:
    """Run ``<runtime> image prune -f`` and report whether anything changed.

    :param runtime: The container engine executable (podman or docker).
    :return: True when the prune reclaimed something, False otherwise.
    """
    output = run_command([runtime, 'image', 'prune', '-f'])
    # empty output or the explicit "nothing reclaimed" message means no change
    if not output or output == "Total reclaimed space: 0B":
        return False
    return True
def run_cleanup(vargs: dict) -> None:
    """Entry point for the ``cleanup`` worker sub-command.

    Reads the parsed CLI options from ``vargs``, removes matching private
    data dirs, untags the requested images, optionally prunes the image
    store, and prints an ansible-style ``(changed: ...)`` summary.
    """
    excludes = vargs.get('exclude_strings') or []
    images = vargs.get('remove_images') or []
    pattern = vargs.get('file_pattern')
    runtime = vargs.get('process_isolation_executable', default_process_isolation_executable)

    dir_ct = 0
    image_ct = 0
    pruned = False

    if pattern:
        dir_ct = cleanup_dirs(pattern, exclude_strings=excludes, grace_period=vargs.get('grace_period', GRACE_PERIOD_DEFAULT))
        if dir_ct:
            print(f'Removed {dir_ct} private data dir(s) in pattern {pattern}')

    if images:
        image_ct = cleanup_images(images, runtime=runtime)
        if image_ct:
            print(f'Removed {image_ct} image(s)')

    if vargs.get('image_prune'):
        pruned = prune_images(runtime=runtime)
        if pruned:
            print('Pruned images')

    if dir_ct or image_ct or pruned:
        print('(changed: True)')
    else:
        print('(changed: False)')
ansible-runner-2.4.1/src/ansible_runner/config/ 0000775 0000000 0000000 00000000000 14770573620 0021517 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/src/ansible_runner/config/__init__.py 0000664 0000000 0000000 00000000000 14770573620 0023616 0 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/src/ansible_runner/config/_base.py 0000664 0000000 0000000 00000073071 14770573620 0023152 0 ustar 00root root 0000000 0000000 ############################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# pylint: disable=W0201
from __future__ import annotations
import json
import logging
import os
import re
import stat
import tempfile
import shutil
from base64 import b64encode
from enum import Enum
from uuid import uuid4
from collections.abc import Mapping
from typing import Any
import pexpect
from ansible_runner import defaults
from ansible_runner.output import debug
from ansible_runner.exceptions import ConfigurationError
from ansible_runner.defaults import registry_auth_prefix
from ansible_runner.loader import ArtifactLoader
from ansible_runner.utils import (
get_callback_dir,
open_fifo_write,
args2cmdline,
sanitize_container_name,
cli_mounts,
register_for_cleanup,
)
logger = logging.getLogger('ansible-runner')
class BaseExecutionMode(Enum):
    """The kind of command a config will execute.

    Consulted when wrapping the command for containerization to decide which
    bind mounts and option handling apply.
    """
    # no command wrapping applies (run()/run_async() handle mounts themselves)
    NONE = 0
    # run ansible commands either locally or within EE
    ANSIBLE_COMMANDS = 1
    # execute generic commands
    GENERIC_COMMANDS = 2
class BaseConfig:
def __init__(self,
             private_data_dir: str | None = None,
             host_cwd: str | None = None,
             envvars: dict[str, Any] | None = None,
             passwords=None,
             settings=None,
             project_dir: str | None = None,
             artifact_dir: str | None = None,
             fact_cache_type: str = 'jsonfile',
             fact_cache=None,
             process_isolation: bool = False,
             process_isolation_executable: str | None = None,
             container_image: str = "",
             container_volume_mounts=None,
             container_options=None,
             container_workdir: str | None = None,
             container_auth_data=None,
             ident: str | None = None,
             rotate_artifacts: int = 0,
             timeout: int | None = None,
             ssh_key: str | None = None,
             quiet: bool = False,
             json_mode: bool = False,
             check_job_event_data: bool = False,
             suppress_env_files: bool = False,
             keepalive_seconds: int | None = None
             ):
    """Store common runner settings and create the on-disk layout.

    :param private_data_dir: Base directory for all inputs/outputs; created
        with mode 0700 when given, otherwise a temp dir is auto-created.
    :param artifact_dir: Root for artifacts (defaults to
        ``<private_data_dir>/artifacts``); the per-run dir is ``<root>/<ident>``.
    :param ident: Run identifier; a uuid4 string is generated when omitted.
    :param fact_cache_type: Only ``'jsonfile'`` gets a local fact-cache path.
    :param ssh_key: SSH private key data; written to a fifo for ssh-agent later.
    :param keepalive_seconds: Accepted but currently ignored (see note below).

    The remaining parameters are stored on ``self`` for use by
    ``prepare_env`` and the command-wrapping helpers.
    """
    # pylint: disable=W0613
    # common params
    self.host_cwd = host_cwd
    self.envvars = envvars
    self.ssh_key_data = ssh_key
    self.command: list[str] = []

    # container params
    self.process_isolation = process_isolation
    self.process_isolation_executable = process_isolation_executable or defaults.default_process_isolation_executable
    self.container_image = container_image
    self.container_volume_mounts = container_volume_mounts
    self.container_workdir = container_workdir
    self.container_auth_data = container_auth_data
    self.registry_auth_path: str
    self.container_name: str = ""  # like other properties, not accurate until prepare is called
    self.container_options = container_options

    # runner params
    self.rotate_artifacts = rotate_artifacts
    self.quiet = quiet
    self.json_mode = json_mode
    self.passwords = passwords
    self.settings = settings
    self.timeout = timeout
    self.check_job_event_data = check_job_event_data
    self.suppress_env_files = suppress_env_files
    # ignore this for now since it's worker-specific and would just trip up old runners
    # self.keepalive_seconds = keepalive_seconds

    # setup initial environment
    if private_data_dir:
        self.private_data_dir = os.path.abspath(private_data_dir)
        # Note that os.makedirs, exist_ok=True is dangerous.  If there's a directory writable
        # by someone other than the user anywhere in the path to be created, an attacker can
        # attempt to compromise the directories via a race.
        os.makedirs(self.private_data_dir, exist_ok=True, mode=0o700)
    else:
        self.private_data_dir = tempfile.mkdtemp(prefix=defaults.AUTO_CREATE_NAMING, dir=defaults.AUTO_CREATE_DIR)

    if artifact_dir is None:
        artifact_dir = os.path.join(self.private_data_dir, 'artifacts')
    else:
        artifact_dir = os.path.abspath(artifact_dir)

    if ident is None:
        self.ident = str(uuid4())
    else:
        self.ident = str(ident)

    # per-run artifact dir is namespaced by the run ident
    self.artifact_dir = os.path.join(artifact_dir, self.ident)

    if not project_dir:
        self.project_dir = os.path.join(self.private_data_dir, 'project')
    else:
        self.project_dir = project_dir

    # NOTE(review): rotate_artifacts was already assigned above — duplicate
    # assignment kept as-is; confirm whether one of the two can be dropped
    self.rotate_artifacts = rotate_artifacts
    self.fact_cache_type = fact_cache_type
    # only the jsonfile cache plugin uses a local path
    self.fact_cache = os.path.join(self.artifact_dir, fact_cache or 'fact_cache') if self.fact_cache_type == 'jsonfile' else None

    self.loader = ArtifactLoader(self.private_data_dir)

    if self.host_cwd:
        self.host_cwd = os.path.abspath(self.host_cwd)
        self.cwd = self.host_cwd
    else:
        self.cwd = os.getcwd()

    os.makedirs(self.artifact_dir, exist_ok=True, mode=0o700)
# isolation executables that mean "run inside a container"
_CONTAINER_ENGINES = ('docker', 'podman')

@property
def containerized(self):
    # True only when process isolation is requested AND the chosen isolation
    # executable is a known container engine
    return self.process_isolation and self.process_isolation_executable in self._CONTAINER_ENGINES
def prepare_env(self, runner_mode: str = 'pexpect') -> None:
    """
    Manages reading environment metadata files under ``private_data_dir`` and merging/updating
    with existing values so the :py:class:`ansible_runner.runner.Runner` object can read and use them easily.

    :param runner_mode: ``'pexpect'`` (interactive; prompts answered from
        ``env/passwords``) or ``'subprocess'`` (plain subprocess with an
        optional timeout).
    """
    self.runner_mode = runner_mode
    # env/settings merges INTO any settings passed to the constructor
    try:
        if self.settings and isinstance(self.settings, dict):
            self.settings.update(self.loader.load_file('env/settings', Mapping))  # type: ignore
        else:
            self.settings = self.loader.load_file('env/settings', Mapping)
    except ConfigurationError:
        debug("Not loading settings")
        self.settings = {}

    if self.runner_mode == 'pexpect':
        # env/passwords merges INTO any passwords passed to the constructor
        try:
            if self.passwords and isinstance(self.passwords, dict):
                self.passwords.update(self.loader.load_file('env/passwords', Mapping))  # type: ignore
            else:
                self.passwords = self.passwords or self.loader.load_file('env/passwords', Mapping)
        except ConfigurationError:
            debug('Not loading passwords')

        # compile prompt patterns -> response map for pexpect matching
        self.expect_passwords = {}
        try:
            if self.passwords:
                self.expect_passwords = {
                    re.compile(pattern, re.M): password
                    for pattern, password in self.passwords.items()
                }
        except Exception as e:
            debug(f'Failed to compile RE from passwords: {e}')

        # TIMEOUT/EOF sentinels must always be present so expect() terminates
        self.expect_passwords[pexpect.TIMEOUT] = None
        self.expect_passwords[pexpect.EOF] = None

        self.pexpect_timeout = self.settings.get('pexpect_timeout', 5)
        self.pexpect_use_poll = self.settings.get('pexpect_use_poll', True)
        self.idle_timeout = self.settings.get('idle_timeout', None)

        # an explicit constructor timeout wins over the settings file
        if self.timeout:
            self.job_timeout = int(self.timeout)
        else:
            self.job_timeout = self.settings.get('job_timeout', None)

    elif self.runner_mode == 'subprocess':
        if self.timeout:
            self.subprocess_timeout = int(self.timeout)
        else:
            self.subprocess_timeout = self.settings.get('subprocess_timeout', None)

    # settings file may override the container options given to the constructor
    self.process_isolation = self.settings.get('process_isolation', self.process_isolation)
    self.process_isolation_executable = self.settings.get('process_isolation_executable', self.process_isolation_executable)
    self.container_image = self.settings.get('container_image', self.container_image)
    self.container_volume_mounts = self.settings.get('container_volume_mounts', self.container_volume_mounts)
    self.container_options = self.settings.get('container_options', self.container_options)
    self.container_auth_data = self.settings.get('container_auth_data', self.container_auth_data)

    if self.containerized:
        if not self.container_image:
            raise ConfigurationError(
                f'container_image required when specifying process_isolation_executable={self.process_isolation_executable}'
            )
        self.container_name = f"ansible_runner_{sanitize_container_name(self.ident)}"
        # containerized runs start from an EMPTY env; only explicit vars go in
        self.env: dict[str, Any] = {}

        if self.process_isolation_executable == 'podman':
            # A kernel bug in RHEL < 8.5 causes podman to use the fuse-overlayfs driver. This results in errors when
            # trying to set extended file attributes. Setting this environment variable allows modules to take advantage
            # of a fallback to work around this bug when failures are encountered.
            #
            # See the following for more information:
            #   https://github.com/ansible/ansible/pull/73282
            #   https://github.com/ansible/ansible/issues/73310
            #   https://issues.redhat.com/browse/AAP-476
            self.env['ANSIBLE_UNSAFE_WRITES'] = '1'

        # paths below refer to locations INSIDE the container
        artifact_dir = os.path.join("/runner/artifacts", self.ident)
        self.env['AWX_ISOLATED_DATA_DIR'] = artifact_dir
        if self.fact_cache_type == 'jsonfile':
            self.env['ANSIBLE_CACHE_PLUGIN_CONNECTION'] = os.path.join(artifact_dir, 'fact_cache')
    else:
        # seed env with existing shell env
        self.env = os.environ.copy()

    # constructor envvars, then env/envvars file, layered on top
    if self.envvars and isinstance(self.envvars, dict):
        self.env.update(self.envvars)

    try:
        envvars = self.loader.load_file('env/envvars', Mapping)
        if envvars:
            self.env.update(envvars)  # type: ignore
    except ConfigurationError:
        debug("Not loading environment vars")
        # Still need to pass default environment to pexpect

    try:
        if self.ssh_key_data is None:
            self.ssh_key_data = self.loader.load_file('env/ssh_key', str)  # type: ignore
    except ConfigurationError:
        debug("Not loading ssh key")
        self.ssh_key_data = None

    # write the SSH key data into a fifo read by ssh-agent
    if self.ssh_key_data:
        self.ssh_key_path = os.path.join(self.artifact_dir, 'ssh_key_data')
        open_fifo_write(self.ssh_key_path, self.ssh_key_data)

    self.suppress_output_file = self.settings.get('suppress_output_file', False)
    self.suppress_ansible_output = self.settings.get('suppress_ansible_output', self.quiet)

    # a settings-provided fact_cache path only applies when the (settings)
    # cache type is jsonfile or left unspecified
    if 'fact_cache' in self.settings:
        if 'fact_cache_type' in self.settings:
            if self.settings['fact_cache_type'] == 'jsonfile':
                self.fact_cache = os.path.join(self.artifact_dir, self.settings['fact_cache'])
        else:
            self.fact_cache = os.path.join(self.artifact_dir, self.settings['fact_cache'])

    # Use local callback directory
    if self.containerized:
        # when containerized, copy the callback dir to $private_data_dir/artifacts/<ident>/callback
        # then append to env['ANSIBLE_CALLBACK_PLUGINS'] with the copied location.
        callback_dir = os.path.join(self.artifact_dir, 'callback')
        # if callback dir already exists (on repeat execution with the same ident), remove it first.
        if os.path.exists(callback_dir):
            shutil.rmtree(callback_dir)
        shutil.copytree(get_callback_dir(), callback_dir)

        container_callback_dir = os.path.join("/runner/artifacts", self.ident, "callback")
        self.env['ANSIBLE_CALLBACK_PLUGINS'] = ':'.join(filter(None, (self.env.get('ANSIBLE_CALLBACK_PLUGINS'), container_callback_dir)))
    else:
        callback_dir = self.env.get('AWX_LIB_DIRECTORY', os.getenv('AWX_LIB_DIRECTORY', ''))
        if not callback_dir:
            callback_dir = get_callback_dir()
        self.env['ANSIBLE_CALLBACK_PLUGINS'] = ':'.join(filter(None, (self.env.get('ANSIBLE_CALLBACK_PLUGINS'), callback_dir)))

    # this is an adhoc command if the module is specified, TODO: combine with logic in RunnerConfig class
    is_adhoc = bool((getattr(self, 'binary', None) is None) and (getattr(self, 'module', None) is not None))

    # preserve the user's stdout callback before forcing awx_display below
    if self.env.get('ANSIBLE_STDOUT_CALLBACK'):
        self.env['ORIGINAL_STDOUT_CALLBACK'] = self.env.get('ANSIBLE_STDOUT_CALLBACK')

    if is_adhoc:
        # force loading awx_display stdout callback for adhoc commands
        self.env["ANSIBLE_LOAD_CALLBACK_PLUGINS"] = '1'
        if 'AD_HOC_COMMAND_ID' not in self.env:
            self.env['AD_HOC_COMMAND_ID'] = '1'

    self.env['ANSIBLE_STDOUT_CALLBACK'] = 'awx_display'
    self.env['ANSIBLE_RETRY_FILES_ENABLED'] = 'False'
    if 'ANSIBLE_HOST_KEY_CHECKING' not in self.env:
        self.env['ANSIBLE_HOST_KEY_CHECKING'] = 'False'
    if not self.containerized:
        self.env['AWX_ISOLATED_DATA_DIR'] = self.artifact_dir

    if self.fact_cache_type == 'jsonfile':
        self.env['ANSIBLE_CACHE_PLUGIN'] = 'jsonfile'
        if not self.containerized:
            self.env['ANSIBLE_CACHE_PLUGIN_CONNECTION'] = self.fact_cache

    # Pexpect will error with non-string envvars types, so we ensure string types
    self.env = {str(k): str(v) for k, v in self.env.items()}

    debug('env:')
    for k, v in sorted(self.env.items()):
        debug(f' {k}: {v}')
def handle_command_wrap(self, execution_mode: BaseExecutionMode, cmdline_args: list[str]) -> None:
    """Apply the final wrappers to ``self.command`` before execution.

    When SSH key data was provided, the command is wrapped to run under
    ssh-agent; when containerized execution is enabled, it is further wrapped
    in the container-runtime invocation.

    :param execution_mode: What kind of command is being run; controls which
        bind mounts are added when containerizing.
    :param cmdline_args: The raw CLI args, scanned for file paths to mount.
    """
    if self.ssh_key_data:
        logger.debug('ssh key data added')
        self.command = self.wrap_args_with_ssh_agent(self.command, self.ssh_key_path)

    if self.containerized:
        logger.debug('containerization enabled')
        self.command = self.wrap_args_for_containerization(self.command, execution_mode, cmdline_args)
    else:
        logger.debug('containerization disabled')

    if hasattr(self, 'command') and isinstance(self.command, list):
        logger.debug("command: %s", ' '.join(self.command))
def _ensure_path_safe_to_mount(self, path: str) -> None:
if os.path.isfile(path):
path = os.path.dirname(path)
if os.path.join(path, "") in ('/', '/home/', '/usr/'):
raise ConfigurationError("When using containerized execution, cannot mount '/' or '/home' or '/usr'")
def _get_playbook_path(self, cmdline_args: list[str]) -> str | None:
_playbook = ""
_book_keeping_copy = cmdline_args.copy()
for arg in cmdline_args:
if arg in ['-i', '--inventory', '--inventory-file']:
_book_keeping_copy_inventory_index = _book_keeping_copy.index(arg)
_book_keeping_copy.pop(_book_keeping_copy_inventory_index)
try:
_book_keeping_copy.pop(_book_keeping_copy_inventory_index)
except IndexError:
# invalid command, pass through for execution
# to return correct error from ansible-core
return None
if len(_book_keeping_copy) == 1:
# it's probably safe to assume this is the playbook
_playbook = _book_keeping_copy[0]
elif _book_keeping_copy[0][0] != '-':
# this should be the playbook, it's the only "naked" arg
_playbook = _book_keeping_copy[0]
else:
# parse everything beyond the first arg because we checked that
# in the previous case already
for arg in _book_keeping_copy[1:]:
if arg[0] == '-':
continue
if _book_keeping_copy[(_book_keeping_copy.index(arg) - 1)][0] != '-':
_playbook = arg
break
return _playbook
def _update_volume_mount_paths(self,
                               args_list: list[str],
                               src_mount_path: str | None,
                               dst_mount_path: str | None = None,
                               labels: str | None = None
                               ) -> None:
    """Append a ``-v src:dst[:labels]`` bind-mount option to ``args_list``.

    :param args_list: Container-runtime argument list, mutated in place.
    :param src_mount_path: Host path to mount; skipped (with a debug log) when
        missing or nonexistent.
    :param dst_mount_path: In-container destination; defaults to the source
        path. Relative values are resolved against ``container_workdir``.
    :param labels: Optional mount labels such as ``:Z`` or ``ro``.
    """
    if src_mount_path is None or not os.path.exists(src_mount_path):
        logger.debug("Source volume mount path does not exist: %s", src_mount_path)
        return

    # ensure source is abs
    src_path = os.path.abspath(os.path.expanduser(os.path.expandvars(src_mount_path)))

    # set dest src (if None) relative to workdir(not absolute) or provided
    if dst_mount_path is None:
        dst_path = src_path
    elif self.container_workdir and not os.path.isabs(dst_mount_path):
        dst_path = os.path.abspath(
            os.path.expanduser(
                os.path.expandvars(os.path.join(self.container_workdir, dst_mount_path))
            )
        )
    else:
        dst_path = os.path.abspath(os.path.expanduser(os.path.expandvars(dst_mount_path)))

    # ensure each is a directory not file, use src for dest
    # because dest doesn't exist locally (so isdir(src) decides for both)
    src_dir = src_path if os.path.isdir(src_path) else os.path.dirname(src_path)
    dst_dir = dst_path if os.path.isdir(src_path) else os.path.dirname(dst_path)

    # always ensure a trailing slash
    src_dir = os.path.join(src_dir, "")
    dst_dir = os.path.join(dst_dir, "")

    # ensure the src and dest are safe mount points
    # after stripping off the file and resolving
    self._ensure_path_safe_to_mount(src_dir)
    self._ensure_path_safe_to_mount(dst_dir)

    # format the src dest str
    volume_mount_path = f"{src_dir}:{dst_dir}"

    # add labels as needed
    if labels:
        if not labels.startswith(":"):
            volume_mount_path += ":"
        volume_mount_path += labels

    # check if mount path already added in args list (avoid duplicate -v entries)
    if volume_mount_path not in args_list:
        args_list.extend(["-v", volume_mount_path])
def _handle_ansible_cmd_options_bind_mounts(self, args_list: list[str], cmdline_args: list[str]) -> None:
    """Add bind mounts for file paths referenced by ansible CLI options.

    Scans ``cmdline_args`` for inventory, vault-password-file and
    private-key options (plus the playbook itself when running
    ansible-playbook) and mounts each referenced path into the execution
    container by appending ``-v`` options to ``args_list``.

    :param args_list: Container-runtime argument list, mutated in place.
    :param cmdline_args: The raw ansible[-playbook] command-line arguments.
    """
    inventory_file_options = ['-i', '--inventory', '--inventory-file']
    vault_file_options = ['--vault-password-file', '--vault-pass-file']
    private_key_file_options = ['--private-key', '--key-file']
    optional_mount_args = inventory_file_options + vault_file_options + private_key_file_options

    if not cmdline_args:
        return

    if '-h' in cmdline_args or '--help' in cmdline_args:
        return

    for value in self.command:
        if 'ansible-playbook' in value:
            playbook_file_path = self._get_playbook_path(cmdline_args)
            if playbook_file_path:
                self._update_volume_mount_paths(args_list, playbook_file_path)
            break

    cmdline_args_copy = cmdline_args.copy()
    for arg in cmdline_args:
        if arg not in optional_mount_args:
            continue
        # index within the shrinking copy keeps flag/value pairs aligned even
        # when the same option appears more than once.
        # (A former write-only local here indexed the ORIGINAL list with this
        # copy-relative index — wrong element after the first match, and an
        # unguarded IndexError when the flag was the final argument; removed.)
        optional_arg_index = cmdline_args_copy.index(arg)
        cmdline_args_copy.pop(optional_arg_index)
        try:
            optional_arg_value = cmdline_args_copy.pop(optional_arg_index)
        except IndexError:
            # invalid command, pass through for execution
            # to return valid error from ansible-core
            return
        if arg in inventory_file_options and optional_arg_value.endswith(','):
            # comma separated host list provided as value, not a file path
            continue
        self._update_volume_mount_paths(args_list, optional_arg_value)
    def wrap_args_for_containerization(self,
                                       args: list[str],
                                       execution_mode: BaseExecutionMode,
                                       cmdline_args: list[str]
                                       ) -> list[str]:
        """
        Build the full container engine command line that runs ``args`` inside
        the execution environment image.

        Assembles a ``podman run`` / ``docker run`` invocation: tty/interactive
        flags, working directory, bind mounts for the private data dir (or its
        project/artifacts subdirectories), registry auth files, user-supplied
        volume mounts and options, and finally the image and wrapped command.

        :param args: the command to execute inside the container.
        :param execution_mode: ``BaseExecutionMode.NONE`` for run()/run_async()
            (whose mounts are handled separately via 'container_volume_mounts'),
            otherwise the kind of ansible/generic command being run.
        :param cmdline_args: CLI arguments, scanned for paths needing mounts.
        :returns: the complete container engine argument list.
        """
        new_args = [self.process_isolation_executable]
        new_args.extend(['run', '--rm'])

        # Allocate a tty for interactive (pexpect) runs or when stdin is wired up.
        if self.runner_mode == 'pexpect' or getattr(self, 'input_fd', False):
            new_args.extend(['--tty'])

        new_args.append('--interactive')

        # Resolve the in-container working directory.
        if self.container_workdir:
            workdir = self.container_workdir
        elif self.host_cwd is not None and os.path.exists(self.host_cwd):
            # mount current host working directory if passed and it exists
            self._ensure_path_safe_to_mount(self.host_cwd)
            self._update_volume_mount_paths(new_args, self.host_cwd)
            workdir = self.host_cwd
        else:
            workdir = "/runner/project"

        self.cwd = workdir
        new_args.extend(["--workdir", workdir])

        # For run() and run_async() API value of base execution_mode is 'BaseExecutionMode.NONE'
        # and the container volume mounts are handled separately using 'container_volume_mounts'
        # hence ignore additional mount here
        if execution_mode != BaseExecutionMode.NONE:
            if execution_mode == BaseExecutionMode.ANSIBLE_COMMANDS:
                self._handle_ansible_cmd_options_bind_mounts(new_args, cmdline_args)

            # Handle automounts for .ssh config
            self._handle_automounts(new_args)

            if 'podman' in self.process_isolation_executable:
                # container namespace stuff
                new_args.extend(["--group-add=root"])
                new_args.extend(["--ipc=host"])

            self._ensure_path_safe_to_mount(self.private_data_dir)

            # Relative paths are mounted relative to /runner/project
            for subdir in ('project', 'artifacts'):
                subdir_path = os.path.join(self.private_data_dir, subdir)
                if not os.path.exists(subdir_path):
                    os.mkdir(subdir_path, 0o700)

            # runtime commands need artifacts mounted to output data
            self._update_volume_mount_paths(new_args,
                                            f"{self.private_data_dir}/artifacts",
                                            dst_mount_path="/runner/artifacts",
                                            labels=":Z")
        else:
            subdir_path = os.path.join(self.private_data_dir, 'artifacts')
            if not os.path.exists(subdir_path):
                os.mkdir(subdir_path, 0o700)

            # Mount the entire private_data_dir
            # custom show paths inside private_data_dir do not make sense
            self._update_volume_mount_paths(new_args, self.private_data_dir, dst_mount_path="/runner", labels=":Z")

        if self.container_auth_data:
            # Pull in the necessary registry auth info, if there is a container cred
            self.registry_auth_path, registry_auth_conf_file = self._generate_container_auth_dir(self.container_auth_data)
            if 'podman' in self.process_isolation_executable:
                new_args.extend([f"--authfile={self.registry_auth_path}"])
            else:
                # docker takes the auth config directory right after the executable
                docker_idx = new_args.index(self.process_isolation_executable)
                new_args.insert(docker_idx + 1, f"--config={self.registry_auth_path}")
            if registry_auth_conf_file is not None:
                # Podman >= 3.1.0
                self.env['CONTAINERS_REGISTRIES_CONF'] = registry_auth_conf_file
                # Podman < 3.1.0
                self.env['REGISTRIES_CONFIG_PATH'] = registry_auth_conf_file

        if self.container_volume_mounts:
            for mapping in self.container_volume_mounts:
                # validate only the host-side path of each src:dst[:labels] mapping
                volume_mounts = mapping.split(':', 2)
                self._ensure_path_safe_to_mount(volume_mounts[0])
                new_args.extend(["-v", mapping])

        # Reference the file with list of keys to pass into container
        # this file will be written in ansible_runner.runner
        env_file_host = os.path.join(self.artifact_dir, 'env.list')
        new_args.extend(['--env-file', env_file_host])

        if 'podman' in self.process_isolation_executable:
            # docker doesn't support this option
            new_args.extend(['--quiet'])

        if 'docker' in self.process_isolation_executable:
            # map the container process to the invoking host user's uid
            new_args.extend([f'--user={os.getuid()}'])

        new_args.extend(['--name', self.container_name])

        if self.container_options:
            new_args.extend(self.container_options)

        new_args.extend([self.container_image])
        new_args.extend(args)
        logger.debug("container engine invocation: %s", ' '.join(new_args))

        return new_args
def _generate_container_auth_dir(self, auth_data: dict[str, str]) -> tuple[str, str | None]:
host = auth_data.get('host')
token = f"{auth_data.get('username')}:{auth_data.get('password')}"
encoded_container_auth_data = {'auths': {host: {'auth': b64encode(token.encode('UTF-8')).decode('UTF-8')}}}
# Create a new temp file with container auth data
path = tempfile.mkdtemp(prefix=f'{registry_auth_prefix}{self.ident}_')
register_for_cleanup(path)
if self.process_isolation_executable == 'docker':
auth_filename = 'config.json'
else:
auth_filename = 'auth.json'
registry_auth_path = os.path.join(path, auth_filename)
with open(registry_auth_path, 'w') as authfile:
os.chmod(authfile.name, stat.S_IRUSR | stat.S_IWUSR)
authfile.write(json.dumps(encoded_container_auth_data, indent=4))
registries_conf_path = None
if auth_data.get('verify_ssl', True) is False:
registries_conf_path = os.path.join(path, 'registries.conf')
with open(registries_conf_path, 'w') as registries_conf:
os.chmod(registries_conf.name, stat.S_IRUSR | stat.S_IWUSR)
lines = [
'[[registry]]',
f'location = "{host}"',
'insecure = true',
]
registries_conf.write('\n'.join(lines))
auth_path = authfile.name
if self.process_isolation_executable == 'docker':
auth_path = path # docker expects to be passed directory
return (auth_path, registries_conf_path)
def wrap_args_with_ssh_agent(self,
args: list[str],
ssh_key_path: str | None,
ssh_auth_sock: str | None = None,
silence_ssh_add: bool = False
) -> list[str]:
"""
Given an existing command line and parameterization this will return the same command line wrapped with the
necessary calls to ``ssh-agent``
"""
if self.containerized:
artifact_dir = os.path.join("/runner/artifacts", self.ident)
ssh_key_path = os.path.join(artifact_dir, "ssh_key_data")
if ssh_key_path:
ssh_add_command = args2cmdline('ssh-add', ssh_key_path)
if silence_ssh_add:
ssh_add_command = ' '.join([ssh_add_command, '2>/dev/null'])
ssh_key_cleanup_command = f'rm -f {ssh_key_path}'
# The trap ensures the fifo is cleaned up even if the call to ssh-add fails.
# This prevents getting into certain scenarios where subsequent reads will
# hang forever.
cmd = ' && '.join([args2cmdline('trap', ssh_key_cleanup_command, 'EXIT'),
ssh_add_command,
ssh_key_cleanup_command,
args2cmdline(*args)])
args = ['ssh-agent']
if ssh_auth_sock:
args.extend(['-a', ssh_auth_sock])
args.extend(['sh', '-c', cmd])
return args
def _handle_automounts(self, new_args: list[str]) -> None:
for cli_automount in cli_mounts():
for env in cli_automount['ENVS']:
if env in os.environ:
dest_path = os.environ[env]
if os.path.exists(os.environ[env]):
if os.environ[env].startswith(os.environ['HOME']):
dest_path = f"/home/runner/{os.environ[env].lstrip(os.environ['HOME'])}"
elif os.environ[env].startswith('~'):
dest_path = f"/home/runner/{os.environ[env].lstrip('~/')}"
else:
dest_path = os.environ[env]
self._update_volume_mount_paths(new_args, os.environ[env], dst_mount_path=dest_path)
new_args.extend(["-e", f"{env}={dest_path}"])
for paths in cli_automount['PATHS']:
if os.path.exists(paths['src']):
self._update_volume_mount_paths(new_args, paths['src'], dst_mount_path=paths['dest'])
ansible-runner-2.4.1/src/ansible_runner/config/ansible_cfg.py 0000664 0000000 0000000 00000006442 14770573620 0024333 0 ustar 00root root 0000000 0000000 ############################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# pylint: disable=W0201
import logging
from ansible_runner.config._base import BaseConfig, BaseExecutionMode
from ansible_runner.exceptions import ConfigurationError
from ansible_runner.utils import get_executable_path
logger = logging.getLogger('ansible-runner')
class AnsibleCfgConfig(BaseConfig):
    """
    Runner configuration for driving ``ansible-config``.

    Holds the settings used by :py:mod:`ansible_runner.runner` to launch and
    manage an ``ansible-config`` invocation. Normally constructed on your
    behalf by the standard ``get_ansible_config`` interface in
    :py:mod:`ansible_runner.interface`, but it can be built directly, or
    subclassed to customize behaviour.

    :Example:

    >>> ac = AnsibleCfgConfig(...)
    >>> r = Runner(config=ac)
    >>> r.run()
    """

    # Subcommands of ansible-config that this config knows how to drive.
    _supported_actions = ('list', 'dump', 'view')

    def __init__(self, runner_mode=None, **kwargs):
        # ansible-config never prompts, so default to plain subprocess execution.
        self.runner_mode = runner_mode or 'subprocess'
        if self.runner_mode not in ('pexpect', 'subprocess'):
            raise ConfigurationError(f"Invalid runner mode {self.runner_mode}, valid value is either 'pexpect' or 'subprocess'")

        # Under process isolation the executable is resolved inside the
        # container, so the bare command name is used as-is.
        if kwargs.get("process_isolation"):
            self._ansible_config_exec_path = "ansible-config"
        else:
            self._ansible_config_exec_path = get_executable_path("ansible-config")

        self.execution_mode = BaseExecutionMode.ANSIBLE_COMMANDS
        super().__init__(**kwargs)

    def prepare_ansible_config_command(self, action, config_file=None, only_changed=None):
        """
        Assemble ``self.command`` for an ``ansible-config`` run.

        :param action: one of 'list', 'dump' or 'view'.
        :param config_file: optional config file path, passed via ``-c``.
        :param only_changed: restrict 'dump' output to changed values only.
        :raises ConfigurationError: for an unknown action, or when
            ``only_changed`` is combined with an action other than 'dump'.
        """
        if action not in AnsibleCfgConfig._supported_actions:
            raise ConfigurationError(f'Invalid action {action}, valid value is one of either {", ".join(AnsibleCfgConfig._supported_actions)}')

        if action != 'dump' and only_changed:
            raise ConfigurationError("only_changed is applicable for action 'dump'")

        self.prepare_env(runner_mode=self.runner_mode)

        cmd_args = [action]
        if config_file:
            cmd_args.extend(['-c', config_file])
        if only_changed:
            cmd_args.append('--only-changed')

        self.cmdline_args = cmd_args
        self.command = [self._ansible_config_exec_path] + cmd_args
        self.handle_command_wrap(self.execution_mode, self.cmdline_args)
ansible-runner-2.4.1/src/ansible_runner/config/command.py 0000664 0000000 0000000 00000010335 14770573620 0023511 0 ustar 00root root 0000000 0000000 ############################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# pylint: disable=W0201
import logging
import os
from ansible_runner.config._base import BaseConfig, BaseExecutionMode
from ansible_runner.exceptions import ConfigurationError
logger = logging.getLogger('ansible-runner')
class CommandConfig(BaseConfig):
    """
    A ``Runner`` configuration object that's meant to encapsulate the configuration used by the
    :py:mod:`ansible_runner.runner.CommandConfig` object to launch and manage the invocation of
    command execution.

    Typically this object is initialized for you when using the standard ``run_command`` interfaces in :py:mod:`ansible_runner.interface`
    but can be used to construct the ``CommandConfig`` configuration to be invoked elsewhere. It can also be overridden to provide different
    functionality to the CommandConfig object.

    :Example:

    >>> cc = CommandConfig(...)
    >>> r = Runner(config=cc)
    >>> r.run()
    """

    def __init__(self, input_fd=None, output_fd=None, error_fd=None, runner_mode=None, **kwargs):
        # subprocess runner mode params
        self.input_fd = input_fd
        self.output_fd = output_fd
        self.error_fd = error_fd

        if runner_mode == 'pexpect' and not self.input_fd:
            raise ConfigurationError("input_fd is applicable only with 'subprocess' runner mode")

        if runner_mode and runner_mode not in ['pexpect', 'subprocess']:
            raise ConfigurationError(f"Invalid runner mode {runner_mode}, valid value is either 'pexpect' or 'subprocess'")

        # runner params; runner_mode may stay None and be resolved later in
        # prepare_run_command() via _set_runner_mode().
        self.runner_mode = runner_mode
        self.execution_mode = BaseExecutionMode.NONE
        super().__init__(**kwargs)

    # Ansible commands that never prompt for input and therefore do not need
    # a pseudo-terminal (pexpect).
    _ANSIBLE_NON_INERACTIVE_CMDS = (
        'ansible-config',
        'ansible-doc',
        'ansible-galaxy',
    )

    def _set_runner_mode(self):
        """
        Pick 'subprocess' for non-interactive invocations (an explicit input_fd
        or a known non-interactive ansible command), otherwise 'pexpect'.
        """
        # FIX: the command name was previously extracted with
        # executable_cmd.split(os.pathsep)[-1]. os.pathsep is the PATH *list*
        # separator (':' or ';'), so a full path such as /usr/bin/ansible-doc
        # was never recognized. Use the path basename instead.
        if self.input_fd is not None or os.path.basename(self.executable_cmd) in CommandConfig._ANSIBLE_NON_INERACTIVE_CMDS:
            self.runner_mode = 'subprocess'
        else:
            self.runner_mode = 'pexpect'

    def prepare_run_command(self, executable_cmd, cmdline_args=None):
        """
        Prepare the environment and command line to run ``executable_cmd``
        with the optional ``cmdline_args``.
        """
        self.executable_cmd = executable_cmd
        self.cmdline_args = cmdline_args

        if self.runner_mode is None:
            self._set_runner_mode()

        self.prepare_env(runner_mode=self.runner_mode)
        self._prepare_command()
        self.handle_command_wrap(self.execution_mode, self.cmdline_args)

    def _prepare_command(self):
        """
        Determines if it is ``ansible`` command or ``generic`` command and generate the command line
        """
        if not self.executable_cmd:
            raise ConfigurationError("For CommandRunner 'executable_cmd' value is required")

        # FIX: use the basename so full paths to ansible tools are detected
        # too (os.pathsep only splits PATH-style lists, not file paths).
        executable_name = os.path.basename(self.executable_cmd)
        if executable_name.startswith('ansible'):
            self.execution_mode = BaseExecutionMode.ANSIBLE_COMMANDS
        else:
            self.execution_mode = BaseExecutionMode.GENERIC_COMMANDS

        if self.cmdline_args:
            self.command = [self.executable_cmd] + self.cmdline_args
        else:
            self.command = [self.executable_cmd]

        # A bare python interpreter with no script argument cannot run anything.
        if self.execution_mode == BaseExecutionMode.GENERIC_COMMANDS \
                and 'python' in executable_name and self.cmdline_args is None:
            raise ConfigurationError("Runner requires python filename for execution")

        if self.execution_mode == BaseExecutionMode.NONE:
            raise ConfigurationError("No executable for runner to run")
ansible-runner-2.4.1/src/ansible_runner/config/doc.py 0000664 0000000 0000000 00000014142 14770573620 0022640 0 ustar 00root root 0000000 0000000 ############################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# pylint: disable=W0201
import logging
from ansible_runner.config._base import BaseConfig, BaseExecutionMode
from ansible_runner.exceptions import ConfigurationError
from ansible_runner.utils import get_executable_path
logger = logging.getLogger('ansible-runner')
class DocConfig(BaseConfig):
    """
    Runner configuration for driving ``ansible-doc``.

    Encapsulates the settings used by :py:mod:`ansible_runner.runner` to
    launch and manage ``ansible-doc`` invocations. Normally created for you by
    the standard ``get_plugin_docs`` / ``get_plugin_list`` interfaces in
    :py:mod:`ansible_runner.interface`, but it can be constructed directly or
    subclassed to customize behaviour.

    :Example:

    >>> dc = DocConfig(...)
    >>> r = Runner(config=dc)
    >>> r.run()
    """

    # Output formats understood by ansible-doc.
    _supported_response_formats = ('json', 'human')

    def __init__(self, runner_mode=None, **kwargs):
        # ansible-doc never prompts, so default to plain subprocess execution.
        self.runner_mode = runner_mode or 'subprocess'
        if self.runner_mode not in ('pexpect', 'subprocess'):
            raise ConfigurationError(f"Invalid runner mode {self.runner_mode}, valid value is either 'pexpect' or 'subprocess'")

        # With process isolation the binary is resolved inside the container.
        if kwargs.get("process_isolation"):
            self._ansible_doc_exec_path = "ansible-doc"
        else:
            self._ansible_doc_exec_path = get_executable_path("ansible-doc")

        self.execution_mode = BaseExecutionMode.ANSIBLE_COMMANDS
        super().__init__(**kwargs)

    def _run_doc_command(self, cmd_args):
        """Record the assembled args and build the final ansible-doc command."""
        self.cmdline_args = cmd_args
        self.command = [self._ansible_doc_exec_path] + cmd_args
        self.handle_command_wrap(self.execution_mode, self.cmdline_args)

    def prepare_plugin_docs_command(self, plugin_names, plugin_type=None, response_format=None,
                                    snippet=False, playbook_dir=None, module_path=None):
        """
        Assemble the ``ansible-doc`` command that fetches documentation for
        the given plugins.

        :raises ConfigurationError: for an unknown response format or when
            ``plugin_names`` is not a list.
        """
        if response_format and response_format not in DocConfig._supported_response_formats:
            raise ConfigurationError(f'Invalid response_format {response_format}, '
                                     f'valid value is one of either {", ".join(DocConfig._supported_response_formats)}')

        if not isinstance(plugin_names, list):
            raise ConfigurationError(f"plugin_names should be of type list, instead received {plugin_names} of type {type(plugin_names)}")

        self.prepare_env(runner_mode=self.runner_mode)

        cmd_args = []
        if response_format == 'json':
            cmd_args.append('-j')
        if snippet:
            cmd_args.append('-s')
        if plugin_type:
            cmd_args.extend(['-t', plugin_type])
        if playbook_dir:
            cmd_args.extend(['--playbook-dir', playbook_dir])
        if module_path:
            cmd_args.extend(['-M', module_path])
        cmd_args.extend(plugin_names)

        self._run_doc_command(cmd_args)

    def prepare_plugin_list_command(self, list_files=None, response_format=None, plugin_type=None,
                                    playbook_dir=None, module_path=None):
        """
        Assemble the ``ansible-doc`` command that lists available plugins
        (``-l``) or their source files (``-F``).
        """
        if response_format and response_format not in DocConfig._supported_response_formats:
            raise ConfigurationError(f"Invalid response_format {response_format}, "
                                     f'valid value is one of either {", ".join(DocConfig._supported_response_formats)}')

        self.prepare_env(runner_mode=self.runner_mode)

        cmd_args = ['-F' if list_files else '-l']
        if response_format == 'json':
            cmd_args.append('-j')
        if plugin_type:
            cmd_args.extend(['-t', plugin_type])
        if playbook_dir:
            cmd_args.extend(['--playbook-dir', playbook_dir])
        if module_path:
            cmd_args.extend(['-M', module_path])

        self._run_doc_command(cmd_args)

    def prepare_role_list_command(self, collection_name, playbook_dir):
        """
        Build the equivalent of ``ansible-doc -t role -l -j``, optionally
        scoped to one collection and/or a playbook directory.
        """
        self.prepare_env(runner_mode=self.runner_mode)

        cmd_args = ['-t', 'role', '-l', '-j']
        if playbook_dir:
            cmd_args.extend(['--playbook-dir', playbook_dir])
        if collection_name:
            cmd_args.append(collection_name)

        self._run_doc_command(cmd_args)

    def prepare_role_argspec_command(self, role_name, collection_name, playbook_dir):
        """
        Build the equivalent of ``ansible-doc -t role -j <collection>.<role>``.
        """
        self.prepare_env(runner_mode=self.runner_mode)

        cmd_args = ['-t', 'role', '-j']
        if playbook_dir:
            cmd_args.extend(['--playbook-dir', playbook_dir])
        # Fully qualify the role name when a collection is given.
        cmd_args.append(".".join([collection_name, role_name]) if collection_name else role_name)

        self._run_doc_command(cmd_args)
ansible-runner-2.4.1/src/ansible_runner/config/inventory.py 0000664 0000000 0000000 00000011346 14770573620 0024133 0 ustar 00root root 0000000 0000000 ############################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# pylint: disable=W0201
import logging
from ansible_runner.config._base import BaseConfig, BaseExecutionMode
from ansible_runner.exceptions import ConfigurationError
from ansible_runner.utils import get_executable_path
logger = logging.getLogger('ansible-runner')
class InventoryConfig(BaseConfig):
    """
    Runner configuration for driving ``ansible-inventory``.

    Encapsulates the settings used by :py:mod:`ansible_runner.runner` to
    launch and manage ``ansible-inventory`` invocations. Normally created for
    you by the standard ``get_inventory`` interface in
    :py:mod:`ansible_runner.interface`, but it can be constructed directly or
    subclassed to customize behaviour.

    :Example:

    >>> ic = InventoryConfig(...)
    >>> r = Runner(config=ic)
    >>> r.run()
    """

    # Output formats and subcommands understood by ansible-inventory.
    _supported_response_formats = ('json', 'yaml', 'toml')
    _supported_actions = ('graph', 'host', 'list')

    def __init__(self, runner_mode=None, **kwargs):
        # ansible-inventory never prompts, so default to subprocess execution.
        self.runner_mode = runner_mode or 'subprocess'
        if self.runner_mode not in ('pexpect', 'subprocess'):
            raise ConfigurationError(f"Invalid runner mode {self.runner_mode}, valid value is either 'pexpect' or 'subprocess'")

        # With process isolation the binary is resolved inside the container.
        if kwargs.get("process_isolation"):
            self._ansible_inventory_exec_path = "ansible-inventory"
        else:
            self._ansible_inventory_exec_path = get_executable_path("ansible-inventory")

        self.execution_mode = BaseExecutionMode.ANSIBLE_COMMANDS
        super().__init__(**kwargs)

    def _validate_inventory_request(self, action, inventories, response_format, host):
        """Raise ConfigurationError for any unsupported parameter combination."""
        if action not in InventoryConfig._supported_actions:
            raise ConfigurationError(f'Invalid action {action}, valid value is one of either {", ".join(InventoryConfig._supported_actions)}')

        if response_format and response_format not in InventoryConfig._supported_response_formats:
            raise ConfigurationError(f"Invalid response_format {response_format}, valid value is one of "
                                     f"either {', '.join(InventoryConfig._supported_response_formats)}")

        if not isinstance(inventories, list):
            raise ConfigurationError(f"inventories should be of type list, instead received {inventories} of type {type(inventories)}")

        if action == "host" and host is None:
            raise ConfigurationError("Value of host parameter is required when action in 'host'")

        if action == "graph" and response_format and response_format != 'json':
            raise ConfigurationError("'graph' action supports only 'json' response format")

    def prepare_inventory_command(self, action, inventories, response_format=None, host=None,
                                  playbook_dir=None, vault_ids=None, vault_password_file=None,
                                  output_file=None, export=None):
        """
        Assemble ``self.command`` for an ``ansible-inventory`` run.

        :param action: one of 'graph', 'host' or 'list'.
        :param inventories: list of inventory sources, each passed via ``-i``.
        :param response_format: 'json' (default), 'yaml' or 'toml'.
        :param host: target host; required when action is 'host'.
        """
        self._validate_inventory_request(action, inventories, response_format, host)

        self.prepare_env(runner_mode=self.runner_mode)

        cmd_args = [f'--{action}']
        if action == 'host':
            cmd_args.append(host)
        for inv in inventories:
            cmd_args.extend(['-i', inv])
        # json is ansible-inventory's default and needs no flag.
        if response_format in ('yaml', 'toml'):
            cmd_args.append(f'--{response_format}')
        for flag, value in (('--playbook-dir', playbook_dir),
                            ('--vault-id', vault_ids),
                            ('--vault-password-file', vault_password_file),
                            ('--output', output_file)):
            if value:
                cmd_args.extend([flag, value])
        if export:
            cmd_args.append('--export')

        self.cmdline_args = cmd_args
        self.command = [self._ansible_inventory_exec_path] + cmd_args
        self.handle_command_wrap(self.execution_mode, self.cmdline_args)
ansible-runner-2.4.1/src/ansible_runner/config/runner.py 0000664 0000000 0000000 00000041516 14770573620 0023411 0 ustar 00root root 0000000 0000000 ############################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# pylint: disable=W0201
import json
import logging
import os
import shlex
import stat
import tempfile
import shutil
from ansible_runner import output
from ansible_runner.config._base import BaseConfig, BaseExecutionMode
from ansible_runner.exceptions import ConfigurationError
from ansible_runner.output import debug
from ansible_runner.utils import register_for_cleanup
logger = logging.getLogger('ansible-runner')
class ExecutionMode():
    """Enumeration of the ways a Runner invocation can be executed."""
    # No command has been determined yet; prepare() rejects this for a run.
    NONE = 0
    # Ad-hoc module execution via the ``ansible`` CLI.
    ANSIBLE = 1
    # Playbook execution via ``ansible-playbook``.
    ANSIBLE_PLAYBOOK = 2
    # A raw / user-supplied command line (binary override or an 'args' file).
    RAW = 3
class RunnerConfig(BaseConfig):
"""
A ``Runner`` configuration object that's meant to encapsulate the configuration used by the
:py:mod:`ansible_runner.runner.Runner` object to launch and manage the invocation of ``ansible``
and ``ansible-playbook``
Typically this object is initialized for you when using the standard ``run`` interfaces in :py:mod:`ansible_runner.interface`
but can be used to construct the ``Runner`` configuration to be invoked elsewhere. It can also be overridden to provide different
functionality to the Runner object.
:Example:
>>> rc = RunnerConfig(...)
>>> r = Runner(config=rc)
>>> r.run()
"""
    def __init__(self,
                 private_data_dir, playbook=None, inventory=None, roles_path=None, limit=None,
                 module=None, module_args=None, verbosity=None, host_pattern=None, binary=None,
                 extravars=None, suppress_output_file=False, suppress_ansible_output=False, process_isolation_path=None,
                 process_isolation_hide_paths=None, process_isolation_show_paths=None,
                 process_isolation_ro_paths=None, tags=None, skip_tags=None,
                 directory_isolation_base_path=None, forks=None, cmdline=None, omit_event_data=False,
                 only_failed_event_data=False, **kwargs):
        """
        Store the playbook/module inputs and isolation options; remaining
        keyword arguments are passed through to :py:class:`BaseConfig`.
        """
        # RunnerConfig always drives its process through pexpect.
        self.runner_mode = "pexpect"

        super().__init__(private_data_dir, **kwargs)

        # What to run: playbook vs. module is validated in prepare() (only one
        # may be set); binary replaces the ansible executable entirely.
        self.playbook = playbook
        self.inventory = inventory
        self.roles_path = roles_path
        self.limit = limit
        self.module = module
        self.module_args = module_args
        self.host_pattern = host_pattern
        self.binary = binary
        self.extra_vars = extravars

        # Process/directory isolation inputs; several of these may be
        # overridden later from env/settings in prepare_env().
        self.process_isolation_path = process_isolation_path
        self.process_isolation_path_actual = None
        self.process_isolation_hide_paths = process_isolation_hide_paths
        self.process_isolation_show_paths = process_isolation_show_paths
        self.process_isolation_ro_paths = process_isolation_ro_paths
        self.directory_isolation_path = directory_isolation_base_path

        # Output controls
        self.verbosity = verbosity
        self.suppress_output_file = suppress_output_file
        self.suppress_ansible_output = suppress_ansible_output

        self.tags = tags
        self.skip_tags = skip_tags

        # Resolved by prepare_command()/generate_ansible_command().
        self.execution_mode = ExecutionMode.NONE
        self.forks = forks
        self.cmdline_args = cmdline

        # Event filtering flags, exported as env vars in prepare_env().
        self.omit_event_data = omit_event_data
        self.only_failed_event_data = only_failed_event_data
    @property
    def sandboxed(self):
        """True when process isolation is requested with an executable that is
        not one of the known container engines (``self._CONTAINER_ENGINES``)."""
        return self.process_isolation and self.process_isolation_executable not in self._CONTAINER_ENGINES
    def prepare(self):
        """
        Performs basic checks and then properly invokes

        - prepare_inventory
        - prepare_env
        - prepare_command

        It's also responsible for wrapping the command with the proper ssh agent invocation
        and setting early ANSIBLE_ environment variables.
        """
        # ansible_path = find_executable('ansible')
        # if ansible_path is None or not os.access(ansible_path, os.X_OK):
        #     raise ConfigurationError("Ansible not found. Make sure that it is installed.")
        if self.private_data_dir is None:
            raise ConfigurationError("Runner Base Directory is not defined")
        if self.module and self.playbook:
            raise ConfigurationError("Only one of playbook and module options are allowed")
        if not os.path.exists(self.artifact_dir):
            os.makedirs(self.artifact_dir, mode=0o700)

        # Since the `sandboxed` property references attributes that may come from `env/settings`,
        # we must call prepare_env() before we can reference it.
        self.prepare_env()

        if self.sandboxed and self.directory_isolation_path is not None:
            # Work from a private copy of the project tree so the sandboxed run
            # cannot modify the original.
            self.directory_isolation_path = tempfile.mkdtemp(prefix='runner_di_', dir=self.directory_isolation_path)
            if os.path.exists(self.project_dir):
                output.debug(f"Copying directory tree from {self.project_dir} to {self.directory_isolation_path} for working directory isolation")
                shutil.copytree(self.project_dir, self.directory_isolation_path, dirs_exist_ok=True, symlinks=True)

        self.prepare_inventory()
        self.prepare_command()

        # Cross-check the resolved execution mode against the provided inputs.
        if self.execution_mode == ExecutionMode.ANSIBLE_PLAYBOOK and self.playbook is None:
            raise ConfigurationError("Runner playbook required when running ansible-playbook")
        if self.execution_mode == ExecutionMode.ANSIBLE and self.module is None:
            raise ConfigurationError("Runner module required when running ansible")
        if self.execution_mode == ExecutionMode.NONE:
            raise ConfigurationError("No executable for runner to run")

        self.handle_command_wrap()

        debug('env:')
        for k, v in sorted(self.env.items()):
            debug(f' {k}: {v}')
        if hasattr(self, 'command') and isinstance(self.command, list):
            debug(f"command: {' '.join(self.command)}")
def prepare_inventory(self):
"""
Prepares the inventory default under ``private_data_dir`` if it's not overridden by the constructor.
We make sure that if inventory is a path, that it is an absolute path.
"""
if self.containerized:
self.inventory = '/runner/inventory'
return
if self.inventory is None:
# At this point we expect self.private_data_dir to be an absolute path
# since that is expanded in the base class.
if os.path.exists(os.path.join(self.private_data_dir, "inventory")):
self.inventory = os.path.join(self.private_data_dir, "inventory")
elif isinstance(self.inventory, str) and os.path.exists(self.inventory):
self.inventory = os.path.abspath(self.inventory)
    def prepare_env(self):
        """
        Manages reading environment metadata files under ``private_data_dir`` and merging/updating
        with existing values so the :py:class:`ansible_runner.runner.Runner` object can read and use them easily
        """
        # setup common env settings
        super().prepare_env()

        # Values from env/settings take precedence over constructor arguments.
        self.process_isolation_path = self.settings.get('process_isolation_path', self.process_isolation_path)
        self.process_isolation_hide_paths = self.settings.get('process_isolation_hide_paths', self.process_isolation_hide_paths)
        self.process_isolation_show_paths = self.settings.get('process_isolation_show_paths', self.process_isolation_show_paths)
        self.process_isolation_ro_paths = self.settings.get('process_isolation_ro_paths', self.process_isolation_ro_paths)
        self.directory_isolation_path = self.settings.get('directory_isolation_base_path', self.directory_isolation_path)
        self.directory_isolation_cleanup = bool(self.settings.get('directory_isolation_cleanup', True))

        # Working directory selection: ad-hoc commands, or a missing project
        # directory, run from the private data dir itself; otherwise prefer
        # the directory-isolation copy when configured.
        if 'AD_HOC_COMMAND_ID' in self.env or not os.path.exists(self.project_dir):
            self.cwd = self.private_data_dir
        else:
            if self.directory_isolation_path is not None:
                self.cwd = self.directory_isolation_path
            else:
                self.cwd = self.project_dir

        # Place the fact cache under the artifact dir when a 'jsonfile' cache
        # type — or no explicit type at all — is configured; any other cache
        # type leaves self.fact_cache untouched here.
        if 'fact_cache' in self.settings:
            if 'fact_cache_type' in self.settings:
                if self.settings['fact_cache_type'] == 'jsonfile':
                    self.fact_cache = os.path.join(self.artifact_dir, self.settings['fact_cache'])
            else:
                self.fact_cache = os.path.join(self.artifact_dir, self.settings['fact_cache'])

        # Roles path may be a single path or a list; joined PATH-style.
        if self.roles_path:
            if isinstance(self.roles_path, list):
                self.env['ANSIBLE_ROLES_PATH'] = ':'.join(self.roles_path)
            else:
                self.env['ANSIBLE_ROLES_PATH'] = self.roles_path

        # Expose the event-filtering flags via environment variables.
        self.env["RUNNER_OMIT_EVENTS"] = str(self.omit_event_data)
        self.env["RUNNER_ONLY_FAILED_EVENTS"] = str(self.only_failed_event_data)
def prepare_command(self):
    """
    Determine the command to execute: a raw command line from the ``args``
    file when present, otherwise a generated ansible/ansible-playbook command.
    """
    try:
        raw_args = self.loader.load_file('args', str, encoding=None)
    except ConfigurationError:
        # No explicit args file was provided; build the command from the
        # gathered runner configuration instead.
        self.command = self.generate_ansible_command()
    else:
        # A literal command line was supplied; split and run it verbatim.
        self.command = shlex.split(raw_args)
        self.execution_mode = ExecutionMode.RAW
def generate_ansible_command(self):
    """
    Given that the ``RunnerConfig`` preparation methods have been run to gather the inputs this method
    will generate the ``ansible`` or ``ansible-playbook`` command that will be used by the
    :py:class:`ansible_runner.runner.Runner` object to start the process

    :returns: the full command as a list of argv tokens
    """
    # Choose the executable: an explicit binary wins, then ad-hoc module
    # mode, otherwise playbook mode.
    if self.binary is not None:
        base_command = self.binary
        self.execution_mode = ExecutionMode.RAW
    elif self.module is not None:
        base_command = 'ansible'
        self.execution_mode = ExecutionMode.ANSIBLE
    else:
        base_command = 'ansible-playbook'
        self.execution_mode = ExecutionMode.ANSIBLE_PLAYBOOK

    exec_list = [base_command]

    # Extra command-line args: prefer the in-memory value, falling back to
    # the env/cmdline file; a ConfigurationError means neither is present.
    try:
        if self.cmdline_args:
            cmdline_args = self.cmdline_args
        else:
            cmdline_args = self.loader.load_file('env/cmdline', str, encoding=None)

        args = shlex.split(cmdline_args)
        exec_list.extend(args)
    except ConfigurationError:
        pass

    # Inventory may be absent, a list of sources, or a single source.
    if self.inventory is None:
        pass
    elif isinstance(self.inventory, list):
        for i in self.inventory:
            exec_list.append("-i")
            exec_list.append(i)
    else:
        exec_list.append("-i")
        exec_list.append(self.inventory)

    if self.limit is not None:
        exec_list.append("--limit")
        exec_list.append(self.limit)

    # File-based extra vars: inside a container the path is the fixed
    # /runner mount point rather than the host path.
    if self.loader.isfile('env/extravars'):
        if self.containerized:
            extravars_path = '/runner/env/extravars'
        else:
            extravars_path = self.loader.abspath('env/extravars')
        exec_list.extend(['-e', f'@{extravars_path}'])

    # Inline extra vars: a dict is serialized to a JSON object on the
    # command line; a string naming an existing file is passed as @file.
    if self.extra_vars:
        if isinstance(self.extra_vars, dict) and self.extra_vars:
            extra_vars_list = []
            for k in self.extra_vars:
                extra_vars_list.append(f"\"{k}\":{json.dumps(self.extra_vars[k])}")

            exec_list.extend(
                [
                    '-e',
                    f'{{{",".join(extra_vars_list)}}}'
                ]
            )
        elif self.loader.isfile(self.extra_vars):
            exec_list.extend(['-e', f'@{self.loader.abspath(self.extra_vars)}'])

    # Verbosity count becomes -v/-vv/-vvv...
    if self.verbosity:
        v = 'v' * self.verbosity
        exec_list.append(f'-{v}')

    if self.tags:
        exec_list.extend(['--tags', self.tags])

    if self.skip_tags:
        exec_list.extend(['--skip-tags', self.skip_tags])

    if self.forks:
        exec_list.extend(['--forks', str(self.forks)])

    # Other parameters
    if self.execution_mode == ExecutionMode.ANSIBLE_PLAYBOOK:
        exec_list.append(self.playbook)
    elif self.execution_mode == ExecutionMode.ANSIBLE:
        exec_list.append("-m")
        exec_list.append(self.module)

        if self.module_args is not None:
            exec_list.append("-a")
            exec_list.append(self.module_args)

        # The host pattern is the trailing positional argument in ad-hoc mode.
        if self.host_pattern is not None:
            exec_list.append(self.host_pattern)

    return exec_list
def build_process_isolation_temp_dir(self):
    """
    Create a temporary directory for process isolation to use, restricted
    to the owning user and registered for cleanup at exit.

    :returns: absolute path of the new scratch directory
    """
    scratch_dir = tempfile.mkdtemp(prefix='ansible_runner_pi_', dir=self.process_isolation_path)
    # Owner-only permissions (0700): read, write and traverse for the user.
    owner_only = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
    os.chmod(scratch_dir, owner_only)
    register_for_cleanup(scratch_dir)
    return scratch_dir
def wrap_args_for_sandbox(self, args):
    '''
    Wrap existing command line with bwrap to restrict access to:
    - self.process_isolation_path (generally, /tmp) (except for own /tmp files)

    :param args: the command (list of argv tokens) to wrap
    :returns: a new argv list starting with the bwrap executable
    '''
    cwd = os.path.realpath(self.cwd)
    self.process_isolation_path_actual = self.build_process_isolation_temp_dir()
    new_args = [self.process_isolation_executable or 'bwrap']

    # Baseline sandbox: fresh PID namespace, minimal read-only system
    # mounts, and a private /tmp.
    # NOTE(review): '--dev-bind', '/dev', 'dev' uses a relative destination
    # ('dev' not '/dev') -- looks intentional upstream but worth confirming
    # against the bwrap man page.
    new_args.extend([
        '--die-with-parent',
        '--unshare-pid',
        '--dev-bind', '/dev', 'dev',
        '--proc', '/proc',
        '--dir', '/tmp',
        '--ro-bind', '/bin', '/bin',
        '--ro-bind', '/etc', '/etc',
        '--ro-bind', '/usr', '/usr',
        '--ro-bind', '/opt', '/opt',
        '--symlink', 'usr/lib', '/lib',
        '--symlink', 'usr/lib64', '/lib64',
    ])

    # Hidden paths are masked by bind-mounting an empty temp dir/file of the
    # same kind over them inside the sandbox.
    for path in sorted(set(self.process_isolation_hide_paths or [])):
        if not os.path.exists(path):
            logger.debug('hide path not found: %s', path)
            continue

        path = os.path.realpath(path)
        if os.path.isdir(path):
            new_path = tempfile.mkdtemp(dir=self.process_isolation_path_actual)
            os.chmod(new_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
        else:
            handle, new_path = tempfile.mkstemp(dir=self.process_isolation_path_actual)
            os.close(handle)
            os.chmod(new_path, stat.S_IRUSR | stat.S_IWUSR)
        new_args.extend(['--bind', new_path, path])

    # The private data dir (or the cwd as a fallback) is always writable.
    if self.private_data_dir:
        show_paths = [self.private_data_dir]
    else:
        show_paths = [cwd]

    for path in sorted(set(self.process_isolation_ro_paths or [])):
        if not os.path.exists(path):
            logger.debug('read-only path not found: %s', path)
            continue

        path = os.path.realpath(path)
        new_args.extend(['--ro-bind', path, path])

    # User-requested show paths get full read-write binds.
    show_paths.extend(self.process_isolation_show_paths or [])
    for path in sorted(set(show_paths)):
        if not os.path.exists(path):
            logger.debug('show path not found: %s', path)
            continue

        path = os.path.realpath(path)
        new_args.extend(['--bind', path, path])

    if self.execution_mode == ExecutionMode.ANSIBLE_PLAYBOOK:
        # playbook runs should cwd to the SCM checkout dir
        if self.directory_isolation_path is not None:
            new_args.extend(['--chdir', os.path.realpath(self.directory_isolation_path)])
        else:
            new_args.extend(['--chdir', os.path.realpath(self.project_dir)])
    elif self.execution_mode == ExecutionMode.ANSIBLE:
        # ad-hoc runs should cwd to the root of the private data dir
        new_args.extend(['--chdir', os.path.realpath(self.private_data_dir)])

    new_args.extend(args)
    return new_args
def handle_command_wrap(self):
    """
    Apply the configured command wrappers to ``self.command`` in order:
    ssh-agent first (so key material is available inside any outer wrapper),
    then either the bwrap sandbox or the container runtime.
    """
    # wrap args for ssh-agent
    if self.ssh_key_data:
        # Fixed typo in debug message: was 'ssh-agent agrs added'.
        debug('ssh-agent args added')
        self.command = self.wrap_args_with_ssh_agent(self.command, self.ssh_key_path)

    if self.sandboxed:
        debug('sandbox enabled')
        self.command = self.wrap_args_for_sandbox(self.command)
    else:
        debug('sandbox disabled')

    if self.containerized:
        debug('containerization enabled')
        # container volume mount is handled explicitly for run API's
        # using 'container_volume_mounts' arguments
        base_execution_mode = BaseExecutionMode.NONE
        self.command = self.wrap_args_for_containerization(self.command, base_execution_mode, self.cmdline_args)
    else:
        debug('containerization disabled')
ansible-runner-2.4.1/src/ansible_runner/defaults.py 0000664 0000000 0000000 00000000527 14770573620 0022437 0 ustar 00root root 0000000 0000000 default_process_isolation_executable = 'podman'
# Prefix used when creating temporary container-registry auth files.
registry_auth_prefix = 'ansible_runner_registry_'

# for ansible-runner worker cleanup command
GRACE_PERIOD_DEFAULT = 60  # minutes

# values passed to tempfile.mkdtemp to generate a private data dir
# when user did not provide one
AUTO_CREATE_NAMING = '.ansible-runner-'
AUTO_CREATE_DIR = None
ansible-runner-2.4.1/src/ansible_runner/display_callback/ 0000775 0000000 0000000 00000000000 14770573620 0023533 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/src/ansible_runner/display_callback/__init__.py 0000664 0000000 0000000 00000000000 14770573620 0025632 0 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/src/ansible_runner/display_callback/callback/ 0000775 0000000 0000000 00000000000 14770573620 0025267 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/src/ansible_runner/display_callback/callback/__init__.py 0000664 0000000 0000000 00000000000 14770573620 0027366 0 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/src/ansible_runner/display_callback/callback/awx_display.py 0000664 0000000 0000000 00000074020 14770573620 0030170 0 ustar 00root root 0000000 0000000 # Copyright (c) 2016 Ansible by Red Hat, Inc.
#
# This file is part of Ansible Tower, but depends on code imported from Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
# pylint: disable=W0212
from __future__ import (absolute_import, division, print_function)
# Python
import json
import stat
import multiprocessing
import threading
import base64
import functools
import collections
import contextlib
import datetime
import os
import sys
import uuid
from copy import copy
# Ansible
from ansible import constants as C
from ansible.plugins.callback import CallbackBase
from ansible.plugins.loader import callback_loader
from ansible.utils.display import Display
DOCUMENTATION = '''
callback: awx_display
short_description: Playbook event dispatcher for ansible-runner
version_added: "2.0"
description:
- This callback is necessary for ansible-runner to work
type: stdout
extends_documentation_fragment:
- default_callback
requirements:
- Set as stdout in config
'''
# Truthy when running under an AWX ad-hoc command invocation.
IS_ADHOC = os.getenv('AD_HOC_COMMAND_ID') or False

# Dynamically construct base classes for our callback module, to support custom stdout callbacks.
if os.getenv('ORIGINAL_STDOUT_CALLBACK'):
    default_stdout_callback = os.getenv('ORIGINAL_STDOUT_CALLBACK')
elif IS_ADHOC:
    default_stdout_callback = 'minimal'
else:
    default_stdout_callback = 'default'

# Resolve the chosen stdout callback plugin and use its class as our base,
# so awx_display decorates whatever stdout callback the user selected.
DefaultCallbackModule: CallbackBase = callback_loader.get(default_stdout_callback).__class__

# Message substituted for task results suppressed by 'no_log: true'.
CENSORED = "the output has been hidden due to the fact that 'no_log: true' was specified for this result"
def current_time():
    """Return the current moment as a timezone-aware UTC datetime."""
    return datetime.datetime.now(tz=datetime.timezone.utc)
# use a custom JSON serializer so we can properly handle !unsafe and !vault
# objects that may exist in events emitted by the callback plugin
# see: https://github.com/ansible/ansible/pull/38759
class AnsibleJSONEncoderLocal(json.JSONEncoder):
    """
    Duck-typed stand-in for Ansible core's AnsibleJSONEncoder: serializes
    vault-tagged objects and date/datetime values that the stock JSON
    encoder cannot handle.
    """

    def default(self, o):
        """
        Return a JSON-serializable representation of ``o``: vault objects
        become ``{'__ansible_vault': <ciphertext>}`` and dates/datetimes
        become ISO-8601 strings; anything else defers to the base class.
        """
        yaml_tag = getattr(o, 'yaml_tag', None)
        if yaml_tag == '!vault':
            ciphertext = o._ciphertext
            if isinstance(ciphertext, bytes):
                ciphertext = ciphertext.decode('utf-8')
            return {'__ansible_vault': ciphertext}

        if isinstance(o, (datetime.date, datetime.datetime)):
            return o.isoformat()

        return super().default(o)
class IsolatedFileWrite:
    """
    Cache-like writer that persists partial event data as JSON files under
    the private data directory's ``job_events`` pickup folder.
    """

    def __init__(self):
        # Root directory shared with the control process.
        self.private_data_dir = os.getenv('AWX_ISOLATED_DATA_DIR')

    def set(self, key, value):
        """
        Atomically write ``value`` for cache ``key`` (format ``:1:ev-<uuid>``)
        to ``<private_data_dir>/job_events/<uuid>-partial.json``.
        """
        # Strip the fixed ':1:ev-' prefix; the remainder is the event UUID.
        event_uuid = key[len(':1:ev-'):]
        events_dir = os.path.join(self.private_data_dir, 'job_events')
        if not os.path.exists(events_dir):
            os.mkdir(events_dir, 0o700)

        final_path = os.path.join(events_dir, f"{event_uuid}-partial.json")
        # Stage the write, then rename so readers never see a partial file.
        staging_path = '.'.join([final_path, 'tmp'])
        serialized = json.dumps(value, cls=AnsibleJSONEncoderLocal)
        fd = os.open(staging_path, os.O_WRONLY | os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR)
        with os.fdopen(fd, 'w') as handle:
            handle.write(serialized)
        os.rename(staging_path, final_path)
class EventContext:
    '''
    Store global and local (per thread/process) data associated with callback
    events and other display output methods.
    '''

    def __init__(self):
        self.display_lock = multiprocessing.RLock()
        self._global_ctx = {}
        self._local = threading.local()
        # NOTE(review): self.cache is only assigned when AWX_ISOLATED_DATA_DIR
        # is set; dump_begin() uses it unconditionally, so it presumably relies
        # on the runner always exporting that variable -- confirm.
        if os.getenv('AWX_ISOLATED_DATA_DIR'):
            self.cache = IsolatedFileWrite()

    def add_local(self, **kwargs):
        # Merge kwargs into this thread's context dict, creating it lazily.
        tls = vars(self._local)
        ctx = tls.setdefault('_ctx', {})
        ctx.update(kwargs)

    def remove_local(self, **kwargs):
        # Only the key names matter; values are ignored.
        for key in kwargs:
            self._local._ctx.pop(key, None)

    @contextlib.contextmanager
    def set_local(self, **kwargs):
        # Temporarily apply thread-local context for the duration of the block.
        try:
            self.add_local(**kwargs)
            yield
        finally:
            self.remove_local(**kwargs)

    def get_local(self):
        return getattr(getattr(self, '_local', None), '_ctx', {})

    def add_global(self, **kwargs):
        self._global_ctx.update(kwargs)

    def remove_global(self, **kwargs):
        # Only the key names matter; values are ignored.
        for key in kwargs:
            self._global_ctx.pop(key, None)

    @contextlib.contextmanager
    def set_global(self, **kwargs):
        # Temporarily apply process-global context for the duration of the block.
        try:
            self.add_global(**kwargs)
            yield
        finally:
            self.remove_global(**kwargs)

    def get_global(self):
        return self._global_ctx

    def get(self):
        # Local context overrides global on key collisions.
        ctx = {}
        ctx.update(self.get_global())
        ctx.update(self.get_local())
        return ctx

    def get_begin_dict(self):
        """
        Build the event dictionary emitted at the start of a display/callback
        event, honoring the RUNNER_OMIT_EVENTS and RUNNER_ONLY_FAILED_EVENTS
        environment switches exported by the runner config.
        """
        omit_event_data = os.getenv("RUNNER_OMIT_EVENTS", "False").lower() == "true"
        include_only_failed_event_data = os.getenv("RUNNER_ONLY_FAILED_EVENTS", "False").lower() == "true"
        event_data = self.get()
        event = event_data.pop('event', None)
        if not event:
            # No explicit event: classify by whichever display flag is set.
            event = 'verbose'
            for key in ('debug', 'verbose', 'deprecated', 'warning', 'system_warning', 'error'):
                if event_data.get(key, False):
                    event = key
                    break
        event_dict = {'event': event}
        # When only failed events are requested, keep payload data solely for
        # the failure event types.
        should_process_event_data = (include_only_failed_event_data and event in ('runner_on_failed', 'runner_on_async_failed', 'runner_on_item_failed')) \
            or not include_only_failed_event_data
        if os.getenv('JOB_ID', ''):
            event_dict['job_id'] = int(os.getenv('JOB_ID', '0'))
        if os.getenv('AD_HOC_COMMAND_ID', ''):
            event_dict['ad_hoc_command_id'] = int(os.getenv('AD_HOC_COMMAND_ID', '0'))
        if os.getenv('PROJECT_UPDATE_ID', ''):
            event_dict['project_update_id'] = int(os.getenv('PROJECT_UPDATE_ID', '0'))
        event_dict['pid'] = event_data.get('pid', os.getpid())
        event_dict['uuid'] = event_data.get('uuid', str(uuid.uuid4()))
        event_dict['created'] = event_data.get('created', current_time().isoformat())
        # Derive parent_uuid from the closest enclosing task/play/playbook
        # unless one was supplied explicitly.
        if not event_data.get('parent_uuid', None):
            for key in ('task_uuid', 'play_uuid', 'playbook_uuid'):
                parent_uuid = event_data.get(key, None)
                if parent_uuid and parent_uuid != event_data.get('uuid', None):
                    event_dict['parent_uuid'] = parent_uuid
                    break
        else:
            event_dict['parent_uuid'] = event_data.get('parent_uuid', None)
        if "verbosity" in event_data:
            event_dict["verbosity"] = event_data.pop("verbosity")
        if not omit_event_data and should_process_event_data:
            # Cap oversized 'res' payloads (except playbook stats) to keep
            # event files manageable.
            max_res = int(os.getenv("MAX_EVENT_RES", "700000"))
            if event not in ('playbook_on_stats',) and "res" in event_data and len(str(event_data['res'])) > max_res:
                event_data['res'] = {}
        else:
            event_data = {}
        event_dict['event_data'] = event_data
        return event_dict

    def get_end_dict(self):
        return {}

    def dump(self, fileobj, data, max_width=78, flush=False):
        # Emit base64-encoded JSON wrapped in ANSI control sequences so the
        # OutputEventFilter on the consuming side can extract it from stdout.
        b64data = base64.b64encode(json.dumps(data).encode('utf-8')).decode()
        with self.display_lock:
            # pattern corresponding to OutputEventFilter expectation
            fileobj.write('\x1b[K')
            for offset in range(0, len(b64data), max_width):
                chunk = b64data[offset:offset + max_width]
                escaped_chunk = f'{chunk}\x1b[{len(chunk)}D'
                fileobj.write(escaped_chunk)
            fileobj.write('\x1b[K')
            if flush:
                fileobj.flush()

    def dump_begin(self, fileobj):
        # Persist the full begin-event to the cache; only the UUID goes to
        # the stream itself.
        begin_dict = self.get_begin_dict()
        self.cache.set(f":1:ev-{begin_dict['uuid']}", begin_dict)
        self.dump(fileobj, {'uuid': begin_dict['uuid']})

    def dump_end(self, fileobj):
        self.dump(fileobj, self.get_end_dict(), flush=True)
# Module-wide event context shared by the Display wrappers and the callback
# module below.
event_context = EventContext()


def with_context(**context):
    """
    Decorator factory: run the wrapped callable with ``context`` applied as
    thread-local event context for the duration of the call.
    """
    global event_context  # pylint: disable=W0602

    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            with event_context.set_local(**context):
                return func(*args, **kwargs)
        return wrapper

    return decorator
# Monkeypatch most public Display methods (warning, error, debug, ...) so
# their output is tagged with the name of the method that produced it.
# 'display'/'verbose' and the v* shortcuts are wrapped separately below;
# cow/prompt helpers and private attributes are left alone.
for attr in dir(Display):
    if attr.startswith('_') or 'cow' in attr or 'prompt' in attr:
        continue
    if attr in ('display', 'v', 'vv', 'vvv', 'vvvv', 'vvvvv', 'vvvvvv', 'verbose'):
        continue
    if not callable(getattr(Display, attr)):
        continue
    setattr(Display, attr, with_context(**{attr: True})(getattr(Display, attr)))
def with_verbosity(f):
    """
    Wrap ``Display.verbose`` so each message carries its verbosity level
    and, when available, the host it relates to.
    """
    global event_context  # pylint: disable=W0602

    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        # Positional indices assume the bound-method signature
        # Display.verbose(self, msg, host=None, caplevel=2) -- so args[2]
        # is host and args[3] is caplevel. NOTE(review): verify against the
        # Ansible version in use.
        host = args[2] if len(args) >= 3 else kwargs.get('host', None)
        caplevel = args[3] if len(args) >= 4 else kwargs.get('caplevel', 2)
        context = {'verbose': True, 'verbosity': (caplevel + 1)}
        if host is not None:
            context['remote_addr'] = host
        with event_context.set_local(**context):
            return f(*args, **kwargs)
    return wrapper


Display.verbose = with_verbosity(Display.verbose)
def display_with_context(f):
    """
    Wrap ``Display.display`` so that output not already tied to a callback
    event is emitted between begin/end event markers of its own.
    """

    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        # Positional indices assume the bound-method signature
        # Display.display(self, msg, color=None, stderr=False, screen_only=False,
        # log_only=False) -- NOTE(review): verify against the Ansible version.
        log_only = args[5] if len(args) >= 6 else kwargs.get('log_only', False)
        stderr = args[3] if len(args) >= 4 else kwargs.get('stderr', False)
        event_uuid = event_context.get().get('uuid', None)
        with event_context.display_lock:
            # If writing only to a log file or there is already an event UUID
            # set (from a callback module method), skip dumping the event data.
            if log_only or event_uuid:
                return f(*args, **kwargs)
            try:
                fileobj = sys.stderr if stderr else sys.stdout
                event_context.add_local(uuid=str(uuid.uuid4()))
                event_context.dump_begin(fileobj)
                return f(*args, **kwargs)
            finally:
                event_context.dump_end(fileobj)
                event_context.remove_local(uuid=None)

    return wrapper


Display.display = display_with_context(Display.display)
class CallbackModule(DefaultCallbackModule):
    '''
    Callback module for logging ansible/ansible-playbook events.

    Each v2_* hook collects event metadata, then delegates to the underlying
    stdout callback inside a capture_event_data() context so the event is
    emitted in the stream ansible-runner's event processing expects.
    '''

    CALLBACK_NAME = 'awx_display'
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'stdout'

    # These events should never have an associated play.
    EVENTS_WITHOUT_PLAY = [
        'playbook_on_start',
        'playbook_on_stats',
    ]
    # These events should never have an associated task.
    EVENTS_WITHOUT_TASK = EVENTS_WITHOUT_PLAY + [
        'playbook_on_setup',
        'playbook_on_notify',
        'playbook_on_import_for_host',
        'playbook_on_not_import_for_host',
        'playbook_on_no_hosts_matched',
        'playbook_on_no_hosts_remaining',
    ]

    def __init__(self):
        super().__init__()
        # Per-host task start times, used to compute per-result durations.
        self._host_start = {}
        # Seen task/play UUIDs plus counters for de-duplicating repeats
        # (free strategy / serial:1 can replay the same UUID).
        self.task_uuids = set()
        self.duplicate_task_counts = collections.defaultdict(lambda: 1)

        self.play_uuids = set()
        self.duplicate_play_counts = collections.defaultdict(lambda: 1)
        # NOTE: Ansible doesn't generate a UUID for playbook_on_start so do it for them.
        self.playbook_uuid = str(uuid.uuid4())

    @contextlib.contextmanager
    def capture_event_data(self, event, **event_data):
        """Emit begin/end event markers around the wrapped stdout output,
        censoring any no_log results first."""
        event_data.setdefault('uuid', str(uuid.uuid4()))

        if event not in self.EVENTS_WITHOUT_TASK:
            task = event_data.pop('task', None)
        else:
            task = None

        if event_data.get('res'):
            if event_data['res'].get('_ansible_no_log', False):
                event_data['res'] = {'censored': CENSORED}
            if event_data['res'].get('results', []):
                # Copy before mutating so the original result object is untouched.
                event_data['res']['results'] = copy(event_data['res']['results'])
                for i, item in enumerate(event_data['res'].get('results', [])):
                    if isinstance(item, dict) and item.get('_ansible_no_log', False):
                        event_data['res']['results'][i] = {'censored': CENSORED}

        with event_context.display_lock:
            try:
                event_context.add_local(event=event, **event_data)
                if task:
                    self.set_task(task, local=True)
                event_context.dump_begin(sys.stdout)
                yield
            finally:
                event_context.dump_end(sys.stdout)
                if task:
                    self.clear_task(local=True)
                event_context.remove_local(event=None, **event_data)

    def set_playbook(self, playbook):
        """Record the current playbook filename and UUID in global context."""
        file_name = getattr(playbook, '_file_name', '???')
        event_context.add_global(playbook=file_name, playbook_uuid=self.playbook_uuid)
        self.clear_play()

    def set_play(self, play):
        """Record the current play (name, uuid, host pattern) in global context."""
        if hasattr(play, 'hosts'):
            if isinstance(play.hosts, list):
                pattern = ','.join(play.hosts)
            else:
                pattern = play.hosts
        else:
            pattern = ''
        name = play.get_name().strip() or pattern
        event_context.add_global(play=name, play_uuid=str(play._uuid), play_pattern=pattern)
        self.clear_task()

    def clear_play(self):
        event_context.remove_global(play=None, play_uuid=None, play_pattern=None)
        self.clear_task()

    def set_task(self, task, local=False):
        """Record the current task's metadata in global (or thread-local) context."""
        self.clear_task(local)
        # FIXME: Task is "global" unless using free strategy!
        task_ctx = {
            'task': (task.name or task.action),
            'task_uuid': str(task._uuid),
            'task_action': task.action,
            'resolved_action': getattr(task, 'resolved_action', task.action),
            'task_args': '',
        }
        try:
            task_ctx['task_path'] = task.get_path()
        except AttributeError:
            pass

        if C.DISPLAY_ARGS_TO_STDOUT:  # pylint: disable=E1101
            if task.no_log:
                task_ctx['task_args'] = "the output has been hidden due to the fact that 'no_log: true' was specified for this result"
            else:
                task_args = ', '.join((f'{k}={v}' for k, v in task.args.items()))
                task_ctx['task_args'] = task_args
        if getattr(task, '_role', None):
            task_role = task._role._role_name
            if hasattr(task._role, 'get_name'):
                resolved_role = task._role.get_name()
                if resolved_role != task_role:
                    task_ctx['resolved_role'] = resolved_role
        else:
            task_role = getattr(task, 'role_name', '')
        if task_role:
            task_ctx['role'] = task_role

        if local:
            event_context.add_local(**task_ctx)
        else:
            event_context.add_global(**task_ctx)

    def clear_task(self, local=False):
        task_ctx = {
            'task': None, 'task_path': None, 'task_uuid': None, 'task_action': None, 'task_args': None, 'resolved_action': None,
            'role': None, 'resolved_role': None
        }
        if local:
            event_context.remove_local(**task_ctx)
        else:
            event_context.remove_global(**task_ctx)

    def v2_playbook_on_start(self, playbook):
        self.set_playbook(playbook)
        event_data = {
            'uuid': self.playbook_uuid,
        }
        with self.capture_event_data('playbook_on_start', **event_data):
            super().v2_playbook_on_start(playbook)

    def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None,
                                   encrypt=None, confirm=False, salt_size=None,
                                   salt=None, default=None, unsafe=None):
        event_data = {
            'varname': varname,
            'private': private,
            'prompt': prompt,
            'encrypt': encrypt,
            'confirm': confirm,
            'salt_size': salt_size,
            'salt': salt,
            'default': default,
            'unsafe': unsafe,
        }
        with self.capture_event_data('playbook_on_vars_prompt', **event_data):
            # NOTE(review): 'unsafe' is captured in the event but not forwarded
            # to the superclass call -- confirm this matches the base signature.
            super().v2_playbook_on_vars_prompt(
                varname, private, prompt, encrypt, confirm, salt_size, salt,
                default,
            )

    def v2_playbook_on_include(self, included_file):
        event_data = {
            'included_file': included_file._filename if included_file is not None else None,
        }
        with self.capture_event_data('playbook_on_include', **event_data):
            super().v2_playbook_on_include(included_file)

    def v2_playbook_on_play_start(self, play):
        if IS_ADHOC:
            return
        play_uuid = str(play._uuid)
        if play_uuid in self.play_uuids:
            # When this play UUID repeats, it means the play is using the
            # free strategy (or serial:1) so different hosts may be running
            # different tasks within a play (where duplicate UUIDS are common).
            #
            # When this is the case, modify the UUID slightly to append
            # a counter so we can still _track_ duplicate events, but also
            # avoid breaking the display in these scenarios.
            self.duplicate_play_counts[play_uuid] += 1

            play_uuid = '_'.join([
                play_uuid,
                str(self.duplicate_play_counts[play_uuid])
            ])
        self.play_uuids.add(play_uuid)
        play._uuid = play_uuid

        self.set_play(play)
        if hasattr(play, 'hosts'):
            if isinstance(play.hosts, list):
                pattern = ','.join(play.hosts)
            else:
                pattern = play.hosts
        else:
            pattern = ''
        name = play.get_name().strip() or pattern
        event_data = {
            'name': name,
            'pattern': pattern,
            'uuid': str(play._uuid),
        }
        with self.capture_event_data('playbook_on_play_start', **event_data):
            super().v2_playbook_on_play_start(play)

    def v2_playbook_on_import_for_host(self, result, imported_file):
        # NOTE: Not used by Ansible 2.x.
        with self.capture_event_data('playbook_on_import_for_host'):
            super().v2_playbook_on_import_for_host(result, imported_file)

    def v2_playbook_on_not_import_for_host(self, result, missing_file):
        # NOTE: Not used by Ansible 2.x.
        with self.capture_event_data('playbook_on_not_import_for_host'):
            super().v2_playbook_on_not_import_for_host(result, missing_file)

    def v2_playbook_on_setup(self):
        # NOTE: Not used by Ansible 2.x.
        with self.capture_event_data('playbook_on_setup'):
            super().v2_playbook_on_setup()

    def v2_playbook_on_task_start(self, task, is_conditional):
        if IS_ADHOC:
            self.set_task(task)
            return
        # FIXME: Flag task path output as vv.
        task_uuid = str(task._uuid)
        if task_uuid in self.task_uuids:
            # When this task UUID repeats, it means the play is using the
            # free strategy (or serial:1) so different hosts may be running
            # different tasks within a play (where duplicate UUIDS are common).
            #
            # When this is the case, modify the UUID slightly to append
            # a counter so we can still _track_ duplicate events, but also
            # avoid breaking the display in these scenarios.
            self.duplicate_task_counts[task_uuid] += 1

            task_uuid = '_'.join([
                task_uuid,
                str(self.duplicate_task_counts[task_uuid])
            ])
        self.task_uuids.add(task_uuid)
        self.set_task(task)
        event_data = {
            'task': task,
            'name': task.get_name(),
            'is_conditional': is_conditional,
            'uuid': task_uuid,
        }
        with self.capture_event_data('playbook_on_task_start', **event_data):
            super().v2_playbook_on_task_start(task, is_conditional)

    def v2_playbook_on_cleanup_task_start(self, task):
        # NOTE: Not used by Ansible 2.x.
        self.set_task(task)
        event_data = {
            'task': task,
            'name': task.get_name(),
            'uuid': str(task._uuid),
            'is_conditional': True,
        }
        with self.capture_event_data('playbook_on_task_start', **event_data):
            super().v2_playbook_on_cleanup_task_start(task)

    def v2_playbook_on_handler_task_start(self, task):
        # NOTE: Re-using playbook_on_task_start event for this v2-specific
        # event, but setting is_conditional=True, which is how v1 identified a
        # task run as a handler.
        self.set_task(task)
        event_data = {
            'task': task,
            'name': task.get_name(),
            'uuid': str(task._uuid),
            'is_conditional': True,
        }
        with self.capture_event_data('playbook_on_task_start', **event_data):
            super().v2_playbook_on_handler_task_start(task)

    def v2_playbook_on_no_hosts_matched(self):
        with self.capture_event_data('playbook_on_no_hosts_matched'):
            super().v2_playbook_on_no_hosts_matched()

    def v2_playbook_on_no_hosts_remaining(self):
        with self.capture_event_data('playbook_on_no_hosts_remaining'):
            super().v2_playbook_on_no_hosts_remaining()

    def v2_playbook_on_notify(self, handler, host):
        # NOTE: Not used by Ansible < 2.5.
        event_data = {
            'host': host.get_name(),
            'handler': handler.get_name(),
        }
        with self.capture_event_data('playbook_on_notify', **event_data):
            super().v2_playbook_on_notify(handler, host)

    # ansible_stats is, retroactively, added in 2.2
    def v2_playbook_on_stats(self, stats):
        self.clear_play()
        # FIXME: Add count of plays/tasks.
        event_data = {
            'changed': stats.changed,
            'dark': stats.dark,
            'failures': stats.failures,
            'ignored': getattr(stats, 'ignored', 0),
            'ok': stats.ok,
            'processed': stats.processed,
            'rescued': getattr(stats, 'rescued', 0),
            'skipped': stats.skipped,
            'artifact_data': stats.custom.get('_run', {}) if hasattr(stats, 'custom') else {}
        }

        with self.capture_event_data('playbook_on_stats', **event_data):
            super().v2_playbook_on_stats(stats)

    @staticmethod
    def _get_event_loop(task):
        # Return the loop construct used by the task, across Ansible versions.
        if hasattr(task, 'loop_with'):  # Ansible >=2.5
            return task.loop_with
        if hasattr(task, 'loop'):  # Ansible <2.4
            return task.loop
        return None

    def _get_result_timing_data(self, result):
        # Compute (start, end, duration-in-seconds) from the recorded host
        # start time; all None when no start was recorded for the host.
        host_start = self._host_start.get(result._host.get_name())
        if host_start:
            end_time = current_time()
            return host_start, end_time, (end_time - host_start).total_seconds()
        return None, None, None

    def v2_runner_on_ok(self, result):
        # FIXME: Display detailed results or not based on verbosity.

        # strip environment vars from the job event; it already exists on the
        # job and sensitive values are filtered there
        if result._task.action in ('setup', 'gather_facts'):
            result._result.get('ansible_facts', {}).pop('ansible_env', None)

        host_start, end_time, duration = self._get_result_timing_data(result)
        event_data = {
            'host': result._host.get_name(),
            'remote_addr': result._host.address,
            'task': result._task,
            'res': result._result,
            'start': host_start,
            'end': end_time,
            'duration': duration,
            'event_loop': self._get_event_loop(result._task),
        }
        with self.capture_event_data('runner_on_ok', **event_data):
            super().v2_runner_on_ok(result)

    def v2_runner_on_failed(self, result, ignore_errors=False):
        # FIXME: Add verbosity for exception/results output.
        host_start, end_time, duration = self._get_result_timing_data(result)
        event_data = {
            'host': result._host.get_name(),
            'remote_addr': result._host.address,
            'res': result._result,
            'task': result._task,
            'start': host_start,
            'end': end_time,
            'duration': duration,
            'ignore_errors': ignore_errors,
            'event_loop': self._get_event_loop(result._task),
        }
        with self.capture_event_data('runner_on_failed', **event_data):
            super().v2_runner_on_failed(result, ignore_errors)

    def v2_runner_on_skipped(self, result):
        host_start, end_time, duration = self._get_result_timing_data(result)
        event_data = {
            'host': result._host.get_name(),
            'remote_addr': result._host.address,
            'task': result._task,
            'start': host_start,
            'end': end_time,
            'duration': duration,
            'event_loop': self._get_event_loop(result._task),
        }
        with self.capture_event_data('runner_on_skipped', **event_data):
            super().v2_runner_on_skipped(result)

    def v2_runner_on_unreachable(self, result):
        host_start, end_time, duration = self._get_result_timing_data(result)
        event_data = {
            'host': result._host.get_name(),
            'remote_addr': result._host.address,
            'task': result._task,
            'start': host_start,
            'end': end_time,
            'duration': duration,
            'res': result._result,
        }
        with self.capture_event_data('runner_on_unreachable', **event_data):
            super().v2_runner_on_unreachable(result)

    def v2_runner_on_no_hosts(self, task):
        # NOTE: Not used by Ansible 2.x.
        event_data = {
            'task': task,
        }
        with self.capture_event_data('runner_on_no_hosts', **event_data):
            super().v2_runner_on_no_hosts(task)

    def v2_runner_on_async_poll(self, result):
        # NOTE: Not used by Ansible 2.x.
        event_data = {
            'host': result._host.get_name(),
            'task': result._task,
            'res': result._result,
            'jid': result._result.get('ansible_job_id'),
        }
        with self.capture_event_data('runner_on_async_poll', **event_data):
            super().v2_runner_on_async_poll(result)

    def v2_runner_on_async_ok(self, result):
        # NOTE: Not used by Ansible 2.x.
        event_data = {
            'host': result._host.get_name(),
            'task': result._task,
            'res': result._result,
            'jid': result._result.get('ansible_job_id'),
        }
        with self.capture_event_data('runner_on_async_ok', **event_data):
            super().v2_runner_on_async_ok(result)

    def v2_runner_on_async_failed(self, result):
        # NOTE: Not used by Ansible 2.x.
        event_data = {
            'host': result._host.get_name(),
            'task': result._task,
            'res': result._result,
            'jid': result._result.get('ansible_job_id'),
        }
        with self.capture_event_data('runner_on_async_failed', **event_data):
            super().v2_runner_on_async_failed(result)

    def v2_runner_on_file_diff(self, result, diff):
        # NOTE: Not used by Ansible 2.x.
        event_data = {
            'host': result._host.get_name(),
            'task': result._task,
            'diff': diff,
        }
        with self.capture_event_data('runner_on_file_diff', **event_data):
            super().v2_runner_on_file_diff(result, diff)

    def v2_on_file_diff(self, result):
        # NOTE: Logged as runner_on_file_diff.
        event_data = {
            'host': result._host.get_name(),
            'task': result._task,
            'diff': result._result.get('diff'),
        }
        with self.capture_event_data('runner_on_file_diff', **event_data):
            super().v2_on_file_diff(result)

    def v2_runner_item_on_ok(self, result):
        event_data = {
            'host': result._host.get_name(),
            'task': result._task,
            'res': result._result,
        }
        with self.capture_event_data('runner_item_on_ok', **event_data):
            super().v2_runner_item_on_ok(result)

    def v2_runner_item_on_failed(self, result):
        event_data = {
            'host': result._host.get_name(),
            'task': result._task,
            'res': result._result,
        }
        with self.capture_event_data('runner_item_on_failed', **event_data):
            super().v2_runner_item_on_failed(result)

    def v2_runner_item_on_skipped(self, result):
        event_data = {
            'host': result._host.get_name(),
            'task': result._task,
            'res': result._result,
        }
        with self.capture_event_data('runner_item_on_skipped', **event_data):
            super().v2_runner_item_on_skipped(result)

    def v2_runner_retry(self, result):
        event_data = {
            'host': result._host.get_name(),
            'task': result._task,
            'res': result._result,
        }
        with self.capture_event_data('runner_retry', **event_data):
            super().v2_runner_retry(result)

    def v2_runner_on_start(self, host, task):
        event_data = {
            'host': host.get_name(),
            'task': task
        }
        # Record the host's start time so later result events can report duration.
        self._host_start[host.get_name()] = current_time()
        with self.capture_event_data('runner_on_start', **event_data):
            super().v2_runner_on_start(host, task)
ansible-runner-2.4.1/src/ansible_runner/exceptions.py 0000664 0000000 0000000 00000000400 14770573620 0022777 0 ustar 00root root 0000000 0000000
class AnsibleRunnerException(Exception):
    """Base exception for all errors raised by ansible-runner."""
class ConfigurationError(AnsibleRunnerException):
    """Raised when runner configuration is missing or invalid."""
class CallbackError(AnsibleRunnerException):
    """Raised when an exception occurs inside a callback."""
ansible-runner-2.4.1/src/ansible_runner/interface.py 0000664 0000000 0000000 00000175526 14770573620 0022604 0 ustar 00root root 0000000 0000000 # Copyright (c) 2016 Ansible by Red Hat, Inc.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import json
import sys
import threading
import logging
from ansible_runner import output
from ansible_runner.config.runner import RunnerConfig
from ansible_runner.config.command import CommandConfig
from ansible_runner.config.inventory import InventoryConfig
from ansible_runner.config.ansible_cfg import AnsibleCfgConfig
from ansible_runner.config.doc import DocConfig
from ansible_runner.runner import Runner
from ansible_runner.streaming import Transmitter, Worker, Processor
from ansible_runner.utils import (
dump_artifacts,
check_isolation_executable_installed,
sanitize_json_response,
signal_handler,
)
logging.getLogger('ansible-runner').addHandler(logging.NullHandler())
def init_runner(**kwargs):
    '''
    Initialize the Runner() instance

    This function will properly initialize both run() and run_async()
    functions in the same way and return a value instance of Runner.

    Depending on the ``streamer`` keyword it may instead return a
    Transmitter, Worker, or Processor object for the streaming pipeline.

    See parameters given to :py:func:`ansible_runner.interface.run`
    '''
    # Handle logging first thing: these keys must be popped before they can
    # leak into the RunnerConfig kwargs below.
    debug = kwargs.pop('debug', None)
    logfile = kwargs.pop('logfile', None)

    if not kwargs.pop("ignore_logging", True):
        output.configure()
        if debug in (True, False):
            output.set_debug('enable' if debug is True else 'disable')

        if logfile:
            output.set_logfile(logfile)

    # If running via the transmit-worker-process method, we must only extract things as read-only
    # inside of one of these commands. That could be either transmit or worker.
    if kwargs.get('streamer') not in ('worker', 'process'):
        dump_artifacts(kwargs)

    if kwargs.get('streamer'):
        # undo any full paths that were dumped by dump_artifacts above in the streamer case
        private_data_dir = kwargs['private_data_dir']
        project_dir = os.path.join(private_data_dir, 'project')

        # Playbook paths are made relative to the project dir so the remote
        # worker can resolve them inside its own private data dir.
        playbook_path = kwargs.get('playbook') or ''
        if os.path.isabs(playbook_path) and playbook_path.startswith(project_dir):
            kwargs['playbook'] = os.path.relpath(playbook_path, project_dir)

        inventory_path = kwargs.get('inventory') or ''
        if os.path.isabs(inventory_path) and inventory_path.startswith(private_data_dir):
            kwargs['inventory'] = os.path.relpath(inventory_path, private_data_dir)

        roles_path = kwargs.get('envvars', {}).get('ANSIBLE_ROLESPATH' if False else 'ANSIBLE_ROLES_PATH') or ''
        if os.path.isabs(roles_path) and roles_path.startswith(private_data_dir):
            kwargs['envvars']['ANSIBLE_ROLES_PATH'] = os.path.relpath(roles_path, private_data_dir)

    # Callbacks are popped here so they are not passed through to the config
    # objects; they are wired into the Runner/Processor directly.
    event_callback_handler = kwargs.pop('event_handler', None)
    status_callback_handler = kwargs.pop('status_handler', None)
    artifacts_handler = kwargs.pop('artifacts_handler', None)
    cancel_callback = kwargs.pop('cancel_callback', None)
    if cancel_callback is None:
        # attempt to load signal handler.
        # will return None if we are not in the main thread
        cancel_callback = signal_handler()
    finished_callback = kwargs.pop('finished_callback', None)

    streamer = kwargs.pop('streamer', None)
    if streamer:
        if streamer == 'transmit':
            stream_transmitter = Transmitter(**kwargs)
            return stream_transmitter

        if streamer == 'worker':
            stream_worker = Worker(**kwargs)
            return stream_worker

        if streamer == 'process':
            stream_processor = Processor(event_handler=event_callback_handler,
                                         status_handler=status_callback_handler,
                                         artifacts_handler=artifacts_handler,
                                         cancel_callback=cancel_callback,
                                         finished_callback=finished_callback,
                                         **kwargs)
            return stream_processor

    # Fail fast if process isolation was requested but the container engine
    # (or sandbox executable) is not installed on this host.
    if kwargs.get("process_isolation", False):
        pi_executable = kwargs.get("process_isolation_executable", "podman")
        if not check_isolation_executable_installed(pi_executable):
            print(f'Unable to find process isolation executable: {pi_executable}')
            sys.exit(1)

    # _input/_output are only meaningful for the streaming pipeline; drop them
    # before constructing a local RunnerConfig.
    kwargs.pop('_input', None)
    kwargs.pop('_output', None)
    rc = RunnerConfig(**kwargs)
    rc.prepare()

    return Runner(rc,
                  event_handler=event_callback_handler,
                  status_handler=status_callback_handler,
                  artifacts_handler=artifacts_handler,
                  cancel_callback=cancel_callback,
                  finished_callback=finished_callback)
def run(**kwargs):
    '''
    Run an Ansible Runner task in the foreground and return a Runner object when complete.

    :param str private_data_dir: The directory containing all runner metadata needed to invoke the runner
                                 module. Output artifacts will also be stored here for later consumption.
    :param str ident: The run identifier for this invocation of Runner. Will be used to create and name
                      the artifact directory holding the results of the invocation.
    :param bool json_mode: Store event data in place of stdout on the console and in the stdout file
    :param str or list playbook: The playbook (either a list or dictionary of plays, or as a path relative to
                                 ``private_data_dir/project``) that will be invoked by runner when executing Ansible.
    :param str module: The module that will be invoked in ad-hoc mode by runner when executing Ansible.
    :param str module_args: The module arguments that will be supplied to ad-hoc mode.
    :param str host_pattern: The host pattern to match when running in ad-hoc mode.
    :param str or dict or list inventory: Overrides the inventory directory/file (supplied at ``private_data_dir/inventory``) with
        a specific host or list of hosts. This can take the form of:

            - Path to the inventory file in the ``private_data_dir/inventory`` directory or
              an absolute path to the inventory file
            - Native python dict supporting the YAML/json inventory structure
            - A text INI formatted string
            - A list of inventory sources, or an empty list to disable passing inventory

    :param str role: Name of the role to execute.
    :param str or list roles_path: Directory or list of directories to assign to ANSIBLE_ROLES_PATH
    :param dict envvars: Environment variables to be used when running Ansible. Environment variables will also be
                         read from ``env/envvars`` in ``private_data_dir``
    :param dict extravars: Extra variables to be passed to Ansible at runtime using ``-e``. Extra vars will also be
                           read from ``env/extravars`` in ``private_data_dir``.
    :param dict passwords: A dictionary containing password prompt patterns and response values used when processing output from
                           Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
    :param dict settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
                          be read from ``env/settings`` in ``private_data_dir``.
    :param str ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
    :param str cmdline: Command line options passed to Ansible read from ``env/cmdline`` in ``private_data_dir``
    :param bool suppress_env_files: Disable the writing of files into the ``env`` which may store sensitive information
    :param str limit: Matches ansible's ``--limit`` parameter to further constrain the inventory to be used
    :param int forks: Control Ansible parallel concurrency
    :param int verbosity: Control how verbose the output of ansible-playbook is
    :param bool quiet: Disable all output
    :param str artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir
    :param str project_dir: The path to the playbook content, this defaults to 'project' within the private data dir
    :param int rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default
    :param int timeout: The timeout value in seconds that will be passed to either ``pexpect`` or ``subprocess`` invocation
                        (based on ``runner_mode`` selected) while executing command. If the timeout is triggered it will force cancel the
                        execution.
    :param str streamer: Optionally invoke ansible-runner as one of the steps in the streaming pipeline
    :param io.FileIO _input: An optional file or file-like object for use as input in a streaming pipeline
    :param io.FileIO _output: An optional file or file-like object for use as output in a streaming pipeline
    :param Callable event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event
    :param Callable cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False)
    :param Callable finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
    :param Callable status_handler: An optional callback that will be invoked any time the status changes (e.g. started, running, failed, successful, timeout)
    :param Callable artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run.
    :param bool process_isolation: Enable process isolation, using either a container engine (e.g. podman) or a sandbox (e.g. bwrap).
    :param str process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman)
    :param str process_isolation_path: Path that an isolated playbook run will use for staging. (default: /tmp)
    :param str or list process_isolation_hide_paths: A path or list of paths on the system that should be hidden from the playbook run.
    :param str or list process_isolation_show_paths: A path or list of paths on the system that should be exposed to the playbook run.
    :param str or list process_isolation_ro_paths: A path or list of paths on the system that should be exposed to the playbook run as read-only.
    :param str container_image: Container image to use when running an ansible task
    :param list container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir. (default: None)
    :param list container_options: List of container options to pass to execution engine.
    :param str directory_isolation_base_path: An optional path will be used as the base path to create a temp directory, the project contents will be
                                              copied to this location which will then be used as the working directory during playbook execution.
    :param str fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory.
                           This is only used for 'jsonfile' type fact caches.
    :param str fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
    :param bool omit_event_data: Omits extra ansible event data from event payload (stdout and event still included)
    :param bool only_failed_event_data: Omits extra ansible event data unless it's a failed event (stdout and event still included)
    :param bool check_job_event_data: Check if job events data is completely generated. If event data is not completely generated and if
                                      value is set to 'True' it will raise 'AnsibleRunnerException' exception,
                                      if set to 'False' it logs a debug message and continues execution. Default value is 'False'

    :returns: A :py:class:`ansible_runner.runner.Runner` object, or a simple object containing ``rc`` if run remotely
    '''
    r = init_runner(**kwargs)
    r.run()
    return r
def run_async(**kwargs):
    '''
    Run an Ansible Runner task in the background, starting immediately.

    This uses the same parameters as :py:func:`ansible_runner.interface.run`

    :returns: A tuple containing a :py:class:`threading.Thread` object and a :py:class:`ansible_runner.runner.Runner` object
    '''
    runner = init_runner(**kwargs)
    thread = threading.Thread(target=runner.run)
    thread.start()
    return thread, runner
def init_command_config(executable_cmd, cmdline_args=None, **kwargs):
    '''
    Initialize the Runner() instance

    This function will properly initialize both run_command() and run_command_async()
    functions in the same way and return a value instance of Runner.

    See parameters given to :py:func:`ansible_runner.interface.run_command`
    '''
    # Pull the runner callbacks out before the rest of the kwargs are handed
    # to CommandConfig, which does not accept them.
    callback_names = ('event_handler', 'status_handler', 'artifacts_handler',
                      'cancel_callback', 'finished_callback')
    callbacks = {name: kwargs.pop(name, None) for name in callback_names}

    command_config = CommandConfig(**kwargs)
    command_config.prepare_run_command(executable_cmd, cmdline_args=cmdline_args)
    return Runner(command_config,
                  event_handler=callbacks['event_handler'],
                  status_handler=callbacks['status_handler'],
                  artifacts_handler=callbacks['artifacts_handler'],
                  cancel_callback=callbacks['cancel_callback'],
                  finished_callback=callbacks['finished_callback'])
def run_command(executable_cmd, cmdline_args=None, **kwargs):
    '''
    Run an (Ansible) command in the foreground and return a Runner object when complete.

    :param str executable_cmd: The command to be executed.
    :param list cmdline_args: A list of arguments to be passed to the executable command.
    :param int input_fd: This parameter is applicable when ``runner_mode`` is set to ``subprocess``, it provides the
                         input file descriptor to interact with the sub-process running the command.
    :param int output_fd: The output file descriptor to stream the output of command execution.
    :param int error_fd: This parameter is applicable when ``runner_mode`` is set to ``subprocess``, it provides the
                         error file descriptor to read the error received while executing the command.
    :param str runner_mode: The applicable values are ``pexpect`` and ``subprocess``. If the value of ``input_fd`` parameter
                            is set or the executable command is one of ``ansible-config``, ``ansible-doc`` or ``ansible-galaxy``
                            the default value is set to ``subprocess`` else in other cases it is set to ``pexpect``.
    :param str host_cwd: The host current working directory to be mounted within the container (if enabled) and will be
                         the work directory within container.
    :param dict envvars: Environment variables to be used when running Ansible. Environment variables will also be
                         read from ``env/envvars`` in ``private_data_dir``
    :param dict passwords: A dictionary containing password prompt patterns and response values used when processing output from
                           Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
    :param dict settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
                          be read from ``env/settings`` in ``private_data_dir``.
    :param str ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
    :param bool quiet: Disable all output
    :param bool json_mode: Store event data in place of stdout on the console and in the stdout file
    :param str artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir
    :param str project_dir: The path to the playbook content, this defaults to 'project' within the private data dir
    :param int rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default
    :param int timeout: The timeout value in seconds that will be passed to either ``pexpect`` or ``subprocess`` invocation
                        (based on ``runner_mode`` selected) while executing command. If the timeout is triggered it will force cancel the
                        execution.
    :param bool process_isolation: Enable process isolation, using a container engine (e.g. podman).
    :param str process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman)
    :param str container_image: Container image to use when running an ansible task
    :param list container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels. (default: None)
    :param list container_options: List of container options to pass to execution engine.
    :param str container_workdir: The working directory within the container.
    :param str fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory.
                           This is only used for 'jsonfile' type fact caches.
    :param str fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
    :param str private_data_dir: The directory containing all runner metadata needed to invoke the runner
                                 module. Output artifacts will also be stored here for later consumption.
    :param str ident: The run identifier for this invocation of Runner. Will be used to create and name
                      the artifact directory holding the results of the invocation.
    :param Callable event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event
    :param Callable cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False)
    :param Callable finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
    :param Callable status_handler: An optional callback that will be invoked any time the status changes (e.g. started, running, failed, successful, timeout)
    :param Callable artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run.
    :param bool check_job_event_data: Check if job events data is completely generated. If event data is not completely generated and if
                                      value is set to 'True' it will raise 'AnsibleRunnerException' exception,
                                      if set to 'False' it logs a debug message and continues execution. Default value is 'False'

    :returns: Returns a tuple of response, error string and return code.
              In case if ``runner_mode`` is set to ``pexpect`` the error value is empty as
              ``pexpect`` uses same output descriptor for stdout and stderr.
    '''
    r = init_command_config(executable_cmd, cmdline_args=cmdline_args, **kwargs)
    r.run()
    # Read the captured streams after the run completes; the with-statement
    # closes the underlying artifact files.
    with r.stdout as stdout, r.stderr as stderr:
        response = stdout.read()
        error = stderr.read()
    return response, error, r.rc
def run_command_async(executable_cmd, cmdline_args=None, **kwargs):
    '''
    Run an (Ansible) command in the background, starting immediately.

    This uses the same parameters as :py:func:`ansible_runner.interface.run_command`

    :returns: A tuple containing a :py:class:`threading.Thread` object and a :py:class:`ansible_runner.runner.Runner` object
    '''
    runner = init_command_config(executable_cmd, cmdline_args=cmdline_args, **kwargs)
    thread = threading.Thread(target=runner.run)
    thread.start()
    return thread, runner
def init_plugin_docs_config(plugin_names, plugin_type=None, response_format=None,
                            snippet=False, playbook_dir=None, module_path=None, **kwargs):
    '''
    Initialize the Runner() instance

    This function will properly initialize both get_plugin_docs() and get_plugin_docs_async()
    functions in the same way and return a value instance of Runner.

    See parameters given to :py:func:`ansible_runner.interface.get_plugin_docs`
    '''
    # Pull the runner callbacks out before DocConfig consumes the kwargs.
    callback_names = ('event_handler', 'status_handler', 'artifacts_handler',
                      'cancel_callback', 'finished_callback')
    callbacks = {name: kwargs.pop(name, None) for name in callback_names}

    doc_config = DocConfig(**kwargs)
    doc_config.prepare_plugin_docs_command(plugin_names, plugin_type=plugin_type,
                                           response_format=response_format,
                                           snippet=snippet, playbook_dir=playbook_dir,
                                           module_path=module_path)
    return Runner(doc_config,
                  event_handler=callbacks['event_handler'],
                  status_handler=callbacks['status_handler'],
                  artifacts_handler=callbacks['artifacts_handler'],
                  cancel_callback=callbacks['cancel_callback'],
                  finished_callback=callbacks['finished_callback'])
def get_plugin_docs(plugin_names, plugin_type=None, response_format=None, snippet=False, playbook_dir=None, module_path=None, **kwargs):
    '''
    Run an ansible-doc command to get plugin docs in the foreground and return a Runner object when complete.

    :param plugin_names: The name of the plugins to get docs.
    :param plugin_type: The type of the plugin mentioned in plugins_names. Valid values are ``become``, ``cache``, ``callback``,
                        ``cliconf``, ``connection``, ``httpapi``, ``inventory``, ``lookup``, ``netconf``, ``shell``, ``vars``,
                        ``module``, ``strategy``. If the value is not provided it defaults to ``module``.
    :param response_format: The output format for response. Valid values can be one of ``json`` or ``human`` and the response
                            is either json string or plain text in human readable format. Default value is ``json``.
    :param snippet: Show playbook snippet for specified plugin(s).
    :param playbook_dir: This parameter is used to set the relative path to handle playbook adjacent installed plugins.
    :param module_path: This parameter prepends colon-separated path(s) to the module library
                        (default=~/.ansible/plugins/modules:/usr/share/ansible/plugins/modules).
    :param runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``.
    :param host_cwd: The host current working directory to be mounted within the container (if enabled) and will be
                     the work directory within container.
    :param envvars: Environment variables to be used when running Ansible. Environment variables will also be
                    read from ``env/envvars`` in ``private_data_dir``
    :param passwords: A dictionary containing password prompt patterns and response values used when processing output from
                      Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
    :param settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
                     be read from ``env/settings`` in ``private_data_dir``.
    :param ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
    :param quiet: Disable all output
    :param json_mode: Store event data in place of stdout on the console and in the stdout file
    :param artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir
    :param project_dir: The path to the playbook content, this defaults to 'project' within the private data dir
    :param rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default
    :param timeout: The timeout value in seconds that will be passed to either ``pexpect`` or ``subprocess`` invocation
                    (based on ``runner_mode`` selected) while executing command. If the timeout is triggered it will force cancel the
                    execution.
    :param process_isolation: Enable process isolation, using a container engine (e.g. podman).
    :param process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman)
    :param container_image: Container image to use when running an ansible task
    :param container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels. (default: None)
    :param container_options: List of container options to pass to execution engine.
    :param container_workdir: The working directory within the container.
    :param fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory.
                       This is only used for 'jsonfile' type fact caches.
    :param fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
    :param private_data_dir: The directory containing all runner metadata needed to invoke the runner
                             module. Output artifacts will also be stored here for later consumption.
    :param ident: The run identifier for this invocation of Runner. Will be used to create and name
                  the artifact directory holding the results of the invocation.
    :param event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event
    :param cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False)
    :param finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
    :param status_handler: An optional callback that will be invoked any time the status changes (e.g. started, running, failed, successful, timeout)
    :param artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run.
    :param check_job_event_data: Check if job events data is completely generated. If event data is not completely generated and if
                                 value is set to 'True' it will raise 'AnsibleRunnerException' exception,
                                 if set to 'False' it logs a debug message and continues execution. Default value is 'False'

    :type plugin_names: list
    :type plugin_type: str
    :type response_format: str
    :type snippet: bool
    :type playbook_dir: str
    :type module_path: str
    :type runner_mode: str
    :type host_cwd: str
    :type envvars: dict
    :type passwords: dict
    :type settings: dict
    :type private_data_dir: str
    :type project_dir: str
    :type artifact_dir: str
    :type fact_cache_type: str
    :type fact_cache: str
    :type process_isolation: bool
    :type process_isolation_executable: str
    :type container_image: str
    :type container_volume_mounts: list
    :type container_options: list
    :type container_workdir: str
    :type ident: str
    :type rotate_artifacts: int
    :type timeout: int
    :type ssh_key: str
    :type quiet: bool
    :type json_mode: bool
    :type event_handler: Callable
    :type cancel_callback: Callable
    :type finished_callback: Callable
    :type status_handler: Callable
    :type artifacts_handler: Callable
    :type check_job_event_data: bool

    :returns: Returns a tuple of response and error string. In case if ``runner_mode`` is set to ``pexpect`` the error value is empty as
              ``pexpect`` uses same output descriptor for stdout and stderr. If the value of ``response_format`` is ``json``
              it returns a python dictionary object.
    '''
    r = init_plugin_docs_config(plugin_names, plugin_type=plugin_type, response_format=response_format,
                                snippet=snippet, playbook_dir=playbook_dir, module_path=module_path, **kwargs)
    r.run()
    # Read the captured streams after the run completes.
    with r.stdout as stdout, r.stderr as stderr:
        response = stdout.read()
        error = stderr.read()
    # Only decode JSON when json output was requested and something was produced.
    if response and response_format == 'json':
        response = json.loads(sanitize_json_response(response))
    return response, error
def get_plugin_docs_async(plugin_names, plugin_type=None, response_format=None, snippet=False, playbook_dir=None, module_path=None, **kwargs):
    '''
    Run an ansible-doc command in the background, starting immediately.

    This uses the same parameters as :py:func:`ansible_runner.interface.get_plugin_docs`

    :returns: A tuple containing a :py:class:`threading.Thread` object and a :py:class:`ansible_runner.runner.Runner` object
    '''
    runner = init_plugin_docs_config(plugin_names, plugin_type=plugin_type, response_format=response_format,
                                     snippet=snippet, playbook_dir=playbook_dir, module_path=module_path, **kwargs)
    thread = threading.Thread(target=runner.run)
    thread.start()
    return thread, runner
def get_plugin_list(list_files=None, response_format=None, plugin_type=None, playbook_dir=None, module_path=None, **kwargs):
    '''
    Run an ansible-doc command to get the list of installed Ansible plugins.

    :param bool list_files: When set to ``True``, the file path of each plugin is returned along with the plugin name.
    :param str response_format: The output format for the response. Valid values are ``json`` or ``human`` and the
        response is either a JSON string or plain text in human readable format. Default value is ``json``.
    :param str plugin_type: The type of plugins to list. Valid values are ``become``, ``cache``, ``callback``,
        ``cliconf``, ``connection``, ``httpapi``, ``inventory``, ``lookup``, ``netconf``, ``shell``, ``vars``,
        ``module``, ``strategy``. If the value is not provided it defaults to ``module``.
    :param str playbook_dir: Sets the relative path to handle playbook adjacent installed plugins.
    :param str module_path: Prepends colon-separated path(s) to the module library
        (default=~/.ansible/plugins/modules:/usr/share/ansible/plugins/modules).
    :param str runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``.
    :param str host_cwd: The host current working directory to be mounted within the container (if enabled) and will be
        the work directory within container.
    :param dict envvars: Environment variables to be used when running Ansible. Environment variables will also be
        read from ``env/envvars`` in ``private_data_dir``.
    :param dict passwords: A dictionary containing password prompt patterns and response values used when processing
        output from Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
    :param dict settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment.
        These will also be read from ``env/settings`` in ``private_data_dir``.
    :param str ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
    :param bool quiet: Disable all output.
    :param bool json_mode: Store event data in place of stdout on the console and in the stdout file.
    :param str artifact_dir: The path to the directory where artifacts should live; this defaults to 'artifacts'
        under the private data dir.
    :param str project_dir: The path to the playbook content; this defaults to 'project' within the private data dir.
    :param int rotate_artifacts: Keep at most n artifact directories; disable with a value of 0, which is the default.
    :param int timeout: The timeout value in seconds that will be passed to either ``pexpect`` or ``subprocess``
        invocation (based on ``runner_mode`` selected) while executing command. If the timeout is triggered it will
        force cancel the execution.
    :param bool process_isolation: Enable process isolation, using a container engine (e.g. podman).
    :param str process_isolation_executable: Process isolation executable or container engine used to isolate
        execution. (default: podman)
    :param str container_image: Container image to use when running an ansible task.
    :param list container_volume_mounts: List of bind mounts in the form ``host_dir:/container_dir:labels``.
        (default: None)
    :param list container_options: List of container options to pass to execution engine.
    :param str container_workdir: The working directory within the container.
    :param str fact_cache: A string that will be used as the name for the subdirectory of the fact cache in the
        artifacts directory. This is only used for 'jsonfile' type fact caches.
    :param str fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
    :param str private_data_dir: The directory containing all runner metadata needed to invoke the runner
        module. Output artifacts will also be stored here for later consumption.
    :param str ident: The run identifier for this invocation of Runner. Will be used to create and name
        the artifact directory holding the results of the invocation.
    :param Callable event_handler: An optional callback that will be invoked any time an event is received by
        Runner itself; return True to keep the event.
    :param Callable cancel_callback: An optional callback that can inform runner to cancel (returning True)
        or not (returning False).
    :param Callable finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
    :param Callable status_handler: An optional callback that will be invoked any time the status changes
        (e.g. started, running, failed, successful, timeout).
    :param Callable artifacts_handler: An optional callback that will be invoked at the end of the run to deal
        with the artifacts from the run.
    :param bool check_job_event_data: Check if job events data is completely generated. If event data is not
        completely generated and the value is set to 'True' it will raise an 'AnsibleRunnerException' exception;
        if set to 'False' it logs a debug message and continues execution. Default value is 'False'.

    :returns: A tuple of response and error string. If ``runner_mode`` is set to ``pexpect`` the error value is
        empty, as ``pexpect`` uses the same output descriptor for stdout and stderr. If the value of
        ``response_format`` is ``json`` it returns a python dictionary object.
    '''
    # Extract the Runner callbacks so they are not passed through to DocConfig.
    event_callback_handler = kwargs.pop('event_handler', None)
    status_callback_handler = kwargs.pop('status_handler', None)
    artifacts_handler = kwargs.pop('artifacts_handler', None)
    cancel_callback = kwargs.pop('cancel_callback', None)
    finished_callback = kwargs.pop('finished_callback', None)

    rd = DocConfig(**kwargs)
    rd.prepare_plugin_list_command(list_files=list_files, response_format=response_format, plugin_type=plugin_type,
                                   playbook_dir=playbook_dir, module_path=module_path)
    r = Runner(rd,
               event_handler=event_callback_handler,
               status_handler=status_callback_handler,
               artifacts_handler=artifacts_handler,
               cancel_callback=cancel_callback,
               finished_callback=finished_callback)
    r.run()
    with r.stdout as stdout, r.stderr as stderr:
        response = stdout.read()
        error = stderr.read()
    # NOTE: parsing only happens when response_format is explicitly 'json';
    # with the default (None, which ansible-doc treats as json) the raw
    # string is returned unchanged.
    if response and response_format == 'json':
        response = json.loads(sanitize_json_response(response))
    return response, error
def get_inventory(action, inventories, response_format=None, host=None, playbook_dir=None,
                  vault_ids=None, vault_password_file=None, output_file=None, export=None, **kwargs):
    '''
    Run an ansible-inventory command to get inventory related details.

    :param str action: Valid values are one of ``graph``, ``host``, ``list``.
        ``graph`` returns the inventory graph.
        ``host`` returns info for a specific host and works as an inventory script.
        ``list`` returns info for all hosts and also works as an inventory script.
    :param list inventories: List of inventory host paths.
    :param str response_format: The output format for the response. Valid values are one of ``json``, ``yaml``,
        ``toml``. Default is ``json``. If ``action`` is ``graph`` the only allowed value is ``json``.
    :param str host: When ``action`` is set to ``host`` this parameter is used to get the host specific information.
    :param str playbook_dir: Sets the relative path for the inventory.
    :param str vault_ids: The vault identity to use.
    :param str vault_password_file: The vault password files to use.
    :param str output_file: The file path to which inventory details should be sent.
    :param bool export: When set, represent the inventory in a way that is optimized for export, not as an
        accurate representation of how Ansible has processed it.
    :param str runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``.
    :param str host_cwd: The host current working directory to be mounted within the container (if enabled) and will be
        the work directory within container.
    :param dict envvars: Environment variables to be used when running Ansible. Environment variables will also be
        read from ``env/envvars`` in ``private_data_dir``.
    :param dict passwords: A dictionary containing password prompt patterns and response values used when processing
        output from Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
    :param dict settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment.
        These will also be read from ``env/settings`` in ``private_data_dir``.
    :param str ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
    :param bool quiet: Disable all output.
    :param bool json_mode: Store event data in place of stdout on the console and in the stdout file.
    :param str artifact_dir: The path to the directory where artifacts should live; this defaults to 'artifacts'
        under the private data dir.
    :param str project_dir: The path to the playbook content; this defaults to 'project' within the private data dir.
    :param int rotate_artifacts: Keep at most n artifact directories; disable with a value of 0, which is the default.
    :param int timeout: The timeout value in seconds that will be passed to either ``pexpect`` or ``subprocess``
        invocation (based on ``runner_mode`` selected) while executing command. If the timeout is triggered it will
        force cancel the execution.
    :param bool process_isolation: Enable process isolation, using a container engine (e.g. podman).
    :param str process_isolation_executable: Process isolation executable or container engine used to isolate
        execution. (default: podman)
    :param str container_image: Container image to use when running an ansible task.
    :param list container_volume_mounts: List of bind mounts in the form ``host_dir:/container_dir:labels``.
        (default: None)
    :param list container_options: List of container options to pass to execution engine.
    :param str container_workdir: The working directory within the container.
    :param str fact_cache: A string that will be used as the name for the subdirectory of the fact cache in the
        artifacts directory. This is only used for 'jsonfile' type fact caches.
    :param str fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
    :param str private_data_dir: The directory containing all runner metadata needed to invoke the runner
        module. Output artifacts will also be stored here for later consumption.
    :param str ident: The run identifier for this invocation of Runner. Will be used to create and name
        the artifact directory holding the results of the invocation.
    :param Callable event_handler: An optional callback that will be invoked any time an event is received by
        Runner itself; return True to keep the event.
    :param Callable cancel_callback: An optional callback that can inform runner to cancel (returning True)
        or not (returning False).
    :param Callable finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
    :param Callable status_handler: An optional callback that will be invoked any time the status changes
        (e.g. started, running, failed, successful, timeout).
    :param Callable artifacts_handler: An optional callback that will be invoked at the end of the run to deal
        with the artifacts from the run.
    :param bool check_job_event_data: Check if job events data is completely generated. If event data is not
        completely generated and the value is set to 'True' it will raise an 'AnsibleRunnerException' exception;
        if set to 'False' it logs a debug message and continues execution. Default value is 'False'.

    :returns: A tuple of response and error string. If ``runner_mode`` is set to ``pexpect`` the error value is
        empty, as ``pexpect`` uses the same output descriptor for stdout and stderr. If the value of
        ``response_format`` is ``json`` it returns a python dictionary object.
    '''
    # Extract the Runner callbacks so they are not passed through to InventoryConfig.
    event_callback_handler = kwargs.pop('event_handler', None)
    status_callback_handler = kwargs.pop('status_handler', None)
    artifacts_handler = kwargs.pop('artifacts_handler', None)
    cancel_callback = kwargs.pop('cancel_callback', None)
    finished_callback = kwargs.pop('finished_callback', None)

    rd = InventoryConfig(**kwargs)
    rd.prepare_inventory_command(action=action, inventories=inventories, response_format=response_format, host=host,
                                 playbook_dir=playbook_dir, vault_ids=vault_ids,
                                 vault_password_file=vault_password_file, output_file=output_file, export=export)
    r = Runner(rd,
               event_handler=event_callback_handler,
               status_handler=status_callback_handler,
               artifacts_handler=artifacts_handler,
               cancel_callback=cancel_callback,
               finished_callback=finished_callback)
    r.run()
    with r.stdout as stdout, r.stderr as stderr:
        response = stdout.read()
        error = stderr.read()
    # NOTE: parsing only happens when response_format is explicitly 'json';
    # with the default (None) the raw string is returned unchanged.
    if response and response_format == 'json':
        response = json.loads(sanitize_json_response(response))
    return response, error
def get_ansible_config(action, config_file=None, only_changed=None, **kwargs):
    '''
    Run an ansible-config command to get ansible configuration related details.

    :param str action: Valid values are one of ``list``, ``dump``, ``view``.
        ``list`` returns all config options, ``dump`` returns the active configuration and
        ``view`` returns the view of the configuration file.
    :param str config_file: Path to the configuration file; defaults to the first file found in precedence.
    :param bool only_changed: When set to ``True``, return only the configurations that have changed
        from the default. This parameter is applicable only when ``action`` is set to ``dump``.
    :param str runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``.
    :param str host_cwd: The current working directory from which the command in executable_cmd should be executed.
    :param dict envvars: Environment variables to be used when running Ansible. Environment variables will also be
        read from ``env/envvars`` in ``private_data_dir``.
    :param dict passwords: A dictionary containing password prompt patterns and response values used when processing
        output from Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
    :param dict settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment.
        These will also be read from ``env/settings`` in ``private_data_dir``.
    :param str ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
    :param bool quiet: Disable all output.
    :param bool json_mode: Store event data in place of stdout on the console and in the stdout file.
    :param str artifact_dir: The path to the directory where artifacts should live; this defaults to 'artifacts'
        under the private data dir.
    :param str project_dir: The path to the playbook content; this defaults to 'project' within the private data dir.
    :param int rotate_artifacts: Keep at most n artifact directories; disable with a value of 0, which is the default.
    :param int timeout: The timeout value in seconds that will be passed to either ``pexpect`` or ``subprocess``
        invocation (based on ``runner_mode`` selected) while executing command. If the timeout is triggered it will
        force cancel the execution.
    :param bool process_isolation: Enable process isolation, using a container engine (e.g. podman).
    :param str process_isolation_executable: Process isolation executable or container engine used to isolate
        execution. (default: podman)
    :param str container_image: Container image to use when running an ansible task.
    :param list container_volume_mounts: List of bind mounts in the form ``host_dir:/container_dir:labels``.
        (default: None)
    :param list container_options: List of container options to pass to execution engine.
    :param str container_workdir: The working directory within the container.
    :param str fact_cache: A string that will be used as the name for the subdirectory of the fact cache in the
        artifacts directory. This is only used for 'jsonfile' type fact caches.
    :param str fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
    :param str private_data_dir: The directory containing all runner metadata needed to invoke the runner
        module. Output artifacts will also be stored here for later consumption.
    :param str ident: The run identifier for this invocation of Runner. Will be used to create and name
        the artifact directory holding the results of the invocation.
    :param Callable event_handler: An optional callback that will be invoked any time an event is received by
        Runner itself; return True to keep the event.
    :param Callable cancel_callback: An optional callback that can inform runner to cancel (returning True)
        or not (returning False).
    :param Callable finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
    :param Callable status_handler: An optional callback that will be invoked any time the status changes
        (e.g. started, running, failed, successful, timeout).
    :param Callable artifacts_handler: An optional callback that will be invoked at the end of the run to deal
        with the artifacts from the run.
    :param bool check_job_event_data: Check if job events data is completely generated. If event data is not
        completely generated and the value is set to 'True' it will raise an 'AnsibleRunnerException' exception;
        if set to 'False' it logs a debug message and continues execution. Default value is 'False'.

    :returns: A tuple of response and error string. If ``runner_mode`` is set to ``pexpect`` the error value is
        empty, as ``pexpect`` uses the same output descriptor for stdout and stderr.
    '''
    # Extract the Runner callbacks so they are not passed through to AnsibleCfgConfig.
    event_callback_handler = kwargs.pop('event_handler', None)
    status_callback_handler = kwargs.pop('status_handler', None)
    artifacts_handler = kwargs.pop('artifacts_handler', None)
    cancel_callback = kwargs.pop('cancel_callback', None)
    finished_callback = kwargs.pop('finished_callback', None)

    rd = AnsibleCfgConfig(**kwargs)
    rd.prepare_ansible_config_command(action=action, config_file=config_file, only_changed=only_changed)
    r = Runner(rd,
               event_handler=event_callback_handler,
               status_handler=status_callback_handler,
               artifacts_handler=artifacts_handler,
               cancel_callback=cancel_callback,
               finished_callback=finished_callback)
    r.run()
    with r.stdout as stdout, r.stderr as stderr:
        response = stdout.read()
        error = stderr.read()
    return response, error
def get_role_list(collection=None, playbook_dir=None, **kwargs):
    '''
    Run an ``ansible-doc`` command and return the list of installed collection roles.

    Only roles that define an argument specification are included in the result.

    .. note:: Version added: 2.2

    :param str collection: A fully qualified collection name used to filter the results.
    :param str playbook_dir: Sets the relative path to handle playbook adjacent installed roles.
    :param kwargs: Additional execution settings accepted by
        :py:class:`ansible_runner.config.doc.DocConfig` (``runner_mode``, ``host_cwd``, ``envvars``,
        ``passwords``, ``settings``, ``ssh_key``, ``quiet``, ``json_mode``, ``artifact_dir``,
        ``project_dir``, ``rotate_artifacts``, ``timeout``, ``process_isolation``,
        ``process_isolation_executable``, ``container_image``, ``container_volume_mounts``,
        ``container_options``, ``container_workdir``, ``fact_cache``, ``fact_cache_type``,
        ``private_data_dir``, ``ident``, ``check_job_event_data``) together with the optional
        Runner callbacks ``event_handler``, ``status_handler``, ``artifacts_handler``,
        ``cancel_callback`` and ``finished_callback``.

    :returns: A tuple of response and error string. The response is a dictionary object
        (as returned by ansible-doc JSON output) containing each role found, or an empty dict
        if none are found.
    '''
    # Pull the Runner callbacks out of kwargs before handing the rest to DocConfig.
    callback_kwargs = {name: kwargs.pop(name, None)
                       for name in ('event_handler', 'status_handler', 'artifacts_handler',
                                    'cancel_callback', 'finished_callback')}

    doc_config = DocConfig(**kwargs)
    doc_config.prepare_role_list_command(collection, playbook_dir)
    runner = Runner(doc_config, **callback_kwargs)
    runner.run()

    with runner.stdout as out_fd, runner.stderr as err_fd:
        response = out_fd.read()
        error = err_fd.read()
    if response:
        response = json.loads(sanitize_json_response(response))
    return response, error
def get_role_argspec(role, collection=None, playbook_dir=None, **kwargs):
    '''
    Run an ``ansible-doc`` command to get a role argument specification.

    .. note:: Version added: 2.2

    :param str role: Simple role name, or fully qualified collection role name, to query.
    :param str collection: If specified, will be combined with the role name to form a fully qualified collection
        role name. If this is supplied, the ``role`` param should not be fully qualified.
    :param str playbook_dir: Sets the relative path to handle playbook adjacent installed roles.
    :param str runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``.
    :param str host_cwd: The host current working directory to be mounted within the container (if enabled) and will be
        the work directory within container.
    :param dict envvars: Environment variables to be used when running Ansible. Environment variables will also be
        read from ``env/envvars`` in ``private_data_dir``.
    :param dict passwords: A dictionary containing password prompt patterns and response values used when processing
        output from Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
    :param dict settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment.
        These will also be read from ``env/settings`` in ``private_data_dir``.
    :param str ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
    :param bool quiet: Disable all output.
    :param bool json_mode: Store event data in place of stdout on the console and in the stdout file.
    :param str artifact_dir: The path to the directory where artifacts should live; this defaults to 'artifacts'
        under the private data dir.
    :param str project_dir: The path to the playbook content; this defaults to 'project' within the private data dir.
    :param int rotate_artifacts: Keep at most n artifact directories; disable with a value of 0, which is the default.
    :param int timeout: The timeout value in seconds that will be passed to either ``pexpect`` or ``subprocess``
        invocation (based on ``runner_mode`` selected) while executing command. If the timeout is triggered, it will
        force cancel the execution.
    :param bool process_isolation: Enable process isolation using a container engine, such as podman.
    :param str process_isolation_executable: Process isolation executable or container engine used to isolate
        execution. (default: podman)
    :param str container_image: Container image to use when running an Ansible task.
    :param list container_volume_mounts: List of bind mounts in the form ``host_dir:/container_dir:labels``.
        (default: None)
    :param list container_options: List of container options to pass to execution engine.
    :param str container_workdir: The working directory within the container.
    :param str fact_cache: A string that will be used as the name for the subdirectory of the fact cache in the
        artifacts directory. This is only used for 'jsonfile' type fact caches.
    :param str fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
    :param str private_data_dir: The directory containing all runner metadata needed to invoke the runner
        module. Output artifacts will also be stored here for later consumption.
    :param str ident: The run identifier for this invocation of Runner. Will be used to create and name
        the artifact directory holding the results of the invocation.
    :param Callable event_handler: An optional callback that will be invoked any time an event is received by
        Runner itself; return True to keep the event.
    :param Callable cancel_callback: An optional callback that can inform runner to cancel (returning True)
        or not (returning False).
    :param Callable finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
    :param Callable status_handler: An optional callback that will be invoked any time the status changes
        (for example: started, running, failed, successful, timeout).
    :param Callable artifacts_handler: An optional callback that will be invoked at the end of the run to deal
        with the artifacts from the run.
    :param bool check_job_event_data: Check if job events data is completely generated. If event data is not
        completely generated and the value is set to 'True' it will raise an 'AnsibleRunnerException' exception.
        If set to 'False', log a debug message and continue execution. Default value is 'False'.

    :returns: A tuple of response and error string. The response is a dictionary object
        (as returned by ansible-doc JSON output) containing the argument specification of the
        requested role, or an empty dict if it is not found.
    '''
    # Extract the Runner callbacks so they are not passed through to DocConfig.
    event_callback_handler = kwargs.pop('event_handler', None)
    status_callback_handler = kwargs.pop('status_handler', None)
    artifacts_handler = kwargs.pop('artifacts_handler', None)
    cancel_callback = kwargs.pop('cancel_callback', None)
    finished_callback = kwargs.pop('finished_callback', None)

    rd = DocConfig(**kwargs)
    rd.prepare_role_argspec_command(role, collection, playbook_dir)
    r = Runner(rd,
               event_handler=event_callback_handler,
               status_handler=status_callback_handler,
               artifacts_handler=artifacts_handler,
               cancel_callback=cancel_callback,
               finished_callback=finished_callback)
    r.run()
    with r.stdout as stdout, r.stderr as stderr:
        response = stdout.read()
        error = stderr.read()
    if response:
        response = json.loads(sanitize_json_response(response))
    return response, error
ansible-runner-2.4.1/src/ansible_runner/loader.py 0000664 0000000 0000000 00000013713 14770573620 0022077 0 ustar 00root root 0000000 0000000 #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import annotations
import os
import json
import codecs
from typing import Any, Dict
from yaml import safe_load, YAMLError
from ansible_runner.exceptions import ConfigurationError
from ansible_runner.output import debug
class ArtifactLoader:
    '''
    Handles loading and caching file contents from disk

    This class will load the file contents and attempt to deserialize the
    contents as either JSON or YAML.  If the file contents cannot be
    deserialized, the contents will be returned to the caller as a string.

    The deserialized file contents are stored as a cached object in the
    instance to avoid any additional reads from disk for subsequent calls
    to load the same file.
    '''

    def __init__(self, base_path: str):
        # Cache maps absolute file path -> deserialized (or raw) contents.
        self._cache: Dict[str, Any] = {}
        # Directory used to resolve relative paths passed to abspath()/load_file().
        self.base_path = base_path

    def _load_json(self, contents: str) -> dict | None:
        '''
        Attempts to deserialize the contents of a JSON object

        :param str contents: The contents to deserialize.

        :return: A dict if the contents are JSON serialized,
            otherwise returns None.
        '''
        try:
            return json.loads(contents)
        except ValueError:
            return None

    def _load_yaml(self, contents: str) -> dict | None:
        '''
        Attempts to deserialize the contents of a YAML object.

        :param str contents: The contents to deserialize.

        :return: A dict if the contents are YAML serialized,
            otherwise returns None.
        '''
        try:
            return safe_load(contents)
        except YAMLError:
            return None

    def _get_contents(self, path: str) -> str:
        '''
        Loads the contents of the file specified by path

        :param str path: The relative or absolute path to the file to
            be loaded.  If the path is relative, then it is combined
            with the base_path to generate a full path string

        :return: The contents of the file as a string
        :raises: ConfigurationError if the file cannot be loaded.
        '''
        try:
            if not os.path.exists(path):
                raise ConfigurationError(f"specified path does not exist {path}")
            with codecs.open(path, encoding='utf-8') as f:
                data = f.read()
            return data
        except (IOError, OSError) as exc:
            raise ConfigurationError(f"error trying to load file contents: {exc}") from exc

    def abspath(self, path: str) -> str:
        '''
        Transform the path to an absolute path

        :param str path: The path to transform to an absolute path

        :return: The absolute path to the file.
        '''
        # Relative paths (and '~'-prefixed paths) are resolved against base_path;
        # already-absolute paths are returned unchanged.
        if not path.startswith(os.path.sep) or path.startswith('~'):
            path = os.path.expanduser(os.path.join(self.base_path, path))
        return path

    def isfile(self, path: str) -> bool:
        '''
        Check if the path is a file

        :param str path: The path to the file to check.  If the path is relative
            it will be expanded to an absolute path

        :return: True if path is a file, False otherwise.
        '''
        return os.path.isfile(self.abspath(path))

    def load_file(self, path: str, objtype: Any | None = None, encoding='utf-8') -> bytes | str | dict | None:
        '''
        Load the file specified by path

        This method will first try to load the file contents from cache and
        if there is a cache miss, it will load the contents from disk

        :param str path: The full or relative path to the file to be loaded.
        :param Any objtype: The object type of the file contents.  This
            is used to type check the deserialized content against the
            contents loaded from disk.  Ignore serializing if objtype is str.
            Only Mapping or str types are supported.
        :param str encoding: The file contents text encoding.

        :return: The deserialized file contents which could be either a
            string object or a dict object
        :raises: ConfigurationError on error during file load or deserialization.
        '''
        # parsed_data can end up as bytes (encoded raw contents), a dict/list
        # (deserialized JSON/YAML), a str, or None (both deserializers failed).
        parsed_data: bytes | str | dict | None
        path = self.abspath(path)
        debug(f"file path is {path}")

        if path in self._cache:
            return self._cache[path]

        try:
            debug(f"cache miss, attempting to load file from disk: {path}")
            contents = parsed_data = self._get_contents(path)
            # When an encoding is given, the fallback value (used when the
            # contents do not deserialize) is the encoded bytes, not the str.
            if encoding:
                parsed_data = contents.encode(encoding)
        except ConfigurationError as exc:
            debug(str(exc))
            raise
        except UnicodeEncodeError as exc:
            raise ConfigurationError('unable to encode file contents') from exc

        if objtype is not str:
            # Try JSON first, then YAML; the first truthy result wins.
            # NOTE(review): a document that deserializes to a falsy value
            # (e.g. empty mapping) falls through to the next deserializer.
            for deserializer in (self._load_json, self._load_yaml):
                parsed_data = deserializer(contents)
                if parsed_data:
                    break

        if objtype and not isinstance(parsed_data, objtype):
            debug(f"specified file {path} is not of type {objtype}")
            raise ConfigurationError('invalid file serialization type for contents')

        self._cache[path] = parsed_data
        return parsed_data
ansible-runner-2.4.1/src/ansible_runner/output.py 0000664 0000000 0000000 00000006015 14770573620 0022166 0 ustar 00root root 0000000 0000000 #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys
import logging
DEBUG_ENABLED = False
TRACEBACK_ENABLED = True
_display_logger = logging.getLogger('ansible-runner.display')
_debug_logger = logging.getLogger('ansible-runner.debug')
def display(msg: str, log_only: bool = False) -> None:
    '''
    Emit a message to the display logger and always to the debug logger.

    :param str msg: The message to emit.
    :param bool log_only: When True, skip the display (stdout) logger and
        only record the message with the debug logger.
    '''
    if not log_only:
        # Level 70 matches the display logger threshold set in configure().
        _display_logger.log(70, msg)
    _debug_logger.log(10, msg)
def debug(msg: str) -> None:
    '''
    Emit a debug message when debugging is enabled (see set_debug()).

    NOTE(review): despite the ``str`` annotation, callers also pass Exception
    instances; when TRACEBACK_ENABLED is set these are logged with a full
    traceback before being displayed.
    '''
    if DEBUG_ENABLED:
        if isinstance(msg, Exception):
            if TRACEBACK_ENABLED:
                _debug_logger.exception(msg)
        # The message (or exception) is displayed regardless of traceback logging.
        display(msg)
def set_logfile(filename: str) -> None:
    '''
    Attach a file handler named ``logfile`` to the debug logger.

    Idempotent: if a handler with that name is already registered, this is
    a no-op, so repeated calls do not duplicate log output.

    :param str filename: Path of the log file to append to.
    '''
    if any(h.get_name() == 'logfile' for h in _debug_logger.handlers):
        return
    handler = logging.FileHandler(filename)
    handler.set_name('logfile')
    handler.setFormatter(logging.Formatter('%(asctime)s: %(message)s'))
    _debug_logger.addHandler(handler)
def set_debug(value: str) -> None:
    '''
    Turn debug output on or off.

    :param str value: Either ``enable`` or ``disable`` (case-insensitive).
    :raises ValueError: If value is not one of the accepted strings.
    '''
    global DEBUG_ENABLED
    normalized = value.lower()
    if normalized not in ('enable', 'disable'):
        raise ValueError(f"value must be one of `enable` or `disable`, got {value}")
    DEBUG_ENABLED = normalized == 'enable'
def set_traceback(value: str) -> None:
    '''
    Turn traceback logging for exception debug messages on or off.

    :param str value: Either ``enable`` or ``disable`` (case-insensitive).
    :raises ValueError: If value is not one of the accepted strings.
    '''
    global TRACEBACK_ENABLED
    normalized = value.lower()
    if normalized not in ('enable', 'disable'):
        raise ValueError(f"value must be one of `enable` or `disable`, got {value}")
    TRACEBACK_ENABLED = normalized == 'enable'
def configure() -> None:
    '''
    Configures the logging facility

    This function will setup an initial logging facility for handling display
    and debug outputs.  The default facility will send display messages to
    stdout and the default debug facility will do nothing.

    :returns: None
    '''
    # Silence the root logger so only our named loggers emit anything.
    root_logger = logging.getLogger()
    root_logger.addHandler(logging.NullHandler())
    root_logger.setLevel(99)

    _display_logger.setLevel(70)
    _debug_logger.setLevel(10)

    # Install the stdout handler at most once, keyed by handler name.
    if not any(h.get_name() == 'stdout' for h in _display_logger.handlers):
        stdout_handler = logging.StreamHandler(sys.stdout)
        stdout_handler.set_name('stdout')
        stdout_handler.setFormatter(logging.Formatter('%(message)s'))
        _display_logger.addHandler(stdout_handler)
ansible-runner-2.4.1/src/ansible_runner/plugins/ 0000775 0000000 0000000 00000000000 14770573620 0021733 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/src/ansible_runner/plugins/__init__.py 0000664 0000000 0000000 00000000000 14770573620 0024032 0 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/src/ansible_runner/runner.py 0000664 0000000 0000000 00000062361 14770573620 0022145 0 ustar 00root root 0000000 0000000 import os
import stat
import time
import json
import errno
import signal
from subprocess import Popen, PIPE, CalledProcessError, TimeoutExpired, run as run_subprocess
import shutil
import codecs
import collections
import datetime
import logging
import traceback
import pexpect
import ansible_runner.plugins
from ansible_runner.output import debug
from .utils import OutputEventFilter, cleanup_artifact_dir, ensure_str, collect_new_events
from .exceptions import CallbackError, AnsibleRunnerException
logger = logging.getLogger('ansible-runner')
class Runner:
def __init__(self, config, cancel_callback=None, remove_partials=True, event_handler=None,
artifacts_handler=None, finished_callback=None, status_handler=None):
self.config = config
self.cancel_callback = cancel_callback
self.event_handler = event_handler
self.artifacts_handler = artifacts_handler
self.finished_callback = finished_callback
self.status_handler = status_handler
self.canceled = False
self.timed_out = False
self.errored = False
self.status = "unstarted"
self.rc = None
self.remove_partials = remove_partials
self.last_stdout_update = 0.0
# default runner mode to pexpect
self.runner_mode = self.config.runner_mode if hasattr(self.config, 'runner_mode') else 'pexpect'
self.directory_isolation_path = self.config.directory_isolation_path if hasattr(self.config, 'directory_isolation_path') else None
self.directory_isolation_cleanup = self.config.directory_isolation_cleanup if hasattr(self.config, 'directory_isolation_cleanup') else None
self.process_isolation = self.config.process_isolation if hasattr(self.config, 'process_isolation') else None
self.process_isolation_path_actual = self.config.process_isolation_path_actual if hasattr(self.config, 'process_isolation_path_actual') else None
def event_callback(self, event_data):
    '''
    Invoked for every Ansible event to collect stdout with the event data and store it for
    later use

    Merges the matching ``<uuid>-partial.json`` data written by the stdout
    callback plugin into ``event_data``, invokes the user event handler and any
    registered plugins, then atomically writes the completed event into the
    ``job_events`` artifact directory.

    :param dict event_data: raw event dictionary; it is only persisted when it
        carries a ``uuid`` key (``counter`` is also used to name the file).
    '''
    self.last_stdout_update = time.time()
    if 'uuid' in event_data:
        filename = f"{event_data['uuid']}-partial.json"
        partial_filename = os.path.join(self.config.artifact_dir,
                                        'job_events',
                                        filename)
        full_filename = os.path.join(self.config.artifact_dir,
                                     'job_events',
                                     f"{event_data['counter']}-{event_data['uuid']}.json"
                                     )
        try:
            event_data.update({'runner_ident': str(self.config.ident)})
            try:
                # Merge the partial event data the callback plugin wrote for this uuid.
                with codecs.open(partial_filename, 'r', encoding='utf-8') as read_file:
                    partial_event_data = json.load(read_file)
                event_data.update(partial_event_data)
                if self.remove_partials:
                    os.remove(partial_filename)
            except IOError as e:
                msg = "Failed to open ansible stdout callback plugin partial data" \
                      f" file {partial_filename} with error {str(e)}"
                debug(msg)
                # Missing partial data is fatal only when strict event checking is on.
                if self.config.check_job_event_data:
                    raise AnsibleRunnerException(msg) from e

            # prefer 'created' from partial data, but verbose events set time here
            if 'created' not in event_data:
                event_data['created'] = datetime.datetime.now(datetime.timezone.utc).isoformat()

            # The user handler's return value decides whether the event is persisted.
            if self.event_handler is not None:
                should_write = self.event_handler(event_data)
            else:
                should_write = True
            for plugin in ansible_runner.plugins:
                ansible_runner.plugins[plugin].event_handler(self.config, event_data)
            if should_write:
                # Write to a .tmp file then rename so readers never observe a
                # partially-written JSON event file.
                temporary_filename = full_filename + '.tmp'
                with codecs.open(temporary_filename, 'w', encoding='utf-8') as write_file:
                    os.chmod(temporary_filename, stat.S_IRUSR | stat.S_IWUSR)
                    json.dump(event_data, write_file)
                os.rename(temporary_filename, full_filename)
        except IOError as e:
            debug(f"Failed writing event data: {e}")
def status_callback(self, status):
    '''
    Record the new run status and fan it out to registered plugins and the
    optional ``status_handler``.

    :param str status: the new status value (e.g. ``starting``, ``running``,
        ``successful``, ``failed``, ``timeout``, ``canceled``).
    '''
    self.status = status
    status_data = {'status': status, 'runner_ident': str(self.config.ident)}
    if status == 'starting':
        # The 'starting' notification carries the full invocation details.
        status_data['command'] = self.config.command
        status_data['env'] = self.config.env
        status_data['cwd'] = self.config.cwd
    for plugin in ansible_runner.plugins:
        ansible_runner.plugins[plugin].status_handler(self.config, status_data)
    if self.status_handler is not None:
        self.status_handler(status_data, runner_config=self.config)
def run(self):
    '''
    Launch the Ansible task configured in self.config (A RunnerConfig object), returns once the
    invocation is complete

    Sets up the artifact directory, dispatches to either the ``subprocess`` or
    ``pexpect`` runner mode, records status/rc artifacts, performs isolation
    cleanup, and invokes the artifacts/finished callbacks.

    :returns: a two-tuple of (status, rc) for the completed invocation.
    '''
    # pylint: disable=R1732
    password_patterns = []
    password_values = []

    self.status_callback('starting')
    command_filename = os.path.join(self.config.artifact_dir, 'command')

    # Create the artifact dir; tolerate it already existing (EEXIST).
    try:
        os.makedirs(self.config.artifact_dir, mode=0o700)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(self.config.artifact_dir):
            pass
        else:
            raise

    job_events_path = os.path.join(self.config.artifact_dir, 'job_events')
    if not os.path.exists(job_events_path):
        os.mkdir(job_events_path, 0o700)

    command = self.config.command
    # Persist the invocation (command/cwd/env) as an owner-only artifact.
    with codecs.open(command_filename, 'w', encoding='utf-8') as f:
        os.chmod(command_filename, stat.S_IRUSR | stat.S_IWUSR)
        json.dump(
            {'command': command,
             'cwd': self.config.cwd,
             'env': self.config.env}, f, ensure_ascii=False
        )

    if self.config.ident is not None:
        cleanup_artifact_dir(os.path.join(self.config.artifact_dir, ".."), self.config.rotate_artifacts)

    if hasattr(self.config, 'suppress_ansible_output'):
        suppress_ansible_output = self.config.suppress_ansible_output
    else:
        suppress_ansible_output = False

    # stdout/stderr artifact files are optional; the OutputEventFilter
    # interposers below work either way (with None they only emit events).
    if not self.config.suppress_output_file:
        stdout_filename = os.path.join(self.config.artifact_dir, 'stdout')
        stderr_filename = os.path.join(self.config.artifact_dir, 'stderr')
        os.close(os.open(stdout_filename, os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR))
        stdout_handle = codecs.open(stdout_filename, 'w', encoding='utf-8')
        stderr_handle = codecs.open(stderr_filename, 'w', encoding='utf-8')
    else:
        stdout_handle = None
        stderr_handle = None

    stdout_handle = OutputEventFilter(stdout_handle, self.event_callback, suppress_ansible_output, output_json=self.config.json_mode)
    stderr_handle = OutputEventFilter(stderr_handle, self.event_callback, suppress_ansible_output, output_json=self.config.json_mode)

    if self.runner_mode == 'pexpect' and not isinstance(self.config.expect_passwords, collections.OrderedDict):
        # We iterate over `expect_passwords.keys()` and
        # `expect_passwords.values()` separately to map matched inputs to
        # patterns and choose the proper string to send to the subprocess;
        # enforce usage of an OrderedDict so that the ordering of elements in
        # `keys()` matches `values()`.
        expect_passwords = collections.OrderedDict(self.config.expect_passwords)
        password_patterns = list(expect_passwords.keys())
        password_values = list(expect_passwords.values())

    # pexpect needs all env vars to be utf-8 encoded bytes
    # https://github.com/pexpect/pexpect/issues/512

    # Use a copy so as not to cause problems when serializing the job_env.
    if self.config.containerized:
        # We call the actual docker or podman executable right where we are
        cwd = os.getcwd()
        # If this is containerized, the shell environment calling podman has little
        # to do with the actual job environment, but still needs PATH, auth, etc.
        pexpect_env = os.environ.copy()
        # But we still rely on env vars to pass secrets
        pexpect_env.update(self.config.env)
        # Write the keys to pass into container to expected file in artifacts dir
        # option expecting should have already been written in ansible_runner.config.runner
        env_file_host = os.path.join(self.config.artifact_dir, 'env.list')
        with open(env_file_host, 'w') as f:
            f.write(
                '\n'.join(
                    [f"{key}={value}" for key, value in self.config.env.items()]
                )
            )
    else:
        cwd = self.config.cwd
        pexpect_env = self.config.env

    env = {
        ensure_str(k): ensure_str(v) if k != 'PATH' and isinstance(v, str) else v
        for k, v in pexpect_env.items()
    }

    self.status_callback('running')
    self.last_stdout_update = time.time()

    # The subprocess runner interface provides stdin/stdout/stderr with streaming capability
    # to the caller if input_fd/output_fd/error_fd is passed to config class.
    # Also, provides a workaround for known issue in pexpect for long running non-interactive process
    # https://pexpect.readthedocs.io/en/stable/commonissues.html#truncated-output-just-before-child-exits
    if self.runner_mode == 'subprocess':
        if hasattr(self.config, 'input_fd') and self.config.input_fd:
            input_fd = self.config.input_fd
        else:
            input_fd = None
        if hasattr(self.config, 'output_fd') and self.config.output_fd:
            output_fd = self.config.output_fd
        else:
            output_fd = PIPE
        if hasattr(self.config, 'error_fd') and self.config.error_fd:
            error_fd = self.config.error_fd
        else:
            error_fd = PIPE

        subprocess_timeout = self.config.subprocess_timeout if hasattr(self.config, 'subprocess_timeout') else None
        try:
            stdout_response = ''
            stderr_response = ''
            kwargs = {
                'cwd': cwd,
                'env': env,
                'stdin': input_fd,
                'stdout': output_fd,
                'stderr': error_fd,
                'universal_newlines': True,
            }
            if subprocess_timeout is not None:
                kwargs.update({'timeout': subprocess_timeout})
            proc_out = run_subprocess(command, check=True, **kwargs)
            stdout_response = proc_out.stdout
            stderr_response = proc_out.stderr
            self.rc = proc_out.returncode
        except CalledProcessError as exc:
            # Non-zero exit: record rc/output and mark the run errored.
            logger.debug("%s execution failed, returncode: %s, output: %s, stdout: %s, stderr: %s",
                         exc.cmd, exc.returncode, exc.output, exc.stdout, exc.stderr)
            self.rc = exc.returncode
            self.errored = True
            stdout_response = exc.stdout
            stderr_response = exc.stderr
        except TimeoutExpired as exc:
            logger.debug("%s execution timedout, timeout: %s, output: %s, stdout: %s, stderr: %s",
                         exc.cmd, exc.timeout, exc.output, exc.stdout, exc.stderr)
            # 254 is the conventional rc for timeout/cancel in this module.
            self.rc = 254
            stdout_response = exc.stdout
            stderr_response = exc.stderr
            self.timed_out = True
        except Exception as exc:
            stderr_response = traceback.format_exc()
            self.rc = 254
            self.errored = True
            logger.debug("received exception: %s", exc)

        if self.timed_out or self.errored:
            self.kill_container()

        # Funnel captured output through the event-filter handles so events
        # and the stdout/stderr artifacts are produced.
        if stdout_response is not None:
            if isinstance(stdout_response, bytes):
                stdout_response = stdout_response.decode()
            stdout_handle.write(stdout_response)
        if stderr_response is not None:
            if isinstance(stderr_response, bytes):
                stderr_response = stderr_response.decode()
            stderr_handle.write(stderr_response)

        stdout_handle.close()
        stderr_handle.close()
    else:
        try:
            child = pexpect.spawn(
                command[0],
                command[1:],
                cwd=cwd,
                env=env,
                ignore_sighup=True,
                encoding='utf-8',
                codec_errors='replace',
                echo=False,
                use_poll=self.config.pexpect_use_poll,
            )
            child.logfile_read = stdout_handle
        except pexpect.exceptions.ExceptionPexpect as e:
            # Spawning failed; substitute a stub that mimics the pexpect child
            # API so the bookkeeping below still runs, with rc 127.
            child = collections.namedtuple(
                'MissingProcess', 'exitstatus isalive expect close'
            )(
                exitstatus=127,
                isalive=lambda: False,
                expect=lambda *args, **kwargs: None,
                close=lambda: None,
            )

            # create the events directory (the callback plugin won't run, so it
            # won't get created)
            events_directory = os.path.join(self.config.artifact_dir, 'job_events')
            if not os.path.exists(events_directory):
                os.mkdir(events_directory, 0o700)
            stdout_handle.write(str(e))
            stdout_handle.write('\n')

        job_start = time.time()
        while child.isalive():
            # Wait for a password prompt (or timeout) and answer it if matched.
            result_id = child.expect(password_patterns, timeout=self.config.pexpect_timeout, searchwindowsize=100)
            password = password_values[result_id]
            if password is not None:
                child.sendline(password)
                self.last_stdout_update = time.time()
            if self.cancel_callback:
                try:
                    self.canceled = self.cancel_callback()
                except Exception as e:
                    stdout_handle.close()
                    stderr_handle.close()
                    # TODO: logger.exception('Could not check cancel callback - cancelling immediately')
                    # if isinstance(extra_update_fields, dict):
                    #     extra_update_fields['job_explanation'] = "System error during job execution, check system logs"
                    raise CallbackError(f"Exception in Cancel Callback: {e}") from e
            if self.config.job_timeout and not self.canceled and (time.time() - job_start) > self.config.job_timeout:
                self.timed_out = True
                # if isinstance(extra_update_fields, dict):
                #     extra_update_fields['job_explanation'] = "Job terminated due to timeout"
            if self.canceled or self.timed_out or self.errored:
                self.kill_container()
                Runner.handle_termination(child.pid)
            if self.config.idle_timeout and (time.time() - self.last_stdout_update) > self.config.idle_timeout:
                self.kill_container()
                Runner.handle_termination(child.pid)
                self.timed_out = True

        # fix for https://github.com/ansible/ansible-runner/issues/1330
        # Since we're (ab)using pexpect's logging callback as our source of stdout data, we need to pump the stream one last
        # time, in case any new output was written by the child between the last return from expect and its termination. Ideally
        # this would have an arbitrarily large timeout value as well, in case a ridiculous amount of data was written, but just
        # invoking one last pump should cover the vast majority of real-world cases.
        child.expect(pexpect.EOF, timeout=5)

        # close the child to ensure no more output will be written before we close the stream interposers
        child.close()
        stdout_handle.close()
        stderr_handle.close()
        self.rc = child.exitstatus if not (self.timed_out or self.canceled) else 254

    # Translate the run outcome into a final status notification.
    if self.canceled:
        self.status_callback('canceled')
    elif self.rc == 0 and not self.timed_out:
        self.status_callback('successful')
    elif self.timed_out:
        self.status_callback('timeout')
    else:
        self.status_callback('failed')

    # Persist the final status and rc as owner-only artifact files.
    for filename, data in [
        ('status', self.status),
        ('rc', self.rc),
    ]:
        artifact_path = os.path.join(self.config.artifact_dir, filename)
        if not os.path.exists(artifact_path):
            os.close(os.open(artifact_path, os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR))
        with open(artifact_path, 'w') as f:
            f.write(str(data))

    if self.directory_isolation_path and self.directory_isolation_cleanup:
        shutil.rmtree(self.directory_isolation_path)
    if self.process_isolation and self.process_isolation_path_actual:
        def _delete(retries=15):
            # Retry on EBUSY (errno 16) in case the mount is still settling.
            try:
                shutil.rmtree(self.process_isolation_path_actual)
            except OSError as e:
                res = False
                if e.errno == 16 and retries > 0:
                    time.sleep(1)
                    res = _delete(retries=retries - 1)
                if not res:
                    raise
            return True
        _delete()

    if self.artifacts_handler is not None:
        try:
            self.artifacts_handler(self.config.artifact_dir)
        except Exception as e:
            raise CallbackError(f"Exception in Artifact Callback: {e}") from e

    if self.finished_callback is not None:
        try:
            self.finished_callback(self)
        except Exception as e:
            raise CallbackError(f"Exception in Finished Callback: {e}") from e
    return self.status, self.rc
@property
def stdout(self):
'''
Returns an open file handle to the stdout representing the Ansible run
'''
stdout_path = os.path.join(self.config.artifact_dir, 'stdout')
if not os.path.exists(stdout_path):
raise AnsibleRunnerException("stdout missing")
return open(os.path.join(self.config.artifact_dir, 'stdout'), 'r')
@property
def stderr(self):
'''
Returns an open file handle to the stderr representing the Ansible run
'''
stderr_path = os.path.join(self.config.artifact_dir, 'stderr')
if not os.path.exists(stderr_path):
raise AnsibleRunnerException("stderr missing")
return open(os.path.join(self.config.artifact_dir, 'stderr'), 'r')
@property
def events(self):
    '''
    A generator that will return all ansible job events in the order that they were emitted from Ansible

    :Example:

    .. code-block::

        {
           "event": "runner_on_ok",
           "uuid": "00a50d9c-161a-4b74-b978-9f60becaf209",
           "stdout": "ok: [localhost] => {\\r\\n    \\" msg\\":\\"Test!\\"\\r\\n}",
           "counter": 6,
           "pid": 740,
           "created": "2018-04-05T18:24:36.096725",
           "end_line": 10,
           "start_line": 7,
           "event_data": {
              "play_pattern": "all",
              "play": "all",
              "task": "debug",
              "task_args": "msg=Test!",
              "remote_addr": "localhost",
              "res": {
                 "msg": "Test!",
                 "changed": false,
                 "_ansible_verbose_always": true,
                 "_ansible_no_log": false
              },
              "pid": 740,
              "play_uuid": "0242ac11-0002-443b-cdb1-000000000006",
              "task_uuid": "0242ac11-0002-443b-cdb1-000000000008",
              "event_loop": null,
              "playbook_uuid": "634edeee-3228-4c17-a1b4-f010fdd42eb2",
              "playbook": "test.yml",
              "task_action": "debug",
              "host": "localhost",
              "task_path": "/tmp/demo/project/test.yml:3"
           }
        }
    '''
    # collection of all the events that were yielded
    old_events = {}
    event_path = os.path.join(self.config.artifact_dir, 'job_events')

    # Wait for events dir to be created (give up after 60 seconds).
    now = datetime.datetime.now()
    while not os.path.exists(event_path):
        time.sleep(0.05)
        wait_time = datetime.datetime.now() - now
        if wait_time.total_seconds() > 60:
            raise AnsibleRunnerException(f"events directory is missing: {event_path}")

    # Poll for new event files while the job is still running.
    while self.status == "running":
        for event, old_evnts in collect_new_events(event_path, old_events):
            old_events = old_evnts
            yield event

    # collect new events that were written after the playbook has finished
    for event, old_evnts in collect_new_events(event_path, old_events):
        old_events = old_evnts
        yield event
@property
def stats(self):
    '''
    Returns the final high level stats from the Ansible run

    Example:
        {'dark': {}, 'failures': {}, 'skipped': {}, 'ok': {u'localhost': 2}, 'processed': {u'localhost': 1}}

    :returns: a dict of per-category host stats, or None when no
        ``playbook_on_stats`` event was emitted.
    '''
    stats_events = [evt for evt in self.events
                    if 'event' in evt and evt['event'] == 'playbook_on_stats']
    if not stats_events:
        return None
    event_data = stats_events[0]['event_data']
    categories = ('skipped', 'ok', 'dark', 'failures',
                  'ignored', 'rescued', 'processed', 'changed')
    # Missing categories default to an empty mapping.
    return {category: event_data.get(category, {}) for category in categories}
def host_events(self, host):
    '''
    Given a host name, this will return all task events executed on that host

    :param host: the inventory host name to filter events by.
    :returns: a lazy iterator over the matching event dicts.
    '''
    return (
        evt for evt in self.events
        if 'event_data' in evt
        and 'host' in evt['event_data']
        and evt['event_data']['host'] == host
    )
def kill_container(self):
    '''
    Internal method to terminate a container being used for job isolation

    No-op when no container name is configured; a failed kill is logged
    rather than raised.
    '''
    container_name = self.config.container_name
    if not container_name:
        return
    container_cli = self.config.process_isolation_executable
    with Popen([container_cli, 'kill', container_name], stdout=PIPE, stderr=PIPE) as proc:
        _, stderr = proc.communicate()
        if proc.returncode:
            logger.info("Error from %s kill %s command:\n%s",
                        container_cli, container_name, stderr)
        else:
            logger.info("Killed container %s", container_name)
@classmethod
def handle_termination(cls, pid, pidfile=None):
'''
Internal method to terminate a subprocess spawned by ``pexpect`` representing an invocation of runner.
:param pid: the process id of the running the job.
:param pidfile: the daemon's PID file
'''
try:
pgroup = os.getpgid(pid)
os.killpg(pgroup, signal.SIGKILL)
except (OSError, ProcessLookupError):
pass
try:
os.remove(pidfile)
except (TypeError, OSError):
pass
def get_fact_cache(self, host):
'''
Get the entire fact cache only if the fact_cache_type is 'jsonfile'
'''
if self.config.fact_cache_type != 'jsonfile':
raise Exception('Unsupported fact cache type. Only "jsonfile" is supported for reading and writing facts from ansible-runner')
fact_cache = os.path.join(self.config.fact_cache, host)
if os.path.exists(fact_cache):
with open(fact_cache) as f:
return json.loads(f.read())
return {}
def set_fact_cache(self, host, data):
'''
Set the entire fact cache data only if the fact_cache_type is 'jsonfile'
'''
if self.config.fact_cache_type != 'jsonfile':
raise Exception('Unsupported fact cache type. Only "jsonfile" is supported for reading and writing facts from ansible-runner')
fact_cache = os.path.join(self.config.fact_cache, host)
if not os.path.exists(os.path.dirname(fact_cache)):
os.makedirs(os.path.dirname(fact_cache), mode=0o700)
with open(fact_cache, 'w') as f:
return f.write(json.dumps(data))
ansible-runner-2.4.1/src/ansible_runner/runner_config.py 0000664 0000000 0000000 00000001647 14770573620 0023472 0 ustar 00root root 0000000 0000000 ############################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# pylint: disable=W0401,W0614
# to maintain backward compatibility
from ansible_runner.config.runner import * # noqa
ansible-runner-2.4.1/src/ansible_runner/streaming.py 0000664 0000000 0000000 00000034765 14770573620 0022634 0 ustar 00root root 0000000 0000000 from __future__ import annotations # allow newer type syntax until 3.10 is our minimum
import codecs
import json
import os
import stat
import sys
import tempfile
import uuid
import traceback
from collections.abc import Mapping
from functools import wraps
from threading import Event, RLock, Thread
import ansible_runner
from ansible_runner.exceptions import ConfigurationError
from ansible_runner.loader import ArtifactLoader
import ansible_runner.plugins
from ansible_runner.utils import register_for_cleanup
from ansible_runner.utils.streaming import stream_dir, unstream_dir
class UUIDEncoder(json.JSONEncoder):
    '''JSON encoder that serializes ``uuid.UUID`` values as their hex string.'''

    def default(self, o):
        # Anything that is not a UUID defers to the base class, which raises
        # TypeError for unserializable objects.
        if not isinstance(o, uuid.UUID):
            return json.JSONEncoder.default(self, o)
        return o.hex
class MockConfig:
    '''
    Minimal stand-in for a runner config object on the processing side of a
    remote job; only ``settings`` carries data, the remaining attributes exist
    so code that inspects config attributes does not fail.
    '''

    def __init__(self, settings):
        self.settings = settings
        # Placeholders matching the real config interface.
        self.command = None
        self.cwd = None
        self.env = None
class Transmitter:
    '''
    Serializes a job's keyword arguments (and, unless ``only_transmit_kwargs``
    is set, the zipped private data directory) onto a binary output stream for
    consumption by a remote Worker.
    '''

    def __init__(self, _output=None, **kwargs):
        # Default to the raw stdout buffer so the binary zip payload can
        # follow the JSON control lines on the same stream.
        self._output = sys.stdout.buffer if _output is None else _output
        self.private_data_dir = os.path.abspath(kwargs.pop('private_data_dir'))
        self.only_transmit_kwargs = kwargs.pop('only_transmit_kwargs', False)
        # Worker-only option; strip it so older remote runners are not
        # confused by an unknown kwarg.
        kwargs.pop('keepalive_seconds', None)

        self.kwargs = kwargs
        self.status = "unstarted"
        self.rc = None

    def run(self):
        # First line: the job kwargs as a JSON control message.
        self._output.write(
            json.dumps({'kwargs': self.kwargs}, cls=UUIDEncoder).encode('utf-8')
        )
        self._output.write(b'\n')
        self._output.flush()

        # Then the zipped private data dir, unless kwargs-only mode is on.
        if not self.only_transmit_kwargs:
            stream_dir(self.private_data_dir, self._output)

        # Final line: end-of-feed marker.
        self._output.write(json.dumps({'eof': True}).encode('utf-8'))
        self._output.write(b'\n')
        self._output.flush()

        return self.status, self.rc
class Worker:
def __init__(self, _input=None, _output=None, keepalive_seconds: float | None = None, **kwargs):
if _input is None:
_input = sys.stdin.buffer
if _output is None:
_output = sys.stdout.buffer
if keepalive_seconds is None: # if we didn't get an explicit int value, fall back to envvar
# FIXME: emit/log a warning and silently continue if this value won't parse
keepalive_seconds = float(os.environ.get('ANSIBLE_RUNNER_KEEPALIVE_SECONDS', 0))
self._keepalive_interval_sec = keepalive_seconds
self._keepalive_thread: Thread | None = None
self._output_event = Event()
self._output_lock = RLock()
self._input = _input
self._output = _output
self.kwargs = kwargs
self.job_kwargs = None
private_data_dir = kwargs.get('private_data_dir')
if private_data_dir is None:
private_data_dir = tempfile.mkdtemp()
register_for_cleanup(private_data_dir)
self.private_data_dir = private_data_dir
self.status = "unstarted"
self.rc = None
def _begin_keepalive(self):
"""Starts a keepalive thread at most once"""
if not self._keepalive_thread:
self._keepalive_thread = Thread(target=self._keepalive_loop, daemon=True)
self._keepalive_thread.start()
def _end_keepalive(self):
"""Disable the keepalive interval and notify the keepalive thread to shut down"""
self._keepalive_interval_sec = 0
self._output_event.set()
def _keepalive_loop(self):
"""Main loop for keepalive injection thread; exits when keepalive interval is <= 0"""
# pylint: disable=R1732
while self._keepalive_interval_sec > 0:
# block until output has occurred or keepalive interval elapses
if self._output_event.wait(timeout=self._keepalive_interval_sec):
# output was sent before keepalive timeout; reset the event and start waiting again
self._output_event.clear()
continue
# keepalive interval elapsed; try to send a keepalive...
# pre-acquire the output lock without blocking
if not self._output_lock.acquire(blocking=False):
# something else has the lock; output is imminent, so just skip this keepalive
# NB: a long-running operation under an event handler that's holding this lock but not actually moving
# output could theoretically block keepalives long enough to cause problems, but it's probably not
# worth the added locking hassle to be pedantic about it
continue
try:
# were keepalives recently disabled?
if self._keepalive_interval_sec <= 0:
# we're probably shutting down; don't risk corrupting output by writing now, just bail out
return
# output a keepalive event
# FIXME: this could be a lot smaller (even just `{}`) if a short-circuit discard was guaranteed in
# Processor or if other layers were more defensive about missing event keys and/or unknown dictionary
# values...
self.event_handler({'event': 'keepalive', 'counter': 0, 'uuid': 0})
finally:
# always release the output lock (
self._output_lock.release()
# NOTE: This should be decorated with staticmethod, but until our minimum supported
# Python is 3.10 (which allows static methods to be called as regular functions), we
# cannot decorate it as such, and must ignore typing errors at call locations.
def _synchronize_output_reset_keepalive(wrapped_method):
    """
    Utility decorator to synchronize event writes and flushes to avoid keepalives splatting in the middle of
    mid-write events, and reset keepalive interval on write completion.

    The wrapped method runs while holding self._output_lock (so the keepalive
    thread cannot interleave its own writes); self._output_event is set after
    the write so the keepalive timeout restarts from the end of the write.
    """
    # pylint: disable=E0213,E1102,W0212
    @wraps(wrapped_method)
    def wrapper(self, *args, **kwargs):
        with self._output_lock:
            ret = wrapped_method(self, *args, **kwargs)
            # signal the keepalive thread last, so the timeout restarts after the last write, not before the first
            self._output_event.set()
            return ret

    return wrapper
def update_paths(self, kwargs):
    """Rebase relative role and inventory paths onto this worker's private data dir.

    Mutates and returns the supplied job kwargs: a caller-provided
    ANSIBLE_ROLES_PATH is joined under the private data dir's ``roles``
    folder, and an ``inventory`` value is joined under the private data dir.
    """
    envvars = kwargs.get('envvars')
    if envvars and 'ANSIBLE_ROLES_PATH' in envvars:
        envvars['ANSIBLE_ROLES_PATH'] = os.path.join(
            self.private_data_dir, 'roles', envvars['ANSIBLE_ROLES_PATH'])

    inventory = kwargs.get('inventory')
    if inventory:
        kwargs['inventory'] = os.path.join(self.private_data_dir, inventory)

    return kwargs
def run(self):
    """Consume the transmit stream from self._input and execute the job.

    Reads line-delimited JSON messages: 'kwargs' (job parameters), 'zipfile'
    (a private-data-dir payload follows on the stream), and 'eof' (start the
    job). Emits status/event/artifact output via the handler methods and
    returns a (status, rc) tuple.
    """
    self._begin_keepalive()

    try:
        while True:
            try:
                line = self._input.readline()
                data = json.loads(line)
            except (json.decoder.JSONDecodeError, IOError):
                # unparseable control line: report the error and terminate the stream
                self.status_handler({'status': 'error', 'job_explanation': 'Failed to JSON parse a line from transmit stream.'}, None)
                self.finished_callback(None)  # send eof line
                return self.status, self.rc

            if 'kwargs' in data:
                # job parameters; relative paths are rebased onto our private data dir
                self.job_kwargs = self.update_paths(data['kwargs'])
            elif 'zipfile' in data:
                # a zipped private data dir payload follows on the input stream
                try:
                    unstream_dir(self._input, data['zipfile'], self.private_data_dir)
                except Exception:
                    self.status_handler({
                        'status': 'error',
                        'job_explanation': 'Failed to extract private data directory on worker.',
                        'result_traceback': traceback.format_exc()
                    }, None)
                    self.finished_callback(None)  # send eof line
                    return self.status, self.rc
            elif 'eof' in data:
                # transmit phase complete; start running the job
                break

        self.kwargs.update(self.job_kwargs)
        self.kwargs['quiet'] = True
        self.kwargs['suppress_ansible_output'] = True
        self.kwargs['private_data_dir'] = self.private_data_dir
        self.kwargs['status_handler'] = self.status_handler
        self.kwargs['event_handler'] = self.event_handler
        self.kwargs['artifacts_handler'] = self.artifacts_handler
        self.kwargs['finished_callback'] = self.finished_callback

        r = ansible_runner.interface.run(**self.kwargs)
        self.status, self.rc = r.status, r.rc

        # FIXME: do cleanup on the tempdir
    finally:
        self._end_keepalive()

    return self.status, self.rc
@_synchronize_output_reset_keepalive  # type: ignore
def status_handler(self, status_data, runner_config):
    """Record the latest job status and emit it as a JSON line on the output stream."""
    # pylint: disable=W0613
    self.status = status_data['status']
    self._output.write(json.dumps(status_data).encode('utf-8'))
    self._output.write(b'\n')
    self._output.flush()
@_synchronize_output_reset_keepalive  # type: ignore
def event_handler(self, event_data):
    """Emit a job event as a JSON line on the output stream."""
    self._output.write(json.dumps(event_data).encode('utf-8'))
    self._output.write(b'\n')
    self._output.flush()
@_synchronize_output_reset_keepalive  # type: ignore
def artifacts_handler(self, artifact_dir):
    """Stream the zipped artifact directory onto the output stream."""
    stream_dir(artifact_dir, self._output)
    self._output.flush()
@_synchronize_output_reset_keepalive  # type: ignore
def finished_callback(self, runner_obj):
    """Terminate the output stream with an eof marker, disabling keepalives first."""
    # pylint: disable=W0613
    self._end_keepalive()  # ensure that we can't splat a keepalive event after the eof event
    self._output.write(json.dumps({'eof': True}).encode('utf-8'))
    self._output.write(b'\n')
    self._output.flush()
class Processor:
    """Consumes the line-delimited JSON stream produced by a Worker.

    Dispatches each message to the appropriate status/event/artifacts callback
    and persists job events into the local artifact directory.
    """

    def __init__(self, _input=None, status_handler=None, event_handler=None,
                 artifacts_handler=None, cancel_callback=None, finished_callback=None, **kwargs):
        if _input is None:
            _input = sys.stdin.buffer
        self._input = _input

        # when truthy, event stdout is not echoed to this process' stdout
        self.quiet = kwargs.get('quiet')

        private_data_dir = kwargs.get('private_data_dir')
        if private_data_dir is None:
            private_data_dir = tempfile.mkdtemp()
        self.private_data_dir = private_data_dir
        self._loader = ArtifactLoader(self.private_data_dir)

        settings = kwargs.get('settings')
        if settings is None:
            try:
                settings = self._loader.load_file('env/settings', Mapping)
            except ConfigurationError:
                # missing/invalid settings file: fall back to empty settings
                settings = {}
        self.config = MockConfig(settings)

        # artifact_dir override wins; otherwise use <private_data_dir>/artifacts[/<ident>]
        if kwargs.get('artifact_dir'):
            self.artifact_dir = os.path.abspath(kwargs.get('artifact_dir'))
        else:
            project_artifacts = os.path.abspath(os.path.join(self.private_data_dir, 'artifacts'))
            if ident := kwargs.get('ident'):
                self.artifact_dir = os.path.join(project_artifacts, str(ident))
            else:
                self.artifact_dir = project_artifacts

        self.status_handler = status_handler
        self.event_handler = event_handler
        self.artifacts_handler = artifacts_handler

        self.cancel_callback = cancel_callback  # FIXME: unused
        self.finished_callback = finished_callback

        self.status = "unstarted"
        self.rc = None

    def status_callback(self, status_data):
        """Track job status, notify plugins, and forward to the user status handler."""
        self.status = status_data['status']
        if self.status == 'starting':
            # capture the job's execution context from the initial status message
            self.config.command = status_data.get('command')
            self.config.env = status_data.get('env')
            self.config.cwd = status_data.get('cwd')

        for plugin in ansible_runner.plugins:
            ansible_runner.plugins[plugin].status_handler(self.config, status_data)

        if self.status_handler is not None:
            self.status_handler(status_data, runner_config=self.config)

    def event_callback(self, event_data):
        """Echo, forward, and persist a single job event to the job_events dir."""
        # FIXME: this needs to be more defensive to not blow up on "malformed" events or new values it doesn't recognize
        counter = event_data.get('counter')
        uuid_val = event_data.get('uuid')

        if not counter or not uuid_val:
            # FIXME: log a warning about a malformed event?
            return

        full_filename = os.path.join(self.artifact_dir,
                                     'job_events',
                                     f'{counter}-{uuid_val}.json')

        if not self.quiet and 'stdout' in event_data:
            print(event_data['stdout'])

        # a user event handler may veto persisting the event by returning falsy
        if self.event_handler is not None:
            should_write = self.event_handler(event_data)
        else:
            should_write = True
        for plugin in ansible_runner.plugins:
            ansible_runner.plugins[plugin].event_handler(self.config, event_data)
        if should_write:
            with codecs.open(full_filename, 'w', encoding='utf-8') as write_file:
                os.chmod(full_filename, stat.S_IRUSR | stat.S_IWUSR)
                json.dump(event_data, write_file)

    def artifacts_callback(self, artifacts_data):
        """Extract a zipped artifacts payload from the stream into the artifact dir."""
        length = artifacts_data['zipfile']
        unstream_dir(self._input, length, self.artifact_dir)

        if self.artifacts_handler is not None:
            self.artifacts_handler(self.artifact_dir)

    def run(self):
        """Main receive loop; dispatches stream messages until eof, returns (status, rc)."""
        job_events_path = os.path.join(self.artifact_dir, 'job_events')
        if not os.path.exists(job_events_path):
            os.makedirs(job_events_path, 0o700, exist_ok=True)

        while True:
            try:
                line = self._input.readline()
                data = json.loads(line)
            except (json.decoder.JSONDecodeError, IOError) as exc:
                self.status_callback({
                    'status': 'error',
                    'job_explanation': (
                        f'Failed to JSON parse a line from worker stream. Error: {exc} Line with invalid JSON data: {line[:1000]}'
                    )
                })
                break

            if 'status' in data:
                self.status_callback(data)
            elif 'zipfile' in data:
                self.artifacts_callback(data)
            elif 'eof' in data:
                break
            elif data.get('event') == 'keepalive':
                # just ignore keepalives
                continue
            else:
                self.event_callback(data)

        if self.finished_callback is not None:
            self.finished_callback(self)
        return self.status, self.rc
ansible-runner-2.4.1/src/ansible_runner/utils/ 0000775 0000000 0000000 00000000000 14770573620 0021412 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/src/ansible_runner/utils/__init__.py 0000664 0000000 0000000 00000041433 14770573620 0023530 0 ustar 00root root 0000000 0000000
from __future__ import annotations
import json
import sys
import re
import os
import stat
import fcntl
import shutil
import hashlib
import tempfile
import subprocess
import base64
import threading
from pathlib import Path
import pwd
from shlex import quote
import uuid
import codecs
import atexit
import signal
from codecs import StreamReaderWriter
from collections.abc import Callable, Iterable, MutableMapping
from io import StringIO
from typing import Any, Iterator
from ansible_runner.exceptions import ConfigurationError
def cleanup_folder(folder: str) -> bool:
    """Delete a directory tree, reporting whether anything was removed.

    :param str folder: Path of the directory to delete.
    :return: True when the tree was removed; False when the path did not
        exist or was not a directory.
    """
    try:
        shutil.rmtree(folder)
    except (FileNotFoundError, NotADirectoryError):
        return False
    return True
def register_for_cleanup(folder: str) -> None:
    """Arrange for ``folder`` to be deleted when the interpreter exits.

    The folder need not exist at the time when this is called; a missing
    folder is a no-op at exit time (cleanup_folder tolerates it).
    """
    atexit.register(cleanup_folder, folder)
def get_plugin_dir() -> str:
    """Return the absolute path of the bundled display_callback plugin directory."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.abspath(os.path.join(here, "..", "display_callback"))
def get_callback_dir() -> str:
    """Return the path of the 'callback' folder inside the plugin directory."""
    return os.path.join(get_plugin_dir(), 'callback')
def is_dir_owner(directory: str) -> bool:
    '''Returns True if current user is the owner of directory'''
    owner_name = Path(directory).owner()
    my_name = pwd.getpwuid(os.geteuid()).pw_name
    return bool(my_name == owner_name)
class Bunch:
    '''
    Collect a bunch of variables together in an object.

    This is a slight modification of Alex Martelli's and Doug Hudgeon's Bunch pattern.
    '''

    def __init__(self, **kwargs):
        self.update(**kwargs)

    def update(self, **kwargs):
        # store every keyword as an instance attribute
        for name, value in kwargs.items():
            setattr(self, name, value)

    def get(self, key):
        # look only at instance attributes, returning None when absent
        return vars(self).get(key)
def isplaybook(obj: Any) -> bool:
    '''
    Inspects the object and returns if it is a playbook

    :param Any obj: The object to be inspected by this function.

    :return: True if the object is a list and False if it is not.
    '''
    # strings and mappings are iterable but are never playbooks
    if isinstance(obj, (str, MutableMapping)):
        return False
    return isinstance(obj, Iterable)
def isinventory(obj: Any) -> bool:
    '''
    Inspects the object and returns if it is an inventory

    :param Any obj: The object to be inspected by this function.

    :return: True if the object is an inventory dict and False if it is not.
    '''
    # an inventory is either a path/content string or a mapping
    return isinstance(obj, str) or isinstance(obj, MutableMapping)
def check_isolation_executable_installed(isolation_executable: str) -> bool:
    '''
    Check that process isolation executable is installed.

    :param str isolation_executable: Executable name (e.g. podman, docker, bwrap).

    :return: True if the executable is installed, False otherwise.
    '''
    try:
        with subprocess.Popen([isolation_executable, '--version'],
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE) as proc:
            proc.communicate()
            return bool(proc.returncode == 0)
    except FileNotFoundError:
        # the executable is not on PATH at all
        return False
def dump_artifact(obj: str,
                  path: str,
                  filename: str | None = None
                  ) -> str:
    '''
    Write the artifact to disk at the specified path

    :param str obj: The string object to be dumped to disk in the specified
        path. The artifact filename will be automatically created.
    :param str path: The full path to the artifacts data directory.
    :param str filename: The name of file to write the artifact to.
        If the filename is not provided, then one will be generated.

    :return: The full path filename for the artifact that was generated.
    '''
    if not os.path.exists(path):
        os.makedirs(path, mode=0o700)

    new_digest = hashlib.sha1()
    new_digest.update(obj.encode(encoding='UTF-8'))

    if filename is None:
        _, artifact_path = tempfile.mkstemp(dir=path)
    else:
        artifact_path = os.path.join(path, filename)

    # hash any existing file so identical content is not rewritten
    existing_hexdigest = None
    if os.path.exists(artifact_path):
        existing_digest = hashlib.sha1()
        with open(artifact_path) as f:
            existing_digest.update(f.read().encode(encoding='UTF-8'))
        existing_hexdigest = existing_digest.hexdigest()

    if existing_hexdigest != new_digest.hexdigest():
        # serialize concurrent writers with an exclusive lock on a sidecar file
        lock_path = os.path.join(path, '.artifact_write_lock')
        lock_fd = os.open(lock_path, os.O_RDWR | os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR)
        fcntl.lockf(lock_fd, fcntl.LOCK_EX)

        try:
            with open(artifact_path, 'w') as f:
                os.chmod(artifact_path, stat.S_IRUSR | stat.S_IWUSR)
                f.write(str(obj))
        finally:
            fcntl.lockf(lock_fd, fcntl.LOCK_UN)
            os.close(lock_fd)
            os.remove(lock_path)

    return artifact_path
def cleanup_artifact_dir(path: str, num_keep: int = 0) -> None:
    """Rotate artifact subdirectories, keeping only the ``num_keep`` most recent.

    A ``num_keep`` below 1 disables artifact dir cleanup/rotation entirely.
    """
    if num_keep < 1:
        return
    entries = sorted(
        (os.path.join(path, name) for name in os.listdir(path)),
        key=os.path.getmtime
    )
    # oldest entries sort first; remove everything beyond the keep count
    stale_count = max(len(entries) - num_keep, 0)
    for stale in entries[:stale_count]:
        shutil.rmtree(stale)
def dump_artifacts(kwargs: dict) -> None:
    '''
    Introspect the kwargs and dump objects to disk

    Mutates ``kwargs`` in place: in-memory playbooks/roles/inventory and
    env-file contents are written under the private data dir and replaced
    (or removed) in the kwargs so the runner picks them up from disk.
    '''
    private_data_dir = kwargs.get('private_data_dir')
    if not private_data_dir:
        # no private data dir supplied; create a throwaway one
        private_data_dir = tempfile.mkdtemp()
        kwargs['private_data_dir'] = private_data_dir

    if not os.path.exists(private_data_dir):
        raise ValueError('private_data_dir path is either invalid or does not exist')

    if 'role' in kwargs:
        # synthesize a one-play playbook that applies the requested role
        role = {'name': kwargs.pop('role')}
        if 'role_vars' in kwargs:
            role['vars'] = kwargs.pop('role_vars')

        play = [{'hosts': kwargs.pop('hosts', 'all'), 'roles': [role]}]

        if kwargs.pop('role_skip_facts', False):
            play[0]['gather_facts'] = False

        kwargs['playbook'] = play

        if 'envvars' not in kwargs:
            kwargs['envvars'] = {}

        # make sure the private data dir's roles folder is searched, appended
        # after any caller-provided roles_path
        roles_path = kwargs.pop('roles_path', None)
        if not roles_path:
            roles_path = os.path.join(private_data_dir, 'roles')
        else:
            roles_path += f":{os.path.join(private_data_dir, 'roles')}"

        kwargs['envvars']['ANSIBLE_ROLES_PATH'] = roles_path

    playbook = kwargs.get('playbook')
    if playbook:
        # Ensure the play is a list of dictionaries
        if isinstance(playbook, MutableMapping):
            playbook = [playbook]

        if isplaybook(playbook):
            # in-memory playbook: write it under project/ and point at the file
            path = os.path.join(private_data_dir, 'project')
            kwargs['playbook'] = dump_artifact(json.dumps(playbook), path, 'main.json')

    obj = kwargs.get('inventory')
    if obj and isinventory(obj):
        path = os.path.join(private_data_dir, 'inventory')
        if isinstance(obj, MutableMapping):
            kwargs['inventory'] = dump_artifact(json.dumps(obj), path, 'hosts.json')
        elif isinstance(obj, str):
            if not os.path.exists(os.path.join(path, obj)):
                # not an existing file under inventory/: treat the string as inventory content
                kwargs['inventory'] = dump_artifact(obj, path, 'hosts')
            elif os.path.isabs(obj):
                kwargs['inventory'] = obj
            else:
                kwargs['inventory'] = os.path.join(path, obj)

    if not kwargs.get('suppress_env_files', False):
        # persist env inputs under env/ unless a file already exists for them
        for key in ('envvars', 'extravars', 'passwords', 'settings'):
            obj = kwargs.get(key)
            if obj and not os.path.exists(os.path.join(private_data_dir, 'env', key)):
                path = os.path.join(private_data_dir, 'env')
                dump_artifact(json.dumps(obj), path, key)
                kwargs.pop(key)

        for key in ('ssh_key', 'cmdline'):
            obj = kwargs.get(key)
            if obj and not os.path.exists(os.path.join(private_data_dir, 'env', key)):
                path = os.path.join(private_data_dir, 'env')
                dump_artifact(str(kwargs[key]), path, key)
                kwargs.pop(key)
def collect_new_events(event_path: str, old_events: dict) -> Iterator[tuple[dict, dict]]:
    '''
    Collect new events for the 'events' generator property

    Yields ``(event, old_events)`` for each not-yet-seen, fully written event
    file in ``event_path``, in counter order, marking each as seen in
    ``old_events``. Stops early if a file is not yet valid JSON (still being
    written).
    '''
    fresh_files = [
        name for name in os.listdir(event_path)
        if re.match("^[0-9]+-.+json$", name)
        and '-partial' not in name
        and name not in old_events.keys()
    ]
    # order by the numeric counter prefix of the filename
    fresh_files.sort(key=lambda name: int(name.split("-", 1)[0]))

    for name in fresh_files:
        with codecs.open(os.path.join(event_path, name), 'r', encoding='utf-8') as fh:
            try:
                event = json.load(fh)
            except ValueError:
                # event file not completely written yet; retry on next call
                break
            old_events[name] = True
            yield event, old_events
class OutputEventFilter:
    '''
    File-like object that looks for encoded job events in stdout data.

    Data written here is scanned for base64-encoded event payloads bracketed
    by \\x1b[K markers; decoded payloads are merged with the preceding stdout
    and handed to ``event_callback``. Text outside any payload is emitted as
    'verbose' events, one per complete line.
    '''

    # matches a base64 payload between \x1b[K markers; the interleaved
    # \x1b[<n>D cursor-move sequences are stripped before decoding
    EVENT_DATA_RE = re.compile(r'\x1b\[K((?:[A-Za-z0-9+/=]+\x1b\[\d+D)+)\x1b\[K')

    def __init__(self,
                 handle: StreamReaderWriter,
                 event_callback: Callable[[dict], None],
                 suppress_ansible_output: bool = False,
                 output_json: bool = False
                 ) -> None:
        self._event_callback = event_callback
        # monotonically increasing counter assigned to emitted events
        self._counter = 0
        # running stdout line offset carried across events
        self._start_line = 0
        self._handle = handle
        self._buffer = StringIO()
        # sliding window of the previous write so a marker split across two
        # writes is still detected
        self._last_chunk = ''
        # event data decoded from the most recent payload, pending its stdout
        self._current_event_data: dict | None = None
        self.output_json = output_json
        self.suppress_ansible_output = suppress_ansible_output

    def flush(self) -> None:
        # flush the underlying stdout file handle, if any
        if self._handle:
            self._handle.flush()

    def write(self, data: str) -> None:
        self._buffer.write(data)

        # keep a sliding window of the last chunk written so we can detect
        # event tokens and determine if we need to perform a search of the full
        # buffer
        should_search = '\x1b[K' in (self._last_chunk + data)
        self._last_chunk = data

        # Only bother searching the buffer if we recently saw a start/end
        # token (\x1b[K)
        # NB: while/else — the else branch runs only when should_search was
        # false from the start (no token seen); a break skips it
        while should_search:
            value = self._buffer.getvalue()
            match = self.EVENT_DATA_RE.search(value)
            if not match:
                break
            try:
                base64_data = re.sub(r'\x1b\[\d+D', '', match.group(1))
                event_data = json.loads(base64.b64decode(base64_data).decode('utf-8'))
            except ValueError:
                # undecodable payload: emit the buffered stdout with no event data
                event_data = {}
            event_data = self._emit_event(value[:match.start()], event_data)
            if not self.output_json:
                stdout_actual = event_data['stdout'] if 'stdout' in event_data else None
            else:
                stdout_actual = json.dumps(event_data)
            # everything after the payload stays buffered for the next pass
            remainder = value[match.end():]
            self._buffer = StringIO()
            self._buffer.write(remainder)

            if stdout_actual and stdout_actual != "{}":
                if not self.suppress_ansible_output:
                    sys.stdout.write(stdout_actual)
                    sys.stdout.write("\n")
                    sys.stdout.flush()
                if self._handle:
                    self._handle.write(stdout_actual + "\n")
                    self._handle.flush()

            self._last_chunk = remainder
        else:
            # Verbose stdout outside of event data context
            if data and '\n' in data and self._current_event_data is None:
                # emit events for all complete lines we know about
                lines = self._buffer.getvalue().splitlines(True)  # keep ends
                remainder = None
                # if last line is not a complete line, then exclude it
                if '\n' not in lines[-1]:
                    remainder = lines.pop()
                # emit all complete lines
                for line in lines:
                    self._emit_event(line)
                    if not self.suppress_ansible_output:
                        sys.stdout.write(line)
                    if self._handle:
                        self._handle.write(line)
                        self._handle.flush()
                self._buffer = StringIO()
                # put final partial line back on buffer
                if remainder:
                    self._buffer.write(remainder)

    def close(self) -> None:
        # emit anything still buffered, then signal end-of-stream with an EOF event
        value = self._buffer.getvalue()
        if value:
            self._emit_event(value)
            self._buffer = StringIO()
        self._event_callback({'event': 'EOF'})
        if self._handle:
            self._handle.close()

    def _emit_event(self,
                    buffered_stdout: str,
                    next_event_data: dict | None = None
                    ) -> dict:
        # Emit the buffered stdout attached to the pending event (if any) or
        # as line-per-event 'verbose' events, then stash next_event_data as
        # the new pending event when it carries a uuid.
        next_event_data = next_event_data or {}
        event_data: dict[str, Any]
        if self._current_event_data:
            # stdout belongs to the previously decoded payload event
            event_data = self._current_event_data
            stdout_chunks = [buffered_stdout]
        elif buffered_stdout:
            # no pending event: emit each line as its own 'verbose' event
            event_data = dict({'event': 'verbose'})
            stdout_chunks = buffered_stdout.splitlines(True)
        else:
            event_data = {}
            stdout_chunks = []

        for stdout_chunk in stdout_chunks:
            if event_data.get('event') == 'verbose':
                event_data['uuid'] = str(uuid.uuid4())
            self._counter += 1
            event_data['counter'] = self._counter
            event_data['stdout'] = stdout_chunk.rstrip('\n\r')
            n_lines = stdout_chunk.count('\n')
            event_data['start_line'] = self._start_line
            event_data['end_line'] = self._start_line + n_lines
            self._start_line += n_lines
            self._event_callback(event_data)

        if next_event_data.get('uuid', None):
            self._current_event_data = next_event_data
        else:
            self._current_event_data = None
        return event_data
def open_fifo_write(path: str, data: str | bytes) -> None:
    '''
    Opens the fifo named pipe in a new thread.

    This blocks the thread until an external process (such as ssh-agent)
    reads data from the pipe.
    '''
    os.mkfifo(path, stat.S_IRUSR | stat.S_IWUSR)
    # If the data is a string instead of bytes, convert it before writing the fifo
    payload = data.encode() if isinstance(data, str) else data

    def _writer(fifo_path, contents):
        with open(fifo_path, 'wb') as fh:
            fh.write(contents)

    threading.Thread(target=_writer, args=(path, payload)).start()
def args2cmdline(*args):
    """Join arguments into a single shell-safe command line string."""
    return ' '.join(quote(token) for token in args)
def ensure_str(s: Any, encoding='utf-8', errors='strict') -> str:
    """
    Coerce *s* to ``str``.

    - ``str`` -> ``str``
    - ``bytes`` -> decoded to ``str``

    :raises TypeError: for any other input type.
    """
    if isinstance(s, bytes):
        return s.decode(encoding, errors)
    if isinstance(s, str):
        return s
    raise TypeError(f"not expecting type '{type(s)}'")
def sanitize_container_name(original_name: str) -> str:
    """
    Docker and podman will only accept certain characters in container names

    This takes a given name from user-specified values and replaces the
    invalid characters so it can be used in docker/podman CLI commands

    :param str original_name: Container name containing potentially invalid characters
    """
    # everything outside [a-zA-Z0-9_-] becomes an underscore
    invalid_chars = '[^a-zA-Z0-9_-]'
    return re.sub(invalid_chars, '_', str(original_name))
def cli_mounts():
    """Return the default env passthroughs and bind-mount paths for containerized runs.

    Reads HOME from the environment to locate the invoking user's .ssh folder.
    """
    ssh_source = f"{os.environ['HOME']}/.ssh/"
    return [
        {
            'ENVS': ['SSH_AUTH_SOCK'],
            'PATHS': [
                {'src': ssh_source, 'dest': '/home/runner/.ssh/'},
                {'src': ssh_source, 'dest': '/root/.ssh/'},
                {'src': '/etc/ssh/ssh_known_hosts', 'dest': '/etc/ssh/ssh_known_hosts'},
            ],
        },
    ]
def sanitize_json_response(data: str) -> str:
    '''
    Removes warning message from response message emitted by Ansible
    command line utilities.

    :param str data: The string data to be sanitized
    '''
    # keep everything from the first '{' onward; (.|\n)* spans newlines
    match = re.compile("{(.|\n)*", re.MULTILINE).search(data)
    if match:
        data = match.group().strip()
    return data
def get_executable_path(name: str) -> str:
    """Resolve ``name`` on PATH, raising ConfigurationError when it cannot be found."""
    found = shutil.which(name)
    if found is None:
        raise ConfigurationError(f"{name} command not found")
    return found
def signal_handler() -> Callable[[], bool] | None:
    """Install SIGTERM/SIGINT handlers and return a zero-arg cancellation probe.

    Returns None when called off the main thread, since only the main thread
    is allowed to set a new signal handler.
    """
    # pylint: disable=W4902
    if threading.current_thread() is not threading.main_thread():
        return None

    cancelled = threading.Event()

    # closure to set signal event
    def _flag(number, frame):
        # pylint: disable=W0613
        cancelled.set()

    signal.signal(signal.SIGTERM, _flag)
    signal.signal(signal.SIGINT, _flag)

    return cancelled.is_set
ansible-runner-2.4.1/src/ansible_runner/utils/base64io.py 0000664 0000000 0000000 00000022365 14770573620 0023410 0 ustar 00root root 0000000 0000000 # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Base64 stream with context manager support."""
from __future__ import division
import base64
import io
import logging
import string
import math
LOGGER_NAME = "base64io"
try: # Python 3.5.0 and 3.5.1 have incompatible typing modules
from types import TracebackType # noqa pylint: disable=unused-import
from typing import (
IO,
AnyStr,
Optional,
Type,
)
except ImportError: # pragma: no cover
# We only actually need these imports when running the mypy checks
pass
__all__ = ("Base64IO",)
__version__ = "1.0.3"
_LOGGER = logging.getLogger(LOGGER_NAME)
def _to_bytes(data: AnyStr) -> bytes:
"""Convert input data from either string or bytes to bytes.
:param data: Data to convert
:returns: ``data`` converted to bytes
:rtype: bytes
"""
if isinstance(data, bytes):
return data
return data.encode("utf-8")
class Base64IO(io.IOBase):
    """Base64 stream with context manager support.

    Wraps a stream, base64-decoding read results before returning them and base64-encoding
    written bytes before writing them to the stream. Instances
    of this class are not reusable in order maintain consistency with the :class:`io.IOBase`
    behavior on ``close()``.

    .. note::

        Provides iterator and context manager interfaces.

    .. warning::

        Because up to two bytes of data must be buffered to ensure correct base64 encoding
        of all data written, this object **must** be closed after you are done writing to
        avoid data loss. If used as a context manager, we take care of that for you.

    :param wrapped: Stream to wrap
    """

    closed = False

    def __init__(self, wrapped: IO) -> None:
        """Check for required methods on wrapped stream and set up read buffer.

        :raises TypeError: if ``wrapped`` does not have attributes needed to determine the stream's state
        """
        required_attrs = ("read", "write", "close", "closed", "flush")
        if not all(hasattr(wrapped, attr) for attr in required_attrs):
            raise TypeError(
                f"Base64IO wrapped object must have attributes: {repr(sorted(required_attrs))}"
            )
        super().__init__()
        self.__wrapped = wrapped
        # decoded bytes beyond what the caller asked read() for
        self.__read_buffer = b""
        # 1-2 raw bytes that cannot yet form a whole base64 quantum
        self.__write_buffer = b""

    def __enter__(self):
        """Return self on enter."""
        return self

    def __exit__(self,
                 exc_type: Optional[Type[BaseException]],
                 exc_value: Optional[BaseException],
                 traceback: Optional[TracebackType]) -> None:
        """Properly close self on exit."""
        self.close()

    def close(self) -> None:
        """Close this stream, encoding and writing any buffered bytes is present.

        .. note::

            This does **not** close the wrapped stream.
        """
        if self.__write_buffer:
            self.__wrapped.write(base64.b64encode(self.__write_buffer))
            self.__write_buffer = b""
        self.closed = True

    def _passthrough_interactive_check(self, method_name: str) -> bool:
        """Attempt to call the specified method on the wrapped stream and return the result.

        If the method is not found on the wrapped stream, return False.

        :param str method_name: Name of method to call
        :rtype: bool
        """
        try:
            method = getattr(self.__wrapped, method_name)
        except AttributeError:
            return False
        return method()

    def writable(self) -> bool:
        """Determine if the stream can be written to.

        Delegates to wrapped stream when possible.
        Otherwise returns False.

        :rtype: bool
        """
        return self._passthrough_interactive_check("writable")

    def readable(self) -> bool:
        """Determine if the stream can be read from.

        Delegates to wrapped stream when possible.
        Otherwise returns False.

        :rtype: bool
        """
        return self._passthrough_interactive_check("readable")

    def flush(self) -> None:
        """Flush the write buffer of the wrapped stream."""
        return self.__wrapped.flush()

    def write(self, b: bytes) -> int:
        """Base64-encode the bytes and write them to the wrapped stream.

        Any bytes that would require padding for the next write call are buffered until the
        next write or close.

        .. warning::

            Because up to two bytes of data must be buffered to ensure correct base64 encoding
            of all data written, this object **must** be closed after you are done writing to
            avoid data loss. If used as a context manager, we take care of that for you.

        :param bytes b: Bytes to write to wrapped stream
        :raises ValueError: if called on closed Base64IO object
        :raises IOError: if underlying stream is not writable
        """
        if self.closed:
            raise ValueError("I/O operation on closed file.")

        if not self.writable():
            raise IOError("Stream is not writable")

        # Load any stashed bytes and clear the buffer
        _bytes_to_write = self.__write_buffer + b
        self.__write_buffer = b""

        # If an even base64 chunk or finalizing the stream, write through.
        # (base64 encodes complete 3-byte groups without needing padding)
        if len(_bytes_to_write) % 3 == 0:
            return self.__wrapped.write(base64.b64encode(_bytes_to_write))

        # We're not finalizing the stream, so stash the trailing bytes and encode the rest.
        trailing_byte_pos = -1 * (len(_bytes_to_write) % 3)
        self.__write_buffer = _bytes_to_write[trailing_byte_pos:]
        return self.__wrapped.write(base64.b64encode(_bytes_to_write[:trailing_byte_pos]))

    def _read_additional_data_removing_whitespace(self, data: bytes, total_bytes_to_read: int) -> bytes:
        """Read additional data from wrapped stream until we reach the desired number of bytes.

        .. note::

            All whitespace is ignored.

        :param bytes data: Data that has already been read from wrapped stream
        :param int total_bytes_to_read: Number of total non-whitespace bytes to read from wrapped stream
        :returns: ``total_bytes_to_read`` bytes from wrapped stream with no whitespace
        :rtype: bytes
        """
        if total_bytes_to_read is None:
            # If the requested number of bytes is None, we read the entire message, in which
            # case the base64 module happily removes any whitespace.
            return data

        _data_buffer = io.BytesIO()
        _data_buffer.write(b"".join(data.split()))
        _remaining_bytes_to_read = total_bytes_to_read - _data_buffer.tell()

        while _remaining_bytes_to_read > 0:
            _raw_additional_data = _to_bytes(self.__wrapped.read(_remaining_bytes_to_read))
            if not _raw_additional_data:
                # No more data to read from wrapped stream.
                break

            _data_buffer.write(b"".join(_raw_additional_data.split()))
            _remaining_bytes_to_read = total_bytes_to_read - _data_buffer.tell()
        return _data_buffer.getvalue()

    def read(self, b=-1) -> bytes:
        """Read bytes from wrapped stream, base64-decoding before return.

        .. note::

            The number of bytes requested from the wrapped stream is adjusted to return the
            requested number of bytes after decoding returned bytes.

        :param int b: Number of bytes to read
        :returns: Decoded bytes from wrapped stream
        :rtype: bytes
        """
        if self.closed:
            raise ValueError("I/O operation on closed file.")

        if not self.readable():
            raise IOError("Stream is not readable")

        if b is None or b < 0:
            b = -1
            _bytes_to_read = -1
        elif b == 0:
            _bytes_to_read = 0
        elif b > 0:
            # Calculate number of encoded bytes that must be read to get b raw bytes.
            # (4 encoded chars per 3 raw bytes, rounded up to a whole quantum)
            _bytes_to_read = int((b - len(self.__read_buffer)) * 4 / 3)
            _bytes_to_read = int(math.ceil(_bytes_to_read / 4.0) * 4.0)

        # Read encoded bytes from wrapped stream.
        data = _to_bytes(self.__wrapped.read(_bytes_to_read))
        # Remove whitespace from read data and attempt to read more data to get the desired
        # number of bytes.
        if any(char in data for char in string.whitespace.encode("utf-8")):
            data = self._read_additional_data_removing_whitespace(data, _bytes_to_read)

        results = io.BytesIO()
        # First, load any stashed bytes
        results.write(self.__read_buffer)
        # Decode encoded bytes.
        results.write(base64.b64decode(data))

        results.seek(0)
        output_data = results.read(b)
        # Stash any extra bytes for the next run.
        self.__read_buffer = results.read()

        return output_data
ansible-runner-2.4.1/src/ansible_runner/utils/capacity.py 0000664 0000000 0000000 00000003406 14770573620 0023564 0 ustar 00root root 0000000 0000000 from __future__ import annotations
import multiprocessing
import re
import uuid
from pathlib import Path
def get_cpu_count() -> int:
    """Return the number of logical CPUs reported by the interpreter."""
    # `multiprocessing` info: https://docs.python.org/3/library/multiprocessing.html
    return multiprocessing.cpu_count()
def get_mem_in_bytes() -> int | str:
    """Return total system memory in bytes, or an error string when undiscoverable.

    Reads the MemTotal line (reported in KiB) from /proc/meminfo, so a numeric
    result is only produced on Linux-style systems.

    :return: Memory capacity in bytes, or a human-readable error string when
        /proc/meminfo is missing or does not contain a MemTotal line.
    """
    try:
        with open('/proc/meminfo') as f:
            mem = f.read()
    except FileNotFoundError:
        return "The /proc/meminfo file could not found, memory capacity undiscoverable."

    matched = re.search(r'^MemTotal:\s+(\d+)', mem)
    if matched:
        # MemTotal is reported in KiB; convert to bytes
        mem_capacity = int(matched.groups()[0])
        return mem_capacity * 1024

    # Previously this path fell through and implicitly returned None, which
    # violated the declared `int | str` return type; report the failure instead.
    return "MemTotal not found in /proc/meminfo, memory capacity undiscoverable."
def ensure_uuid(uuid_file_path: Path | None = None, mode: int = 0o0600):
    """Return this host's persistent ansible-runner UUID, creating it if needed.

    :param uuid_file_path: Location of the UUID file; defaults to
        ``~/.ansible_runner_uuid``.
    :param mode: Permissions enforced on the UUID file.
    """
    if uuid_file_path is None:
        uuid_file_path = Path.home().joinpath('.ansible_runner_uuid')

    if not uuid_file_path.exists():
        # Generate a new UUID if file is not found
        return _set_uuid(uuid_file_path, mode)

    # Re-assert permissions, then return the stored value
    uuid_file_path.chmod(mode)
    return uuid_file_path.read_text().strip()
def _set_uuid(uuid_file_path: Path | None = None, mode: int = 0o0600):
if uuid_file_path is None:
uuid_file_path = Path.home().joinpath('.ansible_runner_uuid')
generated_uuid = str(uuid.uuid4())
if not uuid_file_path.exists():
# Ensure the file starts with correct permissions
uuid_file_path.touch(mode)
# Ensure the correct permissions if the file exists
uuid_file_path.chmod(mode)
# Store the newly-generated UUID in a new file in home dir
uuid_file_path.write_text(generated_uuid)
return generated_uuid
ansible-runner-2.4.1/src/ansible_runner/utils/importlib_compat.py 0000664 0000000 0000000 00000000270 14770573620 0025327 0 ustar 00root root 0000000 0000000 # pylint: disable=W0611
import sys
if sys.version_info < (3, 10):
import importlib_metadata # noqa: F401
else:
import importlib.metadata as importlib_metadata # noqa: F401
ansible-runner-2.4.1/src/ansible_runner/utils/streaming.py 0000664 0000000 0000000 00000011637 14770573620 0023765 0 ustar 00root root 0000000 0000000 # pylint: disable=R0914
import io
import time
import tempfile
import zipfile
import os
import json
import sys
import stat
from pathlib import Path
from .base64io import Base64IO
def stream_dir(source_directory: str, stream: io.FileIO) -> None:
    """Zip ``source_directory`` and stream it, base64-encoded, to ``stream``.

    A one-line JSON header of the form ``{"zipfile": <size-in-bytes>}`` is
    written first so the receiving side knows how many bytes of archive to
    decode (see ``unstream_dir``, which takes that size as ``length``).
    """
    with tempfile.NamedTemporaryFile() as tmp:
        with zipfile.ZipFile(
            tmp.name, "w", compression=zipfile.ZIP_DEFLATED, allowZip64=True, strict_timestamps=False
        ) as archive:
            if source_directory:
                for dirpath, dirs, files in os.walk(source_directory):
                    relpath = os.path.relpath(dirpath, source_directory)
                    if relpath == ".":
                        relpath = ""
                    # Walk both files and directories so empty dirs are archived too
                    for fname in files + dirs:
                        full_path = os.path.join(dirpath, fname)
                        # Magic to preserve symlinks
                        if os.path.islink(full_path):
                            archive_relative_path = os.path.relpath(dirpath, source_directory)
                            file_relative_path = os.path.join(archive_relative_path, fname)
                            zip_info = zipfile.ZipInfo(file_relative_path)
                            # create_system 3 marks the entry as Unix-made so the
                            # mode bits in external_attr are honored on extract
                            zip_info.create_system = 3
                            permissions = 0o777
                            # 0xA000 is the S_IFLNK file-type bit: entry is a symlink
                            permissions |= 0xA000
                            # Unix mode lives in the high 16 bits of external_attr
                            zip_info.external_attr = permissions << 16
                            # Store the link target string as the member's contents
                            archive.writestr(zip_info, os.readlink(full_path))
                        elif stat.S_ISFIFO(os.stat(full_path).st_mode):
                            # skip any pipes, as python hangs when attempting
                            # to open them.
                            # i.e. ssh_key_data that was never cleaned up
                            continue
                        else:
                            archive.write(
                                os.path.join(dirpath, fname), arcname=os.path.join(relpath, fname)
                            )
            # Redundant: the context manager also closes the archive; harmless.
            archive.close()

        zip_size = Path(tmp.name).stat().st_size

        with open(tmp.name, "rb") as source:
            # NOTE(review): comparing stream.name to "" looks suspect as a
            # "write to stdout" sentinel — confirm the intended sentinel value.
            if stream.name == "":
                target = sys.stdout.buffer
            else:
                target = stream
            # Size header first, then the base64-encoded archive body
            target.write(json.dumps({"zipfile": zip_size}).encode("utf-8") + b"\n")
            target.flush()

            with Base64IO(target) as encoded_target:
                for line in source:
                    encoded_target.write(line)
def unstream_dir(stream: io.FileIO, length: int, target_directory: str) -> None:
    """Decode ``length`` bytes of a base64-streamed zip from ``stream`` and
    extract it into ``target_directory``, preserving permissions, symlinks,
    and modification times.

    ``length`` is the raw (decoded) zip size announced by the sender's JSON
    header (see ``stream_dir``).
    """
    # NOTE: caller needs to process exceptions
    with tempfile.NamedTemporaryFile() as tmp:
        with open(tmp.name, "wb") as target:
            with Base64IO(stream) as source:
                remaining = length
                chunk_size = 1024 * 1000  # 1 MB
                while remaining != 0:
                    chunk_size = min(chunk_size, remaining)
                    data = source.read(chunk_size)
                    target.write(data)
                    # Assumes read() returned a full chunk; a short read here
                    # would desync the countdown — TODO confirm Base64IO
                    # cannot short-read before EOF.
                    remaining -= chunk_size

        with zipfile.ZipFile(tmp.name, "r") as archive:
            # Fancy extraction in order to preserve permissions
            # AWX relies on the execution bit, in particular, for inventory
            # https://www.burgundywall.com/post/preserving-file-perms-with-python-zipfile-module
            for info in archive.infolist():
                out_path = os.path.join(target_directory, info.filename)

                # Unix mode bits are stored in the high 16 bits of external_attr
                perms = info.external_attr >> 16
                mode = stat.filemode(perms)
                # stat.filemode renders symlinks with a leading 'l'
                is_symlink = mode[:1] == 'l'

                if os.path.exists(out_path):
                    if is_symlink:
                        # Remove stale links so extract() can rewrite them
                        os.remove(out_path)
                    elif stat.S_ISFIFO(os.stat(out_path).st_mode):
                        # remove any pipes, as python hangs when attempting
                        # to open them.
                        # i.e. ssh_key_data that was never cleaned up
                        os.remove(out_path)
                        continue
                    elif os.path.isdir(out_path):
                        # Special case, the important dirs were pre-created so don't try to chmod them
                        continue

                archive.extract(info.filename, path=target_directory)

                # Fancy logic to preserve modification times
                # AWX uses modification times to determine if new facts were written for a host
                # https://stackoverflow.com/questions/9813243/extract-files-from-zip-file-and-retain-mod-date
                date_time = time.mktime(info.date_time + (0, 0, -1))
                os.utime(out_path, times=(date_time, date_time))

                if is_symlink:
                    # extract() wrote the link target as a regular file's
                    # contents; replace that file with a real symlink
                    with open(out_path) as fd:
                        link = fd.read()
                    os.remove(out_path)
                    os.symlink(link, out_path)
                else:
                    os.chmod(out_path, perms)
ansible-runner-2.4.1/test/ 0000775 0000000 0000000 00000000000 14770573620 0015434 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/__init__.py 0000664 0000000 0000000 00000000000 14770573620 0017533 0 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/conftest.py 0000664 0000000 0000000 00000005346 14770573620 0017643 0 ustar 00root root 0000000 0000000 # pylint: disable=W0621
import shutil
from pathlib import Path
from packaging.version import Version
import pytest
from ansible_runner import defaults
from ansible_runner.utils.importlib_compat import importlib_metadata
CONTAINER_RUNTIMES = (
'docker',
'podman',
)
@pytest.fixture(autouse=True)
def mock_env_user(monkeypatch):
    # Silence the Ansible devel-branch warning for every test in the suite
    monkeypatch.setenv("ANSIBLE_DEVEL_WARNING", "False")
@pytest.fixture(autouse=True)
def change_save_path(tmp_path, mocker):
    # Redirect auto-created runner directories into the per-test tmp_path
    mocker.patch.object(defaults, 'AUTO_CREATE_DIR', str(tmp_path))
@pytest.fixture(scope='session')
def is_pre_ansible211():
    """
    Check if the version of Ansible is less than 2.11.

    CI tests with either ansible-core (>=2.11), ansible-base (==2.10), and ansible (<=2.9).
    """
    try:
        core_version = importlib_metadata.version("ansible-core")
    except importlib_metadata.PackageNotFoundError:
        # No ansible-core distribution: must be ansible-base or ansible
        return True
    # An ansible-core dist with a real version string means 2.11+
    return not core_version
@pytest.fixture(scope='session')
def skipif_pre_ansible211(is_pre_ansible211):
    # Depend on this fixture to skip a test when Ansible is older than 2.11
    if is_pre_ansible211:
        pytest.skip("Valid only on Ansible 2.11+")
@pytest.fixture(scope="session")
def is_pre_ansible212():
    """True when the installed 'ansible' distribution is older than 2.12;
    False when it is 2.12+ or not installed at all."""
    try:
        detected = Version(importlib_metadata.version("ansible"))
    except importlib_metadata.PackageNotFoundError:
        return False
    return detected < Version("2.12")
@pytest.fixture(scope="session")
def skipif_pre_ansible212(is_pre_ansible212):
    # Depend on this fixture to skip a test when Ansible is older than 2.12
    if is_pre_ansible212:
        pytest.skip("Valid only on Ansible 2.12+")
# TODO: determine if we want to add docker / podman
# to zuul instances in order to run these tests
def pytest_generate_tests(metafunc):
    """Parametrize tests carrying the custom ``test_all_runtimes`` marker
    over every supported container runtime.

    Each runtime becomes a value of the test's ``runtime`` argument, with a
    skip mark applied when that runtime binary is not on PATH.

    Based on examples from https://docs.pytest.org/en/latest/example/parametrize.html.
    """
    marks = getattr(metafunc.function, 'pytestmark', [])
    if not any(getattr(mark, 'name', '') == 'test_all_runtimes' for mark in marks):
        return

    params = tuple(
        pytest.param(
            runtime,
            marks=pytest.mark.skipif(
                shutil.which(runtime) is None,
                reason=f'{runtime} is not installed',
            ),
        )
        for runtime in CONTAINER_RUNTIMES
    )
    metafunc.parametrize('runtime', params)
@pytest.fixture
def project_fixtures(tmp_path):
    # Copy the bundled fixture projects into tmp_path so tests can mutate
    # them freely without touching the repo checkout
    source = Path(__file__).parent / 'fixtures' / 'projects'
    dest = tmp_path / 'projects'
    shutil.copytree(source, dest)

    yield dest

    # Best-effort cleanup after the test finishes
    shutil.rmtree(dest, ignore_errors=True)
ansible-runner-2.4.1/test/fixtures/ 0000775 0000000 0000000 00000000000 14770573620 0017305 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/ 0000775 0000000 0000000 00000000000 14770573620 0021136 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/collection_role/ 0000775 0000000 0000000 00000000000 14770573620 0024312 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/collection_role/collections/ 0000775 0000000 0000000 00000000000 14770573620 0026630 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/collection_role/collections/ansible_collections/ 0000775 0000000 0000000 00000000000 14770573620 0032643 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/collection_role/collections/ansible_collections/groovy/ 0000775 0000000 0000000 00000000000 14770573620 0034170 5 ustar 00root root 0000000 0000000 peanuts/ 0000775 0000000 0000000 00000000000 14770573620 0035570 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/collection_role/collections/ansible_collections/groovy README.md 0000664 0000000 0000000 00000000000 14770573620 0037035 0 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/collection_role/collections/ansible_collections/groovy/peanuts galaxy.yml 0000664 0000000 0000000 00000000442 14770573620 0037600 0 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/collection_role/collections/ansible_collections/groovy/peanuts ---
authors:
- The pytest tmp_path
dependencies: {}
description: A collection for ansible-runner test test_include_role_from_collection_events.
license:
- GPL-3.0-only
name: peanuts
namespace: groovy
readme: README.md
repository: https://peanuts.invalid
tags:
- legume
version: 1.0.0
roles/ 0000775 0000000 0000000 00000000000 14770573620 0036714 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/collection_role/collections/ansible_collections/groovy/peanuts hello/ 0000775 0000000 0000000 00000000000 14770573620 0040017 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/collection_role/collections/ansible_collections/groovy/peanuts/roles tasks/ 0000775 0000000 0000000 00000000000 14770573620 0041144 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/collection_role/collections/ansible_collections/groovy/peanuts/roles/hello main.yml 0000664 0000000 0000000 00000000042 14770573620 0042607 0 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/collection_role/collections/ansible_collections/groovy/peanuts/roles/hello/tasks ---
- debug: msg="Hello peanuts!"
ansible-runner-2.4.1/test/fixtures/projects/collection_role/env/ 0000775 0000000 0000000 00000000000 14770573620 0025102 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/collection_role/env/envvars 0000664 0000000 0000000 00000000032 14770573620 0026504 0 ustar 00root root 0000000 0000000 ANSIBLE_DEVEL_WARNING: no
ansible-runner-2.4.1/test/fixtures/projects/collection_role/inventory/ 0000775 0000000 0000000 00000000000 14770573620 0026347 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/collection_role/inventory/hosts 0000664 0000000 0000000 00000000176 14770573620 0027436 0 ustar 00root root 0000000 0000000 all:
hosts:
testhost:
ansible_connection: local
ansible_python_interpreter: "{{ ansible_playbook_python }}"
ansible-runner-2.4.1/test/fixtures/projects/collection_role/use_role.yml 0000664 0000000 0000000 00000000112 14770573620 0026644 0 ustar 00root root 0000000 0000000 - hosts: all
gather_facts: no
roles:
- name: groovy.peanuts.hello
ansible-runner-2.4.1/test/fixtures/projects/containerized/ 0000775 0000000 0000000 00000000000 14770573620 0023774 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/containerized/env/ 0000775 0000000 0000000 00000000000 14770573620 0024564 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/containerized/env/envvars 0000664 0000000 0000000 00000000032 14770573620 0026166 0 ustar 00root root 0000000 0000000 ANSIBLE_DEVEL_WARNING: no
ansible-runner-2.4.1/test/fixtures/projects/containerized/env/settings 0000664 0000000 0000000 00000000116 14770573620 0026345 0 ustar 00root root 0000000 0000000 idle_timeout: 60
job_timeout: 360
pexpect_timeout: 10
process_isolation: true
ansible-runner-2.4.1/test/fixtures/projects/containerized/inventory/ 0000775 0000000 0000000 00000000000 14770573620 0026031 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/containerized/inventory/hosts 0000664 0000000 0000000 00000000176 14770573620 0027120 0 ustar 00root root 0000000 0000000 all:
hosts:
testhost:
ansible_connection: local
ansible_python_interpreter: "{{ ansible_playbook_python }}"
ansible-runner-2.4.1/test/fixtures/projects/containerized/project/ 0000775 0000000 0000000 00000000000 14770573620 0025442 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/containerized/project/test-container.yml 0000664 0000000 0000000 00000000412 14770573620 0031121 0 ustar 00root root 0000000 0000000 - hosts: all
gather_facts: no
tasks:
- name: Gather container facts
gather_facts:
gather_subset: virtual
- name:
assert:
that:
- ansible_facts.virtualization_type in ['docker', 'podman', 'container', 'containerd']
ansible-runner-2.4.1/test/fixtures/projects/debug/ 0000775 0000000 0000000 00000000000 14770573620 0022224 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/debug/env/ 0000775 0000000 0000000 00000000000 14770573620 0023014 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/debug/env/envvars 0000664 0000000 0000000 00000000032 14770573620 0024416 0 ustar 00root root 0000000 0000000 ANSIBLE_DEVEL_WARNING: no
ansible-runner-2.4.1/test/fixtures/projects/debug/inventory/ 0000775 0000000 0000000 00000000000 14770573620 0024261 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/debug/inventory/inv_1 0000664 0000000 0000000 00000000133 14770573620 0025215 0 ustar 00root root 0000000 0000000 host_1 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
ansible-runner-2.4.1/test/fixtures/projects/debug/inventory/inv_2 0000664 0000000 0000000 00000000133 14770573620 0025216 0 ustar 00root root 0000000 0000000 host_2 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
ansible-runner-2.4.1/test/fixtures/projects/debug/project/ 0000775 0000000 0000000 00000000000 14770573620 0023672 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/debug/project/debug.yml 0000664 0000000 0000000 00000000067 14770573620 0025506 0 ustar 00root root 0000000 0000000 - hosts: all
gather_facts: no
tasks:
- debug:
ansible-runner-2.4.1/test/fixtures/projects/debug/project/roles/ 0000775 0000000 0000000 00000000000 14770573620 0025016 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/debug/project/roles/hello_world/ 0000775 0000000 0000000 00000000000 14770573620 0027330 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/debug/project/roles/hello_world/tasks/ 0000775 0000000 0000000 00000000000 14770573620 0030455 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/debug/project/roles/hello_world/tasks/main.yml 0000664 0000000 0000000 00000000076 14770573620 0032127 0 ustar 00root root 0000000 0000000 - name: "Hello World role"
debug:
msg: "Hello World!"
ansible-runner-2.4.1/test/fixtures/projects/directory_isolation/ 0000775 0000000 0000000 00000000000 14770573620 0025223 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/directory_isolation/env/ 0000775 0000000 0000000 00000000000 14770573620 0026013 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/directory_isolation/env/settings 0000664 0000000 0000000 00000000147 14770573620 0027600 0 ustar 00root root 0000000 0000000 process_isolation: True
process_isolation_executable: bwrap
directory_isolation_base_path: /tmp/runner
ansible-runner-2.4.1/test/fixtures/projects/directory_isolation/project/ 0000775 0000000 0000000 00000000000 14770573620 0026671 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/directory_isolation/project/main.yml 0000664 0000000 0000000 00000000126 14770573620 0030337 0 ustar 00root root 0000000 0000000 - hosts: all
gather_facts: no
tasks:
- debug: msg="directory_isolation test"
ansible-runner-2.4.1/test/fixtures/projects/files/ 0000775 0000000 0000000 00000000000 14770573620 0022240 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/files/test_ee.py 0000664 0000000 0000000 00000000106 14770573620 0024237 0 ustar 00root root 0000000 0000000 import os
print("os-release: %s" % os.system("cat /etc/os-release"))
ansible-runner-2.4.1/test/fixtures/projects/host_status/ 0000775 0000000 0000000 00000000000 14770573620 0023516 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/host_status/env/ 0000775 0000000 0000000 00000000000 14770573620 0024306 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/host_status/env/envvars 0000664 0000000 0000000 00000000032 14770573620 0025710 0 ustar 00root root 0000000 0000000 ANSIBLE_DEVEL_WARNING: no
ansible-runner-2.4.1/test/fixtures/projects/host_status/inventory 0000664 0000000 0000000 00000000273 14770573620 0025500 0 ustar 00root root 0000000 0000000 1_ok
2_skipped
3_changed
4_failed
5_ignored
6_rescued
7_unreachable ansible_connection=ssh
[all:vars]
ansible_connection=local
ansible_python_interpreter="{{ ansible_playbook_python }}"
ansible-runner-2.4.1/test/fixtures/projects/host_status/project/ 0000775 0000000 0000000 00000000000 14770573620 0025164 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/host_status/project/gen_host_status.yml 0000664 0000000 0000000 00000002054 14770573620 0031121 0 ustar 00root root 0000000 0000000 - name: Get at least one host in each state
hosts: all
gather_facts: no
tasks:
- name: A debug msg all hosts will show except for skipped ones
debug:
msg: Playing {{ ansible_host }}
when: inventory_hostname is not search('skipped|ignored')
- name: Hosts haven't really changed, but we will say they have
debug:
msg: Intentionally changed
changed_when: true
when: "'_changed' in inventory_hostname"
- name: All failhosts aboard the failboat
fail:
msg: Intentional failure
when: "'_failed' in inventory_hostname"
- name: Ignore this failure for some hosts
fail:
ignore_errors: true
when: "'_ignored' in inventory_hostname"
- name: Reach out to the unreachable hosts
ping:
when: "'_unreachable' in inventory_hostname"
- name: Fail and rescue - collection of tasks
block:
- fail:
msg: "HALP!!!"
when: "'_rescued' in inventory_hostname"
rescue:
- debug:
msg: "ε-(´・`) フ"
ansible-runner-2.4.1/test/fixtures/projects/job_env/ 0000775 0000000 0000000 00000000000 14770573620 0022560 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/job_env/env/ 0000775 0000000 0000000 00000000000 14770573620 0023350 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/job_env/env/envvars 0000664 0000000 0000000 00000000052 14770573620 0024754 0 ustar 00root root 0000000 0000000 FOO: gifmyvqok2
ANSIBLE_DEVEL_WARNING: no
ansible-runner-2.4.1/test/fixtures/projects/job_env/env/settings 0000664 0000000 0000000 00000000034 14770573620 0025130 0 ustar 00root root 0000000 0000000 ---
process_isolation: true
ansible-runner-2.4.1/test/fixtures/projects/job_env/inventory/ 0000775 0000000 0000000 00000000000 14770573620 0024615 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/job_env/inventory/hosts 0000664 0000000 0000000 00000000176 14770573620 0025704 0 ustar 00root root 0000000 0000000 all:
hosts:
testhost:
ansible_connection: local
ansible_python_interpreter: "{{ ansible_playbook_python }}"
ansible-runner-2.4.1/test/fixtures/projects/job_env/project/ 0000775 0000000 0000000 00000000000 14770573620 0024226 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/job_env/project/printenv.yml 0000664 0000000 0000000 00000000141 14770573620 0026612 0 ustar 00root root 0000000 0000000 - hosts: all
gather_facts: no
tasks:
- debug:
msg: "{{ lookup('env', 'FOO') }}"
ansible-runner-2.4.1/test/fixtures/projects/music/ 0000775 0000000 0000000 00000000000 14770573620 0022256 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/music/project/ 0000775 0000000 0000000 00000000000 14770573620 0023724 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/music/project/roles/ 0000775 0000000 0000000 00000000000 14770573620 0025050 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/music/project/roles/Into_The_Mystic/ 0000775 0000000 0000000 00000000000 14770573620 0030111 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/music/project/roles/Into_The_Mystic/meta/ 0000775 0000000 0000000 00000000000 14770573620 0031037 5 ustar 00root root 0000000 0000000 argument_specs.yml 0000664 0000000 0000000 00000000625 14770573620 0034525 0 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/music/project/roles/Into_The_Mystic/meta ---
argument_specs:
main:
short_description: The main entry point for the Into_The_Mystic role.
options:
foghorn:
type: "bool"
required: false
default: true
description: "If true, the foghorn blows."
soul:
type: "str"
required: true
description: "Type of soul to rock"
choices:
- "gypsy"
- "normal"
ansible-runner-2.4.1/test/fixtures/projects/pexpect_timeout_data_loss/ 0000775 0000000 0000000 00000000000 14770573620 0026405 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/pexpect_timeout_data_loss/project/ 0000775 0000000 0000000 00000000000 14770573620 0030053 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/pexpect_timeout_data_loss/project/pb.yml 0000664 0000000 0000000 00000000725 14770573620 0031203 0 ustar 00root root 0000000 0000000 # part of the regression test for https://github.com/ansible/ansible-runner/issues/1330
- hosts: localhost
gather_facts: no
tasks:
# sleep significantly longer than the configured pexpect timeout; the cancel callback will inject
# additional delay before the next process status sampling interval that can cause further output to be lost;
# if all is well, we'll do another loop over the child output until it's all been consumed...
- raw: sleep 2
ansible-runner-2.4.1/test/fixtures/projects/printenv/ 0000775 0000000 0000000 00000000000 14770573620 0023003 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/printenv/env/ 0000775 0000000 0000000 00000000000 14770573620 0023573 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/printenv/env/envvars 0000664 0000000 0000000 00000000032 14770573620 0025175 0 ustar 00root root 0000000 0000000 ANSIBLE_DEVEL_WARNING: no
ansible-runner-2.4.1/test/fixtures/projects/printenv/inventory/ 0000775 0000000 0000000 00000000000 14770573620 0025040 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/printenv/inventory/hosts 0000664 0000000 0000000 00000000176 14770573620 0026127 0 ustar 00root root 0000000 0000000 all:
hosts:
testhost:
ansible_connection: local
ansible_python_interpreter: "{{ ansible_playbook_python }}"
ansible-runner-2.4.1/test/fixtures/projects/printenv/project/ 0000775 0000000 0000000 00000000000 14770573620 0024451 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/printenv/project/action_plugins/ 0000775 0000000 0000000 00000000000 14770573620 0027467 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/printenv/project/action_plugins/look_at_environment.py 0000664 0000000 0000000 00000001152 14770573620 0034114 0 ustar 00root root 0000000 0000000 from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
import os
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=None):
result = super(ActionModule, self).run(tmp, task_vars)
result['changed'] = result['failed'] = False
result['msg'] = ''
env_dict = dict(os.environ)
result['printenv'] = '\n'.join(
'{0}={1}'.format(k, v) for k, v in env_dict.items()
)
result['environment'] = env_dict
result['cwd'] = os.getcwd()
return result
ansible-runner-2.4.1/test/fixtures/projects/printenv/project/get_environment.yml 0000664 0000000 0000000 00000000105 14770573620 0030373 0 ustar 00root root 0000000 0000000 - hosts: all
gather_facts: no
tasks:
- look_at_environment:
ansible-runner-2.4.1/test/fixtures/projects/sleep/ 0000775 0000000 0000000 00000000000 14770573620 0022246 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/sleep/env/ 0000775 0000000 0000000 00000000000 14770573620 0023036 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/sleep/env/envvars 0000664 0000000 0000000 00000000032 14770573620 0024440 0 ustar 00root root 0000000 0000000 ANSIBLE_DEVEL_WARNING: no
ansible-runner-2.4.1/test/fixtures/projects/sleep/inventory/ 0000775 0000000 0000000 00000000000 14770573620 0024303 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/sleep/inventory/hosts 0000664 0000000 0000000 00000000176 14770573620 0025372 0 ustar 00root root 0000000 0000000 all:
hosts:
testhost:
ansible_connection: local
ansible_python_interpreter: "{{ ansible_playbook_python }}"
ansible-runner-2.4.1/test/fixtures/projects/sleep/project/ 0000775 0000000 0000000 00000000000 14770573620 0023714 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/sleep/project/sleep.yml 0000664 0000000 0000000 00000000342 14770573620 0025546 0 ustar 00root root 0000000 0000000 - name: Sleep playbook for testing things while process is running
hosts: all
gather_facts: no
vars:
sleep_interval: 30
tasks:
- name: Sleep for a specified interval
command: sleep {{ sleep_interval }}
ansible-runner-2.4.1/test/fixtures/projects/use_role/ 0000775 0000000 0000000 00000000000 14770573620 0022753 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/use_role/env/ 0000775 0000000 0000000 00000000000 14770573620 0023543 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/use_role/env/envvars 0000664 0000000 0000000 00000000032 14770573620 0025145 0 ustar 00root root 0000000 0000000 ANSIBLE_DEVEL_WARNING: no
ansible-runner-2.4.1/test/fixtures/projects/use_role/inventory/ 0000775 0000000 0000000 00000000000 14770573620 0025010 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/use_role/inventory/hosts 0000664 0000000 0000000 00000000176 14770573620 0026077 0 ustar 00root root 0000000 0000000 all:
hosts:
testhost:
ansible_connection: local
ansible_python_interpreter: "{{ ansible_playbook_python }}"
ansible-runner-2.4.1/test/fixtures/projects/use_role/roles/ 0000775 0000000 0000000 00000000000 14770573620 0024077 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/use_role/roles/benthomasson.hello_role/ 0000775 0000000 0000000 00000000000 14770573620 0030722 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/use_role/roles/benthomasson.hello_role/meta/ 0000775 0000000 0000000 00000000000 14770573620 0031650 5 ustar 00root root 0000000 0000000 .galaxy_install_info 0000664 0000000 0000000 00000000074 14770573620 0035621 0 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/use_role/roles/benthomasson.hello_role/meta {install_date: 'Mon Aug 20 13:21:07 2018', version: master}
ansible-runner-2.4.1/test/fixtures/projects/use_role/roles/benthomasson.hello_role/meta/main.yml 0000664 0000000 0000000 00000000270 14770573620 0033316 0 ustar 00root root 0000000 0000000 ---
galaxy_info:
author: Ben Thomasson
description: Hello World role
company: Red Hat
license: Apache
min_ansible_version: 1.2
galaxy_tags:
- hello
dependencies: []
ansible-runner-2.4.1/test/fixtures/projects/use_role/roles/benthomasson.hello_role/tasks/ 0000775 0000000 0000000 00000000000 14770573620 0032047 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/fixtures/projects/use_role/roles/benthomasson.hello_role/tasks/main.yml 0000664 0000000 0000000 00000000015 14770573620 0033512 0 ustar 00root root 0000000 0000000 ---
- debug:
ansible-runner-2.4.1/test/fixtures/projects/use_role/use_role.yml 0000664 0000000 0000000 00000000115 14770573620 0025310 0 ustar 00root root 0000000 0000000 - hosts: all
gather_facts: no
roles:
- name: benthomasson.hello_role
ansible-runner-2.4.1/test/integration/ 0000775 0000000 0000000 00000000000 14770573620 0017757 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/integration/Dockerfile 0000664 0000000 0000000 00000000646 14770573620 0021757 0 ustar 00root root 0000000 0000000 # This is not fully compatible builder Dockerfile, but meets the needs of tests
FROM quay.io/centos/centos:stream9
ARG WHEEL
COPY $WHEEL /$WHEEL
RUN dnf install -y python3-pip
RUN python3 -m pip install /$WHEEL ansible-core
RUN mkdir -p /runner/{env,inventory,project,artifacts} /home/runner/.ansible/tmp
RUN chmod -R 777 /runner /home/runner
WORKDIR /runner
ENV HOME=/home/runner
CMD ["ansible-runner", "run", "/runner"]
ansible-runner-2.4.1/test/integration/__init__.py 0000664 0000000 0000000 00000000000 14770573620 0022056 0 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/integration/callback/ 0000775 0000000 0000000 00000000000 14770573620 0021513 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/integration/callback/other_callback.py 0000664 0000000 0000000 00000000452 14770573620 0025023 0 ustar 00root root 0000000 0000000 from ansible.plugins.callback import CallbackBase
class CallbackModule(CallbackBase):
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'aggregate'
CALLBACK_NAME = 'other_callback'
def v2_playbook_on_play_start(self, play):
pass
def v2_runner_on_ok(self, result):
pass
ansible-runner-2.4.1/test/integration/conftest.py 0000664 0000000 0000000 00000012153 14770573620 0022160 0 ustar 00root root 0000000 0000000 import json
import os
import subprocess
import pathlib
import random
from string import ascii_lowercase
import pexpect
import pytest
import yaml
from ansible_runner.config.runner import RunnerConfig
here = pathlib.Path(__file__).parent
@pytest.fixture(scope='function')
def rc(tmp_path):
    """Provide a minimally configured RunnerConfig rooted at tmp_path."""
    conf = RunnerConfig(str(tmp_path))
    conf.suppress_ansible_output = True
    # Map pexpect's TIMEOUT/EOF sentinels to "no password to send"
    conf.expect_passwords = {
        pexpect.TIMEOUT: None,
        pexpect.EOF: None
    }
    conf.cwd = str(tmp_path)
    conf.env = {}
    # Keep timeouts short so a hung child process fails the test quickly
    conf.job_timeout = 10
    conf.idle_timeout = 0
    conf.pexpect_timeout = 2.
    conf.pexpect_use_poll = True
    return conf
class CompletedProcessProxy:
    """Wrap a subprocess result, adding parsed-output convenience accessors.

    Unknown attributes (stdout, stderr, returncode, ...) are transparently
    delegated to the wrapped result object.
    """

    def __init__(self, result):
        self.result = result

    def __getattr__(self, attr):
        # Fall through to the wrapped result for anything not defined here
        return getattr(self.result, attr)

    @property
    def json(self):
        """stdout parsed as JSON; fails the test if it is not valid JSON."""
        try:
            return json.loads(self.stdout)
        except json.JSONDecodeError:
            pytest.fail(
                f"Unable to convert the response to a valid json - stdout: {self.stdout}, stderr: {self.stderr}"
            )

    @property
    def yaml(self):
        """stdout parsed as YAML."""
        return yaml.safe_load(self.stdout)
@pytest.fixture(scope='function')
def cli():
    """Fixture returning a helper that runs shell commands via subprocess
    and wraps the result in CompletedProcessProxy."""
    def run(args, *a, **kw):
        # Unless 'bare' is passed, prefix the command with ansible-runner
        if not kw.pop('bare', None):
            args = ['ansible-runner'] + args
        kw['encoding'] = 'utf-8'
        if 'check' not in kw:
            # By default we want to fail if a command fails to run. Tests that
            # want to skip this can pass check=False when calling this fixture
            kw['check'] = True
        if 'stdout' not in kw:
            kw['stdout'] = subprocess.PIPE
        if 'stderr' not in kw:
            kw['stderr'] = subprocess.PIPE
        # Force a predictable locale on top of the inherited environment
        kw.setdefault('env', os.environ.copy()).update({
            'LANG': 'en_US.UTF-8'
        })
        try:
            ret = CompletedProcessProxy(subprocess.run(' '.join(args), check=kw.pop('check'), shell=True, *a, **kw))
        except subprocess.CalledProcessError as err:
            # Turn an unexpected non-zero exit into a test failure with context
            pytest.fail(
                f"Running {err.cmd} resulted in a non-zero return code: {err.returncode} - stdout: {err.stdout}, stderr: {err.stderr}"
            )
        return ret
    return run
@pytest.fixture
def container_image(request, cli, tmp_path):  # pylint: disable=W0621
    """Build a container image with the locally built wheel installed and
    yield its name; the image is removed on teardown unless it came from
    the RUNNER_TEST_IMAGE_NAME environment variable."""
    try:
        containerized = request.getfixturevalue('containerized')
        if not containerized:
            # Test opted out of containerized execution: no image needed
            yield None
            return
    except Exception:
        # Test func doesn't use containerized
        pass

    if (env_image_name := os.getenv('RUNNER_TEST_IMAGE_NAME')):
        # Externally supplied image: use it as-is, skip build and cleanup
        yield env_image_name
        return

    # Build the ansible-runner wheel from the repository checkout
    cli(
        ['pyproject-build', '-w', '-o', str(tmp_path)],
        cwd=here.parent.parent,
        bare=True,
    )
    wheel = next(tmp_path.glob('*.whl'))  # pylint: disable=R1708

    runtime = request.getfixturevalue('runtime')

    dockerfile_path = tmp_path / 'Dockerfile'
    dockerfile_path.write_text(
        (here / 'Dockerfile').read_text()
    )

    # Randomized tag so concurrent test runs do not collide
    random_string = ''.join(random.choice(ascii_lowercase) for i in range(10))
    image_name = f'ansible-runner-{random_string}-event-test'

    cli(
        [runtime, 'build', '--build-arg', f'WHEEL={wheel.name}', '--rm=true', '-t', image_name, '-f', str(dockerfile_path), str(tmp_path)],
        bare=True,
    )

    yield image_name

    # Teardown: remove the image we built
    cli(
        [runtime, 'rmi', '-f', image_name],
        bare=True,
    )
@pytest.fixture
def container_image_devel(request, cli, tmp_path):  # pylint: disable=W0621
    """Like container_image, but installs ansible-core from a git branch
    named by the requesting test's 'branch' fixture."""
    branch = request.getfixturevalue('branch')
    # Literal {{...}} below escapes braces inside this f-string
    DOCKERFILE = f"""
FROM quay.io/centos/centos:stream9
ARG WHEEL
COPY $WHEEL /$WHEEL
# Need python 3.11 minimum for devel
RUN dnf install -y python3.11 python3.11-pip git
RUN alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 0
RUN python3 -m pip install /$WHEEL git+https://github.com/ansible/ansible@{branch}
RUN mkdir -p /runner/{{env,inventory,project,artifacts}} /home/runner/.ansible/tmp
RUN chmod -R 777 /runner /home/runner
WORKDIR /runner
ENV HOME=/home/runner
CMD ["ansible-runner", "run", "/runner"]
"""
    try:
        containerized = request.getfixturevalue('containerized')
        if not containerized:
            # Test opted out of containerized execution: no image needed
            yield None
            return
    except Exception:
        # Test func doesn't use containerized
        pass

    if (env_image_name := os.getenv('RUNNER_TEST_IMAGE_NAME')):
        # Externally supplied image: use it as-is, skip build and cleanup
        yield env_image_name
        return

    # Build the ansible-runner wheel from the repository checkout
    cli(
        ['pyproject-build', '-w', '-o', str(tmp_path)],
        cwd=here.parent.parent,
        bare=True,
    )
    wheel = next(tmp_path.glob('*.whl'))  # pylint: disable=R1708

    runtime = request.getfixturevalue('runtime')

    dockerfile_path = tmp_path / 'Dockerfile'
    dockerfile_path.write_text(DOCKERFILE)

    # Randomized tag so concurrent test runs do not collide
    random_string = ''.join(random.choice(ascii_lowercase) for i in range(10))
    image_name = f'ansible-runner-{random_string}-event-test'

    cli(
        [runtime, 'build', '--build-arg', f'WHEEL={wheel.name}', '--rm=true', '-t', image_name, '-f', str(dockerfile_path), str(tmp_path)],
        bare=True,
    )

    yield image_name

    # Teardown: remove the image we built
    cli(
        [runtime, 'rmi', '-f', image_name],
        bare=True,
    )
ansible-runner-2.4.1/test/integration/containerized/ 0000775 0000000 0000000 00000000000 14770573620 0022615 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/integration/containerized/__init__.py 0000664 0000000 0000000 00000000000 14770573620 0024714 0 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/integration/containerized/test_cleanup_images.py 0000664 0000000 0000000 00000004374 14770573620 0027212 0 ustar 00root root 0000000 0000000 import json
import random
from base64 import b64decode
from string import ascii_lowercase
import pytest
from ansible_runner.cleanup import cleanup_images, prune_images
@pytest.mark.test_all_runtimes
def test_cleanup_new_image(cli, runtime, tmp_path, container_image):
    """End-to-end check that cleanup_images removes an image we just built and used."""
    # Create new image just for this test with a unique layer
    random_string = ''.join(random.choice(ascii_lowercase) for i in range(10))
    special_string = f"Verify this in test - {random_string}"

    dockerfile_path = tmp_path / 'Dockerfile'
    dockerfile_path.write_text('\n'.join([
        f'FROM {container_image}',
        f'RUN echo {special_string} > /tmp/for_test.txt'
    ]))

    image_name = f'quay.io/fortest/{random_string}:latest'
    build_cmd = [runtime, 'build', '--rm=true', '-t', image_name, '-f', str(dockerfile_path), str(tmp_path)]
    cli(build_cmd, bare=True)

    # get an id for the unique layer
    r = cli([runtime, 'images', image_name, '--format="{{.ID}}"'], bare=True)
    layer_id = r.stdout.strip()

    assert layer_id in cli([runtime, 'images'], bare=True).stdout

    # workaround for https://github.com/ansible/ansible-runner/issues/758
    tmp_path.joinpath('project').mkdir()

    # force no colors so that we can JSON load ad hoc output
    env_path = tmp_path.joinpath('env')
    env_path.mkdir()
    env_path.joinpath('envvars').write_text('{"ANSIBLE_NOCOLOR": "true"}')

    # assure that the image is usable in ansible-runner as an EE
    r = cli([
        'run', str(tmp_path), '-m', 'slurp', '-a', 'src=/tmp/for_test.txt', '--hosts=localhost', '--ident', 'for_test',
        '--container-image', image_name, '--process-isolation', '--process-isolation-executable', runtime,
    ])
    stdout = r.stdout
    # Extract the first JSON object from the ad hoc output and verify the
    # unique layer's file content round-trips through the slurp module.
    data = json.loads(stdout[stdout.index('{'):stdout.index('}') + 1])
    assert 'content' in data
    assert special_string == str(b64decode(data['content']).strip(), encoding='utf-8')

    image_ct = cleanup_images(images=[image_name], runtime=runtime)
    assert image_ct == 1

    prune_images(runtime=runtime)  # May or may not do anything, depends on docker / podman

    assert layer_id not in cli([runtime, 'images'], bare=True).stdout  # establishes that cleanup was genuine

    assert cleanup_images(images=[image_name], runtime=runtime) == 0  # should be no-op
ansible-runner-2.4.1/test/integration/containerized/test_cli_containerized.py 0000664 0000000 0000000 00000005557 14770573620 0027727 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
import os
import signal
import sys
from test.utils.common import iterate_timeout
from uuid import uuid4
import pytest
@pytest.mark.test_all_runtimes
def test_module_run(cli, project_fixtures, runtime, container_image):
    """Run the ping module against testhost inside a container and expect a pong."""
    private_data_dir = project_fixtures.joinpath('containerized').as_posix()
    result = cli([
        'run',
        '--process-isolation-executable', runtime,
        '--container-image', container_image,
        '-m', 'ping',
        '--hosts', 'testhost',
        private_data_dir,
    ])

    assert '"ping": "pong"' in result.stdout
@pytest.mark.test_all_runtimes
def test_playbook_run(cli, project_fixtures, runtime, container_image):
    """A containerized playbook run must finish with zero failed tasks."""
    # Ensure the container environment variable is set so that Ansible fact gathering
    # is able to detect it is running inside a container.
    envvars_path = project_fixtures / 'containerized' / 'env' / 'envvars'
    with envvars_path.open('a') as f:
        f.write(f'container: {runtime}\n')

    r = cli([
        'run',
        '--process-isolation-executable', runtime,
        '--container-image', container_image,
        '-p', 'test-container.yml',
        project_fixtures.joinpath('containerized').as_posix(),
    ])
    # The play must have reached its recap with no failures.
    assert 'PLAY RECAP *******' in r.stdout
    assert 'failed=0' in r.stdout
@pytest.mark.test_all_runtimes
def test_provide_env_var(cli, project_fixtures, runtime, container_image):
    """Values from the env/envvars file must be visible inside the container."""
    job_env_dir = project_fixtures.joinpath('job_env').as_posix()
    result = cli([
        'run',
        '--process-isolation-executable', runtime,
        '--container-image', container_image,
        '-p', 'printenv.yml',
        job_env_dir,
    ])

    assert 'gifmyvqok2' in result.stdout, result.stdout
@pytest.mark.test_all_runtimes
@pytest.mark.skipif(sys.platform == 'darwin', reason='ansible-runner start does not work reliably on macOS')
def test_cli_kill_cleanup(cli, runtime, project_fixtures, container_image):
    """SIGTERM to the daemonized runner process must also stop its container."""
    unique_string = str(uuid4()).replace('-', '')
    ident = f'kill_test_{unique_string}'
    pdd = os.path.join(project_fixtures, 'sleep')
    cli_args = [
        'start', pdd,
        '-p', 'sleep.yml',
        '--ident', ident,
        '--process-isolation',
        '--process-isolation-executable', runtime,
        '--container-image', container_image,
    ]
    cli(cli_args)

    def container_is_running():
        # Runner names its container ansible_runner_<ident>.
        r = cli([runtime, 'ps', '-f', f'name=ansible_runner_{ident}', '--format={{.Names}}'], bare=True)
        return ident in r.stdout

    timeout = 10
    for _ in iterate_timeout(timeout, 'confirm ansible-runner started container', interval=1):
        if container_is_running():
            break

    # Here, we will do sigterm to kill the parent process, it should handle this gracefully
    with open(os.path.join(pdd, 'pid'), 'r') as f:
        pid = int(f.read().strip())
    os.kill(pid, signal.SIGTERM)

    for _ in iterate_timeout(timeout, 'confirm container no longer running', interval=1):
        if not container_is_running():
            break
ansible-runner-2.4.1/test/integration/containerized/test_container_management.py 0000664 0000000 0000000 00000013773 14770573620 0030417 0 ustar 00root root 0000000 0000000 import os
import time
import json
from glob import glob
from uuid import uuid4
import pytest
from ansible_runner.interface import run
def is_running(cli, runtime, container_name):
    """Return the matching container id(s) as a stripped string, '' when none.

    This is a helper, not a test: the stray ``@pytest.mark.test_all_runtimes``
    decorator previously attached here had no effect (pytest marks only apply
    to collected test items) and has been removed.
    """
    cmd = [runtime, 'ps', '-aq', '--filter', f'name={container_name}']
    r = cli(cmd, bare=True)
    output = f'{r.stdout}{r.stderr}'
    # Echo the command and its output to aid debugging flaky container tests.
    print(' '.join(cmd))
    print(output)
    return output.strip()
class CancelStandIn:
    """Cancel-callback object for Runner.run(): requests cancellation only after
    an initial startup delay AND after verifying the expected container actually
    started, so a job cannot be "cancelled" before it ever ran."""

    def __init__(self, runtime, cli, container_name, delay=0.2):
        self.runtime = runtime
        self.cli = cli
        self.delay = delay  # seconds to wait before the first real check
        self.container_name = container_name
        self.checked_running = False  # set True once the container was seen running
        self.start_time = None  # timestamp of the first cancel() invocation

    def cancel(self):
        # Avoid checking for some initial delay to allow container startup
        if not self.start_time:
            self.start_time = time.time()
        if time.time() - self.start_time < self.delay:
            return False
        # guard against false passes by checking for running container
        if not self.checked_running:
            # Poll up to 5 times, 0.2s apart, for the container to appear;
            # the for/else raises only if it never showed up.
            for _ in range(5):
                if is_running(self.cli, self.runtime, self.container_name):
                    break
                time.sleep(0.2)
            else:
                print(self.cli([self.runtime, 'ps', '-a'], bare=True).stdout)
                raise Exception('Never spawned expected container')
            self.checked_running = True
        # Established that container was running, now we cancel job
        return True
@pytest.mark.test_all_runtimes
def test_cancel_will_remove_container(project_fixtures, runtime, cli, container_image):
    """Cancelling a containerized run must stop its container (rc 254, status canceled)."""
    private_data_dir = project_fixtures / 'sleep'
    ident = uuid4().hex[:12]
    container_name = f'ansible_runner_{ident}'

    cancel_standin = CancelStandIn(runtime, cli, container_name)

    res = run(
        private_data_dir=private_data_dir,
        playbook='sleep.yml',
        settings={
            'process_isolation_executable': runtime,
            'process_isolation': True,
            'container_image': container_image,
        },
        cancel_callback=cancel_standin.cancel,
        ident=ident
    )
    # 254 is runner's return code for a canceled job.
    with res.stdout as f:
        assert res.rc == 254, f.read()
    assert res.status == 'canceled'

    assert not is_running(
        cli, runtime, container_name
    ), 'Found a running container, they should have all been stopped'
@pytest.mark.test_all_runtimes
def test_non_owner_install(mocker, project_fixtures, runtime, container_image):
    """Simulates a run on a computer where ansible-runner install is not owned by current user"""
    # Force the ownership check to report "not owned by me".
    mocker.patch('ansible_runner.utils.is_dir_owner', return_value=False)

    private_data_dir = project_fixtures / 'debug'
    res = run(
        private_data_dir=private_data_dir,
        playbook='debug.yml',
        settings={
            'process_isolation_executable': runtime,
            'process_isolation': True,
            'container_image': container_image,
        }
    )
    # The run should still succeed despite the non-owner install.
    with res.stdout as f:
        stdout = f.read()
    assert res.rc == 0, stdout
    assert res.status == 'successful'
@pytest.mark.test_all_runtimes
def test_invalid_registry_host(tmp_path, runtime):
    """A pull from an unreachable registry must fail while still writing the
    expected auth artifacts (credentials file and registries.conf)."""
    pdd_path = tmp_path / 'private_data_dir'
    pdd_path.mkdir()
    private_data_dir = str(pdd_path)

    image_name = 'quay.io/kdelee/does-not-exist'
    res = run(
        private_data_dir=private_data_dir,
        playbook='ping.yml',
        settings={
            'process_isolation_executable': runtime,
            'process_isolation': True,
            'container_image': image_name,
            'container_options': ['--user=root', '--pull=always'],
        },
        container_auth_data={'host': 'somedomain.invalid', 'username': 'foouser', 'password': '349sk34', 'verify_ssl': False},
        ident='awx_123'
    )
    assert res.status == 'failed'
    assert res.rc > 0
    assert os.path.exists(res.config.registry_auth_path)

    with res.stdout as f:
        result_stdout = f.read()

    # docker layout: auth path is a directory containing config.json and registries.conf
    auth_file_path = os.path.join(res.config.registry_auth_path, 'config.json')
    registry_conf = os.path.join(res.config.registry_auth_path, 'registries.conf')
    error_msg = 'access to the requested resource is not authorized'
    if runtime == 'podman':
        # podman layout: auth path IS the credentials file; registries.conf
        # lives alongside it, and the error wording differs.
        assert image_name in result_stdout
        error_msg = 'unauthorized'
        auth_file_path = res.config.registry_auth_path
        registry_conf = os.path.join(os.path.dirname(res.config.registry_auth_path), 'registries.conf')
    assert error_msg in result_stdout

    with open(auth_file_path, 'r') as f:
        content = f.read()
    assert res.config.container_auth_data['host'] in content
    assert 'Zm9vdXNlcjozNDlzazM0' in content  # the b64 encoded of username and password

    # verify_ssl=False must be rendered as an insecure registry entry.
    assert os.path.exists(registry_conf)
    with open(registry_conf, 'r') as f:
        assert f.read() == '\n'.join([
            '[[registry]]',
            'location = "somedomain.invalid"',
            'insecure = true'
        ])
@pytest.mark.test_all_runtimes
def test_registry_auth_file_cleanup(tmp_path, cli, runtime):
    """Temporary registry auth files must not survive the run carrying our ident."""
    pdd_path = tmp_path / 'private_data_dir'
    pdd_path.mkdir()
    private_data_dir = str(pdd_path)

    # Snapshot pre-existing auth files so only files from this run are judged.
    auth_registry_glob = '/tmp/ansible_runner_registry_*'
    registry_files_before = set(glob(auth_registry_glob))

    settings_data = {
        'process_isolation_executable': runtime,
        'process_isolation': True,
        'container_image': 'quay.io/kdelee/does-not-exist',
        'container_options': ['--user=root', '--pull=always'],
        'container_auth_data': {'host': 'https://somedomain.invalid', 'username': 'foouser', 'password': '349sk34'},
    }
    env_path = pdd_path / 'env'
    env_path.mkdir()
    with env_path.joinpath('settings').open('w') as f:
        f.write(json.dumps(settings_data, indent=2))

    this_ident = str(uuid4())[:5]

    # The pull is expected to fail; check=False tolerates the non-zero exit.
    cli(['run', private_data_dir, '--ident', this_ident, '-p', 'ping.yml'], check=False)

    # Any auth file created during this run must not be named after our ident,
    # i.e. the per-run file must have been cleaned up.
    discovered_registry_files = set(glob(auth_registry_glob)) - registry_files_before
    for file_name in discovered_registry_files:
        assert this_ident not in file_name
ansible-runner-2.4.1/test/integration/exec_env/ 0000775 0000000 0000000 00000000000 14770573620 0021553 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/integration/exec_env/Containerfile 0000664 0000000 0000000 00000000406 14770573620 0024260 0 ustar 00root root 0000000 0000000 FROM registry.access.redhat.com/ubi8/ubi
RUN yum -y install python3-pip gcc python3-devel openssh-clients
RUN pip3 install https://github.com/ansible/ansible/archive/devel.tar.gz
RUN pip3 install https://github.com/ansible/ansible-runner/archive/devel.tar.gz
ansible-runner-2.4.1/test/integration/exec_env/demo.yml 0000664 0000000 0000000 00000002034 14770573620 0023221 0 ustar 00root root 0000000 0000000 ---
- name: Run some basic system automation stuff
hosts: rhel8
tasks:
- name: install some basic packages
yum:
name:
- tmux
- git
- vim-enhanced
- python3
- name: enable journald persistent storage
file:
path: /var/log/journal
state: directory
- name: Get tuned profile
slurp:
src: /etc/tuned/active_profile
register: tuned_active_profile
- debug:
msg: "{{ tuned_active_profile['content'] | b64decode | trim }}"
- name: tuned-adm set throughput-performance
shell: /usr/sbin/tuned-adm profile throughput-performance
when: "tuned_active_profile['content'] | b64decode | trim != 'throughput-performance'"
- name: don't allow password based ssh
lineinfile:
path: /etc/ssh/sshd_config
regexp: '^PasswordAuthentication'
line: "PasswordAuthentication no"
notify: restart sshd
handlers:
- name: restart sshd
service:
name: sshd
state: restarted
ansible-runner-2.4.1/test/integration/exec_env/inventory.ini 0000664 0000000 0000000 00000000127 14770573620 0024311 0 ustar 00root root 0000000 0000000 [rhel8]
azure-rhel8 ansible_host=168.62.187.3 ansible_user=admiller ansible_become=yes
ansible-runner-2.4.1/test/integration/test___main__.py 0000664 0000000 0000000 00000012035 14770573620 0023111 0 ustar 00root root 0000000 0000000 import os
import sys
import uuid
import json
import random
import string
import pytest
import ansible_runner.__main__
def random_string():
    """Return a random string of 3-20 uppercase letters and digits."""
    alphabet = string.ascii_uppercase + string.digits
    length = random.randint(3, 20)
    return ''.join(random.choice(alphabet) for _ in range(length))
def random_json(keys=None):
    """Return a JSON object string: random values for the given keys, or
    five random key/value pairs when no keys are supplied."""
    if keys:
        data = {key: random_string() for key in keys}
    else:
        data = {random_string(): random_string() for _ in range(5)}
    return json.dumps(data)
def cmdline(command, *args):
    """Overwrite sys.argv as though 'ansible-runner <command> <args...>' was invoked."""
    sys.argv = ['ansible-runner', command, *args]
def test_main_bad_private_data_dir():
    """main() must raise OSError when the private data dir path is an existing regular file."""
    # Use tempfile rather than a hand-rolled /tmp + uuid path: race-free,
    # portable, and the fd is guaranteed to belong to us.
    import tempfile
    fd, tmpfile = tempfile.mkstemp()
    with os.fdopen(fd, 'w') as f:
        f.write(random_string())

    cmdline('run', tmpfile, '-p', 'fake')

    try:
        with pytest.raises(OSError):
            ansible_runner.__main__.main()
    finally:
        # Always remove the decoy file, even when the assertion fails.
        os.remove(tmpfile)
def save_playbook(**kwargs):
    """Stand-in for ansible_runner.__main__.run used by tests: snapshot the
    generated playbook into the private data dir as play.yml, then raise
    intentionally so main() exits with a non-zero return code."""
    os.link(kwargs['playbook'], os.path.join(kwargs['private_data_dir'], 'play.yml'))
    raise AttributeError("Raised intentionally")
@pytest.mark.parametrize(
    ('options', 'expected_playbook'),
    (
        (
            ['-r', 'test'],
            [{'hosts': 'all', 'gather_facts': True, 'roles': [{'name': 'test'}]}],
        ),
        (
            ['-r', 'test', '--role-skip-facts'],
            [{'hosts': 'all', 'gather_facts': False, 'roles': [{'name': 'test'}]}],
        ),
        (
            ['-r', 'test', '--role-vars', 'foo=bar'],
            [{'hosts': 'all', 'gather_facts': True, 'roles': [{'name': 'test', 'vars': {'foo': 'bar'}}]}],
        ),
        (
            ['-r', 'test', '--roles-path', '/tmp/roles'],
            [{'hosts': 'all', 'gather_facts': True, 'roles': [{'name': 'test'}]}],
        ),
    )
)
def test_cmdline_role(options, expected_playbook, tmp_path, mocker):
    """Role-related CLI options must be translated into the expected generated playbook."""
    # save_playbook captures the playbook then raises, so main() returns 1.
    mocker.patch.object(ansible_runner.__main__, 'run', save_playbook)
    spy = mocker.spy(ansible_runner.__main__, 'run')

    command = ['run', str(tmp_path)]
    command.extend(options)

    rc = ansible_runner.__main__.main(command)

    with open(tmp_path / 'play.yml') as f:
        playbook = json.loads(f.read())

    assert rc == 1
    assert playbook == expected_playbook
    assert spy.call_args.kwargs.get('private_data_dir') == str(tmp_path)
def test_cmdline_role_with_playbook_option():
    """Test error is raised with invalid command line option '-p'
    """
    cmdline('run', 'private_data_dir', '-r', 'fake', '-p', 'fake')
    with pytest.raises(SystemExit) as exc:
        ansible_runner.__main__.main()
    # Bug fix: the assertion used to read `assert exc == 1` (comparing the
    # ExceptionInfo object itself to an int — always False) and sat inside the
    # 'with' block after the raising call, making it dead code. Assert the real
    # exit code, outside the block, matching the sibling tests.
    assert exc.value.code == 1
def test_cmdline_playbook(tmp_path):
    """A valid project/playbook plus a local inventory must run to completion (rc 0)."""
    private_data_dir = tmp_path
    play = [{'hosts': 'all', 'tasks': [{'debug': {'msg': random_string()}}]}]

    path = private_data_dir / 'project'
    path.mkdir()
    playbook = path / 'main.yaml'
    with open(playbook, 'w') as f:
        f.write(json.dumps(play))

    path = private_data_dir / 'inventory'
    os.makedirs(path)
    inventory = path / 'hosts'
    with open(inventory, 'w') as f:
        f.write('[all]\nlocalhost ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"')

    cmdline('run', str(private_data_dir), '-p', str(playbook), '--inventory', str(inventory))

    assert ansible_runner.__main__.main() == 0

    # The playbook on disk must be untouched by the run.
    with open(playbook) as f:
        assert json.loads(f.read()) == play
def test_cmdline_playbook_hosts():
    """Combining '-p' with '--hosts' on the command line must exit with status 1."""
    cmdline('run', 'private_data_dir', '-p', 'fake', '--hosts', 'all')
    with pytest.raises(SystemExit) as excinfo:
        ansible_runner.__main__.main()
    assert excinfo.value.code == 1
def test_cmdline_includes_one_option():
    """Invoking 'run' with none of '-p', '-m' or '-r' must exit with status 1."""
    cmdline('run', 'private_data_dir')
    with pytest.raises(SystemExit) as excinfo:
        ansible_runner.__main__.main()
    assert excinfo.value.code == 1
def test_cmdline_cmdline_override(tmp_path):
    """Passing extra ansible args via '--cmdline' must not break a normal run."""
    private_data_dir = tmp_path
    play = [{'hosts': 'all', 'tasks': [{'debug': {'msg': random_string()}}]}]

    path = private_data_dir / 'project'
    path.mkdir()
    playbook = path / 'main.yaml'
    with open(playbook, 'w') as f:
        f.write(json.dumps(play))

    path = private_data_dir / 'inventory'
    os.makedirs(path)
    inventory = path / 'hosts'
    with open(inventory, 'w') as f:
        f.write('[all]\nlocalhost ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"')

    # '--cmdline' forwards raw arguments (here an extra var) to ansible-playbook.
    cmdline('run', str(private_data_dir), '-p', str(playbook), '--cmdline', '-e foo=bar')
    assert ansible_runner.__main__.main() == 0
def test_cmdline_invalid_inventory(tmp_path):
    """
    Test that an invalid inventory path causes an error.
    """
    private_data_dir = tmp_path
    inv_path = private_data_dir / 'inventory'
    inv_path.mkdir(parents=True)

    # 'badInventoryPath' does not exist under the inventory dir, so main()
    # must exit with status 1 rather than start a run.
    cmdline('run', str(private_data_dir), '-p', 'test.yml', '--inventory', 'badInventoryPath')

    with pytest.raises(SystemExit) as exc:
        ansible_runner.__main__.main()
    assert exc.value.code == 1
ansible-runner-2.4.1/test/integration/test_config.py 0000664 0000000 0000000 00000003146 14770573620 0022641 0 ustar 00root root 0000000 0000000 import os
from ansible_runner.config._base import BaseConfig
from ansible_runner.interface import run
def test_combine_python_and_file_settings(project_fixtures):
    """Settings passed in Python must merge with those loaded from env/settings."""
    rc = BaseConfig(private_data_dir=str(project_fixtures / 'job_env'), settings={'job_timeout': 40}, container_image='bar')
    rc.prepare_env()
    # job_timeout comes from the Python kwarg; process_isolation from the file.
    assert rc.settings == {'job_timeout': 40, 'process_isolation': True}
def test_default_ansible_callback(project_fixtures):
    """This is the reference case for stdout customization tests, assures default stdout callback is used"""
    res = run(private_data_dir=str(project_fixtures / 'debug'), playbook='debug.yml')
    with res.stdout as f:
        stdout = f.read()

    assert res.rc == 0, stdout
    # The default callback renders per-host 'ok: [host] => {' blocks.
    assert 'ok: [host_1] => {' in stdout, stdout
    assert '"msg": "Hello world!"' in stdout, stdout
def test_custom_stdout_callback_via_host_environ(project_fixtures, mocker):
    """ANSIBLE_STDOUT_CALLBACK from the host environment must select the callback."""
    mocker.patch.dict(os.environ, {'ANSIBLE_STDOUT_CALLBACK': 'minimal'})
    res = run(private_data_dir=str(project_fixtures / 'debug'), playbook='debug.yml')
    with res.stdout as f:
        stdout = f.read()

    assert res.rc == 0, stdout
    # The 'minimal' callback renders 'host | SUCCESS => {' instead of the default format.
    assert 'host_1 | SUCCESS => {' in stdout, stdout
    assert '"msg": "Hello world!"' in stdout, stdout
def test_custom_stdout_callback_via_envvars(project_fixtures):
    """ANSIBLE_STDOUT_CALLBACK passed via the envvars kwarg must select the callback."""
    res = run(private_data_dir=str(project_fixtures / 'debug'), playbook='debug.yml', envvars={'ANSIBLE_STDOUT_CALLBACK': 'minimal'})
    with res.stdout as f:
        stdout = f.read()

    assert res.rc == 0, stdout
    # The 'minimal' callback renders 'host | SUCCESS => {' instead of the default format.
    assert 'host_1 | SUCCESS => {' in stdout, stdout
    assert '"msg": "Hello world!"' in stdout, stdout
ansible-runner-2.4.1/test/integration/test_core_integration.py 0000664 0000000 0000000 00000004742 14770573620 0024732 0 ustar 00root root 0000000 0000000 import sys
import pytest
from ansible_runner.interface import run
TEST_BRANCHES = (
'devel',
'milestone',
'stable-2.18', # current stable
'stable-2.17', # stable - 1
)
@pytest.mark.test_all_runtimes
@pytest.mark.parametrize('branch', TEST_BRANCHES)
@pytest.mark.skipif(sys.platform == 'darwin', reason='does not work on macOS')
def test_adhoc(tmp_path, runtime, branch, container_image_devel):  # pylint: disable=W0613
    """An ad hoc module run must succeed against each supported ansible-core branch."""
    # pvt_data_dir is mounted on the container, so it must contain the expected directories
    project_dir = tmp_path / 'project'
    project_dir.mkdir()

    r = run(private_data_dir=str(tmp_path),
            host_pattern='localhost',
            module='shell',
            module_args='pwd',
            process_isolation_executable=runtime,
            process_isolation=True,
            container_image=container_image_devel,
            )

    assert r.status == 'successful'
    assert r.rc == 0
    assert 'ok' in r.stats
    assert 'localhost' in r.stats['ok']
    # An ad hoc run emits exactly 4 non-verbose events.
    events = [x['event'] for x in r.events if x['event'] != 'verbose']
    assert len(events) == 4
@pytest.mark.test_all_runtimes
@pytest.mark.parametrize('branch', TEST_BRANCHES)
@pytest.mark.skipif(sys.platform == 'darwin', reason='does not work on macOS')
def test_playbook(tmp_path, runtime, branch, container_image_devel):  # pylint: disable=W0613
    """A playbook run must emit the exact expected event sequence on each branch."""
    PLAYBOOK = """
- hosts: localhost
  gather_facts: False
  tasks:
    - set_fact:
        foo: bar
"""

    # pvt_data_dir is mounted on the container, so it must contain the expected directories
    project_dir = tmp_path / 'project'
    project_dir.mkdir()
    inventory_dir = tmp_path / 'inventory'
    inventory_dir.mkdir()

    hosts_file = inventory_dir / 'hosts'
    hosts_file.write_text('localhost\n')

    playbook = project_dir / 'test.yml'
    playbook.write_text(PLAYBOOK)

    r = run(private_data_dir=str(tmp_path),
            playbook='test.yml',
            process_isolation_executable=runtime,
            process_isolation=True,
            container_image=container_image_devel,
            )

    expected_events = [
        'playbook_on_start',
        'playbook_on_play_start',
        'playbook_on_task_start',
        'runner_on_start',
        'runner_on_ok',
        'playbook_on_stats',
    ]

    assert r.status == 'successful'
    assert r.rc == 0
    assert 'ok' in r.stats
    assert 'localhost' in r.stats['ok']
    # Order matters: compare the full non-verbose event sequence.
    events = [x['event'] for x in r.events if x['event'] != 'verbose']
    assert events == expected_events
ansible-runner-2.4.1/test/integration/test_display_callback.py 0000664 0000000 0000000 00000033450 14770573620 0024656 0 ustar 00root root 0000000 0000000 from __future__ import absolute_import
import json
import os
import yaml
import pytest
from ansible_runner.interface import init_runner
HERE = os.path.abspath(os.path.dirname(__file__))
@pytest.fixture()
def executor(tmp_path, request):
    """Build an (unstarted) Runner for the requesting test's 'playbook' param,
    with warnings silenced so they do not inject verbose events into assertions."""
    private_data_dir = tmp_path / 'foo'
    private_data_dir.mkdir()

    # The playbook comes from the test's parametrize entry: {filename: yaml_text}.
    playbooks = request.node.callspec.params.get('playbook')
    playbook = list(playbooks.values())[0]
    envvars = request.node.callspec.params.get('envvars')
    if envvars is None:
        envvars = {}
    # warning messages create verbose events and interfere with assertions
    envvars["ANSIBLE_DEPRECATION_WARNINGS"] = "False"
    # python interpreter used is not of much interest, we really want to silence warnings
    envvars['ANSIBLE_PYTHON_INTERPRETER'] = 'auto_silent'

    inventory = 'localhost ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"'

    r = init_runner(
        private_data_dir=private_data_dir,
        inventory=inventory,
        envvars=envvars,
        playbook=yaml.safe_load(playbook)
    )
    return r
@pytest.mark.parametrize('event', ['playbook_on_start',
                                   'playbook_on_play_start',
                                   'playbook_on_task_start', 'runner_on_ok',
                                   'playbook_on_stats'])
@pytest.mark.parametrize('playbook', [
{'helloworld.yml': '''
- name: Hello World Sample
  connection: local
  hosts: all
  gather_facts: no
  tasks:
    - name: Hello Message
      debug:
        msg: "Hello World!"
'''},  # noqa
{'results_included.yml': '''
- name: Run module which generates results list
  connection: local
  hosts: all
  gather_facts: no
  vars:
    results: ['foo', 'bar']
  tasks:
    - name: Generate results list
      debug:
        var: results
'''}  # noqa
], ids=['helloworld.yml', 'results_included.yml'])
@pytest.mark.parametrize('envvars', [
    {'ANSIBLE_CALLBACK_PLUGINS': os.path.join(HERE, 'callback')},
    {'ANSIBLE_CALLBACK_PLUGINS': ''}],
    ids=['local-callback-plugin', 'no-callback-plugin']
)
def test_callback_plugin_receives_events(executor, event, playbook, envvars):  # pylint: disable=W0613,W0621
    """Each core playbook event type must appear in the recorded event stream."""
    executor.run()
    assert list(executor.events)
    assert event in [task['event'] for task in executor.events]
@pytest.mark.parametrize('playbook', [
{'no_log_on_ok.yml': '''
- name: args should not be logged when task-level no_log is set
  connection: local
  hosts: all
  gather_facts: no
  tasks:
    - shell: echo "SENSITIVE"
      no_log: true
'''},  # noqa
{'no_log_on_fail.yml': '''
- name: failed args should not be logged when task-level no_log is set
  connection: local
  hosts: all
  gather_facts: no
  tasks:
    - shell: echo "SENSITIVE"
      no_log: true
      failed_when: true
      ignore_errors: true
'''},  # noqa
{'no_log_on_skip.yml': '''
- name: skipped task args should be suppressed with no_log
  connection: local
  hosts: all
  gather_facts: no
  tasks:
    - shell: echo "SENSITIVE"
      no_log: true
      when: false
'''},  # noqa
{'no_log_on_play.yml': '''
- name: args should not be logged when play-level no_log set
  connection: local
  hosts: all
  gather_facts: no
  no_log: true
  tasks:
    - shell: echo "SENSITIVE"
'''},  # noqa
{'async_no_log.yml': '''
- name: async task args should suppressed with no_log
  connection: local
  hosts: all
  gather_facts: no
  no_log: true
  tasks:
    - async: 10
      poll: 1
      shell: echo "SENSITIVE"
      no_log: true
'''},  # noqa
{'with_items.yml': '''
- name: with_items tasks should be suppressed with no_log
  connection: local
  hosts: all
  gather_facts: no
  tasks:
    - shell: echo {{ item }}
      no_log: true
      with_items: [ "SENSITIVE", "SENSITIVE-SKIPPED", "SENSITIVE-FAILED" ]
      when: item != "SENSITIVE-SKIPPED"
      failed_when: item == "SENSITIVE-FAILED"
      ignore_errors: yes
'''},  # noqa, NOTE: with_items will be deprecated in 2.9
{'loop.yml': '''
- name: loop tasks should be suppressed with no_log
  connection: local
  hosts: all
  gather_facts: no
  tasks:
    - shell: echo {{ item }}
      no_log: true
      loop: [ "SENSITIVE", "SENSITIVE-SKIPPED", "SENSITIVE-FAILED" ]
      when: item != "SENSITIVE-SKIPPED"
      failed_when: item == "SENSITIVE-FAILED"
      ignore_errors: yes
'''},  # noqa
])
def test_callback_plugin_no_log_filters(executor, playbook):  # pylint: disable=W0613,W0621
    """No recorded event may leak the string SENSITIVE when no_log is in effect."""
    executor.run()
    assert list(executor.events)
    assert 'SENSITIVE' not in json.dumps(list(executor.events))
@pytest.mark.parametrize('playbook', [
{'no_log_on_ok.yml': '''
- name: args should not be logged when no_log is set at the task or module level
  connection: local
  hosts: all
  gather_facts: no
  tasks:
    - shell: echo "PUBLIC"
    - shell: echo "PRIVATE"
      no_log: true
    - uri: url=https://example.org url_username="PUBLIC" url_password="PRIVATE"
'''},  # noqa
])
def test_callback_plugin_task_args_leak(executor, playbook):  # pylint: disable=W0613,W0621
    """PRIVATE values must be redacted from every event while PUBLIC ones survive."""
    executor.run()
    events = list(executor.events)
    assert events[0]['event'] == 'playbook_on_start'
    assert events[1]['event'] == 'playbook_on_play_start'

    # task 1
    assert events[2]['event'] == 'playbook_on_task_start'
    assert events[3]['event'] == 'runner_on_start'
    assert events[4]['event'] == 'runner_on_ok'

    # task 2 no_log=True
    assert events[5]['event'] == 'playbook_on_task_start'
    assert events[6]['event'] == 'runner_on_start'
    assert events[7]['event'] == 'runner_on_ok'
    assert 'PUBLIC' in json.dumps(events), events
    for event in events:
        assert 'PRIVATE' not in json.dumps(event), event
    # make sure playbook was successful, so all tasks were hit
    assert not events[-1]['event_data']['failures'], 'Unexpected playbook execution failure'
@pytest.mark.parametrize(
    "playbook",
    [
        {
            "simple.yml": """
- name: simpletask
  connection: local
  hosts: all
  gather_facts: no
  tasks:
    - shell: echo "resolved actions test!"
"""
        },  # noqa
    ],
)
def test_resolved_actions(executor, playbook, skipif_pre_ansible212):  # pylint: disable=W0613,W0621
    """Task start events must carry the fully-qualified resolved action name."""
    executor.run()
    events = list(executor.events)

    # task 1
    assert events[2]["event"] == "playbook_on_task_start"
    assert "resolved_action" in events[2]["event_data"]
    assert events[2]["event_data"]["resolved_action"] == "ansible.builtin.shell"
@pytest.mark.parametrize("playbook", [
{'loop_with_no_log.yml': '''
- name: playbook variable should not be overwritten when using no log
  connection: local
  hosts: all
  gather_facts: no
  tasks:
    - command: "{{ item }}"
      register: command_register
      no_log: True
      with_items:
        - "echo helloworld!"
    - debug: msg="{{ command_register.results|map(attribute='stdout')|list }}"
'''},  # noqa
])
def test_callback_plugin_censoring_does_not_overwrite(executor, playbook):  # pylint: disable=W0613,W0621
    """A registered variable censored by no_log must still be usable by later tasks."""
    executor.run()
    events = list(executor.events)
    assert events[0]['event'] == 'playbook_on_start'
    assert events[1]['event'] == 'playbook_on_play_start'

    # task 1
    assert events[2]['event'] == 'playbook_on_task_start'
    # Ordering of task and item events may differ randomly
    assert set(['runner_on_start', 'runner_item_on_ok', 'runner_on_ok']) == {data['event'] for data in events[3:6]}

    # task 2 no_log=True
    assert events[6]['event'] == 'playbook_on_task_start'
    assert events[7]['event'] == 'runner_on_start'
    assert events[8]['event'] == 'runner_on_ok'
    assert 'helloworld!' in events[8]['event_data']['res']['msg']
@pytest.mark.parametrize('playbook', [
{'strip_env_vars.yml': '''
- name: sensitive environment variables should be stripped from events
  connection: local
  hosts: all
  tasks:
    - shell: echo "Hello, World!"
'''},  # noqa
])
def test_callback_plugin_strips_task_environ_variables(executor, playbook):  # pylint: disable=W0613,W0621
    """Host environment values (PATH used as the probe) must not appear in events."""
    executor.run()
    assert list(executor.events)
    for event in list(executor.events):
        assert os.environ['PATH'] not in json.dumps(event)
@pytest.mark.parametrize('playbook', [
{'custom_set_stat.yml': '''
- name: custom set_stat calls should persist to the local disk so awx can save them
  connection: local
  hosts: all
  tasks:
    - set_stats:
        data:
          foo: "bar"
'''},  # noqa
])
def test_callback_plugin_saves_custom_stats(executor, playbook):  # pylint: disable=W0613,W0621
    """set_stats data must surface as artifact_data in some recorded event."""
    executor.run()
    for event in executor.events:
        event_data = event.get('event_data', {})
        if 'artifact_data' in event_data:
            assert event_data['artifact_data'] == {'foo': 'bar'}
            break
    else:
        # for/else: no event carried the expected artifact data
        raise Exception('Did not find expected artifact data in event data')
@pytest.mark.parametrize('playbook', [
{'handle_playbook_on_notify.yml': '''
- name: handle playbook_on_notify events properly
  connection: local
  hosts: all
  handlers:
    - name: my_handler
      debug: msg="My Handler"
  tasks:
    - debug: msg="My Task"
      changed_when: true
      notify:
        - my_handler
'''},  # noqa
])
def test_callback_plugin_records_notify_events(executor, playbook):  # pylint: disable=W0613,W0621
    """Notifying a handler must produce exactly one playbook_on_notify event
    carrying the handler name, host and notifying task."""
    executor.run()
    assert list(executor.events)
    notify_events = [x for x in executor.events if x['event'] == 'playbook_on_notify']
    assert len(notify_events) == 1
    assert notify_events[0]['event_data']['handler'] == 'my_handler'
    assert notify_events[0]['event_data']['host'] == 'localhost'
    assert notify_events[0]['event_data']['task'] == 'debug'
{'no_log_module_with_var.yml': '''
- name: ensure that module-level secrets are redacted
connection: local
hosts: all
vars:
pw: SENSITIVE
tasks:
- uri:
url: https://example.org
url_username: john-jacob-jingleheimer-schmidt
url_password: "{{ pw }}"
'''}, # noqa
])
def test_module_level_no_log(executor, playbook): # pylint: disable=W0613,W0621
# It's possible for `no_log=True` to be defined at the _module_ level,
# e.g., for the URI module password parameter
# This test ensures that we properly redact those
executor.run()
assert list(executor.events)
assert 'john-jacob-jingleheimer-schmidt' in json.dumps(list(executor.events))
assert 'SENSITIVE' not in json.dumps(list(executor.events))
def test_output_when_given_invalid_playbook(tmp_path):
    """A run against a nonexistent playbook must still produce an error on stdout."""
    # As shown in the following issue:
    #
    #   https://github.com/ansible/ansible-runner/issues/29
    #
    # There was a lack of output by runner when a playbook that doesn't exist
    # is provided. This was fixed in this PR:
    #
    #   https://github.com/ansible/ansible-runner/pull/34
    #
    # But no test validated it. This does that.
    private_data_dir = str(tmp_path)
    ex = init_runner(
        private_data_dir=private_data_dir,
        inventory='localhost ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"',
        envvars={"ANSIBLE_DEPRECATION_WARNINGS": "False"},
        playbook=os.path.join(private_data_dir, 'fake_playbook.yml')
    )
    ex.run()
    with ex.stdout as f:
        stdout = f.read()
    assert "ERROR! the playbook:" in stdout
    assert "could not be found" in stdout
def test_output_when_given_non_playbook_script(tmp_path):
    """Raw stdout lines from a non-playbook command are captured as 'verbose' events."""
    # As shown in the following pull request:
    #
    #   https://github.com/ansible/ansible-runner/pull/256
    #
    # This ports some functionality that previously lived in awx and allows raw
    # lines of stdout to be treated as event lines.
    #
    # As mentioned in the pull request as well, there were no specs added, and
    # this is a retro-active test based on the sample repo provided in the PR:
    #
    #   https://github.com/AlanCoding/ansible-runner-examples/tree/master/non_playbook/sleep_with_writes
    private_data_dir = str(tmp_path)
    # The 'args' file tells runner which raw command to execute.
    with open(os.path.join(private_data_dir, "args"), 'w') as args_file:
        args_file.write("bash sleep_and_write.sh\n")
    with open(os.path.join(private_data_dir, "sleep_and_write.sh"), 'w') as script_file:
        script_file.write("echo 'hi world'\nsleep 0.5\necho 'goodbye world'\n")

    # Update the settings to make this test a bit faster :)
    os.mkdir(os.path.join(private_data_dir, "env"))
    with open(os.path.join(private_data_dir, "env", "settings"), 'w') as settings_file:
        settings_file.write("pexpect_timeout: 0.2")

    ex = init_runner(
        private_data_dir=private_data_dir,
        inventory='localhost ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"',
        envvars={"ANSIBLE_DEPRECATION_WARNINGS": "False"}
    )
    ex.run()
    with ex.stdout as f:
        stdout = f.readlines()
    assert stdout[0].strip() == "hi world"
    assert stdout[1].strip() == "goodbye world"

    # Each raw stdout line becomes one 'verbose' event.
    events = list(ex.events)
    assert len(events) == 2
    assert events[0]['event'] == 'verbose'
    assert events[0]['stdout'] == 'hi world'
    assert events[1]['event'] == 'verbose'
    assert events[1]['stdout'] == 'goodbye world'
@pytest.mark.parametrize('playbook', [
{'listvars.yml': '''
- name: List Variables
connection: local
hosts: localhost
gather_facts: false
tasks:
- name: Print a lot of lines
debug:
msg: "{{ ('F' * 150) | list }}"
'''}, # noqa
])
def test_large_stdout_parsing_when_using_json_output(executor, playbook):  # pylint: disable=W0613,W0621
    """Large single-event output must not corrupt stdout with partial non-json lines."""
    # When the json flag is used, it is possible to output more data than
    # pexpect's maxread default of 2000 characters. As a result, if not
    # handled properly, the stdout can end up being corrupted with partial
    # non-event matches with raw "non-json" lines being intermixed with json
    # ones.
    #
    # This tests to confirm we don't pollute the stdout output with non-json
    # lines when a single event has a lot of output.
    executor.config.env['ANSIBLE_NOCOLOR'] = str(True)
    executor.run()
    with executor.stdout as f:
        text = f.read()
    # The playbook prints a 150-element list of "F"; each element must appear exactly once.
    assert text.count('"F"') == 150
ansible-runner-2.4.1/test/integration/test_events.py 0000664 0000000 0000000 00000015076 14770573620 0022705 0 ustar 00root root 0000000 0000000 import json
import pytest
from ansible_runner import run, run_async
@pytest.mark.test_all_runtimes
@pytest.mark.parametrize('containerized', [True, False])
def test_basic_events(containerized, runtime, tmp_path, container_image, is_run_async=False, g_facts=False):
    """Run a trivial playbook (optionally containerized and/or async) and sanity-check the event stream."""
    inventory = 'localhost ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"'
    playbook = [{'hosts': 'all', 'gather_facts': g_facts, 'tasks': [{'debug': {'msg': "test"}}]}]
    run_args = {'private_data_dir': str(tmp_path),
                'inventory': inventory,
                'envvars': {"ANSIBLE_DEPRECATION_WARNINGS": "False", 'ANSIBLE_PYTHON_INTERPRETER': 'auto_silent'},
                'playbook': playbook}
    if containerized:
        run_args.update({'process_isolation': True,
                         'process_isolation_executable': runtime,
                         'container_image': container_image,
                         'container_volume_mounts': [f'{tmp_path}:{tmp_path}']})

    if not is_run_async:
        r = run(**run_args)
    else:
        thread, r = run_async(**run_args)
        thread.join()  # ensure async run finishes

    event_types = [x['event'] for x in r.events if x['event'] != 'verbose']
    okay_events = list(filter(lambda x: 'event' in x and x['event'] == 'runner_on_ok', r.events))

    # The event stream must begin with playbook_on_start and include the expected lifecycle events.
    assert event_types[0] == 'playbook_on_start'
    assert "playbook_on_play_start" in event_types
    assert "runner_on_ok" in event_types
    assert "playbook_on_stats" in event_types
    assert r.rc == 0

    if not is_run_async:
        assert len(okay_events) == 1
    else:
        # the async variant is invoked with g_facts=True, which adds a second ok event
        assert len(okay_events) == 2

    okay_event = okay_events[0]
    # uuid / parent_uuid are standard 36-char UUID strings.
    assert "uuid" in okay_event and len(okay_event['uuid']) == 36
    assert "parent_uuid" in okay_event and len(okay_event['parent_uuid']) == 36
    assert "stdout" in okay_event and len(okay_event['stdout']) > 0
    assert "start_line" in okay_event and int(okay_event['start_line']) > 0
    assert "end_line" in okay_event and int(okay_event['end_line']) > 0
    assert "event_data" in okay_event and len(okay_event['event_data']) > 0
@pytest.mark.test_all_runtimes
@pytest.mark.parametrize('containerized', [True, False])
def test_async_events(containerized, runtime, tmp_path, container_image):
    """Exercise test_basic_events through run_async with fact gathering enabled."""
    test_basic_events(containerized, runtime, tmp_path, container_image, is_run_async=True, g_facts=True)
def test_basic_serializeable(tmp_path):
    """The full event list from a run must be JSON-serializable."""
    hosts = 'localhost ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"'
    result = run(private_data_dir=str(tmp_path),
                 inventory=hosts,
                 playbook=[{'hosts': 'all', 'gather_facts': False, 'tasks': [{'debug': {'msg': "test"}}]}])
    # json.dumps raises TypeError if any event carries a non-serializable value.
    json.dumps(list(result.events))
def test_event_omission(tmp_path):
    """With omit_event_data=True, no non-verbose event should carry event_data."""
    hosts = 'localhost ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"'
    result = run(private_data_dir=str(tmp_path),
                 inventory=hosts,
                 omit_event_data=True,
                 playbook=[{'hosts': 'all', 'gather_facts': False, 'tasks': [{'debug': {'msg': "test"}}]}])
    non_verbose = [evt for evt in result.events if evt['event'] != 'verbose']
    assert not any(evt['event_data'] for evt in non_verbose)
def test_event_omission_except_failed(tmp_path):
    """With only_failed_event_data=True, only the failing event retains event_data."""
    hosts = 'localhost ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"'
    result = run(private_data_dir=str(tmp_path),
                 inventory=hosts,
                 only_failed_event_data=True,
                 playbook=[{'hosts': 'all', 'gather_facts': False, 'tasks': [{'fail': {'msg': "test"}}]}])
    non_verbose = [evt for evt in result.events if evt['event'] != 'verbose']
    populated = [evt['event_data'] for evt in non_verbose if evt['event_data']]
    assert len(populated) == 1
def test_runner_on_start(tmp_path):
    """Exactly one runner_on_start event should be emitted for a single task."""
    result = run(private_data_dir=str(tmp_path),
                 inventory='localhost ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"',
                 playbook=[{'hosts': 'all', 'gather_facts': False, 'tasks': [{'debug': {'msg': "test"}}]}])
    start_events = [evt for evt in result.events if evt.get('event') == 'runner_on_start']
    assert len(start_events) == 1
def test_playbook_on_stats_summary_fields(project_fixtures):
    """The stats of a run must populate every summary field with at least one host."""
    private_data_dir = project_fixtures / 'host_status'

    res = run(
        private_data_dir=private_data_dir,
        playbook='gen_host_status.yml'
    )
    # The fixture playbook intentionally fails hosts, so a non-zero rc is expected.
    assert res.rc != 0, res.stdout.read()

    EXPECTED_SUMMARY_FIELDS = ('changed', 'dark', 'failures', 'ignored', 'ok', 'rescued', 'skipped')

    runner_stats = res.stats
    for stat in EXPECTED_SUMMARY_FIELDS:
        assert stat in runner_stats
        assert runner_stats[stat]  # expected at least 1 host in each stat type
def test_include_role_events(project_fixtures):
    """Events from a local (non-collection) role carry no warnings and no resolved_role."""
    r = run(
        private_data_dir=str(project_fixtures / 'use_role'),
        playbook='use_role.yml'
    )
    role_events = [event for event in r.events if event.get('event_data', {}).get('role', '') == "benthomasson.hello_role"]
    assert 'runner_on_ok' in [event['event'] for event in role_events]
    for event in role_events:
        event_data = event['event_data']
        assert not event_data.get('warning', False)  # role use should not contain warnings
        assert 'resolved_role' not in event_data  # should not specify FQCN name if not from collection
        if event['event'] == 'runner_on_ok':
            assert event_data['res']['msg'] == 'Hello world!'
        if event['event'] == 'playbook_on_task_start':
            assert event_data['resolved_action'] == 'ansible.builtin.debug'
def test_include_role_from_collection_events(project_fixtures):
    """Events from a collection role include FQCN resolved_role/resolved_action fields."""
    r = run(
        private_data_dir=str(project_fixtures / 'collection_role'),
        playbook='use_role.yml'
    )
    for event in r.events:
        event_data = event['event_data']
        assert not event_data.get('warning', False)  # role use should not contain warnings
        if event['event'] in ('runner_on_ok', 'playbook_on_task_start', 'runner_on_start'):
            assert event_data['role'] == 'hello'
            assert event_data['resolved_role'] == 'groovy.peanuts.hello'
        if event['event'] == 'runner_on_ok':
            assert event_data['res']['msg'] == 'Hello peanuts!'
        if event['event'] == 'playbook_on_task_start':
            assert event_data['resolved_action'] == 'ansible.builtin.debug'
        if event['event'] == 'playbook_on_stats':
            # stats events are not task/role scoped, so no resolution fields
            assert 'resolved_role' not in event_data
            assert 'resolved_action' not in event_data
ansible-runner-2.4.1/test/integration/test_interface.py 0000664 0000000 0000000 00000044074 14770573620 0023341 0 ustar 00root root 0000000 0000000 import os
import shutil
import pytest
from ansible_runner.interface import (
get_ansible_config,
get_inventory,
get_plugin_docs,
get_plugin_docs_async,
get_plugin_list,
get_role_argspec,
get_role_list,
run,
run_async,
run_command,
run_command_async,
)
def test_run():
    """A bare module run against localhost should succeed."""
    result = run(module='debug', host_pattern='localhost')
    assert result.status == 'successful'
@pytest.mark.parametrize(
    'playbook', (
        [{'hosts': 'localhost', 'tasks': [{'ping': ''}]}],
        {'hosts': 'localhost', 'tasks': [{'ping': ''}]},
    )
)
def test_run_playbook_data(playbook, tmp_path):
    """run() accepts playbook data as either a list of plays or a single play dict."""
    r = run(private_data_dir=str(tmp_path), playbook=playbook)
    assert r.status == 'successful'
def test_run_async(tmp_path):
    """run_async should finish successfully once its worker thread is joined."""
    thread, result = run_async(private_data_dir=str(tmp_path), module='debug', host_pattern='localhost')
    thread.join()
    assert result.status == 'successful'
def test_repeat_run_with_new_inventory(project_fixtures):
    '''Repeat runs with different inventories should not fail, and the hosts file must be rewritten each time.'''
    private_data_dir = project_fixtures / 'debug'
    # Start from a clean slate so runner recreates inventory/hosts itself.
    shutil.rmtree(private_data_dir / 'inventory')
    hosts_file = private_data_dir / 'inventory' / 'hosts'

    res = run(
        private_data_dir=private_data_dir,
        playbook='debug.yml',
        inventory='localhost',
    )
    with res.stdout as f:
        stdout = f.read()
    assert res.rc == 0, stdout
    assert hosts_file.read_text() == 'localhost', 'hosts file content is incorrect'

    # Run again with a different inventory
    res = run(
        private_data_dir=private_data_dir,
        playbook='debug.yml',
        inventory='127.0.0.1',
    )
    with res.stdout as f:
        stdout = f.read()
    assert res.rc == 0, stdout
    assert hosts_file.read_text() == '127.0.0.1', 'hosts file content is incorrect'
def get_env_data(res):
    """Return the 'res' payload of the look_at_environment task from a runner result.

    Scans ``res.events`` for the first task-level event (runner_on_ok,
    runner_on_start or playbook_on_task_start) whose ``task_action`` is
    ``look_at_environment`` and that carries a ``res`` dict in its event_data.

    :param res: a runner result object exposing ``events`` and ``stdout``
    :raises RuntimeError: if no matching event is found; the run's stdout is
        printed first to aid debugging.
    """
    for event in res.events:
        found = bool(
            event['event'] in ('runner_on_ok', 'runner_on_start', 'playbook_on_task_start') and event.get(
                'event_data', {}
            ).get('task_action', None) == 'look_at_environment'
        )
        if found and 'res' in event['event_data']:
            return event['event_data']['res']
    print('output:')
    with res.stdout as f:
        print(f.read())
    # Fixed typo in the failure message ("Count not" -> "Could not").
    raise RuntimeError('Could not find look_at_environment task from playbook')
def test_env_accuracy(request, project_fixtures):
    """The environment reported by the playbook must match res.config.env exactly."""
    printenv_example = project_fixtures / 'printenv'
    os.environ['SET_BEFORE_TEST'] = 'MADE_UP_VALUE'

    # Remove the envvars file if it exists
    try:
        os.remove(printenv_example / "env/envvars")
    except FileNotFoundError:
        pass

    def remove_test_env_var():
        # finalizer: keep the test process environment clean
        if 'SET_BEFORE_TEST' in os.environ:
            del os.environ['SET_BEFORE_TEST']

    request.addfinalizer(remove_test_env_var)

    res = run(
        private_data_dir=printenv_example,
        playbook='get_environment.yml',
        inventory=None,
        envvars={'FROM_TEST': 'FOOBAR'},
    )
    with res.stdout as f:
        assert res.rc == 0, f.read()

    actual_env = get_env_data(res)['environment']

    assert actual_env == res.config.env

    # Assert that the env file was properly created
    assert os.path.exists(printenv_example / "env/envvars") == 1
def test_no_env_files(project_fixtures):
    """With suppress_env_files=True no env/envvars file should be written to disk."""
    printenv_example = project_fixtures / 'printenv'
    os.environ['SET_BEFORE_TEST'] = 'MADE_UP_VALUE'

    # Remove the envvars file if it exists
    try:
        os.remove(printenv_example / "env/envvars")
    except FileNotFoundError:
        pass

    res = run(
        private_data_dir=printenv_example,
        playbook='get_environment.yml',
        inventory=None,
        envvars={'FROM_TEST': 'FOOBAR'},
        suppress_env_files=True,
    )
    with res.stdout as f:
        assert res.rc == 0, f.read()

    # Assert that the env file was not created
    assert os.path.exists(printenv_example / "env/envvars") == 0
@pytest.mark.test_all_runtimes
def test_env_accuracy_inside_container(request, project_fixtures, runtime, container_image):
    """Env vars configured via settings-based process isolation must reach the container."""
    printenv_example = project_fixtures / 'printenv'
    os.environ['SET_BEFORE_TEST'] = 'MADE_UP_VALUE'

    def remove_test_env_var():
        # finalizer: keep the test process environment clean
        if 'SET_BEFORE_TEST' in os.environ:
            del os.environ['SET_BEFORE_TEST']

    request.addfinalizer(remove_test_env_var)

    res = run(
        private_data_dir=printenv_example,
        project_dir='/tmp',
        playbook='get_environment.yml',
        inventory=None,
        envvars={'FROM_TEST': 'FOOBAR'},
        settings={
            'process_isolation_executable': runtime,
            'process_isolation': True,
            'container_image': container_image,
        }
    )
    assert res.rc == 0, res.stdout.read()

    env_data = get_env_data(res)
    actual_env = env_data['environment']

    expected_env = res.config.env.copy()

    # NOTE: the reported environment for containerized jobs will not account for
    # all environment variables, particularly those set by the entrypoint script
    for key, value in expected_env.items():
        assert key in actual_env
        assert actual_env[key] == value, f'Reported value wrong for {key} env var'

    assert env_data['cwd'] == res.config.cwd
def test_multiple_inventories(project_fixtures):
    """Omitting inventory should read every inventory file in the inventory directory."""
    private_data_dir = project_fixtures / 'debug'

    res = run(
        private_data_dir=private_data_dir,
        playbook='debug.yml',
    )
    with res.stdout as f:
        stdout = f.read()
    assert res.rc == 0, stdout

    # providing no inventory should cause /inventory
    # to be used, reading both inventories in the directory
    assert 'host_1' in stdout
    assert 'host_2' in stdout
def test_inventory_absolute_path(project_fixtures):
    """An absolute inventory path down-selects the run to only that inventory's hosts."""
    private_data_dir = project_fixtures / 'debug'

    res = run(
        private_data_dir=private_data_dir,
        playbook='debug.yml',
        inventory=[
            str(private_data_dir / 'inventory' / 'inv_1'),
        ],
    )
    with res.stdout as f:
        stdout = f.read()
    assert res.rc == 0, stdout

    # hosts can be down-selected to one inventory out of those available
    assert 'host_1' in stdout
    assert 'host_2' not in stdout
def test_run_command(project_fixtures):
    """run_command executes ansible-playbook directly and returns (out, err, rc)."""
    private_data_dir = project_fixtures / 'debug'
    inventory = private_data_dir / 'inventory' / 'inv_1'
    playbook = private_data_dir / 'project' / 'debug.yml'
    out, err, rc = run_command(
        private_data_dir=private_data_dir,
        executable_cmd='ansible-playbook',
        cmdline_args=[str(playbook), '-i', str(inventory)]
    )
    assert "Hello world!" in out
    assert rc == 0
    assert err == ''
def test_run_command_injection_error():
    """Shell metacharacters in cmdline_args must be passed literally, not interpreted."""
    _, err, rc = run_command(
        executable_cmd='whoami',
        cmdline_args=[';hostname'],
        runner_mode='subprocess',
    )
    assert rc == 1
    # error text differs across whoami implementations (BSD vs GNU coreutils)
    assert "usage: whoami" in err or "whoami: extra operand ‘;hostname’" in err
@pytest.mark.test_all_runtimes
def test_run_command_injection_error_within_container(runtime, container_image):
    """Command injection must also be prevented when running inside a container."""
    _, err, rc = run_command(
        executable_cmd='whoami',
        cmdline_args=[';hostname'],
        runner_mode='subprocess',
        process_isolation_executable=runtime,
        process_isolation=True,
        container_image=container_image,
    )
    assert rc == 1
    assert "whoami: extra operand ';hostname'" in err
@pytest.mark.test_all_runtimes
def test_run_ansible_command_within_container(project_fixtures, runtime, container_image):
    """run_command can execute ansible-playbook under process isolation."""
    private_data_dir = project_fixtures / 'debug'
    inventory = private_data_dir / 'inventory' / 'inv_1'
    playbook = private_data_dir / 'project' / 'debug.yml'
    container_kwargs = {
        'process_isolation_executable': runtime,
        'process_isolation': True,
        'container_image': container_image,
    }
    out, err, rc = run_command(
        private_data_dir=private_data_dir,
        executable_cmd='ansible-playbook',
        cmdline_args=[str(playbook), '-i', str(inventory)],
        **container_kwargs
    )
    assert "Hello world!" in out
    assert rc == 0
    assert err == ''
@pytest.mark.test_all_runtimes
def test_run_script_within_container(project_fixtures, runtime, container_image):
    """An arbitrary python script can run in the container when its path is volume-mounted."""
    private_data_dir = project_fixtures / 'debug'
    script_path = project_fixtures / 'files'
    container_volume_mounts = [f"{script_path}:{script_path}:Z"]
    container_kwargs = {
        'process_isolation_executable': runtime,
        'process_isolation': True,
        'container_image': container_image,
        'container_volume_mounts': container_volume_mounts
    }
    out, _, rc = run_command(
        private_data_dir=private_data_dir,
        executable_cmd='python3',
        cmdline_args=[str(script_path / 'test_ee.py')],
        **container_kwargs
    )

    assert "os-release" in out
    assert rc == 0
def test_run_command_async(project_fixtures):
    """run_command_async returns a thread plus a runner whose stdout is readable after join."""
    private_data_dir = project_fixtures / 'debug'
    inventory = private_data_dir / 'inventory' / 'inv_1'
    playbook = private_data_dir / 'project' / 'debug.yml'
    thread, r = run_command_async(
        private_data_dir=private_data_dir,
        executable_cmd='ansible-playbook',
        cmdline_args=[str(playbook), '-i', str(inventory)]
    )
    thread.join()
    with r.stdout as f:
        out = f.read()
    assert "Hello world!" in out
    assert r.status == 'successful'
def test_get_plugin_docs():
    """Docs for multiple modules can be fetched in a single call."""
    docs, _ = get_plugin_docs(
        plugin_names=['file', 'copy'],
        plugin_type='module',
        quiet=True
    )
    for module_name in ('copy', 'file'):
        assert module_name in docs
def test_get_plugin_docs_async():
    """get_plugin_docs_async completes successfully and returns the docs on stdout."""
    thread, r = get_plugin_docs_async(
        plugin_names=['file', 'copy'],
        plugin_type='module',
        quiet=True
    )
    thread.join()
    with r.stdout as f:
        out = f.read()

    assert 'copy' in out
    assert 'file' in out
    assert r.status == 'successful'
@pytest.mark.test_all_runtimes
def test_get_plugin_docs_within_container(runtime, container_image):
    """Plugin documentation can be retrieved from inside a container."""
    container_kwargs = {
        'process_isolation_executable': runtime,
        'process_isolation': True,
        'container_image': container_image,
    }
    out, _ = get_plugin_docs(
        plugin_names=['file', 'copy'],
        plugin_type='module',
        quiet=True,
        **container_kwargs
    )

    assert 'copy' in out
    assert 'file' in out
def test_get_plugin_docs_list():
    """The plugin listing should include the core file/copy modules."""
    listing, _ = get_plugin_list(
        list_files=True,
        quiet=True
    )
    for module_name in ('copy', 'file'):
        assert module_name in listing
@pytest.mark.test_all_runtimes
def test_get_plugin_docs_list_within_container(runtime, container_image):
    """The plugin listing can be retrieved from inside a container."""
    container_kwargs = {
        'process_isolation_executable': runtime,
        'process_isolation': True,
        'container_image': container_image,
    }
    out, _ = get_plugin_list(
        list_files=True,
        quiet=True,
        **container_kwargs
    )

    assert 'copy' in out
    assert 'file' in out
def test_ansible_config():
    """'ansible-config list' output should include well-known settings."""
    config_listing, _ = get_ansible_config(
        action='list',
        quiet=True
    )
    assert 'DEFAULT_VERBOSITY' in config_listing
def test_get_inventory(project_fixtures):
    """get_inventory merges hosts from all provided inventories into the JSON response."""
    private_data_dir = project_fixtures / 'debug'
    inventory1 = private_data_dir / 'inventory' / 'inv_1'
    inventory2 = private_data_dir / 'inventory' / 'inv_2'
    out, _ = get_inventory(
        action='list',
        inventories=[str(inventory1), str(inventory2)],
        response_format='json',
        quiet=True
    )

    assert 'host_1' in out['ungrouped']['hosts']
    assert 'host_2' in out['ungrouped']['hosts']
@pytest.mark.test_all_runtimes
def test_get_inventory_within_container(project_fixtures, runtime, container_image):
    """get_inventory works under process isolation as well."""
    container_kwargs = {
        'process_isolation_executable': runtime,
        'process_isolation': True,
        'container_image': container_image,
    }
    private_data_dir = project_fixtures / 'debug'
    inventory1 = private_data_dir / 'inventory' / 'inv_1'
    inventory2 = private_data_dir / 'inventory' / 'inv_2'
    out, _ = get_inventory(
        action='list',
        inventories=[str(inventory1), str(inventory2)],
        response_format='json',
        quiet=True,
        **container_kwargs
    )

    assert 'host_1' in out['ungrouped']['hosts']
    assert 'host_2' in out['ungrouped']['hosts']
def test_run_role(project_fixtures):
    ''' Test that we can run a role directly via the API (run(role=...)). '''
    private_data_dir = project_fixtures / 'debug'
    res = run(
        private_data_dir=private_data_dir,
        role='hello_world',
    )
    with res.stdout as f:
        stdout = f.read()

    assert res.rc == 0, stdout
    assert 'Hello World!' in stdout
# pylint: disable=W0613
def test_get_role_list(project_fixtures, skipif_pre_ansible211):
    """
    Test get_role_list() running locally, specifying a playbook directory
    containing our test role.
    """
    pdir = str(project_fixtures / 'music' / 'project')
    expected = {
        "main": "The main entry point for the Into_The_Mystic role."
    }

    resp, _ = get_role_list(playbook_dir=pdir)
    assert isinstance(resp, dict)

    # So that tests can work locally, where multiple roles might be returned,
    # we check for this single role.
    assert 'Into_The_Mystic' in resp
    assert 'entry_points' in resp['Into_The_Mystic']
    assert resp['Into_The_Mystic']['entry_points'] == expected
@pytest.mark.test_all_runtimes
def test_get_role_list_within_container(project_fixtures, runtime, skipif_pre_ansible211, container_image):
    """
    Test get_role_list() running in a container.
    """
    pdir = str(project_fixtures / 'music')
    expected = {
        "Into_The_Mystic": {
            "collection": "",
            "entry_points": {
                "main": "The main entry point for the Into_The_Mystic role."
            }
        }
    }
    container_kwargs = {
        'process_isolation_executable': runtime,
        'process_isolation': True,
        'container_image': container_image,
    }

    resp, _ = get_role_list(private_data_dir=pdir, playbook_dir="/runner/project", **container_kwargs)
    assert isinstance(resp, dict)
    # The container image ships no other roles, so an exact comparison is safe here.
    assert resp == expected
def test_get_role_argspec(project_fixtures, skipif_pre_ansible211):
    """
    Test get_role_argspec() running locally, specifying a playbook directory
    containing our test role.
    """
    use_role_example = str(project_fixtures / 'music' / 'project')
    # Expected argument_specs of the fixture role's main entry point.
    expected_epoint = {
        "main": {
            "options": {
                "foghorn": {
                    "default": True,
                    "description": "If true, the foghorn blows.",
                    "required": False,
                    "type": "bool"
                },
                "soul": {
                    "choices": [
                        "gypsy",
                        "normal"
                    ],
                    "description": "Type of soul to rock",
                    "required": True,
                    "type": "str"
                }
            },
            "short_description": "The main entry point for the Into_The_Mystic role."
        }
    }

    resp, _ = get_role_argspec('Into_The_Mystic', playbook_dir=use_role_example)
    assert isinstance(resp, dict)
    assert 'Into_The_Mystic' in resp
    assert resp['Into_The_Mystic']['entry_points'] == expected_epoint
@pytest.mark.test_all_runtimes
def test_get_role_argspec_within_container(project_fixtures, runtime, skipif_pre_ansible211, container_image):
    """
    Test get_role_argspec() running inside a container. Since the test container
    does not currently contain any collections or roles, specify playbook_dir
    pointing to the project dir of private_data_dir so that we will find a role.
    """
    pdir = str(project_fixtures / 'music')
    # Expected argument_specs of the fixture role's main entry point.
    expected_epoint = {
        "main": {
            "options": {
                "foghorn": {
                    "default": True,
                    "description": "If true, the foghorn blows.",
                    "required": False,
                    "type": "bool"
                },
                "soul": {
                    "choices": [
                        "gypsy",
                        "normal"
                    ],
                    "description": "Type of soul to rock",
                    "required": True,
                    "type": "str"
                }
            },
            "short_description": "The main entry point for the Into_The_Mystic role."
        }
    }
    container_kwargs = {
        'process_isolation_executable': runtime,
        'process_isolation': True,
        'container_image': container_image,
    }

    resp, _ = get_role_argspec('Into_The_Mystic', private_data_dir=pdir, playbook_dir="/runner/project", **container_kwargs)
    assert isinstance(resp, dict)
    assert 'Into_The_Mystic' in resp
    assert resp['Into_The_Mystic']['entry_points'] == expected_epoint
class TestRelativePvtDataDirPaths:
    """
    Class to handle test setup/teardown of tests that need to change working
    directory to test relative paths.
    """

    def setup_method(self):
        # Remember the original working directory so each test can chdir freely.
        self._old_workdir = os.getcwd()  # pylint: disable=W0201

    def teardown_method(self):
        # Restore the working directory changed by the test.
        os.chdir(self._old_workdir)

    def test_inventory_as_string(self, project_fixtures):
        """
        Test of bug fix for GH issue #1216: https://github.com/ansible/ansible-runner/issues/1216

        A relative private data directory combined with an inventory specified as a string
        would produce an invalid inventory path being passed along to ansible.
        """
        os.chdir(str(project_fixtures))
        inventory = 'hostA ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"'
        r = run(private_data_dir='debug',
                inventory=inventory,
                playbook='debug.yml')
        with r.stdout as output:
            text = output.read()
        assert r.status == 'successful'
        assert "No inventory was parsed" not in text

    def test_default_inventory(self, project_fixtures):
        """
        Test relative pvt data dir with the default inventory.
        """
        os.chdir(str(project_fixtures))
        r = run(private_data_dir='debug', playbook='debug.yml')
        with r.stdout as output:
            text = output.read()
        assert r.status == 'successful'
        assert "No inventory was parsed" not in text
ansible-runner-2.4.1/test/integration/test_main.py 0000664 0000000 0000000 00000013563 14770573620 0022324 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
import multiprocessing
from test.utils.common import iterate_timeout
import pytest
import yaml
from ansible_runner.__main__ import main
@pytest.mark.parametrize(
    ('command', 'expected'),
    (
        (None, {'out': 'These are common Ansible Runner commands', 'err': ''}),
        ([], {'out': 'These are common Ansible Runner commands', 'err': ''}),
        (['run'], {'out': '', 'err': 'the following arguments are required'}),
    )
)
def test_help(command, expected, capsys, monkeypatch):
    """Invoking main with missing/partial args exits with code 2 and prints usage/help."""
    # Ensure that sys.argv of the test command does not affect the test environment.
    monkeypatch.setattr('sys.argv', command or [])
    with pytest.raises(SystemExit) as exc:
        main(command)
    stdout, stderr = capsys.readouterr()
    assert exc.value.code == 2, 'Should raise SystemExit with return code 2'
    assert expected['out'] in stdout
    assert expected['err'] in stderr
def test_module_run(tmp_path):
    """'run -m ping' creates the private data dir with artifacts and exits 0."""
    private_data_dir = tmp_path / 'ping'
    rc = main(['run', '-m', 'ping',
               '--hosts', 'localhost',
               str(private_data_dir)])

    assert private_data_dir.exists()
    assert private_data_dir.joinpath('artifacts').exists()
    assert rc == 0
def test_module_run_debug(tmp_path):
    """A --debug module run still creates the data dir and artifacts, and exits 0."""
    data_dir = tmp_path / 'ping'
    exit_code = main(['run', '-m', 'ping',
                      '--hosts', 'localhost',
                      '--debug',
                      str(data_dir)])
    assert data_dir.exists()
    assert data_dir.joinpath('artifacts').exists()
    assert exit_code == 0
def test_module_run_clean(tmp_path):
    """A module run pointed at an existing empty directory exits 0."""
    rc = main(['run', '-m', 'ping',
               '--hosts', 'localhost',
               str(tmp_path)])

    assert rc == 0
def test_role_run(project_fixtures):
    """'run -r' executes a role and produces an artifacts directory."""
    rc = main(['run', '-r', 'benthomasson.hello_role',
               '--hosts', 'localhost',
               '--roles-path', str(project_fixtures / 'use_role' / 'roles'),
               str(project_fixtures / 'use_role')])

    artifact_dir = project_fixtures / 'use_role' / 'artifacts'
    assert artifact_dir.exists()
    assert rc == 0
def test_role_logfile(project_fixtures):
    """--logfile writes a log file during a role run."""
    logfile = project_fixtures / 'use_role' / 'test_role_logfile'
    rc = main(['run', '-r', 'benthomasson.hello_role',
               '--hosts', 'localhost',
               '--roles-path', str(project_fixtures / 'use_role' / 'roles'),
               '--logfile', str(logfile),
               str(project_fixtures / 'use_role')])

    assert logfile.exists()
    assert rc == 0
def test_role_bad_project_dir(tmp_path, project_fixtures):
    """Passing a regular file as the private data dir raises OSError."""
    bad_project_path = tmp_path / "bad_project_dir"
    # create a plain file where a directory is expected
    bad_project_path.write_text('not a directory')

    with pytest.raises(OSError):
        main(['run', '-r', 'benthomasson.hello_role',
              '--hosts', 'localhost',
              '--roles-path', str(project_fixtures / 'use_role' / 'roles'),
              '--logfile', str(project_fixtures / 'use_role' / 'new_logfile'),
              str(bad_project_path)])
@pytest.mark.parametrize('envvars', [
    {'msg': 'hi'},
    {
        'msg': 'utf-8-䉪ቒ칸ⱷ?噂폄蔆㪗輥',
        '蔆㪗輥': '䉪ቒ칸'
    }],
    ids=['regular-text', 'utf-8-text']
)
def test_role_run_env_vars(envvars, project_fixtures):
    """Role runs must tolerate extra env vars, including non-ASCII names and values."""
    env_path = project_fixtures / 'use_role' / 'env'
    env_vars = env_path / 'envvars'
    # append the parametrized vars to the fixture's envvars file
    with env_vars.open('a', encoding='utf-8') as f:
        f.write(yaml.dump(envvars))

    rc = main(['run', '-r', 'benthomasson.hello_role',
               '--hosts', 'localhost',
               '--roles-path', str(project_fixtures / 'use_role' / 'roles'),
               str(project_fixtures / 'use_role')])
    assert rc == 0
def test_role_run_args(project_fixtures):
    """--role-vars passes variables through to the role."""
    rc = main(['run', '-r', 'benthomasson.hello_role',
               '--hosts', 'localhost',
               '--roles-path', str(project_fixtures / 'use_role' / 'roles'),
               '--role-vars', 'msg=hi',
               str(project_fixtures / 'use_role')])
    assert rc == 0
def test_role_run_inventory(project_fixtures):
    """--inventory with an existing inventory file succeeds."""
    rc = main(['run', '-r', 'benthomasson.hello_role',
               '--hosts', 'testhost',
               '--roles-path', str(project_fixtures / 'use_role' / 'roles'),
               '--inventory', str(project_fixtures / 'use_role' / 'inventory'),
               str(project_fixtures / 'use_role')])

    assert rc == 0
def test_role_run_inventory_missing(project_fixtures):
    """A nonexistent --inventory path causes SystemExit with code 1."""
    with pytest.raises(SystemExit) as exc:
        main(['run', '-r', 'benthomasson.hello_role',
              '--hosts', 'testhost',
              '--roles-path', str(project_fixtures / 'use_role' / 'roles'),
              '--inventory', 'does_not_exist',
              str(project_fixtures / 'use_role')])

    assert exc.value.code == 1
def test_role_start(project_fixtures):
    """'start' with a role forks a background process that finishes cleanly."""
    # fork context is required because main() is not picklable-safe for spawn
    mpcontext = multiprocessing.get_context('fork')
    p = mpcontext.Process(
        target=main,
        args=[[
            'start',
            '-r', 'benthomasson.hello_role',
            '--hosts', 'localhost',
            '--roles-path', str(project_fixtures / 'use_role' / 'roles'),
            str(project_fixtures / 'use_role'),
        ]]
    )
    p.start()
    p.join()
def test_playbook_start(project_fixtures):
    """'start' daemonizes a playbook run; is-alive/stop manage it via the pid file."""
    private_data_dir = project_fixtures / 'sleep'
    mpcontext = multiprocessing.get_context('fork')
    p = mpcontext.Process(
        target=main,
        args=[[
            'start',
            '-p', 'sleep.yml',
            str(private_data_dir),
        ]]
    )
    p.start()

    # wait for the daemonized process to write its pid file
    pid_path = private_data_dir / 'pid'
    for _ in iterate_timeout(30, "pid file creation"):
        if pid_path.exists():
            break

    rc = main(['is-alive', str(private_data_dir)])
    assert rc == 0
    rc = main(['stop', str(private_data_dir)])
    assert rc == 0

    for _ in iterate_timeout(30, "background process to stop"):
        rc = main(['is-alive', str(private_data_dir)])
        if rc == 1:
            break

    # a second stop against an already-dead process reports failure
    rc = main(['stop', str(private_data_dir)])
    assert rc == 1
ansible-runner-2.4.1/test/integration/test_runner.py 0000664 0000000 0000000 00000022024 14770573620 0022701 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
import json
import os
import re
import sys
import time
from test.utils.common import iterate_timeout
import pytest
from ansible_runner import Runner, run
from ansible_runner.exceptions import AnsibleRunnerException
@pytest.mark.xfail(reason='Test is unstable')
def test_password_prompt(rc):
    """Runner should answer an interactive password prompt via expect_passwords."""
    rc.command = [sys.executable, '-c', 'import time; print(input("Password: "))']
    rc.expect_passwords[re.compile(r'Password:\s*?$', re.M)] = '1234'
    status, exitcode = Runner(config=rc).run()
    assert status == 'successful'
    assert exitcode == 0
    # stdout file can be subject to a race condition
    for _ in iterate_timeout(30.0, 'stdout file to be written with 1234 in it', interval=0.2):
        with open(os.path.join(rc.artifact_dir, 'stdout')) as f:
            if '1234' in f.read():
                break
def test_run_command(rc):
    """A successful run records the command, cwd and env in the artifact dir."""
    rc.command = ['sleep', '1']
    status, exitcode = Runner(config=rc).run()
    assert status == 'successful'
    assert exitcode == 0

    with open(os.path.join(rc.artifact_dir, 'command')) as f:
        data = json.load(f)
    assert data.get('command') == ['sleep', '1']
    assert 'cwd' in data
    assert isinstance(data.get('env'), dict)
def test_run_command_with_unicode(rc):
    """Non-ASCII command arguments and env var names must round-trip intact."""
    expected = '"utf-8-䉪ቒ칸ⱷ?噂폄蔆㪗輥"'
    rc.command = ['echo', '"utf-8-䉪ቒ칸ⱷ?噂폄蔆㪗輥"']
    rc.envvars = {"䉪ቒ칸": "蔆㪗輥"}
    rc.prepare_env()
    status, exitcode = Runner(config=rc).run()
    assert status == 'successful'
    assert exitcode == 0

    with open(os.path.join(rc.artifact_dir, 'command')) as f:
        data = json.load(f)
    assert data.get('command') == ['echo', expected]
    assert 'cwd' in data
    assert isinstance(data.get('env'), dict)
    assert "䉪ቒ칸" in data.get('env')
def test_run_command_finished_callback(rc, mocker):
    """finished_callback is invoked with the runner instance after a successful run."""
    finished_callback = mocker.MagicMock()
    rc.command = ['sleep', '1']
    runner = Runner(config=rc, finished_callback=finished_callback)
    status, exitcode = runner.run()
    assert status == 'successful'
    assert exitcode == 0
    finished_callback.assert_called_with(runner)
def test_run_command_explosive_finished_callback(rc):
    """An exception raised from finished_callback must propagate out of run()."""
    def detonate(*_args):
        raise Exception('boom')
    rc.command = ['sleep', '1']
    with pytest.raises(Exception):
        Runner(config=rc, finished_callback=detonate).run()
def test_run_command_explosive_cancel_callback(rc):
    """An exception raised by cancel_callback must propagate out of run()."""
    def boom(*args):
        raise Exception('boom')

    rc.command = ['sleep', '1']
    runner = Runner(config=rc, cancel_callback=boom)
    # match='boom' pins the failure to the callback's exception; a bare
    # pytest.raises(Exception) would also pass on any unrelated error.
    with pytest.raises(Exception, match='boom'):
        runner.run()
def test_run_command_cancel_callback(rc):
    """A cancel_callback returning True cancels the run (exit code 254)."""
    def cancel(*args):  # pylint: disable=W0613
        return True
    rc.command = ['sleep', '1']
    runner = Runner(config=rc, cancel_callback=cancel)
    status, exitcode = runner.run()
    assert status == 'canceled'
    assert exitcode == 254


def test_run_command_job_timeout(rc):
    """Exceeding job_timeout yields 'timeout' status and exit code 254."""
    rc.command = ['sleep', '1']
    rc.job_timeout = 0.0000001
    runner = Runner(config=rc)
    status, exitcode = runner.run()
    assert status == 'timeout'
    assert exitcode == 254


def test_run_command_idle_timeout(rc):
    """Exceeding idle_timeout (no output) yields 'timeout' / 254 as well."""
    rc.command = ['sleep', '1']
    rc.idle_timeout = 0.0000001
    runner = Runner(config=rc)
    status, exitcode = runner.run()
    assert status == 'timeout'
    assert exitcode == 254
def test_run_command_failed(rc):
    """A command that exits non-zero is reported as failed with its exit code."""
    rc.command = ['false']
    result = Runner(config=rc).run()
    assert result == ('failed', 1)
def test_executable_not_found(rc):
    """A missing executable fails with shell-convention exit code 127."""
    rc.command = ['supercalifragilistic']
    runner = Runner(config=rc)
    status, exitcode = runner.run()
    assert status == 'failed'
    assert exitcode == 127
    # A single synthetic event carries the human-readable explanation.
    events = list(runner.events)
    assert len(events) == 1
    assert 'The command was not found or was not executable: supercalifragilistic' in events[0]['stdout']  # noqa
def test_run_command_long_running(rc):
    """A never-ending command is terminated as a timeout (254)."""
    rc.command = ['yes']
    runner = Runner(config=rc)
    status, exitcode = runner.run()
    assert status == 'timeout'
    assert exitcode == 254


def test_run_command_long_running_children(rc):
    """Long-running child processes spawned in a subshell are also reaped."""
    rc.command = ['bash', '-c', "(yes)"]
    runner = Runner(config=rc)
    status, exitcode = runner.run()
    assert status == 'timeout'
    assert exitcode == 254


def test_run_command_events_missing(rc):
    """A plain (non-ansible) command produces no job events."""
    rc.command = ['sleep', '1']
    runner = Runner(config=rc)
    status, exitcode = runner.run()
    assert status == 'successful'
    assert exitcode == 0
    assert not list(runner.events)
def test_run_command_stdout_missing(rc):
    """Accessing stdout after the artifact file is removed raises."""
    rc.command = ['sleep', '1']
    runner = Runner(config=rc)
    status, exitcode = runner.run()
    assert status == 'successful'
    assert exitcode == 0
    # Simulate a lost artifact; the stdout property should complain.
    os.unlink(os.path.join(runner.config.artifact_dir, 'stdout'))
    with pytest.raises(AnsibleRunnerException):
        list(runner.stdout)


def test_run_command_no_stats(rc):
    """A plain command run produces no ansible stats."""
    rc.command = ['sleep', '1']
    runner = Runner(config=rc)
    status, exitcode = runner.run()
    assert status == 'successful'
    assert exitcode == 0
    assert runner.stats is None
def test_run_command_ansible(rc):
    """An adhoc module run produces events, stats, host events, and stdout."""
    rc.module = "debug"
    rc.host_pattern = "localhost"
    rc.prepare()
    runner = Runner(config=rc)
    status, exitcode = runner.run()
    assert status == 'successful'
    assert exitcode == 0
    assert list(runner.events)
    assert runner.stats != {}
    assert list(runner.host_events('localhost')), repr(list(runner.events))
    with runner.stdout as f:
        stdout = f.read()
        assert stdout != ""


def test_run_command_ansible_event_handler(rc, mocker):
    """event_handler and status_handler are both invoked during an ansible run."""
    event_handler = mocker.MagicMock()
    status_handler = mocker.MagicMock()
    rc.module = "debug"
    rc.host_pattern = "localhost"
    rc.prepare()
    runner = Runner(config=rc, event_handler=event_handler, status_handler=status_handler)
    status, exitcode = runner.run()
    assert status == 'successful'
    assert exitcode == 0
    event_handler.assert_called()
    status_handler.assert_called()
def test_run_command_ansible_event_handler_failure(rc):
    """A raising event_handler does not fail the run itself."""
    def event_handler(*args):
        raise IOError()
    rc.module = "debug"
    rc.host_pattern = "localhost"
    rc.prepare()
    runner = Runner(config=rc, event_handler=event_handler)
    status, exitcode = runner.run()
    assert status == 'successful'
    assert exitcode == 0


def test_run_command_ansible_rotate_artifacts(rc):
    """Two consecutive runs succeed with rotate_artifacts=1 (old dirs pruned)."""
    rc.module = "debug"
    rc.host_pattern = "localhost"
    rc.prepare()
    rc.rotate_artifacts = 1
    runner = Runner(config=rc)
    status, exitcode = runner.run()
    assert status == 'successful'
    assert exitcode == 0
    runner = Runner(config=rc)
    status, exitcode = runner.run()
    assert status == 'successful'
    assert exitcode == 0
def test_get_fact_cache(rc):
    """Running the setup module populates the per-host jsonfile fact cache."""
    assert os.path.basename(rc.fact_cache) == 'fact_cache'
    rc.module = "setup"
    rc.host_pattern = "localhost"
    rc.prepare()
    runner = Runner(config=rc)
    status, exitcode = runner.run()
    assert status == 'successful'
    assert exitcode == 0
    # Facts are written under <artifact_dir>/fact_cache/<hostname>.
    assert os.path.exists(os.path.join(rc.artifact_dir, 'fact_cache'))
    assert os.path.exists(os.path.join(rc.artifact_dir, 'fact_cache', 'localhost'))
    data = runner.get_fact_cache('localhost')
    assert data
def test_set_fact_cache(rc):
    """Facts seeded via set_fact_cache are visible to the play and readable back."""
    assert os.path.basename(rc.fact_cache) == 'fact_cache'
    rc.module = "debug"
    rc.module_args = "var=message"
    rc.host_pattern = "localhost"
    rc.prepare()
    runner = Runner(config=rc)
    # Pre-seed the cache so the debug module can resolve 'message'.
    runner.set_fact_cache('localhost', {'message': 'hello there'})
    status, exitcode = runner.run()
    assert status == 'successful'
    assert exitcode == 0
    assert os.path.exists(os.path.join(rc.artifact_dir, 'fact_cache'))
    assert os.path.exists(os.path.join(rc.artifact_dir, 'fact_cache', 'localhost'))
    data = runner.get_fact_cache('localhost')
    assert data['message'] == 'hello there'
def test_set_extra_vars(rc):
    """extra_vars supplied on the config reach the playbook/module run."""
    rc.module = "debug"
    rc.module_args = "var=test_extra_vars"
    rc.host_pattern = "localhost"
    rc.extra_vars = {'test_extra_vars': 'hello there'}
    rc.prepare()
    runner = Runner(config=rc)
    runner.run()
    # stdout file can be subject to a race condition
    for _ in iterate_timeout(30.0, 'stdout file to be written with "hello there" in it', interval=0.2):
        with open(os.path.join(rc.artifact_dir, 'stdout')) as f:
            if 'hello there' in f.read():
                break
# regression test for https://github.com/ansible/ansible-runner/issues/1330
def test_pexpect_timeout(project_fixtures):
    """playbook_on_stats must not be lost when the child exits between polls."""
    r = run(
        private_data_dir=str(project_fixtures / 'pexpect_timeout_data_loss'),
        playbook='pb.yml',
        settings={"pexpect_timeout": 0.1},  # set the pexpect timeout very low
        cancel_callback=lambda: time.sleep(3) or False,  # induce enough delay in the child polling loop that the child will exit before being polled again
    )
    # ensure we got playbook_on_stats; if pexpect ate it, we won't...
    assert any(ev for ev in r.events if ev.get('event', None) == 'playbook_on_stats')
ansible-runner-2.4.1/test/integration/test_transmit_worker_process.py 0000664 0000000 0000000 00000046770 14770573620 0026376 0 ustar 00root root 0000000 0000000 import base64
import io
import json
import os
import socket
import concurrent.futures
import time
import threading
import pytest
from ansible_runner import run
from ansible_runner.streaming import Transmitter, Worker, Processor
import ansible_runner.interface # AWX import pattern
class TestStreamingUsage:
    """End-to-end tests of the Transmitter -> Worker -> Processor streaming flow."""
    # pylint: disable=W0201

    @pytest.fixture(autouse=True)
    def reset_self_props(self):
        # Clear the status captured by status_handler between tests.
        self.status_data = None

    def status_handler(self, status_data, runner_config=None):  # pylint: disable=W0613
        # Record the most recent status payload for later assertions.
        self.status_data = status_data

    def get_job_kwargs(self, job_type):
        """For this test scenario, the ansible-runner interface kwargs"""
        if job_type == 'run':
            job_kwargs = {'playbook': 'debug.yml'}
        else:
            job_kwargs = {'module': 'setup', 'host_pattern': 'localhost'}
        # also test use of user env vars
        job_kwargs['envvars'] = {'MY_ENV_VAR': 'bogus'}
        return job_kwargs

    @staticmethod
    def get_stdout(process_dir):
        # Rebuild the job's stdout by concatenating the per-event JSON artifacts.
        events_dir = os.path.join(process_dir, 'artifacts', 'job_events')
        events = []
        for file in os.listdir(events_dir):
            with open(os.path.join(events_dir, file), 'r') as f:
                if file in ('status', 'rc'):
                    continue
                content = f.read()
                events.append(json.loads(content))
        return '\n'.join(event['stdout'] for event in events)

    @staticmethod
    def check_artifacts(process_dir, job_type):
        # The processor-side directory should contain only the artifacts tree.
        assert set(os.listdir(process_dir)) == {'artifacts', }
        stdout = TestStreamingUsage.get_stdout(process_dir)
        if job_type == 'run':
            assert 'Hello world!' in stdout
        else:
            assert '"ansible_facts"' in stdout

    @pytest.mark.parametrize("job_type", ['run', 'adhoc'])
    def test_remote_job_interface(self, tmp_path, project_fixtures, job_type):
        """Drive all three streaming stages by hand through file-backed buffers."""
        transmit_dir = project_fixtures / 'debug'
        worker_dir = tmp_path / 'for_worker'
        worker_dir.mkdir()
        process_dir = tmp_path / 'for_process'
        process_dir.mkdir()
        job_kwargs = self.get_job_kwargs(job_type)
        outgoing_buffer_file = tmp_path / 'buffer_out'
        outgoing_buffer_file.touch()
        outgoing_buffer = outgoing_buffer_file.open('b+r')
        transmitter = Transmitter(_output=outgoing_buffer, private_data_dir=transmit_dir, **job_kwargs)
        for key, value in job_kwargs.items():
            assert transmitter.kwargs.get(key, '') == value
        status, rc = transmitter.run()
        assert rc in (None, 0)
        assert status == 'unstarted'
        outgoing_buffer.seek(0)
        sent = outgoing_buffer.read()
        assert sent  # should not be blank at least
        assert b'zipfile' in sent
        incoming_buffer_file = tmp_path / 'buffer_in'
        incoming_buffer_file.touch()
        incoming_buffer = incoming_buffer_file.open('b+r')
        outgoing_buffer.seek(0)
        worker = Worker(_input=outgoing_buffer, _output=incoming_buffer, private_data_dir=worker_dir)
        worker.run()
        outgoing_buffer.seek(0)
        assert set(os.listdir(worker_dir)) == {'artifacts', 'inventory', 'project', 'env'}, outgoing_buffer.read()
        incoming_buffer.seek(0)  # again, be kind, rewind
        processor = Processor(_input=incoming_buffer, private_data_dir=process_dir)
        processor.run()
        outgoing_buffer.close()
        incoming_buffer.close()
        self.check_artifacts(str(process_dir), job_type)

    @pytest.mark.parametrize("keepalive_setting", [
        0,  # keepalive explicitly disabled, default
        1,  # emit keepalives every 1s
        0.000000001,  # emit keepalives on a ridiculously small interval to test for output corruption
        None,  # default disable, test sets envvar for keepalives
    ])
    def test_keepalive_setting(self, tmp_path, project_fixtures, keepalive_setting):
        """Worker keepalive events must appear (or not) per setting, without corrupting output."""
        # pylint: disable=W0212
        verbosity = None
        output_corruption_test_mode = 0 < (keepalive_setting or 0) < 1
        if output_corruption_test_mode:
            verbosity = 5
            # FIXME: turn on debug output too just to really spam the thing
        if keepalive_setting is None:
            # test the envvar fallback
            os.environ['ANSIBLE_RUNNER_KEEPALIVE_SECONDS'] = '1'
        elif 'ANSIBLE_RUNNER_KEEPALIVE_SECONDS' in os.environ:
            # don't allow this envvar to affect the test behavior
            del os.environ['ANSIBLE_RUNNER_KEEPALIVE_SECONDS']
        worker_dir = tmp_path / 'for_worker'
        process_dir = tmp_path / 'for_process'
        for directory in (worker_dir, process_dir):
            directory.mkdir()
        outgoing_buffer = io.BytesIO()
        incoming_buffer = io.BytesIO()
        for buffer in (outgoing_buffer, incoming_buffer):
            buffer.name = 'foo'
        status, rc = Transmitter(
            _output=outgoing_buffer, private_data_dir=project_fixtures / 'sleep',
            playbook='sleep.yml', extravars={'sleep_interval': 2}, verbosity=verbosity
        ).run()
        assert rc in (None, 0)
        assert status == 'unstarted'
        outgoing_buffer.seek(0)
        worker_start_time = time.time()
        worker = Worker(
            _input=outgoing_buffer, _output=incoming_buffer, private_data_dir=worker_dir,
            keepalive_seconds=keepalive_setting
        )
        worker.run()
        assert time.time() - worker_start_time > 2.0  # task sleeps for 2 second
        assert isinstance(worker._keepalive_thread, threading.Thread)  # we currently always create and start the thread
        assert worker._keepalive_thread.daemon
        worker._keepalive_thread.join(2)  # wait a couple of keepalive intervals to avoid exit race
        assert not worker._keepalive_thread.is_alive()  # make sure it's dead
        incoming_buffer.seek(0)
        Processor(_input=incoming_buffer, private_data_dir=process_dir).run()
        stdout = self.get_stdout(process_dir)
        assert 'Sleep for a specified interval' in stdout
        assert '"event": "keepalive"' not in stdout
        incoming_data = incoming_buffer.getvalue().decode('utf-8')
        if keepalive_setting == 0:
            assert incoming_data.count('"event": "keepalive"') == 0
        elif 0 < (keepalive_setting or 0) < 1:
            # JSON-load every line to ensure no interleaved keepalive output corruption
            line = None
            try:
                pending_payload_length = 0
                for line in incoming_data.splitlines():
                    if pending_payload_length:
                        # decode and check length to validate that we didn't trash the payload
                        # zap the mashed eof message from the end if present
                        line = line.rsplit('{"eof": true}', 1)[0]  # FUTURE: change this to removesuffix for 3.9+
                        assert pending_payload_length == len(base64.b64decode(line))
                        pending_payload_length = 0  # back to normal
                        continue
                    data = json.loads(line)
                    pending_payload_length = data.get('zipfile', 0)
            except json.JSONDecodeError:
                pytest.fail(f'unparseable JSON in output (likely corrupted by keepalive): {line}')
        else:
            # account for some wobble in the number of keepalives for artifact gather, etc
            assert 1 <= incoming_data.count('"event": "keepalive"') < 5

    @pytest.mark.parametrize("job_type", ['run', 'adhoc'])
    def test_remote_job_by_sockets(self, tmp_path, project_fixtures, job_type):
        """This test case is intended to be close to how the AWX use case works
        the process interacts with receptorctl with sockets
        sockets are used here, but worker is manually called instead of invoked by receptor
        """
        transmit_dir = project_fixtures / 'debug'
        worker_dir = tmp_path / 'for_worker'
        worker_dir.mkdir()
        process_dir = tmp_path / 'for_process'
        process_dir.mkdir()
        job_kwargs = self.get_job_kwargs(job_type)

        def transmit_method(transmit_sockfile_write):
            return ansible_runner.interface.run(
                streamer='transmit',
                _output=transmit_sockfile_write,
                private_data_dir=transmit_dir, **job_kwargs)

        def worker_method(transmit_sockfile_read, results_sockfile_write):
            return ansible_runner.interface.run(
                streamer='worker',
                _input=transmit_sockfile_read, _output=results_sockfile_write,
                private_data_dir=worker_dir, **job_kwargs)

        def process_method(results_sockfile_read):
            return ansible_runner.interface.run(
                streamer='process', quiet=True,
                _input=results_sockfile_read,
                private_data_dir=process_dir, status_handler=self.status_handler, **job_kwargs)

        transmit_socket_write, transmit_socket_read = socket.socketpair()
        results_socket_write, results_socket_read = socket.socketpair()
        transmit_socket_read_file = transmit_socket_read.makefile('rb')
        transmit_socket_write_file = transmit_socket_write.makefile('wb')
        results_socket_read_file = results_socket_read.makefile('rb')
        results_socket_write_file = results_socket_write.makefile('wb')
        with concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor:
            transmit_future = executor.submit(transmit_method, transmit_socket_write_file)
            # In real AWX implementation, worker is done via receptorctl
            executor.submit(worker_method, transmit_socket_read_file, results_socket_write_file)
            while True:
                if transmit_future.done():
                    break
                time.sleep(0.05)  # additionally, AWX calls cancel_callback()
            res = transmit_future.result()
            assert res.rc in (None, 0)
            assert res.status == 'unstarted'
            process_future = executor.submit(process_method, results_socket_read_file)
            while True:
                if process_future.done():
                    break
                time.sleep(0.05)  # additionally, AWX calls cancel_callback()
        for s in (
                transmit_socket_write, transmit_socket_read, results_socket_write, results_socket_read,
                transmit_socket_write_file, transmit_socket_read_file, results_socket_write_file, results_socket_read_file,
        ):
            s.close()
        assert self.status_data is not None
        if 'result_traceback' in self.status_data:
            raise Exception(self.status_data['result_traceback'])
        assert self.status_data.get('status') == 'successful'
        assert set(os.listdir(worker_dir)) == {'artifacts', 'inventory', 'project', 'env'}
        self.check_artifacts(str(process_dir), job_type)

    def test_process_isolation_executable_not_exist(self, tmp_path, mocker):
        """Case transmit should not fail if process isolation executable does not exist and
        worker should fail if process isolation executable does not exist
        """
        mocker.patch.object(ansible_runner.interface, 'check_isolation_executable_installed', return_value=False)
        job_kwargs = self.get_job_kwargs('run')
        job_kwargs['process_isolation'] = True
        job_kwargs['process_isolation_executable'] = 'does_not_exist'
        outgoing_buffer_file = tmp_path / 'buffer_out'
        outgoing_buffer_file.touch()
        outgoing_buffer = outgoing_buffer_file.open('b+r')
        transmitter = ansible_runner.interface.run(
            streamer='transmit',
            _output=outgoing_buffer,
            **job_kwargs,
        )
        # validate process_isolation kwargs are passed to transmitter
        assert transmitter.kwargs['process_isolation'] == job_kwargs['process_isolation']
        assert transmitter.kwargs['process_isolation_executable'] == job_kwargs['process_isolation_executable']
        # validate that transmit did not fail due to missing process isolation executable
        assert transmitter.rc in (None, 0)
        # validate that transmit buffer is not empty
        outgoing_buffer.seek(0)
        sent = outgoing_buffer.read()
        assert sent  # should not be blank at least
        # validate buffer contains kwargs
        assert b'kwargs' in sent
        # validate kwargs in buffer contain correct process_isolation and process_isolation_executable
        for line in sent.decode('utf-8').split('\n'):
            if "kwargs" in line:
                kwargs = json.loads(line).get("kwargs", {})
                assert kwargs['process_isolation'] == job_kwargs['process_isolation']
                assert kwargs['process_isolation_executable'] == job_kwargs['process_isolation_executable']
                break
        worker_dir = tmp_path / 'for_worker'
        incoming_buffer_file = tmp_path / 'buffer_in'
        incoming_buffer_file.touch()
        incoming_buffer = incoming_buffer_file.open('b+r')
        outgoing_buffer.seek(0)
        # validate that worker fails raise sys.exit(1) when process isolation executable does not exist
        with pytest.raises(SystemExit) as exc:
            ansible_runner.interface.run(
                streamer='worker',
                _input=outgoing_buffer,
                _output=incoming_buffer,
                private_data_dir=worker_dir,
            )
        assert exc.value.code == 1
        outgoing_buffer.close()
        incoming_buffer.close()
@pytest.fixture
def transmit_stream(project_fixtures, tmp_path):
    """Write a transmit stream for the debug playbook to a file; return its path."""
    outgoing_buffer = tmp_path / 'buffer'
    outgoing_buffer.touch()
    transmit_dir = project_fixtures / 'debug'
    with outgoing_buffer.open('wb') as f:
        transmitter = Transmitter(_output=f, private_data_dir=transmit_dir, playbook='debug.yml')
        status, rc = transmitter.run()
        assert rc in (None, 0)
        assert status == 'unstarted'
    return outgoing_buffer
@pytest.fixture
def worker_stream(transmit_stream, tmp_path):  # pylint: disable=W0621
    """Run the worker over transmit_stream; return the path to its result stream."""
    ingoing_buffer = tmp_path / 'buffer2'  # basically how some demos work
    ingoing_buffer.touch()
    worker_dir = tmp_path / 'worker_dir'
    worker_dir.mkdir()
    with transmit_stream.open('rb') as out:
        with ingoing_buffer.open('wb') as f:
            worker = Worker(_input=out, _output=f, private_data_dir=worker_dir)
            status, rc = worker.run()
            assert rc in (None, 0)
            assert status == 'successful'
    return ingoing_buffer
def test_worker_without_delete_no_dir(tmp_path, cli, transmit_stream):  # pylint: disable=W0621
    """Without --delete, the worker keeps its extracted private data dir."""
    worker_dir = tmp_path / 'for_worker'
    with open(transmit_stream, 'rb') as stream:
        worker_args = ['worker', '--private-data-dir', str(worker_dir)]
        r = cli(worker_args, stdin=stream)
    assert '{"eof": true}' in r.stdout
    assert worker_dir.joinpath('project', 'debug.yml').exists()


def test_worker_without_delete_dir_exists(tmp_path, cli, transmit_stream):  # pylint: disable=W0621
    """Without --delete, pre-existing files in the private data dir are preserved."""
    worker_dir = tmp_path / 'for_worker'
    worker_dir.mkdir()
    test_file_path = worker_dir / 'test_file.txt'
    test_file_path.write_text('data\n')
    with open(transmit_stream, 'rb') as stream:
        worker_args = ['worker', '--private-data-dir', str(worker_dir)]
        r = cli(worker_args, stdin=stream)
    assert '{"eof": true}' in r.stdout
    assert worker_dir.joinpath('project', 'debug.yml').exists()
    assert test_file_path.exists()
def test_worker_delete_no_dir(tmp_path, cli, transmit_stream):  # pylint: disable=W0621
    """
    Case where --delete is provided and the private data dir does not exist:
    worker should always delete everything both before and after the run
    """
    worker_dir = tmp_path / 'for_worker'
    with open(transmit_stream, 'rb') as f:
        worker_args = ['worker', '--private-data-dir', str(worker_dir), '--delete']
        r = cli(worker_args, stdin=f)
    assert '{"eof": true}' in r.stdout
    assert not worker_dir.exists()
    assert not worker_dir.joinpath('project', 'debug.yml').exists()
def test_worker_delete_dir_exists(tmp_path, cli, transmit_stream):  # pylint: disable=W0621
    """
    Case where --delete is provided and the private data dir already exists:
    worker should always delete everything both before and after the run
    """
    worker_dir = tmp_path / 'for_worker'
    worker_dir.mkdir()
    test_file = worker_dir / 'test_file.txt'
    test_file.write_text('data\n')
    with open(transmit_stream, 'rb') as f:
        worker_args = ['worker', '--private-data-dir', str(worker_dir), '--delete']
        r = cli(worker_args, stdin=f)
    assert '{"eof": true}' in r.stdout
    assert not worker_dir.exists()
    assert not worker_dir.joinpath('project', 'debug.yml').exists()
def test_process_with_custom_ident(tmp_path, cli, worker_stream):  # pylint: disable=W0621
    """--ident places processed artifacts under artifacts/<ident>/."""
    process_dir = tmp_path / 'for_process'
    process_dir.mkdir()
    with open(worker_stream, 'rb') as f:
        process_args = ['process', str(process_dir), '--ident', 'custom_ident']
        r = cli(process_args, stdin=f)
    assert 'Hello world!' in r.stdout
    assert (process_dir / 'artifacts').exists()
    assert (process_dir / 'artifacts' / 'custom_ident').exists()
    assert (process_dir / 'artifacts' / 'custom_ident' / 'job_events').exists()
def test_missing_private_dir_transmit():
    """Transmit mode must reject a private_data_dir that does not exist."""
    stream = io.BytesIO()
    # Transmit against a path that cannot exist
    with pytest.raises(ValueError) as exc_info:
        run(
            streamer='transmit',
            _output=stream,
            private_data_dir='/foo/bar/baz',
            playbook='debug.yml',
        )
    message = str(exc_info.value)
    assert "private_data_dir path is either invalid or does not exist" in message
def test_garbage_private_dir_worker(tmp_path):
    """Worker reports an error status when the zipfile payload bytes are corrupt."""
    worker_dir = tmp_path / 'for_worker'
    worker_dir.mkdir()
    # Valid framing, but the 5 payload bytes are not a zip archive.
    incoming_buffer = io.BytesIO(
        b'{"kwargs": {"playbook": "debug.yml"}}\n{"zipfile": 5}\n\x01\x02\x03\x04\x05{"eof": true}\n')
    outgoing_buffer = io.BytesIO()
    # Worker
    run(
        streamer='worker',
        _input=incoming_buffer,
        _output=outgoing_buffer,
        private_data_dir=worker_dir,
    )
    outgoing_buffer.seek(0)
    sent = outgoing_buffer.readline()
    data = json.loads(sent)
    assert data['status'] == 'error'
    assert data['job_explanation'] == 'Failed to extract private data directory on worker.'
    assert data['result_traceback']
def test_unparsable_line_worker(tmp_path):
    """An empty/unparsable transmit stream makes the worker emit an error status."""
    private_dir = tmp_path / 'for_worker'
    private_dir.mkdir()
    stream_in = io.BytesIO(b'')
    stream_out = io.BytesIO()
    # Worker
    run(
        streamer='worker',
        _input=stream_in,
        _output=stream_out,
        private_data_dir=private_dir,
    )
    stream_out.seek(0)
    first_line = json.loads(stream_out.readline())
    assert first_line['status'] == 'error'
    assert first_line['job_explanation'] == 'Failed to JSON parse a line from transmit stream.'
def test_unparsable_really_big_line_processor(tmp_path):
    """Processor truncates a huge unparsable worker line in job_explanation."""
    process_dir = tmp_path / 'for_process'
    process_dir.mkdir()
    incoming_buffer = io.BytesIO(bytes(f'not-json-data with extra garbage:{"f"*10000}', encoding='utf-8'))

    def status_receiver(status_data, runner_config):  # pylint: disable=W0613
        assert status_data['status'] == 'error'
        assert 'Failed to JSON parse a line from worker stream.' in status_data['job_explanation']
        assert 'not-json-data with extra garbage:ffffffffff' in status_data['job_explanation']
        # the 10k payload must be truncated, not echoed whole
        assert len(status_data['job_explanation']) < 2000

    run(
        streamer='process',
        _input=incoming_buffer,
        private_data_dir=process_dir,
        status_handler=status_receiver
    )
ansible-runner-2.4.1/test/requirements.txt 0000664 0000000 0000000 00000000233 14770573620 0020716 0 ustar 00root root 0000000 0000000 mypy==1.6.0
pylint==3.0.1
pytest==8.1.1
pytest-cov
pytest-mock
pytest-timeout
pytest-xdist==2.5.0
types-pyyaml
flake8==6.1.0
yamllint==1.32.0
cryptography
ansible-runner-2.4.1/test/unit/ 0000775 0000000 0000000 00000000000 14770573620 0016413 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/unit/__init__.py 0000664 0000000 0000000 00000000000 14770573620 0020512 0 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/unit/__main__/ 0000775 0000000 0000000 00000000000 14770573620 0020133 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/unit/__main__/__init__.py 0000664 0000000 0000000 00000000000 14770573620 0022232 0 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/unit/__main__/main/ 0000775 0000000 0000000 00000000000 14770573620 0021057 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/unit/__main__/main/test_worker.py 0000664 0000000 0000000 00000003366 14770573620 0024011 0 ustar 00root root 0000000 0000000 import ansible_runner.__main__ as ansible_runner__main__
import pytest
def test_worker_delete(mocker):
    """worker --delete with no --private-data-dir makes a temp dir and registers it for cleanup."""
    mock_output = mocker.patch.object(ansible_runner__main__, 'output')
    # Abort main() right after the directory-handling logic under test.
    mock_output.configure.side_effect = AttributeError('Raised intentionally')
    mock_register_for_cleanup = mocker.patch.object(ansible_runner__main__, 'register_for_cleanup')
    mock_rmtree = mocker.patch.object(ansible_runner__main__.shutil, 'rmtree')
    mock_mkdtemp = mocker.patch.object(ansible_runner__main__.tempfile, 'mkdtemp', return_value='some_tmp_dir')
    sys_args = [
        'worker',
        '--delete',
    ]
    with pytest.raises(AttributeError, match='Raised intentionally'):
        ansible_runner__main__.main(sys_args)
    # No user dir supplied, so nothing is pre-deleted; temp dir is created instead.
    mock_rmtree.assert_not_called()
    mock_register_for_cleanup.assert_called_once_with('some_tmp_dir')
    mock_mkdtemp.assert_called_once()
def test_worker_delete_private_data_dir(mocker, tmp_path):
    """worker --delete with --private-data-dir removes that dir and registers it for cleanup."""
    mock_output = mocker.patch.object(ansible_runner__main__, 'output')
    # Abort main() right after the directory-handling logic under test.
    mock_output.configure.side_effect = AttributeError('Raised intentionally')
    mock_register_for_cleanup = mocker.patch.object(ansible_runner__main__, 'register_for_cleanup')
    mock_rmtree = mocker.patch.object(ansible_runner__main__.shutil, 'rmtree')
    mock_mkdtemp = mocker.patch.object(ansible_runner__main__.tempfile, 'mkdtemp', return_value='some_tmp_dir')
    sys_args = [
        'worker',
        '--private-data-dir', str(tmp_path),
        '--delete',
    ]
    with pytest.raises(AttributeError, match='Raised intentionally'):
        ansible_runner__main__.main(sys_args)
    # The supplied dir is wiped up-front; no temp dir is created.
    mock_rmtree.assert_called_once_with(str(tmp_path), ignore_errors=True)
    mock_register_for_cleanup.assert_called_once_with(str(tmp_path))
    mock_mkdtemp.assert_not_called()
ansible-runner-2.4.1/test/unit/config/ 0000775 0000000 0000000 00000000000 14770573620 0017660 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/unit/config/__init__.py 0000664 0000000 0000000 00000000000 14770573620 0021757 0 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/unit/config/test__base.py 0000664 0000000 0000000 00000030747 14770573620 0022355 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
import os
import re
from functools import partial
from test.utils.common import RSAKey
import pytest
from pexpect import TIMEOUT, EOF
from ansible_runner.config._base import BaseConfig, BaseExecutionMode
from ansible_runner.loader import ArtifactLoader
from ansible_runner.exceptions import ConfigurationError
def load_file_side_effect(path, value, *args, **kwargs):
    # pylint: disable=W0613
    """Side-effect helper for mocking ArtifactLoader.load_file.

    Returns ``value`` when the mocked call targets ``path``; raises
    ConfigurationError when the path matches but ``value`` is falsy;
    returns None for any other path.
    """
    if args[0] != path:
        return None
    if not value:
        raise ConfigurationError
    return value
def test_base_config_init_defaults(tmp_path):
    """BaseConfig constructed with only a private_data_dir gets sane defaults."""
    rc = BaseConfig(private_data_dir=tmp_path.as_posix())
    assert rc.private_data_dir == tmp_path.as_posix()
    assert rc.ident is not None
    assert rc.process_isolation is False
    assert rc.fact_cache_type == 'jsonfile'
    assert rc.json_mode is False
    assert rc.quiet is False
    assert rc.rotate_artifacts == 0
    # artifact dir defaults to <private_data_dir>/artifacts/<ident>
    assert rc.artifact_dir == tmp_path.joinpath('artifacts').joinpath(rc.ident).as_posix()
    assert isinstance(rc.loader, ArtifactLoader)
def test_base_config_with_artifact_dir(tmp_path, patch_private_data_dir):
    # pylint: disable=W0613
    """An explicit artifact_dir is honored and a temp private data dir is generated."""
    rc = BaseConfig(artifact_dir=tmp_path.joinpath('this-is-some-dir').as_posix())
    assert rc.artifact_dir == tmp_path.joinpath('this-is-some-dir').joinpath(rc.ident).as_posix()
    # Check that the private data dir is placed in our default location with our default prefix
    # and has some extra uniqueness on the end.
    base_private_data_dir = tmp_path.joinpath('.ansible-runner-').as_posix()
    assert rc.private_data_dir.startswith(base_private_data_dir)
    assert len(rc.private_data_dir) > len(base_private_data_dir)
    rc.prepare_env()
    # No artifacts dir under the tmp root; everything goes to the custom dir.
    assert not tmp_path.joinpath('artifacts').exists()
    assert tmp_path.joinpath('this-is-some-dir').exists()
def test_base_config_init_with_ident(tmp_path):
    """An explicit ident is used verbatim in the artifact dir path."""
    rc = BaseConfig(private_data_dir=tmp_path.as_posix(), ident='test')
    assert rc.private_data_dir == tmp_path.as_posix()
    assert rc.ident == 'test'
    assert rc.artifact_dir == tmp_path.joinpath('artifacts').joinpath('test').as_posix()
    assert isinstance(rc.loader, ArtifactLoader)


def test_base_config_project_dir(tmp_path):
    """project_dir may be overridden; default is <private_data_dir>/project."""
    rc = BaseConfig(private_data_dir=tmp_path.as_posix(), project_dir='/another/path')
    assert rc.project_dir == '/another/path'
    rc = BaseConfig(private_data_dir=tmp_path.as_posix())
    assert rc.project_dir == tmp_path.joinpath('project').as_posix()
def test_prepare_environment_vars_only_strings_from_file(mocker):
    """Env vars loaded from env/envvars are coerced to strings and merged with kwargs."""
    rc = BaseConfig(envvars={'D': 'D'})
    value = {"A": 1, "B": True, "C": "foo"}
    envvar_side_effect = partial(load_file_side_effect, 'env/envvars', value)
    mocker.patch.object(rc.loader, 'load_file', side_effect=envvar_side_effect)
    rc.prepare_env()
    assert 'A' in rc.env
    assert isinstance(rc.env['A'], str)
    assert 'B' in rc.env
    assert isinstance(rc.env['B'], str)
    assert 'C' in rc.env
    assert isinstance(rc.env['C'], str)
    assert 'D' in rc.env
    assert rc.env['D'] == 'D'


def test_prepare_environment_vars_only_strings_from_interface():
    """Env vars passed via the envvars kwarg are also coerced to strings."""
    rc = BaseConfig(envvars={'D': 'D', 'A': 1, 'B': True, 'C': 'foo'})
    rc.prepare_env()
    assert 'A' in rc.env
    assert isinstance(rc.env['A'], str)
    assert 'B' in rc.env
    assert isinstance(rc.env['B'], str)
    assert 'C' in rc.env
    assert isinstance(rc.env['C'], str)
    assert 'D' in rc.env
    assert rc.env['D'] == 'D'
def test_prepare_environment_pexpect_defaults():
    """expect_passwords defaults to TIMEOUT/EOF sentinel entries only."""
    rc = BaseConfig()
    rc.prepare_env()
    assert len(rc.expect_passwords) == 2
    assert TIMEOUT in rc.expect_passwords
    assert rc.expect_passwords[TIMEOUT] is None
    assert EOF in rc.expect_passwords
    assert rc.expect_passwords[EOF] is None


def test_prepare_env_passwords(mocker):
    """Patterns from env/passwords are compiled and mapped to their secrets."""
    rc = BaseConfig()
    value = {'^SSH [pP]assword.*$': 'secret'}
    password_side_effect = partial(load_file_side_effect, 'env/passwords', value)
    mocker.patch.object(rc.loader, 'load_file', side_effect=password_side_effect)
    rc.prepare_env()
    # Drop the default sentinels so only the loaded pattern remains.
    rc.expect_passwords.pop(TIMEOUT)
    rc.expect_passwords.pop(EOF)
    assert len(rc.expect_passwords) == 1
    assert isinstance(list(rc.expect_passwords.keys())[0], re.Pattern)
    assert 'secret' in rc.expect_passwords.values()
def test_prepare_environment_subprocess_defaults():
    """In subprocess runner mode the timeout defaults to None."""
    rc = BaseConfig()
    rc.prepare_env(runner_mode="subprocess")
    assert rc.subprocess_timeout is None


def test_prepare_environment_subprocess_timeout():
    """The timeout kwarg flows through to subprocess_timeout."""
    rc = BaseConfig(timeout=100)
    rc.prepare_env(runner_mode="subprocess")
    assert rc.subprocess_timeout == 100


def test_prepare_env_settings_defaults():
    """With no env/settings file, settings is an empty dict."""
    rc = BaseConfig()
    rc.prepare_env()
    assert rc.settings == {}


def test_prepare_env_settings(mocker):
    """Values from env/settings are loaded verbatim into settings."""
    rc = BaseConfig()
    value = {'test': 'string'}
    settings_side_effect = partial(load_file_side_effect, 'env/settings', value)
    mocker.patch.object(rc.loader, 'load_file', side_effect=settings_side_effect)
    rc.prepare_env()
    assert rc.settings == value
def test_prepare_env_sshkey_defaults():
    """With no env/ssh_key file, ssh_key_data is None."""
    rc = BaseConfig()
    rc.prepare_env()
    assert rc.ssh_key_data is None


def test_prepare_env_sshkey(mocker):
    """Private key material from env/ssh_key is loaded into ssh_key_data."""
    rc = BaseConfig()
    rsa_key = RSAKey()
    rsa_private_key_value = rsa_key.private
    sshkey_side_effect = partial(load_file_side_effect, 'env/ssh_key', rsa_private_key_value)
    mocker.patch.object(rc.loader, 'load_file', side_effect=sshkey_side_effect)
    rc.prepare_env()
    assert rc.ssh_key_data == rsa_private_key_value
def test_prepare_env_defaults():
    """Timeout defaults and host_cwd pass-through after prepare_env()."""
    rc = BaseConfig(host_cwd='/tmp/project')
    rc.prepare_env()
    assert rc.idle_timeout is None
    assert rc.job_timeout is None
    assert rc.pexpect_timeout == 5
    assert rc.host_cwd == '/tmp/project'


def test_prepare_env_ansible_vars(mocker, tmp_path):
    """prepare_env seeds the ansible-facing env vars for command execution."""
    mocker.patch.dict('os.environ', {
        'AWX_LIB_DIRECTORY': '/awx_lib_directory_via_environ',
    })
    artifact_dir = tmp_path.joinpath('some_artifacts')
    rc = BaseConfig(artifact_dir=artifact_dir.as_posix())
    rc.ssh_key_data = None
    rc.env = {}
    rc.execution_mode = BaseExecutionMode.ANSIBLE_COMMANDS
    rc.prepare_env()
    # No key data, so no fifo path is set up.
    assert not hasattr(rc, 'ssh_key_path')
    assert rc.command == []
    assert rc.env['ANSIBLE_STDOUT_CALLBACK'] == 'awx_display'
    assert rc.env['ANSIBLE_RETRY_FILES_ENABLED'] == 'False'
    assert rc.env['ANSIBLE_HOST_KEY_CHECKING'] == 'False'
    assert rc.env['AWX_ISOLATED_DATA_DIR'] == artifact_dir.joinpath(rc.ident).as_posix()
def test_prepare_with_ssh_key(mocker, tmp_path):
open_fifo_write_mock = mocker.patch('ansible_runner.config._base.open_fifo_write')
custom_artifacts = tmp_path.joinpath('custom_arts')
rc = BaseConfig(private_data_dir=tmp_path.as_posix(), artifact_dir=custom_artifacts.as_posix())
rc.artifact_dir = custom_artifacts.as_posix()
rc.env = {}
rc.execution_mode = BaseExecutionMode.ANSIBLE_COMMANDS
rsa_key = RSAKey()
rc.ssh_key_data = rsa_key.private
rc.command = 'ansible-playbook'
rc.cmdline_args = []
rc.prepare_env()
assert rc.ssh_key_path == custom_artifacts.joinpath('ssh_key_data').as_posix()
assert open_fifo_write_mock.called
def test_wrap_args_with_ssh_agent_defaults(tmp_path):
rc = BaseConfig(private_data_dir=str(tmp_path))
res = rc.wrap_args_with_ssh_agent(['ansible-playbook', 'main.yaml'], f'{tmp_path}/sshkey')
assert res == [
'ssh-agent',
'sh', '-c',
f"trap 'rm -f {tmp_path}/sshkey' EXIT && ssh-add {tmp_path}/sshkey && rm -f {tmp_path}/sshkey && ansible-playbook main.yaml"
]
def test_wrap_args_with_ssh_agent_with_auth(tmp_path):
rc = BaseConfig(private_data_dir=str(tmp_path))
res = rc.wrap_args_with_ssh_agent(['ansible-playbook', 'main.yaml'], f'{tmp_path}/sshkey', f'{tmp_path}/sshauth')
assert res == [
'ssh-agent', '-a', f'{tmp_path}/sshauth',
'sh', '-c',
f"trap 'rm -f {tmp_path}/sshkey' EXIT && ssh-add {tmp_path}/sshkey && rm -f {tmp_path}/sshkey && ansible-playbook main.yaml"
]
def test_wrap_args_with_ssh_agent_silent(tmp_path):
rc = BaseConfig(private_data_dir=str(tmp_path))
res = rc.wrap_args_with_ssh_agent(['ansible-playbook', 'main.yaml'], f'{tmp_path}/sshkey', silence_ssh_add=True)
assert res == [
'ssh-agent',
'sh', '-c',
f"trap 'rm -f {tmp_path}/sshkey' EXIT && ssh-add {tmp_path}/sshkey 2>/dev/null && rm -f {tmp_path}/sshkey && ansible-playbook main.yaml"
]
def test_container_volume_mounting_with_Z(tmp_path, mocker):
    """A volume mount carrying a :Z label survives containerization wrapping."""
    mocker.patch('os.path.isdir', return_value=False)
    mocker.patch('os.path.exists', return_value=True)
    mocker.patch('os.makedirs', return_value=True)

    rc = BaseConfig(private_data_dir=str(tmp_path))
    # NOTE(review): this direct reassignment overrides the mocker.patch above
    # (mocker still restores the original at teardown, but the configured
    # return_value=False is discarded) — confirm it is intentional.
    os.path.isdir = mocker.Mock()
    rc.container_volume_mounts = ['project_path:project_path:Z']
    rc.container_name = 'foo'
    rc.runner_mode = 'pexpect'
    rc.env = {}
    rc.execution_mode = BaseExecutionMode.ANSIBLE_COMMANDS
    rc.command = ['ansible-playbook', 'foo.yml']
    rc.container_image = 'network-ee'
    rc.cmdline_args = ['foo.yml']
    new_args = rc.wrap_args_for_containerization(rc.command, rc.execution_mode, rc.cmdline_args)
    assert new_args[0] == 'podman'

    # Scan the generated args for the -v entry that kept its :Z suffix.
    for i, entry in enumerate(new_args):
        if entry == '-v':
            mount = new_args[i + 1]
            if mount.endswith('project_path:Z'):
                break
    else:
        raise Exception(f'Could not find expected mount, args: {new_args}')
@pytest.mark.parametrize('runtime', ('docker', 'podman'))
def test_containerization_settings(tmp_path, runtime, mocker):
    """The full container invocation is assembled as expected for each runtime."""
    mocker.patch.dict('os.environ', {'HOME': str(tmp_path)}, clear=True)
    tmp_path.joinpath('.ssh').mkdir()
    mock_containerized = mocker.patch('ansible_runner.config._base.BaseConfig.containerized', new_callable=mocker.PropertyMock)
    mock_containerized.return_value = True

    rc = BaseConfig(private_data_dir=tmp_path)
    rc.ident = 'foo'
    rc.cmdline_args = ['main.yaml', '-i', '/tmp/inventory']
    rc.command = ['ansible-playbook'] + rc.cmdline_args
    rc.process_isolation = True
    rc.runner_mode = 'pexpect'
    rc.process_isolation_executable = runtime
    rc.container_image = 'my_container'
    rc.container_volume_mounts = ['/host1:/container1', 'host2:/container2']
    rc.execution_mode = BaseExecutionMode.ANSIBLE_COMMANDS
    rc.prepare_env()
    rc.handle_command_wrap(rc.execution_mode, rc.cmdline_args)

    # podman is quieted; docker runs as the invoking uid
    extra_container_args = []
    if runtime == 'podman':
        extra_container_args = ['--quiet']
    else:
        extra_container_args = [f'--user={os.getuid()}']

    expected_command_start = [
        runtime,
        'run',
        '--rm',
        '--tty',
        '--interactive',
        '--workdir',
        '/runner/project',
        '-v', f'{str(tmp_path)}/.ssh/:/home/runner/.ssh/',
        '-v', f'{str(tmp_path)}/.ssh/:/root/.ssh/'
    ]

    # host known_hosts are forwarded only when present on the test host
    if os.path.exists('/etc/ssh/ssh_known_hosts'):
        expected_command_start.extend(['-v', '/etc/ssh/:/etc/ssh/'])

    if runtime == 'podman':
        expected_command_start.extend(['--group-add=root', '--ipc=host'])

    expected_command_start.extend([
        '-v', f'{rc.private_data_dir}/artifacts/:/runner/artifacts/:Z',
        '-v', f'{rc.private_data_dir}/:/runner/:Z',
        '-v', '/host1:/container1',
        '-v', 'host2:/container2',
        '--env-file', f'{rc.artifact_dir}/env.list',
    ])

    expected_command_start.extend(extra_container_args)

    expected_command_start.extend([
        '--name', 'ansible_runner_foo',
        'my_container', 'ansible-playbook', 'main.yaml', '-i', '/tmp/inventory',
    ])

    assert expected_command_start == rc.command


@pytest.mark.parametrize('runtime', ('docker', 'podman'))
def test_containerization_unsafe_write_setting(tmp_path, runtime, mocker):
    """ANSIBLE_UNSAFE_WRITES is set to '1' for podman and left unset for docker."""
    mock_containerized = mocker.patch('ansible_runner.config._base.BaseConfig.containerized', new_callable=mocker.PropertyMock)

    rc = BaseConfig(private_data_dir=tmp_path)
    rc.ident = 'foo'
    rc.cmdline_args = ['main.yaml', '-i', '/tmp/inventory']
    rc.command = ['ansible-playbook'] + rc.cmdline_args
    rc.process_isolation = True
    rc.runner_mode = 'pexpect'
    rc.process_isolation_executable = runtime
    rc.container_image = 'my_container'
    rc.container_volume_mounts = ['/host1:/container1', 'host2:/container2']
    mock_containerized.return_value = True
    rc.execution_mode = BaseExecutionMode.ANSIBLE_COMMANDS
    rc.prepare_env()
    rc.handle_command_wrap(rc.execution_mode, rc.cmdline_args)

    expected = {
        'docker': None,
        'podman': '1',
    }
    assert rc.env.get('ANSIBLE_UNSAFE_WRITES') == expected[runtime]
ansible-runner-2.4.1/test/unit/config/test_ansible_cfg.py 0000664 0000000 0000000 00000007240 14770573620 0023530 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
import os
import pytest
from ansible_runner.config.ansible_cfg import AnsibleCfgConfig
from ansible_runner.config._base import BaseExecutionMode
from ansible_runner.exceptions import ConfigurationError
from ansible_runner.utils import get_executable_path
def test_ansible_cfg_init_defaults(tmp_path, patch_private_data_dir):
    # pylint: disable=W0613
    """A default private data dir is generated under the patched tmp location."""
    rc = AnsibleCfgConfig()

    # Check that the private data dir is placed in our default location with our default prefix
    # and has some extra uniqueness on the end.
    base_private_data_dir = tmp_path.joinpath('.ansible-runner-').as_posix()
    assert rc.private_data_dir.startswith(base_private_data_dir)
    assert len(rc.private_data_dir) > len(base_private_data_dir)

    assert rc.execution_mode == BaseExecutionMode.ANSIBLE_COMMANDS


def test_invalid_runner_mode_value():
    """An unknown runner_mode raises ConfigurationError."""
    with pytest.raises(ConfigurationError) as exc:
        AnsibleCfgConfig(runner_mode='test')

    assert "Invalid runner mode" in exc.value.args[0]


def test_prepare_config_command():
    """prepare_ansible_config_command builds an ansible-config invocation."""
    rc = AnsibleCfgConfig()
    rc.prepare_ansible_config_command('list', config_file='/tmp/ansible.cfg')
    expected_command = [get_executable_path('ansible-config'), 'list', '-c', '/tmp/ansible.cfg']
    assert rc.command == expected_command
    assert rc.runner_mode == 'subprocess'


def test_prepare_config_invalid_command():
    """only_changed is rejected for actions other than 'dump'."""
    with pytest.raises(ConfigurationError) as exc:
        rc = AnsibleCfgConfig()
        rc.prepare_ansible_config_command('list', config_file='/tmp/ansible.cfg', only_changed=True)

    assert "only_changed is applicable for action 'dump'" == exc.value.args[0]


def test_prepare_config_invalid_action():
    """Unknown config actions raise ConfigurationError."""
    with pytest.raises(ConfigurationError) as exc:
        rc = AnsibleCfgConfig()
        rc.prepare_ansible_config_command('test')

    assert "Invalid action test, valid value is one of either list, dump, view" == exc.value.args[0]


@pytest.mark.parametrize('runtime', ('docker', 'podman'))
def test_prepare_config_command_with_containerization(tmp_path, runtime, mocker):
    """The ansible-config command is wrapped in a full container invocation."""
    mocker.patch.dict('os.environ', {'HOME': str(tmp_path)}, clear=True)
    tmp_path.joinpath('.ssh').mkdir()
    kwargs = {
        'private_data_dir': tmp_path,
        'process_isolation': True,
        'container_image': 'my_container',
        'process_isolation_executable': runtime
    }
    rc = AnsibleCfgConfig(**kwargs)
    rc.ident = 'foo'
    rc.prepare_ansible_config_command('list', config_file='/tmp/ansible.cfg')

    assert rc.runner_mode == 'subprocess'
    extra_container_args = []
    if runtime == 'podman':
        extra_container_args = ['--quiet']
    else:
        extra_container_args = [f'--user={os.getuid()}']

    expected_command_start = [
        runtime,
        'run',
        '--rm',
        '--interactive',
        '--workdir',
        '/runner/project',
        # NOTE(review): the first mount is spelled via rc.private_data_dir and
        # the second via str(tmp_path); both are expected to name the same
        # directory here — confirm, and prefer a single spelling.
        '-v', f'{rc.private_data_dir}/.ssh/:/home/runner/.ssh/',
        '-v', f'{str(tmp_path)}/.ssh/:/root/.ssh/',
    ]

    if os.path.exists('/etc/ssh/ssh_known_hosts'):
        expected_command_start.extend(['-v', '/etc/ssh/:/etc/ssh/'])

    if runtime == 'podman':
        expected_command_start.extend(['--group-add=root', '--ipc=host'])

    expected_command_start.extend([
        '-v', f'{rc.private_data_dir}/artifacts/:/runner/artifacts/:Z',
        '-v', f'{rc.private_data_dir}/:/runner/:Z',
        '--env-file', f'{rc.artifact_dir}/env.list',
    ])

    expected_command_start.extend(extra_container_args)

    expected_command_start.extend([
        '--name',
        'ansible_runner_foo',
        'my_container',
        'ansible-config',
        'list',
        '-c',
        '/tmp/ansible.cfg',
    ])

    assert expected_command_start == rc.command
ansible-runner-2.4.1/test/unit/config/test_command.py 0000664 0000000 0000000 00000010101 14770573620 0022700 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
import os
import pytest
from ansible_runner.config.command import CommandConfig
from ansible_runner.config._base import BaseExecutionMode
from ansible_runner.exceptions import ConfigurationError
def test_ansible_config_defaults(tmp_path, patch_private_data_dir):
    # pylint: disable=W0613
    """CommandConfig defaults: generated data dir, no execution/runner mode yet."""
    rc = CommandConfig()

    # Check that the private data dir is placed in our default location with our default prefix
    # and has some extra uniqueness on the end.
    base_private_data_dir = tmp_path.joinpath('.ansible-runner-').as_posix()
    assert rc.private_data_dir.startswith(base_private_data_dir)
    assert len(rc.private_data_dir) > len(base_private_data_dir)

    assert rc.execution_mode == BaseExecutionMode.NONE
    assert rc.runner_mode is None


def test_invalid_runner_mode_value():
    """An unknown runner_mode raises ConfigurationError."""
    with pytest.raises(ConfigurationError) as exc:
        CommandConfig(runner_mode='test')

    assert "Invalid runner mode" in exc.value.args[0]


def test_prepare_run_command_interactive():
    """ansible-playbook is treated as interactive and runs under pexpect."""
    rc = CommandConfig()
    executable_cmd = 'ansible-playbook'
    cmdline_args = ['main.yaml', '-i', 'test']
    rc.prepare_run_command(executable_cmd, cmdline_args=cmdline_args)
    expected_command = ['ansible-playbook', 'main.yaml', '-i', 'test']
    assert rc.command == expected_command
    assert rc.runner_mode == 'pexpect'
    assert rc.execution_mode == BaseExecutionMode.ANSIBLE_COMMANDS


def test_prepare_run_command_non_interactive():
    """ansible-doc is non-interactive and runs as a plain subprocess."""
    rc = CommandConfig()
    executable_cmd = 'ansible-doc'
    cmdline_args = ['-l']
    rc.prepare_run_command(executable_cmd, cmdline_args=cmdline_args)
    expected_command = ['ansible-doc', '-l']
    assert rc.command == expected_command
    assert rc.runner_mode == 'subprocess'
    assert rc.execution_mode == BaseExecutionMode.ANSIBLE_COMMANDS


def test_prepare_run_command_generic():
    """Non-ansible executables run in generic-command mode under pexpect."""
    rc = CommandConfig()
    executable_cmd = 'python3'
    cmdline_args = ['test.py']
    rc.prepare_run_command(executable_cmd, cmdline_args=cmdline_args)
    expected_command = ['python3', 'test.py']
    assert rc.command == expected_command
    assert rc.runner_mode == 'pexpect'
    assert rc.execution_mode == BaseExecutionMode.GENERIC_COMMANDS


@pytest.mark.parametrize('runtime', ('docker', 'podman'))
def test_prepare_run_command_with_containerization(tmp_path, runtime, mocker):
    """The run command is wrapped in a full container invocation."""
    mocker.patch.dict('os.environ', {'HOME': str(tmp_path)}, clear=True)
    tmp_path.joinpath('.ssh').mkdir()
    kwargs = {
        'private_data_dir': tmp_path,
        'process_isolation': True,
        'container_image': 'my_container',
        'process_isolation_executable': runtime
    }
    cwd = os.getcwd()
    executable_cmd = 'ansible-playbook'
    cmdline_args = ['main.yaml', '-i', cwd]
    rc = CommandConfig(**kwargs)
    rc.ident = 'foo'
    rc.prepare_run_command(executable_cmd, cmdline_args=cmdline_args)

    assert rc.runner_mode == 'pexpect'
    extra_container_args = []
    if runtime == 'podman':
        extra_container_args = ['--quiet']
    else:
        extra_container_args = [f'--user={os.getuid()}']

    expected_command_start = [
        runtime,
        'run',
        '--rm',
        '--tty',
        '--interactive',
        '--workdir',
        '/runner/project',
        # the inventory path (cwd) is bind-mounted into the container as-is
        '-v', f'{cwd}/:{cwd}/',
        '-v', f'{rc.private_data_dir}/.ssh/:/home/runner/.ssh/',
        '-v', f'{rc.private_data_dir}/.ssh/:/root/.ssh/',
    ]

    if os.path.exists('/etc/ssh/ssh_known_hosts'):
        expected_command_start.extend(['-v', '/etc/ssh/:/etc/ssh/'])

    if runtime == 'podman':
        expected_command_start.extend(['--group-add=root', '--ipc=host'])

    expected_command_start.extend([
        '-v', f'{rc.private_data_dir}/artifacts/:/runner/artifacts/:Z',
        '-v', f'{rc.private_data_dir}/:/runner/:Z',
        '--env-file', f'{rc.artifact_dir}/env.list',
    ])

    expected_command_start.extend(extra_container_args)

    expected_command_start.extend([
        '--name',
        'ansible_runner_foo',
        'my_container',
        executable_cmd,
    ])

    expected_command_start.extend(cmdline_args)

    assert expected_command_start == rc.command
ansible-runner-2.4.1/test/unit/config/test_container_volmount_generation.py 0000664 0000000 0000000 00000017253 14770573620 0027441 0 ustar 00root root 0000000 0000000 """ Ensure the generation of container volume mounts is handled
predictably and consistently """
# pylint: disable=W0212,W0621
import os
from typing import NamedTuple
import pytest
from ansible_runner.config._base import BaseConfig
from ansible_runner.exceptions import ConfigurationError
class Variation(NamedTuple):
    """one piece of the path"""

    # short human-readable description used to build pytest ids
    comment: str
    # the path fragment under test
    path: str


# Directory path variations exercised across the volume-mount tests:
# absolute, trailing-slash, nested, dotted, env-var and ~-relative forms.
dir_variations = (
    Variation(comment="dir no slash", path="/somedir_0"),
    Variation(comment="dir with slash", path="/somedir_1/"),
    Variation(comment="nested dir no slash", path="/somedir/otherdir_0"),
    Variation(comment="nested dir with slash", path="/somedir/otherdir_1/"),
    Variation(comment="path with dot", path="/somedir/foo.bar"),
    Variation(comment="path with var no slash", path="$HOME/somedir_0"),
    Variation(comment="path with var slash", path="$HOME/somedir_1"),
    Variation(comment="path with ~ no slash", path="~/somedir_2"),
    Variation(comment="path with ~ slash", path="~/somedir_3"),
)

# Volume label variants, including an already-colon-prefixed one (":z").
labels = (None, "", "Z", "ro,Z", ":z")

# Paths that must never be volume-mounted into a container.
not_safe = ("/", "/home", "/usr")
def id_for_dst(value):
    """Build a pytest id string for a destination-path variation."""
    return "dst->" + value.comment
def id_for_isdir(value):
    """Build a pytest id string for an isdir flag value."""
    return "isdir->{}".format(value)
def id_for_label(value):
    """Build a pytest id string for a volume-label variant."""
    return "labels->{}".format(value)
def id_for_src(value):
    """Build a pytest id string for a source-path variation."""
    return "src->" + value.comment
def resolve_path(path):
    """Fully resolve a path: expand env vars, then ~, then make it absolute."""
    expanded = os.path.expandvars(path)
    expanded = os.path.expanduser(expanded)
    return os.path.abspath(expanded)
def generate_volmount_args(src_str, dst_str, vol_labels):
    """Build a podman-style ``['-v', 'src:dst[:labels]']`` argument pair."""
    parts = [src_str, ":", dst_str]
    if vol_labels:
        # labels may already carry a leading ":" (e.g. ":z"); add one otherwise
        if not vol_labels.startswith(":"):
            parts.append(":")
        parts.append(vol_labels)
    return ["-v", "".join(parts)]
@pytest.mark.parametrize("not_safe", not_safe)
def test_check_not_safe_to_mount_dir(not_safe, mocker):
"""Ensure unsafe directories are not mounted"""
with pytest.raises(ConfigurationError):
bc = BaseConfig()
mocker.patch("os.path.exists", return_value=True)
bc._update_volume_mount_paths(
args_list=[], src_mount_path=not_safe, dst_mount_path=None
)
@pytest.mark.parametrize("not_safe", not_safe)
def test_check_not_safe_to_mount_file(not_safe, mocker):
"""Ensure unsafe directories for a given file are not mounted"""
file_path = os.path.join(not_safe, "file.txt")
with pytest.raises(ConfigurationError):
bc = BaseConfig()
mocker.patch("os.path.exists", return_value=True)
bc._update_volume_mount_paths(
args_list=[], src_mount_path=file_path, dst_mount_path=None
)
@pytest.mark.parametrize("path", dir_variations, ids=id_for_src)
def test_duplicate_detection_dst(path, mocker):
"""Ensure no duplicate volume mount entries are created"""
mocker.patch("os.path.exists", return_value=True)
mocker.patch("os.path.isdir", return_value=True)
base_config = BaseConfig()
def generate():
for entry in dir_variations:
for label in labels:
base_config._update_volume_mount_paths(
args_list=first_pass,
src_mount_path=path.path,
dst_mount_path=entry.path,
labels=label,
)
first_pass = []
generate()
second_pass = first_pass[:]
generate()
assert first_pass == second_pass
@pytest.mark.parametrize("labels", labels, ids=id_for_label)
@pytest.mark.parametrize("path", dir_variations, ids=id_for_src)
def test_no_dst_all_dirs(path, labels, mocker):
mocker.patch("os.path.exists", return_value=True)
mocker.patch("os.path.isdir", return_value=True)
# Ensure dst == src when not provided
src_str = os.path.join(resolve_path(path.path), "")
dst_str = src_str
expected = generate_volmount_args(src_str=src_str, dst_str=dst_str, vol_labels=labels)
result = []
BaseConfig()._update_volume_mount_paths(
args_list=result, src_mount_path=path.path, dst_mount_path=None, labels=labels
)
explanation = (
f"provided: {path.path}:{None}",
f"got: {result}",
f"expected {expected}",
)
assert result == expected, explanation
assert all(part.endswith('/') for part in result[1].split(':')[0:1]), explanation
@pytest.mark.parametrize("labels", labels, ids=id_for_label)
@pytest.mark.parametrize("dst", dir_variations, ids=id_for_dst)
@pytest.mark.parametrize("src", dir_variations, ids=id_for_src)
def test_src_dst_all_dirs(src, dst, labels, mocker):
mocker.patch("os.path.exists", return_value=True)
mocker.patch("os.path.isdir", return_value=True)
# Ensure src and dest end with trailing slash
src_str = os.path.join(resolve_path(src.path), "")
dst_str = os.path.join(resolve_path(dst.path), "")
expected = generate_volmount_args(src_str=src_str, dst_str=dst_str, vol_labels=labels)
result = []
BaseConfig()._update_volume_mount_paths(
args_list=result, src_mount_path=src.path, dst_mount_path=dst.path, labels=labels
)
explanation = (
f"provided: {src.path}:{dst.path}",
f"got: {result}",
f"expected {expected}",
)
assert result == expected, explanation
assert all(part.endswith('/') for part in result[1].split(':')[0:1]), explanation
@pytest.mark.parametrize("labels", labels, ids=id_for_label)
@pytest.mark.parametrize("path", dir_variations, ids=id_for_src)
def test_src_dst_all_files(path, labels, mocker):
"""Ensure file paths are transformed correctly into dir paths"""
src_str = os.path.join(resolve_path(path.path), "")
dst_str = src_str
expected = generate_volmount_args(src_str=src_str, dst_str=dst_str, vol_labels=labels)
result = []
src_file = os.path.join(path.path, "", "file.txt")
dest_file = src_file
base_config = BaseConfig()
mocker.patch("os.path.exists", return_value=True)
mocker.patch("os.path.isdir", return_value=False)
base_config._update_volume_mount_paths(
args_list=result, src_mount_path=src_file, dst_mount_path=dest_file, labels=labels
)
explanation = (
f"provided: {src_file}:{dest_file}",
f"got: {result}",
f"expected {expected}",
)
assert result == expected, explanation
assert all(part.endswith('/') for part in result[1].split(':')[0:1]), explanation
@pytest.mark.parametrize("relative", (".", "..", "../.."))
@pytest.mark.parametrize("labels", labels, ids=id_for_label)
@pytest.mark.parametrize("dst", dir_variations, ids=id_for_dst)
@pytest.mark.parametrize("src", dir_variations, ids=id_for_src)
def test_src_dst_all_relative_dirs(src, dst, labels, relative, mocker):
mocker.patch("os.path.exists", return_value=True)
mocker.patch("os.path.isdir", return_value=True)
# Ensure src is resolved and dest mapped to workdir when relative
relative_src = f"{relative}{src.path}"
relative_dst = f"{relative}{dst.path}"
workdir = "/workdir"
src_str = os.path.join(resolve_path(relative_src), "")
dst_str = os.path.join(resolve_path(os.path.join(workdir, relative_dst)), "")
expected = generate_volmount_args(src_str=src_str, dst_str=dst_str, vol_labels=labels)
result = []
BaseConfig(container_workdir=workdir)._update_volume_mount_paths(
args_list=result, src_mount_path=relative_src, dst_mount_path=relative_dst, labels=labels
)
explanation = (
f"provided: {relative_src}:{relative_dst}",
f"got: {result}",
f"expected {expected}",
)
assert result == expected, explanation
assert all(part.endswith('/') for part in result[1].split(':')[0:1]), explanation
ansible-runner-2.4.1/test/unit/config/test_doc.py 0000775 0000000 0000000 00000014553 14770573620 0022051 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
import os
import pytest
from ansible_runner.config.doc import DocConfig
from ansible_runner.config._base import BaseExecutionMode
from ansible_runner.exceptions import ConfigurationError
from ansible_runner.utils import get_executable_path
def test_ansible_doc_defaults(tmp_path, patch_private_data_dir):
    # pylint: disable=W0613
    """DocConfig defaults: generated data dir, subprocess runner mode."""
    rc = DocConfig()

    # Check that the private data dir is placed in our default location with our default prefix
    # and has some extra uniqueness on the end.
    base_private_data_dir = tmp_path.joinpath('.ansible-runner-').as_posix()
    assert rc.private_data_dir.startswith(base_private_data_dir)
    assert len(rc.private_data_dir) > len(base_private_data_dir)

    assert rc.execution_mode == BaseExecutionMode.ANSIBLE_COMMANDS
    assert rc.runner_mode == 'subprocess'


def test_invalid_runner_mode_value():
    """An unknown runner_mode raises ConfigurationError."""
    with pytest.raises(ConfigurationError) as exc:
        DocConfig(runner_mode='test')

    assert "Invalid runner mode" in exc.value.args[0]


def test_invalid_response_format_value():
    """Unsupported response_format values raise ConfigurationError."""
    with pytest.raises(ConfigurationError) as exc:
        rc = DocConfig()
        plugin_names = ['copy', 'file']
        rc.prepare_plugin_docs_command(plugin_names, response_format='test')

    assert "Invalid response_format test, valid value is one of either json, human" == exc.value.args[0]


def test_invalid_plugin_name_value():
    """plugin_names must be a list; a tuple is rejected."""
    with pytest.raises(ConfigurationError) as exc:
        rc = DocConfig()
        plugin_names = 'copy', 'file'
        rc.prepare_plugin_docs_command(plugin_names)

    assert "plugin_names should be of type list" in exc.value.args[0]


def test_prepare_plugin_docs_command():
    """prepare_plugin_docs_command builds a snippet-style ansible-doc call."""
    rc = DocConfig()
    plugin_names = ['copy', 'file']
    plugin_type = 'module'
    rc.prepare_plugin_docs_command(plugin_names, plugin_type=plugin_type, snippet=True, playbook_dir='/tmp/test')
    expected_command = [get_executable_path('ansible-doc'), '-s', '-t', 'module', '--playbook-dir', '/tmp/test', 'copy', 'file']
    assert rc.command == expected_command
    assert rc.runner_mode == 'subprocess'
    assert rc.execution_mode == BaseExecutionMode.ANSIBLE_COMMANDS


@pytest.mark.parametrize('runtime', ('docker', 'podman'))
def test_prepare_plugin_docs_command_with_containerization(tmp_path, runtime, mocker):
    """The plugin-docs command is wrapped in a full container invocation."""
    mocker.patch.dict('os.environ', {'HOME': str(tmp_path)}, clear=True)
    tmp_path.joinpath('.ssh').mkdir()
    kwargs = {
        'private_data_dir': tmp_path,
        'process_isolation': True,
        'container_image': 'my_container',
        'process_isolation_executable': runtime
    }
    rc = DocConfig(**kwargs)
    rc.ident = 'foo'
    plugin_names = ['copy', 'file']
    plugin_type = 'module'
    rc.prepare_plugin_docs_command(plugin_names, plugin_type=plugin_type, snippet=True, playbook_dir='/tmp/test')

    assert rc.runner_mode == 'subprocess'
    extra_container_args = []
    if runtime == 'podman':
        extra_container_args = ['--quiet']
    else:
        extra_container_args = [f'--user={os.getuid()}']

    expected_command_start = [
        runtime,
        'run',
        '--rm',
        '--interactive',
        '--workdir',
        '/runner/project',
        '-v', f'{rc.private_data_dir}/.ssh/:/home/runner/.ssh/',
        '-v', f'{rc.private_data_dir}/.ssh/:/root/.ssh/',
    ]

    if os.path.exists('/etc/ssh/ssh_known_hosts'):
        expected_command_start.extend(['-v', '/etc/ssh/:/etc/ssh/'])

    if runtime == 'podman':
        expected_command_start.extend(['--group-add=root', '--ipc=host'])

    expected_command_start.extend([
        '-v', f'{rc.private_data_dir}/artifacts/:/runner/artifacts/:Z',
        '-v', f'{rc.private_data_dir}/:/runner/:Z',
        '--env-file', f'{rc.artifact_dir}/env.list',
    ])

    expected_command_start.extend(extra_container_args)

    expected_command_start.extend([
        '--name', 'ansible_runner_foo',
        'my_container',
        'ansible-doc',
        '-s',
        '-t', 'module',
        '--playbook-dir', '/tmp/test',
        'copy',
        'file',
    ])

    assert expected_command_start == rc.command


def test_prepare_plugin_list_command():
    """prepare_plugin_list_command builds an ansible-doc -F listing call."""
    rc = DocConfig()
    rc.prepare_plugin_list_command(list_files=True, plugin_type='module', playbook_dir='/tmp/test', module_path='/test/module')
    expected_command = [get_executable_path('ansible-doc'), '-F', '-t', 'module', '--playbook-dir', '/tmp/test', '-M', '/test/module']
    assert rc.command == expected_command
    assert rc.runner_mode == 'subprocess'
    assert rc.execution_mode == BaseExecutionMode.ANSIBLE_COMMANDS


@pytest.mark.parametrize('runtime', ('docker', 'podman'))
def test_prepare_plugin_list_command_with_containerization(tmp_path, runtime, mocker):
    """The plugin-list command is wrapped in a full container invocation."""
    mocker.patch.dict('os.environ', {'HOME': str(tmp_path)}, clear=True)
    tmp_path.joinpath('.ssh').mkdir()
    kwargs = {
        'private_data_dir': tmp_path,
        'process_isolation': True,
        'container_image': 'my_container',
        'process_isolation_executable': runtime
    }
    rc = DocConfig(**kwargs)
    rc.ident = 'foo'
    rc.prepare_plugin_list_command(list_files=True, plugin_type='module', playbook_dir='/tmp/test', module_path='/test/module')

    assert rc.runner_mode == 'subprocess'
    extra_container_args = []
    if runtime == 'podman':
        extra_container_args = ['--quiet']
    else:
        extra_container_args = [f'--user={os.getuid()}']

    expected_command_start = [
        runtime,
        'run',
        '--rm',
        '--interactive',
        '--workdir',
        '/runner/project',
        '-v', f'{rc.private_data_dir}/.ssh/:/home/runner/.ssh/',
        '-v', f'{rc.private_data_dir}/.ssh/:/root/.ssh/',
    ]

    if os.path.exists('/etc/ssh/ssh_known_hosts'):
        expected_command_start.extend(['-v', '/etc/ssh/:/etc/ssh/'])

    if runtime == 'podman':
        expected_command_start.extend(['--group-add=root', '--ipc=host'])

    expected_command_start.extend([
        '-v', f'{rc.private_data_dir}/artifacts/:/runner/artifacts/:Z',
        '-v', f'{rc.private_data_dir}/:/runner/:Z',
        '--env-file', f'{rc.artifact_dir}/env.list',
    ])

    expected_command_start.extend(extra_container_args)

    expected_command_start.extend([
        '--name', 'ansible_runner_foo',
        'my_container',
        'ansible-doc',
        '-F',
        '-t', 'module',
        '--playbook-dir', '/tmp/test',
        '-M', '/test/module'
    ])

    assert expected_command_start == rc.command
ansible-runner-2.4.1/test/unit/config/test_inventory.py 0000664 0000000 0000000 00000013225 14770573620 0023331 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
import os
import pytest
from ansible_runner.config.inventory import InventoryConfig
from ansible_runner.config._base import BaseExecutionMode
from ansible_runner.exceptions import ConfigurationError
from ansible_runner.utils import get_executable_path
def test_ansible_inventory_init_defaults(tmp_path, patch_private_data_dir):
    # pylint: disable=W0613
    """InventoryConfig defaults: generated data dir, ansible execution mode."""
    rc = InventoryConfig()

    # Check that the private data dir is placed in our default location with our default prefix
    # and has some extra uniqueness on the end.
    base_private_data_dir = tmp_path.joinpath('.ansible-runner-').as_posix()
    assert rc.private_data_dir.startswith(base_private_data_dir)
    assert len(rc.private_data_dir) > len(base_private_data_dir)

    assert rc.execution_mode == BaseExecutionMode.ANSIBLE_COMMANDS


def test_invalid_runner_mode_value():
    """An unknown runner_mode raises ConfigurationError."""
    with pytest.raises(ConfigurationError) as exc:
        InventoryConfig(runner_mode='test')

    assert "Invalid runner mode" in exc.value.args[0]


def test_prepare_inventory_command():
    """prepare_inventory_command builds a full ansible-inventory invocation."""
    rc = InventoryConfig()
    inventories = ['/tmp/inventory1', '/tmp/inventory2']
    rc.prepare_inventory_command('list', inventories, response_format='yaml', playbook_dir='/tmp',
                                 vault_ids='1234', vault_password_file='/tmp/password')
    expected_command = [get_executable_path('ansible-inventory'), '--list', '-i', '/tmp/inventory1', '-i', '/tmp/inventory2', '--yaml', '--playbook-dir'] + \
                       ['/tmp', '--vault-id', '1234', '--vault-password-file', '/tmp/password']
    assert rc.command == expected_command
    assert rc.runner_mode == 'subprocess'


def test_prepare_inventory_invalid_action():
    """Unknown inventory actions raise ConfigurationError."""
    with pytest.raises(ConfigurationError) as exc:
        rc = InventoryConfig()
        inventories = ['/tmp/inventory1', '/tmp/inventory2']
        rc.prepare_inventory_command('test', inventories=inventories)

    assert "Invalid action test, valid value is one of either graph, host, list" == exc.value.args[0]


def test_prepare_inventory_invalid_response_format():
    """Unsupported response_format values raise ConfigurationError."""
    with pytest.raises(ConfigurationError) as exc:
        rc = InventoryConfig()
        inventories = ['/tmp/inventory1', '/tmp/inventory2']
        rc.prepare_inventory_command('list', inventories=inventories, response_format='test')

    assert "Invalid response_format test, valid value is one of either json, yaml, toml" == exc.value.args[0]


def test_prepare_inventory_invalid_inventories():
    """inventories must be a list; a plain string is rejected."""
    with pytest.raises(ConfigurationError) as exc:
        rc = InventoryConfig()
        inventories = '/tmp/inventory1'
        rc.prepare_inventory_command('list', inventories=inventories)

    assert "inventories should be of type list" in exc.value.args[0]


def test_prepare_inventory_invalid_host_action():
    """The 'host' action requires a host parameter."""
    with pytest.raises(ConfigurationError) as exc:
        rc = InventoryConfig()
        inventories = ['/tmp/inventory1', '/tmp/inventory2']
        rc.prepare_inventory_command('host', inventories=inventories)

    assert "Value of host parameter is required when action in 'host'" == exc.value.args[0]


def test_prepare_inventory_invalid_graph_response_format():
    """The 'graph' action only supports JSON output."""
    with pytest.raises(ConfigurationError) as exc:
        rc = InventoryConfig()
        inventories = ['/tmp/inventory1', '/tmp/inventory2']
        rc.prepare_inventory_command('graph', inventories=inventories, response_format='toml')

    assert "'graph' action supports only 'json' response format" == exc.value.args[0]


@pytest.mark.parametrize('runtime', ('docker', 'podman'))
def test_prepare_inventory_command_with_containerization(tmp_path, runtime, mocker):
    """The inventory command is wrapped in a full container invocation."""
    mocker.patch.dict('os.environ', {'HOME': str(tmp_path)}, clear=True)
    tmp_path.joinpath('.ssh').mkdir()
    kwargs = {
        'private_data_dir': tmp_path,
        'process_isolation': True,
        'container_image': 'my_container',
        'process_isolation_executable': runtime
    }
    rc = InventoryConfig(**kwargs)
    rc.ident = 'foo'
    inventories = ['/tmp/inventory1', '/tmp/inventory2']
    rc.prepare_inventory_command('list', inventories, response_format='yaml', playbook_dir='/tmp',
                                 vault_ids='1234', vault_password_file='/tmp/password', output_file='/tmp/inv_out.txt',
                                 export=True)

    assert rc.runner_mode == 'subprocess'
    extra_container_args = []
    if runtime == 'podman':
        extra_container_args = ['--quiet']
    else:
        extra_container_args = [f'--user={os.getuid()}']

    expected_command_start = [
        runtime,
        'run',
        '--rm',
        '--interactive',
        '--workdir',
        '/runner/project',
        '-v', f'{rc.private_data_dir}/.ssh/:/home/runner/.ssh/',
        '-v', f'{rc.private_data_dir}/.ssh/:/root/.ssh/',
    ]

    if os.path.exists('/etc/ssh/ssh_known_hosts'):
        expected_command_start.extend(['-v', '/etc/ssh/:/etc/ssh/'])

    if runtime == 'podman':
        expected_command_start.extend(['--group-add=root', '--ipc=host'])

    expected_command_start.extend([
        '-v', f'{rc.private_data_dir}/artifacts/:/runner/artifacts/:Z',
        '-v', f'{rc.private_data_dir}/:/runner/:Z',
        '--env-file', f'{rc.artifact_dir}/env.list',
    ])

    expected_command_start.extend(extra_container_args)

    expected_command_start.extend([
        '--name',
        'ansible_runner_foo',
        'my_container',
        'ansible-inventory',
        '--list',
        '-i', '/tmp/inventory1',
        '-i', '/tmp/inventory2',
        '--yaml',
        '--playbook-dir', '/tmp',
        '--vault-id', '1234',
        '--vault-password-file', '/tmp/password',
        '--output', '/tmp/inv_out.txt',
        '--export',
    ])

    assert expected_command_start == rc.command
ansible-runner-2.4.1/test/unit/config/test_runner.py 0000664 0000000 0000000 00000057741 14770573620 0022620 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
from functools import partial
import os
import re
from test.utils.common import RSAKey
from pexpect import TIMEOUT, EOF
import pytest
from ansible_runner.config.runner import RunnerConfig, ExecutionMode
from ansible_runner.loader import ArtifactLoader
from ansible_runner.exceptions import ConfigurationError
def load_file_side_effect(path, value, *args, **kwargs):
    # pylint: disable=W0613
    """Mock side effect for loader.load_file: return ``value`` only when the
    requested file (first positional call argument) matches ``path`` and
    ``value`` is truthy; otherwise raise ConfigurationError."""
    requested = args[0]
    if requested == path and value:
        return value
    raise ConfigurationError
def test_runner_config_init_defaults(mocker):
mocker.patch('os.makedirs', return_value=True)
rc = RunnerConfig('/')
assert rc.private_data_dir == '/'
assert rc.ident is not None
assert rc.playbook is None
assert rc.inventory is None
assert rc.limit is None
assert rc.module is None
assert rc.module_args is None
assert rc.artifact_dir == os.path.join(f'/artifacts/{rc.ident}')
assert isinstance(rc.loader, ArtifactLoader)
def test_runner_config_with_artifact_dir(mocker):
    """An explicit artifact_dir becomes the parent of the per-ident artifact dir."""
    mocker.patch('os.makedirs', return_value=True)
    rc = RunnerConfig('/', artifact_dir='/this-is-some-dir')
    assert rc.artifact_dir == os.path.join('/this-is-some-dir', rc.ident)
def test_runner_config_init_with_ident(mocker):
    """An explicit ident is stored verbatim and used in the artifact dir path."""
    mocker.patch('os.makedirs', return_value=True)
    rc = RunnerConfig('/', ident='test')
    assert rc.private_data_dir == '/'
    assert rc.ident == 'test'
    assert rc.playbook is None
    assert rc.inventory is None
    assert rc.limit is None
    assert rc.module is None
    assert rc.module_args is None
    # join the components explicitly instead of a pointless single-argument os.path.join()
    assert rc.artifact_dir == os.path.join('/artifacts', 'test')
    assert isinstance(rc.loader, ArtifactLoader)
def test_runner_config_project_dir(mocker):
    """project_dir defaults to <private_data_dir>/project but can be overridden."""
    mocker.patch('os.makedirs', return_value=True)
    rc = RunnerConfig('/', project_dir='/another/path')
    assert rc.project_dir == '/another/path'
    rc = RunnerConfig('/')
    assert rc.project_dir == '/project'
def test_prepare_environment_vars_only_strings(mocker):
    """prepare_env() coerces all env/envvars values to strings and merges API envvars."""
    mocker.patch('os.makedirs', return_value=True)
    rc = RunnerConfig(private_data_dir="/", envvars={'D': 'D'})
    # non-string values loaded from env/envvars must come out as str
    value = {'A': 1, 'B': True, 'C': 'foo'}
    envvar_side_effect = partial(load_file_side_effect, 'env/envvars', value)
    mocker.patch.object(rc.loader, 'load_file', side_effect=envvar_side_effect)
    rc.prepare_env()
    assert 'A' in rc.env
    assert isinstance(rc.env['A'], str)
    assert 'B' in rc.env
    assert isinstance(rc.env['B'], str)
    assert 'C' in rc.env
    assert isinstance(rc.env['C'], str)
    assert 'D' in rc.env
    assert rc.env['D'] == 'D'
def test_prepare_env_ad_hoc_command(mocker):
    """When AD_HOC_COMMAND_ID is present, cwd falls back to the private data dir."""
    mocker.patch('os.makedirs', return_value=True)
    rc = RunnerConfig(private_data_dir="/")
    value = {'AD_HOC_COMMAND_ID': 'teststring'}
    envvar_side_effect = partial(load_file_side_effect, 'env/envvars', value)
    mocker.patch.object(rc.loader, 'load_file', side_effect=envvar_side_effect)
    rc.prepare_env()
    assert rc.cwd == '/'
def test_prepare_environment_pexpect_defaults(mocker):
    """With no password file, expect_passwords holds only the TIMEOUT/EOF sentinels."""
    mocker.patch('os.makedirs', return_value=True)
    rc = RunnerConfig(private_data_dir="/")
    rc.prepare_env()
    assert len(rc.expect_passwords) == 2
    assert TIMEOUT in rc.expect_passwords
    assert rc.expect_passwords[TIMEOUT] is None
    assert EOF in rc.expect_passwords
    assert rc.expect_passwords[EOF] is None
def test_prepare_env_passwords(mocker):
    """Patterns from env/passwords are compiled to regexes mapped to their secrets."""
    mocker.patch('os.makedirs', return_value=True)
    rc = RunnerConfig(private_data_dir='/')
    value = {'^SSH [pP]assword.*$': 'secret'}
    password_side_effect = partial(load_file_side_effect, 'env/passwords', value)
    mocker.patch.object(rc.loader, 'load_file', side_effect=password_side_effect)
    rc.prepare_env()
    # drop the pexpect sentinels so only the user-provided pattern remains
    rc.expect_passwords.pop(TIMEOUT)
    rc.expect_passwords.pop(EOF)
    assert len(rc.expect_passwords) == 1
    assert isinstance(list(rc.expect_passwords.keys())[0], re.Pattern)
    assert 'secret' in rc.expect_passwords.values()
def test_prepare_env_extra_vars_defaults(mocker):
    """extra_vars defaults to None when no extravars file or argument is given."""
    mocker.patch('os.makedirs', return_value=True)
    rc = RunnerConfig('/')
    rc.prepare_env()
    assert rc.extra_vars is None
def test_prepare_env_settings_defaults(mocker):
    """settings defaults to an empty dict when env/settings is absent."""
    mocker.patch('os.makedirs', return_value=True)
    rc = RunnerConfig('/')
    rc.prepare_env()
    assert rc.settings == {}
def test_prepare_env_settings(mocker):
    """Values loaded from env/settings are exposed verbatim on rc.settings."""
    mocker.patch('os.makedirs', return_value=True)
    rc = RunnerConfig('/')
    value = {'test': 'string'}
    settings_side_effect = partial(load_file_side_effect, 'env/settings', value)
    mocker.patch.object(rc.loader, 'load_file', side_effect=settings_side_effect)
    rc.prepare_env()
    assert rc.settings == value
def test_prepare_env_sshkey_defaults(mocker):
    """ssh_key_data defaults to None when env/ssh_key is absent."""
    mocker.patch('os.makedirs', return_value=True)
    rc = RunnerConfig('/')
    rc.prepare_env()
    assert rc.ssh_key_data is None
def test_prepare_env_sshkey(mocker):
    """A private key loaded from env/ssh_key lands on rc.ssh_key_data unchanged."""
    # open_fifo_write is patched so no fifo is actually created on disk
    mocker.patch('ansible_runner.config._base.open_fifo_write')
    mocker.patch('os.makedirs', return_value=True)
    rc = RunnerConfig('/')
    rsa_key = RSAKey()
    rsa_private_key_value = rsa_key.private
    sshkey_side_effect = partial(load_file_side_effect, 'env/ssh_key', rsa_private_key_value)
    mocker.patch.object(rc.loader, 'load_file', side_effect=sshkey_side_effect)
    rc.prepare_env()
    assert rc.ssh_key_data == rsa_private_key_value
def test_prepare_env_defaults(mocker):
    """Timeout defaults and the project cwd are set when the project dir exists."""
    mocker.patch('os.makedirs', return_value=True)
    path_exists = mocker.patch('os.path.exists')
    path_exists.return_value = True
    rc = RunnerConfig('/')
    rc.prepare_env()
    assert rc.idle_timeout is None
    assert rc.job_timeout is None
    assert rc.pexpect_timeout == 5
    assert rc.cwd == '/project'
def test_prepare_env_directory_isolation(mocker):
    """When directory isolation is set, cwd points at the isolation path."""
    mocker.patch('os.makedirs', return_value=True)
    path_exists = mocker.patch('os.path.exists')
    path_exists.return_value = True
    rc = RunnerConfig('/')
    rc.directory_isolation_path = '/tmp/foo'
    rc.prepare_env()
    assert rc.cwd == '/tmp/foo'
def test_prepare_env_directory_isolation_from_settings(mocker, project_fixtures):
    '''
    Test that sandboxing with directory isolation works correctly with `env/settings` values.
    '''
    # Mock away the things that would actually prepare the isolation directory.
    mocker.patch('os.makedirs', return_value=True)
    copy_tree = mocker.patch('shutil.copytree')
    mkdtemp = mocker.patch('tempfile.mkdtemp')
    mkdtemp.return_value = '/tmp/runner/runner_di_XYZ'
    mocker.patch('ansible_runner.config.runner.RunnerConfig.build_process_isolation_temp_dir')
    # The `directory_isolation` test data sets up an `env/settings` file for us.
    private_data_dir = project_fixtures / 'directory_isolation'
    rc = RunnerConfig(private_data_dir=str(private_data_dir), playbook='main.yaml')
    # This is where all the magic happens
    rc.prepare()
    assert rc.sandboxed
    assert rc.process_isolation_executable == 'bwrap'
    assert rc.project_dir == str(private_data_dir / 'project')
    assert os.path.exists(rc.project_dir)
    # `directory_isolation_path` should be used to create a new temp path underneath
    assert rc.directory_isolation_path == '/tmp/runner/runner_di_XYZ'
    mkdtemp.assert_called_once_with(prefix='runner_di_', dir='/tmp/runner')
    # The project files should be copied to the isolation path.
    copy_tree.assert_called_once_with(rc.project_dir, rc.directory_isolation_path, dirs_exist_ok=True, symlinks=True)
def test_prepare_inventory(mocker):
    """prepare_inventory() resolves the inventory path, host lists, and None."""
    mocker.patch('os.makedirs', return_value=True)
    path_exists = mocker.patch('os.path.exists', return_value=True)
    rc = RunnerConfig(private_data_dir='/')
    # default: the inventory dir under the private data dir
    rc.prepare_inventory()
    assert rc.inventory == '/inventory'
    # an absolute path is kept as-is
    rc.inventory = '/tmp/inventory'
    rc.prepare_inventory()
    assert rc.inventory == '/tmp/inventory'
    # a comma-separated host list is never treated as a path
    path_exists.return_value = False
    rc.inventory = 'localhost,anotherhost,'
    rc.prepare_inventory()
    assert rc.inventory == 'localhost,anotherhost,'
    rc.inventory = None
    rc.prepare_inventory()
    assert rc.inventory is None
@pytest.mark.parametrize(
    'extra_vars, expected',
    (
        ({'test': 'key'}, ['ansible-playbook', '-i', '/inventory', '-e', '@/env/extravars', '-e', '{"test":"key"}', 'main.yaml']),
        ('/tmp/extravars.yml', ['ansible-playbook', '-i', '/inventory', '-e', '@/env/extravars', '-e', '@/tmp/extravars.yml', 'main.yaml']),
        (None, ['ansible-playbook', '-i', '/inventory', '-e', '@/env/extravars', 'main.yaml']),
    )
)
def test_generate_ansible_command_extra_vars(mocker, extra_vars, expected):
    """extra_vars may be a dict (inlined JSON), a file path (@-prefixed), or None."""
    mocker.patch('os.makedirs', return_value=True)
    mocker.patch('os.path.exists', return_value=True)
    rc = RunnerConfig(private_data_dir='/', playbook='main.yaml')
    rc.inventory = '/inventory'
    rc.prepare_inventory()
    # pretend env/extravars exists so the @/env/extravars argument is emitted
    mocker.patch.object(rc.loader, 'isfile', side_effect=lambda x: True)
    rc.extra_vars = extra_vars
    cmd = rc.generate_ansible_command()
    assert cmd == expected
def test_generate_ansible_command(mocker):
    """Exercise generate_ansible_command() across inventory, verbosity, limit, module and forks options."""
    mocker.patch('os.makedirs', return_value=True)
    mocker.patch('os.path.exists', return_value=True)
    rc = RunnerConfig(private_data_dir='/', playbook='main.yaml')
    rc.prepare_inventory()
    rc.extra_vars = None
    cmd = rc.generate_ansible_command()
    assert cmd == ['ansible-playbook', '-i', '/inventory', 'main.yaml']
    # dict extra vars are serialized to compact JSON
    rc.extra_vars = {'test': 'key'}
    cmd = rc.generate_ansible_command()
    assert cmd == ['ansible-playbook', '-i', '/inventory', '-e', '{"test":"key"}', 'main.yaml']
    rc.extra_vars = None
    # a host-list string is passed through as a single -i argument
    rc.inventory = "localhost,"
    cmd = rc.generate_ansible_command()
    assert cmd == ['ansible-playbook', '-i', 'localhost,', 'main.yaml']
    # a list of inventories yields one -i per entry
    rc.inventory = ['thing1', 'thing2']
    cmd = rc.generate_ansible_command()
    assert cmd == ['ansible-playbook', '-i', 'thing1', '-i', 'thing2', 'main.yaml']
    # an empty list omits -i entirely
    rc.inventory = []
    cmd = rc.generate_ansible_command()
    assert cmd == ['ansible-playbook', 'main.yaml']
    rc.inventory = None
    mocker.patch('os.path.exists', return_value=False)
    rc.prepare_inventory()
    cmd = rc.generate_ansible_command()
    assert cmd == ['ansible-playbook', 'main.yaml']
    # verbosity N expands to -v repeated N times
    rc.verbosity = 3
    mocker.patch('os.path.exists', return_value=True)
    rc.prepare_inventory()
    cmd = rc.generate_ansible_command()
    assert cmd == ['ansible-playbook', '-i', '/inventory', '-vvv', 'main.yaml']
    rc.verbosity = None
    rc.limit = 'hosts'
    cmd = rc.generate_ansible_command()
    assert cmd == ['ansible-playbook', '-i', '/inventory', '--limit', 'hosts', 'main.yaml']
    rc.limit = None
    # setting a module switches the executable to ad-hoc `ansible`
    rc.module = 'setup'
    cmd = rc.generate_ansible_command()
    assert cmd == ['ansible', '-i', '/inventory', '-m', 'setup']
    rc.module = None
    rc.module = 'setup'
    rc.module_args = 'test=string'
    cmd = rc.generate_ansible_command()
    assert cmd == ['ansible', '-i', '/inventory', '-m', 'setup', '-a', 'test=string']
    rc.module_args = None
    rc.module = None
    rc.forks = 5
    cmd = rc.generate_ansible_command()
    assert cmd == ['ansible-playbook', '-i', '/inventory', '--forks', '5', 'main.yaml']
def test_generate_ansible_command_with_api_extravars(mocker):
    """extravars passed via the API are serialized to compact JSON on the command line."""
    mocker.patch('os.makedirs', return_value=True)
    rc = RunnerConfig(private_data_dir='/', playbook='main.yaml', extravars={"foo": "bar"})
    path_exists = mocker.patch('os.path.exists')
    path_exists.return_value = True
    rc.prepare_inventory()
    cmd = rc.generate_ansible_command()
    assert cmd == ['ansible-playbook', '-i', '/inventory', '-e', '{"foo":"bar"}', 'main.yaml']
def test_generate_ansible_command_with_dict_extravars(mocker):
    """Control characters inside extravars values are JSON-escaped, not mangled."""
    mocker.patch('os.makedirs', return_value=True)
    rc = RunnerConfig(private_data_dir='/', playbook='main.yaml', extravars={"foo": "test \n hello"})
    path_exists = mocker.patch('os.path.exists')
    path_exists.return_value = True
    rc.prepare_inventory()
    cmd = rc.generate_ansible_command()
    assert cmd == ['ansible-playbook', '-i', '/inventory', '-e', '{"foo":"test \\n hello"}', 'main.yaml']
@pytest.mark.parametrize('cmdline,tokens', [
    ('--tags foo --skip-tags', ['--tags', 'foo', '--skip-tags']),
    ('--limit "䉪ቒ칸ⱷ?噂폄蔆㪗輥"', ['--limit', '䉪ቒ칸ⱷ?噂폄蔆㪗輥']),
])
def test_generate_ansible_command_with_cmdline_args(cmdline, tokens, mocker):
    """env/cmdline content is shell-tokenized (including non-ASCII) and prepended to the args."""
    mocker.patch('os.makedirs', return_value=True)
    rc = RunnerConfig(private_data_dir='/', playbook='main.yaml')
    path_exists = mocker.patch('os.path.exists')
    path_exists.return_value = True
    rc.prepare_inventory()
    rc.extra_vars = {}
    cmdline_side_effect = partial(load_file_side_effect, 'env/cmdline', cmdline)
    mocker.patch.object(rc.loader, 'load_file', side_effect=cmdline_side_effect)
    cmd = rc.generate_ansible_command()
    assert cmd == ['ansible-playbook'] + tokens + ['-i', '/inventory', 'main.yaml']
def test_prepare_command_defaults(mocker):
    """Without an args file, prepare_command() falls back to generate_ansible_command()."""
    mocker.patch('os.makedirs', return_value=True)
    rc = RunnerConfig('/')
    cmd_side_effect = partial(load_file_side_effect, 'args', None)
    def generate_side_effect():
        return ['test', '"string with spaces"']
    mocker.patch.object(rc.loader, 'load_file', side_effect=cmd_side_effect)
    mocker.patch.object(rc, 'generate_ansible_command', side_effect=generate_side_effect)
    rc.prepare_command()
    assert rc.command == ['test', '"string with spaces"']
def test_prepare_with_defaults(mocker):
    """prepare() raises ConfigurationError when nothing executable is configured."""
    mocker.patch('os.makedirs', return_value=True)
    rc = RunnerConfig('/')
    rc.prepare_inventory = mocker.Mock()
    rc.prepare_env = mocker.Mock()
    rc.prepare_command = mocker.Mock()
    rc.ssh_key_data = None
    rc.artifact_dir = '/'
    rc.env = {}
    with pytest.raises(ConfigurationError) as exc:
        rc.prepare()
    assert str(exc.value) == 'No executable for runner to run'
def test_prepare(mocker):
    """prepare() invokes the sub-prepare steps and sets the default Ansible env vars."""
    mocker.patch.dict('os.environ', {
        'AWX_LIB_DIRECTORY': '/awx_lib_directory_via_environ',
    })
    mocker.patch('os.makedirs', return_value=True)
    rc = RunnerConfig('/')
    rc.prepare_inventory = mocker.Mock()
    rc.prepare_command = mocker.Mock()
    rc.ssh_key_data = None
    rc.artifact_dir = '/'
    rc.env = {}
    rc.execution_mode = ExecutionMode.ANSIBLE_PLAYBOOK
    rc.playbook = 'main.yaml'
    rc.prepare()
    assert rc.prepare_inventory.called
    assert rc.prepare_command.called
    # no ssh key data -> no ssh_key_path attribute is created
    assert not hasattr(rc, 'ssh_key_path')
    assert rc.command == []
    assert rc.env['ANSIBLE_STDOUT_CALLBACK'] == 'awx_display'
    assert rc.env['ANSIBLE_RETRY_FILES_ENABLED'] == 'False'
    assert rc.env['ANSIBLE_HOST_KEY_CHECKING'] == 'False'
    assert rc.env['AWX_ISOLATED_DATA_DIR'] == '/'
def test_prepare_with_ssh_key(mocker):
    """With ssh_key_data set, prepare() opens the key fifo and wraps args in ssh-agent."""
    mocker.patch('os.makedirs', return_value=True)
    open_fifo_write_mock = mocker.patch('ansible_runner.config._base.open_fifo_write')
    rc = RunnerConfig('/')
    rc.prepare_inventory = mocker.Mock()
    rc.prepare_command = mocker.Mock()
    rc.wrap_args_with_ssh_agent = mocker.Mock()
    rc.ssh_key_data = None
    rc.artifact_dir = '/'
    rc.env = {}
    rc.execution_mode = ExecutionMode.ANSIBLE_PLAYBOOK
    rc.playbook = 'main.yaml'
    rsa_key = RSAKey()
    rc.ssh_key_data = rsa_key.private
    rc.command = 'ansible-playbook'
    mocker.patch.dict('os.environ', {'AWX_LIB_DIRECTORY': '/'})
    rc.prepare()
    assert rc.ssh_key_path == '/ssh_key_data'
    assert rc.wrap_args_with_ssh_agent.called
    assert open_fifo_write_mock.called
def test_wrap_args_with_ssh_agent_defaults(mocker):
    """By default the command is wrapped in ssh-agent with a key-cleanup trap."""
    mocker.patch('os.makedirs', return_value=True)
    config = RunnerConfig('/')
    wrapped = config.wrap_args_with_ssh_agent(['ansible-playbook', 'main.yaml'], '/tmp/sshkey')
    expected_script = "trap 'rm -f /tmp/sshkey' EXIT && ssh-add /tmp/sshkey && rm -f /tmp/sshkey && ansible-playbook main.yaml"
    assert wrapped == ['ssh-agent', 'sh', '-c', expected_script]
def test_wrap_args_with_ssh_agent_with_auth(mocker):
    """An auth-socket path is forwarded to ssh-agent via its -a option."""
    mocker.patch('os.makedirs', return_value=True)
    config = RunnerConfig('/')
    wrapped = config.wrap_args_with_ssh_agent(['ansible-playbook', 'main.yaml'], '/tmp/sshkey', '/tmp/sshauth')
    expected_script = "trap 'rm -f /tmp/sshkey' EXIT && ssh-add /tmp/sshkey && rm -f /tmp/sshkey && ansible-playbook main.yaml"
    assert wrapped == ['ssh-agent', '-a', '/tmp/sshauth', 'sh', '-c', expected_script]
def test_wrap_args_with_ssh_agent_silent(mocker):
    """silence_ssh_add=True redirects ssh-add stderr to /dev/null in the wrapper."""
    mocker.patch('os.makedirs', return_value=True)
    config = RunnerConfig('/')
    wrapped = config.wrap_args_with_ssh_agent(['ansible-playbook', 'main.yaml'], '/tmp/sshkey', silence_ssh_add=True)
    expected_script = "trap 'rm -f /tmp/sshkey' EXIT && ssh-add /tmp/sshkey 2>/dev/null && rm -f /tmp/sshkey && ansible-playbook main.yaml"
    assert wrapped == ['ssh-agent', 'sh', '-c', expected_script]
def test_bwrap_process_isolation_defaults(mocker):
    """With bwrap process isolation, the default sandbox flags wrap the playbook command."""
    mocker.patch('os.makedirs', return_value=True)
    rc = RunnerConfig('/')
    rc.artifact_dir = '/tmp/artifacts'
    rc.playbook = 'main.yaml'
    rc.command = 'ansible-playbook'
    rc.process_isolation = True
    rc.process_isolation_executable = 'bwrap'
    path_exists = mocker.patch('os.path.exists')
    path_exists.return_value = True
    rc.prepare()
    assert rc.command == [
        'bwrap',
        '--die-with-parent',
        '--unshare-pid',
        '--dev-bind', '/dev', 'dev',
        '--proc', '/proc',
        '--dir', '/tmp',
        '--ro-bind', '/bin', '/bin',
        '--ro-bind', '/etc', '/etc',
        '--ro-bind', '/usr', '/usr',
        '--ro-bind', '/opt', '/opt',
        '--symlink', 'usr/lib', '/lib',
        '--symlink', 'usr/lib64', '/lib64',
        '--bind', '/', '/',
        '--chdir', '/project',
        'ansible-playbook', '-i', '/inventory', 'main.yaml',
    ]
def test_bwrap_process_isolation_and_directory_isolation(mocker, patch_private_data_dir, tmp_path):
    # pylint: disable=W0613
    """With bwrap AND directory isolation, --chdir targets the isolation dir instead of /project."""
    def mock_exists(path):
        # /project must appear absent so the directory-isolation path is used
        if path == "/project":
            return False
        return True
    class MockArtifactLoader:
        # Minimal stand-in for ArtifactLoader: no files, no settings
        def __init__(self, base_path):
            self.base_path = base_path
        def load_file(self, path, objtype=None, encoding='utf-8'):
            raise ConfigurationError
        def isfile(self, _):
            return False
    mocker.patch('ansible_runner.config.runner.os.makedirs', return_value=True)
    mocker.patch('ansible_runner.config.runner.os.chmod', return_value=True)
    mocker.patch('ansible_runner.config.runner.os.path.exists', mock_exists)
    mocker.patch('ansible_runner.config._base.ArtifactLoader', new=MockArtifactLoader)
    artifact_path = tmp_path / 'artifacts'
    artifact_path.mkdir()
    rc = RunnerConfig('/')
    rc.artifact_dir = tmp_path / 'artifacts'
    rc.directory_isolation_path = tmp_path / 'dirisolation'
    rc.playbook = 'main.yaml'
    rc.command = 'ansible-playbook'
    rc.process_isolation = True
    rc.process_isolation_executable = 'bwrap'
    rc.prepare()
    assert rc.command == [
        'bwrap',
        '--die-with-parent',
        '--unshare-pid',
        '--dev-bind', '/dev', 'dev',
        '--proc', '/proc',
        '--dir', '/tmp',
        '--ro-bind', '/bin', '/bin',
        '--ro-bind', '/etc', '/etc',
        '--ro-bind', '/usr', '/usr',
        '--ro-bind', '/opt', '/opt',
        '--symlink', 'usr/lib', '/lib',
        '--symlink', 'usr/lib64', '/lib64',
        '--bind', '/', '/',
        '--chdir', os.path.realpath(rc.directory_isolation_path),
        'ansible-playbook', '-i', '/inventory', 'main.yaml',
    ]
def test_process_isolation_settings(mocker, tmp_path):
    """Hide/show/ro path settings translate into the expected bwrap-style bind arguments."""
    mocker.patch('os.path.isdir', return_value=False)
    mocker.patch('os.path.exists', return_value=True)
    mocker.patch('os.makedirs', return_value=True)
    rc = RunnerConfig('/')
    rc.artifact_dir = tmp_path.joinpath('artifacts').as_posix()
    rc.playbook = 'main.yaml'
    rc.command = 'ansible-playbook'
    rc.process_isolation = True
    rc.process_isolation_executable = 'not_bwrap'
    rc.process_isolation_hide_paths = ['/home', '/var']
    rc.process_isolation_show_paths = ['/usr']
    rc.process_isolation_ro_paths = ['/venv']
    rc.process_isolation_path = tmp_path.as_posix()
    mocker.patch('os.path.exists', return_value=True)
    rc.prepare()
    print(rc.command)
    expected = [
        'not_bwrap',
        '--die-with-parent',
        '--unshare-pid',
        '--dev-bind', '/dev', 'dev',
        '--proc', '/proc',
        '--dir', '/tmp',
        '--ro-bind', '/bin', '/bin',
        '--ro-bind', '/etc', '/etc',
        '--ro-bind', '/usr', '/usr',
        '--ro-bind', '/opt', '/opt',
        '--symlink', 'usr/lib', '/lib',
        '--symlink', 'usr/lib64', '/lib64',
    ]
    index = len(expected)
    assert rc.command[0:index] == expected
    # hide /home
    assert rc.command[index] == '--bind'
    assert 'ansible_runner_pi' in rc.command[index + 1]
    assert rc.command[index + 2] == os.path.realpath('/home')  # needed for Mac
    # hide /var
    assert rc.command[index + 3] == '--bind'
    assert 'ansible_runner_pi' in rc.command[index + 4]
    assert rc.command[index + 5] in ('/var', '/private/var')
    # read-only bind
    assert rc.command[index + 6:index + 9] == ['--ro-bind', '/venv', '/venv']
    # root bind
    assert rc.command[index + 9:index + 12] == ['--bind', '/', '/']
    # show /usr
    assert rc.command[index + 12:index + 15] == ['--bind', '/usr', '/usr']
    # chdir and ansible-playbook command
    assert rc.command[index + 15:] == ['--chdir', '/project', 'ansible-playbook', '-i', '/inventory', 'main.yaml']
def test_container_volume_mounting_with_Z(mocker, tmp_path):
    """A :Z-labelled volume mount survives containerization argument building."""
    mocker.patch('os.path.isdir', return_value=True)
    mocker.patch('os.path.exists', return_value=True)
    rc = RunnerConfig(str(tmp_path))
    rc.container_volume_mounts = ['/tmp/project_path:/tmp/project_path:Z']
    rc.container_name = 'foo'
    rc.container_image = 'bar'
    rc.env = {}
    new_args = rc.wrap_args_for_containerization(['ansible-playbook', 'foo.yml'], 0, None)
    assert new_args[0] == 'podman'
    # scan the -v arguments for the expected :Z-suffixed mount
    for i, entry in enumerate(new_args):
        if entry == '-v':
            mount = new_args[i + 1]
            if mount.endswith(':/tmp/project_path:Z'):
                break
    else:
        raise Exception(f'Could not find expected mount, args: {new_args}')
@pytest.mark.parametrize('runtime', ('docker', 'podman'))
def test_containerization_settings(tmp_path, runtime, mocker):
    """Containerized runs build the expected docker/podman command line and callback env."""
    mocker.patch('os.path.isdir', return_value=True)
    mocker.patch('os.path.exists', return_value=True)
    mock_containerized = mocker.patch('ansible_runner.runner_config.RunnerConfig.containerized', new_callable=mocker.PropertyMock)
    mock_containerized.return_value = True
    # In this test get_callback_dir() will not return a callback plugin dir that exists
    # mock shutil.copytree and shutil.rmtree to just return True instead of trying to copy
    mocker.patch('shutil.copytree', return_value=True)
    mocker.patch('shutil.rmtree', return_value=True)
    rc = RunnerConfig(tmp_path)
    rc.ident = 'foo'
    rc.playbook = 'main.yaml'
    rc.command = 'ansible-playbook'
    rc.process_isolation = True
    rc.process_isolation_executable = runtime
    rc.container_image = 'my_container'
    rc.container_volume_mounts = ['/host1:/container1', '/host2:/container2']
    rc.prepare()
    # validate ANSIBLE_CALLBACK_PLUGINS env var is set
    assert rc.env.get('ANSIBLE_CALLBACK_PLUGINS', None) is not None
    # validate ANSIBLE_CALLBACK_PLUGINS contains callback plugin dir
    callback_plugins = rc.env['ANSIBLE_CALLBACK_PLUGINS'].split(':')
    callback_dir = os.path.join("/runner/artifacts", str(rc.ident), "callback")
    assert callback_dir in callback_plugins
    extra_container_args = []
    if runtime == 'podman':
        extra_container_args = ['--quiet']
    else:
        extra_container_args = [f'--user={os.getuid()}']
    expected_command_start = [runtime, 'run', '--rm', '--tty', '--interactive', '--workdir', '/runner/project'] + \
        ['-v', f'{rc.private_data_dir}/:/runner/:Z'] + \
        ['-v', '/host1:/container1', '-v', '/host2:/container2'] + \
        ['--env-file', f'{rc.artifact_dir}/env.list'] + \
        extra_container_args + \
        ['--name', 'ansible_runner_foo'] + \
        ['my_container', 'ansible-playbook', '-i', '/runner/inventory', 'main.yaml']
    assert expected_command_start == rc.command
ansible-runner-2.4.1/test/unit/conftest.py 0000664 0000000 0000000 00000000326 14770573620 0020613 0 ustar 00root root 0000000 0000000 import pytest
@pytest.fixture
def patch_private_data_dir(tmp_path, mocker):
    """Redirect the config base's mkdtemp into pytest's tmp_path for the duration of a test."""
    mocker.patch('ansible_runner.config._base.tempfile.mkdtemp', return_value=tmp_path.joinpath('.ansible-runner-lo0zrl9x').as_posix())
ansible-runner-2.4.1/test/unit/test__main__.py 0000664 0000000 0000000 00000002665 14770573620 0021416 0 ustar 00root root 0000000 0000000 from ansible_runner.__main__ import valid_inventory
def test_valid_inventory_file_in_inventory(tmp_path):
    """
    A bare file name is resolved relative to the data dir's inventory subdir.
    """
    private_data_dir = tmp_path / "datadir"
    inventory_dir = private_data_dir / "inventory"
    inventory_dir.mkdir(parents=True)
    hosts_file = inventory_dir / "hosts.xyz"
    hosts_file.touch()
    expected = str(hosts_file.absolute())
    assert valid_inventory(str(private_data_dir), "hosts.xyz") == expected
def test_valid_inventory_absolute_path_to_file(tmp_path):
    """
    An absolute path to a file outside the data dir is returned unchanged.
    """
    private_data_dir = tmp_path / "datadir"
    (private_data_dir / "inventory").mkdir(parents=True)
    other_dir = tmp_path / "otherdir"
    other_dir.mkdir()
    hosts_file = other_dir / "hosts.xyz"
    hosts_file.touch()
    expected = str(hosts_file.absolute())
    assert valid_inventory(str(private_data_dir), str(hosts_file.absolute())) == expected
def test_valid_inventory_absolute_path_to_directory(tmp_path):
    """
    An absolute path to a directory outside the data dir is returned unchanged.
    """
    private_data_dir = tmp_path / "datadir"
    (private_data_dir / "inventory").mkdir(parents=True)
    other_dir = tmp_path / "otherdir"
    other_dir.mkdir()
    other_dir.touch()
    expected = str(other_dir.absolute())
    assert valid_inventory(str(private_data_dir), str(other_dir.absolute())) == expected
def test_valid_inventory_doesnotexist(tmp_path):
    """
    A nonexistent inventory path yields None.
    """
    result = valid_inventory(str(tmp_path), "doesNotExist")
    assert result is None
ansible-runner-2.4.1/test/unit/test_cleanup.py 0000664 0000000 0000000 00000006002 14770573620 0021451 0 ustar 00root root 0000000 0000000 import os
import pathlib
import random
import time
import pytest
from ansible_runner.cleanup import cleanup_dirs, validate_pattern
from ansible_runner.config.runner import RunnerConfig
def test_simple_dir_cleanup_with_exclusions(tmp_path):
    """cleanup_dirs() removes matching directories while skipping files and excluded idents."""
    paths = []
    for i in range(0, 6, 2):
        # use `_` for the inner comprehension so it no longer shadows the loop variable `i`
        trailing = ''.join(random.choice("abcdefica3829") for _ in range(8))
        path = tmp_path / f'pattern_{i}_{trailing}'
        path.mkdir()
        paths.append(path)
    # a plain file matching the pattern must never be removed
    a_file_path = tmp_path / 'pattern_32_donotcleanme'
    a_file_path.write_text('this is a file and should not be cleaned by the cleanup command')
    # a dir matching an excluded ident must be kept
    keep_dir_path = tmp_path / 'pattern_42_alsokeepme'
    keep_dir_path.mkdir()
    ct = cleanup_dirs(pattern=str(tmp_path / 'pattern_*_*'), exclude_strings=[42], grace_period=0)
    assert ct == 3  # cleaned up 3 private_data_dirs
    for path in paths:
        assert not path.exists()
    assert a_file_path.exists()
    assert keep_dir_path.exists()
    assert cleanup_dirs(pattern=str(tmp_path / 'pattern_*_*'), exclude_strings=[42], grace_period=0) == 0  # no more to cleanup
def test_cleanup_command_grace_period(tmp_path):
    """Directories younger than the grace period survive cleanup_dirs()."""
    old_dir = str(tmp_path / 'modtime_old_xyz')
    new_dir = str(tmp_path / 'modtime_new_abc')
    os.mkdir(old_dir)
    # ensure a measurable mtime gap between the two dirs
    time.sleep(1)
    os.mkdir(new_dir)
    cleaned_count = cleanup_dirs(pattern=str(tmp_path / 'modtime_*_*'), grace_period=1. / 60.)
    assert cleaned_count == 1
    assert not os.path.exists(old_dir)
    assert os.path.exists(new_dir)
@pytest.mark.parametrize('runtime', ('docker', 'podman'))
def test_registry_auth_cleanup(tmp_path, runtime):
    """Cleaning a private data dir also removes the registry auth file prepare() created."""
    pdd_path = tmp_path / 'private_data_dir'
    pdd_path.mkdir()
    private_data_dir = str(pdd_path)
    rc = RunnerConfig(
        private_data_dir=private_data_dir,
        playbook='ping.yml',
        process_isolation_executable=runtime,
        process_isolation=True,
        container_image='foo.invalid/alan/runner',
        container_auth_data={'host': 'https://somedomain.invalid', 'username': 'foouser', 'password': '349sk34'},
        ident='awx_123'
    )
    rc.prepare()
    assert rc.registry_auth_path
    assert os.path.exists(rc.registry_auth_path)
    ct = cleanup_dirs(pattern=private_data_dir, grace_period=0)
    assert ct == 1
    assert not os.path.exists(private_data_dir)
    assert not os.path.exists(rc.registry_auth_path)
@pytest.mark.parametrize(
    ('pattern', 'match'), (
        ('/', '/'),
        ('/home', '/home'),
        ('/', 'Provided pattern could result in deleting system folders'),
        ('/home', 'Provided pattern could result in deleting system folders'),
        ('/hom*', '/home'),
    )
)
def test_validate_pattern(pattern, match, monkeypatch):
    """validate_pattern() rejects patterns that would delete system folders (incl. resolved symlinks)."""
    def mock_resolve(path):
        # simulate macOS-style firmlink resolution of /home
        resolved = pathlib.Path(path)
        if path.as_posix().startswith('/hom'):
            resolved = pathlib.Path('/System/Volumes/Data/home')
        return resolved
    monkeypatch.setattr('ansible_runner.cleanup.Path.resolve', mock_resolve)
    with pytest.raises(RuntimeError, match=match):
        validate_pattern(pattern)
ansible-runner-2.4.1/test/unit/test_event_filter.py 0000664 0000000 0000000 00000014165 14770573620 0022521 0 ustar 00root root 0000000 0000000 # pylint: disable=W0621
import base64
import json
from io import StringIO
from pprint import pprint
import pytest
from ansible_runner.utils import OutputEventFilter
# Maximum number of base64 characters emitted per escaped chunk.
MAX_WIDTH = 78
# Fixed UUID shared by the event-correlation tests below.
EXAMPLE_UUID = '890773f5-fe6d-4091-8faf-bdc8021d65dd'
def write_encoded_event_data(fileobj, data):
    """Write ``data`` to ``fileobj`` framed the way OutputEventFilter expects it."""
    encoded = base64.b64encode(json.dumps(data).encode('utf-8')).decode()
    # start marker, then chunked base64 each followed by a cursor-back escape, then end marker
    fileobj.write('\x1b[K')
    start = 0
    while start < len(encoded):
        chunk = encoded[start:start + MAX_WIDTH]
        fileobj.write(chunk + f'\x1b[{len(chunk)}D')
        start += MAX_WIDTH
    fileobj.write('\x1b[K')
@pytest.fixture
def fake_callback():
    """Collects event dicts appended by job_event_callback."""
    return []
@pytest.fixture
def fake_cache():
    """Stands in for the event cache keyed by ':1:ev-<uuid>'."""
    return {}
@pytest.fixture
def wrapped_handle(job_event_callback):
    """An OutputEventFilter wrapping an in-memory stream, as tasks.py would build it."""
    # Preliminary creation of resources usually done in tasks.py
    return OutputEventFilter(StringIO(), job_event_callback)
@pytest.fixture
def job_event_callback(fake_callback, fake_cache):
    """Callback that merges cached data by uuid into the event and records it."""
    def method(event_data):
        print('fake callback called')
        if 'uuid' in event_data:
            # pull any cached partial event data keyed by this uuid
            cache_event = fake_cache.get(f":1:ev-{event_data['uuid']}", None)
            if cache_event is not None:
                event_data.update(cache_event)
        fake_callback.append(event_data)
    return method
def test_event_recomb(fake_callback, fake_cache, wrapped_handle):
    """Encoded event markers recombine stdout lines into a single cached event."""
    # Pretend that this is done by the Ansible callback module
    fake_cache[f':1:ev-{EXAMPLE_UUID}'] = {'event': 'foo'}
    write_encoded_event_data(wrapped_handle, {
        'uuid': EXAMPLE_UUID
    })
    wrapped_handle.write('\r\nTASK [Gathering Facts] *********************************************************\n')
    wrapped_handle.write('\u001b[0;33mchanged: [localhost]\u001b[0m\n')
    write_encoded_event_data(wrapped_handle, {})
    # stop pretending
    assert len(fake_callback) == 1
    recomb_data = fake_callback[0]
    assert 'event' in recomb_data
    assert recomb_data['event'] == 'foo'
def test_separate_verbose_events(fake_callback, wrapped_handle):
    """Output lines before the first event marker each become a 'verbose' event."""
    # Pretend that this is done by the Ansible callback module
    wrapped_handle.write('Using /etc/ansible/ansible.cfg as config file\n')
    wrapped_handle.write('SSH password: \n')
    write_encoded_event_data(wrapped_handle, {  # associated with _next_ event
        'uuid': EXAMPLE_UUID
    })
    # stop pretending
    assert len(fake_callback) == 2
    for event_data in fake_callback:
        assert 'event' in event_data
        assert event_data['event'] == 'verbose'
def test_large_data_payload(fake_callback, fake_cache, wrapped_handle):
    """Event payloads longer than one MAX_WIDTH chunk still decode into one event."""
    # Pretend that this is done by the Ansible callback module
    fake_cache[f':1:ev-{EXAMPLE_UUID}'] = {'event': 'foo'}
    event_data_to_encode = {
        'uuid': EXAMPLE_UUID,
        'host': 'localhost',
        'role': 'some_path_to_role'
    }
    # ensure the payload actually spans multiple chunks
    assert len(json.dumps(event_data_to_encode)) > MAX_WIDTH
    write_encoded_event_data(wrapped_handle, event_data_to_encode)
    wrapped_handle.write('\r\nTASK [Gathering Facts] *********************************************************\n')
    wrapped_handle.write('\u001b[0;33mchanged: [localhost]\u001b[0m\n')
    write_encoded_event_data(wrapped_handle, {})
    # stop pretending
    assert len(fake_callback) == 1
    recomb_data = fake_callback[0]
    assert 'role' in recomb_data
    assert recomb_data['role'] == 'some_path_to_role'
    assert 'event' in recomb_data
    assert recomb_data['event'] == 'foo'
def test_event_lazy_parsing(fake_callback, fake_cache, wrapped_handle):
    """Event data written in arbitrary partial chunks is still matched and decoded."""
    # Pretend that this is done by the Ansible callback module
    fake_cache[f':1:ev-{EXAMPLE_UUID}'] = {'event': 'foo'}
    buff = StringIO()
    event_data_to_encode = {
        'uuid': EXAMPLE_UUID,
        'host': 'localhost',
        'role': 'some_path_to_role'
    }
    write_encoded_event_data(buff, event_data_to_encode)
    # write the data to the event filter in chunks to test lazy event matching
    buff.seek(0)
    start_token_chunk = buff.read(1) # \x1b
    start_token_remainder = buff.read(2) # [K
    body = buff.read(15) # next 15 bytes of base64 data
    remainder = buff.read() # the remainder
    for chunk in (start_token_chunk, start_token_remainder, body, remainder):
        wrapped_handle.write(chunk)
    wrapped_handle.write('\r\nTASK [Gathering Facts] *********************************************************\n')
    wrapped_handle.write('\u001b[0;33mchanged: [localhost]\u001b[0m\n')
    write_encoded_event_data(wrapped_handle, {})
    # stop pretending
    assert len(fake_callback) == 1
    recomb_data = fake_callback[0]
    assert 'role' in recomb_data
    assert recomb_data['role'] == 'some_path_to_role'
    assert 'event' in recomb_data
    assert recomb_data['event'] == 'foo'
@pytest.mark.timeout(1)
def test_large_stdout_blob():
    """Writing ~10MB through the filter completes quickly (no pathological buffering)."""
    def _callback(*args, **kw):
        # pylint: disable=W0613
        pass
    f = OutputEventFilter(StringIO(), _callback)
    for _ in range(1024 * 10):
        f.write('x' * 1024)
def test_verbose_line_buffering():
    """Partial lines are buffered until a newline completes them; close() emits EOF."""
    events = []
    def _callback(event_data):
        events.append(event_data)
    f = OutputEventFilter(StringIO(), _callback)
    f.write('one two\r\n\r\n')
    assert len(events) == 2
    assert events[0]['start_line'] == 0
    assert events[0]['end_line'] == 1
    assert events[0]['stdout'] == 'one two'
    assert events[1]['start_line'] == 1
    assert events[1]['end_line'] == 2
    assert events[1]['stdout'] == ''
    f.write('three')
    assert len(events) == 2
    f.write('\r\nfou')
    # three is not pushed to buffer until its line completes
    assert len(events) == 3
    assert events[2]['start_line'] == 2
    assert events[2]['end_line'] == 3
    assert events[2]['stdout'] == 'three'
    f.write('r\r')
    f.write('\nfi')
    assert events[3]['start_line'] == 3
    assert events[3]['end_line'] == 4
    assert events[3]['stdout'] == 'four'
    f.write('ve')
    f.write('\r\n')
    assert len(events) == 5
    assert events[4]['start_line'] == 4
    assert events[4]['end_line'] == 5
    assert events[4]['stdout'] == 'five'
    f.close()
    pprint(events)
    assert len(events) == 6
    assert events[5]['event'] == 'EOF'
ansible-runner-2.4.1/test/unit/test_interface.py 0000664 0000000 0000000 00000001622 14770573620 0021765 0 ustar 00root root 0000000 0000000 import pytest
from ansible_runner.interface import init_runner
def test_default_callback_set(mocker):
    """init_runner() installs signal_handler as the default cancel callback."""
    mocker.patch('ansible_runner.interface.signal_handler', side_effect=AttributeError('Raised intentionally'))
    with pytest.raises(AttributeError, match='Raised intentionally'):
        init_runner(ignore_logging=True)
def test_set_cancel_callback(mocker):
    """A user-supplied cancel_callback must be handed through to Runner unchanged."""
    mock_runner = mocker.patch(
        'ansible_runner.interface.Runner',
        side_effect=AttributeError('Raised intentionally'),
    )
    mock_runner_config = mocker.patch('ansible_runner.interface.RunnerConfig')
    mock_runner_config.prepare.return_value = None

    def sentinel_callback():
        return 'custom'

    with pytest.raises(AttributeError, match='Raised intentionally'):
        init_runner(ignore_logging=True, cancel_callback=sentinel_callback)

    # Runner must have been constructed with exactly our callback object.
    assert mock_runner.call_args.kwargs['cancel_callback'] is sentinel_callback
ansible-runner-2.4.1/test/unit/test_loader.py 0000664 0000000 0000000 00000007205 14770573620 0021276 0 ustar 00root root 0000000 0000000 # pylint: disable=W0212,W0621
from io import BytesIO
from pytest import raises, fixture
import ansible_runner.loader
from ansible_runner.exceptions import ConfigurationError
@fixture
def loader(tmp_path):
    # ArtifactLoader rooted at a fresh per-test temporary directory.
    return ansible_runner.loader.ArtifactLoader(str(tmp_path))
def test__load_json_success(loader):
    """Valid JSON text is parsed into the corresponding dict."""
    parsed = loader._load_json('{"test": "string"}')
    assert isinstance(parsed, dict)
    assert parsed['test'] == 'string'
def test__load_json_failure(loader):
    """Non-JSON input (YAML or plain text) yields None rather than raising."""
    for bad_input in ('---\ntest: string', 'test string'):
        assert loader._load_json(bad_input) is None
def test__load_yaml_success(loader):
res = loader._load_yaml('---\ntest: string')
assert isinstance(res, dict)
assert res['test'] == 'string'
res = loader._load_yaml('{"test": "string"}')
assert isinstance(res, dict)
assert res['test'] == 'string'
def test__load_yaml_failure(loader):
res = loader._load_yaml('---\ntest: string:')
assert res is None
def test_abspath(loader, tmp_path):
res = loader.abspath('/test')
assert res == '/test'
res = loader.abspath('test')
assert res == tmp_path.joinpath('test').as_posix()
res = loader.abspath('~/test')
assert res.startswith('/')
def test_load_file_text_cache_hit(loader, mocker, tmp_path):
    """load_file() reads via _get_contents once, then serves repeats from the cache."""
    mock_get_contents = mocker.patch.object(ansible_runner.loader.ArtifactLoader, '_get_contents')
    mock_get_contents.return_value = 'test\nstring'

    assert not loader._cache

    testfile = tmp_path.joinpath('test').as_posix()

    # cache miss: the file is fetched and the result cached
    res = loader.load_file(testfile, str)
    assert mock_get_contents.called
    # BUGFIX: `called_with_args` is not a Mock assertion method; accessing it
    # auto-created a child Mock whose return value is always truthy, so the
    # old `assert` could never fail.  Use the real assertion helper instead.
    mock_get_contents.assert_called_with(testfile)
    assert res == b'test\nstring'
    assert testfile in loader._cache

    mock_get_contents.reset_mock()

    # cache hit: _get_contents must not be consulted again
    res = loader.load_file(testfile, str)
    assert not mock_get_contents.called
    assert res == b'test\nstring'
    assert testfile in loader._cache
def test_load_file_json(loader, mocker, tmp_path):
    """With no explicit objtype, YAML/JSON content is parsed into a dict."""
    mock_get_contents = mocker.patch.object(ansible_runner.loader.ArtifactLoader, '_get_contents')
    mock_get_contents.return_value = '---\ntest: string'

    assert not loader._cache

    testfile = tmp_path.joinpath('test').as_posix()
    res = loader.load_file(testfile)

    assert mock_get_contents.called
    # BUGFIX: replace the no-op `called_with_args` (auto-created Mock
    # attribute, always truthy) with the real Mock assertion helper.
    mock_get_contents.assert_called_with(testfile)
    assert testfile in loader._cache
    assert res['test'] == 'string'
def test_load_file_type_check(loader, mocker, tmp_path):
    """load_file() enforces the requested objtype on the parsed content."""
    mock_get_contents = mocker.patch.object(ansible_runner.loader.ArtifactLoader, '_get_contents')
    mock_get_contents.return_value = '---\ntest: string'
    assert not loader._cache
    testfile = tmp_path.joinpath('test').as_posix()
    # type check passes: a YAML mapping satisfies objtype=dict
    res = loader.load_file(testfile, dict)
    assert res is not None
    mock_get_contents.reset_mock()
    mock_get_contents.return_value = 'test string'
    # clear the cache so the new (non-dict) content is actually re-read
    loader._cache = {}
    # type check fails: plain text cannot satisfy objtype=dict
    with raises(ConfigurationError):
        res = loader.load_file(testfile, dict)
    # NOTE(review): the exception above prevents reassignment, so this still
    # asserts the value returned by the earlier successful call.
    assert res is not None
def test_get_contents_ok(loader, mocker):
    """_get_contents() returns the raw bytes read from the opened file."""
    mock_open = mocker.patch('codecs.open')
    fake_file = BytesIO(b"test string")
    mock_open.return_value.__enter__.return_value = fake_file
    assert loader._get_contents('/tmp') == b'test string'
def test_get_contents_invalid_path(loader, tmp_path):
with raises(ConfigurationError):
loader._get_contents(tmp_path.joinpath('invalid').as_posix())
def test_get_contents_exception(loader, tmp_path):
with raises(ConfigurationError):
loader._get_contents(tmp_path.as_posix())
ansible-runner-2.4.1/test/unit/test_runner.py 0000664 0000000 0000000 00000016241 14770573620 0021341 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# pylint: disable=W0621
import codecs
import datetime
import os
import sys
import json
from pathlib import Path
from test.utils.common import iterate_timeout
import pexpect
import pytest
from ansible_runner import Runner
from ansible_runner.exceptions import CallbackError, AnsibleRunnerException
from ansible_runner.config.runner import RunnerConfig
@pytest.fixture(scope='function')
def rc(request, tmp_path):
    """A minimal RunnerConfig with short timeouts suitable for fast unit tests."""
    # pylint: disable=W0613
    rc = RunnerConfig(str(tmp_path))
    # keep ansible's own stdout out of the captured test output
    rc.suppress_ansible_output = True
    # no interactive password prompts are expected from the spawned commands
    rc.expect_passwords = {
        pexpect.TIMEOUT: None,
        pexpect.EOF: None
    }
    rc.cwd = str(tmp_path)
    rc.env = {}
    # aggressive timeouts so misbehaving commands fail the test quickly
    rc.job_timeout = .5
    rc.idle_timeout = 0
    rc.pexpect_timeout = .1
    rc.pexpect_use_poll = True
    return rc
def test_simple_spawn(rc):
    """A trivial command runs to completion with a zero exit code."""
    rc.command = ['ls', '-la']
    status, exitcode = Runner(config=rc).run()
    assert (status, exitcode) == ('successful', 0)
def test_error_code(rc):
    """An invalid flag makes the command fail with a non-zero exit code."""
    rc.command = ['ls', '--nonsense']
    final_status, final_rc = Runner(config=rc).run()
    assert final_status == 'failed'
    assert final_rc > 0
# TODO: matt does not like this test
def test_job_timeout(rc):
rc.command = [sys.executable, '-c', 'import time; time.sleep(5)']
runner = Runner(config=rc)
status, _ = runner.run()
assert status == 'timeout'
assert runner.timed_out is True
def test_cancel_callback(rc):
rc.command = [sys.executable, '-c', 'print(input("Password: "))']
status, _ = Runner(config=rc, cancel_callback=lambda: True).run()
assert status == 'canceled'
def test_cancel_callback_error(rc):
    """An exception raised inside the cancel callback surfaces as CallbackError."""
    def exploding_callback():
        raise Exception('kaboom')

    rc.command = [sys.executable, '-c', 'print(input("Password: "))']
    with pytest.raises(CallbackError):
        Runner(config=rc, cancel_callback=exploding_callback).run()
def test_verbose_event_created_time(rc):
rc.command = ['echo', 'helloworld']
runner = Runner(config=rc)
status, exitcode = runner.run()
assert status == 'successful'
assert exitcode == 0
for event in runner.events:
assert 'created' in event, event
assert datetime.datetime.fromisoformat(event['created']).tzinfo == datetime.timezone.utc
@pytest.mark.parametrize('value', ['abc123', 'Iñtërnâtiônàlizætiøn'])
def test_env_vars(rc, value):
rc.command = [sys.executable, '-c', 'import os; print(os.getenv("X_MY_ENV"))']
rc.env = {'X_MY_ENV': value}
status, exitcode = Runner(config=rc).run()
assert status == 'successful'
assert exitcode == 0
with codecs.open(os.path.join(rc.artifact_dir, 'stdout'), 'r', encoding='utf-8') as f:
assert value in f.read()
def test_event_callback_data_check(rc, mocker):
rc.ident = "testident"
rc.check_job_event_data = True
runner = Runner(config=rc, remove_partials=False)
runner.event_handler = mocker.Mock()
with pytest.raises(AnsibleRunnerException) as exc:
runner.event_callback({"uuid": "testuuid", "counter": 0})
assert "Failed to open ansible stdout callback plugin partial data" in str(exc)
def test_event_callback_interface_has_ident(rc, mocker):
rc.ident = "testident"
runner = Runner(config=rc, remove_partials=False)
runner.event_handler = mocker.Mock()
mocker.patch('codecs.open', mocker.mock_open(read_data=json.dumps({"event": "test"})))
chmod = mocker.patch('os.chmod', mocker.Mock())
mocker.patch('os.mkdir', mocker.Mock())
runner.event_callback({"uuid": "testuuid", "counter": 0})
assert runner.event_handler.call_count == 1
runner.event_handler.assert_called_with({
'runner_ident': 'testident',
'counter': 0,
'uuid': 'testuuid',
'event': 'test',
'created': mocker.ANY
})
chmod.assert_called_once()
runner.status_callback("running")
def test_event_callback_interface_calls_event_handler_for_verbose_event(rc, mocker):
rc.ident = "testident"
event_handler = mocker.Mock()
runner = Runner(config=rc, event_handler=event_handler)
mocker.patch('os.mkdir', mocker.Mock())
runner.event_callback({"uuid": "testuuid", "event": "verbose", "counter": 0})
assert event_handler.call_count == 1
event_handler.assert_called_with({
'runner_ident': 'testident',
'counter': 0,
'uuid': 'testuuid',
'event': 'verbose',
'created': mocker.ANY
})
def test_status_callback_interface(rc, mocker):
runner = Runner(config=rc)
assert runner.status == 'unstarted'
runner.status_handler = mocker.Mock()
runner.status_callback("running")
assert runner.status_handler.call_count == 1
runner.status_handler.assert_called_with(
{'status': 'running', 'runner_ident': str(rc.ident)},
runner_config=runner.config)
assert runner.status == 'running'
@pytest.mark.parametrize('runner_mode', ['pexpect', 'subprocess'])
def test_stdout_file_write(rc, runner_mode):
if runner_mode == 'pexpect':
pytest.skip('Writing to stdout can be flaky, probably due to some pexpect bug')
rc.command = ['echo', 'hello_world_marker']
rc.runner_mode = runner_mode
runner = Runner(config=rc)
status, _ = runner.run()
assert status == 'successful'
stdout_path = Path(rc.artifact_dir) / 'stdout'
# poll until we are sure the file has been written to
for _ in iterate_timeout(30.0, 'stdout file to be written', interval=0.2):
if stdout_path.read_text().strip():
break
assert 'hello_world_marker' in stdout_path.read_text()
assert list(runner.events)
assert 'hello_world_marker' in list(runner.events)[0]['stdout']
@pytest.mark.parametrize('runner_mode', ['pexpect', 'subprocess'])
def test_stdout_file_no_write(rc, runner_mode):
rc.command = ['echo', 'hello_world_marker']
rc.runner_mode = runner_mode
rc.suppress_output_file = True
runner = Runner(config=rc)
status, _ = runner.run()
assert status == 'successful'
for filename in ('stdout', 'stderr'):
stdout_path = Path(rc.artifact_dir) / filename
assert not stdout_path.exists()
@pytest.mark.parametrize('runner_mode',
[
pytest.param('pexpect', marks=pytest.mark.xfail(reason="Test is unstable with pexpect")),
'subprocess'
])
def test_multiline_blank_write(rc, runner_mode):
rc.command = ['echo', 'hello_world_marker\n\n\n']
rc.runner_mode = runner_mode
runner = Runner(config=rc)
status, _ = runner.run()
assert status == 'successful'
stdout_path = Path(rc.artifact_dir) / 'stdout'
assert stdout_path.read_text() == 'hello_world_marker\n\n\n\n' # one extra newline okay
@pytest.mark.parametrize('runner_mode', ['subprocess'])
@pytest.mark.filterwarnings("error")
def test_no_ResourceWarning_error(rc, runner_mode):
    """
    Test that no ResourceWarning error is propagated up with warnings-as-errors enabled.

    Not properly closing stdout/stderr in Runner.run() will cause a ResourceWarning
    error that is only seen when we treat warnings as an error.
    """
    rc.command = ['echo', 'Hello World']
    rc.runner_mode = runner_mode
    runner = Runner(config=rc)
    status, _ = runner.run()
    assert status == 'successful'
ansible-runner-2.4.1/test/unit/test_streaming.py 0000664 0000000 0000000 00000000733 14770573620 0022020 0 ustar 00root root 0000000 0000000 import os
from ansible_runner.streaming import Processor
class TestProcessor:
    """Unit tests for the streaming Processor."""

    def test_artifact_dir_with_int_ident(self, tmp_path):
        """An integer ident must be stringified when building artifact_dir."""
        private_data_dir = str(tmp_path)
        ident = 123
        processor = Processor(private_data_dir=private_data_dir, ident=ident)
        expected = os.path.join(private_data_dir, 'artifacts', str(ident))
        assert processor.artifact_dir == expected
ansible-runner-2.4.1/test/unit/test_utils.py 0000664 0000000 0000000 00000000612 14770573620 0021163 0 ustar 00root root 0000000 0000000 import os
import stat
from ansible_runner.utils import dump_artifact
def test_artifact_permissions(tmp_path):
    """Artifacts should allow user read/write"""
    filename = dump_artifact("artifact content", str(tmp_path))
    mode = stat.S_IMODE(os.stat(filename).st_mode)
    required = stat.S_IRUSR | stat.S_IWUSR
    assert mode & required == required, "file mode is incorrect"
ansible-runner-2.4.1/test/unit/utils/ 0000775 0000000 0000000 00000000000 14770573620 0017553 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/unit/utils/__init__.py 0000664 0000000 0000000 00000000000 14770573620 0021652 0 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/unit/utils/capacity/ 0000775 0000000 0000000 00000000000 14770573620 0021350 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/unit/utils/capacity/__init__.py 0000664 0000000 0000000 00000000000 14770573620 0023447 0 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/unit/utils/capacity/test_uuid.py 0000664 0000000 0000000 00000006124 14770573620 0023732 0 ustar 00root root 0000000 0000000 # pylint: disable=W0621,W0613
import pytest
from ansible_runner.utils.capacity import (
_set_uuid,
ensure_uuid,
)
@pytest.fixture
def mock_uuid(mocker):
uuid = 'f6bf3d15-7a6b-480a-b29c-eb4d0acf38ce'
mocker.patch('ansible_runner.utils.capacity.uuid.uuid4', return_value=uuid)
return uuid
@pytest.fixture
def mock_home_path(mocker, tmp_path):
mocker.patch('ansible_runner.utils.capacity.Path.home', return_value=tmp_path)
def test_set_uuid(mock_uuid, mock_home_path, tmp_path):
uuid = _set_uuid()
expected_path = tmp_path / '.ansible_runner_uuid'
assert uuid == mock_uuid
assert expected_path.exists()
assert expected_path.stat().st_mode == 0o100600
assert expected_path.read_text() == mock_uuid
def test_set_uuid_mode(mock_uuid, tmp_path, mock_home_path):
uuid = _set_uuid(mode=0o750)
expected_path = tmp_path / '.ansible_runner_uuid'
assert uuid == mock_uuid
assert expected_path.exists()
assert expected_path.stat().st_mode == 0o100750
assert expected_path.read_text() == mock_uuid
def test_set_uuid_change_mode(mock_uuid, tmp_path):
uuid_path = tmp_path / 'uuid'
uuid_path.touch(0o777)
uuid = _set_uuid(uuid_path)
assert uuid == mock_uuid
assert uuid_path.exists()
assert uuid_path.stat().st_mode == 0o100600
assert uuid_path.read_text() == mock_uuid
def test_set_uuid_path(mock_uuid, tmp_path):
uuid_path = tmp_path / 'uuid'
uuid = _set_uuid(uuid_path)
assert uuid == mock_uuid
assert uuid_path.exists()
assert uuid_path.stat().st_mode == 0o100600
assert uuid_path.read_text() == mock_uuid
def test_set_uuid_bad_path(mock_uuid, tmp_path):
uuid_path = tmp_path / 'nope' / 'uuid'
with pytest.raises(FileNotFoundError, match='No such file or directory'):
_set_uuid(uuid_path)
def test_ensure_uuid(mocker, mock_uuid, mock_home_path, tmp_path):
mock_set_uuid = mocker.patch('ansible_runner.utils.capacity._set_uuid', return_value=mock_uuid)
uuid = ensure_uuid()
assert uuid == mock_uuid
mock_set_uuid.assert_called_with(tmp_path / '.ansible_runner_uuid', 0o600)
def test_ensure_uuid_does_not_exist(mocker, mock_uuid, tmp_path):
mock_set_uuid = mocker.patch('ansible_runner.utils.capacity._set_uuid', return_value=mock_uuid)
uuid_path = tmp_path / 'uuid'
uuid = ensure_uuid(uuid_path)
assert uuid == mock_uuid
mock_set_uuid.assert_called_once_with(uuid_path, 0o600)
def test_ensure_uuid_exists(mocker, mock_uuid, tmp_path):
mock_set_uuid = mocker.patch('ansible_runner.utils.capacity._set_uuid', return_value=mock_uuid)
uuid_path = tmp_path / 'uuid'
uuid_path.write_text(mock_uuid + '\n')
uuid = ensure_uuid(uuid_path)
assert uuid == mock_uuid
assert mock_set_uuid.call_count == 0
def test_ensure_uuid_exists_mode(mocker, mock_uuid, tmp_path):
mock_set_uuid = mocker.patch('ansible_runner.utils.capacity._set_uuid', return_value=mock_uuid)
uuid_path = tmp_path / 'uuid'
uuid_path.touch(0o775)
ensure_uuid(uuid_path)
assert mock_set_uuid.call_count == 0
assert uuid_path.stat().st_mode == 0o100600
ansible-runner-2.4.1/test/unit/utils/test_cleanup_folder.py 0000664 0000000 0000000 00000001223 14770573620 0024144 0 ustar 00root root 0000000 0000000 from ansible_runner.utils import cleanup_folder
def test_cleanup_folder(tmp_path):
    """cleanup_folder() removes an existing directory."""
    target = tmp_path / 'a_folder'
    target.mkdir()
    assert target.exists()  # sanity
    cleanup_folder(str(target))
    assert not target.exists()
def test_cleanup_folder_already_deleted(tmp_path):
missing_dir = tmp_path / 'missing'
assert not missing_dir.exists() # sanity
cleanup_folder(str(missing_dir))
assert not missing_dir.exists()
def test_cleanup_folder_file_no_op(tmp_path):
file_path = tmp_path / 'a_file'
file_path.write_text('foobar')
cleanup_folder(str(file_path))
assert file_path.exists()
ansible-runner-2.4.1/test/unit/utils/test_dump_artifacts.py 0000664 0000000 0000000 00000015661 14770573620 0024202 0 ustar 00root root 0000000 0000000 import pytest
from ansible_runner.utils import dump_artifacts
def test_dump_artifacts_private_data_dir_does_not_exists():
data_dir = '/not/a/path'
kwargs = {'private_data_dir': data_dir}
with pytest.raises(ValueError, match='invalid or does not exist'):
dump_artifacts(kwargs)
assert kwargs['private_data_dir'] == data_dir
def test_dump_artifacts_private_data_dir_create_tempfile(mocker):
mocker.patch('ansible_runner.utils.os.path.exists', side_effect=AttributeError('Raised intentionally'))
mocker.patch('ansible_runner.utils.tempfile.mkdtemp', return_value='/tmp/dir')
kwargs = {}
with pytest.raises(AttributeError, match='Raised intentionally'):
dump_artifacts(kwargs)
assert kwargs['private_data_dir'] == '/tmp/dir'
@pytest.mark.parametrize(
'playbook', (
[{'playbook': [{'hosts': 'all'}]}],
{'playbook': [{'hosts': 'all'}]},
)
)
def test_dump_artifacts_playbook_object(mocker, playbook):
mock_dump_artifact = mocker.patch('ansible_runner.utils.dump_artifact', side_effect=AttributeError('Raised intentionally'))
mocker.patch('ansible_runner.utils.isplaybook', return_value=True)
playbook_string = '[{"playbook": [{"hosts": "all"}]}]'
kwargs = {'private_data_dir': '/tmp', 'playbook': playbook}
with pytest.raises(AttributeError, match='Raised intentionally'):
dump_artifacts(kwargs)
mock_dump_artifact.assert_called_once_with(playbook_string, '/tmp/project', 'main.json')
def test_dump_artifacts_role(mocker):
mock_dump_artifact = mocker.patch('ansible_runner.utils.dump_artifact')
kwargs = {
'private_data_dir': '/tmp',
'role': 'test',
'playbook': [{'playbook': [{'hosts': 'all'}]}],
}
dump_artifacts(kwargs)
assert mock_dump_artifact.call_count == 2
mock_dump_artifact.assert_called_with('{"ANSIBLE_ROLES_PATH": "/tmp/roles"}', '/tmp/env', 'envvars')
def test_dump_artifacts_roles_path(mocker):
mock_dump_artifact = mocker.patch('ansible_runner.utils.dump_artifact')
kwargs = {
'private_data_dir': '/tmp',
'role': 'test',
'roles_path': '/tmp/altrole',
'playbook': [{'playbook': [{'hosts': 'all'}]}],
}
dump_artifacts(kwargs)
assert mock_dump_artifact.call_count == 2
mock_dump_artifact.assert_called_with('{"ANSIBLE_ROLES_PATH": "/tmp/altrole:/tmp/roles"}', '/tmp/env', 'envvars')
def test_dump_artifacts_role_vars(mocker):
mock_dump_artifact = mocker.patch('ansible_runner.utils.dump_artifact', side_effect=AttributeError('Raised intentionally'))
kwargs = {
'private_data_dir': '/tmp',
'role': 'test',
'role_vars': {'name': 'nginx'},
'playbook': [{'playbook': [{'hosts': 'all'}]}],
}
with pytest.raises(AttributeError, match='Raised intentionally'):
dump_artifacts(kwargs)
mock_dump_artifact.assert_called_once_with(
'[{"hosts": "all", "roles": [{"name": "test", "vars": {"name": "nginx"}}]}]',
'/tmp/project',
'main.json'
)
def test_dump_artifacts_role_skip_facts(mocker):
mock_dump_artifact = mocker.patch('ansible_runner.utils.dump_artifact', side_effect=AttributeError('Raised intentionally'))
kwargs = {
'private_data_dir': '/tmp',
'role': 'test',
'role_skip_facts': {'name': 'nginx'},
'playbook': [{'playbook': [{'hosts': 'all'}]}],
}
with pytest.raises(AttributeError, match='Raised intentionally'):
dump_artifacts(kwargs)
mock_dump_artifact.assert_called_once_with(
'[{"hosts": "all", "roles": [{"name": "test"}], "gather_facts": false}]',
'/tmp/project',
'main.json'
)
def test_dump_artifacts_inventory_string(mocker):
mock_dump_artifact = mocker.patch('ansible_runner.utils.dump_artifact')
inv = '[all]\nlocalhost'
kwargs = {'private_data_dir': '/tmp', 'inventory': inv}
dump_artifacts(kwargs)
mock_dump_artifact.assert_called_once_with(inv, '/tmp/inventory', 'hosts')
def test_dump_artifacts_inventory_path(mocker):
mock_dump_artifact = mocker.patch('ansible_runner.utils.dump_artifact')
inv = '/tmp'
kwargs = {'private_data_dir': '/tmp', 'inventory': inv}
dump_artifacts(kwargs)
assert mock_dump_artifact.call_count == 0
assert mock_dump_artifact.called is False
assert kwargs['inventory'] == inv
def test_dump_artifacts_inventory_object(mocker):
mock_dump_artifact = mocker.patch('ansible_runner.utils.dump_artifact')
inv = {'foo': 'bar'}
inv_string = '{"foo": "bar"}'
kwargs = {'private_data_dir': '/tmp', 'inventory': inv}
dump_artifacts(kwargs)
mock_dump_artifact.assert_called_once_with(inv_string, '/tmp/inventory', 'hosts.json')
def test_dump_artifacts_inventory_string_path(mocker):
mocker.patch('ansible_runner.utils.os.path.exists', return_value=True)
inv_string = 'site1'
kwargs = {'private_data_dir': '/tmp', 'inventory': inv_string}
dump_artifacts(kwargs)
assert kwargs['inventory'] == '/tmp/inventory/site1'
def test_dump_artifacts_inventory_string_abs_path(mocker):
mocker.patch('ansible_runner.utils.os.path.exists', return_value=True)
inv_string = '/tmp/site1'
kwargs = {'private_data_dir': '/tmp', 'inventory': inv_string}
dump_artifacts(kwargs)
assert kwargs['inventory'] == '/tmp/site1'
def test_dump_artifacts_passwords(mocker):
mock_dump_artifact = mocker.patch('ansible_runner.utils.dump_artifact')
kwargs = {
'private_data_dir': '/tmp',
'passwords': {"a": "b"},
'envvars': {"abc": "def"},
'ssh_key': 'asdfg1234',
}
dump_artifacts(kwargs)
assert mock_dump_artifact.call_count == 3
mock_dump_artifact.assert_any_call('{"a": "b"}', '/tmp/env', 'passwords')
mock_dump_artifact.assert_any_call('{"abc": "def"}', '/tmp/env', 'envvars')
mock_dump_artifact.assert_called_with('asdfg1234', '/tmp/env', 'ssh_key')
def test_dont_dump_artifacts_passwords(mocker):
mock_dump_artifact = mocker.patch('ansible_runner.utils.dump_artifact')
kwargs = {
'private_data_dir': '/tmp',
'passwords': {"a": "b"},
'envvars': {"abd": "def"},
'ssh_key': 'asdfg1234',
'suppress_env_files': True
}
dump_artifacts(kwargs)
assert mock_dump_artifact.call_count == 0
@pytest.mark.parametrize(
('key', 'value', 'value_str'), (
('extravars', {'foo': 'bar'}, '{"foo": "bar"}'),
('passwords', {'foo': 'bar'}, '{"foo": "bar"}'),
('settings', {'foo': 'bar'}, '{"foo": "bar"}'),
('ssh_key', '1234567890', '1234567890'),
('cmdline', '--tags foo --skip-tags', '--tags foo --skip-tags'),
)
)
def test_dump_artifacts_extra_keys(mocker, key, value, value_str):
mock_dump_artifact = mocker.patch('ansible_runner.utils.dump_artifact')
kwargs = {'private_data_dir': '/tmp'}
kwargs.update({key: value})
dump_artifacts(kwargs)
mock_dump_artifact.assert_called_once_with(value_str, '/tmp/env', key)
assert 'settings' not in kwargs
ansible-runner-2.4.1/test/unit/utils/test_fifo_pipe.py 0000664 0000000 0000000 00000001156 14770573620 0023127 0 ustar 00root root 0000000 0000000 from os import remove
from ansible_runner.utils import open_fifo_write
def test_fifo_write_bytes(tmp_path):
    """Bytes written through the fifo are read back as the original text."""
    path = tmp_path / "bytes_test"
    payload = "bytes"
    try:
        open_fifo_write(path, payload.encode())
        with open(path, 'r') as fifo:
            assert fifo.read() == payload
    finally:
        remove(path)
def test_fifo_write_string(tmp_path):
path = tmp_path / "string_test"
data = "string"
try:
open_fifo_write(path, data)
with open(path, 'r') as f:
results = f.read()
assert results == data
finally:
remove(path)
ansible-runner-2.4.1/test/unit/utils/test_utils.py 0000664 0000000 0000000 00000027434 14770573620 0022336 0 ustar 00root root 0000000 0000000 # pylint: disable=W0212
import datetime
import io
import json
import os
import signal
import time
import stat
from pathlib import Path
import pytest
from ansible_runner.utils import (
isplaybook,
isinventory,
check_isolation_executable_installed,
args2cmdline,
sanitize_container_name,
signal_handler,
)
from ansible_runner.utils.base64io import _to_bytes, Base64IO
from ansible_runner.utils.streaming import stream_dir, unstream_dir
@pytest.mark.parametrize('playbook', ('foo', {}, {'foo': 'bar'}, True, False, None))
def test_isplaybook_invalid(playbook):
assert isplaybook(playbook) is False
@pytest.mark.parametrize('playbook', (['foo'], []))
def test_isplaybook(playbook):
assert isplaybook(playbook) is True
@pytest.mark.parametrize('inventory', ('hosts,', {}, {'foo': 'bar'}))
def test_isinventory(inventory):
assert isinventory(inventory) is True
@pytest.mark.parametrize('inventory', ([], ['foo'], True, False, None))
def test_isinventory_invalid(inventory):
assert isinventory(inventory) is False
def test_args2cmdline():
    """Arguments are joined into a single space-separated command line."""
    assert args2cmdline('ansible', '-m', 'setup', 'localhost') == 'ansible -m setup localhost'
def test_check_isolation_executable_installed():
assert check_isolation_executable_installed("true")
assert not check_isolation_executable_installed("does-not-exist")
@pytest.mark.parametrize('container_name,expected_name', [
    ('foo?bar', 'foo_bar'),
    ('096aac5c-024d-453e-9725-779dc8b3faee', '096aac5c-024d-453e-9725-779dc8b3faee'),  # uuid4
    (42, '42')  # AWX will use primary keys and may not be careful about type
])
def test_sanitize_container_name(container_name, expected_name):
    """Illegal characters are replaced; already-valid names pass through unchanged."""
    sanitized = sanitize_container_name(str(container_name))
    assert sanitized == expected_name
@pytest.mark.parametrize('symlink_dest,check_content', [
('/bin', []),
('ordinary_file.txt', ['my_link']),
('ordinary_directory', ['my_link/dir_file.txt']),
('.', ['my_link/ordinary_directory/dir_file.txt', 'my_link/my_link/ordinary_file.txt']),
('filedoesnotexist.txt', [])
], ids=['global', 'local', 'directory', 'recursive', 'bad'])
def test_transmit_symlink(tmp_path, symlink_dest, check_content):
symlink_dest = Path(symlink_dest)
# prepare the input private_data_dir directory to zip
pdd = tmp_path / 'symlink_zip_test'
pdd.mkdir()
# Create some basic shared demo content
with open(pdd / 'ordinary_file.txt', 'w') as f:
f.write('hello world')
ord_dir = pdd / 'ordinary_directory'
ord_dir.mkdir()
with open(ord_dir / 'dir_file.txt', 'w') as f:
f.write('hello world')
old_symlink_path = pdd / 'my_link'
old_symlink_path.symlink_to(symlink_dest)
# SANITY - set expectations for the symlink
assert old_symlink_path.is_symlink()
assert os.readlink(old_symlink_path) == str(symlink_dest)
# zip and stream the data into the in-memory buffer outgoing_buffer
outgoing_buffer = io.BytesIO()
outgoing_buffer.name = 'not_stdout'
stream_dir(pdd, outgoing_buffer)
# prepare the destination private_data_dir to transmit to
dest_dir = tmp_path / 'symlink_zip_dest'
dest_dir.mkdir()
# Extract twice so we assure that existing data does not break things
for _ in range(2):
# rewind the buffer and extract into destination private_data_dir
outgoing_buffer.seek(0)
first_line = outgoing_buffer.readline()
size_data = json.loads(first_line.strip())
unstream_dir(outgoing_buffer, size_data['zipfile'], dest_dir)
# Assure the new symlink is still the same type of symlink
new_symlink_path = dest_dir / 'my_link'
assert new_symlink_path.is_symlink()
assert os.readlink(new_symlink_path) == str(symlink_dest)
for fname in check_content:
abs_path = dest_dir / fname
assert abs_path.exists(), f'Expected "{fname}" in target dir to be a file with content.'
with open(abs_path, 'r') as f:
assert f.read() == 'hello world'
@pytest.mark.timeout(timeout=3)
def test_stream_dir_no_hang_on_pipe(tmp_path):
# prepare the input private_data_dir directory to zip
pdd = tmp_path / 'timeout_test'
pdd.mkdir()
with open(pdd / 'ordinary_file.txt', 'w') as f:
f.write('hello world')
# make pipe, similar to open_fifo_write
os.mkfifo(pdd / 'my_pipe', stat.S_IRUSR | stat.S_IWUSR)
# zip and stream the data into the in-memory buffer outgoing_buffer
outgoing_buffer = io.BytesIO()
outgoing_buffer.name = 'not_stdout'
stream_dir(pdd, outgoing_buffer)
@pytest.mark.timeout(timeout=3)
def test_unstream_dir_no_hang_on_pipe(tmp_path):
# prepare the input private_data_dir directory to zip
pdd = tmp_path / 'timeout_test_source_dir'
pdd.mkdir()
with open(pdd / 'ordinary_file.txt', 'w') as f:
f.write('hello world')
# zip and stream the data into the in-memory buffer outgoing_buffer
outgoing_buffer = io.BytesIO()
outgoing_buffer.name = 'not_stdout'
stream_dir(pdd, outgoing_buffer)
dest_dir = tmp_path / 'timeout_test_dest'
dest_dir.mkdir()
# We create the pipe in the same location as an archived file to trigger the bug
os.mkfifo(dest_dir / 'ordinary_file.txt', stat.S_IRUSR | stat.S_IWUSR)
outgoing_buffer.seek(0)
first_line = outgoing_buffer.readline()
size_data = json.loads(first_line.strip())
unstream_dir(outgoing_buffer, size_data['zipfile'], dest_dir)
@pytest.mark.parametrize('fperm', [
0o777,
0o666,
0o555,
0o700,
])
def test_transmit_permissions(tmp_path, fperm):
# breakpoint()
pdd = tmp_path / 'transmit_permission_test'
pdd.mkdir()
old_file_path = pdd / 'ordinary_file.txt'
with open(old_file_path, 'w') as f:
f.write('hello world')
old_file_path.chmod(fperm)
# SANITY - set expectations for the file
# assert oct(os.stat(old_file_path).st_mode & 0o777) == oct(fperm)
assert oct(old_file_path.stat().st_mode & 0o777) == oct(fperm)
outgoing_buffer = io.BytesIO()
outgoing_buffer.name = 'not_stdout'
stream_dir(pdd, outgoing_buffer)
dest_dir = tmp_path / 'transmit_permission_dest'
outgoing_buffer.seek(0)
first_line = outgoing_buffer.readline()
size_data = json.loads(first_line.strip())
unstream_dir(outgoing_buffer, size_data['zipfile'], dest_dir)
# Assure the new file is the same permissions
new_file_path = dest_dir / 'ordinary_file.txt'
assert oct(new_file_path.stat().st_mode) == oct(old_file_path.stat().st_mode)
def test_transmit_modtimes(tmp_path):
source_dir = tmp_path / 'source'
source_dir.mkdir()
# python ZipFile uses an old standard that stores seconds in 2 second increments
# https://stackoverflow.com/questions/64048499/zipfile-lib-weird-behaviour-with-seconds-in-modified-time
(source_dir / 'b.txt').touch()
time.sleep(2.0) # flaky for anything less
(source_dir / 'a.txt').touch()
very_old_file = source_dir / 'very_old.txt'
very_old_file.touch()
old_datetime = os.path.getmtime(source_dir / 'a.txt') - datetime.timedelta(days=1).total_seconds()
os.utime(very_old_file, (old_datetime, old_datetime))
# sanity, verify assertions pass for source dir
mod_delta = os.path.getmtime(source_dir / 'a.txt') - os.path.getmtime(source_dir / 'b.txt')
assert mod_delta >= 1.0
outgoing_buffer = io.BytesIO()
outgoing_buffer.name = 'not_stdout'
stream_dir(source_dir, outgoing_buffer)
dest_dir = tmp_path / 'dest'
dest_dir.mkdir()
outgoing_buffer.seek(0)
first_line = outgoing_buffer.readline()
size_data = json.loads(first_line.strip())
unstream_dir(outgoing_buffer, size_data['zipfile'], dest_dir)
# Assure modification times are internally consistent
mod_delta = os.path.getmtime(dest_dir / 'a.txt') - os.path.getmtime(dest_dir / 'b.txt')
assert mod_delta >= 1.0
# Assure modification times are same as original (to the rounded second)
for filename in ('a.txt', 'b.txt', 'very_old.txt'):
difference = abs(os.path.getmtime(dest_dir / filename) - os.path.getmtime(source_dir / filename))
assert difference < 2.0
# Assure the very old timestamp is preserved
old_delta = os.path.getmtime(dest_dir / 'a.txt') - os.path.getmtime(source_dir / 'very_old.txt')
assert old_delta >= datetime.timedelta(days=1).total_seconds() - 2.
def test_transmit_file_from_before_1980s(tmp_path):
source_dir = tmp_path / 'source'
source_dir.mkdir()
old_file = source_dir / 'cassette_tape.txt'
old_file.touch()
old_timestamp = datetime.datetime(year=1978, month=7, day=28).timestamp()
os.utime(old_file, (old_timestamp, old_timestamp))
outgoing_buffer = io.BytesIO()
outgoing_buffer.name = 'not_stdout'
stream_dir(source_dir, outgoing_buffer)
dest_dir = tmp_path / 'dest'
dest_dir.mkdir()
outgoing_buffer.seek(0)
first_line = outgoing_buffer.readline()
size_data = json.loads(first_line.strip())
unstream_dir(outgoing_buffer, size_data['zipfile'], dest_dir)
def test_signal_handler(mocker):
    """Test the default handler is set to handle the correct signals"""

    class MockEvent:
        # minimal stand-in for threading.Event
        def __init__(self):
            self._is_set = False

        def set(self):
            self._is_set = True

        def is_set(self):
            return self._is_set

    # pretend we are running on the main thread so handlers get installed
    mocker.patch('ansible_runner.utils.threading.main_thread', return_value='thread0')
    mocker.patch('ansible_runner.utils.threading.current_thread', return_value='thread0')
    mocker.patch('ansible_runner.utils.threading.Event', MockEvent)
    mock_signal = mocker.patch('ansible_runner.utils.signal.signal')

    # the returned callable reports whether a cancel was requested (not yet)
    assert signal_handler()() is False

    # SIGTERM and SIGINT must both be wired to the handler
    assert mock_signal.call_args_list[0][0][0] == signal.SIGTERM
    assert mock_signal.call_args_list[1][0][0] == signal.SIGINT
def test_signal_handler_outside_main_thread(mocker):
    """Test that the default handler will not try to set signal handlers if not in the main thread"""
    # Simulate running on a non-main thread: main_thread() != current_thread().
    mocker.patch('ansible_runner.utils.threading.main_thread', return_value='thread0')
    mocker.patch('ansible_runner.utils.threading.current_thread', return_value='thread1')

    result = signal_handler()
    assert result is None
def test_signal_handler_set(mocker):
    """Test that the default handler calls the set() method"""

    class ExplodingEvent:
        """Event stand-in whose set() raises so we can prove it was invoked."""

        def __init__(self):
            self._is_set = False

        def set(self):
            raise AttributeError('Raised intentionally')

        def is_set(self):
            return self._is_set

    # Main-thread conditions so signal_handler() installs its handlers.
    mocker.patch('ansible_runner.utils.threading.main_thread', return_value='thread0')
    mocker.patch('ansible_runner.utils.threading.current_thread', return_value='thread0')
    mocker.patch('ansible_runner.utils.threading.Event', ExplodingEvent)
    patched_signal = mocker.patch('ansible_runner.utils.signal.signal')

    signal_handler()

    # The most recent signal.signal() registration is the runner's handler;
    # invoking it must call Event.set(), which our stand-in turns into a raise.
    registered_handler = patched_signal.call_args[0][1]
    with pytest.raises(AttributeError, match='Raised intentionally'):
        registered_handler('number', 'frame')
class TestBase64IO:
    """Unit tests for the Base64IO streaming wrapper."""

    def test_init_fails(self):
        # Wrapping an object without stream attributes must be rejected.
        with pytest.raises(TypeError, match='Base64IO wrapped object must have attributes'):
            Base64IO(None)

    def test__passthrough_interactive_check_bad_method(self):
        wrapper = Base64IO(io.StringIO('test'))
        assert not wrapper._passthrough_interactive_check('invalid_method')

    def test_write(self, tmp_path):
        target = tmp_path / "TestBase64IO_test_write.txt"
        target.touch()
        with target.open(mode='br') as stream:
            wrapper = Base64IO(stream)
            # Underlying stream was opened read-only, so writes are refused...
            with pytest.raises(IOError, match='Stream is not writable'):
                wrapper.write(b'')
            wrapper.close()
            # ...and after close() any write is an error regardless.
            with pytest.raises(ValueError, match='I/O operation on closed file.'):
                wrapper.write(b'')

    def test__read_additional_data_removing_whitespace(self):
        wrapper = Base64IO(io.StringIO(''))
        padded = _to_bytes('te s t')
        # Interior whitespace is dropped while collecting 4 payload bytes.
        assert wrapper._read_additional_data_removing_whitespace(padded, 4) == b'test'
ansible-runner-2.4.1/test/utils/ 0000775 0000000 0000000 00000000000 14770573620 0016574 5 ustar 00root root 0000000 0000000 ansible-runner-2.4.1/test/utils/common.py 0000664 0000000 0000000 00000002210 14770573620 0020431 0 ustar 00root root 0000000 0000000 import time
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric.rsa import generate_private_key
from cryptography.hazmat.primitives.serialization import (
Encoding,
NoEncryption,
PrivateFormat,
)
def iterate_timeout(max_seconds, purpose, interval=2):
    """Yield an incrementing attempt counter until ``max_seconds`` elapse.

    Sleeps ``interval`` seconds between yields; once the deadline passes,
    raises ``Exception`` with a message naming ``purpose``.

    :param max_seconds: total time budget for the wait loop.
    :param purpose: human-readable description used in the timeout message.
    :param interval: seconds to sleep between attempts (default 2).
    """
    # Use a monotonic clock for the deadline: time.time() is wall-clock and
    # a system clock step (NTP sync, manual change) could stretch or cut the
    # timeout arbitrarily; time.monotonic() cannot go backwards.
    deadline = time.monotonic() + max_seconds
    count = 0
    while time.monotonic() < deadline:
        count += 1
        yield count
        time.sleep(interval)
    raise Exception(f"Timeout waiting for {purpose}")
class RSAKey:
    """In-memory RSA key generation and management utils."""

    def __init__(self):
        # Generate a small (fast) RSA key entirely in memory.
        key_obj = generate_private_key(
            public_exponent=65537,
            key_size=1024,
            backend=default_backend(),
        )
        pem_bytes = key_obj.private_bytes(
            encoding=Encoding.PEM,
            format=PrivateFormat.TraditionalOpenSSL,  # A.K.A. PKCS#1
            encryption_algorithm=NoEncryption(),
        )
        self._private_rsa_key_repr = pem_bytes.decode()

    @property
    def private(self) -> str:
        # PEM-encoded (PKCS#1) private key text.
        return self._private_rsa_key_repr
ansible-runner-2.4.1/tox.ini 0000664 0000000 0000000 00000003302 14770573620 0015766 0 ustar 00root root 0000000 0000000 [tox]
envlist = linters, ansible{27, 28, 29, -base}
requires =
tox>4
setuptools>=64
[shared]
pytest_cov_args = --cov --cov-report html --cov-report term --cov-report xml
[testenv]
description = Run tests with {basepython}
deps = ansible27: ansible<2.8
ansible28: ansible<2.9
ansible29: ansible<2.10
ansible-base: ansible-base
py{,3,39,310,311}: ansible-core
integration{,-py39,-py310,-py311,-py312}: ansible-core
build
-r {toxinidir}/test/requirements.txt
passenv =
HOME
RUNNER_TEST_IMAGE_NAME
usedevelop = True
commands = pytest -vv -n auto {posargs}
[testenv:linters{,-py39,-py310,-py311,-py312}]
description = Run code linters
commands =
flake8 --version
flake8 docs src/ansible_runner test
yamllint --version
yamllint -s .
mypy src/ansible_runner
pylint src/ansible_runner test
[testenv:unit{,-py39,-py310,-py311,-py312}]
description = Run unit tests
commands = pytest -vv -n auto {posargs:test/unit} {[shared]pytest_cov_args}
[testenv:integration{,-py39,-py310,-py311,-py312}]
description = Run integration tests
commands = pytest -vv -n auto {posargs:test/integration} {[shared]pytest_cov_args}
[testenv:docs]
description = Build documentation
deps = -r{toxinidir}/docs/requirements.txt
commands =
sphinx-build -T -E -W -n --keep-going {tty:--color} -j auto -d docs/build/doctrees -b html docs docs/build/html
[testenv:clean]
description = Erase docs and coverage artifacts
deps =
skip_install = True
# tox 4 option name — "allow_external" is not recognized by tox and the
# /bin/sh commands below would otherwise fail the external-command check.
allowlist_externals = /bin/sh
commands =
/bin/sh -c "rm -rf {toxinidir}/test/coverage/*"
/bin/sh -c "rm -rf {toxinidir}/docs/{_,}build"