pax_global_header 0000666 0000000 0000000 00000000064 15011310720 0014500 g ustar 00root root 0000000 0000000 52 comment=5bf90f84bf340874592c7e76971b93a3cbbff927
ospd-openvas-22.9.0/ 0000775 0000000 0000000 00000000000 15011310720 0014210 5 ustar 00root root 0000000 0000000 ospd-openvas-22.9.0/.coveragerc 0000664 0000000 0000000 00000000106 15011310720 0016326 0 ustar 00root root 0000000 0000000 [run]
omit =
tests/*
*/__init__.py
source =
ospd_openvas
ospd-openvas-22.9.0/.docker/ 0000775 0000000 0000000 00000000000 15011310720 0015535 5 ustar 00root root 0000000 0000000 ospd-openvas-22.9.0/.docker/entrypoint.sh 0000664 0000000 0000000 00000000377 15011310720 0020313 0 ustar 00root root 0000000 0000000 #!/bin/bash
# Set openvas and nmap caps, with some compose installations docker forgets it.
setcap cap_net_raw,cap_net_admin+eip /usr/local/sbin/openvas
setcap cap_net_raw,cap_net_admin,cap_net_bind_service+eip /usr/bin/nmap
exec gosu ospd-openvas "$@"
ospd-openvas-22.9.0/.docker/prod.Dockerfile 0000664 0000000 0000000 00000005115 15011310720 0020474 0 ustar 00root root 0000000 0000000 ARG VERSION=edge
FROM golang AS tools
COPY smoketest /usr/local/src
WORKDIR /usr/local/src
RUN make build-cmds
FROM debian:stable-slim as builder
COPY . /source
WORKDIR /source
RUN apt-get update && \
apt-get install --no-install-recommends --no-install-suggests -y \
python3 \
python-is-python3 \
python3-pip && \
apt-get remove --purge --auto-remove -y && \
rm -rf /var/lib/apt/lists/*
RUN python3 -m pip install --upgrade --break-system-packages pip && \
python3 -m pip install --break-system-packages poetry
RUN rm -rf dist && poetry build -f wheel
FROM registry.community.greenbone.net/community/openvas-scanner:${VERSION}
ENV PYTHONDONTWRITEBYTECODE 1
ENV PYTHONUNBUFFERED 1
ENV PIP_NO_CACHE_DIR off
COPY --from=tools /usr/local/src/bin/ospd-scans /usr/local/bin/
COPY ./config/ospd-openvas.conf /etc/gvm/ospd-openvas.conf
COPY .docker/entrypoint.sh /usr/local/bin/entrypoint
WORKDIR /ospd-openvas
RUN apt-get update && \
apt-get install --no-install-recommends --no-install-suggests -y \
# gcc and python3-dev are required for psutil on arm
gcc \
gosu \
procps \
python3 \
python3-pip \
tini \
python3-dev && \
apt-get remove --purge --auto-remove -y && \
rm -rf /var/lib/apt/lists/*
# produces the bug ` ‘/usr/share/doc/python3-impacket/examples/wmiexec.py’: [Errno 2] No such file or directory`
RUN apt-get remove -y python3-impacket || true
RUN apt-get autoremove -y
RUN addgroup --gid 1001 --system ospd-openvas && \
adduser --no-create-home --shell /bin/false --disabled-password \
--uid 1001 --system --group ospd-openvas
RUN mkdir -p /run/ospd && \
mkdir -p /var/lib/openvas && \
mkdir -p /var/lib/notus && \
chown -R ospd-openvas.ospd-openvas \
/run/ospd /var/lib/openvas /var/lib/notus /etc/openvas /var/log/gvm && \
chmod 755 /etc/openvas /var/log/gvm && \
chmod 644 /etc/openvas/openvas_log.conf && \
chmod 755 /usr/local/bin/entrypoint
COPY --from=builder /source/dist/* /ospd-openvas/
RUN python3 -m pip install --break-system-packages /ospd-openvas/*
# install impacket via pip and not apt-get to get the latest version
RUN python3 -m pip install --break-system-packages impacket
# openvas is expecting impacket-wmiexec to be in the path although it got renamed
# until openvas is fixed we create a symlink
RUN ln -s /usr/local/bin/wmiexec.py /usr/local/bin/impacket-wmiexec
RUN apt-get purge -y gcc python3-dev && apt-get autoremove -y
ENTRYPOINT ["/usr/bin/tini", "--", "/usr/local/bin/entrypoint"]
CMD ["ospd-openvas", "--config", "/etc/gvm/ospd-openvas.conf", "-f", "-m", "666"]
ospd-openvas-22.9.0/.dockerignore 0000664 0000000 0000000 00000000040 15011310720 0016656 0 ustar 00root root 0000000 0000000 .venv
.github
.git
.vscode
dist
ospd-openvas-22.9.0/.github/ 0000775 0000000 0000000 00000000000 15011310720 0015550 5 ustar 00root root 0000000 0000000 ospd-openvas-22.9.0/.github/CODEOWNERS 0000664 0000000 0000000 00000000262 15011310720 0017143 0 ustar 00root root 0000000 0000000 # default reviewers
* @greenbone/scanner-maintainers
# dev ops
.github/ @greenbone/scanner-maintainers
.docker/ @greenbone/scanner-maintainers
ospd-openvas-22.9.0/.github/ISSUE_TEMPLATE/ 0000775 0000000 0000000 00000000000 15011310720 0017733 5 ustar 00root root 0000000 0000000 ospd-openvas-22.9.0/.github/ISSUE_TEMPLATE/bug-report.md 0000664 0000000 0000000 00000003475 15011310720 0022354 0 ustar 00root root 0000000 0000000 ---
name: Bug Report
about: Report an issue with ospd-openvas
title: ''
labels: bug
assignees: ''
---
### Expected behavior
### Actual behavior
### Steps to reproduce
1.
2.
3.
### GVM versions
**gsa:** (gsad --version)
**gvm:** (gvmd --version)
**openvas-scanner:** (openvas --version)
**gvm-libs:**
### Environment
**Operating system:**
**Installation method / source:** (packages, source installation)
### Logfiles
```
```
ospd-openvas-22.9.0/.github/ISSUE_TEMPLATE/config.yml 0000664 0000000 0000000 00000000257 15011310720 0021727 0 ustar 00root root 0000000 0000000 blank_issues_enabled: false
contact_links:
- name: Greenbone Community Forum
url: https://community.greenbone.net/c/gse
about: Please ask and answer questions here.
ospd-openvas-22.9.0/.github/dependabot.yml 0000664 0000000 0000000 00000000610 15011310720 0020375 0 ustar 00root root 0000000 0000000 version: 2
updates:
- package-ecosystem: pip
directory: "/"
schedule:
interval: weekly
time: "04:00"
groups:
python-packages:
patterns:
- "*"
open-pull-requests-limit: 10
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"
groups:
github-actions:
patterns:
- "*"
ospd-openvas-22.9.0/.github/enhance_version.sh 0000664 0000000 0000000 00000001142 15011310720 0021250 0 ustar 00root root 0000000 0000000 #!/bin/sh
version="$1"
type="$2"
# Split version string into fields
IFS='.' read -r field1 field2 field3 << EOF
$version
EOF
# On major enhance major version, set minor and patch to 0
# On minor enhance minor version, set patch to 0
# On patch enhance patch version
case "$type" in
"major")
field1=$(expr $field1 + 1)
field2=0
field3=0
;;
"minor")
field2=$(expr $field2 + 1)
field3=0
;;
"patch")
field3=$(expr $field3 + 1)
;;
*)
echo "Error: Invalid update type '$type'" >&2
return 1
;;
esac
new_version="$field1.$field2.$field3"
echo "$new_version"
ospd-openvas-22.9.0/.github/install-openvas-smb-dependencies.sh 0000775 0000000 0000000 00000000567 15011310720 0024441 0 ustar 00root root 0000000 0000000 # This script installs openvas-smb-dependencies.
#/bin/sh
set -ex
apt-get update && apt-get install --no-install-recommends --no-install-suggests -y \
build-essential \
cmake \
pkg-config \
gcc-mingw-w64 \
libgnutls28-dev \
perl-base \
heimdal-dev \
libpopt-dev \
libglib2.0-dev \
libunistring-dev \
&& rm -rf /var/lib/apt/lists/*
ospd-openvas-22.9.0/.github/sign-assets.sh 0000664 0000000 0000000 00000001765 15011310720 0020355 0 ustar 00root root 0000000 0000000 #!/bin/bash
set -e
# use own gpg_home to not intefere with other settings
tmp=
trap 'rm -rf "$tmp"' EXIT INT TERM HUP
tmp=$(mktemp -d)
export GNUPGHOME="$tmp"
# enable gpg to work in container environments:
# https://d.sb/2016/11/gpg-inappropriate-ioctl-for-device-errors
printf "use-agent\npinentry-mode loopback" > $GNUPGHOME/gpg.conf
printf "allow-loopback-pinentry" > $GNUPGHOME/gpg-agent.conf
echo RELOADAGENT | gpg-connect-agent
# store password, we need it multiple times
read -s password
# store to file
mv "$1" "$GNUPGHOME/private.pgp"
# import and gather key id
key_id=$(echo "$password" | \
gpg --import --batch --armor --passphrase-fd 0 $GNUPGHOME/private.pgp 2>&1 | \
grep "key [A-Z0-9]*:" | \
head -n 1 | \
sed 's/.*key \([A-Z0-9]*\):.*/\1/')
echo "key_id: $key_id"
# Create a signed ASC for each file in the assets directory
for file in assets/*; do
if [ -f "$file" ]; then
echo $password | gpg --default-key $key_id --batch --passphrase-fd 0 --detach-sign -a "$file"
fi
done
ospd-openvas-22.9.0/.github/workflows/ 0000775 0000000 0000000 00000000000 15011310720 0017605 5 ustar 00root root 0000000 0000000 ospd-openvas-22.9.0/.github/workflows/ci-python.yml 0000664 0000000 0000000 00000002076 15011310720 0022247 0 ustar 00root root 0000000 0000000 name: Build and test Python package
on:
push:
branches: [ main, stable, oldstable, middleware ]
pull_request:
branches: [ main, stable, oldstable, middleware ]
jobs:
linting:
name: Linting
runs-on: 'ubuntu-latest'
strategy:
matrix:
python-version:
- "3.9"
- "3.10"
- "3.11"
steps:
- uses: actions/checkout@v4
- name: Check with black, pylint and pontos.version
uses: greenbone/actions/lint-python@v3
with:
version: ${{ matrix.python-version }}
packages: ospd_openvas ospd tests
linter: pylint
test:
name: Run all tests
runs-on: 'ubuntu-latest'
strategy:
matrix:
python-version:
- "3.9"
- "3.10"
- "3.11"
steps:
- uses: actions/checkout@v4
- name: Install poetry and dependencies
uses: greenbone/actions/poetry@v3
with:
version: ${{ matrix.python-version }}
- name: Run unit tests
run: poetry run python -m unittest
ospd-openvas-22.9.0/.github/workflows/codeql-analysis-python.yml 0000664 0000000 0000000 00000001323 15011310720 0024736 0 ustar 00root root 0000000 0000000 name: "CodeQL"
on:
push:
branches: [ main, stable, oldstable, middleware ]
pull_request:
branches: [ main, stable, oldstable, middleware ]
schedule:
- cron: '30 5 * * 0' # 5:30h on Sundays
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
security-events: write
strategy:
fail-fast: false
matrix:
language: [ 'python' ]
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
with:
languages: ${{ matrix.language }}
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v3 ospd-openvas-22.9.0/.github/workflows/container.yml 0000664 0000000 0000000 00000005507 15011310720 0022321 0 ustar 00root root 0000000 0000000 name: Container Image Builds
on:
push:
branches: [main]
tags: ["v*"]
pull_request:
branches: [main]
workflow_dispatch:
jobs:
production:
name: Production Images
runs-on: self-hosted-generic
steps:
- name: Checkout repository
uses: actions/checkout@v4
- uses: greenbone/actions/is-latest-tag@v3
id: latest
- name: Set container build options
id: container-opts
run: |
if [[ "${{ github.ref_type }}" = 'tag' ]]; then
echo "version=stable" >> $GITHUB_OUTPUT
else
echo "version=edge" >> $GITHUB_OUTPUT
fi
- name: 'Setup meta information (IS_LATEST_TAG: ${{ steps.latest.outputs.is-latest-tag }} )'
id: meta
uses: docker/metadata-action@v5
with:
images: ${{ github.repository }}
labels: |
org.opencontainers.image.vendor=Greenbone
org.opencontainers.image.base.name=greenbone/openvas-scanner
flavor: latest=false # no auto latest container tag for git tags
tags: |
# when IS_LATEST_TAG is set create a stable and a latest tag
type=raw,value=latest,enable=${{ steps.latest.outputs.is-latest-tag == 'true' }}
type=raw,value=stable,enable=${{ steps.latest.outputs.is-latest-tag == 'true' }}
# if tag version is set than create a version tags
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=semver,pattern={{major}}
# if we are on the main branch set edge
type=edge,branch=main
# use branch-sha otherwise for pushes to branches other then main (will not be uploaded)
type=raw,value={{branch}}-{{sha}},enable=${{ github.ref_type == 'branch' && github.event_name == 'push' && github.ref_name != 'main' }}
# use pr-$PR_ID for pull requests (will not be uploaded)
type=ref,event=pr
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Docker Registry
if: github.event_name != 'pull_request'
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push Container image
uses: docker/build-push-action@v6
with:
context: .
push: ${{ github.event_name != 'pull_request' && (github.ref_type == 'tag' || github.ref_name == 'main') }}
file: .docker/prod.Dockerfile
platforms: linux/amd64,linux/arm64
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
build-args: |
VERSION=${{ steps.container-opts.outputs.version }}
ospd-openvas-22.9.0/.github/workflows/conventional-commits.yml 0000664 0000000 0000000 00000000375 15011310720 0024505 0 ustar 00root root 0000000 0000000 name: Conventional Commits
on:
pull_request_target:
jobs:
conventional-commits:
name: Conventional Commits
runs-on: ubuntu-latest
steps:
- name: Report Conventional Commits
uses: greenbone/actions/conventional-commits@v3
ospd-openvas-22.9.0/.github/workflows/dependency-review.yml 0000664 0000000 0000000 00000000345 15011310720 0023747 0 ustar 00root root 0000000 0000000 name: 'Dependency Review'
on: [pull_request]
permissions:
contents: read
jobs:
dependency-review:
runs-on: ubuntu-latest
steps:
- name: 'Dependency Review'
uses: greenbone/actions/dependency-review@v3
ospd-openvas-22.9.0/.github/workflows/deploy-pypi.yml 0000664 0000000 0000000 00000000503 15011310720 0022601 0 ustar 00root root 0000000 0000000 name: Deploy on PyPI
on:
release:
types: [created]
jobs:
deploy:
permissions:
id-token: write
runs-on: ubuntu-latest
environment:
name: pypi
url: https://pypi.org/project/ospd-openvas/
steps:
- name: Build and publish to PyPI
uses: greenbone/actions/pypi-upload@v3
ospd-openvas-22.9.0/.github/workflows/push.yml 0000664 0000000 0000000 00000001317 15011310720 0021311 0 ustar 00root root 0000000 0000000 name: Build and Push to Greenbone Registry
on:
push:
branches: [ main ]
tags: ["v*"]
pull_request:
branches: [ main ]
workflow_dispatch:
inputs:
ref-name:
type: string
description: "The ref to build a container image from. For example a tag v23.0.0."
required: true
jobs:
build:
name: Build and Push to Greenbone Registry
uses: greenbone/workflows/.github/workflows/container-build-push-2nd-gen.yml@main
with:
image-url: community/ospd-openvas
image-labels: |
org.opencontainers.image.vendor=Greenbone
org.opencontainers.image.base.name=greenbone/openvas-scanner
ref-name: ${{ inputs.ref-name }}
secrets: inherit
ospd-openvas-22.9.0/.github/workflows/release.yml 0000664 0000000 0000000 00000016053 15011310720 0021755 0 ustar 00root root 0000000 0000000 name: "release"
on:
pull_request:
types: [closed]
workflow_dispatch:
inputs:
release:
description: "Use 'major' for incompatible changes, 'minor' for new features, and 'patch' for fixes."
type: choice
options:
- "major"
- "minor"
- "patch"
required: true
default: "patch"
# This job first determines the target branch of the closed pull request. If the target branch is "main",
# then the latest release tag is used. If no release tag exists, it is set to 0.1.0. If it is a release
# branch (e.g. v22), then the latest tag within that major version is used.
#
# For a patch release, the latest tag is enhanced with 0.0.1, leaving the major and minor versions as
# they are.
#
# For a minor release, the latest tag is enhanced with 0.1.0, and the patch version is set to 0.
#
# For a major release, a branch is created for the latest major release found by tag, and the version
# is enhanced with $latest_tag + 1.0.0, increasing the major version by 1 and setting the minor and
# patch versions to 0.
#
# Major version releases are only valid on the "main" branch.
#
# Once the version is found and enhanced, each __vewrsion__.py or project file is updated to the new
# version, and a commit is created in the found branch.
jobs:
release:
name: release
if: |
(github.event_name == 'workflow_dispatch') ||
(
github.event.pull_request.merged == true &&
(
contains(github.event.pull_request.labels.*.name, 'major_release') ||
contains(github.event.pull_request.labels.*.name, 'minor_release') ||
contains(github.event.pull_request.labels.*.name, 'patch_release')
)
)
runs-on: "ubuntu-latest"
steps:
- name: set RELEASE_KIND = ${{ github.event.inputs.release }}
if: ${{ github.event_name == 'workflow_dispatch' }}
run: |
echo "RELEASE_KIND=${{ github.event.inputs.release }}" >> $GITHUB_ENV
- name: set RELEASE_KIND = major
if: ${{ (contains(github.event.pull_request.labels.*.name, 'major_release')) }}
run: |
echo "RELEASE_KIND=major" >> $GITHUB_ENV
- name: set RELEASE_KIND = minor
if: ${{ (contains(github.event.pull_request.labels.*.name, 'minor_release')) }}
run: |
echo "RELEASE_KIND=minor" >> $GITHUB_ENV
- name: set RELEASE_KIND = patch
if: ${{ (contains(github.event.pull_request.labels.*.name, 'patch_release')) }}
run: |
echo "RELEASE_KIND=patch" >> $GITHUB_ENV
- name: set RELEASE_REF
run: |
if [[ "${{ github.event_name }}" = "workflow_dispatch" ]]; then
echo "RELEASE_REF=${{ github.ref_name }}" >> $GITHUB_ENV
else
echo "RELEASE_REF=${{ github.base_ref }}" >> $GITHUB_ENV
fi
- uses: actions/checkout@v4
with:
token: ${{ secrets.GREENBONE_BOT_TOKEN }}
fetch-depth: '0'
- name: "LATEST_VERSION"
run: |
if [[ "${{ env.RELEASE_REF }}" = "main" ]]; then
echo "LATEST_VERSION=$(git tag | grep "^v" | sed 's/^v//' | sort --version-sort | tail -n 1)" >> $GITHUB_ENV
else
echo "LATEST_VERSION=$(git tag | grep "^v${{ env.RELEASE_REF }}" | sed 's/^v//' | sort --version-sort | tail -n 1)" >> $GITHUB_ENV
fi
- name: "default LATEST_VERSION"
run: |
# default to 0.1.0 when there is no previous tag and on main branch
if ([[ -z "${{ env.LATEST_VERSION }}" ]] && [[ "${{ env.RELEASE_REF }}" = "main" ]]); then
echo "LATEST_VERSION=0.1.0" >> $GITHUB_ENV
fi
# safeguard
- name: RELEASE_REF != NULL
run: ([ -n "${{ env.RELEASE_REF }}" ])
- name: LATEST_VERSION != NULL
run: ([ -n "${{ env.LATEST_VERSION }}" ])
- name: RELEASE_KIND != NULL
run: ([ -n "${{ env.RELEASE_KIND }}" ])
- name: "NEW_VERSION"
run: |
echo "NEW_VERSION=$(sh .github/enhance_version.sh ${{ env.LATEST_VERSION }} ${{ env.RELEASE_KIND }})" >> $GITHUB_ENV
- name: NEW_VERSION != NULL
run: ([ -n "${{ env.NEW_VERSION }}" ])
- name: set git credentials
run: |
git config --global user.email "${{ secrets.GREENBONE_BOT_MAIL }}"
git config --global user.name "${{ secrets.GREENBONE_BOT }}"
- name: "create working branch for previous major release (${{ env.LATEST_VERSION }})"
if: ( env.RELEASE_KIND == 'major' )
run: |
# save a branch so that we can easily create PR for that version when we want to fix something
git checkout "v${{ env.LATEST_VERSION }}"
export BRANCH_NAME=$(echo "${{ env.LATEST_VERSION }}" | sed 's/^\([0-9]*\).*/v\1/')
git checkout -b "$BRANCH_NAME"
git push origin "$BRANCH_NAME"
# create branch of version
- name: prepare project version ${{ env.RELEASE_REF }} ${{ env.LATEST_VERSION }} -> ${{ env.NEW_VERSION }}
run: |
# jump back for the case that we switched to a tag
git checkout "${{ env.RELEASE_REF }}"
# install pontos
python3 -m pip install pontos
#poetry install
#poetry shell
pontos-version update ${{ env.NEW_VERSION }}
if git diff --exit-code --quiet; then
echo "There are no modified files, skipping."
else
git add **/__version__.py
git add pyproject.toml
git commit -m "Automated commit: change version from ${{ env.LATEST_VERSION }} -> ${{ env.NEW_VERSION }}"
git push origin ${{ env.RELEASE_REF }}
fi
- run: mkdir assets/
- name: release ${{ env.LATEST_VERSION }} -> ${{ env.NEW_VERSION }}
run: |
export PROJECT=$(echo "${{ github.repository }}" | sed 's/.*\///' )
pontos-changelog \
--current-version ${{ env.LATEST_VERSION }} \
--next-version ${{ env.NEW_VERSION }} \
--config changelog.toml \
--repository ${{ github.repository }} \
--versioning-scheme semver \
-o /tmp/changelog.md || true
# we would rather have empty release notes than no release
if [ ! -f "/tmp/changelog.md" ]; then
touch /tmp/changelog.md
fi
echo "${{ secrets.GREENBONE_BOT_TOKEN }}" | gh auth login --with-token
# lets see how smart it is
export nrn="v${{ env.NEW_VERSION }}"
export filename="$PROJECT-$nrn"
gh release create "$nrn" -F /tmp/changelog.md
mkdir -p assets
ls -las assets/
curl -Lo assets/$filename.zip https://github.com/${{ github.repository }}/archive/refs/tags/$nrn.zip
curl -Lo assets/$filename.tar.gz https://github.com/${{ github.repository }}/archive/refs/tags/$nrn.tar.gz
echo -e "${{ secrets.GPG_KEY }}" > private.pgp
echo ${{ secrets.GPG_PASSPHRASE }} | bash .github/sign-assets.sh private.pgp
rm assets/$filename.zip
rm assets/$filename.tar.gz
gh release upload $nrn assets/*
ospd-openvas-22.9.0/.github/workflows/sbom-upload.yml 0000664 0000000 0000000 00000000415 15011310720 0022552 0 ustar 00root root 0000000 0000000 name: SBOM upload
on:
workflow_dispatch:
push:
branches: ["main"]
jobs:
SBOM-upload:
runs-on: ubuntu-latest
permissions:
id-token: write
contents: write
steps:
- name: 'SBOM upload'
uses: greenbone/actions/sbom-upload@v3
ospd-openvas-22.9.0/.github/workflows/smoketests.yml 0000664 0000000 0000000 00000000721 15011310720 0022531 0 ustar 00root root 0000000 0000000 name: SmokeTests
on:
push:
branches: [ main ]
pull_request:
branches: [ main ]
jobs:
test:
name: verify
runs-on: 'ubuntu-latest'
steps:
- uses: actions/checkout@v4
- uses: docker/login-action@v3
with:
username: greenbonebot
password: ${{ secrets.GREENBONE_BOT_TOKEN }}
registry: ghcr.io
- uses: docker/setup-buildx-action@v3
- run: make
working-directory: smoketest
ospd-openvas-22.9.0/.github/workflows/update-header.yml 0000664 0000000 0000000 00000000647 15011310720 0023047 0 ustar 00root root 0000000 0000000 name: Update Headers
on:
workflow_dispatch:
schedule:
- cron: '0 0 1 1 *' # At 00:00 on day-of-month 1 in January.
permissions:
contents: write
pull-requests: write
jobs:
update-header:
name: Update headers
runs-on: 'ubuntu-latest'
steps:
- name: Run update header
uses: greenbone/actions/update-header@v3
with:
directories: ospd ospd_openvas tests
target: main
ospd-openvas-22.9.0/.gitignore 0000664 0000000 0000000 00000000233 15011310720 0016176 0 ustar 00root root 0000000 0000000 __pycache__
*.pyc
*.log
.egg
*.egg-info
dist
build
.idea
.vscode
.coverage
.venv
smoketest/bin
smoketest/build.log
smoketest/.scan-config
smoketest/.nasl
ospd-openvas-22.9.0/.mergify.yml 0000664 0000000 0000000 00000002363 15011310720 0016457 0 ustar 00root root 0000000 0000000 pull_request_rules:
# backports from main branch
- name: backport main patches to stable branch
conditions:
- base=main
- label=backport-to-stable
actions:
backport:
branches:
- stable
- name: backport main patches to oldstable branch
conditions:
- base=main
- label=backport-to-oldstable
actions:
backport:
branches:
- oldstable
# backports from upcoming release branch
- name: backport stable patches to main branch
conditions:
- base=stable
- label=backport-to-main
actions:
backport:
branches:
- main
- name: backport stable patches to oldstable branch
conditions:
- base=stable
- label=backport-to-oldstable
actions:
backport:
branches:
- oldstable
# backports from current release branch
- name: backport oldstable patches to main branch
conditions:
- base=oldstable
- label=backport-to-main
actions:
backport:
branches:
- main
- name: backport oldstable patches to stable branch
conditions:
- base=oldstable
- label=backport-to-stable
actions:
backport:
branches:
- stable
ospd-openvas-22.9.0/.pylintrc 0000664 0000000 0000000 00000034160 15011310720 0016061 0 ustar 00root root 0000000 0000000 [MASTER]
# A comma-separated list of package or module names from where C extensions may
# be loaded. Extensions are loading into the active Python interpreter and may
# run arbitrary code
extension-pkg-whitelist=lxml
# Add files or directories to the blacklist. They should be base names, not
# paths.
ignore=CVS
# Add files or directories matching the regex patterns to the blacklist. The
# regex matches against base names, not paths.
ignore-patterns=docs
# Pickle collected data for later comparisons.
persistent=yes
# When enabled, pylint would attempt to guess common misconfiguration and emit
# user-friendly hints instead of false-positive error messages
suggestion-mode=yes
# Allow loading of arbitrary C extensions. Extensions are imported into the
# active Python interpreter and may run arbitrary code.
unsafe-load-any-extension=no
[MESSAGES CONTROL]
# Only show warnings with the listed confidence levels. Leave empty to show
# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
confidence=
# Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifiers separated by comma (,) or put this
# option multiple times (only on the command line, not in the configuration
# file where it should appear only once).You can also use "--disable=all" to
# disable everything first and then reenable specific checks. For example, if
# you want to run only the similarities checker, you can use "--disable=all
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use"--disable=all --enable=classes
# --disable=W"
#
# bad-continuation is disabled because of a bug in pylint.
# See https://github.com/ambv/black/issues/48 and https://github.com/PyCQA/pylint/issues/289
disable=len-as-condition,
attribute-defined-outside-init,
missing-docstring,
bad-continuation,
R
#disable=print-statement,
# parameter-unpacking,
# unpacking-in-except,
# old-raise-syntax,
# backtick,
# long-suffix,
# old-ne-operator,
# old-octal-literal,
# import-star-module-level,
# non-ascii-bytes-literal,
# raw-checker-failed,
# bad-inline-option,
# locally-disabled,
# locally-enabled,
# file-ignored,
# suppressed-message,
# useless-suppression,
# deprecated-pragma,
# apply-builtin,
# basestring-builtin,
# buffer-builtin,
# cmp-builtin,
# coerce-builtin,
# execfile-builtin,
# file-builtin,
# long-builtin,
# raw_input-builtin,
# reduce-builtin,
# standarderror-builtin,
# unicode-builtin,
# xrange-builtin,
# coerce-method,
# delslice-method,
# getslice-method,
# setslice-method,
# no-absolute-import,
# old-division,
# dict-iter-method,
# dict-view-method,
# next-method-called,
# metaclass-assignment,
# indexing-exception,
# raising-string,
# reload-builtin,
# oct-method,
# hex-method,
# nonzero-method,
# cmp-method,
# input-builtin,
# round-builtin,
# intern-builtin,
# unichr-builtin,
# map-builtin-not-iterating,
# zip-builtin-not-iterating,
# range-builtin-not-iterating,
# filter-builtin-not-iterating,
# using-cmp-argument,
# eq-without-hash,
# div-method,
# idiv-method,
# rdiv-method,
# exception-message-attribute,
# invalid-str-codec,
# sys-max-int,
# bad-python3-import,
# deprecated-string-function,
# deprecated-str-translate-call,
# deprecated-itertools-function,
# deprecated-types-field,
# next-method-defined,
# dict-items-not-iterating,
# dict-keys-not-iterating,
# dict-values-not-iterating
# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option
# multiple time (only on the command line, not in the configuration file where
# it should appear only once). See also the "--disable" option for examples.
enable=c-extension-no-member
[REPORTS]
# Python expression which should return a note less than 10 (10 is the highest
# note). You have access to the variables errors warning, statement which
# respectively contain the number of errors / warnings messages and the total
# number of statements analyzed. This is used by the global evaluation report
# (RP0004).
evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details
#msg-template=
# Set the output format. Available formats are text, parseable, colorized, json
# and msvs (visual studio).You can also give a reporter class, eg
# mypackage.mymodule.MyReporterClass.
output-format=text
# Tells whether to display a full report or only the messages
reports=no
# Activate the evaluation score.
score=no
[REFACTORING]
# Maximum number of nested blocks for function / method body
max-nested-blocks=5
# Complete name of functions that never returns. When checking for
# inconsistent-return-statements if a never returning function is called then
# it will be considered as an explicit return statement and no message will be
# printed.
never-returning-functions=optparse.Values,sys.exit
[VARIABLES]
# List of additional names supposed to be defined in builtins. Remember that
# you should avoid to define new builtins when possible.
additional-builtins=
# Tells whether unused global variables should be treated as a violation.
allow-global-unused-variables=yes
# List of strings which can identify a callback function by name. A callback
# name must start or end with one of those strings.
callbacks=cb_,
_cb
# A regular expression matching the name of dummy variables (i.e. expectedly
# not used).
dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
# Argument names that match this expression will be ignored. Default to name
# with leading underscore
ignored-argument-names=_.*|^ignored_|^unused_
# Tells whether we should check for unused import in __init__ files.
init-import=no
# List of qualified module names which can have objects that can redefine
# builtins.
redefining-builtins-modules=six.moves,past.builtins,future.builtins
[MISCELLANEOUS]
# List of note tags to take in consideration, separated by a comma.
notes=FIXME,
XXX,
TODO
[BASIC]
# Regular expression which should only match correct argument names
argument-rgx=[a-z_][a-z0-9_]{1,40}$
# Regular expression which should only match correct instance attribute names
attr-rgx=[a-z_][a-z0-9_]{1,40}$
# Bad variable names which should always be refused, separated by a comma
bad-names=foo,
bar,
baz,
toto,
tutu,
tata
# Regular expression matching correct class attribute names.
class-attribute-rgx=([a-z_][a-z0-9_]{1,40})|([A-Z_][A-Z0-9_]{1,30})$
# Naming style matching correct class names
class-naming-style=PascalCase
# Naming style matching correct constant names
const-naming-style=UPPER_CASE
# Minimum line length for functions/classes that require docstrings, shorter
# ones are exempt.
docstring-min-length=3
# Regular expression which should only match correct function names
function-rgx=[a-z_][a-z0-9_]+$
# Good variable names which should always be accepted, separated by a comma
good-names=e,
f,
i,
j,
k,
ex,
Run,
logger,
_
# Include a hint for the correct naming format with invalid-name
include-naming-hint=yes
# Regular expression matching correct inline iteration names.
inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
# Regular expression which should only match correct method names
method-rgx=[a-z_][a-z0-9_]+$
# Regular expression which should only match correct module names
module-rgx=([a-z]+)|(test_*)$
# Regular expression which should only match function or class names that do
# not require a docstring.
no-docstring-rgx=^_
# List of decorators that produce properties, such as abc.abstractproperty. Add
# to this list to register other decorators that produce valid properties.
property-classes=abc.abstractproperty
# Regular expression which should only match correct variable names
variable-rgx=[a-z_][a-z0-9_]+$
[SIMILARITIES]
# Ignore comments when computing similarities.
ignore-comments=yes
# Ignore docstrings when computing similarities.
ignore-docstrings=yes
# Ignore imports when computing similarities.
ignore-imports=no
# Minimum lines number of a similarity.
min-similarity-lines=4
[LOGGING]
# Logging modules to check that the string format arguments are in logging
# function parameter format
logging-modules=logging
[TYPECHECK]
# List of decorators that produce context managers, such as
# contextlib.contextmanager. Add to this list to register other decorators that
# produce valid context managers.
contextmanager-decorators=contextlib.contextmanager
# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E1101 when accessed. Python regular
# expressions are accepted.
generated-members=
# Tells whether missing members accessed in mixin class should be ignored. A
# mixin class is detected if its name ends with "mixin" (case insensitive).
ignore-mixin-members=yes
# This flag controls whether pylint should warn about no-member and similar
# checks whenever an opaque object is returned when inferring. The inference
# can return multiple potential results while evaluating a Python object, but
# some branches might not be evaluated, which results in partial inference. In
# that case, it might be useful to still emit no-member and other checks for
# the rest of the inferred objects.
ignore-on-opaque-inference=yes
# List of class names for which member attributes should not be checked (useful
# for classes with dynamically set attributes). This supports the use of
# qualified names.
ignored-classes=optparse.Values,thread._local,_thread._local
# List of module names for which member attributes should not be checked
# (useful for modules/projects where namespaces are manipulated during runtime
# and thus existing member attributes cannot be deduced by static analysis. It
# supports qualified module names, as well as Unix pattern matching.
ignored-modules=
# Show a hint with possible names when a member name was not found. The aspect
# of finding the hint is based on edit distance.
missing-member-hint=yes
# The minimum edit distance a name should have in order to be considered a
# similar match for a missing member name.
missing-member-hint-distance=1
# The total number of similar names that should be taken in consideration when
# showing a hint for a missing member.
missing-member-max-choices=1
[FORMAT]
# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
expected-line-ending-format=LF
# Regexp for a line that is allowed to be longer than the limit.
ignore-long-lines=^\s*(# )?<?https?://\S+>?$
# Number of spaces of indent required inside a hanging or continued line.
indent-after-paren=4
# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
# tab).
indent-string=' '
# Maximum number of characters on a single line.
max-line-length=80
# Maximum number of lines in a module
max-module-lines=1000
# Allow the body of a class to be on the same line as the declaration if body
# contains single statement.
single-line-class-stmt=no
# Allow the body of an if to be on the same line as the test if there is no
# else.
single-line-if-stmt=no
[IMPORTS]
# Allow wildcard imports from modules that define __all__.
allow-wildcard-with-all=no
# Analyse import fallback blocks. This can be used to support both Python 2 and
# 3 compatible code, which means that the block might have code that exists
# only in one or another interpreter, leading to false positives when analysed.
analyse-fallback-blocks=no
# Deprecated modules which should not be used, separated by a comma
deprecated-modules=optparse,tkinter.tix
# Create a graph of external dependencies in the given file (report RP0402 must
# not be disabled)
ext-import-graph=
# Create a graph of every (i.e. internal and external) dependencies in the
# given file (report RP0402 must not be disabled)
import-graph=
# Create a graph of internal dependencies in the given file (report RP0402 must
# not be disabled)
int-import-graph=
# Force import order to recognize a module as part of the standard
# compatibility libraries.
known-standard-library=
# Force import order to recognize a module as part of a third party library.
known-third-party=enchant
[DESIGN]
# Maximum number of arguments for function / method
max-args=15
# Maximum number of attributes for a class (see R0902).
max-attributes=20
# Maximum number of boolean expressions in a if statement
max-bool-expr=5
# Maximum number of branch for function / method body
max-branches=12
# Maximum number of locals for function / method body
max-locals=15
# Maximum number of parents for a class (see R0901).
max-parents=7
# Maximum number of public methods for a class (see R0904).
max-public-methods=30
# Maximum number of return / yield for function / method body
max-returns=6
# Maximum number of statements in function / method body
max-statements=50
# Minimum number of public methods for a class (see R0903).
min-public-methods=0
[CLASSES]
# List of method names used to declare (i.e. assign) instance attributes.
defining-attr-methods=__init__,
__new__,
setUp
# List of member names, which should be excluded from the protected access
# warning.
exclude-protected=_asdict,
_fields,
_replace,
_source,
_make
# List of valid names for the first argument in a class method.
valid-classmethod-first-arg=cls
# List of valid names for the first argument in a metaclass class method.
valid-metaclass-classmethod-first-arg=mcs
[EXCEPTIONS]
# Exceptions that will emit a warning when being caught. Defaults to
# "Exception"
overgeneral-exceptions=builtins.Exception
ospd-openvas-22.9.0/CHANGELOG.md 0000664 0000000 0000000 00000030264 15011310720 0016026 0 ustar 00root root 0000000 0000000 # Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## [21.10] (unreleased)
### Added
- Validate port list to be sent to openvas. [#411](https://github.com/greenbone/ospd-openvas/pull/411)
- Validate credentials to be sent to openvas. [#416](https://github.com/greenbone/ospd-openvas/pull/416)
- New Credentials for SSH to get su privileges. [#419](https://github.com/greenbone/ospd-openvas/pull/419)
- Integrate Sentry. [#421](https://github.com/greenbone/ospd-openvas/pull/421)
- Add dry run support. Generate 10 fake results per host. [#424](https://github.com/greenbone/ospd-openvas/pull/424)
### Changed
- Stopping and interrupting scans. [#450](https://github.com/greenbone/ospd-openvas/pull/450)
- Downgrade required version for psutil to 5.5.1 [#489](https://github.com/greenbone/ospd-openvas/pull/489)
### Deprecated
### Removed
- Remove source_iface preferences. [#418](https://github.com/greenbone/ospd-openvas/pull/418)
### Fixed
- Fix pylint warnings. Remove u prefix for strings, no longer necessary. [#495](https://github.com/greenbone/ospd-openvas/pull/495)
[21.10]: https://github.com/greenbone/ospd-openvas/compare/stable...main
## [21.4.1] (unreleased)
### Added
### Changed
### Fixed
- Fix timeout preference handling. [#486](https://github.com/greenbone/ospd-openvas/pull/486)
### Removed
- Remove some special handling for Consider Alive alive test. [#413](https://github.com/greenbone/ospd-openvas/pull/413)
[21.4.1]: https://github.com/greenbone/ospd-openvas/compare/v21.4.0...stable
## [21.4.0] (2021-04-16)
### Added
- Add dedicated port list for alive detection (Boreas only) as scanner preference if supplied via OSP. [#327](https://github.com/greenbone/ospd-openvas/pull/327)
- Add methods for adding VTs to the redis cache. [#337](https://github.com/greenbone/ospd-openvas/pull/337)
- Add support for supplying alive test methods via separate elements. [#331](https://github.com/greenbone/ospd-openvas/pull/331)
- Add support CVSSv3 and accept new tags for severity vector, origin, date. [#346](https://github.com/greenbone/ospd-openvas/pull/346)
### Changed
- Get all results from main kb. [#285](https://github.com/greenbone/ospd-openvas/pull/285)
- Extend severities with origin and date. [#192](https://github.com/greenbone/ospd-openvas/pull/192)
### Deprecated
### Removed
[21.4.0]: https://github.com/greenbone/ospd-openvas/compare/oldstable...stable
## [20.8.3]
### Added
### Changed
- Use better defaults for ospd-openvas settings [#454](https://github.com/greenbone/ospd-openvas/pull/454)
- Improved error logging while trying to acquire a lock file [#458](https://github.com/greenbone/ospd-openvas/pull/458)
### Deprecated
### Removed
### Fixed
[20.8.3]: https://github.com/greenbone/ospd-openvas/compare/v20.8.2...oldstable
## [20.8.2] - 2021-06-24
### Added
- Check for scanner error messages before leaving. [#395](https://github.com/greenbone/ospd-openvas/pull/395)
### Fixed
- Don't crash with non-ascii chars in openvas.conf. [#381](https://github.com/greenbone/ospd-openvas/pull/381)
[20.8.2]: https://github.com/greenbone/ospd-openvas/compare/v20.8.1...v20.8.2
## [20.8.1] - 2021-02-01
### Added
- Add debug level log messages. [#373](https://github.com/greenbone/ospd-openvas/pull/373)
### Changed
- Improve logging for unsatisfied vts dependencies. [#336](https://github.com/greenbone/ospd-openvas/pull/336)
- Do not use busy wait when waiting for the openvas scan process to finish. [#360](https://github.com/greenbone/ospd-openvas/pull/360)
- The description field of the systemd ospd-openvas.service file was updated. [#372](https://github.com/greenbone/ospd-openvas/pull/372)
- Improve logging for unexpected data in script_xref tags. [#374](https://github.com/greenbone/ospd-openvas/pull/374)
### Fixed
- Fix nvticache name for stable version from sources. [#317](https://github.com/greenbone/ospd-openvas/pull/317)
- Fix stop scan during preferences handling, before spawning OpenVAS. [#332](https://github.com/greenbone/ospd-openvas/pull/332)
- Fix alive test preferences when a non default method is selected. [#334](https://github.com/greenbone/ospd-openvas/pull/334)
- Check for empty vts preferences list. [#340](https://github.com/greenbone/ospd-openvas/pull/340)
- Fix progress calculation when the host count differs from the target string count. [#343](https://github.com/greenbone/ospd-openvas/pull/343)
- Retry host progress update if the progress is still below 100 percent. [#390](https://github.com/greenbone/ospd-openvas/pull/390)
[20.8.1]: https://github.com/greenbone/ospd-openvas/compare/v20.8.0...v20.8.1
## [20.8.1] (2021-02-01)
### Added
- Add debug level log messages. [#373](https://github.com/greenbone/ospd-openvas/pull/373)
### Changed
- Improve logging for unsatisfied vts dependencies. [#336](https://github.com/greenbone/ospd-openvas/pull/336)
- Do not use busy wait when waiting for the openvas scan process to finish. [#360](https://github.com/greenbone/ospd-openvas/pull/360)
- The description field of the systemd ospd-openvas.service file was updated. [#372](https://github.com/greenbone/ospd-openvas/pull/372)
- Improve logging for unexpected data in script_xref tags. [#374](https://github.com/greenbone/ospd-openvas/pull/374)
### Fixed
- Fix nvticache name for stable version from sources. [#317](https://github.com/greenbone/ospd-openvas/pull/317)
- Fix stop scan during preferences handling, before spawning OpenVAS. [#332](https://github.com/greenbone/ospd-openvas/pull/332)
- Fix alive test preferences when a non default method is selected. [#334](https://github.com/greenbone/ospd-openvas/pull/334)
- Check for empty vts preferences list. [#340](https://github.com/greenbone/ospd-openvas/pull/340)
- Fix progress calculation when the host count differs from the target string count. [#343](https://github.com/greenbone/ospd-openvas/pull/343)
- Don't crash with non-ascii chars in openvas.conf. [#381](https://github.com/greenbone/ospd-openvas/pull/381)
[20.8.1]: https://github.com/greenbone/ospd-openvas/compare/v20.8.0...oldstable
## [20.8.0] (2020-08-12)
### Added
- Add solution method to solution of vt object. [#131](https://github.com/greenbone/ospd-openvas/pull/131)
- Add typing to daemon.py, nvticache.py and db.py. [#161](https://github.com/greenbone/ospd-openvas/pull/161)[#162](https://github.com/greenbone/ospd-openvas/pull/162)[#163](https://github.com/greenbone/ospd-openvas/pull/163)
- Add support for alive test settings. [#182](https://github.com/greenbone/ospd-openvas/pull/182)
- Add missing scan preferences expand_vhosts and test_empty_vhost. [#184](https://github.com/greenbone/ospd-openvas/pull/184)
- Set reverse lookup options. [#185](https://github.com/greenbone/ospd-openvas/pull/185)
- Check if the amount of vts in redis is coherent.
[#195](https://github.com/greenbone/ospd-openvas/pull/195)
[#197](https://github.com/greenbone/ospd-openvas/pull/197)
- Add support for test_alive_hosts_only feature of openvas. [#204](https://github.com/greenbone/ospd-openvas/pull/204)
- Use lock file during feed update to avoid corrupted cache. [#207](https://github.com/greenbone/ospd-openvas/pull/207)
- Add details parameter to get_vt_iterator(). [#215](https://github.com/greenbone/ospd-openvas/pull/215)
- Add [pontos](https://github.com/greenbone/pontos) as dev dependency for
managing the version information in ospd-openvas [#238](https://github.com/greenbone/ospd-openvas/pull/238)
- Pass store directory to OSPDaemon init [#266](https://github.com/greenbone/ospd-openvas/pull/266)
- Add URI field to results for file path or webservice URL [#271](https://github.com/greenbone/ospd-openvas/pull/271)
- Add element to OSPD_PARAMS entries to indicate visibility for client. [#293](https://github.com/greenbone/ospd-openvas/pull/293)
### Changed
- Less strict checks for the nvti cache version
[#150](https://github.com/greenbone/ospd-openvas/pull/150)
[#165](https://github.com/greenbone/ospd-openvas/pull/165)
[#166](https://github.com/greenbone/ospd-openvas/pull/166)
- Set self.vts to None if there is a pending feed. [#172](https://github.com/greenbone/ospd-openvas/pull/172)
- Use the new method clear() from Vts class. [#193](https://github.com/greenbone/ospd-openvas/pull/193)
- Start server before initialize the vts. [#196](https://github.com/greenbone/ospd-openvas/pull/196)
- Get vts metadata from redis and reduce stored data in cache. [#205](https://github.com/greenbone/ospd-openvas/pull/205)
- Update license to AGPL-3.0+ [#228](https://github.com/greenbone/ospd-openvas/pull/228)
- Replaced pipenv with poetry for dependency management. `poetry install` works
a bit differently than `pipenv install`. It installs dev packages by default and
also ospd in editable mode. This means after running poetry install ospd will
directly be importable in the virtual python environment. [#235](https://github.com/greenbone/ospd-openvas/pull/235)
- Don't send host details and log messages to the client when Boreas is enabled. [#252](https://github.com/greenbone/ospd-openvas/pull/252)
- Progress bar calculation do not takes in account dead hosts. [#252](https://github.com/greenbone/ospd-openvas/pull/252)
- Host progress is stored as integer. [#256](https://github.com/greenbone/ospd-openvas/pull/256)
- Use flock for the feed lock file. [#257](https://github.com/greenbone/ospd-openvas/pull/257)
- Improvements for fetching results from redis. [#282](https://github.com/greenbone/ospd-openvas/pull/282)
- Add RW permission to the group on the feed lock file.
[#300](https://github.com/greenbone/ospd-openvas/pull/300)
[#301](https://github.com/greenbone/ospd-openvas/pull/301)
### Fixed
- Check vt_aux for None before trying to access it. [#177](https://github.com/greenbone/ospd-openvas/pull/177)
- Fix snmp credentials. [#186](https://github.com/greenbone/ospd-openvas/pull/186)
- Escape script name before adding the result in an xml entity. [#188](https://github.com/greenbone/ospd-openvas/pull/188)
- Fix handling of denied hosts. [#263](https://github.com/greenbone/ospd-openvas/pull/263)
- Fix handling of special chars in credentials. [#294](https://github.com/greenbone/ospd-openvas/pull/294)
- Fix type and default value of optimize_test preference. [#302](https://github.com/greenbone/ospd-openvas/pull/302)
- Fix deploy and upload to pypi. [#315](https://github.com/greenbone/ospd-openvas/pull/315)
- Fix ospd version dependency. [#316](https://github.com/greenbone/ospd-openvas/pull/316)
### Removed
- Remove use_mac_addr, vhost_ip and vhost scan preferences. [#184](https://github.com/greenbone/ospd-openvas/pull/184)
- Handling of finished host for resume task. [#252](https://github.com/greenbone/ospd-openvas/pull/252)
- Don't release vts explicitly. [#261](https://github.com/greenbone/ospd-openvas/pull/261)
- Drop handling of network_scan. [#265](https://github.com/greenbone/ospd-openvas/pull/265)
[20.8.0]: https://github.com/greenbone/ospd-openvas/compare/ospd-openvas-1.0...oldstable
## [1.0.1]
### Added
- Check the vt's preference value for type 'file'. [#130](https://github.com/greenbone/ospd-openvas/pull/130).
- Check for malformed credentials. [#160](https://github.com/greenbone/ospd-openvas/pull/160).
- Send messages generated by the scanner main process. [#171](https://github.com/greenbone/ospd-openvas/pull/171).
### Changed
- Exit with exit code 1 if it was not possible to connect to redis. [#133](https://github.com/greenbone/ospd-openvas/pull/133)
- Return None if the scan finished successfully. [#137](https://github.com/greenbone/ospd-openvas/pull/137)
### Fixed
- Improve redis clean out when stopping a scan. [#128](https://github.com/greenbone/ospd-openvas/pull/128)
- Improve error handling when creating vts xml elements. [#139](https://github.com/greenbone/ospd-openvas/pull/139)
- Init the superclass with kwargs. [#141](https://github.com/greenbone/ospd-openvas/pull/141)
- Avoid ospd-openvas to crash if redis is flushed during vt dictionary creation. [#146](https://github.com/greenbone/ospd-openvas/pull/146)
[1.0.1]: https://github.com/greenbone/ospd-openvas/compare/v1.0.0...ospd-openvas-1.0
## [1.0.0] (2019-10-11)
This is the first release of the ospd-openvas module for the Greenbone
Vulnerability Management (GVM) framework.
[1.0.0]: https://github.com/greenbone/ospd-openvas/compare/v1.0.0
ospd-openvas-22.9.0/COPYING 0000664 0000000 0000000 00000102330 15011310720 0015242 0 ustar 00root root 0000000 0000000 GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007
Copyright (C) 2007 Free Software Foundation, Inc.
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
our General Public Licenses are intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
Developers that use our General Public Licenses protect your rights
with two steps: (1) assert copyright on the software, and (2) offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.
A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate. Many developers of free software are heartened and
encouraged by the resulting cooperation. However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public.
The GNU Affero General Public License is designed specifically to
ensure that, in such cases, the modified source code becomes available
to the community. It requires the operator of a network server to
provide the source code of the modified version running there to the
users of that server. Therefore, public use of a modified version, on
a publicly accessible server, gives the public access to the source
code of the modified version.
An older license, called the Affero General Public License and
published by Affero, was designed to accomplish similar goals. This is
a different license, not a version of the Affero GPL, but Affero has
released a new version of the Affero GPL which permits relicensing under
this license.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU Affero General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Remote Network Interaction; Use with the GNU General Public License.
Notwithstanding any other provision of this License, if you modify the
Program, your modified version must prominently offer all users
interacting with it remotely through a computer network (if your version
supports such interaction) an opportunity to receive the Corresponding
Source of your version by providing access to the Corresponding Source
from a network server at no charge, through some standard or customary
means of facilitating copying of software. This Corresponding Source
shall include the Corresponding Source for any work covered by version 3
of the GNU General Public License that is incorporated pursuant to the
following paragraph.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the work with which it is combined will remain governed by version
3 of the GNU General Public License.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU Affero General Public License from time to time. Such new versions
will be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU Affero General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU Affero General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU Affero General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
Copyright (C)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see .
Also add information on how to contact you by electronic and paper mail.
If your software can interact with users remotely through a computer
network, you should also make sure that it provides a way for users to
get its source. For example, if your program is a web application, its
interface could display a "Source" link that leads users to an archive
of the code. There are many ways you could offer source, and different
solutions will be better for different programs; see section 13 for the
specific requirements.
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU AGPL, see
.
ospd-openvas-22.9.0/README.md 0000664 0000000 0000000 00000011410 15011310720 0015464 0 ustar 00root root 0000000 0000000 
# ospd-openvas
[](https://github.com/greenbone/ospd-openvas/releases)
[](https://pypi.org/project/ospd-openvas/)
[](https://github.com/greenbone/ospd-openvas/actions/workflows/ci-python.yml?query=branch%3Amain++)
ospd-openvas is an OSP server implementation to remotely control
[OpenVAS Scanner](https://github.com/greenbone/openvas-scanner) and [Notus Scanner](https://github.com/greenbone/notus-scanner).
Once running, you need to configure OpenVAS Scanner and Notus Scanner for the Greenbone Vulnerability
Manager, for example via the web interface Greenbone Security Assistant. Then
you can create scan tasks to use both scanners.
## Installation
### Requirements
Python 3.7 and later is supported.
`ospd-openvas` has dependencies on the following Python packages:
- `defusedxml`
- `deprecated`
- `lxml`
- `packaging`
- `paho-mqtt`
- `psutil`
- `python-gnupg`
- `redis`
### Mandatory configuration
The `ospd-openvas` startup parameter `--lock-file-dir` or the `lock_file_dir` config
parameter of the `ospd.conf` config file needs to point to the same location / path of
the `gvmd` daemon and the `openvas` command line tool (Default: `/var/run`).
Examples for both are shipped within the `config` sub-folder of this project.
Also in order to be able to use Notus ospd-openvas must connect to a MQTT broker, such as [Mosquitto](https://mosquitto.org/) in order to communicate. With the parameter `--mqtt-broker-address` (Default: localhost) the correct address must be given as well as the corresponding port with `--mqtt-broker-port` (Default: 1883).
Please see the `Details` section of the [GVM release notes](https://community.greenbone.net/t/gvm-20-08-stable-initial-release-2020-08-12/6312)
for more details.
### Optional configuration
Please note that although you can run `openvas` (launched from an `ospd-openvas`
process) as a user without elevated privileges, it is recommended that you start
`openvas` as `root` since a number of Network Vulnerability Tests (NVTs) require
root privileges to perform certain operations like packet forgery. If you run
`openvas` as a user without permission to perform these operations, your scan
results are likely to be incomplete.
As `openvas` will be launched from an `ospd-openvas` process with sudo,
the next configuration is required in the sudoers file:
sudo visudo
add this line to allow the user running `ospd-openvas`, to launch `openvas`
with root permissions
ALL = NOPASSWD: /sbin/openvas
If you set an install prefix, you have to update the path in the sudoers
file too:
Defaults secure_path=:/sbin
## Usage
There are no special usage aspects for this module beyond the generic usage
guide.
Please follow the general usage guide for ospd-based scanners:
## Support
For any question on the usage of ospd-openvas please use the [Greenbone
Community Portal](https://community.greenbone.net/). If you found a problem
with the software, please [create an
issue](https://github.com/greenbone/ospd-openvas/issues) on GitHub. If you are a
Greenbone customer you may alternatively or additionally forward your issue to
the Greenbone Support Portal.
## Maintainer
This project is maintained by [Greenbone AG](https://www.greenbone.net/).
## Contributing
Your contributions are highly appreciated. Please [create a pull
request](https://github.com/greenbone/ospd-openvas/pulls) on GitHub. Bigger
changes need to be discussed with the development team via the [issues section
at GitHub](https://github.com/greenbone/ospd-openvas/issues) first.
For development you should use [poetry](https://python-poetry.org)
to keep your python packages separated in different environments. First install
poetry via pip
python3 -m pip install --user poetry
Afterwards run
poetry install
in the checkout directory of ospd-openvas (the directory containing the
`pyproject.toml` file) to install all dependencies including the packages only
required for development.
The ospd-openvas repository uses [autohooks](https://github.com/greenbone/autohooks)
to apply linting and auto formatting via git hooks. Please ensure the git hooks
are active.
poetry install
poetry run autohooks activate --force
## License
Copyright (C) 2018-2022 [Greenbone AG](https://www.greenbone.net/)
Licensed under the [GNU Affero General Public License v3.0 or later](COPYING).
ospd-openvas-22.9.0/changelog.toml 0000664 0000000 0000000 00000000346 15011310720 0017037 0 ustar 00root root 0000000 0000000 commit_types = [
{ message = "^add", group = "Added"},
{ message = "^remove", group = "Removed"},
{ message = "^change", group = "Changed"},
{ message = "^fix", group = "Bug Fixes"},
]
changelog_dir = "changelog"
ospd-openvas-22.9.0/changelog/ 0000775 0000000 0000000 00000000000 15011310720 0016137 5 ustar 00root root 0000000 0000000 ospd-openvas-22.9.0/changelog/v22.4.0.md 0000664 0000000 0000000 00000010653 15011310720 0017377 0 ustar 00root root 0000000 0000000 # Changelog
All notable changes to this project will be documented in this file.
## [22.4.0] - 2022-07-18
## Added
* documentation about new features [bd705d3](https://github.com/greenbone/ospd-openvas/commit/bd705d3)
* documentation and examples for credentials [5b6b668](https://github.com/greenbone/ospd-openvas/commit/5b6b668)
* description of table_driven_lsc to OSPD_PARAMS [ecdb67c](https://github.com/greenbone/ospd-openvas/commit/ecdb67c)
* log info for VT update (#619) [fc3b80c](https://github.com/greenbone/ospd-openvas/commit/fc3b80c)
* possibility to handle redis tcp connections configured in openvas [29373b7](https://github.com/greenbone/ospd-openvas/commit/29373b7)
* package_unreliable qod_type to nvti_cache [61a3a35](https://github.com/greenbone/ospd-openvas/commit/61a3a35)
* use qod_type from advisory when available [47927c7](https://github.com/greenbone/ospd-openvas/commit/47927c7)
* possibility to disable advisories hashsum verification [614a2ec](https://github.com/greenbone/ospd-openvas/commit/614a2ec)
* gpg verification for notus (#557) [d73d03d](https://github.com/greenbone/ospd-openvas/commit/d73d03d)
* documentation for notus-scanner (#537) [9508f09](https://github.com/greenbone/ospd-openvas/commit/9508f09)
* Notus integration (#510) [2f39d2a](https://github.com/greenbone/ospd-openvas/commit/2f39d2a)
* notus advisories in get_vts (#518) [f78c64a](https://github.com/greenbone/ospd-openvas/commit/f78c64a)
* merge ospd into ospd-openvas [44cc135](https://github.com/greenbone/ospd-openvas/commit/44cc135)
* smoke test for get.vts (#513) [b3c0b6b](https://github.com/greenbone/ospd-openvas/commit/b3c0b6b)
## Changed
* version of the OSP documentation [76d5586](https://github.com/greenbone/ospd-openvas/commit/76d5586)
* Allow openvas access raw sockets and network within container [0eefae9](https://github.com/greenbone/ospd-openvas/commit/0eefae9)
* Extend the cmd and add cmd (#558) [c810de8](https://github.com/greenbone/ospd-openvas/commit/c810de8)
* Use poetry as build tool for pip installations #559 [f3e42c2](https://github.com/greenbone/ospd-openvas/commit/f3e42c2)
* Handle script timeout as script preferences instead of server preference (#509) [af97d06](https://github.com/greenbone/ospd-openvas/commit/af97d06)
## Bug Fixes
* using empty dict as scanner-params instead of returning 404 [7850833](https://github.com/greenbone/ospd-openvas/commit/7850833)
* within python 3.9 and higher notus results are missing [b42c758](https://github.com/greenbone/ospd-openvas/commit/b42c758)
* set to cvss2 when cvss3 is either not present or NULL [2963af4](https://github.com/greenbone/ospd-openvas/commit/2963af4)
* check for param to be present in calculate_vts_collection_hash [358456d](https://github.com/greenbone/ospd-openvas/commit/358456d)
* missing reload on update_vts [96bc329](https://github.com/greenbone/ospd-openvas/commit/96bc329)
* crash when no severity got found [30a760b](https://github.com/greenbone/ospd-openvas/commit/30a760b)
* Stop and resume scan (#604) [41b25c2](https://github.com/greenbone/ospd-openvas/commit/41b25c2)
* stacktrace on sensor when unexpectedly closing a ssh connection [487a58e](https://github.com/greenbone/ospd-openvas/commit/487a58e)
* do not load oids in openvas when handled by notus [15f50bb](https://github.com/greenbone/ospd-openvas/commit/15f50bb)
* ospd-openvas should not crash on missing plugin_feed_info.inc [989a876](https://github.com/greenbone/ospd-openvas/commit/989a876)
* mqtt network loop (#587) [9b5cecc](https://github.com/greenbone/ospd-openvas/commit/9b5cecc)
* Hash calculation of NVT fields [552b5f6](https://github.com/greenbone/ospd-openvas/commit/552b5f6)
* prepare notus parser to use family when defined [deb850c](https://github.com/greenbone/ospd-openvas/commit/deb850c)
* set sentry-sdk dependency to optional [80ed7ca](https://github.com/greenbone/ospd-openvas/commit/80ed7ca)
* warning on ERRMSG (#570) [bf26ad0](https://github.com/greenbone/ospd-openvas/commit/bf26ad0)
* missing category of notus advisories (#569) [02cfd3b](https://github.com/greenbone/ospd-openvas/commit/02cfd3b)
* mqtt on_disconnect method (#538) [915f02e](https://github.com/greenbone/ospd-openvas/commit/915f02e)
* fix dead host count (#511) (#516) [2a17e18](https://github.com/greenbone/ospd-openvas/commit/2a17e18)
* Fix get_status (#471) [866ea24](https://github.com/greenbone/ospd-openvas/commit/866ea24)
[22.4.0]: https://github.com/greenbone/ospd-openvas/compare/22.4.0.dev1...22.4.0 ospd-openvas-22.9.0/config/ 0000775 0000000 0000000 00000000000 15011310720 0015455 5 ustar 00root root 0000000 0000000 ospd-openvas-22.9.0/config/ospd-openvas.conf 0000664 0000000 0000000 00000000257 15011310720 0020746 0 ustar 00root root 0000000 0000000 [OSPD - openvas]
log_level = INFO
socket_mode = 0o770
unix_socket = /run/ospd/ospd-openvas.sock
pid_file = /run/ospd/ospd-openvas.pid
log_file = /var/log/gvm/ospd-openvas.log
ospd-openvas-22.9.0/config/ospd-openvas.service 0000664 0000000 0000000 00000001174 15011310720 0021460 0 ustar 00root root 0000000 0000000 [Unit]
Description=OSPd Wrapper for the OpenVAS Scanner (ospd-openvas)
Documentation=man:ospd-openvas(8) man:openvas(8)
After=network.target networking.service redis-server@openvas.service mosquitto.service
Wants=redis-server@openvas.service mosquitto.service notus-scanner.service
ConditionKernelCommandLine=!recovery
[Service]
Type=forking
User=gvm
RuntimeDirectory=ospd
RuntimeDirectoryMode=2775
PIDFile=/run/ospd/ospd-openvas.pid
ExecStart=/usr/bin/ospd-openvas --config /etc/gvm/ospd-openvas.conf --log-config /etc/gvm/ospd-logging.conf
SuccessExitStatus=SIGKILL
Restart=always
RestartSec=60
[Install]
WantedBy=multi-user.target
ospd-openvas-22.9.0/docs/ 0000775 0000000 0000000 00000000000 15011310720 0015140 5 ustar 00root root 0000000 0000000 ospd-openvas-22.9.0/docs/HTML.xsl 0000664 0000000 0000000 00000061512 15011310720 0016441 0 ustar 00root root 0000000 0000000
0
1
0
="
"
="
"
0
...
<
/>
<
>
</
>
<
>
</
>
|
.
|
|
.
|
1 Summary of Data Types
4.
Data Type
In short: .
.1 RNC
4 Data Types Details
|
.
|
2 Summary of Elements
5 Element Details
5.
Element
In short: .
.1 Structure
.2 RNC
.3 Example:
,
or
"
"
text
@
()
.
<>
<_response>
<>
()
.
<>
.
The group
One of
6.
Command
In short: .
.1 Structure
.2 RNC
.3 Example:
|
. |
3 Summary of Commands
6 Command Details
7 Summary of Scanner Parameters Types
8.
8 Compatibility Changes in Version
Protocol definition
ospd-openvas-22.9.0/docs/INSTALL-ospd-scanner 0000777 0000000 0000000 00000000000 15011310720 0024600 2INSTALL-ospd-scanner.md ustar 00root root 0000000 0000000 ospd-openvas-22.9.0/docs/INSTALL-ospd-scanner.md 0000664 0000000 0000000 00000012701 15011310720 0021163 0 ustar 00root root 0000000 0000000 General Installation Instructions for OSPD-based Scanners
=========================================================
This is a general description about installing an ospd-based scanner wrapper
implementation.
The actual scanner implementation usually has individual installation
instructions and may refer to this general guide.
In the following guide, replace `ospd-scanner` with the name of the actual OSPD
scanner.
Install in a Virtual Environment
--------------------------------
The recommended way to install `ospd-scanner` is to do so inside a virtual
environment (`virtualenv` or `venv`).
This way, the server and its dependency are well isolated from system-wide
updates, making it easier to upgrade it, delete it, or install dependencies
only for it.
Refer to the Python documentation for setting up virtual environments for
further information.
First you need to create a virtual environment somewhere on your system, for
example with the following command:
virtualenv ospd-scanner
Installing `ospd-scanner` inside your newly created virtual environment could
then be done with the following command:
ospd-scanner/bin/pip install ospd_scanner-x.y.z.tar.gz
Note: As `ospd` is not (yet) available through PyPI, you probably want to
install it manually first inside your virtual environment prior to installing
`ospd-scanner`.
To run `ospd-scanner`, just start the Python script installed inside the
virtual environment:
ospd-scanner/bin/ospd-scanner
Install (Sub-)System-wide
-------------------------
To install `ospd-scanner` into directory `` run this command:
python3 setup.py install --prefix=
The default for `` is `/usr/local`.
Be aware that this might automatically download and install missing
Python packages. To prevent this, you should install the prerequisites
first with the mechanism of your system (for example via `apt` or `rpm`).
You may need to set the `PYTHONPATH` like this before running
the install command:
export PYTHONPATH=/lib/python3.7/site-packages/
The actual value for `PYTHONPATH` depends on your Python version.
Creating certificates
---------------------
An OSPD service can be started using a Unix domain socket (only on
respective systems) or using a TCP socket. The latter uses TLS-based
encryption and authorization while the first is not encrypted and uses
the standard file access rights for authorization.
For the TCP socket communication it is mandatory to use adequate
TLS certificates which you need for each of your OSPD service. You may use
the same certificates for all services if you like.
By default, those certificates are used which are also used by GVM
(see paths with `ospd-scanner --help`). Of course this works only
if installed in the same environment.
In case you do not have already a certificate to use, you may quickly
create your own one (can be used for multiple ospd daemons) using the
`gvm-manage-certs` tool provided with `gvmd`
():
gvm-manage-certs -s
And sign it with the CA checked for by the client. The client is usually
Greenbone Vulnerability Manager for which a global trusted CA certificate
can be configured.
Registering an OSP daemon at Greenbone Vulnerability Manager
------------------------------------------------------------
The file [README](../README.md) explains how to control the OSP daemon via
command line.
It is also possible to register an OSP daemon at the Greenbone Vulnerability
Manager and then use GMP clients to control the OSP daemon, for example the
web interface GSA.
You can register either via the GUI (`Configuration -> Scanners`) and create
a new Scanner there.
Or you can create a scanner via `gvmd` command line (adjust host,
port, paths, etc. for your daemon):
gvmd --create-scanner="OSP Scanner" --scanner-host=127.0.0.1 --scanner-port=1234 \
--scanner-type="OSP" --scanner-ca-pub=/usr/var/lib/gvm/CA/cacert.pem \
--scanner-key-pub=/usr/var/lib/gvm/CA/clientcert.pem \
--scanner-key-priv=/usr/var/lib/gvm/private/CA/clientkey.pem
or for local running ospd-scanner via file socket:
gvmd --create-scanner="OSP Scanner" --scanner-type="OSP" --scanner-host=/var/run/ospd-scanner.sock
Please note that the scanner created via `gvmd` like above will be created with
read permissions to all pre-configured roles.
Check whether Greenbone Vulnerability Manager can connect to the OSP daemon:
$ gvmd --get-scanners
08b69003-5fc2-4037-a479-93b440211c73 OpenVAS Default
3566ddf1-cecf-4491-8bcc-5d62a87404c3 OSP Scanner
$ gvmd --verify-scanner=3566ddf1-cecf-4491-8bcc-5d62a87404c3
Scanner version: 1.0.
Of course, using GMP via command line tools provided by
[gvm-tools](https://github.com/greenbone/gvm-tools) to register an OSP Scanner
is also possible as a third option.
Documentation
-------------
Source code documentation can be accessed over the usual methods,
for example (replace "scanner" by the scanner name):
$ python3
>>> import ospd_scanner.wrapper
>>> help (ospd_scanner.wrapper)
An equivalent to this is:
pydoc3 ospd_scanner.wrapper
To explore the code documentation in a web browser:
$ pydoc3 -p 12345
pydoc server ready at http://localhost:12345/
For further options see the `man` page of `pydoc`.
Creating a source archive
-------------------------
To create a .tar.gz file for the `ospd-scanner` module run this command:
python3 setup.py sdist
This will create the archive file in the subdirectory `dist`.
ospd-openvas-22.9.0/docs/OSP.xml 0000664 0000000 0000000 00000164255 15011310720 0016340 0 ustar 00root root 0000000 0000000
Open Scanner Protocol
OSP
The Open Scanner Protocol
24.10
boolean
0 or 1
xsd:token { pattern = "[01]" }
epoch_time
A date, in Unix format
integer
integer
An integer
integer
status
Status code describing the result of a command
xsd:token { pattern = "[1-5][0-9][0-9]" }
string
A string
text
uuid
A Universally Unique Identifier (UUID)
xsd:token { pattern = "[0-9abcdefABCDEF\-]{1,40}" }
vt_id
Identifier for a vulnerability test
xsd:token { pattern = "[0-9a-zA-Z_\-.:]{1,80}" }
credential
A credential consisting of type, service, port, username and password.
type
string
1
service
string
1
port
string
username
password
private
priv_username
priv_password
community
auth_algorithm
privacy_password
privacy_algorithm
realm
kdc
username
text
password
text
private
file
priv_username
text
priv_password
text
community
text
auth_algorithm
text
privacy_password
text
privacy_algorithm
text
realm
text
kdc
text
SSH Credential with username + password authentication and port
scanuser
mypass
SSH Credential with username + SSH Key authentication and port
scanuser
keypass
/path/to/ssh/keyfile
SSH Credential with elevated privileges
scanuser
mypass
root
rootpw
ESXi Credential with username + password authentication
smbuser
mypass
SNMP Credentials
smbuser
mypass
public
md5
privpass
aes
Kerberos Credentials. The kdc value is only used when no matching realm was found.
krb5user
mypass
myrealm
mykdc
scanner_params
Contains elements that represent scanner specific parameters
e
e
Element that represents a scanner specific parameters
string
string
scanner_params
443
1
fast_scan
targets
List of targets
target
Two targets
...
...
target
A scan target consisting of hosts, a port selection and credentials
hosts
ports
credentials
exclude_hosts
finished_hosts
alive_test_ports
alive_test
alive_test_methods
reverse_lookup_unify
reverse_lookup_only
hosts
One or many hosts. The list is comma-separated. Each entry can be a IP address, a CIDR notation, a hostname, a IP range. IPs can be v4 or v6
string
ports
string
A list of ports that is the same for the given hosts
credentials
One or many credentials containing the credential for the given hosts
credential
exclude_hosts
One or many hosts to exclude. The list is comma-separated. Each entry can be a IP address, a CIDR notation, a hostname, a IP range. IPs can be v4 or v6. Each wrapper must handle the exclude hosts
string
finished_hosts
One or many finished hosts to exclude when the client resumes a task. The list is comma-separated. Each entry can be an IP address, a CIDR notation, a hostname, a IP range. IPs can be v4 or v6. The listed hosts will be set as finished before starting the scan. Each wrapper must handle the finished hosts
string
alive_test
Alive test type to be performed against the target
integer
alive_test_methods
Alive test methods to be performed against the target
icmp
icmp
ICMP ping
boolean
tcp_syn
tcp_syn
TCP-SYN ping
boolean
tcp_ack
tcp_ack
TCP-ACK ping
boolean
arp
arp
ARP ping
boolean
consider_alive
consider_alive
Consider the target to be alive
boolean
alive_test_ports
Dedicated port list for alive detection. Used for TCP-SYN and TCP-ACK ping when Boreas (scanner preference test_alive_hosts_only) is enabled. If no port list is provided ports 80, 137, 587, 3128, 8081 are used as defaults
string
reverse_lookup_only
Only scan IP addresses that can be resolved into a DNS name
string
reverse_lookup_unify
If multiple IP addresses resolve to the same DNS name the DNS name will only get scanned once
string
Target without credentials
example.org
T:22,U:5060
0
22,80,123
0
0
Target with two credentials
192.168.1.0/24
1,2,3,80,443
...
...
192.168.1.10-15
192.168.1.1-3
vt_group
Collection of Vulnerability Test
filter
string
1
VT group filtered by family name
vt_selection
Contains elements that represent a Vulnerability Test or a collection of Vulnerability Tests to be executed and their parameters
vt_single
vt_group
VT with parameters and VT group
200
yes
vt_single
Elements that represent Vulnerability Tests
id
Identifier for a vulnerability test
vt_id
1
vt_value
VT with parameters
200
yes
vt_value
Vulnerability Test parameter
id
string
1
string
Parameters for a single VT
200
yes
help
Get the help text
format
Help format
xml
text
status
status
1
status_text
text
1
text
Get the help text
ID of scan to delete
Delete a finished scan
Help format. Could be text or xml
Print the commands help
Return various versions
Perform sync feed self test and check lockfile status
ID of scan stop.
Stop a currently running scan.
Return scanner description and parameters
Optional UUID value to set as scan ID
Target hosts to scan in a comma-separated list
Ports list to scan as comma-separated list
Optional number of parallel scans to run
Scan profile
Target port
Use HTTPS
w3af scan timeout
Start a new scan
ID of scan to stop
Stop a currently running scan
Mandatory ID of a specific scan to get
Whether to return the full scan report
Whether to remove the fetched results
Maximum number of results to fetch. Only considered if pop_results is enabled. Default = None, which means that all available results are returned
List the scans in buffer
Return system report
Name of report.
Time of first data point in report.
Time of last data point in report.
get_performance
Return performance information from an external program
start
Interval start
int
end
Interval end
int
titles
Interval title to get
text
status
status
1
status_text
text
1
text
Some output.
get_scans
Get a stored scan in buffer
scan_id
Scan UUID
uuid
details
Whether to get full scan reports
boolean
progress
Whether to return a detailed progress information
boolean
pop_results
Whether to remove the fetched results
boolean
max_results
Maximum number of results to fetch. Only considered if pop_results is enabled. Default = None, which means that all available results are returned
int
status
status
1
status_text
text
1
scan
scan
id
uuid
target
string
start_time
epoch_time
end_time
epoch_time
progress
integer
status
string
results
Get a scan report summary
The URL: "https://192.168.1.252/" has a path
disclosure vulnerability which discloses "/var/www/phpinfo.php"
...
...
Get a scan report summary
The URL: "https://192.168.1.252/" has a path
disclosure vulnerability which discloses "/var/www/phpinfo.php"
...
Get a scan progress summary
delete_scan
Delete a finished scan
scan_id
Scan UUID
uuid
status
status
1
status_text
text
1
Delete a scan successfully
get_version
Return various versions
status
status
1
status_text
text
1
protocol
daemon
scanner
vts
protocol
name
version
version
name
daemon
name
version
version
name
scanner
name
version
version
name
vts
name
version
home
vendor
version
name
home
vendor
Get protocol, scanner and daemon versions
1.0
OSP
generic version
generic ospd
1.6.0.4
w3af
202112070837
Greenbone AG
https://www.greenbone.net/en/feed-comparison/
Greenbone Security Feed
check_feed
Perform sync feed self test and check lockfile status
status
status
1
status_text
text
1
feed
feed
lockfile_in_use
self_test_exit_error
self_test_error_msg
lockfile_in_use
self_test_exit_error
self_test_error_msg
Perform sync feed self test and check lockfile status
0
0
get_scanner_details
Return scanner description and parameters
list_all
List all available scanner parameters. Not only those visible to the client.
boolean
status
status
1
status_text
text
1
description
scanner_params
description
scanner_params
scanner_param
scanner_param
id
string
type
string
name
description
default
mandatory
name
description
default
mandatory
Get scanner details
...
Scan profile
Scan profiles are predefined set of plugins and customized configurations.
fast_scan|fast_scan|audit_high_risk|full_audit|OWASP_TOP10|bruteforce|empty_profile|web_infrastructure|full_audit_spider_man|sitemap
Show HTTP request status
Whether to show the HTTP request's status in results
0
Dry Run
Whether to dry run scan.
0
Show HTTP response status
Whether to show the HTTP response's status in results
0
Seed path
Path to start with
/
Debug Mode
Whether to get extra scan debug information.
0
Target port
Port on target host to scan
80
Use HTTPS
Whether the target application is running over HTTPS
0
get_vts
Return information about vulnerability tests, if offered by scanner
vt_id
Identifier for vulnerability test
vt_id
filter
Filter to get a sub group of a VT collection
string
details
Return more details about vulnerability tests, if offered by the scanner
string
version_only
Return only feed information
boolean
status
status
1
status_text
text
1
vts
vts
vts_version
text
feed_vendor
text
feed_home
text
feed_name
text
total
integer
sha256_hash
text
vt
vt
id
vt_id
name
creation_time
modification_time
params
refs
dependencies
summary
impact
affected
insight
solution
detection
severities
custom
name
creation_time
modification_time
params
param
param
type
string
id
string
refs
ref
ref
type
string
id
string
dependencies
dependency
dependency
vt_id
vt_id
summary
impact
affected
insight
solution
type
Solution type, for example "VendorFix"
string
method
Solution method, for example "DebianAPTUpgrade"
string
detection
severities
severity
severity
type
string
origin
date
value
origin
Optional reference to the origin of the severity
string
date
Optional timestamp in seconds since epoch. Defaults to VT creation date
string
value
The actual value, the format must correspond with the type
string
custom
Get only feed information
Get information for all available vulnerability tests
Check for presence of vulnerability X
Check for presence of vulnerability Y
Get information for a single vulnerability test
Check for presence of vulnerability X
1200603949
1567429142
Check the version of App
App in OS v2
App is a small but very powerful app.
Please Install the Updated Packages.
Get the installed version with the help of detect NVT and check if the version is vulnerable or not.
CVE-2014-9116
1200603949
AV:N/AC:L/Au:N/C:N/I:N/A:P
Get information for a filtered collection of vulnerability test without details
Check for presence of vulnerability X
1200603949
1567429142
Check the version of App
App in OS v2
App is a small but very powerful app.
Please Install the Updated Packages.
Get the installed version with the help of detect NVT and check if the version is vulnerable or not.
CVE-2014-9116
1200603949
AV:N/AC:L/Au:N/C:N/I:N/A:P
Get information for a vulnerability test with custom data
Check for presence of vulnerability X
First custom element
second custom element
Get information for a vulnerability test with VT parameters data
Check for presence of vulnerability X
Timeout
Vulnerability Test Timeout
300
Scan UDP
1
First custom element
second custom element
start_scan
Start a new scan
target
Target hosts to scan in a comma-separated list
string
ports
Ports list to scan as comma-separated list
string
scan_id
Optional UUID value to use as scan ID
uuid
parallel
Optional number of parallel scan to run
integer
scanner_params
vt_selection
targets
scanner_params
Contains elements that represent scanner specific parameters
vt_selection
Contains elements that represent a Vulnerability Test or a collection of Vulnerability Tests to be executed and their parameters
targets
Contains elements that represent a target to execute a scan against. If target and port attributes are present this element is not taken into account
status
status
1
status_text
text
1
id
id
New scan's UUID
Start a new scan. Legacy mode
443
1
fast_scan
2f616d53-595f-4785-9b97-4395116ca118
Start a new scan with multi-targets running simultaneously. Each one has a different port list and one of them has credentials for authenticated scans.
...
....
...
192.168.1.0/24
1,2,3,80,443
...
192.168.1.10-15
192.168.1.1-3
2f616d53-595f-4785-9b97-4395116ca118
stop_scan
Stop a currently running scan
scan_id
ID of scan to stop
uuid
status
status
1
status_text
text
1
Stop a scan
get_memory_usage
Return memory usage information of the osp daemon
unit
Size unit for the memory. b for bytes, kb for kilobytes and mb for megabytes.
text
status
status
1
status_text
text
1
processes
List of running processes
process
Single running processes
name
Name of the process
string
pid
Process ID
int
rss
Resident Set Size. Non-swapped physical memory of the process
int
vms
Virtual Memory Size. Total amount of virtual memory used by the process.
int
shared
Memory shared with other processes
int
127182
239616
135168
...
integer
An integer value
string
A string
boolean
0 or 1
selection
A value out of the | separated values list
credential_up
The username:password of a credential
file
A file's content
ovaldef_file
An ovaldef file's content that is base64 encoded
CHECK_FEED
New command to perform a sync feed self test
Perform a sync feed self test and check if the lockfile is locked
22.04
GET_VTS
Add feed information
Include as arguments the feed's vendor, name and home.
22.04
GET_VERSION
Add feed information
Include as arguments the feed's vendor, name and home.
22.04
START_SCAN
Add support for privileged access to target
Credential Element got new Elements in case of SSH type. These new Elements allow an additional root access to the target system.
22.04
START_SCAN
Add support for dedicated port list for alive detection
Target element received new target option alive_test_ports.
21.04
START_SCAN
Add support for supplying alive test methods via separate elements.
Target element received new optional target option alive_test_methods with subelements icmp, tcp_ack, tcp_syn, arp and consider_alive.
21.04
GET_VTS
Returned object changes and extends severity information
The element SEVERITY inside SEVERITIES of the returned VT moves the origin and the value
into explicit elements ORIGIN and VALUE. Furthermore the element DATE is added.
21.04
GET_VTS
Returned object extended with solution method
The element SOLUTION of the returned VT object has a new optional attribute METHOD
that describes the solution method in case the VT offers such a detail.
20.08
GET_VTS
Returned object extended with amount of VTS
The main element vts has the new TOTAL attribute with the amount of vts in the collection and the new optional SENT attribute with the amount of vts which matched a given filter.
20.08
GET_VTS
Add attribute DETAILS
The new attribute DETAILS allows to get more details about a vulnerability test if the scanner support it. By default is set True, and for a lighter response must be explicitly set to False.
20.08
GET_VTS
Add attribute SHA256_HASH
The new attribute SHA256_HASH is calculated from the vulnerability test OID, modification time and VT preferences, sorted by OID. The new attribute is helpful for feed integrity check.
20.08
GET_VTS
Add attribute VTS_VERSION
The feed version is included in the get_vts command response.
20.08
START_SCAN
Add support for target options
Target element receive new target options reverse_lookup_only, reverse_lookup_unify and alive_test.
20.08
GET_SCANS
Add scan progress details
New attribute PROGRESS to specify whether to return a detailed progress information.
20.08
GET_VTS
command added
Added new command to retrieve information about vulnerability tests a scanner might offer.
1.2
START_SCAN
vts optional element added
Added optional element vts to allow the client to specify a vts list
to use for the scan and their parameters.
1.2
START_SCAN
target optional element added
Added optional element targets to specify different hosts with a different port list and credentials. This is taken into account only if target and port attributes are not present in the start_scan tag.
1.2
START_SCAN
parallel attribute added
Added optional attribute parallel to specify the number of simultaneous scans to be run.
1.2
STOP_SCAN
command added
Added new command to stop a currently running scan.
1.1
START_SCAN
scan_id attribute added
The scan_id attribute was added to allow the client to specify a UUID as
the ID of the scan.
1.1
START_SCAN
ports attribute added
The ports attribute was added to allow the client to specify a ports list
to use for the scan.
1.1
Scanner Parameters Types
Type credential_up added
Introduce an aggregated type to express a username:password tuple.
1.1
GET_PERFORMANCE
Command added
Added new command to get performance from an external program.
1.2
ospd-openvas-22.9.0/docs/USAGE-ospd-scanner 0000777 0000000 0000000 00000000000 15011310720 0024074 2USAGE-ospd-scanner.md ustar 00root root 0000000 0000000 ospd-openvas-22.9.0/docs/USAGE-ospd-scanner.md 0000664 0000000 0000000 00000006736 15011310720 0020734 0 ustar 00root root 0000000 0000000 General Usage Instructions for ospd-based Scanners
--------------------------------------------------
This is a general description about using an ospd-based scanner wrapper
implementation.
The actual scanner implementation has individual usage instructions for anything
that goes beyond this general guide.
In the following description replace `ospd-scanner` with the name of the actual
OSPD scanner.
See the documentation of your ospd-based scanner and the general instructions in
the [INSTALL-ospd-scanner.md](INSTALL-ospd-scanner.md) file on how to hand over
full control to the Greenbone Vulnerability Manager.
This usage guide explains how to use an OSP scanner independently of Greenbone
Vulnerability Manager, for example when developing a new ospd-based scanner or
for testing purposes.
Open Scanner Protocol
---------------------
Using an ospd-based scanner means using the Open Scanner Protocol (OSP). This is
what Greenbone Vulnerability Manager does. See the ospd module for the original
specification available in [ospd/doc/OSP.xml](OSP.xml).
There is also an online version available at
.
gvm-tools
---------
The `gvm-tools` help to make accessing the OSP interface easier.
They can be obtained from .
This module provides the commands `gvm-cli` and `gvm-pyshell`.
Starting an ospd-based scanner
------------------------------
All ospd-based scanners share a set of command-line options such as
`--help`, `--bind-address`, `--port`, `--key-file`, `--timeout`, etc.
For example, to see the command line options you can run:
ospd-scanner --help
To run an instance of `ospd-scanner` listening on Unix domain socket:
ospd-scanner -u /var/run/ospd-scanner.sock &
To run a test instance of `ospd-scanner` on local TCP port 1234:
ospd-scanner -b 127.0.0.1 -p 1234 &
Add `--log-level=DEBUG` to enable maximum debugging output.
Parameter for `--log-level` can be one of `DEBUG`, `INFO`, `WARNING`, `ERROR` or
`CRITICAL` (in order of priority).
Controlling an OSP scanner
--------------------------
You can use command line tools provided by the `gvm-tools` module to interact
with an OSP scanner.
To get a description of the interface:
gvm-cli socket --sockpath /var/run/ospd-scanner.sock --xml ""
Starting a scan (scanner parameters can be added according to the description
printed as response to the `` command):
gvm-cli socket --sockpath /var/run/ospd-scanner.sock --xml=""
Start a scan for ospd-based scanners that use the builtin-support for SSH
authentication:
gvm-cli socket --sockpath /var/run/ospd-scanner.sock --xml="myuser:mypassword"
Start a scan for two vulnerability tests `vt_id_1` and `vt_id_2` of an ospd-based
scanner:
gvm-cli socket --sockpath /var/run/ospd-scanner.sock --xml="vt_id_1, vt_id_2"
Show the list of scans with status and results:
gvm-cli socket --sockpath /var/run/ospd-scanner.sock --xml=""
Delete a scan from this list (only finished scans can be deleted):
gvm-cli socket --sockpath /var/run/ospd-scanner.sock --xml=""
ospd-openvas-22.9.0/docs/example-ospd-logging.conf 0000664 0000000 0000000 00000002434 15011310720 0022034 0 ustar 00root root 0000000 0000000 ### If a custom logging configuration is required, the following
### items under General must be present in the configuration file.
### Examples for customization are given. Just comment/uncomment
### the corresponding lines and do necessary adjustments.
### For official documentation visit
### https://docs.python.org/3.7/library/logging.config.html#configuration-file-format
## General
####################
[loggers]
keys=root
#keys=root, ospd_openvas
[logger_root]
level=NOTSET
handlers=default_handler
### There is already an existing default_handler.
### Uncomment the following to extend the existent handler list
####################
#[handlers]
#keys=default_handler, custom_syslog
### Example for a custom handler. Custom must be added to the handlers list,
####################
#[handler_custom]
#class=FileHandler
#level=DEBUG
#formatter=file
#args=('some_path_to_log_file.log', 'a')
#[handler_custom_syslog]
#class=handlers.SysLogHandler
#level=DEBUG
#formatter=file
#args=('/dev/log', handlers.SysLogHandler.LOG_USER)
### Specific logging configuration for a single module. In the following
### example, the ospd_openvas.lock module will log with debug level.
####################
#[logger_ospd_openvas]
#level=DEBUG
#handlers=custom_syslog
#qualname=ospd_openvas.lock
#propagate=0
ospd-openvas-22.9.0/docs/example-ospd.conf 0000664 0000000 0000000 00000001635 15011310720 0020412 0 ustar 00root root 0000000 0000000 [OSPD - openvas]
## General
pid_file = install-prefix/var/run/ospd/openvas.pid
lock_file_dir = install-prefix/var/run/
stream_timeout = 1
max_scans = 3
min_free_mem_scan_queue = 1000
max_queued_scans = 0
# Log config
log_level = DEBUG
log_file = install-prefix/var/log/gvm/openvas.log
log_config = install-prefix/.config/ospd-logging.conf
## Unix socket settings
socket_mode = 0o770
unix_socket = install-prefix/var/run/ospd/openvas.sock
## TLS socket settings and certificates.
#port = 9390
#bind_address = 0.0.0.0
#key_file = install-prefix/var/lib/gvm/private/CA/serverkey.pem
#cert_file = install-prefix/var/lib/gvm/CA/servercert.pem
#ca_file = install-prefix/var/lib/gvm/CA/cacert.pem
[OSPD - some wrapper]
log_level = DEBUG
socket_mode = 0o770
unix_socket = install-prefix/var/run/ospd/ospd-wrapper.sock
pid_file = install-prefix/var/run/ospd/ospd-wrapper.pid
log_file = install-prefix/var/log/gvm/ospd-wrapper.log
ospd-openvas-22.9.0/docs/generate 0000775 0000000 0000000 00000001700 15011310720 0016656 0 ustar 00root root 0000000 0000000 #!/bin/bash
# Copyright (C) 2014-2021 Greenbone AG
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
# Schema generator script: HTML.
# Resolve the directory containing this script so it can be invoked from
# any working directory.
DOCDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Transform the OSP protocol definition into browsable HTML.
# Quote the expansions so paths containing whitespace do not undergo
# word splitting (ShellCheck SC2086).
xsltproc "${DOCDIR}/HTML.xsl" "${DOCDIR}/OSP.xml" > "${DOCDIR}/osp.html"
ospd-openvas-22.9.0/docs/ospd-openvas.8 0000664 0000000 0000000 00000011554 15011310720 0017655 0 ustar 00root root 0000000 0000000 .TH OSPD-OpenVAS 8 "August 2019" "Greenbone Vulnerability Management" "User Manuals"
.SH NAME
ospd-openvas \- The OpenVAS Wrapper of the Greenbone Vulnerability Management
.SH SYNOPSIS
.BI "ospd-openvas [\|-v\|] [\|-h\|] [\|-c " config-file\| "] [\|--log-file " log-file\| "]
.SH DESCRIPTION
.B Greenbone Vulnerability Management (GVM)
is a vulnerability auditing and management framework made up of several modules.
The OSPD OpenVAS Wrapper,
.BR ospd-openvas
is in charge of the communication between the scanner OpenVAS and the clients
(GVMd and gvm-tools).
.BR ospd-openvas
inspects the remote hosts to list all the vulnerabilities and common
misconfigurations that affects them.
It is a command line tool with parameters to start a daemon which keeps
waiting for instructions to update the feed of vulnerability tests and
to start a scan.
The second part of the interface is the redis store where the parameters
about a scan task need to be placed and from where the results can be
retrieved, being the unique communication channel between OSPD-OpenVAS
and OpenVAS.
.SH OPTIONS
.TP
.BI "-s " ", --config-file"
Use the alternate configuration file instead of
.I ~/.config/ospd.conf
.TP
.B "--version"
Print the version number and exit
.TP
.B "-h, --help"
Show a summary of the commands
.TP
.BI "-p " PORT ", --port "PORT
TCP Port to listen on. Default: 0
.TP
.BI "-b " ADDRESS ", --bind-address "ADDRESS
Address to listen on. Default: 0.0.0.0
.TP
.BI "-u " UNIX_SOCKET ", --unix-socket "UNIX_SOCKET
Unix file socket to listen on. Default: /var/run/ospd/ospd.sock
.TP
.BI "-m " SOCKET_MODE ", --socket-mode "SOCKET_MODE
Unix file socket mode. Default: 0o700
.TP
.BI "--pid-file "PID_FILE
Location of the file for the process ID. Default: /var/run/ospd.pid
.TP
.BI "--lock-file-dir "LOCK_FILE_DIR
Directory where the feed lock file is placed. Default: /var/run/ospd
.TP
.BI "-k " KEY_FILE ", --key-file "KEY_FILE
Server key file. Default:
/usr/var/lib/gvm/private/CA/serverkey.pem
.TP
.BI "-c " CERT_FILE ", --cert-file "CERT_FILE
Server cert file. Default:
/usr/var/lib/gvm/CA/servercert.pem
.TP
.BI "--ca-file "CA_FILE
CA cert file. Default: /usr/var/lib/gvm/CA/cacert.pem
.TP
.BI "-L " LOG_LEVEL ", --log-level "LOG_LEVEL
Desired level of logging. Default: WARNING
.TP
.BI "-f, --foreground"
Run in foreground and logs all messages to console.
.TP
.BI "-l " LOG_FILE ", --log-file "LOG_FILE
Path to the logging file.
.TP
.BI "--stream-timeout "TIMEOUT
Set a timeout on socket operations. Default 10 seconds
.TP
.BI "--niceness "NICENESS
Start the scan with the given niceness. Default 10
.TP
.BI "--scaninfo-store-time "TIME
Time in hours a scan is stored before being considered forgotten and being deleted from
the scan table. Default 0, disabled.
.TP
.BI "--max-scans "VALUE
Max. amount of parallel tasks that can be started. Default 0, disabled.
.TP
.BI "--min-free-mem-scan-queue "MB
Minimum free memory in MB required to run the scan. If not enough free memory is
available, the scan is queued. Default 0, disabled.
.TP
.BI "--max-queued-scans "VALUE
Maximum number allowed of queued scans before starting to reject new scans.
Default 0, disabled.
.SH THE CONFIGURATION FILE
The default
.B ospd-openvas
configuration file,
.I ~/.config/ospd.conf
contains these options under the section [OSPD - openvas]:
.IP log_level
Wished level of logging.
.IP socket_mode
This option defines the permissions on a socket.
It must be set in octal format. E.g. socket_mode = 0o770
.IP unix_socket
This option specifies the socket path.
.IP pid_file
Location of the file for the process ID.
.IP log_file
Path to the log file. If no log file is given, the system log
facility is used by default.
.IP foreground
If this option is set to yes, the daemon logs to the standard output instead of logging
to a file or syslog.
.IP niceness
Start the scan with the given niceness. Default 10
.IP stream_timeout
Set a timeout on socket operations. Default 10 seconds
.IP scaninfo_store_time
Time in hours a scan is stored before being considered forgotten and being deleted from
the scan table. Default 0, disabled.
.IP max_scans
Max. amount of parallel tasks that can be started. Default 0, disabled.
.IP min_free_mem_scan_queue
Minimum free memory in MB required to run the scan. If not enough free memory is
available, the scan is queued. Default 0, disabled.
.IP max_queued_scans
Maximum number allowed of queued scans before starting to reject new scans.
Default 0, disabled.
.SH SEE ALSO
\fBopenvas(8)\f1, \fBgsad(8)\f1, \fBgvmd(8)\f1, \fBgreenbone-nvt-sync(8)\f1,
.SH MORE INFORMATION
The canonical places where you will find more information
about OSPD-OpenVAS are:
.RS
.UR https://community.greenbone.net
Community Portal
.UE
.br
.UR https://github.com/greenbone
Development Platform
.UE
.br
.UR https://www.openvas.org
Traditional home site
.UE
.RE
.SH AUTHORS
ospd-openvas code is developed by Greenbone AG.
ospd-openvas-22.9.0/docs/rnc.xsl 0000664 0000000 0000000 00000036462 15011310720 0016465 0 ustar 00root root 0000000 0000000
↵
↵
### Preamble
start = command | response
command
=
|
response
=
_response
|
|
|
" }
xsd:token { pattern = "
#
.
attribute
{
}
?
_response
|
|
" }
xsd:token { pattern = "
*
(
&
)
?
(
|
)
ERROR
""
text # RNC limitation:
&
&
#
.
= element
# type
{
}
= element
{
}
= element
{
}
ospd-openvas-22.9.0/ospd/ 0000775 0000000 0000000 00000000000 15011310720 0015155 5 ustar 00root root 0000000 0000000 ospd-openvas-22.9.0/ospd/__init__.py 0000664 0000000 0000000 00000000303 15011310720 0017262 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
"""OSPd core module."""
from ospd_openvas.__version__ import __version__
ospd-openvas-22.9.0/ospd/command/ 0000775 0000000 0000000 00000000000 15011310720 0016573 5 ustar 00root root 0000000 0000000 ospd-openvas-22.9.0/ospd/command/__init__.py 0000664 0000000 0000000 00000000336 15011310720 0020706 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
import ospd.command.command # required to register all commands
from .registry import get_commands
ospd-openvas-22.9.0/ospd/command/command.py 0000664 0000000 0000000 00000054113 15011310720 0020567 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
import multiprocessing
import re
import logging
import subprocess
from decimal import Decimal
from typing import Optional, Dict, Any, Union, Iterator
from xml.etree.ElementTree import Element, SubElement
import psutil
from ospd.errors import OspdCommandError
from ospd.misc import valid_uuid
from ospd.protocol import OspRequest, OspResponse
from ospd.xml import (
simple_response_str,
get_elements_from_dict,
XmlStringHelper,
)
from .initsubclass import InitSubclassMeta
from .registry import register_command
logger = logging.getLogger(__name__)
class BaseCommand(metaclass=InitSubclassMeta):
    """Abstract base class for OSP protocol commands.

    Subclassing automatically registers the command class in the global
    command registry (via ``__init_subclass__``). Concrete commands set
    the class attributes below and implement :meth:`handle_xml`.
    """

    # Command name as used in the OSP XML protocol.
    name = None
    # Human readable description of the command.
    description = None
    # Optional dict describing the supported XML attributes.
    attributes = None
    # Optional dict describing the supported XML sub elements.
    elements = None
    # Whether the daemon must be initialized before running this command.
    must_be_initialized = None

    def __init_subclass__(cls, **kwargs):
        # Chain to any parent hook first (PEP 487), then self-register.
        parent = super()
        if hasattr(parent, '__init_subclass__'):
            parent.__init_subclass__(**kwargs)
        register_command(cls)

    def __init__(self, daemon):
        # Daemon instance this command operates on.
        self._daemon = daemon

    def get_name(self) -> str:
        """Return the protocol name of the command."""
        return self.name

    def get_description(self) -> str:
        """Return the human readable description of the command."""
        return self.description

    def get_attributes(self) -> Optional[Dict[str, Any]]:
        """Return the supported XML attributes, if any."""
        return self.attributes

    def get_elements(self) -> Optional[Dict[str, Any]]:
        """Return the supported XML sub elements, if any."""
        return self.elements

    def handle_xml(self, xml: Element) -> Union[bytes, Iterator[bytes]]:
        """Process the command XML element. Must be overridden."""
        raise NotImplementedError()

    def as_dict(self):
        """Return the command metadata as a plain dictionary."""
        return {
            'name': self.get_name(),
            'attributes': self.get_attributes(),
            'description': self.get_description(),
            'elements': self.get_elements(),
        }

    def __repr__(self):
        return (
            f'<{self.name} description="{self.description}" '
            f'attributes={self.attributes} elements={self.elements}>'
        )
class HelpCommand(BaseCommand):
    """Implements the OSP <help> command."""

    name = "help"
    description = 'Print the commands help.'
    attributes = {'format': 'Help format. Could be text or xml.'}
    must_be_initialized = False

    def handle_xml(self, xml: Element) -> bytes:
        """Return the help of all commands, either as plain text or XML."""
        requested_format = xml.get('format')
        if requested_format is None or requested_format == "text":
            # Plain text is the default help representation.
            return simple_response_str(
                'help', 200, 'OK', self._daemon.get_help_text()
            )
        if requested_format == "xml":
            # Serialize the metadata of every registered command.
            text = get_elements_from_dict(
                {k: v.as_dict() for k, v in self._daemon.commands.items()}
            )
            return simple_response_str('help', 200, 'OK', text)
        raise OspdCommandError('Bogus help format', 'help')
class CheckFeed(BaseCommand):
    """Implements the OSP <check_feed> command."""

    name = "check_feed"
    description = 'Perform a sync feed self test and return the status'
    must_be_initialized = False

    def handle_xml(self, xml: Element) -> bytes:
        """Handles <check_feed> command.

        Return:
            Response string for <check_feed> command.
        """
        status = self._daemon.check_feed_self_test()
        # The daemon must provide a non-empty dict of status entries.
        if not isinstance(status, dict) or not status:
            raise OspdCommandError('No feed status available', 'check_feed')
        feed = Element('feed')
        for key, value in status.items():
            SubElement(feed, key).text = value
        return simple_response_str('check_feed', 200, 'OK', [feed])
class GetVersion(BaseCommand):
    """Implements the OSP <get_version> command."""

    name = "get_version"
    description = 'Return various version information'
    must_be_initialized = False

    def handle_xml(self, xml: Element) -> bytes:
        """Handles <get_version> command.

        Return:
            Response string for <get_version> command.
        """

        def _make_element(tag, entries):
            # Build <tag> with one text child per (child_tag, text) pair.
            parent = Element(tag)
            for child_tag, child_text in entries:
                SubElement(parent, child_tag).text = child_text
            return parent

        protocol = _make_element(
            'protocol',
            [
                ('name', 'OSP'),
                ('version', self._daemon.get_protocol_version()),
            ],
        )
        daemon = _make_element(
            'daemon',
            [
                ('name', self._daemon.get_daemon_name()),
                ('version', self._daemon.get_daemon_version()),
            ],
        )
        scanner = _make_element(
            'scanner',
            [
                ('name', self._daemon.get_scanner_name()),
                ('version', self._daemon.get_scanner_version()),
            ],
        )
        content = [protocol, daemon, scanner]
        vts_version = self._daemon.get_vts_version()
        if vts_version:
            # Feed information is only reported once VTs are loaded.
            vts = _make_element(
                'vts',
                [
                    ('version', vts_version),
                    ('vendor', self._daemon.get_feed_vendor()),
                    ('home', self._daemon.get_feed_home()),
                    ('name', self._daemon.get_feed_name()),
                ],
            )
            content.append(vts)
        return simple_response_str('get_version', 200, 'OK', content)
# Allowlist of report titles accepted by the <get_performance> command.
# Each entry is a regular expression matched against the requested title
# before it is passed to the external `gvmcg` helper program.
GVMCG_TITLES = [
    'cpu-.*',
    'proc',
    'mem',
    'swap',
    'load',
    'df-.*',
    'disk-sd[a-z][0-9]-rw',
    'disk-sd[a-z][0-9]-load',
    'disk-sd[a-z][0-9]-io-load',
    'interface-eth.*-traffic',
    'interface-eth.*-err-rate',
    'interface-eth.*-err',
    'sensors-.*_temperature-.*',
    'sensors-.*_fanspeed-.*',
    'sensors-.*_voltage-.*',
    'titles',
] # type: List[str]
class GetPerformance(BaseCommand):
    """Implements the OSP <get_performance> command."""

    name = "get_performance"
    description = 'Return system report'
    attributes = {
        'start': 'Time of first data point in report.',
        'end': 'Time of last data point in report.',
        'title': 'Name of report.',
    }
    must_be_initialized = False

    def handle_xml(self, xml: Element) -> bytes:
        """Handles <get_performance> by invoking the external `gvmcg` tool.

        @return: Response string for <get_performance> command.
        """
        cmd = ['gvmcg']
        # start/end must be plain integers; append them in this order.
        for attr_name, error_text in (
            ('start', 'Start argument must be integer.'),
            ('end', 'End argument must be integer.'),
        ):
            value = xml.attrib.get(attr_name)
            if not value:
                continue
            try:
                int(value)
            except ValueError:
                raise OspdCommandError(error_text, 'get_performance') from None
            cmd.append(value)
        titles = xml.attrib.get('titles')
        if titles:
            # Title must match one of the known gvmcg report patterns and
            # must not contain shell control characters.
            combined = "(" + ")|(".join(GVMCG_TITLES) + ")"
            forbidden = "^[^|&;]+$"
            if re.match(combined, titles) and re.match(forbidden, titles):
                cmd.append(titles)
            else:
                raise OspdCommandError(
                    'Arguments not allowed', 'get_performance'
                )
        try:
            output = subprocess.check_output(cmd)
        except (subprocess.CalledProcessError, OSError) as e:
            raise OspdCommandError(
                f'Bogus get_performance format. {e}', 'get_performance'
            ) from None
        return simple_response_str(
            'get_performance', 200, 'OK', output.decode()
        )
class GetScannerDetails(BaseCommand):
    """Implements the OSP <get_scanner_details> command."""

    name = 'get_scanner_details'
    description = 'Return scanner description and parameters'
    must_be_initialized = True

    def handle_xml(self, xml: Element) -> bytes:
        """Handles <get_scanner_details> command.

        @return: Response string for <get_scanner_details> command.
        """
        show_all = xml.get('list_all') == '1'
        description = Element('description')
        description.text = self._daemon.get_scanner_description()
        scanner_params = self._daemon.get_scanner_params()
        if not show_all:
            # By default hide parameters not meant to be shown to clients.
            scanner_params = {
                key: value
                for (key, value) in scanner_params.items()
                if value.get('visible_for_client')
            }
        details = [
            description,
            OspResponse.create_scanner_params_xml(scanner_params),
        ]
        return simple_response_str('get_scanner_details', 200, 'OK', details)
class DeleteScan(BaseCommand):
    """Implements the OSP <delete_scan> command."""

    name = 'delete_scan'
    description = 'Delete a finished scan.'
    attributes = {'scan_id': 'ID of scan to delete.'}
    must_be_initialized = False

    def handle_xml(self, xml: Element) -> bytes:
        """Handles <delete_scan> command.

        @return: Response string for <delete_scan> command.
        """
        scan_id = xml.get('scan_id')
        if scan_id is None:
            return simple_response_str(
                'delete_scan', 404, 'No scan_id attribute'
            )
        if not self._daemon.scan_exists(scan_id):
            message = f"Failed to find scan '{scan_id}'"
            logger.debug(message)
            return simple_response_str('delete_scan', 404, message)
        # Reap a possibly finished scan process before deleting its data.
        self._daemon.check_scan_process(scan_id)
        if not self._daemon.delete_scan(scan_id):
            # Running scans may not be deleted.
            raise OspdCommandError('Scan in progress', 'delete_scan')
        return simple_response_str('delete_scan', 200, 'OK')
class GetVts(BaseCommand):
    name = 'get_vts'
    description = 'List of available vulnerability tests.'
    attributes = {
        'vt_id': 'ID of a specific vulnerability test to get.',
        'filter': 'Optional filter to get an specific vt collection.',
    }
    must_be_initialized = True

    def handle_xml(self, xml: Element) -> Iterator[bytes]:
        """Handles <get_vts> command.

        Writes the vt collection on the stream in chunks (this method is a
        generator). The element accepts two optional arguments:
        vt_id argument receives a single vt id.
        filter argument receives a filter selecting a sub set of vts.
        If both arguments are given, the vts which match with the filter
        are returned.

        @return: Response string for <get_vts> command on fail.
        """
        # Flag the VT cache busy while streaming the response; every exit
        # path below must restore it to True.
        self._daemon.vts.is_cache_available = False
        xml_helper = XmlStringHelper()
        vt_id = xml.get('vt_id')
        vt_filter = xml.get('filter')
        _details = xml.get('details')
        version_only = xml.get('version_only')
        # Details are included unless explicitly disabled with details="0".
        vt_details = False if _details == '0' else True
        if self._daemon.vts and vt_id and vt_id not in self._daemon.vts:
            self._daemon.vts.is_cache_available = True
            text = f"Failed to find vulnerability test '{vt_id}'"
            raise OspdCommandError(text, 'get_vts', 404)
        filtered_vts = None
        if vt_filter and not version_only:
            try:
                filtered_vts = self._daemon.vts_filter.get_filtered_vts_list(
                    self._daemon.vts, vt_filter
                )
            except OspdCommandError as filter_error:
                # Release the cache before propagating the filter error.
                self._daemon.vts.is_cache_available = True
                raise filter_error
        if not version_only:
            vts_selection = self._daemon.get_vts_selection_list(
                vt_id, filtered_vts
            )
        # List of xml pieces with the generator to be iterated
        yield xml_helper.create_response('get_vts')
        begin_vts_tag = xml_helper.create_element('vts')
        # Add Feed information as attributes
        begin_vts_tag = xml_helper.add_attr(
            begin_vts_tag, "vts_version", self._daemon.get_vts_version()
        )
        begin_vts_tag = xml_helper.add_attr(
            begin_vts_tag, "feed_vendor", self._daemon.get_feed_vendor()
        )
        begin_vts_tag = xml_helper.add_attr(
            begin_vts_tag, "feed_home", self._daemon.get_feed_home()
        )
        begin_vts_tag = xml_helper.add_attr(
            begin_vts_tag, "feed_name", self._daemon.get_feed_name()
        )
        val = len(self._daemon.vts)
        begin_vts_tag = xml_helper.add_attr(begin_vts_tag, "total", val)
        # "sent" reports the size of the filtered subset actually streamed.
        if filtered_vts and not version_only:
            val = len(filtered_vts)
            begin_vts_tag = xml_helper.add_attr(begin_vts_tag, "sent", val)
        if self._daemon.vts.sha256_hash is not None:
            begin_vts_tag = xml_helper.add_attr(
                begin_vts_tag, "sha256_hash", self._daemon.vts.sha256_hash
            )
        yield begin_vts_tag
        if not version_only:
            # Stream each VT element individually to keep memory usage low.
            for vt in self._daemon.get_vt_iterator(vts_selection, vt_details):
                yield xml_helper.add_element(self._daemon.get_vt_xml(vt))
        yield xml_helper.create_element('vts', end=True)
        yield xml_helper.create_response('get_vts', end=True)
        self._daemon.vts.is_cache_available = True
class StopScan(BaseCommand):
    """Implements the OSP <stop_scan> command."""

    name = 'stop_scan'
    description = 'Stop a currently running scan.'
    attributes = {'scan_id': 'ID of scan to stop.'}
    must_be_initialized = True

    def handle_xml(self, xml: Element) -> bytes:
        """Handles <stop_scan> command.

        @return: Response string for <stop_scan> command.
        """
        scan_id = xml.get('scan_id')
        if not scan_id:
            raise OspdCommandError('No scan_id attribute', 'stop_scan')
        self._daemon.stop_scan(scan_id)
        # Wait for the scan process to finish so the client only receives
        # the response once the scan has really stopped.
        scan_process = self._daemon.scan_processes.get(scan_id)
        if scan_process is not None:
            scan_process.join()
        return simple_response_str('stop_scan', 200, 'OK')
class GetScans(BaseCommand):
    """Implements the OSP <get_scans> command."""

    name = 'get_scans'
    description = 'Get information about a scan in buffer.'
    attributes = {
        'scan_id': 'Mandatory ID of a specific scan to get.',
        'details': 'Whether to return the full scan report.',
        'pop_results': 'Whether to remove the fetched results.',
        'max_results': 'Maximum number of results to fetch.',
        'progress': 'Whether to return a detailed scan progress',
    }
    must_be_initialized = False

    def handle_xml(self, xml: Element) -> bytes:
        """Handles <get_scans> command.

        @return: Response string for <get_scans> command.
        """
        scan_id = xml.get('scan_id')
        if not scan_id:
            raise OspdCommandError('No scan_id attribute', 'get_scans')
        # Details are on by default; they are only off when details="0".
        details = xml.get('details') != '0'
        pop_res = xml.get('pop_results')
        pop_res = pop_res and pop_res == '1'
        max_res = xml.get('max_results')
        if max_res:
            max_res = int(max_res)
        progress = xml.get('progress')
        progress = progress and progress == '1'
        if scan_id not in self._daemon.scan_collection.ids_iterator():
            return simple_response_str(
                'get_scans', 404, f"Failed to find scan '{scan_id}'"
            )
        # Reap a possibly finished scan process before reading its state.
        self._daemon.check_scan_process(scan_id)
        scan = self._daemon.get_scan_xml(
            scan_id, details, pop_res, max_res, progress
        )
        return simple_response_str('get_scans', 200, 'OK', [scan])
class StartScan(BaseCommand):
    """Implements the OSP <start_scan> command."""

    name = 'start_scan'
    description = 'Start a new scan.'
    attributes = {
        'target': 'Target host to scan',
        'ports': 'Ports list to scan',
        'scan_id': 'Optional UUID value to use as scan ID',
        # Fixed typos in the help text ("nummer"/"target").
        'parallel': 'Optional number of parallel targets to scan',
    }
    must_be_initialized = False

    def get_elements(self):
        """Return the supported elements, merging the scanner parameter
        descriptions provided by the daemon into 'scanner_params'."""
        elements = {}
        if self.elements:
            elements.update(self.elements)
        scanner_params = elements.get('scanner_params', {}).copy()
        elements['scanner_params'] = scanner_params
        scanner_params.update(
            {
                k: v['description']
                for k, v in self._daemon.scanner_params.items()
            }
        )
        return elements

    def handle_xml(self, xml: Element) -> bytes:
        """Handles <start_scan> command.

        Return:
            Response string for <start_scan> command.
        """
        # Hold the collection lock so the queue-limit check and the scan
        # creation happen atomically with respect to other commands.
        with self._daemon.scan_collection.scan_collection_lock:
            current_queued_scans = self._daemon.get_count_queued_scans()
            if (
                self._daemon.max_queued_scans
                and current_queued_scans >= self._daemon.max_queued_scans
            ):
                logger.info(
                    'Maximum number of queued scans set to %d reached.',
                    self._daemon.max_queued_scans,
                )
                raise OspdCommandError(
                    'Maximum number of queued scans set to '
                    f'{str(self._daemon.max_queued_scans)} reached.',
                    'start_scan',
                )
            target_str = xml.get('target')
            ports_str = xml.get('ports')
            # For backward compatibility, if target and ports attributes
            # are set, the <targets> element is ignored.
            if target_str is None or ports_str is None:
                target_element = xml.find('targets/target')
                if target_element is None:
                    raise OspdCommandError('No targets or ports', 'start_scan')
                else:
                    scan_target = OspRequest.process_target_element(
                        target_element
                    )
            else:
                scan_target = {
                    'hosts': target_str,
                    'ports': ports_str,
                    'credentials': {},
                    'exclude_hosts': '',
                    'finished_hosts': '',
                    'options': {},
                }
                logger.warning(
                    "Legacy start scan command format is being used, which "
                    "is deprecated since 20.08. Please read the documentation "
                    "for start scan command."
                )
            scan_id = xml.get('scan_id')
            if (
                scan_id is not None
                and scan_id != ''
                and not valid_uuid(scan_id)
            ):
                raise OspdCommandError('Invalid scan_id UUID', 'start_scan')
            if xml.get('parallel'):
                # Fixed typo in the log message ("sice" -> "since").
                logger.warning(
                    "parallel attribute of start_scan will be ignored, since "
                    "parallel scan is not supported by OSPd."
                )
            scanner_params = xml.find('scanner_params')
            if scanner_params is None:
                scanner_params = {}
            # params are the parameters we got from the XML.
            params = self._daemon.preprocess_scan_params(scanner_params)
            # VTS is an optional element. If present it must not be empty.
            vt_selection = {}  # type: Dict
            scanner_vts = xml.find('vt_selection')
            if scanner_vts is not None:
                if len(scanner_vts) == 0:
                    raise OspdCommandError('VTs list is empty', 'start_scan')
                else:
                    vt_selection = OspRequest.process_vts_params(scanner_vts)
            scan_params = self._daemon.process_scan_params(params)
            scan_id_aux = scan_id
            scan_id = self._daemon.create_scan(
                scan_id, scan_target, scan_params, vt_selection
            )
            # create_scan returns a falsy value when the scan already
            # exists; answer 100 Continue with the originally given id.
            if not scan_id:
                id_ = Element('id')
                id_.text = scan_id_aux
                return simple_response_str('start_scan', 100, 'Continue', id_)
            logger.info(
                'Scan %s added to the queue in position %d.',
                scan_id,
                self._daemon.get_count_queued_scans() + 1,
            )
            id_ = Element('id')
            id_.text = scan_id
            return simple_response_str('start_scan', 200, 'OK', id_)
class GetMemoryUsage(BaseCommand):
    """Implements the OSP <get_memory_usage> command."""

    name = "get_memory_usage"
    description = "print the memory consumption of all processes"
    attributes = {
        'unit': 'Unit for displaying memory consumption (b = bytes, '
        'kb = kilobytes, mb = megabytes). Defaults to b.'
    }
    must_be_initialized = False

    @staticmethod
    def _get_memory(value: int, unit: str = None) -> str:
        """Convert a byte count into the requested unit; unknown or
        missing units fall back to plain bytes."""
        if not unit:
            return str(value)
        divisors = {'kb': 1024, 'mb': 1024 * 1024}
        divisor = divisors.get(unit.lower())
        if divisor is None:
            return str(value)
        # Decimal keeps exact fractional values in the textual output.
        return str(Decimal(value) / divisor)

    @staticmethod
    def _create_process_element(name: str, pid: int):
        """Build a <process> element carrying name and pid attributes."""
        element = Element('process')
        element.set('name', name)
        element.set('pid', str(pid))
        return element

    @classmethod
    def _add_memory_info(
        cls, process_element: Element, pid: int, unit: str = None
    ):
        """Append rss/vms/shared memory elements for the given pid."""
        try:
            ps_process = psutil.Process(pid)
        except psutil.NoSuchProcess:
            # Process vanished in the meantime; report nothing for it.
            return
        memory = ps_process.memory_info()

        def _append(tag, amount):
            mem_element = Element(tag)
            mem_element.text = cls._get_memory(amount, unit)
            process_element.append(mem_element)

        _append('rss', memory.rss)
        _append('vms', memory.vms)
        _append('shared', memory.shared)

    def handle_xml(self, xml: Element) -> bytes:
        """Report memory usage of the daemon and all its child processes."""
        unit = xml.get('unit')
        processes_element = Element('processes')
        # The current (main) process comes first, then all children.
        main_process = multiprocessing.current_process()
        for proc in [main_process] + multiprocessing.active_children():
            process_element = self._create_process_element(proc.name, proc.pid)
            self._add_memory_info(process_element, proc.pid, unit)
            processes_element.append(process_element)
        return simple_response_str('get_memory', 200, 'OK', processes_element)
ospd-openvas-22.9.0/ospd/command/initsubclass.py 0000664 0000000 0000000 00000002050 15011310720 0021645 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
# pylint: disable=bad-mcs-classmethod-argument, no-member
# True on Python >= 3.6, where ``type`` natively supports the PEP 487
# ``__init_subclass__`` hook.
_has_init_subclass = hasattr( # pylint: disable=invalid-name
    type, "__init_subclass__"
)
if not _has_init_subclass:

    class InitSubclassMeta(type):
        """Metaclass that implements PEP 487 protocol"""

        def __new__(cls, name, bases, ns, **kwargs):
            # Pop a plain-function __init_subclass__ from the class body
            # and re-install it as a classmethod, as PEP 487 specifies.
            __init_subclass__ = ns.pop("__init_subclass__", None)
            if __init_subclass__:
                __init_subclass__ = classmethod(__init_subclass__)
                ns["__init_subclass__"] = __init_subclass__
            return super().__new__(cls, name, bases, ns, **kwargs)

        def __init__(cls, name, bases, ns, **kwargs):
            super().__init__(name, bases, ns)
            # Invoke the nearest parent hook with the newly created class,
            # mirroring the native behavior on subclass creation.
            super_class = super(cls, cls)
            if hasattr(super_class, "__init_subclass__"):
                super_class.__init_subclass__.__func__(cls, **kwargs)

else:
    # Native support available: plain ``type`` already implements the hook.
    InitSubclassMeta = type # type: ignore
ospd-openvas-22.9.0/ospd/command/registry.py 0000664 0000000 0000000 00000000774 15011310720 0021025 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
from typing import List
# Module-private registry of command classes, kept in registration order.
_COMMAND_REGISTRY = []


def register_command(command: object) -> None:
    """Register a command class"""
    _COMMAND_REGISTRY.append(command)


def remove_command(command: object) -> None:
    """Unregister a command class"""
    _COMMAND_REGISTRY.remove(command)


def get_commands() -> List[object]:
    """Return the list of registered command classes"""
    return _COMMAND_REGISTRY
ospd-openvas-22.9.0/ospd/config.py 0000664 0000000 0000000 00000001756 15011310720 0017005 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
"""
Module to store ospd configuration settings
"""
import configparser
import logging
from pathlib import Path
from typing import Dict
logger = logging.getLogger(__name__)
class Config:
    """Thin wrapper around :mod:`configparser` holding ospd settings."""

    def __init__(self, section: str = 'main') -> None:
        # Parser kept for interface compatibility; load() builds its own.
        self._parser = configparser.ConfigParser(default_section=section)
        self._config = {}  # type: Dict
        self._defaults = {}  # type: Dict

    def load(self, filepath: Path, def_section: str = 'main') -> None:
        """Read a configuration file and merge its values.

        Values of the default section are accumulated into the defaults
        dict returned by :meth:`defaults`.
        """
        path = filepath.expanduser()
        parser = configparser.ConfigParser(default_section=def_section)
        with path.open() as config_file:
            parser.read_file(config_file)
        self._defaults.update(parser.defaults())
        section_values = self._config.setdefault(def_section, dict())
        for key, value in parser.items(def_section):
            section_values[key] = value

    def defaults(self) -> Dict:
        """Return the accumulated default settings."""
        return self._defaults
ospd-openvas-22.9.0/ospd/cvss.py 0000664 0000000 0000000 00000011014 15011310720 0016502 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
"""Common Vulnerability Scoring System handling class."""
import logging
import math
from typing import List, Dict, Optional
logger = logging.getLogger(__name__)

# CVSS v2 base metric weights, indexed by metric then by its value letter.
CVSS_V2_METRICS = {
    'AV': {'L': 0.395, 'A': 0.646, 'N': 1.0},
    'AC': {'H': 0.35, 'M': 0.61, 'L': 0.71},
    'Au': {'M': 0.45, 'S': 0.56, 'N': 0.704},
    'C': {'N': 0.0, 'P': 0.275, 'C': 0.660},
    'I': {'N': 0.0, 'P': 0.275, 'C': 0.660},
    'A': {'N': 0.0, 'P': 0.275, 'C': 0.660},
}  # type: Dict

# CVSS v3 base metric weights. PR has two tables depending on whether the
# Scope metric is Unchanged (PR_SU) or Changed (PR_SC).
CVSS_V3_METRICS = {
    'AV': {'N': 0.85, 'A': 0.62, 'L': 0.55, 'P': 0.2},
    'AC': {'L': 0.77, 'H': 0.44},
    'PR_SU': {'N': 0.85, 'L': 0.62, 'H': 0.27},
    'PR_SC': {'N': 0.85, 'L': 0.68, 'H': 0.50},
    'UI': {'N': 0.85, 'R': 0.62},
    'S': {'U': False, 'C': True},
    'C': {'H': 0.56, 'L': 0.22, 'N': 0},
    'I': {'H': 0.56, 'L': 0.22, 'N': 0},
    'A': {'H': 0.56, 'L': 0.22, 'N': 0},
}  # type: Dict


class CVSS(object):
    """Handle cvss vectors and calculate the cvss scoring"""

    @staticmethod
    def roundup(value: float) -> float:
        """It rounds up to 1 decimal (CVSS v3 'Round up' semantics)."""
        return math.ceil(value * 10) / 10

    @staticmethod
    def _parse_cvss_base_vector(cvss_vector: str) -> List:
        """Parse a string containing a cvss base vector.

        Arguments:
            cvss_vector (str): cvss base vector to be parsed.

        Return list with the string values of each vector element.

        Raises IndexError if an element has no ':' separator and ValueError
        (via unpacking in the callers) if the element count is wrong.
        """
        vector_as_list = cvss_vector.split('/')
        return [item.split(':')[1] for item in vector_as_list]

    @classmethod
    def cvss_base_v2_value(cls, cvss_base_vector: str) -> Optional[float]:
        """Calculate the cvss base score from a cvss base vector
        for cvss version 2.

        Arguments:
            cvss_base_vector (str) Cvss base vector v2.

        Return the calculated score, None for empty or malformed vectors.
        """
        if not cvss_base_vector:
            return None
        try:
            # IndexError is raised for elements without a ':' separator;
            # previously it escaped and crashed the caller.
            _av, _ac, _au, _c, _i, _a = cls._parse_cvss_base_vector(
                cvss_base_vector
            )
        except (ValueError, IndexError):
            logger.warning('Invalid severity vector %s', cvss_base_vector)
            return None
        # NOTE: unknown metric letters are not validated here; .get()
        # would yield None and fail in the arithmetic below.
        _impact = 10.41 * (
            1
            - (1 - CVSS_V2_METRICS['C'].get(_c))
            * (1 - CVSS_V2_METRICS['I'].get(_i))
            * (1 - CVSS_V2_METRICS['A'].get(_a))
        )
        _exploitability = (
            20
            * CVSS_V2_METRICS['AV'].get(_av)
            * CVSS_V2_METRICS['AC'].get(_ac)
            * CVSS_V2_METRICS['Au'].get(_au)
        )
        # f(impact) factor as defined by the CVSS v2 equations.
        f_impact = 0 if _impact == 0 else 1.176
        cvss_base = ((0.6 * _impact) + (0.4 * _exploitability) - 1.5) * f_impact
        return round(cvss_base, 1)

    @classmethod
    def cvss_base_v3_value(cls, cvss_base_vector: str) -> Optional[float]:
        """Calculate the cvss base score from a cvss base vector
        for cvss version 3.

        Arguments:
            cvss_base_vector (str) Cvss base vector v3
                (including the leading 'CVSS:3.x' element).

        Return the calculated score, None on fail.
        """
        if not cvss_base_vector:
            return None
        try:
            (
                _ver,
                _av,
                _ac,
                _pr,
                _ui,
                _s,
                _c,
                _i,
                _a,
            ) = cls._parse_cvss_base_vector(cvss_base_vector)
        except (ValueError, IndexError):
            logger.warning('Invalid severity vector %s', cvss_base_vector)
            return None
        scope_changed = CVSS_V3_METRICS['S'].get(_s)
        # Impact Sub-Score base (ISCBase) per the CVSS v3 specification.
        isc_base = 1 - (
            (1 - CVSS_V3_METRICS['C'].get(_c))
            * (1 - CVSS_V3_METRICS['I'].get(_i))
            * (1 - CVSS_V3_METRICS['A'].get(_a))
        )
        if scope_changed:
            _priv_req = CVSS_V3_METRICS['PR_SC'].get(_pr)
        else:
            _priv_req = CVSS_V3_METRICS['PR_SU'].get(_pr)
        _exploitability = (
            8.22
            * CVSS_V3_METRICS['AV'].get(_av)
            * CVSS_V3_METRICS['AC'].get(_ac)
            * _priv_req
            * CVSS_V3_METRICS['UI'].get(_ui)
        )
        if scope_changed:
            _impact = 7.52 * (isc_base - 0.029) - 3.25 * pow(
                isc_base - 0.02, 15
            )
            _base_score = min(1.08 * (_impact + _exploitability), 10)
        else:
            _impact = 6.42 * isc_base
            _base_score = min(_impact + _exploitability, 10)
        if _impact > 0:
            return cls.roundup(_base_score)
        # Zero impact always yields a 0 base score.
        return 0
ospd-openvas-22.9.0/ospd/datapickler.py 0000664 0000000 0000000 00000010251 15011310720 0020011 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
"""Pickle Handler class"""
import logging
import pickle
import os
from hashlib import sha256
from pathlib import Path
from typing import BinaryIO, Any
from ospd.errors import OspdCommandError
logger = logging.getLogger(__name__)

# Stored scan-info files must only be readable/writable by the daemon user.
OWNER_ONLY_RW_PERMISSION = 0o600


class DataPickler:
    """Pickle scan information to disk and restore it.

    Files are created with owner-only permissions; a sha256 hash of the
    pickled bytes is returned on store and verified on load.
    """

    def __init__(self, storage_path: str):
        # Base directory for all stored pickle files.
        self._storage_path = storage_path
        # Raw file descriptor from the last _fd_opener() call, or None.
        self._storage_fd = None

    def _fd_opener(self, path: str, flags: int) -> BinaryIO:
        """Custom opener for open() enforcing owner-only permissions.

        Bug fix: the ``flags`` passed by open() ('wb' implies
        O_WRONLY|O_CREAT|O_TRUNC) are now honored instead of being
        replaced by O_CREAT|O_WRONLY only. The previous behavior dropped
        O_TRUNC, so overwriting a file with shorter data left stale
        trailing bytes behind, breaking the integrity hash on load.
        """
        os.umask(0)
        self._storage_fd = os.open(path, flags, mode=OWNER_ONLY_RW_PERMISSION)
        return self._storage_fd

    def _fd_close(self) -> None:
        """Close the raw descriptor if still open and reset the cache.

        Bug fix: ``os.open`` returns an int, which has no ``close()``
        method — the old ``self._storage_fd.close()`` always raised an
        AttributeError that was silently swallowed and left the stale fd
        cached. The descriptor is normally closed already by the open()
        context manager; the resulting OSError is ignored.
        """
        fd = self._storage_fd
        self._storage_fd = None
        if fd is None:
            return
        try:
            os.close(fd)
        except OSError:
            pass

    def remove_file(self, filename: str) -> None:
        """Remove the file containing a scan_info pickled object"""
        storage_file_path = Path(self._storage_path) / filename
        try:
            storage_file_path.unlink()
        except Exception as e:  # pylint: disable=broad-except
            # Best effort: a missing file is only logged, never fatal.
            logger.error('Not possible to delete %s. %s', filename, e)

    def store_data(self, filename: str, data_object: Any) -> str:
        """Pickle an object and store it in the given file.

        Return:
            The sha256 hex digest of the pickled bytes, to be passed to
            :meth:`load_data` for integrity checking.

        Raises:
            OspdCommandError: if the directory, the pickling or the write
                fails.
        """
        storage_file_path = Path(self._storage_path) / filename
        try:
            # create parent directories recursively
            parent_dir = storage_file_path.parent
            parent_dir.mkdir(parents=True, exist_ok=True)
        except Exception as e:
            raise OspdCommandError(
                f'Not possible to access dir for (unknown). {e}',
                'start_scan',
            ) from e
        try:
            pickled_data = pickle.dumps(data_object)
        except pickle.PicklingError as e:
            raise OspdCommandError(
                f'Not possible to pickle scan info for (unknown). {e}',
                'start_scan',
            ) from e
        try:
            with open(
                str(storage_file_path), 'wb', opener=self._fd_opener
            ) as scan_info_f:
                scan_info_f.write(pickled_data)
        except Exception as e:  # pylint: disable=broad-except
            self._fd_close()
            raise OspdCommandError(
                f'Not possible to store scan info for (unknown). {e}',
                'start_scan',
            ) from e
        self._fd_close()
        return self._pickled_data_hash_generator(pickled_data)

    def load_data(self, filename: str, original_data_hash: str) -> Any:
        """Unpickle the stored data in the filename. Perform an
        integrity check of the read data with the hash generated
        with the original data.

        Return:
            Dictionary containing the scan info. None otherwise.
        """
        storage_file_path = Path(self._storage_path) / filename
        pickled_data = None
        try:
            with storage_file_path.open('rb') as scan_info_f:
                pickled_data = scan_info_f.read()
        except Exception as e:  # pylint: disable=broad-except
            logger.error(
                'Not possible to read pickled data from %s. %s', filename, e
            )
            return
        unpickled_scan_info = None
        try:
            unpickled_scan_info = pickle.loads(pickled_data)
        except pickle.UnpicklingError as e:
            logger.error(
                'Not possible to read pickled data from %s. %s', filename, e
            )
            return
        # Reject any file whose on-disk bytes do not match the hash that
        # store_data() returned for them.
        pickled_scan_info_hash = self._pickled_data_hash_generator(pickled_data)
        if original_data_hash != pickled_scan_info_hash:
            logger.error('Unpickled data from %s corrupted.', filename)
            return
        return unpickled_scan_info

    def _pickled_data_hash_generator(self, pickled_data: bytes) -> str:
        """Calculate the sha256 hash of a pickled data"""
        if not pickled_data:
            return
        hash_sha256 = sha256()
        hash_sha256.update(pickled_data)
        return hash_sha256.hexdigest()
ospd-openvas-22.9.0/ospd/errors.py 0000664 0000000 0000000 00000002272 15011310720 0017046 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
"""OSP class for handling errors."""
from ospd.xml import simple_response_str
class OspdError(Exception):
    """Base error class for all Ospd related errors"""


class RequiredArgument(OspdError):
    """Raised if a required argument/parameter is missing

    Derives from :py:class:`OspdError`
    """

    def __init__(self, function: str, argument: str) -> None:
        # pylint: disable=super-init-not-called
        # Name of the function missing the argument.
        self.function = function
        # Name of the missing argument.
        self.argument = argument

    def __str__(self) -> str:
        return '{}: Argument {} is required'.format(
            self.function, self.argument
        )


class OspdCommandError(OspdError):
    """This is an exception that will result in an error message to the
    client"""

    def __init__(
        self, message: str, command: str = 'osp', status: int = 400
    ) -> None:
        super().__init__(message)
        # Keep the raw pieces so as_xml() can rebuild the response.
        self.message = message
        self.command = command
        self.status = status

    def as_xml(self) -> str:
        """Return the error in xml format."""
        return simple_response_str(self.command, self.status, self.message)
ospd-openvas-22.9.0/ospd/logger.py 0000664 0000000 0000000 00000005177 15011310720 0017020 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
import configparser
import logging
import os
import time
from logging.config import fileConfig
from pathlib import Path
from typing import Optional
# Default definitions consumed by logging.config.fileConfig().
# Console handler: used when running in the foreground.
DEFAULT_HANDLER_CONSOLE = {
    'class': 'logging.StreamHandler',
    'level': 'INFO',
    'formatter': 'file',
    'args': 'sys.stdout,',
}
# File handler: the target file is patched in by init_logging();
# /dev/null is only a placeholder.
DEFAULT_HANDLER_FILE = {
    'class': 'handlers.WatchedFileHandler',
    'level': 'INFO',
    'formatter': 'file',
    'args': '("/dev/null", "a")',
}
# Syslog handler: used when daemonized without an explicit log file.
DEFAULT_HANDLER_SYSLOG = {
    'class': 'handlers.SysLogHandler',
    'level': 'INFO',
    'formatter': 'syslog',
    'args': '("/dev/log", handlers.SysLogHandler.LOG_USER)',
}
# All handler/formatter keys that fileConfig() will instantiate.
DEFAULT_HANDLERS = {'keys': 'console,file,syslog'}
DEFAULT_FORMATTERS = {'keys': 'file,syslog'}
# Line format for file/console output, prefixed with the daemon PID.
DEFAULT_FORMATTER_FILE = {
    'format': f'OSPD[{os.getpid()}] %(asctime)s: %(levelname)s: '
    '(%(name)s) %(message)s',
    'datefmt': '',
}
# Syslog adds its own timestamp, so none is included here.
DEFAULT_FORMATTER_SYSLOG = {
    'format': f'OSPD[{os.getpid()}] %(levelname)s: (%(name)s) %(message)s',
    'datefmt': '',
}
DEFAULT_LOGGERS = {'keys': 'root'}
# Root logger defaults; the handler set is adjusted by init_logging().
DEFAULT_ROOT_LOGGER = {
    'level': 'NOTSET',
    'handlers': 'file',
    'propagate': '0',
}
def init_logging(
    log_level: int,
    *,
    log_file: Optional[str] = None,
    log_config: Optional[str] = None,
    foreground: Optional[bool] = False,
) -> None:
    """Configure the root logger for the daemon.

    Arguments:
        log_level: Level applied to all configured handlers.
        log_file: Optional path of a log file to append to.
        log_config: Optional path of a logging config file which, if it
            exists, overrides the defaults built here.
        foreground: If True, log to the console instead of (or in
            addition to) a file; otherwise fall back to syslog.
    """
    config = configparser.ConfigParser()
    config['handlers'] = DEFAULT_HANDLERS
    config['formatters'] = DEFAULT_FORMATTERS
    config['formatter_file'] = DEFAULT_FORMATTER_FILE
    config['formatter_syslog'] = DEFAULT_FORMATTER_SYSLOG
    config['handler_console'] = DEFAULT_HANDLER_CONSOLE
    config['handler_syslog'] = DEFAULT_HANDLER_SYSLOG
    config['handler_file'] = DEFAULT_HANDLER_FILE
    config['loggers'] = DEFAULT_LOGGERS
    config['logger_root'] = DEFAULT_ROOT_LOGGER
    # Choose root handlers: console in foreground, file when requested,
    # syslog when daemonized without a log file.
    if foreground:
        config['logger_root']['handlers'] = 'console'
    if log_file:
        if foreground:
            config['logger_root']['handlers'] = 'console,file'
        else:
            config['logger_root']['handlers'] = 'file'
        config['handler_file']['args'] = f"('{log_file}', 'a')"
    if not foreground and not log_file:
        config['logger_root']['handlers'] = 'syslog'
    # NOTE(review): log_level is annotated as int, but configparser only
    # accepts string values — callers appear to pass a level name string;
    # confirm against the argument parser.
    config['handler_file']['level'] = log_level
    config['handler_console']['level'] = log_level
    config['handler_syslog']['level'] = log_level
    # Bug fix: Path(None) raises TypeError. Only look for an override
    # config file when a path was actually supplied.
    if log_config:
        log_config_path = Path(log_config)
        if log_config_path.exists():
            config.read(log_config)
    fileConfig(config, disable_existing_loggers=False)
    # Emit timestamps in UTC.
    logging.Formatter.converter = time.gmtime
    logging.getLogger()
ospd-openvas-22.9.0/ospd/main.py 0000664 0000000 0000000 00000007422 15011310720 0016460 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
import logging
import os
import sys
import atexit
import signal
from functools import partial
from typing import Type, Optional
from pathlib import Path
from ospd.misc import go_to_background, create_pid
from ospd.ospd import OSPDaemon
from ospd.parser import create_parser, ParserType
from ospd.server import TlsServer, UnixSocketServer, BaseServer
from ospd.logger import init_logging
COPYRIGHT = """Copyright (C) 2014-2021 Greenbone AG
License GPLv2+: GNU GPL version 2 or later
This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law."""
logger = logging.getLogger(__name__)
def print_version(daemon: OSPDaemon, file=sys.stdout):
    """Prints the server version and license information.

    Output goes to ``file`` (stdout by default), one line per item,
    followed by a blank line and the COPYRIGHT banner.
    """
    banner_lines = (
        f"OSP Server for {daemon.get_scanner_name()}: "
        f"{daemon.get_server_version()}",
        f"OSP: {daemon.get_protocol_version()}",
        f"{daemon.get_daemon_name()}: {daemon.get_daemon_version()}",
        "",
        COPYRIGHT,
    )
    for line in banner_lines:
        print(line, file=file)
def exit_cleanup(
    pidfile: str,
    server: BaseServer,
    daemon: OSPDaemon,
    _signum=None,
    _frame=None,
) -> None:
    """Removes the pidfile before ending the daemon."""
    # Ignore further SIGINT so a second Ctrl-C cannot interrupt the cleanup.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    pidpath = Path(pidfile)
    if not pidpath.is_file():
        return
    with pidpath.open(encoding='utf-8') as f:
        owner_pid = int(f.read())
    # Only the process that wrote the pidfile performs the cleanup.
    if owner_pid != os.getpid():
        return
    logger.debug("Performing exit clean up")
    daemon.daemon_exit_cleanup()
    logger.info("Shutting-down server ...")
    server.close()
    logger.debug("Finishing daemon process")
    pidpath.unlink()
    sys.exit()
def main(
    name: str,
    daemon_class: Type[OSPDaemon],
    parser: Optional[ParserType] = None,
):
    """OSPD Main function.

    Parses CLI arguments, configures logging, builds the listening server
    (Unix socket or TLS), instantiates the daemon and runs it.

    Returns 1 when the daemon's environment check fails, 0 on normal
    termination; may also terminate the process via sys.exit() for the
    --version / --list-commands paths or when the pid file exists.
    """
    if not parser:
        parser = create_parser(name)
    args = parser.parse_arguments()
    # --version implies foreground so the banner goes to the console.
    if args.version:
        args.foreground = True
    init_logging(
        args.log_level,
        log_file=args.log_file,
        log_config=args.log_config,
        foreground=args.foreground,
    )
    # port == 0 selects the Unix domain socket transport; otherwise TLS/TCP.
    if args.port == 0:
        server = UnixSocketServer(
            args.unix_socket,
            args.socket_mode,
            args.stream_timeout,
        )
    else:
        server = TlsServer(
            args.address,
            args.port,
            args.cert_file,
            args.key_file,
            args.ca_file,
            args.stream_timeout,
        )
    # The daemon receives every parsed argument as a keyword argument.
    daemon = daemon_class(**vars(args))
    if args.version:
        print_version(daemon)
        sys.exit()
    if args.list_commands:
        print(daemon.get_help_text())
        sys.exit()
    if not args.foreground:
        go_to_background()
    # create_pid() returns False when another instance is already running.
    if not create_pid(args.pid_file):
        sys.exit()
    # Set signal handler and cleanup
    atexit.register(
        exit_cleanup, pidfile=args.pid_file, server=server, daemon=daemon
    )
    signal.signal(
        signal.SIGTERM, partial(exit_cleanup, args.pid_file, server, daemon)
    )
    signal.signal(
        signal.SIGINT, partial(exit_cleanup, args.pid_file, server, daemon)
    )
    signal.signal(
        signal.SIGQUIT, partial(exit_cleanup, args.pid_file, server, daemon)
    )
    if not daemon.check():
        return 1
    logger.info(
        "Starting %s version %s.",
        daemon.daemon_info['name'],
        daemon.daemon_info['version'],
    )
    daemon.init(server)
    daemon.run()
    return 0
ospd-openvas-22.9.0/ospd/misc.py 0000664 0000000 0000000 00000007456 15011310720 0016476 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
# pylint: disable=too-many-lines
"""Miscellaneous classes and functions related to OSPD."""
import logging
import os
import sys
import uuid
import multiprocessing
from typing import Any, Callable, Iterable
from pathlib import Path
import psutil
logger = logging.getLogger(__name__)
def create_process(
    func: Callable, *, args: Iterable[Any] = None
) -> multiprocessing.Process:
    """Create (but do not start) a Process running func(*args).

    Fix: the previous implementation forwarded the ``args=None`` default
    straight to multiprocessing.Process, whose constructor does
    ``tuple(args)`` and therefore raised TypeError whenever the caller
    omitted ``args``. An omitted/None value now maps to an empty tuple.
    """
    process_args = tuple(args) if args is not None else ()
    return multiprocessing.Process(target=func, args=process_args)
class ResultType(object):
    """Various scan results types values."""

    ALARM = 0
    LOG = 1
    ERROR = 2
    HOST_DETAIL = 3

    @classmethod
    def get_str(cls, result_type: int) -> str:
        """Return string name of a result type."""
        names = {
            cls.ALARM: "Alarm",
            cls.LOG: "Log Message",
            cls.ERROR: "Error Message",
            cls.HOST_DETAIL: "Host Detail",
        }
        if result_type in names:
            return names[result_type]
        assert False, f"Erroneous result type {result_type}."

    @classmethod
    def get_type(cls, result_name: str) -> int:
        """Return string name of a result type."""
        codes = {
            "Alarm": cls.ALARM,
            "Log Message": cls.LOG,
            "Error Message": cls.ERROR,
            "Host Detail": cls.HOST_DETAIL,
        }
        if result_name in codes:
            return codes[result_name]
        assert False, f"Erroneous result name {result_name}."
def valid_uuid(value) -> bool:
    """Check if value is a valid UUID."""
    try:
        uuid.UUID(value, version=4)
    except (TypeError, ValueError, AttributeError):
        return False
    return True
def go_to_background() -> None:
    """Daemonize the running process."""
    # Single fork: the parent exits immediately so the shell regains
    # control while the child keeps running in the background.
    # NOTE(review): no setsid()/chdir()/umask() here -- session detachment
    # appears to happen later (os.setsid() in the scan subprocess); confirm.
    try:
        if os.fork():
            sys.exit()
    except OSError as errmsg:
        logger.error('Fork failed: %s', errmsg)
        sys.exit(1)
def create_pid(pidfile: str) -> bool:
    """Check if there is an already running daemon and creates the pid file.
    Otherwise gives an error.

    Returns True when the pid file was (re)written for this process,
    False when another live instance owns it or the file is unwritable.
    """
    pid = os.getpid()
    current_process = psutil.Process(pid)
    current_process_name = current_process.name()
    pidpath = Path(pidfile)
    pf_process_name = ""
    pf_pid = ""
    if pidpath.is_file():
        # Read the recorded PID; a corrupt file yields pf_pid = None.
        with pidpath.open('r', encoding='utf-8') as file:
            pf_pid = file.read().strip()
            try:
                pf_pid = int(pf_pid)
            except (TypeError, ValueError):
                pf_pid = None
        # Resolve the recorded PID to a process name; a dead PID leaves
        # pf_process_name empty, which marks the file as stale below.
        if pf_pid:
            try:
                process = psutil.Process(pf_pid)
                pf_process_name = process.name()
            except psutil.NoSuchProcess:
                pass
        # Same binary name under a different PID -> a live sibling owns it.
        if pf_process_name == current_process_name and pf_pid != pid:
            logger.error(
                "There is an already running process. See %s.",
                str(pidpath.absolute()),
            )
            return False
        else:
            # Stale file (dead process or PID reused by something else).
            logger.debug(
                "There is an existing pid file '%s', but the PID %s "
                "belongs to the process %s. It seems that %s was "
                "abruptly stopped. Removing the pid file.",
                str(pidpath.absolute()),
                pf_pid,
                pf_process_name,
                current_process_name,
            )
    try:
        with pidpath.open(mode='w', encoding='utf-8') as f:
            f.write(str(pid))
    except (FileNotFoundError, PermissionError) as e:
        logger.error(
            "Failed to create pid file %s. %s", str(pidpath.absolute()), e
        )
        return False
    return True
ospd-openvas-22.9.0/ospd/network.py 0000664 0000000 0000000 00000036346 15011310720 0017234 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
"""Helper module for network related functions"""
import binascii
import collections
import itertools
import logging
import re
import socket
import struct
from typing import List, Optional, Tuple
logger = logging.getLogger(__name__)
def target_to_ipv4(target: str) -> Optional[List]:
    """Return [target] when target is a literal IPv4 address, else None."""
    try:
        socket.inet_pton(socket.AF_INET, target)
    except socket.error:
        return None
    return [target]
def target_to_ipv6(target: str) -> Optional[List]:
    """Return [target] when target is a literal IPv6 address, else None."""
    try:
        socket.inet_pton(socket.AF_INET6, target)
    except socket.error:
        return None
    return [target]
def ipv4_range_to_list(start_packed, end_packed) -> Optional[List]:
    """Expand two packed IPv4 addresses into the inclusive list of
    dotted-quad strings between them."""
    first = struct.unpack('!L', start_packed)[0]
    last = struct.unpack('!L', end_packed)[0]
    return [
        socket.inet_ntoa(struct.pack('!L', value))
        for value in range(first, last + 1)
    ]
def target_to_ipv4_short(target: str) -> Optional[List]:
    """Attempt to return a IPv4 short range list from a target string.

    e.g. '192.168.1.5-10' expands to 192.168.1.5 .. 192.168.1.10.
    Returns None when target is not a valid ascending short range.
    """
    splitted = target.split('-')
    if len(splitted) != 2:
        return None
    try:
        start_packed = socket.inet_pton(socket.AF_INET, splitted[0])
        end_value = int(splitted[1])
    except (socket.error, ValueError):
        return None
    # Last octet of the start address. Indexing a bytes object yields the
    # int value directly (works for .0 addresses too). The previous code
    # called bytes(start_packed[3]), which in Python 3 builds a zero-filled
    # buffer of that *length*, so start_value always evaluated to 0 and
    # descending ranges such as '192.168.1.5-3' were never rejected.
    start_value = start_packed[3]
    if end_value < 0 or end_value > 255 or end_value < start_value:
        return None
    end_packed = start_packed[0:3] + struct.pack('B', end_value)
    return ipv4_range_to_list(start_packed, end_packed)
def target_to_ipv4_cidr(target: str) -> Optional[List]:
    """Attempt to return a IPv4 CIDR list from a target string.

    Network and broadcast addresses are excluded; only prefixes /1../30
    are accepted.
    """
    parts = target.split('/')
    if len(parts) != 2:
        return None
    try:
        start_packed = socket.inet_pton(socket.AF_INET, parts[0])
        block = int(parts[1])
    except (socket.error, ValueError):
        return None
    if not 0 < block <= 30:
        return None
    # First usable host: network address + 1.
    start_value = int(binascii.hexlify(start_packed), 16) >> (32 - block)
    start_value = (start_value << (32 - block)) + 1
    # Last usable host: broadcast address - 1.
    end_value = (start_value | (0xFFFFFFFF >> block)) - 1
    return ipv4_range_to_list(
        struct.pack('!I', start_value), struct.pack('!I', end_value)
    )
def target_to_ipv6_cidr(target: str) -> Optional[List]:
    """Attempt to return a IPv6 CIDR list from a target string.

    First and last addresses of the prefix are excluded; only prefixes
    /1../126 are accepted.
    """
    parts = target.split('/')
    if len(parts) != 2:
        return None
    try:
        start_packed = socket.inet_pton(socket.AF_INET6, parts[0])
        block = int(parts[1])
    except (socket.error, ValueError):
        return None
    if not 0 < block <= 126:
        return None
    # First host: prefix base + 1; last host: prefix top - 1.
    start_value = int(binascii.hexlify(start_packed), 16) >> (128 - block)
    start_value = (start_value << (128 - block)) + 1
    end_value = (start_value | (int('ff' * 16, 16) >> block)) - 1
    low_mask = (1 << 64) - 1
    start_repacked = struct.pack('!QQ', start_value >> 64, start_value & low_mask)
    end_repacked = struct.pack('!QQ', end_value >> 64, end_value & low_mask)
    return ipv6_range_to_list(start_repacked, end_repacked)
def target_to_ipv4_long(target: str) -> Optional[List]:
    """Attempt to return a IPv4 long-range list from a target string.

    e.g. '10.0.0.1-10.0.0.9'; None for malformed or descending ranges.
    """
    bounds = target.split('-')
    if len(bounds) != 2:
        return None
    try:
        start_packed = socket.inet_pton(socket.AF_INET, bounds[0])
        end_packed = socket.inet_pton(socket.AF_INET, bounds[1])
    except socket.error:
        return None
    # Packed big-endian bytes compare like the numeric addresses.
    if start_packed <= end_packed:
        return ipv4_range_to_list(start_packed, end_packed)
    return None
def ipv6_range_to_list(start_packed, end_packed) -> List:
    """Expand two packed IPv6 addresses into the inclusive list of
    compressed-notation strings between them."""
    first = int(binascii.hexlify(start_packed), 16)
    last = int(binascii.hexlify(end_packed), 16)
    low_mask = (1 << 64) - 1
    addresses = []
    for value in range(first, last + 1):
        # Re-pack the 128-bit value as two big-endian 64-bit halves.
        packed = struct.pack('!2Q', value >> 64, value & low_mask)
        addresses.append(socket.inet_ntop(socket.AF_INET6, packed))
    return addresses
def target_to_ipv6_short(target: str) -> Optional[List]:
    """Attempt to return a IPv6 short-range list from a target string.

    e.g. '::1-f' ranges over the last 16-bit group (hex end value).
    """
    pieces = target.split('-')
    if len(pieces) != 2:
        return None
    try:
        start_packed = socket.inet_pton(socket.AF_INET6, pieces[0])
        end_value = int(pieces[1], 16)
    except (socket.error, ValueError):
        return None
    # Value of the last 16-bit group of the start address.
    start_value = int(binascii.hexlify(start_packed[14:]), 16)
    if not start_value <= end_value <= 0xFFFF:
        return None
    end_packed = start_packed[:14] + struct.pack('!H', end_value)
    return ipv6_range_to_list(start_packed, end_packed)
def target_to_ipv6_long(target: str) -> Optional[List]:
    """Attempt to return a IPv6 long-range list from a target string.

    e.g. '::1-::9'; None for malformed or descending ranges.
    """
    bounds = target.split('-')
    if len(bounds) != 2:
        return None
    try:
        start_packed = socket.inet_pton(socket.AF_INET6, bounds[0])
        end_packed = socket.inet_pton(socket.AF_INET6, bounds[1])
    except socket.error:
        return None
    # Packed big-endian bytes compare like the numeric addresses.
    if start_packed <= end_packed:
        return ipv6_range_to_list(start_packed, end_packed)
    return None
def target_to_hostname(target: str) -> Optional[List]:
    """Return [target] when it looks like a plausible hostname, else None.

    Accepts 1..255 characters drawn from word chars, dots and hyphens.
    """
    if 0 < len(target) <= 255 and re.match(r'^[\w.-]+$', target):
        return [target]
    return None
def target_to_list(target: str) -> Optional[List]:
    """Attempt to return a list of single hosts from a target string.

    Tries each notation parser in priority order (single IP, CIDR,
    short/long ranges, hostname) and returns the first non-empty result,
    or None when nothing matched.
    """
    resolvers = (
        target_to_ipv4,
        target_to_ipv6,
        target_to_ipv4_cidr,
        target_to_ipv6_cidr,
        target_to_ipv4_short,
        target_to_ipv4_long,
        target_to_ipv6_short,
        target_to_ipv6_long,
        target_to_hostname,
    )
    for resolver in resolvers:
        hosts = resolver(target)
        if hosts:
            return hosts
    return None
def target_str_to_list(target_str: str) -> Optional[List]:
    """Parses a targets string into a list of individual targets.
    Return a list of hosts, None if supplied target_str is None or
    empty, or an empty list in case of malformed target.
    """
    new_list = list()
    if not target_str:
        return None
    # Drop leading/trailing commas so split() yields no empty entries
    # at the ends.
    target_str = target_str.strip(',')
    for target in target_str.split(','):
        target = target.strip()
        target_list = target_to_list(target)
        if target_list:
            new_list.extend(target_list)
        else:
            # One malformed entry invalidates the whole list.
            logger.info("%s: Invalid target value", target)
            return []
    # Deduplicate while preserving first-seen order.
    return list(collections.OrderedDict.fromkeys(new_list))
def resolve_hostname(hostname: str) -> Optional[str]:
    """Returns IP of a hostname.

    Performs a blocking DNS lookup; returns None when resolution fails.
    """
    assert hostname
    try:
        return socket.gethostbyname(hostname)
    except socket.gaierror:
        return None
def is_valid_address(address: str) -> bool:
    """Return True when address is a literal IPv4 or IPv6 address."""
    if not address:
        return False
    for family in (socket.AF_INET, socket.AF_INET6):
        try:
            socket.inet_pton(family, address)
            return True
        except OSError:
            # Not valid for this family; try the next one.
            continue
    return False
def get_hostname_by_address(address: str) -> str:
    """Returns hostname of an address.

    Empty string when the address is invalid, the reverse lookup fails,
    or no name beyond the literal address itself is known.
    """
    if not is_valid_address(address):
        return ''
    try:
        hostname = socket.getfqdn(address)
    except (socket.gaierror, socket.herror):
        return ''
    # getfqdn() echoes the input back when no PTR record exists.
    return hostname if hostname != address else ''
def port_range_expand(portrange: str) -> Optional[List]:
    """
    Receive a port range and expands it in individual ports.
    @input Port range.
    e.g. "4-8"
    @return List of integers.
    e.g. [4, 5, 6, 7, 8]
    """
    if not portrange or '-' not in portrange:
        return None
    # Split only on the first '-'; a second '-' makes int() fail below.
    low_text, _, high_text = portrange.partition('-')
    try:
        low = int(low_text)
        high = int(high_text) + 1
    except (IndexError, ValueError) as e:
        logger.info("Invalid port range format %s", e)
        return None
    return list(range(low, high))
def port_str_arrange(ports: str) -> str:
    """Gives a str in the format (always tcp listed first).
    T:U:
    """
    tcp_at = ports.find("T")
    udp_at = ports.find("U")
    # Swap the two sections only when both markers exist and UDP leads.
    if -1 not in (tcp_at, udp_at) and udp_at < tcp_at:
        return ports[tcp_at:] + ports[udp_at:tcp_at]
    return ports
def ports_str_check_failed(port_str: str) -> bool:
    """
    Check if the port string is well formed.
    Return True if fail, False other case.
    """
    # A malformed list has a char outside the allowed alphabet, repeated
    # T/U sections, a dangling '-' at either end (or around a newline),
    # or fewer ':' separators than sections.
    forbidden = r'[^TU:0-9, \-\n]'
    malformed = (
        re.search(forbidden, port_str) is not None
        or port_str.count('T') > 1
        or port_str.count('U') > 1
        or '-\n' in port_str
        or '\n-' in port_str
        or port_str[0] == '-'
        or port_str[-1] == '-'
        or port_str.count(':') < (port_str.count('T') + port_str.count('U'))
    )
    if malformed:
        logger.error("Invalid port range format")
        return True
    # Every '-' must sit between two digits.
    for index, char in enumerate(port_str):
        if char != '-':
            continue
        try:
            int(port_str[index - 1])
            int(port_str[index + 1])
        except (TypeError, ValueError) as e:
            logger.error("Invalid port range format: %s", e)
            return True
    return False
def ports_as_list(port_str: str) -> Tuple[Optional[List], Optional[List]]:
    """
    Parses a ports string into two list of individual tcp and udp ports.
    @input string containing a port list
    e.g. T:1,2,3,5-8 U:22,80,600-1024
    @return two list of sorted integers, for tcp and udp ports respectively.
    [None, None] on empty or malformed input.
    """
    if not port_str:
        logger.info("Invalid port value")
        return [None, None]
    if ports_str_check_failed(port_str):
        # Fix: the previous message was "{0}: Port list malformed." -- a
        # str.format() placeholder that was never filled, so the literal
        # '{0}' was logged. Use lazy %-formatting with the actual input.
        logger.info("%s: Port list malformed.", port_str)
        return [None, None]
    tcp_list = list()
    udp_list = list()
    ports = port_str.replace(' ', '')
    ports = ports.replace('\n', '')
    b_tcp = ports.find("T")
    b_udp = ports.find("U")
    if b_tcp != -1 and "T:" not in ports:
        return [None, None]
    if b_udp != -1 and "U:" not in ports:
        return [None, None]
    # Strip a ',' immediately preceding a section marker, e.g. '1,2,T:80'.
    # Fix: guard with `> 0` instead of `len(ports) > 1` -- when a marker is
    # absent (find() == -1) or first in the string, the old unconditional
    # ports[b_tcp - 1] lookup indexed from the *end* of the string and
    # could silently corrupt the list (e.g. 'U:1,2' became 'U:12').
    if b_tcp > 0 and ports[b_tcp - 1] == ',':
        ports = ports[: b_tcp - 1] + ports[b_tcp:]
    if b_udp > 0 and ports[b_udp - 1] == ',':
        ports = ports[: b_udp - 1] + ports[b_udp:]
    ports = port_str_arrange(ports)
    tports = ''
    uports = ''
    # TCP ports listed first, then UDP ports
    if b_udp != -1 and b_tcp != -1:
        tports = ports[ports.index('T:') + 2 : ports.index('U:')]
        uports = ports[ports.index('U:') + 2 :]
    # Only UDP ports
    elif b_tcp == -1 and b_udp != -1:
        uports = ports[ports.index('U:') + 2 :]
    # Only TCP ports
    elif b_udp == -1 and b_tcp != -1:
        tports = ports[ports.index('T:') + 2 :]
    else:
        # No marker at all: the whole string is treated as TCP ports.
        tports = ports
    if tports:
        for port in tports.split(','):
            port_range_expanded = port_range_expand(port)
            if '-' in port and port_range_expanded:
                tcp_list.extend(port_range_expanded)
            elif port != '' and '-' not in port:
                tcp_list.append(int(port))
        tcp_list.sort()
    if uports:
        for port in uports.split(','):
            port_range_expanded = port_range_expand(port)
            if '-' in port and port_range_expanded:
                udp_list.extend(port_range_expanded)
            elif port and '-' not in port:
                udp_list.append(int(port))
        udp_list.sort()
    if len(tcp_list) == 0 and len(udp_list) == 0:
        return [None, None]
    return (tcp_list, udp_list)
def get_tcp_port_list(port_str: str) -> Optional[List]:
    """Return a list with tcp ports from a given port list in string format"""
    # ports_as_list() returns (tcp, udp); element 0 is the TCP list or None.
    return ports_as_list(port_str)[0]
def get_udp_port_list(port_str: str) -> Optional[List]:
    """Return a list with udp ports from a given port list in string format"""
    # ports_as_list() returns (tcp, udp); element 1 is the UDP list or None.
    return ports_as_list(port_str)[1]
def port_list_compress(port_list: List) -> str:
    """Compress a port list and return a string.

    e.g. [1, 2, 3, 5] -> '1-3,5'. Duplicates are dropped and the input
    is sorted first; an empty/invalid input yields ''.
    """
    if not port_list or len(port_list) == 0:
        logger.info("Invalid or empty port list.")
        return ''
    unique_ports = sorted(set(port_list))
    pieces = []
    # Consecutive ports share the same (value - index) delta, so groupby
    # on that key yields maximal runs of contiguous ports.
    for _key, run in itertools.groupby(
        enumerate(unique_ports), lambda pair: pair[1] - pair[0]
    ):
        run = list(run)
        first, last = run[0][1], run[-1][1]
        pieces.append(str(first) if first == last else f'{first}-{last}')
    return ','.join(pieces)
def valid_port_list(port_list: str) -> bool:
    """Validate a port list string.
    Parameters:
        port_list: string containing UDP and/or TCP
        port list as ranges or single comma
        separated ports "
    Return True if it is a valid port list, False otherwise.
    """
    if not port_list:
        return False
    port_list = port_list.replace(' ', '')
    # Historic special case: an explicitly empty UDP+TCP list is accepted.
    if port_list == 'U:,T:':
        return True
    if ports_str_check_failed(port_list):
        return False
    tcp, udp = ports_as_list(port_list)
    if not tcp and not udp:
        return False

    def _all_in_range(ports):
        # Valid TCP/UDP port numbers are 1..65535.
        return all(1 <= port <= 65535 for port in ports)

    if tcp and not _all_in_range(tcp):
        return False
    if udp and not _all_in_range(udp):
        return False
    return True
ospd-openvas-22.9.0/ospd/ospd.py 0000664 0000000 0000000 00000140161 15011310720 0016477 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
# pylint: disable=too-many-lines
"""OSP Daemon core class."""
import logging
import multiprocessing
import os
from pathlib import Path
import re
import socket
import ssl
import time
from pprint import pformat
from typing import (
List,
Any,
Iterator,
Dict,
Optional,
Iterable,
Tuple,
Union,
)
from xml.etree.ElementTree import Element, SubElement
import defusedxml.ElementTree as secET
import psutil
from ospd import __version__
from ospd.command import get_commands
from ospd.errors import OspdCommandError
from ospd.misc import ResultType, create_process
from ospd.network import target_str_to_list
from ospd.protocol import RequestParser
from ospd.scan import ScanCollection, ScanStatus, ScanProgress
from ospd.server import BaseServer, Stream
from ospd.vtfilter import VtsFilter
from ospd.vts import Vts
from ospd.xml import (
elements_as_text,
get_result_xml,
get_progress_xml,
)
from ospd.xmlvt import XmlStringVTHelper
logger = logging.getLogger(__name__)
# The OSP protocol version reported to clients tracks the package version.
PROTOCOL_VERSION = __version__
SCHEDULER_CHECK_PERIOD = 10  # in seconds
MIN_TIME_BETWEEN_START_SCAN = 60  # in seconds
# Scanner parameters shared by every OSP daemon; subclasses register
# additional ones through set_scanner_param().
BASE_SCANNER_PARAMS = {
    'debug_mode': {
        'type': 'boolean',
        'name': 'Debug Mode',
        'default': 0,
        'mandatory': 0,
        'description': 'Whether to get extra scan debug information.',
    },
    'dry_run': {
        'type': 'boolean',
        'name': 'Dry Run',
        'default': 0,
        'mandatory': 0,
        'description': 'Whether to dry run scan.',
    },
}  # type: Dict
def _terminate_process_group(process: multiprocessing.Process) -> None:
    # Send signal 15 (SIGTERM) to the whole process group so children
    # spawned by the scan process are terminated along with it.
    os.killpg(os.getpgid(process.pid), 15)
# Matches the canonical lowercase 8-4-4-4-12 hex UUID string form; used to
# recognise per-scan data files named after their scan UUID.
is_uuid_re = re.compile(
    '^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$'
)
class OSPDaemon:
"""Daemon class for OSP traffic handling.
Every scanner wrapper should subclass it and make necessary additions and
changes.
* Add any needed parameters in __init__.
* Implement check() method which verifies scanner availability and other
environment related conditions.
* Implement process_scan_params and exec_scan methods which are
specific to handling the command, executing the wrapped
scanner and storing the results.
* Implement other methods that assert to False such as get_scanner_name,
get_scanner_version.
* Use Call set_command_attributes at init time to add scanner command
specific options eg. the w3af profile for w3af wrapper.
"""
    def __init__(
        self,
        *,
        customvtfilter=None,
        storage=None,
        max_scans=0,
        min_free_mem_scan_queue=0,
        file_storage_dir='/run/ospd',
        max_queued_scans=0,
        **kwargs,
    ):  # pylint: disable=unused-argument
        """Initializes the daemon's internal data."""

        def remove_previous_data_pickler_files():
            # Remove leftover per-scan pickle files (named by scan UUID)
            # from a previous daemon run so stale data is not picked up.
            logger.debug("removing uuid files in %s", file_storage_dir)
            root = Path(file_storage_dir)
            for dp in root.glob('*'):
                if is_uuid_re.match(dp.name):
                    if dp.exists():
                        dp.unlink()
            return

        self.scan_collection = ScanCollection(file_storage_dir)
        self.scan_processes = dict()
        remove_previous_data_pickler_files()
        # Daemon and scanner identity defaults; subclasses override these.
        self.daemon_info = dict()
        self.daemon_info['name'] = "OSPd"
        self.daemon_info['version'] = __version__
        self.daemon_info['description'] = "No description"
        self.scanner_info = dict()
        self.scanner_info['name'] = 'No name'
        self.scanner_info['version'] = 'No version'
        self.scanner_info['description'] = 'No description'
        self.server_version = None  # Set by the subclass.
        self.initialized = None  # Set after initialization finished
        # Scheduling limits. NOTE(review): 0 presumably means "no limit";
        # confirm against the scheduler before relying on it.
        self.max_scans = max_scans
        self.min_free_mem_scan_queue = min_free_mem_scan_queue
        self.max_queued_scans = max_queued_scans
        self.last_scan_start_time = 0
        self.scaninfo_store_time = kwargs.get('scaninfo_store_time')
        self.protocol_version = PROTOCOL_VERSION
        # One handler object per OSP command, keyed by command name.
        self.commands = {}
        for command_class in get_commands():
            command = command_class(self)
            self.commands[command.get_name()] = command
        # Start with the base parameters common to all scanners.
        self.scanner_params = dict()
        for name, params in BASE_SCANNER_PARAMS.items():
            self.set_scanner_param(name, params)
        self.vts = Vts(storage)
        self.vts_version = None
        self.feed_name = None
        self.feed_vendor = None
        self.feed_home = None
        if customvtfilter:
            self.vts_filter = customvtfilter
        else:
            self.vts_filter = VtsFilter()
    def init(self, server: BaseServer) -> None:
        """Should be overridden by a subclass if the initialization is costly.

        Will be called after check.
        """
        self.scan_collection.init()
        # Register the client-stream handler before flagging readiness.
        server.start(self.handle_client_stream)
        self.initialized = True
def set_command_attributes(self, name: str, attributes: Dict) -> None:
"""Sets the xml attributes of a specified command."""
if self.command_exists(name):
command = self.commands.get(name)
command.attributes = attributes
    def set_scanner_param(self, name: str, scanner_params: Dict) -> None:
        """Set a scanner parameter.

        Both the name and the parameter definition dict must be non-empty.
        """
        assert name
        assert scanner_params
        self.scanner_params[name] = scanner_params
    def get_scanner_params(self) -> Dict:
        """Return the dict of all registered scanner parameters."""
        return self.scanner_params
    def add_vt(
        self,
        vt_id: str,
        name: str = None,
        vt_params: str = None,
        vt_refs: str = None,
        custom: str = None,
        vt_creation_time: str = None,
        vt_modification_time: str = None,
        vt_dependencies: str = None,
        summary: str = None,
        impact: str = None,
        affected: str = None,
        insight: str = None,
        solution: str = None,
        solution_t: str = None,
        solution_m: str = None,
        detection: str = None,
        qod_t: str = None,
        qod_v: str = None,
        severities: str = None,
    ) -> None:
        """Add a vulnerability test information.

        Thin passthrough to Vts.add(); every keyword is forwarded
        unchanged under the same name.

        IMPORTANT: The VT's Data Manager will store the vts collection.
        If the collection is considerably big and it will be consultated
        intensible during a routine, consider to do a deepcopy(), since
        accessing the shared memory in the data manager is very expensive.
        At the end of the routine, the temporal copy must be set to None
        and deleted.
        """
        self.vts.add(
            vt_id,
            name=name,
            vt_params=vt_params,
            vt_refs=vt_refs,
            custom=custom,
            vt_creation_time=vt_creation_time,
            vt_modification_time=vt_modification_time,
            vt_dependencies=vt_dependencies,
            summary=summary,
            impact=impact,
            affected=affected,
            insight=insight,
            solution=solution,
            solution_t=solution_t,
            solution_m=solution_m,
            detection=detection,
            qod_t=qod_t,
            qod_v=qod_v,
            severities=severities,
        )
def set_vts_version(self, vts_version: str) -> None:
"""Add into the vts dictionary an entry to identify the
vts version.
Parameters:
vts_version (str): Identifies a unique vts version.
"""
if not vts_version:
raise OspdCommandError(
'A vts_version parameter is required', 'set_vts_version'
)
self.vts_version = vts_version
    def set_feed_vendor(self, feed_vendor: str) -> None:
        """Set the feed vendor.

        Parameters:
            feed_vendor (str): Identifies the feed vendor.

        Raises:
            OspdCommandError: when feed_vendor is empty or None.
        """
        if not feed_vendor:
            raise OspdCommandError(
                'A feed vendor parameter is required', 'set_feed_vendor'
            )
        self.feed_vendor = feed_vendor
def set_feed_home(self, feed_home: str) -> None:
"""Set the feed home.
Parameters:
feed_home (str): Identifies the feed home.
"""
if not feed_home:
raise OspdCommandError(
'A feed home parameter is required', 'set_feed_home'
)
self.feed_home = feed_home
def set_feed_name(self, feed_name: str) -> None:
"""Set the feed name.
Parameters:
feed_name (str): Identifies the feed name.
"""
if not feed_name:
raise OspdCommandError(
'A feed name parameter is required', 'set_feed_name'
)
self.feed_name = feed_name
    def get_vts_version(self) -> Optional[str]:
        """Return the vts version, or None when not set yet."""
        return self.vts_version
    def get_feed_vendor(self) -> Optional[str]:
        """Return the feed vendor, or None when not set yet."""
        return self.feed_vendor
    def get_feed_home(self) -> Optional[str]:
        """Return the feed home, or None when not set yet."""
        return self.feed_home
    def get_feed_name(self) -> Optional[str]:
        """Return the feed name, or None when not set yet."""
        return self.feed_name
    def command_exists(self, name: str) -> bool:
        """Checks if a commands exists."""
        return name in self.commands
    def get_scanner_name(self) -> str:
        """Gives the wrapped scanner's name."""
        return self.scanner_info['name']
    def get_scanner_version(self) -> str:
        """Gives the wrapped scanner's version."""
        return self.scanner_info['version']
    def get_scanner_description(self) -> str:
        """Gives the wrapped scanner's description."""
        return self.scanner_info['description']
    def get_server_version(self) -> str:
        """Gives the specific OSP server's version.

        Asserts when the subclass never set server_version.
        """
        assert self.server_version
        return self.server_version
    def get_protocol_version(self) -> str:
        """Gives the OSP's version."""
        return self.protocol_version
    def preprocess_scan_params(self, xml_params):
        """Processes the scan parameters.

        Builds a dict from the XML child elements (tag -> text) and
        validates each entry against the registered scanner parameter
        definitions.

        Raises:
            OspdCommandError: on a non-integer value for an
                integer/boolean parameter, a boolean outside {0, 1}, a
                selection value not in the allowed set, or a missing
                mandatory value.
        """
        params = {}
        for param in xml_params:
            # Missing element text becomes the empty string.
            params[param.tag] = param.text or ''
        # Validate values.
        for key in list(params.keys()):
            param_type = self.get_scanner_param_type(key)
            # Unknown parameters are passed through unvalidated.
            if not param_type:
                continue
            if param_type in ['integer', 'boolean']:
                try:
                    params[key] = int(params[key])
                except ValueError:
                    raise OspdCommandError(
                        f'Invalid {key} value', 'start_scan'
                    ) from None
            if param_type == 'boolean':
                if params[key] not in [0, 1]:
                    raise OspdCommandError(f'Invalid {key} value', 'start_scan')
            elif param_type == 'selection':
                # Allowed values are encoded '|'-separated in the default.
                selection = self.get_scanner_param_default(key).split('|')
                if params[key] not in selection:
                    raise OspdCommandError(f'Invalid {key} value', 'start_scan')
            if self.get_scanner_param_mandatory(key) and params[key] == '':
                raise OspdCommandError(
                    f'Mandatory {key} value is missing', 'start_scan'
                )
        return params
    def process_scan_params(self, params: Dict) -> Dict:
        """This method is to be overridden by the child classes if necessary"""
        return params
    def stop_scan(self, scan_id: str) -> None:
        """Stop a queued or running scan.

        Queued scans are simply dequeued and marked STOPPED. Running
        scans are marked STOPPED first (so the worker observes the
        status), then the scan process and its whole process group are
        terminated.

        Raises:
            OspdCommandError: when the scan id is unknown or the scan
                process already ended.
        """
        if (
            scan_id in self.scan_collection.ids_iterator()
            and self.get_scan_status(scan_id) == ScanStatus.QUEUED
        ):
            logger.info('Scan %s has been removed from the queue.', scan_id)
            self.scan_collection.remove_file_pickled_scan_info(scan_id)
            self.set_scan_status(scan_id, ScanStatus.STOPPED)
            return
        scan_process = self.scan_processes.get(scan_id)
        if not scan_process:
            raise OspdCommandError(f'Scan not found {scan_id}.', 'stop_scan')
        if not scan_process.is_alive():
            raise OspdCommandError(
                'Scan already stopped or finished.', 'stop_scan'
            )
        # Set the status before terminating so the worker can see it.
        self.set_scan_status(scan_id, ScanStatus.STOPPED)
        logger.info(
            '%s: Stopping Scan with the PID %s.', scan_id, scan_process.ident
        )
        try:
            scan_process.terminate()
        except AttributeError:
            logger.debug('%s: The scanner task stopped unexpectedly.', scan_id)
        # Also terminate children spawned by the scan process.
        try:
            logger.debug(
                '%s: Terminating process group after stopping.', scan_id
            )
            _terminate_process_group(scan_process)
        except ProcessLookupError:
            logger.info(
                '%s: Scan with the PID %s is already stopped.',
                scan_id,
                scan_process.pid,
            )
        # Reap the child, but never join() our own process.
        if scan_process.ident != os.getpid():
            scan_process.join(0)
        logger.info('%s: Scan stopped.', scan_id)
    def exec_scan(self, scan_id: str):
        """Raises NotImplementedError. Must be implemented by the subclass
        to run the actual scan."""
        raise NotImplementedError
    def finish_scan(self, scan_id: str) -> None:
        """Sets a scan as finished."""
        # Force progress to 100% before flipping the status.
        self.scan_collection.set_progress(scan_id, ScanProgress.FINISHED.value)
        self.set_scan_status(scan_id, ScanStatus.FINISHED)
        logger.info("%s: Scan finished.", scan_id)
    def interrupt_scan(self, scan_id: str) -> None:
        """Set scan status as interrupted."""
        self.set_scan_status(scan_id, ScanStatus.INTERRUPTED)
        logger.info("%s: Scan interrupted.", scan_id)
    def daemon_exit_cleanup(self) -> None:
        """Perform a cleanup before exiting

        Stops every scan that is still active and then busy-waits (1s
        polling) until all scans reach a terminal state.
        """
        self.scan_collection.clean_up_pickled_scan_info()
        # Stop scans which are not already stopped.
        for scan_id in self.scan_collection.ids_iterator():
            status = self.get_scan_status(scan_id)
            if (
                status != ScanStatus.STOPPED
                and status != ScanStatus.FINISHED
                and status != ScanStatus.INTERRUPTED
            ):
                logger.debug("%s: Stopping scan before daemon exit.", scan_id)
                self.stop_scan(scan_id)
        # Wait for scans to be in some stopped state.
        # NOTE(review): no timeout here -- a scan that never reaches a
        # terminal state would block shutdown forever; confirm intended.
        while True:
            all_stopped = True
            for scan_id in self.scan_collection.ids_iterator():
                status = self.get_scan_status(scan_id)
                if (
                    status != ScanStatus.STOPPED
                    and status != ScanStatus.FINISHED
                    and status != ScanStatus.INTERRUPTED
                ):
                    all_stopped = False
            if all_stopped:
                logger.debug(
                    "All scans stopped and daemon clean and ready to exit"
                )
                return
            logger.debug("Waiting for running scans before daemon exit. ")
            time.sleep(1)
    def get_daemon_name(self) -> str:
        """Gives osp daemon's name."""
        return self.daemon_info['name']
    def get_daemon_version(self) -> str:
        """Gives osp daemon's version."""
        return self.daemon_info['version']
def get_scanner_param_type(self, param: str):
"""Returns type of a scanner parameter."""
assert isinstance(param, str)
entry = self.scanner_params.get(param)
if not entry:
return None
return entry.get('type')
def get_scanner_param_mandatory(self, param: str):
"""Returns if a scanner parameter is mandatory."""
assert isinstance(param, str)
entry = self.scanner_params.get(param)
if not entry:
return False
return entry.get('mandatory')
def get_scanner_param_default(self, param: str):
"""Returns default value of a scanner parameter."""
assert isinstance(param, str)
entry = self.scanner_params.get(param)
if not entry:
return None
return entry.get('default')
def handle_client_stream(self, stream: Stream) -> None:
    """Handles stream of data received from client.

    Reads until the request parser detects a complete XML document (or
    the peer closes / an error occurs), then dispatches the command and
    writes back an error response if command handling failed.
    """
    data = b''

    request_parser = RequestParser()

    while True:
        try:
            buf = stream.read()
            if not buf:
                # Peer closed the connection.
                break

            data += buf

            # Stop reading as soon as the XML root element is closed.
            if request_parser.has_ended(buf):
                break
        except (AttributeError, ValueError) as message:
            logger.error(message)
            return
        except ssl.SSLError as exception:
            logger.debug('Error: %s', exception)
            break
        except socket.timeout as exception:
            logger.debug('Request timeout: %s', exception)
            break

    if len(data) <= 0:
        logger.debug("Empty client stream")
        return

    response = None
    try:
        self.handle_command(data, stream)
    except OspdCommandError as exception:
        # Known protocol errors are reported back to the client as XML.
        response = exception.as_xml()
        logger.debug('Command error: %s', exception.message)
    except Exception:  # pylint: disable=broad-except
        # Anything else is logged with traceback and reported generically.
        logger.exception('While handling client command:')
        exception = OspdCommandError('Fatal error', 'error')
        response = exception.as_xml()

    if response:
        stream.write(response)

    stream.close()
def process_finished_hosts(self, scan_id: str) -> None:
    """Process the finished hosts before launching the scans.

    Hosts supplied by the client as already finished are marked as such
    so they are excluded from progress calculation and scanning.
    """
    finished_hosts = self.scan_collection.get_finished_hosts(scan_id)
    if not finished_hosts:
        return

    exc_finished_hosts_list = target_str_to_list(finished_hosts)
    self.scan_collection.set_host_finished(scan_id, exc_finished_hosts_list)
def start_scan(self, scan_id: str) -> None:
    """Starts the scan with scan_id.

    Runs in its own process (see start_queued_scans); executes the
    scanner-specific exec_scan() and afterwards transitions the scan to
    FINISHED or INTERRUPTED depending on progress and status.
    """
    # A hidden BrokenPipeError can occur on a sensor when an SSH
    # connection got lost while shutting down ospd and the scan is
    # handled in a subprocess. When this happens a stack trace should
    # not be logged because it is a self-correcting occurrence.
    try:
        # Detach into a new session so signals to the daemon do not
        # reach the scan process group directly.
        os.setsid()

        self.process_finished_hosts(scan_id)

        try:
            self.set_scan_status(scan_id, ScanStatus.RUNNING)
            self.exec_scan(scan_id)
        except Exception as e:  # pylint: disable=broad-except
            self.add_scan_error(
                scan_id,
                name='',
                host=self.get_scan_host(scan_id),
                value=f'Host process failure ({e}).',
            )
            logger.exception('%s: Exception %s while scanning', scan_id, e)
        else:
            logger.info("%s: Host scan finished.", scan_id)

        status = self.get_scan_status(scan_id)
        is_stopped = status == ScanStatus.STOPPED
        self.set_scan_progress(scan_id)
        progress = self.get_scan_progress(scan_id)

        if not is_stopped and progress == ScanProgress.FINISHED:
            self.finish_scan(scan_id)
        elif not is_stopped:
            # Not stopped by the client but also not 100% done: the
            # scan ended prematurely and is flagged as interrupted.
            logger.info(
                "%s: Host scan got interrupted. Progress: %d, Status: %s",
                scan_id,
                progress,
                status.name,
            )
            self.interrupt_scan(scan_id)

        # For debug purposes
        self._get_scan_progress_raw(scan_id)
    except BrokenPipeError:
        logger.warning(
            "Error sending data to the client while executing a scan %s.",
            scan_id,
        )
def handle_timeout(self, scan_id: str, host: str) -> None:
    """Report a scanner execution timeout for a host as an error
    result of the given scan."""
    message = f"{self.get_scanner_name()} exec timeout."
    self.add_scan_error(
        scan_id,
        host=host,
        name="Timeout",
        value=message,
    )
def sort_host_finished(
    self,
    scan_id: str,
    finished_hosts: Union[List[str], str],
) -> None:
    """Check if the finished host in the list was alive or dead
    and update the corresponding alive_count or dead_count.

    Hosts are classified by their last recorded per-host progress and
    then removed from the scan's current target progress table.
    """
    # Accept a single host as a plain string for convenience.
    if isinstance(finished_hosts, str):
        finished_hosts = [finished_hosts]

    alive_hosts = []
    dead_hosts = []

    current_hosts = self.scan_collection.get_current_target_progress(
        scan_id
    )
    for finished_host in finished_hosts:
        progress = current_hosts.get(finished_host)
        if progress == ScanProgress.FINISHED:
            alive_hosts.append(finished_host)
        elif progress == ScanProgress.DEAD_HOST:
            dead_hosts.append(finished_host)
        else:
            # Neither fully finished nor marked dead: counting would be
            # wrong, so only log. This situation can later surface as an
            # interrupted scan.
            logger.debug(
                'The host %s is considered dead or finished, but '
                'its progress is still %d. This can lead to '
                'interrupted scan.',
                finished_host,
                progress,
            )

    self.scan_collection.set_host_dead(scan_id, dead_hosts)

    self.scan_collection.set_host_finished(scan_id, alive_hosts)

    self.scan_collection.remove_hosts_from_target_progress(
        scan_id, finished_hosts
    )
def set_scan_progress(self, scan_id: str):
    """Calculate the target progress with the current host states
    and store it in the scan table."""
    # Get current scan progress for debugging purposes
    logger.debug("Calculating scan progress with the following data:")
    self._get_scan_progress_raw(scan_id)

    scan_progress = self.scan_collection.calculate_target_progress(scan_id)
    self.scan_collection.set_progress(scan_id, scan_progress)
def set_scan_progress_batch(
    self, scan_id: str, host_progress: Dict[str, int]
):
    """Store progress for several hosts at once and refresh the
    overall scan progress afterwards."""
    self.scan_collection.set_host_progress(scan_id, host_progress)
    self.set_scan_progress(scan_id)
def set_scan_host_progress(
    self,
    scan_id: str,
    host: Optional[str] = None,
    progress: Optional[int] = None,
) -> None:
    """Sets host's progress which is part of target.
    Each time a host progress is updated, the scan progress
    is updated too.

    Silently ignores calls with a missing host/progress or a progress
    value that cannot be converted to int.
    """
    if host is None or progress is None:
        return

    if not isinstance(progress, int):
        try:
            progress = int(progress)
        except (TypeError, ValueError):
            return

    host_progress = {host: progress}
    self.set_scan_progress_batch(scan_id, host_progress)
def get_scan_host_progress(
    self,
    scan_id: str,
    host: str = None,
) -> int:
    """Return the stored progress for one host of the scan target,
    or None if the host is not tracked."""
    per_host = self.scan_collection.get_current_target_progress(scan_id)
    return per_host.get(host)
def set_scan_status(self, scan_id: str, status: ScanStatus) -> None:
    """Set the scan's status."""
    logger.debug('%s: Set scan status %s,', scan_id, status.name)
    self.scan_collection.set_status(scan_id, status)
def get_scan_status(self, scan_id: str) -> ScanStatus:
    """Get scan_id scan's status.

    Returns None-like status unchanged if the collection has no status
    for the scan.
    """
    status = self.scan_collection.get_status(scan_id)
    # Guard against a missing status when building the log message.
    st = status.name if status else None
    logger.debug('%s: Current scan status: %s,', scan_id, st)
    return status
def scan_exists(self, scan_id: str) -> bool:
    """Checks if a scan with ID scan_id is in collection.

    Returns:
        True if the scan exists, False otherwise.
    """
    return self.scan_collection.id_exists(scan_id)
def get_help_text(self) -> str:
    """Returns the help output in plain text format.

    Renders every registered OSP command with its description,
    attributes and elements as an indented text listing.
    """
    txt = ''
    for name, info in self.commands.items():
        description = info.get_description()
        attributes = info.get_attributes()
        elements = info.get_elements()

        command_txt = f"\t{name: <22} {description}\n"

        if attributes:
            command_txt = ''.join([command_txt, "\t Attributes:\n"])

            for attrname, attrdesc in attributes.items():
                attr_txt = f"\t  {attrname: <22} {attrdesc}\n"
                command_txt = ''.join([command_txt, attr_txt])

        if elements:
            command_txt = ''.join(
                [
                    command_txt,
                    "\t Elements:\n",
                    elements_as_text(elements),
                ]
            )

        txt += command_txt

    return txt
def delete_scan(self, scan_id: str) -> int:
    """Deletes scan_id scan from collection.

    A RUNNING scan is never deleted. Otherwise the scan process (if
    any) is joined first so its resources are released.

    Returns:
        1 if scan deleted, 0 otherwise.
    """
    if self.get_scan_status(scan_id) == ScanStatus.RUNNING:
        return 0

    # Don't delete the scan until the process stops
    exitcode = None
    try:
        self.scan_processes[scan_id].join()
        exitcode = self.scan_processes[scan_id].exitcode
    except KeyError:
        logger.debug('Scan process for %s never started,', scan_id)

    # Only forget the process entry once it actually terminated.
    # multiprocessing exitcode is None while still alive, so test for
    # None explicitly instead of the original `exitcode or exitcode == 0`.
    if exitcode is not None:
        del self.scan_processes[scan_id]

    return self.scan_collection.delete_scan(scan_id)
def get_scan_results_xml(
    self, scan_id: str, pop_res: bool, max_res: Optional[int]
):
    """Gets scan_id scan's results in XML format.

    Arguments:
        scan_id: Scan to fetch results for.
        pop_res: Whether results are removed from the collection while
            being iterated.
        max_res: Maximum number of results to return, or None/0 for all.

    Returns:
        <results> Element containing one child per result.
    """
    results = Element('results')
    for result in self.scan_collection.results_iterator(
        scan_id, pop_res, max_res
    ):
        results.append(get_result_xml(result))

    logger.debug('Returning %d results', len(results))
    return results
def _get_scan_progress_raw(self, scan_id: str) -> Dict:
    """Returns a dictionary with scan_id scan's progress information.

    Collects per-host progress, overall progress and alive/dead/
    excluded/total host counters, and logs them for debugging.
    """
    current_progress = {}

    current_progress['current_hosts'] = (
        self.scan_collection.get_current_target_progress(scan_id)
    )
    current_progress['overall'] = self.get_scan_progress(scan_id)
    current_progress['count_alive'] = self.scan_collection.get_count_alive(
        scan_id
    )
    current_progress['count_dead'] = self.scan_collection.get_count_dead(
        scan_id
    )
    current_progress['count_excluded'] = (
        self.scan_collection.get_simplified_exclude_host_count(scan_id)
    )
    current_progress['count_total'] = self.scan_collection.get_count_total(
        scan_id
    ) + self.scan_collection.get_finished_hosts_count(scan_id)

    # Use the module logger (the original called logging.debug on the
    # root logger, bypassing this module's logging configuration).
    logger.debug(
        "%s: Current progress: \n%s",
        scan_id,
        pformat(current_progress),
    )
    return current_progress
def _get_scan_progress_xml(self, scan_id: str):
    """Gets scan_id scan's progress in XML format.

    Returns:
        Element with the scan progress in XML.
    """
    current_progress = self._get_scan_progress_raw(scan_id)
    return get_progress_xml(current_progress)
def get_scan_xml(
    self,
    scan_id: str,
    detailed: bool = True,
    pop_res: bool = False,
    max_res: int = 0,
    progress: bool = False,
):
    """Gets scan in XML format.

    Arguments:
        scan_id: Scan to serialize. An empty id yields an empty <scan/>.
        detailed: Include the scan results.
        pop_res: Remove returned results from the collection.
        max_res: Maximum number of results to include.
        progress: Include the per-host progress element.

    Returns:
        Element with the scan in XML format.
    """
    if not scan_id:
        return Element('scan')

    if self.get_scan_status(scan_id) == ScanStatus.QUEUED:
        # A queued scan has no target/progress/timing information yet;
        # force an empty results element and skip details.
        target = ''
        scan_progress = 0
        status = self.get_scan_status(scan_id)
        start_time = 0
        end_time = 0
        response = Element('scan')
        detailed = False
        progress = False
        response.append(Element('results'))
    else:
        target = self.get_scan_host(scan_id)
        scan_progress = self.get_scan_progress(scan_id)
        status = self.get_scan_status(scan_id)
        start_time = self.get_scan_start_time(scan_id)
        end_time = self.get_scan_end_time(scan_id)
        response = Element('scan')

    for name, value in [
        ('id', scan_id),
        ('target', target),
        ('progress', scan_progress),
        ('status', status.name.lower()),
        ('start_time', start_time),
        ('end_time', end_time),
    ]:
        response.set(name, str(value))
    if detailed:
        response.append(
            self.get_scan_results_xml(scan_id, pop_res, max_res)
        )
    if progress:
        response.append(self._get_scan_progress_xml(scan_id))

    return response
def get_vt_iterator(  # pylint: disable=unused-argument
    self, vt_selection: List[str] = None, details: bool = True
) -> Iterator[Tuple[str, Dict]]:
    """Return iterator object for getting elements
    from the VTs dictionary.

    Base implementation ignores vt_selection/details and yields all
    (vt_id, vt_info) pairs; subclasses may filter.
    """
    return self.vts.items()
def get_vt_xml(self, single_vt: Tuple[str, Dict]) -> Element:
    """Gets a single vulnerability test information in XML format.

    Arguments:
        single_vt: Tuple of (vt_id, vt info dict) as yielded by
            get_vt_iterator().

    Returns:
        Element with the single vulnerability test information in XML
        format; an empty <vt/> element if no vt data is available.
    """
    if not single_vt or single_vt[1] is None:
        return Element('vt')

    vt_id, vt = single_vt

    vt_xml = Element('vt')
    vt_xml.set('id', vt_id)

    name_element = SubElement(vt_xml, 'name')
    name_element.text = str(vt.get('name'))

    xml_helper = XmlStringVTHelper()

    # Optional sub elements rendered from a single vt dictionary value.
    # The table preserves the original fixed output order; each entry
    # maps the dictionary key to the helper producing its XML string.
    single_value_fields = [
        ('vt_params', xml_helper.get_params_vt_as_xml_str),
        ('vt_refs', xml_helper.get_refs_vt_as_xml_str),
        ('vt_dependencies', xml_helper.get_dependencies_vt_as_xml_str),
        ('creation_time', xml_helper.get_creation_time_vt_as_xml_str),
        ('modification_time', xml_helper.get_modification_time_vt_as_xml_str),
        ('summary', xml_helper.get_summary_vt_as_xml_str),
        ('impact', xml_helper.get_impact_vt_as_xml_str),
        ('affected', xml_helper.get_affected_vt_as_xml_str),
        ('insight', xml_helper.get_insight_vt_as_xml_str),
    ]
    for key, to_xml_str in single_value_fields:
        if vt.get(key):
            vt_xml.append(secET.fromstring(to_xml_str(vt_id, vt.get(key))))

    # solution carries additional type/method information.
    if vt.get('solution'):
        solution_xml_str = xml_helper.get_solution_vt_as_xml_str(
            vt_id,
            vt.get('solution'),
            vt.get('solution_type'),
            vt.get('solution_method'),
        )
        vt_xml.append(secET.fromstring(solution_xml_str))

    # detection is emitted when any of detection, qod_type or qod is set.
    if vt.get('detection') or vt.get('qod_type') or vt.get('qod'):
        detection_xml_str = xml_helper.get_detection_vt_as_xml_str(
            vt_id, vt.get('detection'), vt.get('qod_type'), vt.get('qod')
        )
        vt_xml.append(secET.fromstring(detection_xml_str))

    # severities and custom come last, as in the original output order.
    for key, to_xml_str in [
        ('severities', xml_helper.get_severities_vt_as_xml_str),
        ('custom', xml_helper.get_custom_vt_as_xml_str),
    ]:
        if vt.get(key):
            vt_xml.append(secET.fromstring(to_xml_str(vt_id, vt.get(key))))

    return vt_xml
def get_vts_selection_list(
    self, vt_id: str = None, filtered_vts: Dict = None
) -> Iterable[str]:
    """
    Get list of VT's OID.

    If vt_id is specified, the collection will contain only this vt, if
    found.
    If no vt_id is specified or filtered_vts is None (default), the
    collection will contain all vts. Otherwise those vts passed
    in filtered_vts or vt_id are returned. In case of both vt_id and
    filtered_vts are given, filtered_vts has priority.

    Arguments:
        vt_id (vt_id, optional): ID of the vt to get.
        filtered_vts (list, optional): Filtered VTs collection.

    Returns:
        List of selected VT's OID.
    """
    # An empty (but not None) filter means nothing matched.
    if filtered_vts is not None and len(filtered_vts) == 0:
        return []

    if filtered_vts:
        return filtered_vts
    if vt_id:
        return [vt_id]
    return self.vts.keys()
def handle_command(self, data: bytes, stream: Stream) -> None:
    """Handles an osp command in a string.

    Parses the request XML, dispatches to the registered command
    handler and writes the (possibly chunked) response to the stream.

    Raises:
        OspdCommandError: On unparsable input or unknown command names.
    """
    try:
        tree = secET.fromstring(data)
    except secET.ParseError as e:
        logger.debug("Erroneous client input: %s", data)
        raise OspdCommandError('Invalid data') from e

    command_name = tree.tag

    logger.debug('Handling %s command request.', command_name)

    command = self.commands.get(command_name, None)
    # NOTE(review): if command_name == "authenticate" but no such
    # command is registered, `command` stays None and the attribute
    # accesses below raise AttributeError — confirm "authenticate" is
    # always registered, or add an explicit early return here.
    if not command and command_name != "authenticate":
        raise OspdCommandError('Bogus command name')

    if not self.initialized and command.must_be_initialized:
        exception = OspdCommandError(
            f'{self.daemon_info["name"]} is still starting', 'error'
        )
        response = exception.as_xml()
        stream.write(response)
        return

    response = command.handle_xml(tree)

    write_success = True
    if isinstance(response, bytes):
        write_success = stream.write(response)
    else:
        # Generator/iterable responses are streamed chunk by chunk;
        # stop writing as soon as one chunk fails.
        for data in response:
            write_success = stream.write(data)
            if not write_success:
                break

    scan_id = tree.get('scan_id')
    if self.scan_exists(scan_id) and command_name == "get_scans":
        if write_success:
            logger.debug(
                '%s: Results sent successfully to the client. Cleaning '
                'temporary result list.',
                scan_id,
            )
            self.scan_collection.clean_temp_result_list(scan_id)
        else:
            logger.debug(
                '%s: Failed sending results to the client. Restoring '
                'result list into the cache.',
                scan_id,
            )
            self.scan_collection.restore_temp_result_list(scan_id)
def check(self):
    """Perform a scanner self check. Must be implemented by a
    subclass; the base implementation always raises."""
    raise NotImplementedError
def run(self) -> None:
    """Starts the Daemon, handling commands until interrupted.

    Main loop: every SCHEDULER_CHECK_PERIOD seconds run the subclass
    scheduler, clean forgotten scans, launch queued scans and reap
    finished scan processes. Ctrl-C terminates the loop.
    """
    try:
        while True:
            time.sleep(SCHEDULER_CHECK_PERIOD)
            self.scheduler()
            self.clean_forgotten_scans()
            self.start_queued_scans()
            self.wait_for_children()
    except KeyboardInterrupt:
        logger.info("Received Ctrl-C shutting-down ...")
def start_queued_scans(self) -> None:
    """Starts a queued scan if it is allowed.

    Launches queued scans one by one in separate processes as long as
    the max-scans and free-memory limits permit; stops at the first
    scan that is not allowed to run yet.
    """
    current_queued_scans = self.get_count_queued_scans()
    if not current_queued_scans:
        return

    if not self.initialized:
        logger.info(
            "Queued task can not be started because a feed "
            "update is being performed."
        )
        return

    logger.info('Currently %d queued scans.', current_queued_scans)

    for scan_id in self.scan_collection.ids_iterator():
        # Limits are re-evaluated per scan since each started scan
        # consumes a process slot and memory.
        scan_allowed = (
            self.is_new_scan_allowed() and self.is_enough_free_memory()
        )
        scan_is_queued = self.get_scan_status(scan_id) == ScanStatus.QUEUED

        if scan_is_queued and scan_allowed:
            try:
                self.scan_collection.unpickle_scan_info(scan_id)
            except OspdCommandError as e:
                logger.error("Start scan error %s", e)
                self.stop_scan(scan_id)
                continue

            scan_func = self.start_scan
            scan_process = create_process(func=scan_func, args=(scan_id,))
            self.scan_processes[scan_id] = scan_process
            scan_process.start()
            self.set_scan_status(scan_id, ScanStatus.INIT)

            current_queued_scans = current_queued_scans - 1
            self.last_scan_start_time = time.time()
            logger.info('Starting scan %s.', scan_id)
        elif scan_is_queued and not scan_allowed:
            return
def is_new_scan_allowed(self) -> bool:
    """Check if max_scans has been reached.

    Returns:
        True if a new scan can be launched (max_scans == 0 disables
        the limit).
    """
    if (self.max_scans != 0) and (
        len(self.scan_processes) >= self.max_scans
    ):
        logger.info(
            'Not possible to run a new scan. Max scan limit set '
            'to %d reached.',
            self.max_scans,
        )
        return False

    return True
def is_enough_free_memory(self) -> bool:
    """Check if there is enough free memory in the system to run
    a new scan. The necessary memory is a rough calculation and very
    conservative.

    Returns:
        True if there is enough memory for a new scan.
    """
    if not self.min_free_mem_scan_queue:
        return True

    # If min_free_mem_scan_queue option is set, also wait some time
    # between scans. Consider the case in which the last scan
    # finished in a few seconds and there is no need to wait.
    time_between_start_scan = time.time() - self.last_scan_start_time
    if (
        time_between_start_scan < MIN_TIME_BETWEEN_START_SCAN
        and self.get_count_running_scans()
    ):
        logger.debug(
            'Not possible to run a new scan right now, a scan have been '
            'just started.'
        )
        return False

    # Available memory in MB.
    free_mem = psutil.virtual_memory().available / (1024 * 1024)

    if free_mem > self.min_free_mem_scan_queue:
        return True

    logger.info(
        'Not possible to run a new scan. Not enough free memory. '
        'Only %d MB available but at least %d are required',
        free_mem,
        self.min_free_mem_scan_queue,
    )

    return False
def scheduler(self):
    """Should be implemented by subclass in case of need
    to run tasks periodically. Called once per main-loop iteration;
    the base implementation does nothing."""
def wait_for_children(self):
    """Join terminated scan processes (non-blocking) so zombie
    processes release their resources."""
    for scan_process in self.scan_processes.values():
        scan_process.join(0)
def create_scan(
    self,
    scan_id: str,
    targets: Dict,
    options: Optional[Dict],
    vt_selection: Dict,
) -> Optional[str]:
    """Creates a new scan.

    Arguments:
        scan_id: Requested id for the new scan.
        targets: Target to scan.
        options: Miscellaneous scan options supplied via
                 XML element.
        vt_selection: Selected vulnerability tests and parameters.

    Returns:
        New scan's ID. None if the scan_id already exists.
    """
    status = None
    scan_exists = self.scan_exists(scan_id)
    if scan_id and scan_exists:
        status = self.get_scan_status(scan_id)
        logger.info(
            "Scan %s exists with status %s.", scan_id, status.name.lower()
        )
        return

    return self.scan_collection.create_scan(
        scan_id, targets, options, vt_selection
    )
def get_scan_options(self, scan_id: str) -> str:
    """Return the options stored for the given scan."""
    options = self.scan_collection.get_options(scan_id)
    return options
def set_scan_option(self, scan_id: str, name: str, value: Any) -> None:
    """Store a single option value for the given scan."""
    result = self.scan_collection.set_option(scan_id, name, value)
    return result
def set_scan_total_hosts(self, scan_id: str, count_total: int) -> None:
    """Sets a scan's total hosts. Allows the scanner to update
    the total count of hosts to be scanned."""
    self.scan_collection.update_count_total(scan_id, count_total)
def set_scan_total_excluded_hosts(
    self, scan_id: str, excluded_hosts: int
) -> None:
    """Sets a scan's total excluded hosts. Allows the scanner to update
    the total excluded count of hosts from the hosts to be scanned."""
    self.scan_collection.update_count_excluded(scan_id, excluded_hosts)
def clean_forgotten_scans(self) -> None:
    """Check for old stopped or finished scans which have not been
    deleted and delete them if they are older than the configured
    scaninfo_store_time (in hours). A value of 0 disables cleanup."""
    if not self.scaninfo_store_time:
        return

    # Materialize the ids first because delete_scan mutates the
    # collection while we iterate.
    for scan_id in list(self.scan_collection.ids_iterator()):
        end_time = int(self.get_scan_end_time(scan_id))
        scan_status = self.get_scan_status(scan_id)

        if (
            scan_status == ScanStatus.STOPPED
            or scan_status == ScanStatus.FINISHED
            or scan_status == ScanStatus.INTERRUPTED
        ) and end_time:
            stored_time = int(time.time()) - end_time
            if stored_time > self.scaninfo_store_time * 3600:
                logger.debug(
                    'Scan %s is older than %d hours and seems have been '
                    'forgotten. Scan info will be deleted from the '
                    'scan table',
                    scan_id,
                    self.scaninfo_store_time,
                )
                self.delete_scan(scan_id)
def check_scan_process(self, scan_id: str) -> None:
    """Check the scan's process, and terminate the scan if not alive.

    A scan whose process died before reaching FINISHED progress (and
    that was not stopped by the client) is flagged as interrupted; a
    finished process is joined to release its resources.
    """
    status = self.get_scan_status(scan_id)
    if status == ScanStatus.QUEUED:
        return

    scan_process = self.scan_processes.get(scan_id)
    progress = self.get_scan_progress(scan_id)

    if (
        progress < ScanProgress.FINISHED
        and scan_process
        and not scan_process.is_alive()
    ):
        if not status == ScanStatus.STOPPED:
            self.add_scan_error(
                scan_id, name="", host="", value="Scan process Failure"
            )

            logger.info(
                "%s: Scan process is dead and its progress is %d",
                scan_id,
                progress,
            )
            self.interrupt_scan(scan_id)

    elif progress == ScanProgress.FINISHED:
        scan_process.join(0)

    logger.debug(
        "%s: Check scan process: \n\tProgress %d\n\t Status: %s",
        scan_id,
        progress,
        status.name,
    )
def get_count_queued_scans(self) -> int:
    """Get the amount of scans with queued status."""
    count = 0
    for scan_id in self.scan_collection.ids_iterator():
        if self.get_scan_status(scan_id) == ScanStatus.QUEUED:
            count += 1
    return count
def get_count_running_scans(self) -> int:
    """Get the amount of scans with INIT/RUNNING status."""
    count = 0
    for scan_id in self.scan_collection.ids_iterator():
        status = self.get_scan_status(scan_id)
        if status == ScanStatus.RUNNING or status == ScanStatus.INIT:
            count += 1
    return count
def get_scan_progress(self, scan_id: str) -> int:
    """Gives a scan's current progress value."""
    progress = self.scan_collection.get_progress(scan_id)
    logger.debug('%s: Current scan progress: %s,', scan_id, progress)
    return progress
def get_scan_host(self, scan_id: str) -> str:
    """Return the target host list stored for the given scan."""
    hosts = self.scan_collection.get_host_list(scan_id)
    return hosts
def get_scan_ports(self, scan_id: str) -> str:
    """Return the port list configured for the given scan."""
    ports = self.scan_collection.get_ports(scan_id)
    return ports
def get_scan_exclude_hosts(self, scan_id: str):
    """Return the excluded host list stored for the scan's target."""
    excluded = self.scan_collection.get_exclude_hosts(scan_id)
    return excluded
def get_scan_credentials(self, scan_id: str) -> Dict:
    """Return the credential dictionary stored for the scan's target."""
    credentials = self.scan_collection.get_credentials(scan_id)
    return credentials
def get_scan_target_options(self, scan_id: str) -> Dict:
    """Return the target option dictionary stored for the scan."""
    target_options = self.scan_collection.get_target_options(scan_id)
    return target_options
def get_scan_vts(self, scan_id: str) -> Dict:
    """Return the VT selection stored for the given scan."""
    vts = self.scan_collection.get_vts(scan_id)
    return vts
def get_scan_start_time(self, scan_id: str) -> str:
    """Return the stored start time of the given scan."""
    start_time = self.scan_collection.get_start_time(scan_id)
    return start_time
def get_scan_end_time(self, scan_id: str) -> str:
    """Return the stored end time of the given scan."""
    end_time = self.scan_collection.get_end_time(scan_id)
    return end_time
def add_scan_log(
    self,
    scan_id: str,
    host: str = '',
    hostname: str = '',
    name: str = '',
    value: str = '',
    port: str = '',
    test_id: str = '',
    qod: str = '',
    uri: str = '',
) -> None:
    """Adds a log result to scan_id scan."""
    self.scan_collection.add_result(
        scan_id,
        ResultType.LOG,
        host,
        hostname,
        name,
        value,
        port,
        test_id,
        # Log results carry a fixed '0.0' severity (compare
        # add_scan_alarm, which passes a real severity here).
        '0.0',
        qod,
        uri,
    )
def add_scan_error(
    self,
    scan_id: str,
    host: str = '',
    hostname: str = '',
    name: str = '',
    value: str = '',
    port: str = '',
    test_id='',
    uri: str = '',
) -> None:
    """Adds an error result to scan_id scan."""
    # NOTE(review): sibling helpers (add_scan_log/add_scan_alarm) pass
    # severity and qod before uri, but here uri is the 9th positional
    # argument and may land in add_result's severity slot — confirm
    # against ScanCollection.add_result's signature.
    self.scan_collection.add_result(
        scan_id,
        ResultType.ERROR,
        host,
        hostname,
        name,
        value,
        port,
        test_id,
        uri,
    )
def add_scan_host_detail(
    self,
    scan_id: str,
    host: str = '',
    hostname: str = '',
    name: str = '',
    value: str = '',
    uri: str = '',
) -> None:
    """Adds a host detail result to scan_id scan."""
    # NOTE(review): compared with add_scan_log/add_scan_alarm, uri is
    # passed in the position those siblings use for port — confirm
    # against ScanCollection.add_result's parameter order.
    self.scan_collection.add_result(
        scan_id, ResultType.HOST_DETAIL, host, hostname, name, value, uri
    )
def add_scan_alarm(
    self,
    scan_id: str,
    host: str = '',
    hostname: str = '',
    name: str = '',
    value: str = '',
    port: str = '',
    test_id: str = '',
    severity: str = '',
    qod: str = '',
    uri: str = '',
) -> None:
    """Adds an alarm result to scan_id scan."""
    self.scan_collection.add_result(
        scan_id,
        ResultType.ALARM,
        host,
        hostname,
        name,
        value,
        port,
        test_id,
        severity,
        qod,
        uri,
    )
ospd-openvas-22.9.0/ospd/parser.py 0000664 0000000 0000000 00000023652 15011310720 0017033 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
import argparse
import logging
from pathlib import Path
from ospd.config import Config
# Default file locations as used by a OpenVAS default installation
DEFAULT_KEY_FILE = "/var/lib/gvm/private/CA/serverkey.pem"
DEFAULT_CERT_FILE = "/var/lib/gvm/CA/servercert.pem"
DEFAULT_CA_FILE = "/var/lib/gvm/CA/cacert.pem"
DEFAULT_PORT = 0
DEFAULT_ADDRESS = "0.0.0.0"
DEFAULT_NICENESS = 10
DEFAULT_UNIX_SOCKET_MODE = "0o770"
DEFAULT_CONFIG_PATH = "~/.config/ospd.conf"
DEFAULT_LOG_CONFIG_PATH = "~/.config/ospd-logging.conf"
DEFAULT_UNIX_SOCKET_PATH = "/run/ospd/ospd-openvas.sock"
DEFAULT_PID_PATH = "/run/ospd/ospd.pid"
DEFAULT_LOCKFILE_DIR_PATH = "/run/ospd"
DEFAULT_STREAM_TIMEOUT = 10 # ten seconds
DEFAULT_SCANINFO_STORE_TIME = 0 # in hours
DEFAULT_MAX_SCAN = 0 # 0 = disable
DEFAULT_MIN_FREE_MEM_SCAN_QUEUE = 0 # 0 = Disable
DEFAULT_MAX_QUEUED_SCANS = 0 # 0 = Disable
DEFAULT_MQTT_BROKER_ADDRESS = "localhost"
DEFAULT_MQTT_BROKER_PORT = 1883
ParserType = argparse.ArgumentParser
Arguments = argparse.Namespace
logger = logging.getLogger(__name__)
class CliParser:
def __init__(self, description: str) -> None:
    """Create a command-line arguments parser for OSPD.

    Arguments:
        description: Program description shown in --help; also used as
            the config-file section name (see _load_config).
    """
    self._name = description
    parser = argparse.ArgumentParser(description=description)

    parser.add_argument(
        '--version', action='store_true', help='Print version then exit.'
    )

    # --- configuration files ---
    parser.add_argument(
        '-s',
        '--config',
        nargs='?',
        help=f'Configuration file path (default: {DEFAULT_CONFIG_PATH})',
    )
    parser.add_argument(
        '--log-config',
        nargs='?',
        default=DEFAULT_LOG_CONFIG_PATH,
        help='Log configuration file path (default: %(default)s)',
    )

    # --- transport / listening options ---
    parser.add_argument(
        '-p',
        '--port',
        default=DEFAULT_PORT,
        type=self.network_port,
        help='TCP Port to listen on. Default: %(default)s',
    )
    parser.add_argument(
        '-b',
        '--bind-address',
        default=DEFAULT_ADDRESS,
        dest='address',
        help='Address to listen on. Default: %(default)s',
    )
    parser.add_argument(
        '-u',
        '--unix-socket',
        default=DEFAULT_UNIX_SOCKET_PATH,
        help='Unix file socket to listen on. Default: %(default)s',
    )
    parser.add_argument(
        '--pid-file',
        default=DEFAULT_PID_PATH,
        help=(
            'Location of the file for the process ID. Default: %(default)s'
        ),
    )
    parser.add_argument(
        '--lock-file-dir',
        default=DEFAULT_LOCKFILE_DIR_PATH,
        help='Directory where lock files are placed. Default: %(default)s',
    )
    parser.add_argument(
        '-m',
        '--socket-mode',
        default=DEFAULT_UNIX_SOCKET_MODE,
        help='Unix file socket mode. Default: %(default)s',
    )

    # --- TLS material ---
    parser.add_argument(
        '-k',
        '--key-file',
        default=DEFAULT_KEY_FILE,
        help='Server key file. Default: %(default)s',
    )
    parser.add_argument(
        '-c',
        '--cert-file',
        default=DEFAULT_CERT_FILE,
        help='Server cert file. Default: %(default)s',
    )
    parser.add_argument(
        '--ca-file',
        default=DEFAULT_CA_FILE,
        help='CA cert file. Default: %(default)s',
    )

    # --- logging & runtime behavior ---
    parser.add_argument(
        '-L',
        '--log-level',
        default='INFO',
        type=self.log_level,
        help='Wished level of logging. Default: %(default)s',
    )
    parser.add_argument(
        '-f',
        '--foreground',
        action='store_true',
        help='Run in foreground and logs all messages to console.',
    )
    parser.add_argument(
        '-t',
        '--stream-timeout',
        default=DEFAULT_STREAM_TIMEOUT,
        type=int,
        help='Stream timeout. Default: %(default)s',
    )
    parser.add_argument(
        '-l', '--log-file', help='Path to the logging file.'
    )
    parser.add_argument(
        '--niceness',
        default=DEFAULT_NICENESS,
        type=int,
        help='Start the scan with the given niceness. Default %(default)s',
    )
    parser.add_argument(
        '--scaninfo-store-time',
        default=DEFAULT_SCANINFO_STORE_TIME,
        type=int,
        help=(
            'Time in hours a scan is stored before being considered '
            'forgotten and being delete from the scan table. '
            'Default %(default)s, disabled.'
        ),
    )
    parser.add_argument(
        '--list-commands',
        action='store_true',
        help='Display all protocol commands',
    )

    # --- scan queue limits ---
    parser.add_argument(
        '--max-scans',
        default=DEFAULT_MAX_SCAN,
        type=int,
        help=(
            'Max. amount of parallel task that can be started. '
            'Default %(default)s, disabled'
        ),
    )
    parser.add_argument(
        '--min-free-mem-scan-queue',
        default=DEFAULT_MIN_FREE_MEM_SCAN_QUEUE,
        type=int,
        help=(
            'Minimum free memory in MB required to run the scan. '
            'If no enough free memory is available, the scan queued. '
            'Default %(default)s, disabled'
        ),
    )
    parser.add_argument(
        '--max-queued-scans',
        default=DEFAULT_MAX_QUEUED_SCANS,
        type=int,
        help=(
            'Maximum number allowed of queued scans before '
            'starting to reject new scans. '
            'Default %(default)s, disabled'
        ),
    )

    # --- MQTT (Notus-Scanner results) ---
    parser.add_argument(
        '--mqtt-broker-address',
        default=DEFAULT_MQTT_BROKER_ADDRESS,
        type=str,
        help=(
            'Broker address to connect to for MQTT communication.'
            ' Neccessary to get results from Notus-Scanner.Default'
            ' %(default)s'
        ),
    )
    parser.add_argument(
        '--mqtt-broker-port',
        default=DEFAULT_MQTT_BROKER_PORT,
        type=self.network_port,
        help=(
            'Broker port to connect to for MQTT communication.'
            ' Neccessary to get results from Notus-Scanner.Default'
            'Default %(default)s'
        ),
    )
    parser.add_argument(
        '--mqtt-broker-username',
        default=None,
        type=str,
        help=(
            'Username to connect to MQTT broker for MQTT communication.'
            'Default %(default)s'
        ),
    )
    parser.add_argument(
        '--mqtt-broker-password',
        default=None,
        type=str,
        help=(
            'PASSWORD to connect to MQTT broker for MQTT communication.'
            'Default %(default)s'
        ),
    )

    # --- feed handling ---
    parser.add_argument(
        '--feed-updater',
        default="openvas",
        choices=['openvas', 'nasl-cli'],
        help=(
            'Sets the method of updating the feed.'
            ' Can either be openvas or nasl-cli.'
            ' Default: %(default)s.'
        ),
    )
    parser.add_argument(
        '-x',
        '--signature-check',
        default=False,
        action='store_true',
        help=('Enable feed signature check.' ' Default: %(default)s.'),
    )

    self.parser = parser
def network_port(self, string: str) -> int:
    """Check if provided string is a valid network port.

    Arguments:
        string: Command line value to convert.

    Returns:
        The port number as int.

    Raises:
        argparse.ArgumentTypeError: If the value is not an integer or
            is outside of the ]0,65535] interval.
    """
    try:
        value = int(string)
    except ValueError:
        # Raise the argparse error type for non-numeric input too, so
        # the CLI reports a clean message instead of a bare ValueError.
        raise argparse.ArgumentTypeError(
            'port must be in ]0,65535] interval'
        ) from None
    if not 0 < value <= 65535:
        raise argparse.ArgumentTypeError(
            'port must be in ]0,65535] interval'
        )
    return value
def log_level(self, string: str) -> str:
    """Check if provided string is a valid log level.

    Returns:
        The upper-cased log level name.

    Raises:
        argparse.ArgumentTypeError: If the value is not a log level.
    """
    # The original used hasattr(logging, ...), which also accepted
    # non-level attributes such as BASIC_FORMAT; require the attribute
    # to be a real numeric logging level.
    level = getattr(logging, string.upper(), None)
    if not isinstance(level, int):
        raise argparse.ArgumentTypeError(
            'log level must be one of {debug,info,warning,error,critical}'
        )
    return string.upper()
def _set_defaults(self, configfilename=None) -> None:
    """Load the config file (if any) and apply its values as argparse
    defaults, overriding the built-in defaults."""
    self._config = self._load_config(configfilename)
    self.parser.set_defaults(**self._config.defaults())
def _load_config(self, configfile: str) -> Config:
    """Load a Config from configfile (or DEFAULT_CONFIG_PATH).

    A missing file is an error only when explicitly requested by the
    user; a missing default file is silently ignored.

    Raises:
        RuntimeError: If the config file exists but cannot be parsed.
    """
    config = Config()

    configpath = Path(configfile or DEFAULT_CONFIG_PATH)

    if not configpath.expanduser().resolve().exists():
        if configfile:
            # user has passed an config file
            # print error and exit
            self.parser.error(f'Config file {configpath} does not exist')
        else:
            # NOTE(review): configfile is None in this branch, so this
            # logs "None" rather than the default path — confirm intent.
            logger.debug('Ignoring non existing config file %s', configfile)
        return config

    try:
        # The section matching the parser description takes precedence.
        config.load(configpath, def_section=self._name)
        logger.debug('Loaded config %s', configfile)
    except Exception as e:  # pylint: disable=broad-except
        raise RuntimeError(
            f'Error while parsing config file {configfile}. Error was {e}'
        ) from None

    return config
def parse_arguments(self, args=None):
    """Parse command line arguments, layering config-file values as
    defaults underneath explicitly passed options."""
    # Parse args to get the config file path passed as option
    _args, _ = self.parser.parse_known_args(args)

    # Load the defaults from the config file if it exists.
    # This override also what it was passed as cmd option.
    self._set_defaults(_args.config)
    args, _ = self.parser.parse_known_args(args)

    return args
def create_parser(description: str) -> CliParser:
    """Factory returning a CliParser configured with description."""
    return CliParser(description)
ospd-openvas-22.9.0/ospd/protocol.py 0000664 0000000 0000000 00000025021 15011310720 0017370 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
"""Helper classes for parsing and creating OSP XML requests and responses"""
from typing import Dict, Union, List, Any
from xml.etree.ElementTree import SubElement, Element, XMLPullParser
from ospd.errors import OspdError
class RequestParser:
    """Incremental detector for the end of an OSP request.

    Feeds chunks into an XMLPullParser and reports once an end tag
    matching the first opened (root) element has been seen.
    """

    def __init__(self):
        self._parser = XMLPullParser(['start', 'end'])
        self._root_element = None

    def has_ended(self, data: bytes) -> bool:
        """Feed more bytes; return True once the root element closed."""
        self._parser.feed(data)
        for event, element in self._parser.read_events():
            opening = event == 'start'
            if opening and self._root_element is None:
                # The first opened element is the request root.
                self._root_element = element
            elif not opening and self._root_element is not None:
                if element.tag == self._root_element.tag:
                    return True
        return False
class OspRequest:
    """Helpers translating OSP request XML elements into plain dicts."""

    @staticmethod
    def process_vts_params(
        scanner_vts: Element,
    ) -> Dict[str, Union[Dict[str, str], List]]:
        """Convert a VT selection element into a dictionary.

        Each vt_single child becomes an entry mapping the VT id to a
        dict of its parameter values; all vt_group filters are
        collected under the 'vt_groups' key.

        Example result:
            {'vt1': {},
             'vt2': {'value_id': 'value'},
             'vt_groups': ['family=debian', 'family=general']}

        Raises:
            OspdError: if a VT parameter lacks an id attribute or a
                VT group lacks a filter attribute.
        """
        selection = {}  # type: Dict
        group_filters = []
        for vt_element in scanner_vts:
            tag = vt_element.tag
            if tag == 'vt_single':
                single_id = vt_element.attrib.get('id')
                prefs = {}
                for pref in vt_element:
                    pref_id = pref.attrib.get('id')
                    if not pref_id:
                        raise OspdError(
                            'Invalid VT preference. No attribute id'
                        )
                    prefs[pref_id] = pref.text or ''
                selection[single_id] = prefs
            if tag == 'vt_group':
                group_filter = vt_element.attrib.get('filter', None)
                if group_filter is None:
                    raise OspdError('Invalid VT group. No filter given.')
                group_filters.append(group_filter)
        selection['vt_groups'] = group_filters
        return selection

    @staticmethod
    def process_credentials_elements(cred_tree: Element) -> Dict:
        """Convert a credentials element into a per-service dict.

        Example result:
            {'ssh': {'type': ..., 'port': ..., 'username': ...,
                     'password': ...},
             'smb': {'type': ..., 'username': ..., 'password': ...}}
        """
        credentials = {}  # type: Dict
        for cred_element in cred_tree:
            service_name = cred_element.attrib.get('service')
            entry = {'type': cred_element.attrib.get('type')}
            if service_name == 'ssh':
                # Only ssh credentials carry a port attribute.
                entry['port'] = cred_element.attrib.get('port')
            for child in cred_element:
                entry[child.tag] = child.text or ""
            credentials[service_name] = entry
        return credentials

    @staticmethod
    def process_alive_test_methods(
        alive_test_tree: Element, options: Dict
    ) -> None:
        """Copy alive-test method flags (icmp, tcp_ack, tcp_syn, arp,
        consider_alive) from the XML element into options, keeping only
        flags that carry text content."""
        known_flags = ('icmp', 'tcp_ack', 'tcp_syn', 'arp', 'consider_alive')
        for method in alive_test_tree:
            if method.tag in known_flags and method.text is not None:
                options[method.tag] = method.text

    @classmethod
    def process_target_element(cls, scanner_target: Element) -> Dict:
        """Convert a target element into a dict describing the scan target.

        Example result:
            {'hosts': '192.168.0.0/24',
             'ports': '22',
             'credentials': {...},
             'exclude_hosts': '',
             'finished_hosts': '',
             'options': {...}}

        Returns None when the element has children but no hosts.

        Raises:
            OspdError: if the target element is empty.
        """
        if scanner_target:
            hosts = None
            ports = ''
            exclude_hosts = ''
            finished_hosts = ''
            credentials = {}  # type: Dict
            options = {}
            for child in scanner_target:
                tag = child.tag
                if tag == 'hosts':
                    hosts = child.text
                elif tag == 'exclude_hosts':
                    exclude_hosts = child.text
                elif tag == 'finished_hosts':
                    finished_hosts = child.text
                elif tag == 'ports':
                    ports = child.text
                elif tag == 'credentials':
                    credentials = cls.process_credentials_elements(child)
                elif tag == 'alive_test_methods':
                    options['alive_test_methods'] = '1'
                    cls.process_alive_test_methods(child, options)
                elif tag == 'alive_test':
                    options['alive_test'] = child.text
                elif tag == 'alive_test_ports':
                    options['alive_test_ports'] = child.text
                elif tag == 'reverse_lookup_unify':
                    options['reverse_lookup_unify'] = child.text
                elif tag == 'reverse_lookup_only':
                    options['reverse_lookup_only'] = child.text
            if hosts:
                return {
                    'hosts': hosts,
                    'ports': ports,
                    'credentials': credentials,
                    'exclude_hosts': exclude_hosts,
                    'finished_hosts': finished_hosts,
                    'options': options,
                }
        else:
            raise OspdError('No target to scan')
class OspResponse:
    """Helpers building OSP response XML."""

    @staticmethod
    def create_scanner_params_xml(scanner_params: Dict[str, Any]) -> Element:
        """Build a <scanner_params> element from the daemon's param dict.

        Each entry becomes a <scanner_param id=... type=...> with
        name/description/default/mandatory children.
        """
        root = Element('scanner_params')
        for param_id, param in scanner_params.items():
            param_element = SubElement(root, 'scanner_param')
            param_element.set('id', param_id)
            param_element.set('type', param['type'])
            for field in ('name', 'description', 'default', 'mandatory'):
                child = SubElement(param_element, field)
                child.text = str(param[field])
        return root
ospd-openvas-22.9.0/ospd/resultlist.py 0000664 0000000 0000000 00000006356 15011310720 0017753 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
# pylint: disable=too-many-lines
"""Class for handling list of resutls."""
from collections import OrderedDict
from typing import Dict
from ospd.misc import ResultType
class ResultList:
    """Container collecting scan results as ordered dictionaries."""

    def __init__(self):
        # Result dicts in insertion order.
        self._result_list = []

    def __len__(self):
        return len(self._result_list)

    def __iter__(self):
        return iter(self._result_list)

    def add_scan_host_detail_to_list(
        self,
        host: str = '',
        hostname: str = '',
        name: str = '',
        value: str = '',
        uri: str = '',
    ) -> None:
        """Adds a host detail result to the result list."""
        # NOTE(review): uri is passed positionally and lands in the
        # 'port' slot of add_result_to_list — confirm this is intended.
        self.add_result_to_list(
            ResultType.HOST_DETAIL,
            host,
            hostname,
            name,
            value,
            uri,
        )

    def add_scan_error_to_list(
        self,
        host: str = '',
        hostname: str = '',
        name: str = '',
        value: str = '',
        port: str = '',
        test_id='',
        uri: str = '',
    ) -> None:
        """Adds an error result to the result list."""
        # NOTE(review): uri is passed positionally and lands in the
        # 'severity' slot of add_result_to_list — confirm intended.
        self.add_result_to_list(
            ResultType.ERROR,
            host,
            hostname,
            name,
            value,
            port,
            test_id,
            uri,
        )

    def add_scan_log_to_list(
        self,
        host: str = '',
        hostname: str = '',
        name: str = '',
        value: str = '',
        port: str = '',
        test_id: str = '',
        qod: str = '',
        uri: str = '',
    ) -> None:
        """Adds a log result (fixed severity '0.0') to the result list."""
        self.add_result_to_list(
            ResultType.LOG,
            host,
            hostname,
            name,
            value,
            port,
            test_id,
            '0.0',
            qod,
            uri,
        )

    def add_scan_alarm_to_list(
        self,
        host: str = '',
        hostname: str = '',
        name: str = '',
        value: str = '',
        port: str = '',
        test_id: str = '',
        severity: str = '',
        qod: str = '',
        uri: str = '',
    ) -> None:
        """Adds an alarm result to the result list."""
        self.add_result_to_list(
            ResultType.ALARM,
            host,
            hostname,
            name,
            value,
            port,
            test_id,
            severity,
            qod,
            uri,
        )

    def add_result_to_list(
        self,
        result_type: int,
        host: str = '',
        hostname: str = '',
        name: str = '',
        value: str = '',
        port: str = '',
        test_id: str = '',
        severity: str = '',
        qod: str = '',
        uri: str = '',
    ) -> None:
        """Build a result entry and append it to the list."""
        entry = OrderedDict(
            [
                ('type', result_type),
                ('name', name),
                ('severity', severity),
                ('test_id', test_id),
                ('value', value),
                ('host', host),
                ('hostname', hostname),
                ('port', port),
                ('qod', qod),
                ('uri', uri),
            ]
        )  # type: Dict
        self._result_list.append(entry)
ospd-openvas-22.9.0/ospd/scan.py 0000664 0000000 0000000 00000051402 15011310720 0016455 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
import logging
import multiprocessing
import time
import uuid
from pprint import pformat
from collections import OrderedDict
from enum import Enum, IntEnum
from typing import List, Any, Dict, Iterator, Optional, Iterable, Union
from ospd.network import target_str_to_list
from ospd.datapickler import DataPickler
from ospd.errors import OspdCommandError
logger = logging.getLogger(__name__)
class ScanStatus(Enum):
    """Scan status."""

    # Lifecycle states of a scan as tracked by ScanCollection.
    QUEUED = 0
    INIT = 1
    RUNNING = 2
    STOPPED = 3
    FINISHED = 4
    INTERRUPTED = 5
class ScanProgress(IntEnum):
    """Scan or host progress."""

    # Progress is a percentage in [0, 100]; negative values are
    # sentinel states, not percentages.
    FINISHED = 100
    INIT = 0
    DEAD_HOST = -1
    INTERRUPTED = -2
class ScanCollection:
    """Scans collection, managing scans and results read and write, exposing
    only needed information.

    Each scan has meta-information such as scan ID, current progress (from 0 to
    100), start time, end time, scan target and options and a list of results.

    There are 4 types of results: Alarms, Logs, Errors and Host Details.

    The per-scan dicts are multiprocessing.Manager proxies, so mutated
    containers must be re-assigned to the table entry to propagate
    changes to the parent process (see the "propagate" comments below).

    Todo:
    - Better checking for Scan ID existence and handling otherwise.
    - More data validation.
    - Mutex access per table/scan_info.
    """

    def __init__(self, file_storage_dir: str) -> None:
        """Initialize the Scan Collection."""
        self.data_manager = (
            None
        )  # type: Optional[multiprocessing.managers.SyncManager]
        self.scans_table = dict()  # type: Dict
        self.file_storage_dir = file_storage_dir
        self.scan_collection_lock = (
            None
        )  # type: Optional[multiprocessing.managers.Lock]

    def init(self):
        # Deferred setup; multiprocessing.Manager() spawns a helper
        # process, so it is not done in __init__.
        self.data_manager = multiprocessing.Manager()
        self.scan_collection_lock = self.data_manager.RLock()

    def add_result(
        self,
        scan_id: str,
        result_type: int,
        host: str = '',
        hostname: str = '',
        name: str = '',
        value: str = '',
        port: str = '',
        test_id: str = '',
        severity: str = '',
        qod: str = '',
        uri: str = '',
    ) -> None:
        """Add a result to a scan in the table."""
        assert scan_id
        # A result must carry at least a name or a value.
        assert len(name) or len(value)
        result = OrderedDict()  # type: Dict
        result['type'] = result_type
        result['name'] = name
        result['severity'] = severity
        result['test_id'] = test_id
        result['value'] = value
        result['host'] = host
        result['hostname'] = hostname
        result['port'] = port
        result['qod'] = qod
        result['uri'] = uri
        results = self.scans_table[scan_id]['results']
        results.append(result)
        # Set scan_info's results to propagate results to parent process.
        self.scans_table[scan_id]['results'] = results

    def add_result_list(
        self, scan_id: str, result_list: Iterable[Dict[str, str]]
    ) -> None:
        """
        Add a batch of results to the result's table for the corresponding
        scan_id
        """
        results = self.scans_table[scan_id]['results']
        results.extend(result_list)
        # Set scan_info's results to propagate results to parent process.
        self.scans_table[scan_id]['results'] = results

    def remove_hosts_from_target_progress(
        self, scan_id: str, hosts: List
    ) -> None:
        """Remove a list of hosts from the main scan progress table to avoid
        the hosts to be included in the calculation of the scan progress"""
        if not hosts:
            return
        logger.debug(
            '%s: Remove the following hosts from the target list, '
            'as they are already finished or are dead: %s',
            scan_id,
            pformat(hosts),
        )
        target = self.scans_table[scan_id].get('target_progress')
        for host in hosts:
            if host in target:
                del target[host]
        # Set scan_info's target_progress to propagate progresses
        # to parent process.
        self.scans_table[scan_id]['target_progress'] = target

    def set_progress(self, scan_id: str, progress: int) -> None:
        """Sets scan_id scan's progress (only values in ]0, 100])."""
        if progress > ScanProgress.INIT and progress <= ScanProgress.FINISHED:
            self.scans_table[scan_id]['progress'] = progress
        if progress == ScanProgress.FINISHED:
            self.scans_table[scan_id]['end_time'] = int(time.time())

    def set_host_progress(
        self, scan_id: str, host_progress_batch: Dict[str, int]
    ) -> None:
        """Merge a batch of per-host progress values into the scan."""
        host_progresses = self.scans_table[scan_id].get('target_progress')
        host_progresses.update(host_progress_batch)
        # Set scan_info's target_progress to propagate progresses
        # to parent process.
        self.scans_table[scan_id]['target_progress'] = host_progresses

    def set_host_finished(self, scan_id: str, hosts: List[str]) -> None:
        """Increase the amount of finished hosts which were alive."""
        logger.debug(
            '%s: Setting the following hosts as finished: %s',
            scan_id,
            pformat(hosts),
        )
        total_finished = len(hosts)
        count_alive = (
            self.scans_table[scan_id].get('count_alive') + total_finished
        )
        self.scans_table[scan_id]['count_alive'] = count_alive

    def set_host_dead(self, scan_id: str, hosts: List[str]) -> None:
        """Increase the amount of dead hosts."""
        logger.debug(
            '%s: Setting the following hosts as dead: %s',
            scan_id,
            pformat(hosts),
        )
        total_dead = len(hosts)
        count_dead = self.scans_table[scan_id].get('count_dead') + total_dead
        self.scans_table[scan_id]['count_dead'] = count_dead

    def set_amount_dead_hosts(self, scan_id: str, total_dead: int) -> None:
        """Increase the amount of dead hosts by a given number."""
        count_dead = self.scans_table[scan_id].get('count_dead') + total_dead
        self.scans_table[scan_id]['count_dead'] = count_dead

    def clean_temp_result_list(self, scan_id):
        """Clean the results stored in the temporary list."""
        self.scans_table[scan_id]['temp_results'] = list()

    def restore_temp_result_list(self, scan_id):
        """Add the results stored in the temporary list into the results
        list again."""
        result_aux = self.scans_table[scan_id].get('results', list())
        result_aux.extend(self.scans_table[scan_id].get('temp_results', list()))
        # Propagate results
        self.scans_table[scan_id]['results'] = result_aux
        self.clean_temp_result_list(scan_id)

    def results_iterator(
        self, scan_id: str, pop_res: bool = False, max_res: Optional[int] = None
    ) -> Iterator[Any]:
        """Returns an iterator over scan_id scan's results. If pop_res is True,
        it removed the fetched results from the list.

        If max_res is None, return all the results.
        Otherwise, if max_res = N > 0 return N as maximum number of results.

        max_res works only together with pop_results.
        """
        if pop_res and max_res:
            # Move the first max_res results into temp_results so they
            # can be restored if sending them to the client fails.
            result_aux = self.scans_table[scan_id].get('results', list())
            self.scans_table[scan_id]['results'] = result_aux[max_res:]
            self.scans_table[scan_id]['temp_results'] = result_aux[:max_res]
            return iter(self.scans_table[scan_id]['temp_results'])
        elif pop_res:
            self.scans_table[scan_id]['temp_results'] = self.scans_table[
                scan_id
            ].get('results', list())
            self.scans_table[scan_id]['results'] = list()
            return iter(self.scans_table[scan_id]['temp_results'])
        return iter(self.scans_table[scan_id]['results'])

    def ids_iterator(self) -> Iterator[str]:
        """Returns an iterator over the collection's scan IDS."""
        # Do not iterate over the scans_table because it can change
        # during iteration, since it is accessed by multiple processes.
        scan_id_list = list(self.scans_table)
        return iter(scan_id_list)

    def clean_up_pickled_scan_info(self) -> None:
        """Remove files of pickled scan info"""
        for scan_id in self.ids_iterator():
            if self.get_status(scan_id) == ScanStatus.QUEUED:
                self.remove_file_pickled_scan_info(scan_id)

    def remove_file_pickled_scan_info(self, scan_id: str) -> None:
        """Delete the pickled scan info file for the given scan."""
        pickler = DataPickler(self.file_storage_dir)
        pickler.remove_file(scan_id)

    def unpickle_scan_info(self, scan_id: str) -> None:
        """Unpickle a stored scan_inf corresponding to the scan_id
        and store it in the scan_table"""
        scan_info = self.scans_table.get(scan_id)
        scan_info_hash = scan_info.pop('scan_info_hash')
        pickler = DataPickler(self.file_storage_dir)
        unpickled_scan_info = pickler.load_data(scan_id, scan_info_hash)
        if not unpickled_scan_info:
            pickler.remove_file(scan_id)
            raise OspdCommandError(
                f'Not possible to unpickle stored scan info for {scan_id}',
                'start_scan',
            )
        # Initialize the runtime bookkeeping for the scan.
        scan_info['results'] = list()
        scan_info['temp_results'] = list()
        scan_info['progress'] = ScanProgress.INIT.value
        scan_info['target_progress'] = dict()
        scan_info['count_alive'] = 0
        scan_info['count_dead'] = 0
        scan_info['count_total'] = None
        scan_info['count_excluded'] = 0
        scan_info['excluded_simplified'] = None
        scan_info['target'] = unpickled_scan_info.pop('target')
        scan_info['vts'] = unpickled_scan_info.pop('vts')
        scan_info['options'] = unpickled_scan_info.pop('options')
        scan_info['start_time'] = int(time.time())
        scan_info['end_time'] = 0
        self.scans_table[scan_id] = scan_info
        pickler.remove_file(scan_id)

    def create_scan(
        self,
        scan_id: str = '',
        target: Optional[Dict] = None,
        options: Optional[Dict] = None,
        vts: Optional[Dict] = None,
    ) -> str:
        """Creates a new scan with provided scan information.

        @target: Target to scan.
        @options: Miscellaneous scan options supplied via
                  XML element.

        @return: Scan's ID. None if error occurs.
        """
        if not options:
            options = dict()
        credentials = target.pop('credentials')
        scan_info = self.data_manager.dict()  # type: Dict
        scan_info['status'] = ScanStatus.QUEUED
        scan_info['credentials'] = credentials
        scan_info['start_time'] = int(time.time())
        scan_info['end_time'] = 0
        # Target, options and vts are pickled to disk until the scan
        # is actually started (see unpickle_scan_info).
        scan_info_to_pickle = {
            'target': target,
            'options': options,
            'vts': vts,
        }
        if scan_id is None or scan_id == '':
            scan_id = str(uuid.uuid4())
        pickler = DataPickler(self.file_storage_dir)
        scan_info_hash = None
        try:
            scan_info_hash = pickler.store_data(scan_id, scan_info_to_pickle)
        except OspdCommandError as e:
            logger.error(e)
            return
        scan_info['scan_id'] = scan_id
        scan_info['scan_info_hash'] = scan_info_hash
        self.scans_table[scan_id] = scan_info
        return scan_id

    def set_status(self, scan_id: str, status: ScanStatus) -> None:
        """Sets scan_id scan's status."""
        self.scans_table[scan_id]['status'] = status
        if status == ScanStatus.STOPPED or status == ScanStatus.INTERRUPTED:
            self.scans_table[scan_id]['end_time'] = int(time.time())

    def get_status(self, scan_id: str) -> ScanStatus:
        """Get scan_id scans's status."""
        status = self.scans_table.get(scan_id, {}).get('status', None)
        if not status:
            logger.warning("Scan ID %s not found", scan_id)
        return status

    def get_options(self, scan_id: str) -> Dict:
        """Get scan_id scan's options list."""
        return self.scans_table[scan_id].get('options')

    def set_option(self, scan_id, name: str, value: Any) -> None:
        """Set a scan_id scan's name option to value."""
        self.scans_table[scan_id]['options'][name] = value

    def get_progress(self, scan_id: str) -> int:
        """Get a scan's current progress value."""
        return self.scans_table[scan_id].get('progress', ScanProgress.INIT)

    def get_count_dead(self, scan_id: str) -> int:
        """Get a scan's current dead host count."""
        return self.scans_table[scan_id]['count_dead']

    def get_count_alive(self, scan_id: str) -> int:
        """Get a scan's current alive host count."""
        return self.scans_table[scan_id]['count_alive']

    def update_count_total(self, scan_id: str, count_total: int) -> None:
        """Sets a scan's total hosts."""
        self.scans_table[scan_id]['count_total'] = count_total

    def update_count_excluded(self, scan_id: str, count_excluded: int) -> None:
        """Sets a scan's excluded host count."""
        self.scans_table[scan_id]['count_excluded'] = count_excluded

    def get_count_excluded(self, scan_id: str) -> int:
        """Get a scan's excluded host count."""
        count_excluded = self.scans_table[scan_id]['count_excluded']
        return count_excluded

    def get_count_total(self, scan_id: str) -> int:
        """Get a scan's total host count."""
        count_total = self.scans_table[scan_id]['count_total']
        # The value set by the server has priority over the value
        # calculated from the original target list by ospd.
        # As ospd is not intelligent enough to check the amount of valid
        # hosts, check for duplicated or invalid hosts, consider a negative
        # value set for the server, in case it detects an invalid target string
        # or a different amount than the original amount in the target list.
        if count_total == -1:
            count_total = 0
        # If the server does not set the total host count
        # ospd set the amount of host from the original host list.
        elif count_total is None:
            count_total = self.get_host_count(scan_id)
            self.update_count_total(scan_id, count_total)
        return count_total

    def get_current_target_progress(self, scan_id: str) -> Dict[str, int]:
        """Get a scan's current hosts progress"""
        return self.scans_table[scan_id]['target_progress']

    def simplify_exclude_host_count(self, scan_id: str) -> int:
        """Remove from exclude_hosts the received hosts in the finished_hosts
        list sent by the client.

        The finished hosts are sent also as exclude hosts for backward
        compatibility purposses.

        Return:
            Count of excluded host.
        """
        exc_hosts_list = target_str_to_list(self.get_exclude_hosts(scan_id))
        logger.debug(
            '%s: Excluded Hosts: %s',
            scan_id,
            pformat(exc_hosts_list),
        )
        finished_hosts_list = target_str_to_list(
            self.get_finished_hosts(scan_id)
        )
        logger.debug(
            '%s: Finished Hosts: %s',
            scan_id,
            pformat(finished_hosts_list),
        )
        # Remove finished hosts from excluded host list
        if finished_hosts_list and exc_hosts_list:
            for finished in finished_hosts_list:
                if finished in exc_hosts_list:
                    exc_hosts_list.remove(finished)
        # Remove excluded hosts which don't belong to the target list
        host_list = target_str_to_list(self.get_host_list(scan_id))
        excluded_simplified = 0
        invalid_exc_hosts = 0
        if exc_hosts_list:
            for exc_host in exc_hosts_list:
                if exc_host in host_list:
                    excluded_simplified += 1
                else:
                    invalid_exc_hosts += 1
        if invalid_exc_hosts > 0:
            logger.warning(
                "Please check the excluded host list. It contains hosts which "
                "do not belong to the target. This warning can be ignored if "
                "this was done on purpose (e.g. to exclude specific hostname)."
            )
        # Set scan_info's excluded simplified to propagate excluded count
        # to parent process.
        self.scans_table[scan_id]['excluded_simplified'] = excluded_simplified
        return excluded_simplified

    def get_simplified_exclude_host_count(self, scan_id: str) -> int:
        """Get a scan's excluded host count."""
        excluded_simplified = self.scans_table[scan_id]['excluded_simplified']
        # Check for None because it is the init value, as excluded can be 0
        # as well
        if excluded_simplified is not None:
            return excluded_simplified
        return self.simplify_exclude_host_count(scan_id)

    def calculate_target_progress(self, scan_id: str) -> int:
        """Get a target's current progress value.

        The value is calculated with the progress of each single host
        in the target."""
        total_hosts = self.get_count_total(scan_id)
        count_alive = self.get_count_alive(scan_id)
        count_dead = self.get_count_dead(scan_id)
        host_progresses = self.get_current_target_progress(scan_id)
        finished_hosts = self.get_finished_hosts_count(scan_id)
        try:
            # Finished-alive hosts count as 100% each; in-progress hosts
            # contribute their individual percentage.
            t_prog = int(
                (sum(host_progresses.values()) + 100 * count_alive)
                / (total_hosts + finished_hosts - count_dead)
            )
        except ZeroDivisionError:
            # Consider the case in which all hosts are dead or excluded
            logger.debug('%s: All hosts dead or excluded.', scan_id)
            t_prog = ScanProgress.FINISHED.value
        return t_prog

    def get_start_time(self, scan_id: str) -> int:
        """Get a scan's start time (epoch seconds)."""
        return self.scans_table[scan_id]['start_time']

    def get_end_time(self, scan_id: str) -> int:
        """Get a scan's end time (epoch seconds, 0 if not ended)."""
        return self.scans_table[scan_id]['end_time']

    def get_host_list(self, scan_id: str) -> Optional[str]:
        """Get a scan's host list (the raw hosts string of the target)."""
        target = None
        try:
            target = self.scans_table[scan_id]['target'].get('hosts')
        except KeyError:
            logger.warning(
                '%s: Scan ID is in the scan table, but it was '
                'not initialized.',
                scan_id,
            )
        return target

    def get_host_count(self, scan_id: str) -> int:
        """Get total host count in the target."""
        host = self.get_host_list(scan_id)
        total_hosts = 0
        if host:
            total_hosts = len(target_str_to_list(host))
        return total_hosts

    def get_ports(self, scan_id: str) -> str:
        """Get a scan's ports list.

        NOTE: 'ports' is popped from the target dict, so this can only
        be read once per scan; a second call raises KeyError.
        """
        target = self.scans_table[scan_id].get('target')
        ports = target.pop('ports')
        self.scans_table[scan_id]['target'] = target
        return ports

    def get_exclude_hosts(self, scan_id: str) -> str:
        """Get an exclude host list for a given target."""
        return self.scans_table[scan_id]['target'].get('exclude_hosts')

    def get_finished_hosts(self, scan_id: str) -> str:
        """Get the finished host list sent by the client for a given target."""
        return self.scans_table[scan_id]['target'].get('finished_hosts')

    def get_finished_hosts_count(self, scan_id: str) -> int:
        """Get the number of finished hosts sent by the client."""
        fin_hosts = target_str_to_list(self.get_finished_hosts(scan_id))
        finish_count = 0
        if fin_hosts:
            finish_count = len(fin_hosts)
        return finish_count

    def get_credentials(self, scan_id: str) -> Dict[str, Dict[str, str]]:
        """Get a scan's credential list. It return dictionary with
        the corresponding credential for a given target.
        """
        return self.scans_table[scan_id].get('credentials')

    def get_target_options(self, scan_id: str) -> Dict[str, str]:
        """Get a scan's target option dictionary.

        It return dictionary with the corresponding options for
        a given target.
        """
        return self.scans_table[scan_id]['target'].get('options')

    def get_vts(self, scan_id: str) -> Dict[str, Union[Dict[str, str], List]]:
        """Get a scan's vts.

        NOTE: 'vts' is popped from the scan info, so this can only be
        read once per scan; a second call raises KeyError.
        """
        scan_info = self.scans_table[scan_id]
        vts = scan_info.pop('vts')
        self.scans_table[scan_id] = scan_info
        return vts

    def id_exists(self, scan_id: str) -> bool:
        """Check whether a scan exists in the table."""
        return self.scans_table.get(scan_id) is not None

    def delete_scan(self, scan_id: str) -> bool:
        """Delete a scan if fully finished (refuses RUNNING scans)."""
        if self.get_status(scan_id) == ScanStatus.RUNNING:
            return False
        scans_table = self.scans_table
        try:
            del scans_table[scan_id]
            self.scans_table = scans_table
        except KeyError:
            return False
        return True
ospd-openvas-22.9.0/ospd/server.py 0000664 0000000 0000000 00000020470 15011310720 0017040 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
"""
Module for serving and streaming data
"""
import logging
import socket
import ssl
import time
import threading
import socketserver
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Callable, Optional, Tuple, Union
from ospd.errors import OspdError
logger = logging.getLogger(__name__)
DEFAULT_BUFSIZE = 1024


class Stream:
    """Thin wrapper around a client socket applying a read/write timeout."""

    def __init__(self, sock: socket.socket, stream_timeout: int):
        self.socket = sock
        self.socket.settimeout(stream_timeout)

    def close(self):
        """Close the stream"""
        try:
            self.socket.shutdown(socket.SHUT_RDWR)
        except OSError as e:
            # The peer may already have closed its side; shutting down a
            # half-closed socket is not an error worth reporting loudly.
            logger.debug(
                "Ignoring error while shutting down the connection. %s", e
            )
        self.socket.close()

    def read(self, bufsize: Optional[int] = DEFAULT_BUFSIZE) -> bytes:
        """Read at maximum bufsize data from the stream"""
        data = self.socket.recv(bufsize)
        if not data:
            logger.debug('Client closed the connection')
        return data

    def write(self, data: bytes) -> bool:
        """Send data in chunks of DEFAULT_BUFSIZE to the client.

        Returns True if all bytes were sent, False on a socket error.
        """
        b_start = 0
        total = len(data)
        while b_start < total:
            chunk = data[b_start : b_start + DEFAULT_BUFSIZE]
            try:
                # send() may transmit fewer bytes than requested, so
                # advance by the amount actually sent. The previous
                # implementation advanced by a full chunk regardless and
                # silently dropped data on partial sends.
                b_sent = self.socket.send(chunk)
            except (socket.error, BrokenPipeError) as e:
                logger.error("Error sending data to the client. %s", e)
                return False
            b_start += b_sent
        return True
# Callback invoked with a connected Stream for each new client.
StreamCallbackType = Callable[[Stream], None]
# (address, port) pair identifying a TCP endpoint.
InetAddress = Tuple[str, int]
def validate_cacert_file(cacert: str):
    """Check if provided file is a valid CA Certificate.

    Raises:
        OspdError: if the file is missing, malformed, expired or not
            yet active.
    """
    try:
        context = ssl.create_default_context(cafile=cacert)
    except AttributeError:
        # Python version < 2.7.9
        return
    except IOError:
        raise OspdError('CA Certificate not found') from None
    try:
        ca_cert = context.get_ca_certs()[0]
        not_after = ssl.cert_time_to_seconds(ca_cert['notAfter'])
        not_before = ssl.cert_time_to_seconds(ca_cert['notBefore'])
    except (KeyError, IndexError):
        raise OspdError('CA Certificate is erroneous') from None
    now = int(time.time())
    if now > not_after:
        raise OspdError('CA Certificate expired')
    if now < not_before:
        raise OspdError('CA Certificate not active yet')
class RequestHandler(socketserver.BaseRequestHandler):
    """Class to handle the request."""

    def handle(self):
        # Delegate to the owning server's handle_request so the
        # per-server stream logic lives in one place.
        self.server.handle_request(self.request, self.client_address)
class BaseServer(ABC):
    """Abstract base for servers that hand client streams to a callback."""

    def __init__(self, stream_timeout: int):
        self.server = None
        self.stream_timeout = stream_timeout

    @abstractmethod
    def start(self, stream_callback: StreamCallbackType):
        """Starts a server with capabilities to handle multiple client
        connections simultaneously.
        If a new client connects the stream_callback is called with a Stream

        Arguments:
            stream_callback (function): Callback function to be called when
                a stream is ready
        """

    @abstractmethod
    def handle_request(self, request, client_address):
        """Handle an incoming client request"""

    def close(self):
        """Shutdown the server"""
        if self.server is not None:
            self.server.shutdown()
            self.server.server_close()

    def _start_threading_server(self):
        # Serve in a daemon thread so the main process is not blocked
        # and shutdown does not hang on it.
        thread = threading.Thread(target=self.server.serve_forever)
        thread.daemon = True
        thread.start()
class SocketServerMixin:
    """Mixin wiring a socketserver class to a BaseServer instance."""

    # Use daemon mode to circumvent a memory leak
    # (reported at https://bugs.python.org/issue37193).
    #
    # Daemonic threads are killed immediately by the python interpreter without
    # waiting for until they are finished.
    #
    # Maybe block_on_close = True could work too.
    # In that case the interpreter waits for the threads to finish but doesn't
    # track them in the _threads list.
    daemon_threads = True

    def __init__(self, server: BaseServer, address: Union[str, InetAddress]):
        # Store the delegate before super().__init__: bind_and_activate
        # starts accepting work, and handle_request needs self.server.
        self.server = server
        super().__init__(address, RequestHandler, bind_and_activate=True)

    def handle_request(self, request, client_address):
        # Forward to the BaseServer implementation (Unix or TLS variant).
        self.server.handle_request(request, client_address)
class ThreadedUnixSocketServer(
    SocketServerMixin,
    socketserver.ThreadingUnixStreamServer,
):
    """Threading Unix-domain stream server delegating to SocketServerMixin."""

    pass
class ThreadedTlsSocketServer(
    SocketServerMixin,
    socketserver.ThreadingTCPServer,
):
    """Threading TCP server delegating to SocketServerMixin.

    TLS wrapping happens per-connection in TlsServer.handle_request.
    """

    pass
class UnixSocketServer(BaseServer):
    """Server for accepting connections via a Unix domain socket"""

    def __init__(self, socket_path: str, socket_mode: str, stream_timeout: int):
        super().__init__(stream_timeout)
        self.socket_path = Path(socket_path)
        # socket_mode is an octal string, e.g. "0o770".
        self.socket_mode = int(socket_mode, 8)

    def _cleanup_socket(self):
        # Remove a stale socket file left over from a previous run.
        if self.socket_path.exists():
            self.socket_path.unlink()

    def _create_parent_dirs(self):
        # Ensure the whole directory hierarchy for the socket exists.
        self.socket_path.parent.mkdir(parents=True, exist_ok=True)

    def start(self, stream_callback: StreamCallbackType):
        """Bind the Unix socket and serve connections in a daemon thread."""
        self._cleanup_socket()
        self._create_parent_dirs()
        try:
            self.stream_callback = stream_callback
            self.server = ThreadedUnixSocketServer(self, str(self.socket_path))
            self._start_threading_server()
        except OSError as e:
            logger.error("Couldn't bind socket on %s", str(self.socket_path))
            raise OspdError(
                f"Couldn't bind socket on {str(self.socket_path)}. {e}"
            ) from e
        if self.socket_path.exists():
            # Restrict access to the configured mode after binding.
            self.socket_path.chmod(self.socket_mode)

    def close(self):
        """Shut the server down and remove the socket file."""
        super().close()
        self._cleanup_socket()

    def handle_request(self, request, client_address):
        logger.debug("New request from %s", str(self.socket_path))
        self.stream_callback(Stream(request, self.stream_timeout))
class TlsServer(BaseServer):
    """Server for accepting TLS encrypted connections via a TCP socket"""

    def __init__(
        self,
        address: str,
        port: int,
        cert_file: str,
        key_file: str,
        ca_file: str,
        stream_timeout: int,
    ):
        """Validate the cert/key/CA files and prepare the SSL context.

        Raises:
            OspdError: if any of the files is missing or the CA
                certificate is invalid.
        """
        super().__init__(stream_timeout)
        self.socket = (address, port)
        if not Path(cert_file).exists():
            raise OspdError(f'cert file {cert_file} not found')
        if not Path(key_file).exists():
            raise OspdError(f'key file {key_file} not found')
        if not Path(ca_file).exists():
            raise OspdError(f'CA file {ca_file} not found')
        validate_cacert_file(ca_file)
        # NOTE(review): PROTOCOL_SSLv23 is a deprecated alias of
        # PROTOCOL_TLS; consider ssl.PROTOCOL_TLS_SERVER — verify no
        # caller relies on client-mode behavior before changing.
        protocol = ssl.PROTOCOL_SSLv23
        self.tls_context = ssl.SSLContext(protocol)
        # Require a client certificate (mutual TLS) signed by ca_file.
        self.tls_context.verify_mode = ssl.CERT_REQUIRED
        self.tls_context.load_cert_chain(cert_file, keyfile=key_file)
        self.tls_context.load_verify_locations(ca_file)

    def start(self, stream_callback: StreamCallbackType):
        """Bind the TCP socket and serve TLS connections in a thread.

        Raises:
            OspdError: if the socket cannot be bound.
        """
        try:
            self.stream_callback = stream_callback
            self.server = ThreadedTlsSocketServer(self, self.socket)
            self._start_threading_server()
        except OSError as e:
            logger.error(
                "Couldn't bind socket on %s:%s", self.socket[0], self.socket[1]
            )
            raise OspdError(
                f"Couldn't bind socket on {self.socket[0]}:{self.socket[1]}. "
                f"{e}"
            ) from e

    def handle_request(self, request, client_address):
        """Wrap the accepted socket in TLS and hand it to the callback."""
        logger.debug("New connection from %s", client_address)
        req_socket = self.tls_context.wrap_socket(request, server_side=True)
        stream = Stream(req_socket, self.stream_timeout)
        self.stream_callback(stream)
ospd-openvas-22.9.0/ospd/timer.py 0000664 0000000 0000000 00000002335 15011310720 0016652 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
import time
import logging
from ospd.errors import OspdError
logger = logging.getLogger(__name__)
class TimerError(OspdError):
    """Raised when a Timer operation is used in an invalid state."""
class Timer:
    """Measure and log elapsed wall-clock time.

    Can be used as a context manager: entering starts the timer and
    exiting stops it, logging the elapsed time via the given logger.
    """

    def __init__(
        self,
        name: str = None,
        text: str = "{}: Elapsed time: {:0.4f} seconds",
        logger=logger.debug,  # pylint: disable=redefined-outer-name
    ):
        # None means "not running"; otherwise a perf_counter() reference.
        self._start_time = None
        self._name = name
        self._text = text
        self._logger = logger

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.stop()

    @staticmethod
    def create(name) -> "Timer":
        """Create and immediately start a named timer."""
        timer = Timer(name)
        timer.start()
        return timer

    def start(self):
        """Start a new timer"""
        self._start_time = time.perf_counter()

    def stop(self):
        """Stop the timer, log and return the elapsed time in seconds.

        Raises:
            TimerError: If the timer was never started or was already
                stopped.
        """
        # Compare against None explicitly: perf_counter() may
        # legitimately return 0.0, which a truthiness check would
        # wrongly treat as "not running".
        if self._start_time is None:
            raise TimerError('Timer is not running.')

        duration = time.perf_counter() - self._start_time
        # Mark the timer as stopped so a second stop() raises instead of
        # silently reporting a meaningless duration.
        self._start_time = None

        if self._logger:
            self._logger(self._text.format(self._name, duration))

        return duration
ospd-openvas-22.9.0/ospd/vtfilter.py 0000664 0000000 0000000 00000010032 15011310720 0017362 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
"""Vulnerability Test Filter class."""
import re
import operator
from typing import Dict, List, Optional
from ospd.errors import OspdCommandError
from .vts import Vts
class VtsFilter:
    """Helper class to filter Vulnerability Tests"""

    def __init__(self) -> None:
        """Initialize filter operator and allowed filters."""
        self.filter_operator = {
            '<': operator.lt,
            '>': operator.gt,
            '=': operator.eq,
        }

        self.allowed_filter = {
            'creation_time': self.format_vt_creation_time,
            'modification_time': self.format_vt_modification_time,
        }

    def parse_filters(self, vt_filter: str) -> List:
        """Parse a string containing one or more filters
        and return a list of filters

        Arguments:
            vt_filter (string): String containing filters separated with
                semicolon.

        Return:
            List with filters. Each filter is a list with 3 elements
            e.g. [arg, operator, value]

        Raises:
            OspdCommandError: If a filter is malformed or uses an
                unknown element or operator.
        """
        filter_list = vt_filter.split(';')
        filters = []

        for single_filter in filter_list:
            # Split at the first non-word character, keeping it as the
            # operator token. The positional maxsplit argument to
            # re.split() is deprecated since Python 3.13; pass it by
            # keyword.
            filter_aux = re.split(r'(\W)', single_filter, maxsplit=1)

            if len(filter_aux) < 3:
                raise OspdCommandError(
                    "Invalid number of argument in the filter", "get_vts"
                )

            _element, _oper, _val = filter_aux

            if _element not in self.allowed_filter:
                raise OspdCommandError("Invalid filter element", "get_vts")

            if _oper not in self.filter_operator:
                raise OspdCommandError("Invalid filter operator", "get_vts")

            filters.append(filter_aux)

        return filters

    def format_vt_creation_time(self, value):
        """In case the given creation datetime value must be formatted,
        this function must be implemented by the wrapper
        """
        return value

    def format_vt_modification_time(self, value):
        """In case the given modification datetime value must be formatted,
        this function must be implemented by the wrapper
        """
        return value

    def format_filter_value(self, element: str, value: Dict):
        """Calls the specific function to format value,
        depending on the given element.

        Arguments:
            element (string): The element of the VT to be formatted.
            value (dictionary): The element value.

        Returns:
            Returns a formatted value.
        """
        format_func = self.allowed_filter.get(element)
        return format_func(value)

    def get_filtered_vts_list(
        self, vts: "Vts", vt_filter: str
    ) -> Optional[List[str]]:
        """Gets a collection of vulnerability test from the vts dictionary,
        which match the filter.

        Arguments:
            vt_filter: Filter to apply to the vts collection.
            vts: The complete vts collection.

        Returns:
            List with filtered vulnerability tests. The list can be empty.
            None in case of filter parse failure.

        Raises:
            OspdCommandError: If no filter string is given.
        """
        if not vt_filter:
            raise OspdCommandError('vt_filter: A valid filter is required.')

        filters = self.parse_filters(vt_filter)
        if not filters:
            return None

        # Start from the full OID list and strip out every VT that
        # fails any of the filters.
        vt_oid_list = list(vts)

        for _element, _oper, _filter_val in filters:
            for vt_oid in vts:
                if vt_oid not in vt_oid_list:
                    continue

                vt = vts.get(vt_oid)
                # VTs missing the filtered element never match.
                if vt is None or not vt.get(_element):
                    vt_oid_list.remove(vt_oid)
                    continue

                _elem_val = vt.get(_element)
                _val = self.format_filter_value(_element, _elem_val)

                if not self.filter_operator[_oper](_val, _filter_val):
                    vt_oid_list.remove(vt_oid)

        return vt_oid_list
ospd-openvas-22.9.0/ospd/vts.py 0000664 0000000 0000000 00000013154 15011310720 0016347 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
"""Classes for storing VTs"""
import logging
import multiprocessing
from hashlib import sha256
import re
from copy import deepcopy
from typing import (
Dict,
Any,
Type,
Iterator,
Iterable,
Tuple,
)
from ospd.errors import OspdError
logger = logging.getLogger(__name__)

DEFAULT_VT_ID_PATTERN = re.compile("[0-9a-zA-Z_\\-:.]{1,80}")


class Vts:
    """Collection of vulnerability tests (VTs), keyed by VT OID.

    By default the VTs are stored in a multiprocessing Manager dict so
    they can be shared between processes; a plain dict (or any other
    dict factory) can be injected via the storage argument.
    """

    def __init__(
        self,
        storage: Type[Dict] = None,
        vt_id_pattern=DEFAULT_VT_ID_PATTERN,
    ):
        # Factory used to create the backing dict; None selects a
        # multiprocessing Manager dict (created lazily).
        self.storage = storage
        self.vt_id_pattern = vt_id_pattern
        self._vts = None
        self.sha256_hash = None

        self.is_cache_available = True

    def __contains__(self, key: str) -> bool:
        # Use the lazily-created storage via the property; self._vts may
        # still be None, and `key in None` would raise a TypeError.
        return key in self.vts

    def __iter__(self) -> Iterator[str]:
        # Always return a real iterator. Returning None (as the previous
        # implementation did when the storage proxy had no __iter__)
        # makes iteration raise a TypeError for the caller. keys() is
        # supported both by plain dicts and by Manager dict proxies.
        return iter(self.vts.keys())

    def __getitem__(self, key):
        return self.vts[key]

    def items(self) -> Iterator[Tuple[str, Dict]]:
        return iter(self.vts.items())

    def __len__(self) -> int:
        return len(self.vts)

    def __init_vts(self):
        if self.storage:
            self._vts = self.storage()
        else:
            self._vts = multiprocessing.Manager().dict()

    @property
    def vts(self) -> Dict[str, Any]:
        # Lazily create the backing storage on first access.
        if self._vts is None:
            self.__init_vts()

        return self._vts

    def add(
        self,
        vt_id: str,
        name: str = None,
        vt_params: str = None,
        vt_refs: str = None,
        custom: str = None,
        vt_creation_time: str = None,
        vt_modification_time: str = None,
        vt_dependencies: str = None,
        summary: str = None,
        impact: str = None,
        affected: str = None,
        insight: str = None,
        solution: str = None,
        solution_t: str = None,
        solution_m: str = None,
        detection: str = None,
        qod_t: str = None,
        qod_v: str = None,
        severities: str = None,
    ) -> None:
        """Add a vulnerability test information.

        IMPORTANT: The VT's Data Manager will store the vts collection.
        If the collection is considerably big and it will be consulted
        intensively during a routine, consider doing a deepcopy(), since
        accessing the shared memory in the data manager is very expensive.
        At the end of the routine, the temporal copy must be set to None
        and deleted.

        Raises:
            OspdError: If vt_id is empty, does not match the configured
                OID pattern or already exists in the collection.
        """
        if not vt_id:
            raise OspdError(f'Invalid vt_id {vt_id}')

        if self.vt_id_pattern.fullmatch(vt_id) is None:
            raise OspdError(f'Invalid vt_id {vt_id}')

        if vt_id in self.vts:
            raise OspdError(f'vt_id {vt_id} already exists')

        if name is None:
            name = ''

        vt = {'name': name}
        if custom is not None:
            vt["custom"] = custom
        if vt_params is not None:
            vt["vt_params"] = vt_params
        if vt_refs is not None:
            vt["vt_refs"] = vt_refs
        if vt_dependencies is not None:
            vt["vt_dependencies"] = vt_dependencies
        if vt_creation_time is not None:
            vt["creation_time"] = vt_creation_time
        if vt_modification_time is not None:
            vt["modification_time"] = vt_modification_time
        if summary is not None:
            vt["summary"] = summary
        if impact is not None:
            vt["impact"] = impact
        if affected is not None:
            vt["affected"] = affected
        if insight is not None:
            vt["insight"] = insight
        if solution is not None:
            vt["solution"] = solution
            if solution_t is not None:
                vt["solution_type"] = solution_t
            if solution_m is not None:
                vt["solution_method"] = solution_m
        if detection is not None:
            vt["detection"] = detection
        # qod_type takes precedence over a plain qod value.
        if qod_t is not None:
            vt["qod_type"] = qod_t
        elif qod_v is not None:
            vt["qod"] = qod_v
        if severities is not None:
            vt["severities"] = severities

        self.vts[vt_id] = vt

    def get(self, vt_id: str) -> Dict[str, Any]:
        return self.vts.get(vt_id)

    def keys(self) -> Iterable[str]:
        return self.vts.keys()

    def clear(self) -> None:
        # Guard against clearing a collection that was never populated.
        if self._vts is not None:
            self._vts.clear()
        self._vts = None

    def copy(self) -> "Vts":
        copy = Vts(self.storage, vt_id_pattern=self.vt_id_pattern)
        copy._vts = deepcopy(self._vts)  # pylint: disable=protected-access
        return copy

    def calculate_vts_collection_hash(self, include_vt_params: bool = True):
        """Calculate the vts collection sha256 hash."""
        if not self._vts:
            logger.debug(
                "Error calculating VTs collection hash. Cache is empty"
            )
            return

        m = sha256()  # pylint: disable=invalid-name

        # for a reproducible hash calculation
        # the vts must already be sorted in the dictionary.
        for vt_id, vt in self.vts.items():
            param_chain = ""
            vt_params = vt.get('vt_params')
            if include_vt_params and vt_params:
                for _, param in sorted(vt_params.items()):
                    param_chain += (
                        param.get('id')
                        + param.get('name')
                        + param.get('default')
                    )

            m.update(
                (vt_id + vt.get('modification_time')).encode('utf-8')
                + param_chain.encode('utf-8')
            )

        self.sha256_hash = m.hexdigest()
ospd-openvas-22.9.0/ospd/xml.py 0000664 0000000 0000000 00000020702 15011310720 0016330 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
"""OSP XML utils class."""
import re
from typing import List, Dict, Any, Union
from xml.sax.saxutils import escape, quoteattr
from xml.etree.ElementTree import Element, tostring
from ospd.misc import ResultType
r = re.compile(  # pylint: disable=invalid-name
    r'(.*?)(?:([^\x09\x0A\x0D\x20-\x7E\x85\xA0-\xFF'
    + r'\u0100-\uD7FF\uE000-\uFDCF\uFDE0-\uFFFD])|([\n])|$)'
)


def split_invalid_xml(result_text: str) -> Union[List[Union[str, int]], str]:
    """Split result_text at every char that is not printable/XML-safe.

    Returns a list alternating between printable string fragments,
    newline strings and the integer Unicode code point of each
    offending character.
    """
    fragments = []

    def collect(match):
        # Group 1: printable run, group 2: invalid char, group 3: newline.
        text, bad_char, newline = match.groups()
        if text:
            fragments.append(text)
        if bad_char is not None:
            fragments.append(ord(bad_char))
        if newline is not None:
            fragments.append(newline)
        return ""

    re.sub(r, collect, result_text)

    return fragments
def escape_ctrl_chars(result_text):
    """Replace each non printable char in result_text with its hex code
    rendered as a string (e.g. '\\x0007').
    """
    parts = []
    for fragment in split_invalid_xml(result_text):
        if isinstance(fragment, int):
            # Integer fragments are code points of invalid chars.
            parts.append(f'\\x{fragment:04X}')
        else:
            parts.append(fragment)
    return ''.join(parts)
def get_result_xml(result):
    """Formats a scan result to XML format.

    Arguments:
        result (dict): Dictionary with a scan result.

    Return:
        Result as xml element object.
    """
    attributes = (
        ('name', result['name']),
        ('type', ResultType.get_str(result['type'])),
        ('severity', result['severity']),
        ('host', result['host']),
        ('hostname', result['hostname']),
        ('test_id', result['test_id']),
        ('port', result['port']),
        ('qod', result['qod']),
        ('uri', result['uri']),
    )

    result_xml = Element('result')
    for attr_name, attr_value in attributes:
        result_xml.set(attr_name, escape(str(attr_value)))

    value = result['value']
    if value is not None:
        result_xml.text = escape_ctrl_chars(value)

    return result_xml
def get_progress_xml(progress: Dict[str, int]):
    """Formats a scan progress to XML format.

    Arguments:
        progress (dict): Dictionary with a scan progress.

    Return:
        Progress as xml element object.
    """
    progress_xml = Element('progress')
    for key, value in progress.items():
        if key == 'current_hosts':
            # Every host becomes its own <host name="..."> child.
            for host, host_progress in value.items():
                host_elem = Element('host')
                host_elem.set('name', host)
                host_elem.text = str(host_progress)
                progress_xml.append(host_elem)
        else:
            elem = Element(key)
            elem.text = str(value)
            progress_xml.append(elem)
    return progress_xml
def simple_response_str(
command: str,
status: int,
status_text: str,
content: Union[str, Element, List[str], List[Element]] = "",
) -> bytes:
"""Creates an OSP response XML string.
Arguments:
command (str): OSP Command to respond to.
status (int): Status of the response.
status_text (str): Status text of the response.
content (str): Text part of the response XML element.
Return:
String of response in xml format.
"""
response = Element(f'{command}_response')
for name, value in [('status', str(status)), ('status_text', status_text)]:
response.set(name, escape(str(value)))
if isinstance(content, list):
for elem in content:
if isinstance(elem, Element):
response.append(elem)
elif isinstance(content, Element):
response.append(content)
elif content is not None:
response.text = escape_ctrl_chars(content)
return tostring(response, encoding='utf-8')
def get_elements_from_dict(data: Dict[str, Any]) -> List[Element]:
    """Creates a list of etree elements from a dictionary

    Args:
        Dictionary of tags and their elements.

    Return:
        List of xml elements.
    """
    elements = []
    for tag, value in data.items():
        elem = Element(tag)
        if isinstance(value, dict):
            # Nested dictionaries become child elements, recursively.
            elem.extend(get_elements_from_dict(value))
        elif isinstance(value, list):
            elem.text = ', '.join(value)
        elif value is not None:
            elem.text = escape_ctrl_chars(value)
        elements.append(elem)
    return elements
def elements_as_text(
    elements: Dict[str, Union[str, Dict]], indent: int = 2
) -> str:
    """Returns the elements dictionary as formatted plain text."""
    lines = []
    for name, description in elements.items():
        if isinstance(description, dict):
            # Nested element docs are rendered two spaces deeper.
            desc_txt = '\n' + elements_as_text(description, indent + 2)
        elif isinstance(description, str):
            desc_txt = description + '\n'
        else:
            assert False, "Only string or dictionary"
        lines.append(f"\t{' ' * indent}{name: <22} {desc_txt}")
    return ''.join(lines)
class XmlStringHelper:
    """Class with methods to help the creation of a xml object in
    string format.
    """

    def create_element(self, elem_name: str, end: bool = False) -> bytes:
        """Get a name and create the open or closing tag of an entity.

        Arguments:
            elem_name (str): The name of the tag element.
            end (bool): Create an initial tag if False, otherwise the end tag.

        Return:
            Encoded string representing a part of an xml element.
        """
        if end:
            # The closing tag needs the '</' prefix; without it the
            # produced document is not well-formed XML.
            ret = f"</{elem_name}>"
        else:
            ret = f"<{elem_name}>"
        return ret.encode('utf-8')

    def create_response(self, command: str, end: bool = False) -> bytes:
        """Create or end an xml response.

        Arguments:
            command (str): The name of the command for the response element.
            end (bool): Create an initial tag if False, otherwise the end tag.

        Return:
            Encoded string representing a part of an xml element, or None
            if no command was given.
        """
        if not command:
            return

        if end:
            # Closing tag must carry the '</' prefix to be well-formed.
            return (f'</{command}_response>').encode('utf-8')

        return (f'<{command}_response status="200" status_text="OK">').encode(
            'utf-8'
        )

    def add_element(
        self,
        content: Union[Element, str, list],
        xml_str: bytes = None,
        end: bool = False,
    ) -> bytes:
        """Create the initial or ending tag for a subelement, or add
        one or many xml elements

        Arguments:
            content (Element, str, list): Content to add.
            xml_str (bytes): Initial string where content to be added to.
            end (bool): Create an initial tag if False, otherwise the end tag.
                It will be added to the xml_str.

        Return:
            Encoded string representing a part of an xml element.
        """
        if not xml_str:
            xml_str = b''

        if content:
            if isinstance(content, list):
                for elem in content:
                    xml_str = xml_str + tostring(elem, encoding='utf-8')
            elif isinstance(content, Element):
                xml_str = xml_str + tostring(content, encoding='utf-8')
            else:
                # Forward the end flag so end=True emits a closing tag
                # (previously False was hard-coded in both branches,
                # contradicting the documented behavior).
                xml_str = xml_str + self.create_element(content, end)

        return xml_str

    def add_attr(
        self, tag: bytes, attribute: str, value: Union[str, int] = None
    ) -> bytes:
        """Add an attribute to the beginning tag of an xml element.

        Arguments:
            tag (bytes): Tag to add the attribute to.
            attribute (str): Attribute name
            value (str): Attribute value

        Return:
            Tag in encoded string format with the given attribute
        """
        if not tag:
            return None

        if not attribute:
            return tag

        if not value:
            value = ''

        # Drop the trailing '>' and re-append it after the attribute;
        # quoteattr() takes care of quoting/escaping the value.
        return tag[:-1] + (f" {attribute}={quoteattr(str(value))}>").encode(
            'utf-8'
        )
ospd-openvas-22.9.0/ospd/xmlvt.py 0000664 0000000 0000000 00000027460 15011310720 0016712 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
"""OSP XML utils class for VTs."""
import logging
from typing import List, Dict, Optional
from lxml.etree import Element, SubElement, tostring
logger = logging.getLogger(__name__)
# Common OID prefix shared by all OpenVAS VTs; used below to recognise
# OID-style dependency references.
VT_BASE_OID = "1.3.6.1.4.1.25623."


class XmlStringVTHelper:
    """Class with methods to help the creation of a VT's xml object in
    string format.

    Uses lxml's etree API (Element/SubElement/tostring); lxml raises
    ValueError when element text contains characters that are not
    allowed in XML, which is why the text assignments below are wrapped
    in try/except.
    """

    @staticmethod
    def get_custom_vt_as_xml_str(vt_id: str, custom: Dict) -> str:
        """Return an xml element with custom metadata formatted as string.

        Arguments:
            vt_id: VT OID. Only used for logging in error case.
            custom: Dictionary with the custom metadata.

        Return:
            Xml element as string.
        """
        _custom = Element('custom')
        for key, val in custom.items():
            xml_key = SubElement(_custom, key)
            try:
                xml_key.text = val
            except ValueError as e:
                # Invalid XML characters in the value: skip the text but
                # keep the (empty) element.
                logger.warning(
                    "Not possible to parse custom tag for VT %s: %s", vt_id, e
                )
        return tostring(_custom).decode('utf-8')

    @staticmethod
    def get_severities_vt_as_xml_str(vt_id: str, severities: Dict) -> str:
        """Return an xml element with severities as string.

        Arguments:
            vt_id: VT OID. Only used for logging in error case.
            severities: Dictionary with the severities.

        Return:
            Xml element as string.
        """
        _severities = Element('severities')
        _severity = SubElement(_severities, 'severity')
        if 'severity_base_vector' in severities:
            try:
                _value = SubElement(_severity, 'value')
                _value.text = severities.get('severity_base_vector')
            except ValueError as e:
                logger.warning(
                    "Not possible to parse severity tag for vt %s: %s", vt_id, e
                )
        if 'severity_origin' in severities:
            _origin = SubElement(_severity, 'origin')
            _origin.text = severities.get('severity_origin')
        if 'severity_date' in severities:
            _date = SubElement(_severity, 'date')
            _date.text = severities.get('severity_date')
        if 'severity_type' in severities:
            _severity.set('type', severities.get('severity_type'))

        return tostring(_severities).decode('utf-8')

    @staticmethod
    def get_params_vt_as_xml_str(vt_id: str, vt_params: Dict) -> str:
        """Return an xml element with params formatted as string.

        Arguments:
            vt_id: VT OID. Only used for logging in error case.
            vt_params: Dictionary with the VT parameters.

        Return:
            Xml element as string.
        """
        vt_params_xml = Element('params')
        for _pref_id, prefs in vt_params.items():
            # NOTE(review): assumes every param dict carries 'type',
            # 'name' and 'default' keys — a missing key raises KeyError;
            # confirm against the producer of vt_params.
            vt_param = Element('param')
            vt_param.set('type', prefs['type'])
            vt_param.set('id', _pref_id)
            xml_name = SubElement(vt_param, 'name')
            try:
                xml_name.text = prefs['name']
            except ValueError as e:
                logger.warning(
                    "Not possible to parse parameter for VT %s: %s", vt_id, e
                )
            if prefs['default']:
                xml_def = SubElement(vt_param, 'default')
                try:
                    xml_def.text = prefs['default']
                except ValueError as e:
                    logger.warning(
                        "Not possible to parse default parameter for VT %s: %s",
                        vt_id,
                        e,
                    )
            vt_params_xml.append(vt_param)

        return tostring(vt_params_xml).decode('utf-8')

    @staticmethod
    def get_refs_vt_as_xml_str(vt_id: str, vt_refs: Dict) -> str:
        """Return an xml element with references formatted as string.

        Arguments:
            vt_id: VT OID. Only used for logging in error case.
            vt_refs: Dictionary with the VT references.

        Return:
            Xml element as string.
        """
        vt_refs_xml = Element('refs')
        for ref_type, ref_values in vt_refs.items():
            for value in ref_values:
                vt_ref = Element('ref')
                if ref_type == "xref" and value:
                    # NOTE(review): the same vt_ref element is reused for
                    # every xref in the comma-separated list, so only the
                    # last xref's type/id survive — confirm whether one
                    # <ref> element per xref was intended here.
                    for xref in value.split(', '):
                        try:
                            _type, _id = xref.split(':', 1)
                        except ValueError as e:
                            logger.error(
                                'Not possible to parse xref "%s" for VT %s: %s',
                                xref,
                                vt_id,
                                e,
                            )
                            continue
                        vt_ref.set('type', _type.lower())
                        vt_ref.set('id', _id)
                elif value:
                    vt_ref.set('type', ref_type.lower())
                    vt_ref.set('id', value)
                else:
                    continue
                vt_refs_xml.append(vt_ref)

        return tostring(vt_refs_xml).decode('utf-8')

    @staticmethod
    def get_dependencies_vt_as_xml_str(
        vt_id: str, vt_dependencies: List
    ) -> str:
        """Return an xml element with dependencies as string.

        Arguments:
            vt_id: VT OID. Only used for logging in error case.
            vt_dependencies: List with the VT dependencies.

        Return:
            Xml element as string.
        """
        vt_deps_xml = Element('dependencies')
        for dep in vt_dependencies:
            _vt_dep = Element('dependency')
            # Only OID-style dependency references are emitted; script
            # file names (no OID prefix) are logged and skipped.
            if VT_BASE_OID in dep:
                _vt_dep.set('vt_id', dep)
            else:
                logger.error(
                    'Not possible to add dependency %s for VT %s', dep, vt_id
                )
                continue
            vt_deps_xml.append(_vt_dep)

        return tostring(vt_deps_xml).decode('utf-8')

    @staticmethod
    def get_creation_time_vt_as_xml_str(
        vt_id: str, vt_creation_time: str
    ) -> str:
        """Return creation time as string.

        Arguments:
            vt_id: VT OID. Only used for logging in error case.
            vt_creation_time: String with the VT creation time.

        Return:
            Xml element as string.
        """
        _time = Element('creation_time')
        try:
            _time.text = vt_creation_time
        except ValueError as e:
            logger.warning(
                "Not possible to parse creation time for VT %s: %s", vt_id, e
            )
        return tostring(_time).decode('utf-8')

    @staticmethod
    def get_modification_time_vt_as_xml_str(
        vt_id: str, vt_modification_time: str
    ) -> str:
        """Return modification time as string.

        Arguments:
            vt_id: VT OID. Only used for logging in error case.
            vt_modification_time: String with the VT modification time.

        Return:
            Xml element as string.
        """
        _time = Element('modification_time')
        try:
            _time.text = vt_modification_time
        except ValueError as e:
            logger.warning(
                "Not possible to parse modification time for VT %s: %s",
                vt_id,
                e,
            )
        return tostring(_time).decode('utf-8')

    @staticmethod
    def get_summary_vt_as_xml_str(vt_id: str, summary: str) -> str:
        """Return summary as string.

        Arguments:
            vt_id: VT OID. Only used for logging in error case.
            summary: String with a VT summary.

        Return:
            Xml element as string.
        """
        _summary = Element('summary')
        try:
            _summary.text = summary
        except ValueError as e:
            logger.warning(
                "Not possible to parse summary tag for VT %s: %s", vt_id, e
            )
        return tostring(_summary).decode('utf-8')

    @staticmethod
    def get_impact_vt_as_xml_str(vt_id: str, impact) -> str:
        """Return impact as string.

        Arguments:
            vt_id (str): VT OID. Only used for logging in error case.
            impact (str): String which explains the vulnerability impact.

        Return:
            string: xml element as string.
        """
        _impact = Element('impact')
        try:
            _impact.text = impact
        except ValueError as e:
            logger.warning(
                "Not possible to parse impact tag for VT %s: %s", vt_id, e
            )
        return tostring(_impact).decode('utf-8')

    @staticmethod
    def get_affected_vt_as_xml_str(vt_id: str, affected: str) -> str:
        """Return affected as string.

        Arguments:
            vt_id: VT OID. Only used for logging in error case.
            affected: String which explains what is affected.

        Return:
            Xml element as string.
        """
        _affected = Element('affected')
        try:
            _affected.text = affected
        except ValueError as e:
            logger.warning(
                "Not possible to parse affected tag for VT %s: %s", vt_id, e
            )
        return tostring(_affected).decode('utf-8')

    @staticmethod
    def get_insight_vt_as_xml_str(vt_id: str, insight: str) -> str:
        """Return insight as string.

        Arguments:
            vt_id: VT OID. Only used for logging in error case.
            insight: String giving an insight of the vulnerability.

        Return:
            Xml element as string.
        """
        _insight = Element('insight')
        try:
            _insight.text = insight
        except ValueError as e:
            logger.warning(
                "Not possible to parse insight tag for VT %s: %s", vt_id, e
            )
        return tostring(_insight).decode('utf-8')

    @staticmethod
    def get_solution_vt_as_xml_str(
        vt_id: str,
        solution: str,
        solution_type: Optional[str] = None,
        solution_method: Optional[str] = None,
    ) -> str:
        """Return solution as string.

        Arguments:
            vt_id: VT OID. Only used for logging in error case.
            solution: String giving a possible solution.
            solution_type: A solution type
            solution_method: A solution method

        Return:
            Xml element as string.
        """
        _solution = Element('solution')
        try:
            _solution.text = solution
        except ValueError as e:
            logger.warning(
                "Not possible to parse solution tag for VT %s: %s", vt_id, e
            )
        if solution_type:
            _solution.set('type', solution_type)
        if solution_method:
            _solution.set('method', solution_method)

        return tostring(_solution).decode('utf-8')

    @staticmethod
    def get_detection_vt_as_xml_str(
        vt_id: str,
        detection: Optional[str] = None,
        qod_type: Optional[str] = None,
        qod: Optional[str] = None,
    ) -> str:
        """Return detection as string.

        Arguments:
            vt_id: VT OID. Only used for logging in error case.
            detection: String which explains how the vulnerability
                was detected.
            qod_type: qod type.
            qod: qod value.

        Return:
            Xml element as string.
        """
        _detection = Element('detection')
        if detection:
            try:
                _detection.text = detection
            except ValueError as e:
                logger.warning(
                    "Not possible to parse detection tag for VT %s: %s",
                    vt_id,
                    e,
                )
        # qod_type takes precedence over a plain qod value.
        if qod_type:
            _detection.set('qod_type', qod_type)
        elif qod:
            _detection.set('qod', qod)

        return tostring(_detection).decode('utf-8')
ospd-openvas-22.9.0/ospd_openvas/ 0000775 0000000 0000000 00000000000 15011310720 0016710 5 ustar 00root root 0000000 0000000 ospd-openvas-22.9.0/ospd_openvas/__init__.py 0000664 0000000 0000000 00000000236 15011310720 0021022 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
from .__version__ import __version__
ospd-openvas-22.9.0/ospd_openvas/__version__.py 0000664 0000000 0000000 00000000147 15011310720 0021545 0 ustar 00root root 0000000 0000000 # pylint: disable=invalid-name
# THIS IS AN AUTOGENERATED FILE. DO NOT TOUCH!
__version__ = "22.9.0"
ospd-openvas-22.9.0/ospd_openvas/daemon.py 0000664 0000000 0000000 00000130143 15011310720 0020527 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
# pylint: disable=too-many-lines
"""Setup for the OSP OpenVAS Server."""
import logging
import time
import copy
from typing import Optional, Dict, List, Tuple, Iterator, Any
from datetime import datetime
from pathlib import Path
from os import geteuid
import psutil
from ospd.ospd import OSPDaemon
from ospd.scan import ScanProgress, ScanStatus
from ospd.server import BaseServer
from ospd.main import main as daemon_main
from ospd.vtfilter import VtsFilter
from ospd.resultlist import ResultList
from ospd_openvas import __version__
from ospd_openvas.errors import OspdOpenvasError
from ospd_openvas.notus import Cache, Notus, NotusParser, NotusResultHandler
from ospd_openvas.dryrun import DryRun
from ospd_openvas.messages.result import ResultMessage
from ospd_openvas.nvticache import NVTICache
from ospd_openvas.db import MainDB, BaseDB
from ospd_openvas.lock import LockFile
from ospd_openvas.preferencehandler import PreferenceHandler
from ospd_openvas.openvas import NASLCli, Openvas
from ospd_openvas.vthelper import VtHelper
from ospd_openvas.messaging.mqtt import MQTTClient, MQTTDaemon, MQTTSubscriber
logger = logging.getLogger(__name__)
OSPD_DESC = """
This scanner runs OpenVAS to scan the target hosts.
OpenVAS (Open Vulnerability Assessment Scanner) is a powerful scanner
for vulnerabilities in IT infrastrucutres. The capabilities include
unauthenticated scanning as well as authenticated scanning for
various types of systems and services.
For more details about OpenVAS see:
http://www.openvas.org/
The current version of ospd-openvas is a simple frame, which sends
the server parameters to the Greenbone Vulnerability Manager daemon (GVMd) and
checks the existence of OpenVAS binary. But it can not run scans yet.
"""
OSPD_PARAMS = {
'auto_enable_dependencies': {
'type': 'boolean',
'name': 'auto_enable_dependencies',
'default': 1,
'mandatory': 1,
'visible_for_client': True,
'description': 'Automatically enable the plugins that are depended on',
},
'cgi_path': {
'type': 'string',
'name': 'cgi_path',
'default': '/cgi-bin:/scripts',
'mandatory': 1,
'visible_for_client': True,
'description': 'Look for default CGIs in /cgi-bin and /scripts',
},
'checks_read_timeout': {
'type': 'integer',
'name': 'checks_read_timeout',
'default': 5,
'mandatory': 1,
'visible_for_client': True,
'description': (
'Number of seconds that the security checks will '
+ 'wait for when doing a recv()'
),
},
'non_simult_ports': {
'type': 'string',
'name': 'non_simult_ports',
'default': '139, 445, 3389, Services/irc',
'mandatory': 1,
'visible_for_client': True,
'description': (
'Prevent to make two connections on the same given '
+ 'ports at the same time.'
),
},
'open_sock_max_attempts': {
'type': 'integer',
'name': 'open_sock_max_attempts',
'default': 5,
'mandatory': 0,
'visible_for_client': True,
'description': (
'Number of unsuccessful retries to open the socket '
+ 'before to set the port as closed.'
),
},
'timeout_retry': {
'type': 'integer',
'name': 'timeout_retry',
'default': 5,
'mandatory': 0,
'visible_for_client': True,
'description': (
'Number of retries when a socket connection attempt ' + 'timesout.'
),
},
'optimize_test': {
'type': 'boolean',
'name': 'optimize_test',
'default': 1,
'mandatory': 0,
'visible_for_client': True,
'description': (
'By default, optimize_test is enabled which means openvas does '
+ 'trust the remote host banners and is only launching plugins '
+ 'against the services they have been designed to check. '
+ 'For example it will check a web server claiming to be IIS only '
+ 'for IIS related flaws but will skip plugins testing for Apache '
+ 'flaws, and so on. This default behavior is used to optimize '
+ 'the scanning performance and to avoid false positives. '
+ 'If you are not sure that the banners of the remote host '
+ 'have been tampered with, you can disable this option.'
),
},
'plugins_timeout': {
'type': 'integer',
'name': 'plugins_timeout',
'default': 5,
'mandatory': 0,
'visible_for_client': True,
'description': 'This is the maximum lifetime, in seconds of a plugin.',
},
'report_host_details': {
'type': 'boolean',
'name': 'report_host_details',
'default': 1,
'mandatory': 1,
'visible_for_client': True,
'description': '',
},
'safe_checks': {
'type': 'boolean',
'name': 'safe_checks',
'default': 1,
'mandatory': 1,
'visible_for_client': True,
'description': (
'Disable the plugins with potential to crash '
+ 'the remote services'
),
},
'scanner_plugins_timeout': {
'type': 'integer',
'name': 'scanner_plugins_timeout',
'default': 36000,
'mandatory': 1,
'visible_for_client': True,
'description': 'Like plugins_timeout, but for ACT_SCANNER plugins.',
},
'time_between_request': {
'type': 'integer',
'name': 'time_between_request',
'default': 0,
'mandatory': 0,
'visible_for_client': True,
'description': (
'Allow to set a wait time between two actions '
+ '(open, send, close).'
),
},
'unscanned_closed': {
'type': 'boolean',
'name': 'unscanned_closed',
'default': 1,
'mandatory': 1,
'visible_for_client': True,
'description': '',
},
'unscanned_closed_udp': {
'type': 'boolean',
'name': 'unscanned_closed_udp',
'default': 1,
'mandatory': 1,
'visible_for_client': True,
'description': '',
},
'expand_vhosts': {
'type': 'boolean',
'name': 'expand_vhosts',
'default': 1,
'mandatory': 0,
'visible_for_client': True,
'description': 'Whether to expand the target hosts '
+ 'list of vhosts with values gathered from sources '
+ 'such as reverse-lookup queries and VT checks '
+ 'for SSL/TLS certificates.',
},
'test_empty_vhost': {
'type': 'boolean',
'name': 'test_empty_vhost',
'default': 0,
'mandatory': 0,
'visible_for_client': True,
'description': 'If set to yes, the scanner will '
+ 'also test the target by using empty vhost value '
+ 'in addition to the targets associated vhost values.',
},
'max_hosts': {
'type': 'integer',
'name': 'max_hosts',
'default': 30,
'mandatory': 0,
'visible_for_client': False,
'description': (
'The maximum number of hosts to test at the same time which '
+ 'should be given to the client (which can override it). '
+ 'This value must be computed given your bandwidth, '
+ 'the number of hosts you want to test, your amount of '
+ 'memory and the performance of your processor(s).'
),
},
'max_checks': {
'type': 'integer',
'name': 'max_checks',
'default': 10,
'mandatory': 0,
'visible_for_client': False,
'description': (
'The number of plugins that will run against each host being '
+ 'tested. Note that the total number of process will be max '
+ 'checks x max_hosts so you need to find a balance between '
+ 'these two options. Note that launching too many plugins at '
+ 'the same time may disable the remote host, either temporarily '
+ '(ie: inetd closes its ports) or definitely (the remote host '
+ 'crash because it is asked to do too many things at the '
+ 'same time), so be careful.'
),
},
'port_range': {
'type': 'string',
'name': 'port_range',
'default': '',
'mandatory': 0,
'visible_for_client': False,
'description': (
'This is the default range of ports that the scanner plugins will '
+ 'probe. The syntax of this option is flexible, it can be a '
+ 'single range ("1-1500"), several ports ("21,23,80"), several '
+ 'ranges of ports ("1-1500,32000-33000"). Note that you can '
+ 'specify UDP and TCP ports by prefixing each range by T or U. '
+ 'For instance, the following range will make openvas scan UDP '
+ 'ports 1 to 1024 and TCP ports 1 to 65535 : '
+ '"T:1-65535,U:1-1024".'
),
},
'alive_test_ports': {
'type': 'string',
'name': 'alive_test_ports',
'default': '21-23,25,53,80,110-111,135,139,143,443,445,'
+ '993,995,1723,3306,3389,5900,8080',
'mandatory': 0,
'visible_for_client': True,
'description': ('Port list used for host alive detection.'),
},
'test_alive_hosts_only': {
'type': 'boolean',
'name': 'test_alive_hosts_only',
'default': 0,
'mandatory': 0,
'visible_for_client': False,
'description': (
'If this option is set, openvas will scan the target list for '
+ 'alive hosts in a separate process while only testing those '
+ 'hosts which are identified as alive. This boosts the scan '
+ 'speed of target ranges with a high amount of dead hosts '
+ 'significantly.'
),
},
'test_alive_wait_timeout': {
'type': 'integer',
'name': 'test_alive_wait_timeout',
'default': 1,
'mandatory': 0,
'visible_for_client': True,
'description': (
'This is the default timeout to wait for replies after last '
+ 'packet was sent.'
),
},
'hosts_allow': {
'type': 'string',
'name': 'hosts_allow',
'default': '',
'mandatory': 0,
'visible_for_client': False,
'description': (
'Comma-separated list of the only targets that are authorized '
+ 'to be scanned. Supports the same syntax as the list targets. '
+ 'Both target hostnames and the address to which they resolve '
+ 'are checked. Hostnames in hosts_allow list are not resolved '
+ 'however.'
),
},
'hosts_deny': {
'type': 'string',
'name': 'hosts_deny',
'default': '',
'mandatory': 0,
'visible_for_client': False,
'description': (
'Comma-separated list of targets that are not authorized to '
+ 'be scanned. Supports the same syntax as the list targets. '
+ 'Both target hostnames and the address to which they resolve '
+ 'are checked. Hostnames in hosts_deny list are not '
+ 'resolved however.'
),
},
'results_per_host': {
'type': 'integer',
'name': 'results_per_host',
'default': 10,
'mandatory': 0,
'visible_for_client': True,
'description': (
'Amount of fake results generated per each host in the target '
+ 'list for a dry run scan.'
),
},
'table_driven_lsc': {
'type': 'boolean',
'name': 'table_driven_lsc',
'default': 1,
'mandatory': 0,
'visible_for_client': True,
'description': (
'If this option is enabled a scanner for table_driven_lsc will '
+ 'scan package results.'
),
},
'max_mem_kb': {
'type': 'integer',
'name': 'max_mem_kb',
'default': 0,
'mandatory': 0,
'visible_for_client': True,
'description': (
'Maximum amount of memory (in MB) allowed to use for a single '
+ 'script. If this value is set, the amount of memory put into '
+ 'redis is tracked for every Script. If the amount of memory '
+ 'exceeds this limit, the script is not able to set more kb '
+ 'items. The tracked the value written into redis is only '
+ 'estimated, as it does not check, if a value was replaced or '
+ 'appended. The size of the key is also not tracked. If this '
+ 'value is not set or <= 0, the maximum amount is unlimited '
+ '(Default).'
),
},
}
def safe_int(value: str) -> Optional[int]:
    """Best-effort integer parsing.

    Returns the parsed integer, or None when `value` cannot be
    converted (wrong type or malformed string).
    """
    try:
        parsed = int(value)
    except (TypeError, ValueError):
        return None
    return parsed
class OpenVasVtsFilter(VtsFilter):
    """VtsFilter specialization backed by the redis NVTI cache.

    Overrides the generic filtering helpers so that VT lookups are
    resolved against the NVTICache (and, when available, the Notus
    advisory cache) instead of an in-memory collection.
    """

    def __init__(self, nvticache: NVTICache, notus: Notus) -> None:
        super().__init__()
        # Handles to the redis-backed NVT metadata cache and the
        # optional Notus advisory cache (may be None).
        self.nvti = nvticache
        self.notus = notus

    def format_vt_modification_time(self, value: str) -> str:
        """Convert the string seconds since epoch into a 14 character
        string representing YearMonthDayHourMinuteSecond,
        e.g. 20190319122532. This always refers to UTC.
        """
        # datetime.utcfromtimestamp() is deprecated since Python 3.12;
        # build an aware UTC datetime instead - the resulting string is
        # identical. Local import keeps the fix self-contained.
        from datetime import datetime, timezone

        return datetime.fromtimestamp(int(value), tz=timezone.utc).strftime(
            "%Y%m%d%H%M%S"
        )

    def get_filtered_vts_list(self, vts, vt_filter: str) -> Optional[List[str]]:
        """Gets a collection of vulnerability test from the redis cache,
        which match the filter.

        Arguments:
            vt_filter: Filter to apply to the vts collection.
            vts: The complete vts collection.

        Returns:
            List with filtered vulnerability tests. The list can be empty.
            None in case of filter parse failure.
        """
        filters = self.parse_filters(vt_filter)
        if not filters:
            return None

        if not self.nvti:
            return None

        # build a list with nvts and notus advisories
        nvt_oid_list = [vtlist[1] for vtlist in self.nvti.get_oids()]
        if self.notus:
            notus_oid_list = [vtlist[1] for vtlist in self.notus.get_oids()]
            vt_oid_list = notus_oid_list + nvt_oid_list
        else:
            vt_oid_list = nvt_oid_list

        # Iterate over a copy so OIDs can be removed from vt_oid_list
        # while looping.
        vt_oid_list_temp = copy.copy(vt_oid_list)
        vthelper = VtHelper(self.nvti, self.notus)

        # Each filter term further narrows down the remaining OID list.
        for element, oper, filter_val in filters:
            for vt_oid in vt_oid_list_temp:
                if vt_oid not in vt_oid_list:
                    continue

                vt = vthelper.get_single_vt(vt_oid)
                if vt is None or not vt.get(element):
                    vt_oid_list.remove(vt_oid)
                    continue

                elem_val = vt.get(element)
                val = self.format_filter_value(element, elem_val)

                if self.filter_operator[oper](val, filter_val):
                    continue
                else:
                    vt_oid_list.remove(vt_oid)

        return vt_oid_list
class OSPDopenvas(OSPDaemon):
    """OSP daemon implementation wrapping the openvas scanner.

    Bridges OSP requests to the openvas command line scanner, using a
    redis main database for scan state and, optionally, MQTT for
    receiving Notus results.
    """

    def __init__(
        self,
        *,
        niceness=None,
        lock_file_dir='/var/lib/openvas',
        mqtt_broker_address="localhost",
        mqtt_broker_port=1883,
        feed_updater="openvas",
        disable_notus_hashsum_verification=False,
        **kwargs,
    ):
        """Initializes the ospd-openvas daemon's internal data."""
        # Main redis database handle; per-scan KBs are derived from it.
        self.main_db = MainDB()

        # Notus support is optional: only enabled when a feed dir is given.
        notus_dir = kwargs.get('notus_feed_dir')
        self.notus = None
        if notus_dir:
            ndir = Path(notus_dir)
            self.notus = Notus(
                ndir,
                Cache(self.main_db),
                disable_notus_hashsum_verification,
            )
        self.feed_updater = feed_updater
        self.signature_check = kwargs.get('signature_check')
        self.nvti = NVTICache(self.main_db)

        super().__init__(
            customvtfilter=OpenVasVtsFilter(self.nvti, self.notus),
            storage=dict,
            file_storage_dir=lock_file_dir,
            **kwargs,
        )

        self.server_version = __version__

        # Niceness is passed along when launching the openvas process.
        self._niceness = str(niceness)

        # Lock file guarding feed updates against concurrent processes.
        self.feed_lock = LockFile(Path(lock_file_dir) / 'feed-update.lock')
        self.daemon_info['name'] = 'OSPd OpenVAS'
        self.scanner_info['name'] = 'openvas'
        self.scanner_info['version'] = ''  # achieved during self.init()
        self.scanner_info['description'] = OSPD_DESC
        for name, param in OSPD_PARAMS.items():
            self.set_scanner_param(name, param)

        # Lazily resolved by the corresponding properties.
        self._sudo_available = None
        self._is_running_as_root = None

        # Settings reported by openvas that are not OSP parameters.
        self.scan_only_params = dict()

        # MQTT connection settings used for the Notus result channel.
        self._mqtt_broker_address = mqtt_broker_address
        self._mqtt_broker_port = mqtt_broker_port
        self._mqtt_broker_username = kwargs.get('mqtt_broker_username')
        self._mqtt_broker_password = kwargs.get('mqtt_broker_password')
    def init(self, server: BaseServer) -> None:
        """Start serving and load the VT cache.

        Brings up the OSP server, reads the scanner settings, optionally
        starts the MQTT machinery for Notus results, and finally loads
        the VTs (holding the feed lock) before marking the daemon as
        initialized.
        """
        self.scan_collection.init()

        server.start(self.handle_client_stream)

        self.scanner_info['version'] = Openvas.get_version()

        self.set_params_from_openvas_settings()

        # Do not init MQTT daemon if Notus runs via openvasd.
        if not self.scan_only_params.get("openvasd_server"):
            notus_handler = NotusResultHandler(self.report_results)

            if self._mqtt_broker_address:
                client = MQTTClient(
                    self._mqtt_broker_address, self._mqtt_broker_port, "ospd"
                )
                daemon = MQTTDaemon(client)
                subscriber = MQTTSubscriber(client)

                # Forward incoming Notus result messages to the result
                # handler so they end up in the scan collection.
                subscriber.subscribe(
                    ResultMessage, notus_handler.result_handler
                )
                daemon.run()
            else:
                logger.info(
                    "MQTT Broker Address empty. MQTT disabled. "
                    "Unable to get Notus results."
                )

        # Load VTs under the feed lock so no other process updates the
        # feed at the same time.
        with self.feed_lock.wait_for_lock():
            self.update_vts()
            self.set_feed_info()

            logger.debug("Calculating vts integrity check hash...")
            vthelper = VtHelper(self.nvti, self.notus)
            self.vts.sha256_hash = vthelper.calculate_vts_collection_hash()

        self.initialized = True
def set_params_from_openvas_settings(self):
"""Set OSPD_PARAMS with the params taken from the openvas executable."""
param_list = Openvas.get_settings()
for elem in param_list: # pylint: disable=consider-using-dict-items
if elem not in OSPD_PARAMS:
self.scan_only_params[elem] = param_list[elem]
else:
OSPD_PARAMS[elem]['default'] = param_list[elem]
def feed_is_outdated(self, current_feed: str) -> Optional[bool]:
"""Compare the current feed with the one in the disk.
Return:
False if there is no new feed.
True if the feed version in disk is newer than the feed in
redis cache.
None if there is no feed on the disk.
"""
current_feed = safe_int(current_feed)
if current_feed is None:
logger.debug(
"Wrong PLUGIN_SET format in plugins feed file "
"'plugin_feed_info.inc'. Format has to"
" be yyyymmddhhmm. For example 'PLUGIN_SET = \"201910251033\"'"
)
feed_date = None
feed_info = self.get_feed_info()
if feed_info:
feed_date = safe_int(feed_info.get("PLUGIN_SET"))
logger.debug("Current feed version: %s", current_feed)
logger.debug("Plugin feed version: %s", feed_date)
return (
(not feed_date) or (not current_feed) or (current_feed < feed_date)
)
def get_feed_info(self) -> Dict[str, Any]:
"""Parses the current plugin_feed_info.inc file"""
plugins_folder = self.scan_only_params.get('plugins_folder')
if not plugins_folder:
raise OspdOpenvasError("Error: Path to plugins folder not found.")
feed_info_file = Path(plugins_folder) / 'plugin_feed_info.inc'
if not feed_info_file.exists():
self.set_params_from_openvas_settings()
logger.debug('Plugins feed file %s not found.', feed_info_file)
return {}
feed_info = {}
with feed_info_file.open(encoding='utf-8') as fcontent:
for line in fcontent:
try:
key, value = line.split('=', 1)
except ValueError:
continue
key = key.strip()
value = value.strip()
value = value.replace(';', '')
value = value.replace('"', '')
if value:
feed_info[key] = value
return feed_info
def set_feed_info(self):
"""Set feed current information to be included in the response of
command
"""
current_feed = self.nvti.get_feed_version()
self.set_vts_version(vts_version=current_feed)
feed_info = self.get_feed_info()
self.set_feed_vendor(feed_info.get("FEED_VENDOR", "unknown"))
self.set_feed_home(feed_info.get("FEED_HOME", "unknown"))
self.set_feed_name(feed_info.get("PLUGIN_FEED", "unknown"))
def check_feed_self_test(self) -> Dict:
"""Perform a feed sync self tests and check if the feed lock file is
locked.
"""
feed_status = dict()
# It is locked by the current process
if self.feed_lock.has_lock():
feed_status["lockfile_in_use"] = '1'
# Check if we can get the lock
else:
with self.feed_lock as fl:
# It is available
if fl.has_lock():
feed_status["lockfile_in_use"] = '0'
# Locked by another process
else:
feed_status["lockfile_in_use"] = '1'
# The feed self test is not performed any more, but the following
# entries are kept for backward compatibility.
feed_status["self_test_exit_error"] = "0"
feed_status["self_test_error_msg"] = None
return feed_status
def update_vts(self):
"""Updates VTs in redis via the openvas-scanner"""
logger.info(
"Loading VTs. Scans will be [requested|queued] until VTs are"
" loaded. This may take a few minutes, please wait..."
)
old = self.nvti.get_feed_version() or 0
# reload notus cache
if self.notus:
self.notus.reload_cache()
loaded = False
if self.feed_updater == "nasl-cli":
loaded = NASLCli.load_vts_into_redis(self.signature_check)
else:
loaded = Openvas.load_vts_into_redis()
if loaded:
new = self.nvti.get_feed_version()
if new != old:
logger.info(
"Finished loading VTs. The VT cache has been updated from"
" version %s to %s.",
old,
new,
)
else:
logger.info("VTs were up to date. Feed version is %s.", new)
else:
logger.error("Updating VTs failed.")
def check_feed(self):
"""Check if there is a feed update.
Wait until all the running scans finished. Set a flag to announce there
is a pending feed update, which avoids to start a new scan.
"""
if not self.vts.is_cache_available:
return
current_feed = self.nvti.get_feed_version()
is_outdated = self.feed_is_outdated(current_feed)
# Check if the nvticache in redis is outdated
if not current_feed or is_outdated:
with self.feed_lock as fl:
if fl.has_lock():
self.initialized = False
self.update_vts()
self.set_feed_info()
vthelper = VtHelper(self.nvti, self.notus)
self.vts.sha256_hash = (
vthelper.calculate_vts_collection_hash()
)
self.initialized = True
else:
logger.debug(
"The feed was not upload or it is outdated, "
"but other process is locking the update. "
"Trying again later..."
)
return
    def scheduler(self):
        """This method is called periodically to run tasks.

        Currently the only periodic task is checking for feed updates.
        """
        self.check_feed()
    def get_vt_iterator(
        self, vt_selection: List[str] = None, details: bool = True
    ) -> Iterator[Tuple[str, Dict]]:
        """Return an iterator over (oid, metadata) pairs, as provided by
        VtHelper.

        Arguments:
            vt_selection: OIDs to restrict the iteration to; None means
                no restriction.
            details: Forwarded to VtHelper; presumably controls whether
                full VT metadata is included - confirm in VtHelper.
        """
        vthelper = VtHelper(self.nvti, self.notus)
        return vthelper.get_vt_iterator(vt_selection, details)
@property
def is_running_as_root(self) -> bool:
"""Check if it is running as root user."""
if self._is_running_as_root is not None:
return self._is_running_as_root
self._is_running_as_root = False
if geteuid() == 0:
self._is_running_as_root = True
return self._is_running_as_root
@property
def sudo_available(self) -> bool:
"""Checks that sudo is available"""
if self._sudo_available is not None:
return self._sudo_available
if self.is_running_as_root:
self._sudo_available = False
return self._sudo_available
self._sudo_available = Openvas.check_sudo()
return self._sudo_available
def check(self) -> bool:
"""Checks that openvas command line tool is found and
is executable."""
has_openvas = Openvas.check()
if not has_openvas:
logger.error(
'openvas executable not available. Please install openvas'
' into your PATH.'
)
return has_openvas
def report_openvas_scan_status(self, kbdb: BaseDB, scan_id: str):
"""Get all status entries from redis kb.
Arguments:
kbdb: KB context where to get the status from.
scan_id: Scan ID to identify the current scan.
"""
all_status = kbdb.get_scan_status()
all_hosts = dict()
finished_hosts = list()
for res in all_status:
try:
current_host, launched, total = res.split('/')
except ValueError:
continue
try:
if float(total) == 0:
continue
elif float(total) == ScanProgress.DEAD_HOST:
host_prog = ScanProgress.DEAD_HOST
else:
host_prog = int((float(launched) / float(total)) * 100)
except TypeError:
continue
all_hosts[current_host] = host_prog
if (
host_prog == ScanProgress.DEAD_HOST
or host_prog == ScanProgress.FINISHED
):
finished_hosts.append(current_host)
logger.debug(
'%s: Host %s has progress: %d', scan_id, current_host, host_prog
)
self.set_scan_progress_batch(scan_id, host_progress=all_hosts)
self.sort_host_finished(scan_id, finished_hosts)
def report_openvas_results(self, db: BaseDB, scan_id: str) -> bool:
"""Get all result entries from redis kb.
Arguments:
db: KB context where to get the results from.
scan_id: Scan ID to identify the current scan.
"""
# result_type|||host ip|||hostname|||port|||OID|||value[|||uri]
all_results = db.get_result()
results = []
for res in all_results:
if not res:
continue
msg = res.split('|||')
result = {
"result_type": msg[0],
"host_ip": msg[1],
"host_name": msg[2],
"port": msg[3],
"oid": msg[4],
"value": msg[5],
}
if len(msg) > 6:
result["uri"] = msg[6]
results.append(result)
return self.report_results(results, scan_id)
def report_results(self, results: list, scan_id: str) -> bool:
"""Reports all results given in a list.
Arguments:
results: list of results each list item must contain a dictionary
with following fields: result_type, host_ip, host_name, port, oid,
value, uri (optional)
Returns:
True if the results have been reported
"""
if not self.scan_collection.id_exists(scan_id):
logger.warning("Unknown scan_id %s", scan_id)
return False
vthelper = VtHelper(self.nvti, self.notus)
res_list = ResultList()
total_dead = 0
for res in results:
if not res:
continue
roid = res["oid"].strip()
rqod = ''
rname = ''
current_host = res["host_ip"].strip() if res["host_ip"] else ''
rhostname = res["host_name"].strip() if res["host_name"] else ''
host_is_dead = (
"Host dead" in res["value"] or res["result_type"] == "DEADHOST"
)
host_deny = "Host access denied" in res["value"]
start_end_msg = (
res["result_type"] == "HOST_START"
or res["result_type"] == "HOST_END"
)
host_count = res["result_type"] == "HOSTS_COUNT"
host_excluded = res["result_type"] == "HOSTS_EXCLUDED"
vt_aux = None
# URI is optional and containing must be checked
ruri = res["uri"] if "uri" in res else ""
if (
not host_is_dead
and not host_deny
and not start_end_msg
and not host_count
and not host_excluded
):
if not roid and res["result_type"] != 'ERRMSG':
logger.warning('Missing VT oid for a result')
vt_aux = vthelper.get_single_vt(roid)
if not vt_aux:
logger.warning('Invalid VT oid %s for a result', roid)
else:
if vt_aux.get('qod_type'):
qod_t = vt_aux.get('qod_type')
rqod = self.nvti.QOD_TYPES[qod_t]
elif vt_aux.get('qod'):
rqod = vt_aux.get('qod')
rname = vt_aux.get('name')
if res["result_type"] == 'ERRMSG':
res_list.add_scan_error_to_list(
host=current_host,
hostname=rhostname,
name=rname,
value=res["value"],
port=res["port"],
test_id=roid,
uri=ruri,
)
elif (
res["result_type"] == 'HOST_START'
or res["result_type"] == 'HOST_END'
):
res_list.add_scan_log_to_list(
host=current_host,
name=res["result_type"],
value=res["value"],
)
elif res["result_type"] == 'LOG':
res_list.add_scan_log_to_list(
host=current_host,
hostname=rhostname,
name=rname,
value=res["value"],
port=res["port"],
qod=rqod,
test_id=roid,
uri=ruri,
)
elif res["result_type"] == 'HOST_DETAIL':
res_list.add_scan_host_detail_to_list(
host=current_host,
hostname=rhostname,
name=rname,
value=res["value"],
uri=ruri,
)
elif res["result_type"] == 'ALARM':
rseverity = vthelper.get_severity_score(vt_aux)
res_list.add_scan_alarm_to_list(
host=current_host,
hostname=rhostname,
name=rname,
value=res["value"],
port=res["port"],
test_id=roid,
severity=rseverity,
qod=rqod,
uri=ruri,
)
# To process non-scanned dead hosts when
# test_alive_host_only in openvas is enable
elif res["result_type"] == 'DEADHOST':
try:
total_dead = total_dead + int(res["value"])
except TypeError:
logger.debug('Error processing dead host count')
# To update total host count
if res["result_type"] == 'HOSTS_COUNT':
try:
count_total = int(res["value"])
logger.debug(
'%s: Set total hosts counted by OpenVAS: %d',
scan_id,
count_total,
)
self.set_scan_total_hosts(scan_id, count_total)
except TypeError:
logger.debug('Error processing total host count')
# To update total excluded hosts
if res["result_type"] == 'HOSTS_EXCLUDED':
try:
total_excluded = int(res["value"])
logger.debug(
'%s: Set total excluded counted by OpenVAS: %d',
scan_id,
total_excluded,
)
self.set_scan_total_excluded_hosts(scan_id, total_excluded)
except TypeError:
logger.debug('Error processing total excluded hosts')
# Insert result batch into the scan collection table.
if len(res_list):
self.scan_collection.add_result_list(scan_id, res_list)
logger.debug(
'%s: Inserting %d results into scan collection table',
scan_id,
len(res_list),
)
if total_dead:
logger.debug(
'%s: Set dead hosts counted by OpenVAS: %d',
scan_id,
total_dead,
)
self.scan_collection.set_amount_dead_hosts(
scan_id, total_dead=total_dead
)
return len(res_list) > 0
    @staticmethod
    def is_openvas_process_alive(openvas_process: psutil.Popen) -> bool:
        """Tell whether the given openvas process is still running.

        A zombie process is reaped (waited for) first, so a finished
        scanner is not reported as alive. Returns False if the process
        no longer exists.
        """
        try:
            if openvas_process.status() == psutil.STATUS_ZOMBIE:
                logger.debug("Process is a Zombie, waiting for it to clean up")
                openvas_process.wait()
        except psutil.NoSuchProcess:
            return False

        return openvas_process.is_running()
    def stop_scan_cleanup(
        self,
        kbdb: BaseDB,
        scan_id: str,
        ovas_pid: str,  # pylint: disable=arguments-differ
    ):
        """Set a key in redis to indicate the wrapper is stopped.

        It is done through redis because it is a new multiprocess
        instance and it is not possible to reach the variables
        of the grandchild process.

        Indirectly sends SIGUSR1 to the running openvas scan process
        via an invocation of openvas with the --scan-stop option to
        stop it.

        Arguments:
            kbdb: Main KB of the scan to be stopped.
            scan_id: ID of the scan to stop.
            ovas_pid: PID of the openvas process launched for the scan.
        """
        if kbdb:
            # Set stop flag in redis
            kbdb.stop_scan(scan_id)
            try:
                ovas_process = psutil.Process(int(ovas_pid))
            except psutil.NoSuchProcess:
                ovas_process = None

            # Check if openvas is running
            if (
                ovas_process
                and ovas_process.is_running()
                and ovas_process.name() == "openvas"
            ):
                # Cleaning in case of Zombie Process
                if ovas_process.status() == psutil.STATUS_ZOMBIE:
                    logger.debug(
                        '%s: Process with PID %s is a Zombie process.'
                        ' Cleaning up...',
                        scan_id,
                        ovas_process.pid,
                    )
                    ovas_process.wait()
                # Stop openvas process and wait until it stopped
                else:
                    can_stop_scan = Openvas.stop_scan(
                        scan_id,
                        not self.is_running_as_root and self.sudo_available,
                    )
                    if not can_stop_scan:
                        logger.debug(
                            'Not possible to stop scan process: %s.',
                            ovas_process,
                        )
                        return

                    logger.debug('Stopping process: %s', ovas_process)

                    # Poll until the scanner actually exits, reaping it
                    # if it turns into a zombie meanwhile.
                    while ovas_process.is_running():
                        if ovas_process.status() == psutil.STATUS_ZOMBIE:
                            ovas_process.wait()
                        else:
                            time.sleep(0.1)
            else:
                logger.debug(
                    "%s: Process with PID %s already stopped",
                    scan_id,
                    ovas_pid,
                )

            # Clean redis db
            for scan_db in kbdb.get_scan_databases():
                self.main_db.release_database(scan_db)
    def exec_scan(self, scan_id: str):
        """Starts the OpenVAS scanner for scan_id scan.

        Prepares all scan preferences in a fresh redis KB, launches the
        openvas process and then polls the KB for results and status
        until the target is finished, the client stops the scan or the
        scanner dies unexpectedly.

        Arguments:
            scan_id: ID of the scan to execute.
        """
        params = self.scan_collection.get_options(scan_id)
        if params.get("dry_run"):
            # Dry runs produce fake results without launching openvas.
            dryrun = DryRun(self)
            dryrun.exec_dry_run_scan(scan_id, self.nvti, OSPD_PARAMS)
            return

        kbdb, err = self.main_db.check_consistency(scan_id)
        if err < 0:
            logger.debug(
                "An old scan with the same scanID was found in the kb. "
                "Waiting for the kb clean up to finish."
            )
            self.stop_scan_cleanup(kbdb, scan_id, kbdb.get_scan_process_id())
            self.main_db.release_database(kbdb)

        do_not_launch = False
        kbdb = self.main_db.get_new_kb_database()
        scan_prefs = PreferenceHandler(
            scan_id, kbdb, self.scan_collection, self.nvti, self.notus.exists
        )
        kbdb.add_scan_id(scan_id)
        scan_prefs.prepare_target_for_openvas()

        if not scan_prefs.prepare_ports_for_openvas():
            self.add_scan_error(
                scan_id, name='', host='', value='Invalid port list.'
            )
            do_not_launch = True

        # Set credentials
        if not scan_prefs.prepare_credentials_for_openvas():
            error = (
                'All authentifications contain errors.'
                + 'Starting unauthenticated scan instead.'
            )
            self.add_scan_error(
                scan_id,
                name='',
                host='',
                value=error,
            )
            logger.error(error)
        # Report (but do not abort on) every malformed credential.
        errors = scan_prefs.get_error_messages()
        for e in errors:
            error = 'Malformed credential. ' + e
            self.add_scan_error(
                scan_id,
                name='',
                host='',
                value=error,
            )
            logger.error(error)

        if not scan_prefs.prepare_plugins_for_openvas():
            self.add_scan_error(
                scan_id, name='', host='', value='No VTS to run.'
            )
            do_not_launch = True

        scan_prefs.prepare_main_kbindex_for_openvas()
        scan_prefs.prepare_host_options_for_openvas()
        scan_prefs.prepare_scan_params_for_openvas(OSPD_PARAMS)
        scan_prefs.prepare_reverse_lookup_opt_for_openvas()
        scan_prefs.prepare_alive_test_option_for_openvas()

        # VT preferences are stored after all preferences have been processed,
        # since alive tests preferences have to be able to overwrite default
        # preferences of ping_host.nasl for the classic method.
        scan_prefs.prepare_nvt_preferences()
        scan_prefs.prepare_boreas_alive_test()

        # Release memory used for scan preferences.
        del scan_prefs

        # The client may have stopped the scan while preferences were
        # being prepared; do not launch openvas in that case.
        scan_stopped = self.get_scan_status(scan_id) == ScanStatus.STOPPED
        if do_not_launch or kbdb.scan_is_stopped(scan_id) or scan_stopped:
            self.main_db.release_database(kbdb)
            return

        openvas_process = Openvas.start_scan(
            scan_id,
            not self.is_running_as_root and self.sudo_available,
            self._niceness,
        )
        if openvas_process is None:
            self.main_db.release_database(kbdb)
            return

        kbdb.add_scan_process_id(openvas_process.pid)
        logger.debug('pid = %s', openvas_process.pid)

        # Wait until the scanner starts and loads all the preferences.
        while kbdb.get_status(scan_id) == 'new':
            res = openvas_process.poll()
            if res and res < 0:
                self.stop_scan_cleanup(
                    kbdb, scan_id, kbdb.get_scan_process_id()
                )
                logger.error(
                    'It was not possible run the task %s, since openvas ended '
                    'unexpectedly with errors during launching.',
                    scan_id,
                )
                return

            time.sleep(1)

        got_results = False
        while True:
            openvas_process_is_alive = self.is_openvas_process_alive(
                openvas_process
            )
            target_is_finished = kbdb.target_is_finished(scan_id)
            scan_stopped = self.get_scan_status(scan_id) == ScanStatus.STOPPED

            # Report new Results and update status
            got_results = self.report_openvas_results(kbdb, scan_id)
            self.report_openvas_scan_status(kbdb, scan_id)

            # Check if the client stopped the whole scan
            if scan_stopped:
                logger.debug('%s: Scan stopped by the client', scan_id)

                self.stop_scan_cleanup(
                    kbdb, scan_id, kbdb.get_scan_process_id()
                )

                # clean main_db, but wait for scanner to finish.
                while not kbdb.target_is_finished(scan_id):
                    if not self.is_openvas_process_alive(openvas_process):
                        break
                    logger.debug('%s: Waiting for openvas to finish', scan_id)
                    time.sleep(1)
                self.main_db.release_database(kbdb)
                return

            # Scan end. No kb in use for this scan id
            if target_is_finished:
                logger.debug('%s: Target is finished', scan_id)
                break

            if not openvas_process_is_alive:
                logger.error(
                    'Task %s was unexpectedly stopped or killed.',
                    scan_id,
                )
                self.add_scan_error(
                    scan_id,
                    name='',
                    host='',
                    value='Task was unexpectedly stopped or killed.',
                )

                # check for scanner error messages before leaving.
                self.report_openvas_results(kbdb, scan_id)
                kbdb.stop_scan(scan_id)
                for scan_db in kbdb.get_scan_databases():
                    self.main_db.release_database(scan_db)
                self.main_db.release_database(kbdb)
                return

            # Wait a second before trying to get result from redis if there
            # was no results before.
            # Otherwise, wait 50 msec to give access other process to redis.
            if not got_results:
                time.sleep(1)
            else:
                time.sleep(0.05)
            got_results = False

        # Sleep a second to be sure to get all notus results
        time.sleep(1)

        # Delete keys from KB related to this scan task.
        logger.debug('%s: End Target. Release main database', scan_id)
        self.main_db.release_database(kbdb)
def main():
    """OSP openvas main function.

    Entry point: runs the OSP daemon main loop with the openvas
    wrapper class and the Notus-aware argument parser.
    """
    daemon_main('OSPD - openvas', OSPDopenvas, NotusParser())


if __name__ == '__main__':
    main()
ospd-openvas-22.9.0/ospd_openvas/db.py 0000664 0000000 0000000 00000052757 15011310720 0017667 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
"""Access management for redis-based OpenVAS Scanner Database."""
import logging
import sys
import time
from typing import List, NewType, Optional, Iterable, Iterator, Tuple, Callable
from urllib import parse
import redis
from ospd.errors import RequiredArgument
from ospd_openvas.errors import OspdOpenvasError
from ospd_openvas.openvas import Openvas
# Redis client socket timeout.
SOCKET_TIMEOUT = 60  # in seconds

# Redis list range positions: first element, last element, and the
# LREM count value meaning "remove all occurrences".
LIST_FIRST_POS = 0
LIST_LAST_POS = -1
LIST_ALL = 0

# Possible positions of nvt values in cache list.
NVT_META_FIELDS = [
    "NVT_FILENAME_POS",
    "NVT_REQUIRED_KEYS_POS",
    "NVT_MANDATORY_KEYS_POS",
    "NVT_EXCLUDED_KEYS_POS",
    "NVT_REQUIRED_UDP_PORTS_POS",
    "NVT_REQUIRED_PORTS_POS",
    "NVT_DEPENDENCIES_POS",
    "NVT_TAGS_POS",
    "NVT_CVES_POS",
    "NVT_BIDS_POS",
    "NVT_XREFS_POS",
    "NVT_CATEGORY_POS",
    "NVT_FAMILY_POS",
    "NVT_NAME_POS",
]

# Name of the namespace usage bitmap in redis.
DBINDEX_NAME = "GVM.__GlobalDBIndex"

logger = logging.getLogger(__name__)

# Types
RedisCtx = NewType('RedisCtx', redis.Redis)
class OpenvasDB:
    """Class to connect to redis, to perform queries, and to move
    from a KB to another."""

    # Cached redis server address, resolved once from the openvas
    # settings and shared class-wide.
    _db_address = None

    @classmethod
    def get_database_address(cls) -> Optional[str]:
        """Return the redis server address used by openvas.

        The address is read once from the openvas settings and cached.
        Exits the process if the openvas executable is unavailable.
        """
        if not cls._db_address:
            if not Openvas.check():
                logger.error(
                    'openvas executable not available. Please install openvas'
                    ' into your PATH.'
                )
                sys.exit(1)
            settings = Openvas.get_settings()
            cls._db_address = settings.get('db_address')
            if cls._db_address:
                # translate openvas tcp:// configuration to redis://
                cls._db_address = cls._db_address.replace("tcp://", "redis://")
                # translate non scheme to unix://
                if not parse.urlparse(cls._db_address).scheme:
                    cls._db_address = "unix://" + cls._db_address
                if cls._db_address.startswith("redis://"):
                    logger.warning(
                        "A Redis TCP connection is being used. "
                        "This feature is experimental and insecure. "
                        "It is not recommended in production environments."
                    )
        return cls._db_address
    @classmethod
    def create_context(
        cls, dbnum: int = 0, encoding: str = 'latin-1'
    ) -> RedisCtx:
        """Connect to redis to the given database or to the default db 0 .

        Arguments:
            dbnum: The db number to connect to.
            encoding: The encoding to be used to read and write.

        Return a new redis context on success. Exits the process after
        five failed connection attempts.
        """
        tries = 5
        while tries:
            try:
                ctx = redis.Redis.from_url(
                    url=cls.get_database_address(),
                    db=dbnum,
                    socket_timeout=SOCKET_TIMEOUT,
                    encoding=encoding,
                    decode_responses=True,
                )
                # Cheap command to force a real connection and verify
                # the server is reachable.
                ctx.keys("test")
            except (redis.exceptions.ConnectionError, FileNotFoundError) as err:
                # Retry, waiting 5 seconds between attempts.
                logger.debug(
                    'Redis connection lost: %s. Trying again in 5 seconds.', err
                )
                tries = tries - 1
                time.sleep(5)
                continue
            break

        if not tries:
            logger.error('Redis Error: Not possible to connect to the kb.')
            sys.exit(1)

        return ctx
@classmethod
def find_database_by_pattern(
cls, pattern: str, max_database_index: int
) -> Tuple[Optional[RedisCtx], Optional[int]]:
"""Search a pattern inside all kbs up to max_database_index.
Returns the redis context for the db and its index as a tuple or
None, None if the db with the pattern couldn't be found.
"""
for i in range(0, max_database_index):
ctx = cls.create_context(i)
if ctx.keys(pattern):
return (ctx, i)
return (None, None)
@staticmethod
def select_database(ctx: RedisCtx, kbindex: str):
"""Use an existent redis connection and select a redis kb.
Arguments:
ctx: Redis context to use.
kbindex: The new kb to select
"""
if not ctx:
raise RequiredArgument('select_database', 'ctx')
if not kbindex:
raise RequiredArgument('select_database', 'kbindex')
ctx.execute_command('SELECT ' + str(kbindex))
    @staticmethod
    def get_list_item(
        ctx: RedisCtx,
        name: str,
        start: Optional[int] = LIST_FIRST_POS,
        end: Optional[int] = LIST_LAST_POS,
    ) -> Optional[list]:
        """Returns the specified elements from `start` to `end` of the
        list stored as `name`.

        Arguments:
            ctx: Redis context to use.
            name: key name of a list.
            start: first range element to get (defaults to the first
                element of the list).
            end: last range element to get (defaults to the last
                element of the list).

        Return List specified elements in the key.

        Raises:
            RequiredArgument: If ctx or name is missing.
        """
        if not ctx:
            raise RequiredArgument('get_list_item', 'ctx')
        if not name:
            raise RequiredArgument('get_list_item', 'name')

        return ctx.lrange(name, start, end)
@staticmethod
def get_last_list_item(ctx: RedisCtx, name: str) -> str:
if not ctx:
raise RequiredArgument('get_last_list_item', 'ctx')
if not name:
raise RequiredArgument('get_last_list_item', 'name')
return ctx.rpop(name)
@staticmethod
def pop_list_items(ctx: RedisCtx, name: str) -> List[str]:
if not ctx:
raise RequiredArgument('pop_list_items', 'ctx')
if not name:
raise RequiredArgument('pop_list_items', 'name')
pipe = ctx.pipeline()
pipe.lrange(name, LIST_FIRST_POS, LIST_LAST_POS)
pipe.delete(name)
results, redis_return_code = pipe.execute()
# The results are left-pushed. To preserver the order
# the result list must be reversed.
if redis_return_code:
results.reverse()
else:
results = []
return results
@staticmethod
def get_key_count(ctx: RedisCtx, pattern: Optional[str] = None) -> int:
    """Get the number of keys matching the pattern.

    Arguments:
        ctx: Redis context to use.
        pattern: pattern used as filter; all keys ('*') when omitted.

    Raises:
        RequiredArgument: if ctx is missing.
    """
    if not pattern:
        pattern = "*"

    if not ctx:
        raise RequiredArgument('get_key_count', 'ctx')

    matching_keys = ctx.keys(pattern)
    return len(matching_keys)
@staticmethod
def remove_list_item(ctx: RedisCtx, key: str, value: str):
    """Remove all occurrences of `value` from the list stored at `key`.

    Arguments:
        ctx: Redis context to use.
        key: key name of a list.
        value: Value to be removed from the key.

    Raises:
        RequiredArgument: if an argument is missing.
    """
    # Fixed: the RequiredArgument function-name strings contained stray
    # trailing spaces ('remove_list_item ').
    if not ctx:
        raise RequiredArgument('remove_list_item', 'ctx')
    if not key:
        raise RequiredArgument('remove_list_item', 'key')
    if not value:
        raise RequiredArgument('remove_list_item', 'value')

    # LIST_ALL removes every matching element, not only the first one.
    ctx.lrem(key, count=LIST_ALL, value=value)
@staticmethod
def get_single_item(
    ctx: RedisCtx,
    name: str,
    index: Optional[int] = LIST_FIRST_POS,
) -> Optional[str]:
    """Get a single KB element.

    Arguments:
        ctx: Redis context to use.
        name: key name of a list.
        index: index of the element to be return.
            Defaults to the first element in the list.

    Return the element at `index` of the list or None if the name
    couldn't be found.

    Raises:
        RequiredArgument: if ctx or name is missing.
    """
    if not ctx:
        raise RequiredArgument('get_single_item', 'ctx')
    if not name:
        raise RequiredArgument('get_single_item', 'name')

    return ctx.lindex(name, index)
@staticmethod
def add_single_list(ctx: RedisCtx, name: str, values: Iterable):
    """Store `values` as the redis list `name`.

    Values may be repeated. If the key already exists it is removed
    and completely replaced.

    Arguments:
        ctx: Redis context to use.
        name: key name of a list.
        values: Elements to store under the key.
    """
    if not ctx:
        raise RequiredArgument('add_single_list', 'ctx')
    if not name:
        raise RequiredArgument('add_single_list', 'name')
    if not values:
        raise RequiredArgument('add_single_list', 'value')

    # delete and re-push in a single pipeline round trip
    pipeline = ctx.pipeline()
    pipeline.delete(name)
    pipeline.rpush(name, *values)
    pipeline.execute()
@staticmethod
def add_single_item(
    ctx: RedisCtx, name: str, values: Iterable, lpush: bool = False
):
    """Append the unique elements of `values` to the list stored at
    `name`. Duplicates within `values` are dropped, but elements that
    already exist under the key are left untouched.

    Arguments:
        ctx: Redis context to use.
        name: key name of a list.
        values: Elements to add to the key.
        lpush: push to the head of the list instead of the tail.
    """
    if not ctx:
        raise RequiredArgument('add_single_item', 'ctx')
    if not name:
        raise RequiredArgument('add_single_item', 'name')
    if not values:
        raise RequiredArgument('add_single_item', 'value')

    push = ctx.lpush if lpush else ctx.rpush
    push(name, *set(values))
@staticmethod
def set_single_item(ctx: RedisCtx, name: str, value: Iterable):
    """Set (replace) a single KB element. An already existing list
    under the same key is removed first; only unique values are stored.

    Arguments:
        ctx: Redis context to use.
        name: key name of a list.
        value: New elements to add to the key.
    """
    if not ctx:
        raise RequiredArgument('set_single_item', 'ctx')
    if not name:
        raise RequiredArgument('set_single_item', 'name')
    if not value:
        raise RequiredArgument('set_single_item', 'value')

    unique_values = set(value)
    pipeline = ctx.pipeline()
    pipeline.delete(name)
    pipeline.rpush(name, *unique_values)
    pipeline.execute()
@staticmethod
def get_pattern(ctx: RedisCtx, pattern: str) -> List:
    """Get all items stored under keys matching a given pattern.

    Arguments:
        ctx: Redis context to use.
        pattern: key pattern to match.

    Return a list of [key, list-of-elements] pairs, one per matching
    key.
    """
    if not ctx:
        raise RequiredArgument('get_pattern', 'ctx')
    if not pattern:
        raise RequiredArgument('get_pattern', 'pattern')

    return [
        [
            key,
            ctx.lrange(key, start=LIST_FIRST_POS, end=LIST_LAST_POS),
        ]
        for key in ctx.keys(pattern)
    ]
@classmethod
def get_keys_by_pattern(cls, ctx: RedisCtx, pattern: str) -> List[str]:
    """Get all key names matching a given pattern.

    Arguments:
        ctx: Redis context to use.
        pattern: key pattern to match.

    Return a sorted list with the names of the matching keys.

    Raises:
        RequiredArgument: if ctx or pattern is missing.
    """
    # Fixed: error messages referenced the stale name
    # 'get_elem_pattern_by_index' instead of this method's name.
    if not ctx:
        raise RequiredArgument('get_keys_by_pattern', 'ctx')
    if not pattern:
        raise RequiredArgument('get_keys_by_pattern', 'pattern')

    # sort for a deterministic order; KEYS gives no ordering guarantee
    return sorted(ctx.keys(pattern))
@classmethod
def get_filenames_and_oids(
    cls, ctx: RedisCtx, pattern: str, parser: Callable[[str], str]
) -> Iterable[Tuple[str, str]]:
    """Get the filename and oid for every key matching the pattern.

    Arguments:
        ctx: Redis context to use.
        pattern: Pattern used for searching the keys
        parser: Callable method to remove the pattern from the keys.

    Return an iterable where each single tuple contains the filename
    as first element and the oid as the second one.
    """
    if not ctx:
        raise RequiredArgument('get_filenames_and_oids', 'ctx')
    if not pattern:
        raise RequiredArgument('get_filenames_and_oids', 'pattern')
    if not parser:
        raise RequiredArgument('get_filenames_and_oids', 'parser')

    keys = cls.get_keys_by_pattern(ctx, pattern)
    # lazy generator: the filename is the first list element, the oid
    # is derived from the key name via the parser
    return ((ctx.lindex(key, 0), parser(key)) for key in keys)
@staticmethod
def exists(ctx: RedisCtx, key: str) -> bool:
    """Check whether the given key exists in the given context.

    Arguments:
        ctx: Redis context to use.
        key: key to check.

    Return True if it exists, False otherwise.
    """
    if not ctx:
        raise RequiredArgument('exists', 'ctx')

    found = ctx.exists(key)
    return found == 1
class BaseDB:
    """Base class wrapping a redis context bound to one database index."""

    def __init__(self, kbindex: int, ctx: Optional[RedisCtx] = None):
        # Reuse the given context or create a fresh one for the index.
        self.ctx = OpenvasDB.create_context(kbindex) if ctx is None else ctx
        self.index = kbindex

    def flush(self):
        """Flush the database"""
        self.ctx.flushdb()
class BaseKbDB(BaseDB):
    """Base class for kb databases.

    Thin wrapper applying the OpenvasDB operations to this instance's
    own redis context."""

    def _add_single_item(
        self, name: str, values: Iterable, utf8_enc: Optional[bool] = False
    ):
        """Add a single KB element with one or more values.

        Changing the encoding format of an existing redis context
        is not possible. Therefore a new temporary redis context is
        created to store key-values encoded with utf-8."""
        if utf8_enc:
            # temporary utf-8 context used only for this write
            ctx = OpenvasDB.create_context(self.index, encoding='utf-8')
            OpenvasDB.add_single_item(ctx, name, values)
        else:
            OpenvasDB.add_single_item(self.ctx, name, values)

    def _set_single_item(self, name: str, value: Iterable):
        """Set (replace) a single KB element.

        Arguments:
            name: key name of a list.
            value: New elements to add to the key.
        """
        OpenvasDB.set_single_item(self.ctx, name, value)

    def _get_single_item(self, name: str) -> Optional[str]:
        """Get a single KB element.

        Arguments:
            name: key name of a list.
        """
        return OpenvasDB.get_single_item(self.ctx, name)

    def _get_list_item(
        self,
        name: str,
    ) -> Optional[List]:
        """Returns the specified elements from `start` to `end` of the
        list stored as `name`.

        Arguments:
            name: key name of a list.

        Return List specified elements in the key.
        """
        return OpenvasDB.get_list_item(self.ctx, name)

    def _pop_list_items(self, name: str) -> List:
        # Removes and returns ALL elements of the list stored at `name`.
        return OpenvasDB.pop_list_items(self.ctx, name)

    def _remove_list_item(self, key: str, value: str):
        """Remove item from the key list.

        Arguments:
            key: key name of a list.
            value: Value to be removed from the key.
        """
        OpenvasDB.remove_list_item(self.ctx, key, value)

    def get_result(self) -> List:
        """Get and remove all enqueued results.

        Return the raw scan results, oldest first.
        """
        return self._pop_list_items("internal/results")

    def get_status(self, openvas_scan_id: str) -> Optional[str]:
        """Return the status of the host scan"""
        return self._get_single_item(f'internal/{openvas_scan_id}')

    def __repr__(self):
        return f'<{self.__class__.__name__} index={self.index}>'
class ScanDB(BaseKbDB):
    """Database for scanning a single host"""

    def select(self, kbindex: int) -> "ScanDB":
        """Select a redis kb.

        Re-points this instance's redis context at another kb so the
        same connection can be reused for several databases.

        Arguments:
            kbindex: The new kb to select
        """
        OpenvasDB.select_database(self.ctx, kbindex)
        self.index = kbindex
        return self
class KbDB(BaseKbDB):
    """Database for a whole scan, giving access to its per-host ScanDBs."""

    def get_scan_databases(self) -> Iterator[ScanDB]:
        """Returns an iterator yielding corresponding ScanDBs

        The returned Iterator can't be converted to an Iterable like a List.
        Each yielded ScanDB must be used independently in a for loop. If the
        Iterator gets converted into an Iterable all returned ScanDBs will use
        the same redis context pointing to the same redis database.
        """
        dbs = self._get_list_item('internal/dbindex')
        # A single ScanDB instance is re-pointed via select() per index,
        # which is why the yielded objects must not be collected.
        scan_db = ScanDB(self.index)
        for kbindex in dbs:
            if kbindex == self.index:
                # skip this KbDB's own database
                continue
            yield scan_db.select(kbindex)

    def add_scan_id(self, scan_id: str):
        # Track the scan under its own status key and in the global
        # scan id list.
        self._add_single_item(f'internal/{scan_id}', ['new'])
        self._add_single_item('internal/scanid', [scan_id])

    def add_scan_preferences(self, openvas_scan_id: str, preferences: Iterable):
        # Preferences are stored under a per-scan key.
        self._add_single_item(
            f'internal/{openvas_scan_id}/scanprefs', preferences
        )

    def add_credentials_to_scan_preferences(
        self, openvas_scan_id: str, preferences: Iterable
    ):
        """Force the usage of the utf-8 encoding, since some credentials
        contain special chars not supported by latin-1 encoding."""
        self._add_single_item(
            f'internal/{openvas_scan_id}/scanprefs',
            preferences,
            utf8_enc=True,
        )

    def add_scan_process_id(self, pid: int):
        # Remember the scanner process id for this scan.
        self._add_single_item('internal/ovas_pid', [pid])

    def get_scan_process_id(self) -> Optional[str]:
        # Return the stored scanner process id or None.
        return self._get_single_item('internal/ovas_pid')

    def remove_scan_database(self, scan_db: ScanDB):
        # Unregister the given per-host database from this scan.
        self._remove_list_item('internal/dbindex', scan_db.index)

    def target_is_finished(self, scan_id: str) -> bool:
        """Check if a target has finished.

        A missing status is treated as finished too, but logged as an
        error since it means the scanner status got lost."""
        status = self._get_single_item(f'internal/{scan_id}')
        if status is None:
            logger.error(
                "%s: Target set as finished because redis returned None as "
                "scanner status.",
                scan_id,
            )

        return status == 'finished' or status is None

    def stop_scan(self, openvas_scan_id: str):
        # Replace the scan status with the stop marker.
        self._set_single_item(f'internal/{openvas_scan_id}', ['stop_all'])

    def scan_is_stopped(self, scan_id: str) -> bool:
        """Check if the scan should be stopped"""
        status = self._get_single_item(f'internal/{scan_id}')
        return status == 'stop_all'

    def get_scan_status(self) -> List:
        """Get and remove all queued host scan status entries.

        Return a list of strings representing host scan statuses.
        """
        return self._pop_list_items("internal/status")
class MainDB(BaseDB):
    """Main redis database (index 0).

    Keeps track of which kb databases are in use via the DBINDEX_NAME
    hash and hands out free kb databases for new scans.
    """

    DEFAULT_INDEX = 0

    def __init__(self, ctx=None):
        super().__init__(self.DEFAULT_INDEX, ctx)

        # lazily cached number of configured redis databases
        self._max_dbindex = None

    @property
    def max_database_index(self):
        """Number of databases configured in the redis server.

        Queried from the redis configuration on first access and cached.

        Raises:
            OspdOpenvasError: if the value can't be fetched from redis.
        """
        if self._max_dbindex is None:
            resp = self.ctx.config_get('databases')
            if len(resp) == 1:
                self._max_dbindex = int(resp.get('databases'))
            else:
                raise OspdOpenvasError(
                    'Redis Error: Not possible to get max_dbindex.'
                ) from None
        return self._max_dbindex

    def try_database(self, index: int) -> bool:
        """Check if a redis db is already in use. If not, mark it
        as in use.

        Arguments:
            index: Database number intended to be used.

        Return True if it is possible to use the db. False if the given db
        number is already in use.

        Raises:
            OspdOpenvasError: if the in-use marker can't be written.
        """
        _in_use = 1
        try:
            # hsetnx only sets the field if it does not exist yet and
            # reports via its return value whether it did
            resp = self.ctx.hsetnx(DBINDEX_NAME, index, _in_use)
        # Fixed: was a bare `except:` which also swallowed SystemExit
        # and KeyboardInterrupt.
        except Exception:  # pylint: disable=broad-except
            raise OspdOpenvasError(
                f'Redis Error: Not possible to set {DBINDEX_NAME}.'
            ) from None

        return resp == 1

    def get_new_kb_database(self) -> Optional[KbDB]:
        """Return a new kb db bound to an empty kb, or None if all
        databases are in use."""
        for index in range(1, self.max_database_index):
            if self.try_database(index):
                kbdb = KbDB(index)
                kbdb.flush()
                return kbdb

        return None

    def find_kb_database_by_scan_id(self, scan_id: str) -> Optional["KbDB"]:
        """Find the kb db belonging to a scan id.

        Return the KbDB holding the scan or None if no db contains it.
        (The return annotation previously declared a tuple, which did
        not match the actual return values.)
        """
        for index in range(1, self.max_database_index):
            ctx = OpenvasDB.create_context(index)
            if OpenvasDB.get_key_count(ctx, f'internal/{scan_id}'):
                return KbDB(index, ctx)

        return None

    def check_consistency(self, scan_id) -> Tuple[Optional[KbDB], int]:
        """Check if the current scan id already exists in a kb.

        Return a tuple with the kb or None, and an error code, being 0 if
        the db is clean, -1 on old finished scan, -2 on still running scan.
        """
        err = 0
        kb = self.find_kb_database_by_scan_id(scan_id)

        current_status = None
        if kb:
            current_status = kb.get_status(scan_id)

        if current_status == "finished":
            err = -1
        elif current_status == "stop_all" or current_status == "ready":
            err = -2

        return (kb, err)

    def release_database(self, database: BaseDB):
        """Mark the given database as free again and wipe its content."""
        self.release_database_by_index(database.index)
        database.flush()

    def release_database_by_index(self, index: int):
        """Remove the in-use marker for the database with the given index."""
        self.ctx.hdel(DBINDEX_NAME, index)

    def release(self):
        """Release and flush the main database itself."""
        self.release_database(self)
ospd-openvas-22.9.0/ospd_openvas/dryrun.py 0000664 0000000 0000000 00000014561 15011310720 0020614 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2021-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
# pylint: disable=too-many-lines
"""Methods for dry run"""
import logging
import time
from random import uniform, choice
from ospd.scan import ScanProgress, ScanStatus
from ospd.network import target_str_to_list, ports_as_list
from ospd.resultlist import ResultList
from ospd_openvas.vthelper import VtHelper
logger = logging.getLogger(__name__)
class DryRun:
    """Simulates a scan without launching the scanner.

    Generates random results for every host in the target so the full
    result handling chain can be exercised."""

    def __init__(self, daemon):
        # The ospd daemon; used for scan collection access and progress
        # reporting.
        self._daemon = daemon

    def exec_dry_run_scan(self, scan_id, nvti, ospd_params):
        """Run a simulated scan over the scan's host list.

        For every host a HOST_START log, `results_per_host` random
        error/log/alarm results and a HOST_END log are generated and fed
        into the daemon's scan collection; afterwards the host is marked
        as finished. Returns early if the scan gets stopped or finished.
        """
        options = self._daemon.scan_collection.get_options(scan_id)
        results_per_host = None
        if "results_per_host" in options:
            results_per_host = options.get("results_per_host")

        if not results_per_host or not isinstance(results_per_host, int):
            logger.debug("Using default value for results_per_host options")
            results_per_host = ospd_params["results_per_host"].get("default")

        # Get the host list
        target = self._daemon.scan_collection.get_host_list(scan_id)
        logger.info("The target list %s", target)
        host_list = target_str_to_list(target)

        # Get the port list
        ports = self._daemon.scan_collection.get_ports(scan_id)
        logger.info("The port list %s", ports)
        tcp, _ = ports_as_list(ports)

        # Get exclude hosts list. It must not be scanned
        # NOTE(review): exclude_hosts is only logged here and never
        # removed from host_list — confirm this is intended.
        exclude_hosts = self._daemon.scan_collection.get_exclude_hosts(scan_id)
        logger.info("The exclude hosts list %s", exclude_hosts)

        self._daemon.set_scan_total_hosts(
            scan_id,
            count_total=len(host_list),
        )
        self._daemon.scan_collection.set_amount_dead_hosts(
            scan_id, total_dead=0
        )

        # Get list of VTS. Ignore script params
        vts = list(self._daemon.scan_collection.get_vts(scan_id))
        if "vt_groups" in vts:
            vts.remove("vt_groups")
        vthelper = VtHelper(nvti, None)

        # Run the scan.
        # Scan simulation for each single host.
        # Run the scan against the host, and generates results.
        while host_list:
            # Get a host from the list
            current_host = host_list.pop()

            # Check if the scan was stopped.
            status = self._daemon.get_scan_status(scan_id)
            if status == ScanStatus.STOPPED or status == ScanStatus.FINISHED:
                logger.debug(
                    'Task %s stopped or finished.',
                    scan_id,
                )
                return

            res_list = ResultList()

            res_list.add_scan_log_to_list(
                host=current_host,
                name="HOST_START",
                value=str(int(time.time())),
            )

            # Generate N results per host. Default 10 results
            res_count = 0
            while res_count < results_per_host:
                res_count += 1
                # pick a random VT and port for this fake result
                oid = choice(vts)
                port = choice(tcp)
                vt = vthelper.get_single_vt(oid)
                rname = ''
                rqod = ''
                if vt:
                    # prefer the mapped qod_type, fall back to a raw qod
                    if vt.get('qod_type'):
                        qod_t = vt.get('qod_type')
                        rqod = nvti.QOD_TYPES[qod_t]
                    elif vt.get('qod'):
                        rqod = vt.get('qod')
                    rname = vt.get('name')
                else:
                    logger.debug("oid %s not found", oid)

                # Random result type: 1 -> error, 2 -> log, 3/4 -> alarm.
                res_type = int(uniform(1, 5))
                # Error
                if res_type == 1:
                    res_list.add_scan_error_to_list(
                        host=current_host,
                        hostname=current_host + ".hostname.net",
                        name=rname,
                        value="error running the script " + oid,
                        port=port,
                        test_id=oid,
                        uri="No location",
                    )
                # Log
                elif res_type == 2:
                    res_list.add_scan_log_to_list(
                        host=current_host,
                        hostname=current_host + ".hostname.net",
                        name=rname,
                        value="Log generate from a dry run scan for the script "
                        + oid,
                        port=port,
                        qod=rqod,
                        test_id=oid,
                        uri="No location",
                    )
                # Alarm
                else:
                    r_severity = vthelper.get_severity_score(vt)
                    res_list.add_scan_alarm_to_list(
                        host=current_host,
                        hostname=current_host + ".hostname.net",
                        name=rname,
                        value="Log generate from a dry run scan for the script "
                        + oid,
                        port=port,
                        test_id=oid,
                        severity=r_severity,
                        qod=rqod,
                        uri="No location",
                    )

            res_list.add_scan_log_to_list(
                host=current_host,
                name="HOST_END",
                value=str(int(time.time())),
            )

            # Add the result to the scan collection
            if len(res_list):
                logger.debug(
                    '%s: Inserting %d results into scan '
                    'scan collection table',
                    scan_id,
                    len(res_list),
                )
                self._daemon.scan_collection.add_result_list(scan_id, res_list)

            # Set the host scan progress as finished
            host_progress = dict()
            host_progress[current_host] = ScanProgress.FINISHED
            self._daemon.set_scan_progress_batch(
                scan_id, host_progress=host_progress
            )

            # Update the host status, Finished host. So ospd can
            # calculate the scan progress.
            # This is quite important, since the final scan status depends
            # on the progress calculation.
            finished_host = list()
            finished_host.append(current_host)
            self._daemon.sort_host_finished(scan_id, finished_host)

            time.sleep(1)
        logger.debug('%s: End task', scan_id)
ospd-openvas-22.9.0/ospd_openvas/errors.py 0000664 0000000 0000000 00000000522 15011310720 0020575 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
"""
Module for OSPD OpenVAS errors
"""
from ospd.errors import OspdError
class OspdOpenvasError(OspdError):
    """An ospd-openvas error

    Base class for all exceptions originated in ospd-openvas.
    """
ospd-openvas-22.9.0/ospd_openvas/gpg_sha_verifier.py 0000664 0000000 0000000 00000011016 15011310720 0022564 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2021-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
import hashlib
import os
import logging
from pathlib import Path
from typing import Callable, Dict, Optional
from dataclasses import dataclass
from gnupg import GPG
logger = logging.getLogger(__name__)

# System wide GnuPG home; preferred over the user's ~/.gnupg when present.
OPENVAS_GPG_HOME = "/etc/openvas/gnupg"
def __determine_default_gpg_home() -> Path:
    """Pick the default GnuPG home directory.

    Prefers the system wide OPENVAS_GPG_HOME directory; falls back to
    the user's ~/.gnupg, warning when that doesn't exist either.
    """
    system_home = Path(OPENVAS_GPG_HOME)
    if system_home.exists():
        return system_home

    fallback = Path.home() / ".gnupg"
    if not fallback.exists():
        logger.warning(
            "No GnuPG home found; "
            "please verify setup and set the GNUPGHOME variable if necessary"
        )
    return fallback
def __default_gpg_home() -> GPG:
    """
    Build a GPG instance from the 'GNUPGHOME' variable, guessing a
    sensible default home when the variable is not set.
    """
    env_home = os.getenv("GNUPGHOME")
    home = Path(env_home) if env_home else __determine_default_gpg_home()
    logger.debug("Using %s as GnuPG home.", home)
    return GPG(gnupghome=f"{home.absolute()}")
@dataclass
class ReloadConfiguration:
    """Settings and mutable reload state for reload_sha256sums."""

    # Path of the sha256sums file to verify and parse.
    hash_file: Path
    # Called with None when signature verification fails; its return
    # value is handed to the caller instead of the parsed sums.
    on_verification_failure: Callable[
        [Optional[Dict[str, str]]], Dict[str, str]
    ]
    # GPG instance to verify with; created lazily when left unset.
    gpg: Optional[GPG] = None
    # Last successfully parsed sums, keyed by sha256 sum.
    cache: Optional[Dict[str, str]] = None
    # sha1 digest of hash_file at the time cache was filled.
    fingerprint: str = ""
def reload_sha256sums(
    config: ReloadConfiguration,
) -> Callable[[], Dict[str, str]]:
    """
    reload_sha256sums returns a closure that reloads the sha256sums
    whenever the underlying hash file changed.
    """
    if not config.gpg:
        config.gpg = __default_gpg_home()

    def file_digest(file: Path) -> str:
        # we just use the hash to identify we have to reload the sha256sums
        # therefore a collision is not the end of the world and sha1 is more
        # than sufficient
        hasher = hashlib.sha1()
        try:
            with file.open(mode="rb") as f:
                for chunk in iter(lambda: f.read(1024), b""):
                    hasher.update(chunk)
            return hasher.hexdigest()
        except FileNotFoundError:
            return ""

    def internal_reload() -> Dict[str, str]:
        current = file_digest(config.hash_file)
        if not current:
            # hash file is gone; nothing to verify against
            return {}

        if not config.cache or config.fingerprint != current:
            # file changed (or first call): verify and parse it again
            config.fingerprint = current
            config.cache = gpg_sha256sums(config.hash_file, config.gpg)

        if not config.cache:
            return config.on_verification_failure(None)

        return config.cache

    return internal_reload
def gpg_sha256sums(
    hash_file: Path, gpg: Optional[GPG] = None
) -> Optional[Dict[str, str]]:
    """
    gpg_sha256sums verifies a given hash_file with an asc file.

    This function assumes that the asc file is in the same directory as
    the hashfile and has the same name but with the suffix '.asc'.

    Returns a dict mapping each sha256 sum to its (normalized) file
    name, or None when the signature verification failed.
    """
    # when doing that via parameter list it is loading eagerly on import
    # which may fail on some systems
    if not gpg:
        gpg = __default_gpg_home()
    asc_path = hash_file.parent / f"{hash_file.name}.asc"
    with asc_path.open(mode="rb") as f:
        verified = gpg.verify_file(f, str(hash_file.absolute()))
    if not verified:
        return None
    result = {}
    with hash_file.open() as f:
        for line in f.readlines():
            # sha256sum output uses "<sum>  <name>" (two spaces) or
            # "<sum> <name>"; the previous split(" ") crashed on the
            # canonical two-space format and on blank lines. Split on any
            # whitespace once and skip malformed lines instead.
            fields = line.split(maxsplit=1)
            if len(fields) != 2:
                continue
            hsum, fname = fields
            # sometimes the hash sum got generated outside the current dir
            # and may contain leading paths.
            # Since we check against the filename we should normalize to
            # prevent false positives.
            result[hsum] = fname.split("/")[-1].strip()
    return result
def create_verify(
    sha256sums: Callable[[], Dict[str, str]],
) -> Callable[[Path], bool]:
    """
    create_verify is returning a closure based on the sha256sums.

    This allows loading the sha256sums once and verifying against them
    instead of verifying and loading on each verification request.
    """

    def verify(advisory_path: Path) -> bool:
        # A path that is not a regular file can never verify.
        if not advisory_path.is_file():
            return False

        digest = hashlib.sha256()
        with advisory_path.open(mode="rb") as advisory_file:
            for chunk in iter(lambda: advisory_file.read(1024), b""):
                digest.update(chunk)

        # the sum must be known AND registered under this exact file name
        expected_name = sha256sums().get(digest.hexdigest())
        if not expected_name:
            return False

        return expected_name == advisory_path.name

    return verify
ospd-openvas-22.9.0/ospd_openvas/lock.py 0000664 0000000 0000000 00000006334 15011310720 0020220 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
import logging
import time
import fcntl
from pathlib import Path
logger = logging.getLogger(__name__)
class LockFile:
    """File based lock using ``fcntl.flock``.

    Usable as a context manager; the lock is released on exit.
    """

    def __init__(self, path: Path):
        self._lock_file_path = path
        self._has_lock = False
        # file object of the opened lock file while the lock is held
        self._fd = None

    def has_lock(self) -> bool:
        """Return True if this instance currently holds the lock."""
        return self._has_lock

    def _acquire_lock(self) -> "LockFile":
        """Try to acquire the lock by flock-ing the lock file.

        Never raises; on any failure the instance simply does not hold
        the lock afterwards (check with has_lock())."""
        if self.has_lock():
            return self

        parent_dir = self._lock_file_path.parent
        try:
            # create parent directories recursively
            parent_dir.mkdir(parents=True, mode=0o770, exist_ok=True)
        except OSError as e:
            logger.error(
                "Could not create parent dir %s for lock file. %s",
                str(parent_dir),
                e,
            )
            return self

        try:
            # Open the fd with append flag to create the file
            # if not exists and to avoid deleting the content
            # something else wrote in it.
            self._fd = self._lock_file_path.open('a')
        except Exception as e:  # pylint: disable=broad-except
            logger.error(
                "Failed to open lock file %s. %s",
                str(self._lock_file_path),
                e,
            )
            try:
                self._fd.close()
                self._fd = None
            except Exception:  # pylint: disable=broad-except
                pass
            return self

        try:
            self._lock_file_path.chmod(0o660)
        # Fixed: the exception was bound to an unused variable before.
        except OSError:
            # ignore error because it is very likely that the file exists, has
            # the correct permissions but we are not the owner
            logger.debug(
                "Could not change permissions of lock file %s",
                str(self._lock_file_path),
            )

        # Try to acquire the lock.
        try:
            fcntl.flock(self._fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            self._has_lock = True
            logger.debug("Created lock file %s.", str(self._lock_file_path))
        except BlockingIOError as e:
            logger.debug(
                "Failed to lock the file %s. %s",
                str(self._lock_file_path),
                e,
            )
            try:
                self._fd.close()
                self._fd = None
            except Exception:  # pylint: disable=broad-except
                pass

        return self

    def wait_for_lock(self):
        """Block until the lock could be acquired, polling every 10s.

        Fixed: previously an unconditional sleep ran even after the lock
        had just been acquired, delaying the caller by 10 seconds.
        """
        while not self.has_lock():
            self._acquire_lock()
            if not self.has_lock():
                time.sleep(10)

        return self

    def _release_lock(self) -> None:
        """Release the lock and close the lock file (the file itself is
        kept on disk)."""
        if self.has_lock() and self._fd:
            fcntl.flock(self._fd, fcntl.LOCK_UN)
            self._fd.close()
            self._fd = None
            self._has_lock = False
            logger.debug(
                "Removed lock from file %s.", str(self._lock_file_path)
            )

    def __enter__(self):
        self._acquire_lock()
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._release_lock()
ospd-openvas-22.9.0/ospd_openvas/messages/ 0000775 0000000 0000000 00000000000 15011310720 0020517 5 ustar 00root root 0000000 0000000 ospd-openvas-22.9.0/ospd_openvas/messages/__init__.py 0000664 0000000 0000000 00000000170 15011310720 0022626 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2021-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
ospd-openvas-22.9.0/ospd_openvas/messages/message.py 0000664 0000000 0000000 00000004376 15011310720 0022527 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2021-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
import json
from datetime import datetime, timezone
from enum import Enum
from typing import Any, Dict, Union, Optional
from uuid import UUID, uuid4
class MessageType(Enum):
    # Message kinds; the value is the wire identifier carried in the
    # serialized "message_type" field.
    RESULT = "result.scan"
    SCAN_STATUS = "scan.status"
    SCAN_START = "scan.start"
class Message:
    """Base class for all broker messages.

    Carries the common envelope fields: message id, group id and
    creation time. Subclasses set `topic` and `message_type` and extend
    serialize()/_parse() with their payload fields.
    """

    topic: str = None
    message_type: MessageType = None

    message_id: UUID = None
    group_id: str = None
    created: datetime = None

    def __init__(
        self,
        *,
        message_id: Optional[UUID] = None,
        group_id: Optional[str] = None,
        created: Optional[datetime] = None,
    ):
        self.message_id = message_id if message_id else uuid4()
        self.group_id = group_id if group_id else str(uuid4())
        # Use a timezone aware UTC datetime. The previous naive
        # datetime.utcnow() made serialize()'s timestamp() interpret the
        # value in *local* time while _parse() reads it back as UTC,
        # skewing the round trip by the local UTC offset.
        self.created = created if created else datetime.now(timezone.utc)

    @classmethod
    def _parse(cls, data: Dict[str, Union[int, str]]) -> Dict[str, Any]:
        """Extract the envelope fields from a raw message dict.

        Raises:
            ValueError: if the message type doesn't match this class.
        """
        message_type = MessageType(data.get('message_type'))
        if message_type != cls.message_type:
            raise ValueError(
                f"Invalid message type {message_type} for {cls.__name__}. "
                f"Must be {cls.message_type}.",
            )
        return {
            'message_id': UUID(data.get("message_id")),
            'group_id': data.get("group_id"),
            'created': datetime.fromtimestamp(
                float(data.get("created")), timezone.utc
            ),
        }

    def serialize(self) -> Dict[str, Union[int, str]]:
        """Return the envelope as a JSON serializable dict."""
        return {
            "message_id": str(self.message_id),
            "message_type": (
                self.message_type.value if self.message_type else None
            ),
            "group_id": str(self.group_id),
            "created": self.created.timestamp(),
        }

    @classmethod
    def deserialize(cls, data: Dict[str, Union[int, str]]) -> "Message":
        """Create a message instance from an already parsed dict."""
        kwargs = cls._parse(data)
        return cls(**kwargs)

    @classmethod
    def load(cls, payload: Union[str, bytes]) -> "Message":
        """Create a message instance from a raw JSON payload."""
        data = json.loads(payload)
        return cls.deserialize(data)

    def dump(self) -> str:
        """Return the message serialized as a JSON string."""
        return json.dumps(self.serialize())

    def __str__(self) -> str:
        return self.dump()
ospd-openvas-22.9.0/ospd_openvas/messages/result.py 0000664 0000000 0000000 00000004337 15011310720 0022416 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2021-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
from datetime import datetime
from enum import Enum
from typing import Dict, Union, Any, Optional
from uuid import UUID
from .message import Message, MessageType
class ResultType(Enum):
    # Wire identifier for the serialized "result_type" field.
    ALARM = "ALARM"
class ResultMessage(Message):
    """Message carrying a single scan result."""

    message_type: MessageType = MessageType.RESULT

    topic = "scanner/scan/info"

    def __init__(
        self,
        *,
        scan_id: str,
        host_ip: str,
        host_name: str,
        oid: str,
        value: str,
        port: str = "package",
        uri: str = None,
        result_type: ResultType = ResultType.ALARM,
        message_id: Optional[UUID] = None,
        group_id: Optional[UUID] = None,
        created: Optional[datetime] = None,
    ):
        super().__init__(
            message_id=message_id, group_id=group_id, created=created
        )
        self.scan_id = scan_id
        self.host_ip = host_ip
        self.host_name = host_name
        self.oid = oid
        self.value = value
        self.port = port
        self.uri = uri
        self.result_type = result_type

    def serialize(self) -> Dict[str, Union[int, str]]:
        """Serialize the envelope plus all result specific fields."""
        serialized = super().serialize()
        serialized["scan_id"] = self.scan_id
        serialized["host_ip"] = self.host_ip
        serialized["host_name"] = self.host_name
        serialized["oid"] = self.oid
        serialized["value"] = self.value
        serialized["port"] = self.port
        serialized["uri"] = self.uri
        serialized["result_type"] = self.result_type.value
        return serialized

    @classmethod
    def _parse(cls, data: Dict[str, Union[int, str]]) -> Dict[str, Any]:
        """Extract the constructor arguments from a raw message dict."""
        kwargs = super()._parse(data)
        for key in (
            "scan_id",
            "host_ip",
            "host_name",
            "oid",
            "value",
            "port",
            "uri",
        ):
            kwargs[key] = data.get(key)
        kwargs["result_type"] = ResultType(data.get("result_type"))
        return kwargs
ospd-openvas-22.9.0/ospd_openvas/messaging/ 0000775 0000000 0000000 00000000000 15011310720 0020665 5 ustar 00root root 0000000 0000000 ospd-openvas-22.9.0/ospd_openvas/messaging/__init__.py 0000664 0000000 0000000 00000000170 15011310720 0022774 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2021-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
ospd-openvas-22.9.0/ospd_openvas/messaging/mqtt.py 0000664 0000000 0000000 00000013440 15011310720 0022226 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2021-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
import json
import logging
from functools import partial
from socket import gaierror, timeout
from threading import Thread
from time import sleep
from typing import Callable, Type
import paho.mqtt.client as mqtt
from paho.mqtt import __version__ as paho_mqtt_version
from ..messages.message import Message
from .publisher import Publisher
from .subscriber import Subscriber
logger = logging.getLogger(__name__)

# Client id used when connecting to the MQTT broker.
OSPD_OPENVAS_MQTT_CLIENT_ID = "ospd-openvas"

# MQTT QoS level 1: messages are delivered at least once.
QOS_AT_LEAST_ONCE = 1
def is_paho_mqtt_version_2() -> bool:
    """Return True when the installed paho-mqtt is a 2.x release."""
    major_two = "2"
    return paho_mqtt_version.startswith(major_two)
class MQTTClient(mqtt.Client):
    """paho mqtt.Client preconfigured for ospd-openvas.

    Stores the broker address and port so connect() can be called
    without arguments, and bridges the constructor differences between
    paho-mqtt 1.x and 2.x.
    """

    def __init__(
        self,
        mqtt_broker_address: str,
        mqtt_broker_port: int,
        client_id=OSPD_OPENVAS_MQTT_CLIENT_ID,
    ):
        self._mqtt_broker_address = mqtt_broker_address
        self._mqtt_broker_port = mqtt_broker_port

        mqtt_client_args = {
            "client_id": client_id,
            "protocol": mqtt.MQTTv5,
        }
        if is_paho_mqtt_version_2():
            logger.debug("Using Paho MQTT version 2")
            # paho-mqtt 2.x requires choosing a callback API version;
            # VERSION1 keeps the 1.x style callbacks used elsewhere.
            # pylint: disable=no-member
            mqtt_client_args["callback_api_version"] = (
                mqtt.CallbackAPIVersion.VERSION1
            )
        else:
            logger.debug("Using Paho MQTT version 1")

        super().__init__(**mqtt_client_args)

        self.enable_logger()

    def connect(
        self,
        host=None,
        port=None,
        keepalive=60,
        bind_address="",
        bind_port=0,
        clean_start=mqtt.MQTT_CLEAN_START_FIRST_ONLY,
        properties=None,
    ):
        """Connect to the broker, defaulting host and port to the values
        given at construction time."""
        if not host:
            host = self._mqtt_broker_address
        if not port:
            port = self._mqtt_broker_port

        return super().connect(
            host,
            port=port,
            keepalive=keepalive,
            bind_address=bind_address,
            bind_port=bind_port,
            clean_start=clean_start,
            properties=properties,
        )
class MQTTPublisher(Publisher):
    """Publisher implementation sending messages over MQTT."""

    def __init__(self, client: MQTTClient):
        self._client = client

    def publish(self, message: Message) -> None:
        """Publish the message on its own topic with at-least-once QoS."""
        logger.debug('Publish message %s', message)

        payload = str(message)
        self._client.publish(message.topic, payload, qos=QOS_AT_LEAST_ONCE)
class MQTTSubscriber(Subscriber):
    """Subscriber implementation receiving messages via MQTT.

    Remembers all subscriptions so they can be re-established after the
    broker connection was lost and reconnected.
    """

    def __init__(self, client: MQTTClient):
        self.client = client

        # Save the active subscriptions on subscribe() so we can resubscribe
        # after reconnect
        self.subscriptions: dict = {}

        self.client.on_connect = self.on_connect
        # The subscriptions dict is registered as userdata so the static
        # on_connect callback can reach it through the client.
        self.client.user_data_set(self.subscriptions)

    def subscribe(
        self, message_class: Type[Message], callback: Callable[[Message], None]
    ) -> None:
        """Subscribe to the topic of `message_class` and invoke `callback`
        with the deserialized message for every received payload."""
        func = partial(self._handle_message, message_class, callback)
        # partial objects carry no __name__ of their own; copy it from
        # the callback (presumably needed by paho's callback handling —
        # TODO confirm).
        func.__name__ = callback.__name__

        logger.debug("Subscribing to topic %s", message_class.topic)

        self.client.subscribe(message_class.topic, qos=QOS_AT_LEAST_ONCE)
        self.client.message_callback_add(message_class.topic, func)

        self.subscriptions[message_class.topic] = func

    @staticmethod
    def on_connect(_client, _userdata, _flags, rc, _properties):
        # rc == 0 signals a successful connection attempt.
        if rc == 0:
            # If we previously had active subscription we subscribe to them
            # again because they got lost after a broker disconnect.
            # Userdata is set in __init__() and filled in subscribe()
            if _userdata:
                for topic, func in _userdata.items():
                    _client.subscribe(topic, qos=QOS_AT_LEAST_ONCE)
                    _client.message_callback_add(topic, func)

    @staticmethod
    def _handle_message(
        message_class: Type[Message],
        callback: Callable[[Message], None],
        _client,
        _userdata,
        msg: mqtt.MQTTMessage,
    ) -> None:
        """Deserialize an incoming MQTT payload and hand it to `callback`.

        Malformed payloads are logged and dropped."""
        logger.debug("Incoming message for topic %s", msg.topic)

        try:
            # Load message from payload
            message = message_class.load(msg.payload)
        except json.JSONDecodeError:
            logger.error(
                "Got MQTT message in non-json format for topic %s.", msg.topic
            )
            logger.debug("Got: %s", msg.payload)
            return
        except ValueError as e:
            # raised by message parsing, e.g. on a wrong message type
            logger.error(
                "Could not parse message for topic %s. Error was %s",
                msg.topic,
                e,
            )
            logger.debug("Got: %s", msg.payload)
            return

        callback(message)
class MQTTDaemon:
    """A class to start and stop the MQTT client"""

    def __init__(
        self,
        client: MQTTClient,
    ):
        self._client: MQTTClient = client

    def _try_connect_loop(self):
        """Connect to the broker, retrying transient failures every 10s.

        Gives up permanently on errors that indicate a misconfigured
        broker address (gaierror/ValueError), since retrying cannot help.
        """
        while True:
            try:
                self._client.connect()
                # Starts the client's network loop in a background thread.
                self._client.loop_start()
                logger.info("Successfully connected to MQTT broker")
                return
            # NOTE(review): gaierror is a subclass of OSError, so this
            # clause must remain ordered before the OSError clause below.
            except (gaierror, ValueError) as e:
                logger.error(
                    "Could not connect to MQTT broker, error was: %s."
                    " Unable to get results from Notus.",
                    e,
                )
                return
            # ConnectionRefusedError - when mqtt declines connection
            # timeout - when address is not reachable
            # OSError - in container when address cannot be assigned
            except (ConnectionRefusedError, timeout, OSError) as e:
                logger.warning(
                    "Could not connect to MQTT broker, error was: %s."
                    " Trying again in 10s.",
                    e,
                )
                sleep(10)

    def run(self):
        """Start the connect loop in a daemon thread; returns immediately."""
        Thread(target=self._try_connect_loop, daemon=True).start()
ospd-openvas-22.9.0/ospd_openvas/messaging/publisher.py 0000664 0000000 0000000 00000000742 15011310720 0023237 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2021-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
from abc import ABC, abstractmethod
from ..messages.message import Message
class Publisher(ABC):
    """An Abstract Base Class (ABC) for publishing Messages
    When updating to Python > 3.7 this should be converted into a
    typing.Protocol
    """

    @abstractmethod
    def publish(self, message: Message) -> None:
        # Implementations must deliver the message to its topic.
        raise NotImplementedError()
ospd-openvas-22.9.0/ospd_openvas/messaging/subscriber.py 0000664 0000000 0000000 00000001112 15011310720 0023375 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2021-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
from abc import ABC, abstractmethod
from typing import Callable, Type
from ..messages.message import Message
class Subscriber(ABC):
    """An Abstract Base Class (ABC) for subscribing to messages
    When updating to Python > 3.7 this should be converted into a
    typing.Protocol
    """

    @abstractmethod
    def subscribe(
        self, message_class: Type[Message], callback: Callable[[Message], None]
    ) -> None:
        # Implementations must invoke callback for every incoming message
        # of the given message class.
        raise NotImplementedError()
ospd-openvas-22.9.0/ospd_openvas/notus.py 0000664 0000000 0000000 00000020432 15011310720 0020433 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2021-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
from pathlib import Path
from time import sleep
from typing import Any, Dict, Iterator, Optional, Callable, Tuple
from threading import Timer
import json
import logging
from ospd.parser import CliParser
from ospd_openvas.messages.result import ResultMessage
from ospd_openvas.db import OpenvasDB, MainDB
from ospd_openvas.gpg_sha_verifier import (
ReloadConfiguration,
create_verify,
reload_sha256sums,
)
logger = logging.getLogger(__name__)
# Redis key used to mark/find the database holding the notus advisory cache.
NOTUS_CACHE_NAME = "notuscache"
def hashsum_verificator(
    advisories_directory_path: Path, disable: bool
) -> Callable[[Path], bool]:
    """Build a file verification predicate for notus advisories.

    Returns a callable that checks a file against the GPG-verified
    sha256sums of the advisories directory, or an always-true callable
    when verification is disabled.
    """
    if disable:
        logger.info("hashsum verification is disabled")
        return lambda _: True

    def failure_handler(
        _: Optional[Dict[str, str]],
    ) -> Dict[str, str]:
        # Invoked when GPG verification of the sums file itself fails; an
        # empty mapping makes every per-file check fail as well.
        logger.warning(
            "GPG verification of notus sha256sums failed."
            " Notus advisories are not loaded."
        )
        return {}

    reload_config = ReloadConfiguration(
        hash_file=advisories_directory_path / "sha256sums",
        on_verification_failure=failure_handler,
    )
    return create_verify(reload_sha256sums(reload_config))
class Cache:
    """Redis-backed storage for notus advisory metadata.

    Advisories are stored as JSON strings under `<prefix>/<oid>` keys in a
    dedicated redis namespace that is located (or created) on init.
    """

    def __init__(
        self, main_db: MainDB, prefix: str = "internal/notus/advisories"
    ):
        self._main_db = main_db
        # Check if it was previously uploaded
        self.ctx, _ = OpenvasDB.find_database_by_pattern(
            NOTUS_CACHE_NAME, self._main_db.max_database_index
        )
        # Get a new namespace for the Notus Cache
        if not self.ctx:
            new_db = self._main_db.get_new_kb_database()
            self.ctx = new_db.ctx
            # Mark the fresh database so later runs can find it by pattern.
            OpenvasDB.add_single_item(
                self.ctx, NOTUS_CACHE_NAME, set([1]), lpush=True
            )
        self.__prefix = prefix

    def store_advisory(self, oid: str, value: Dict[str, str]):
        """Store an advisory, JSON-encoded, under its OID key."""
        return OpenvasDB.set_single_item(
            self.ctx, f"{self.__prefix}/{oid}", [json.dumps(value)]
        )

    def exists(self, oid: str) -> bool:
        """Return True if an advisory with the given OID is cached."""
        return OpenvasDB.exists(self.ctx, f"{self.__prefix}/{oid}")

    def get_advisory(self, oid: str) -> Optional[Dict[str, str]]:
        """Return the decoded advisory for oid, or None when not cached."""
        result = OpenvasDB.get_single_item(self.ctx, f"{self.__prefix}/{oid}")
        if result:
            return json.loads(result)
        return None

    def get_oids(self) -> Iterator[Tuple[str, str]]:
        """Get the list of NVT file names and OIDs.
        Returns:
            An iterable of tuples of file name and oid.
        """

        def parse_oid(item):
            # Key layout is "<prefix>/<oid>"; the OID is the last segment.
            return str(item).rsplit('/', maxsplit=1)[-1]

        for f, oid in OpenvasDB.get_filenames_and_oids(
            self.ctx, f"{self.__prefix}*", parse_oid
        ):
            yield (f, oid)
class Notus:
    """Stores and access notus advisory data in redis"""

    cache: Cache
    loaded: bool = False
    loading: bool = False
    path: Path
    disable_hashsum_verification: bool
    _verifier: Optional[Callable[[Path], bool]]

    def __init__(
        self,
        path: Path,
        cache: Cache,
        disable_hashsum_verification: bool = False,
    ):
        self.path = path
        self.cache = cache
        self._verifier = None
        self.disable_hashsum_verification = disable_hashsum_verification

    def reload_cache(self):
        """Load all `*.notus` advisory files from disk into the cache.

        If a load is already in progress, block until it has finished and
        return without loading a second time.
        """
        if self.loading:
            # Bug fix: previously `while not self.loading` which is False
            # here, so the method returned immediately instead of blocking
            # until the concurrent load is done.
            # NOTE(review): this flag hand-off has no locking; confirm that
            # concurrent callers are threads of the same process.
            while self.loading:
                sleep(1)
            return
        self.loading = True
        self.loaded = False
        for f in self.path.glob('*.notus'):
            # Create the verifier lazily on first use.
            if not self._verifier:
                self._verifier = hashsum_verificator(
                    self.path, self.disable_hashsum_verification
                )
            if self._verifier:
                if self._verifier(f):
                    data = json.loads(f.read_bytes())
                    advisories = data.pop("advisories", [])
                    for advisory in advisories:
                        res = self.__to_ospd(f, advisory, data)
                        self.cache.store_advisory(advisory["oid"], res)
                else:
                    # logging.WARN is a deprecated alias; use warning().
                    logger.warning(
                        "ignoring %s due to invalid signature", f
                    )
        self.loading = False
        self.loaded = True

    def __to_ospd(
        self, path: Path, advisory: Dict[str, Any], meta_data: Dict[str, Any]
    ):
        """Translate a notus advisory dict into the OSPD VT metadata shape.

        Arguments:
            path: File the advisory was read from (used for filename/family).
            advisory: Single advisory entry from the notus file.
            meta_data: Remaining top-level fields of the notus file.
        """
        result = {}
        result["vt_params"] = []
        result["creation_date"] = str(advisory.get("creation_date", 0))
        result["last_modification"] = str(advisory.get("last_modification", 0))
        result["modification_time"] = str(advisory.get("last_modification", 0))
        result["summary"] = advisory.get("summary")
        result["impact"] = advisory.get("impact")
        result["affected"] = advisory.get("affected")
        result["insight"] = advisory.get("insight")
        result['solution'] = "Please install the updated package(s)."
        result['solution_type'] = "VendorFix"
        result['vuldetect'] = (
            'Checks if a vulnerable package version is present on the target'
            ' host.'
        )
        result['qod_type'] = advisory.get('qod_type', 'package')
        # Prefer CVSSv3 over CVSSv2 when both are present.
        severity = advisory.get('severity', {})
        cvss = severity.get("cvss_v3", None)
        if not cvss:
            cvss = severity.get("cvss_v2", None)
        result["severity_vector"] = cvss
        result["filename"] = path.name
        cves = advisory.get("cves", None)
        xrefs = advisory.get("xrefs", None)
        advisory_xref = advisory.get("advisory_xref", "")
        refs = {}
        refs['url'] = [advisory_xref]
        advisory_id = advisory.get("advisory_id", None)
        if cves:
            refs['cve'] = cves
        if xrefs:
            refs['url'] = refs['url'] + xrefs
        if advisory_id:
            refs['advisory_id'] = [advisory_id]
        result["refs"] = refs
        # Fall back to the file stem when the feed has no family field.
        result["family"] = meta_data.get("family", path.stem)
        result["name"] = advisory.get("title", "")
        result["category"] = "3"
        return result

    def get_oids(self):
        """Yield (filename, oid) tuples, loading the cache first if needed."""
        if not self.loaded:
            self.reload_cache()
        return self.cache.get_oids()

    def exists(self, oid: str) -> bool:
        """Return True if the given OID is a cached notus advisory."""
        return self.cache.exists(oid)

    def get_nvt_metadata(self, oid: str) -> Optional[Dict[str, str]]:
        """Return the cached advisory metadata for oid, or None."""
        return self.cache.get_advisory(oid)
class NotusResultHandler:
    """Collects results generated by the Notus-Scanner and reports them
    in batches per scan id."""

    def __init__(self, report_func: Callable[[list, str], bool]) -> None:
        # scan id -> list of result dicts that were not reported yet
        self._results = {}
        self._report_func = report_func

    def _report_results(self, scan_id: str) -> None:
        """Reports all results collected for a scan"""
        batch = self._results.pop(scan_id)
        reported = self._report_func(batch, scan_id)
        if not reported:
            logger.warning(
                "Unable to report %d notus results for scan id %s.",
                len(batch),
                scan_id,
            )

    def result_handler(self, res_msg: ResultMessage) -> None:
        """Queue a single Notus result; the first result for a scan starts
        a 0.25s timer that flushes the whole batch."""
        result = res_msg.serialize()
        scan_id = result.pop("scan_id")

        first_result_for_scan = scan_id not in self._results
        if first_result_for_scan:
            self._results[scan_id] = []
        self._results[scan_id].append(result)

        if first_result_for_scan:
            Timer(0.25, self._report_results, [scan_id]).start()
# Default on-disk location of the notus advisory feed.
DEFAULT_NOTUS_FEED_DIR = "/var/lib/notus/advisories"
def _argument_to_bool(value: str) -> bool:
    """Interpret a CLI argument string as a boolean.

    Accepts '1', 'true', 'yes' and 'on' (case-insensitive) as True;
    everything else is False.
    """
    return str(value).strip().lower() in ('1', 'true', 'yes', 'on')


class NotusParser(CliParser):
    """CLI argument parser with the notus-specific options."""

    def __init__(self):
        super().__init__('OSPD - openvas')
        self.parser.add_argument(
            '--notus-feed-dir',
            default=DEFAULT_NOTUS_FEED_DIR,
            help='Directory where notus feed is placed. Default: %(default)s',
        )
        self.parser.add_argument(
            '--disable-notus-hashsum-verification',
            default=False,
            # Bug fix: argparse `type=bool` treats every non-empty string
            # (including "False") as True; parse truthy words explicitly.
            type=_argument_to_bool,
            help=(
                'Disables hashsum verification for notus advisories.'
                ' %(default)s'
            ),
        )
ospd-openvas-22.9.0/ospd_openvas/nvticache.py 0000664 0000000 0000000 00000021641 15011310720 0021232 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
"""Provide functions to handle NVT Info Cache."""
import logging
from typing import List, Dict, Optional, Iterator, Tuple
from pathlib import Path
from time import time
from ospd.errors import RequiredArgument
from ospd_openvas.errors import OspdOpenvasError
from ospd_openvas.db import NVT_META_FIELDS, OpenvasDB, MainDB, BaseDB, RedisCtx
# Redis key under which the scanner stores the feed version; also used to
# locate the nvti cache database.
NVTI_CACHE_NAME = "nvticache"
logger = logging.getLogger(__name__)
# Positions for redis list accesses (first and last element).
LIST_FIRST_POS = 0
LIST_LAST_POS = -1
class NVTICache(BaseDB):
    """Read access to the redis NVTI cache populated by the openvas scanner.

    Provides VT metadata, preferences, tags, references, family and
    checksum lookups. The redis context is resolved lazily because the
    cache database only exists after openvas has loaded the feed.
    """

    # Mapping of QoD (Quality of Detection) type name to its numeric value.
    QOD_TYPES = {
        'exploit': '100',
        'remote_vul': '99',
        'remote_app': '98',
        'package': '97',
        'registry': '97',
        'remote_active': '95',
        'remote_banner': '80',
        'executable_version': '80',
        'remote_analysis': '70',
        'remote_probe': '50',
        'package_unreliable': '30',
        'remote_banner_unreliable': '30',
        'executable_version_unreliable': '30',
        'general_note': '1',
        'default': '70',
    }

    def __init__(  # pylint: disable=super-init-not-called
        self, main_db: MainDB
    ):
        # Redis context and database index are resolved lazily via `ctx`.
        self._ctx = None
        self.index = None
        self._main_db = main_db

    @property
    def ctx(self) -> Optional[RedisCtx]:
        # Locate the nvti cache database on first access; stays None while
        # openvas has not created it yet.
        if self._ctx is None:
            self._ctx, self.index = OpenvasDB.find_database_by_pattern(
                NVTI_CACHE_NAME, self._main_db.max_database_index
            )
        return self._ctx

    def get_feed_version(self) -> Optional[str]:
        """Get feed version of the nvti cache db.
        Returns the feed version or None if the nvt feed isn't available.
        """
        if not self.ctx:
            # no nvti cache db available yet
            return None
        # no feed version for notus otherwise that would be a contract change
        return OpenvasDB.get_single_item(self.ctx, NVTI_CACHE_NAME)

    def get_oids(self) -> Iterator[Tuple[str, str]]:
        """Get the list of NVT file names and OIDs.
        Returns:
            An iterable of tuples of file name and oid.
        """

        def parse_oid(item):
            # Keys have the layout "nvt:<oid>"; strip the "nvt:" prefix.
            return item[4:]

        if self.ctx:
            for f, oid in OpenvasDB.get_filenames_and_oids(
                self.ctx, 'nvt:*', parse_oid
            ):
                yield (f, oid)

    def get_nvt_params(self, oid: str) -> Optional[Dict[str, str]]:
        """Get NVT's preferences.
        Arguments:
            oid: OID of VT from which to get the parameters.
        Returns:
            A dictionary with preferences and timeout.
        """
        prefs = self.get_nvt_prefs(oid)
        vt_params = {}
        if prefs:
            for nvt_pref in prefs:
                # Preference entries are "id|||name|||type[|||default]".
                elem = nvt_pref.split('|||')
                param_id = elem[0]
                param_name = elem[1]
                param_type = elem[2]
                vt_params[param_id] = dict()
                vt_params[param_id]['id'] = param_id
                vt_params[param_id]['type'] = param_type
                vt_params[param_id]['name'] = param_name.strip()
                vt_params[param_id]['description'] = 'Description'
                if len(elem) > 3:
                    param_default = elem[3]
                    vt_params[param_id]['default'] = param_default
                else:
                    vt_params[param_id]['default'] = ''
        return vt_params

    @staticmethod
    def _parse_metadata_tags(tags_str: str, oid: str) -> Dict[str, str]:
        """Parse a string with multiple tags.
        Arguments:
            tags_str: String with tags separated by `|`.
            oid: VT OID. Only used for logging in error case.
        Returns:
            A dictionary with the tags.
        """
        tags_dict = dict()
        tags = tags_str.split('|')
        for tag in tags:
            try:
                _tag, _value = tag.split('=', 1)
            except ValueError:
                logger.error('Tag %s in %s has no value.', tag, oid)
                continue
            tags_dict[_tag] = _value
        return tags_dict

    def get_nvt_metadata(self, oid: str) -> Optional[Dict[str, str]]:
        """Get a full NVT. Returns an XML tree with the NVT metadata.
        Arguments:
            oid: OID of VT from which to get the metadata.
        Returns:
            A dictionary with the VT metadata.
        """
        resp = OpenvasDB.get_list_item(
            self.ctx,
            f"nvt:{oid}",
            start=NVT_META_FIELDS.index("NVT_FILENAME_POS"),
            end=NVT_META_FIELDS.index("NVT_NAME_POS"),
        )
        if not isinstance(resp, list) or len(resp) == 0:
            return None
        # Field names in the same order as the redis list slice above.
        subelem = [
            'filename',
            'required_keys',
            'mandatory_keys',
            'excluded_keys',
            'required_udp_ports',
            'required_ports',
            'dependencies',
            'tag',
            'cve',
            'bid',
            'xref',
            'category',
            'family',
            'name',
        ]
        custom = dict()
        custom['refs'] = dict()
        for child, res in zip(subelem, resp):
            if child not in ['cve', 'bid', 'xref', 'tag'] and res:
                custom[child] = res
            elif child == 'tag':
                # Tags are flattened directly into the metadata dict.
                custom.update(self._parse_metadata_tags(res, oid))
            elif child in ['cve', 'bid', 'xref'] and res:
                custom['refs'][child] = res.split(", ")
        custom['vt_params'] = dict()
        custom['vt_params'].update(self.get_nvt_params(oid))
        return custom

    def get_nvt_refs(self, oid: str) -> Optional[Dict[str, str]]:
        """Get a full NVT.
        Arguments:
            oid: OID of VT from which to get the VT references.
        Returns:
            A dictionary with the VT references.
        """
        resp = OpenvasDB.get_list_item(
            self.ctx,
            f"nvt:{oid}",
            start=NVT_META_FIELDS.index("NVT_CVES_POS"),
            end=NVT_META_FIELDS.index("NVT_XREFS_POS"),
        )
        if not isinstance(resp, list) or len(resp) == 0:
            return None
        subelem = ['cve', 'bid', 'xref']
        refs = dict()
        for child, res in zip(subelem, resp):
            refs[child] = res.split(", ")
        return refs

    def get_nvt_family(self, oid: str) -> Optional[str]:
        """Get NVT family
        Arguments:
            oid: OID of VT from which to get the VT family.
        Returns:
            A str with the VT family.
        """
        return OpenvasDB.get_single_item(
            self.ctx,
            f"nvt:{oid}",
            index=NVT_META_FIELDS.index("NVT_FAMILY_POS"),
        )

    def get_nvt_prefs(self, oid: str) -> Optional[List[str]]:
        """Get NVT preferences.
        Arguments:
            ctx: Redis context to be used.
            oid: OID of VT from which to get the VT preferences.
        Returns:
            A list with the VT preferences.
        """
        key = f'oid:{oid}:prefs'
        # notus doesn't seem to have preferences, ignoring
        return OpenvasDB.get_list_item(self.ctx, key)

    def get_nvt_tags(self, oid: str) -> Optional[Dict[str, str]]:
        """Get Tags of the given OID.
        Arguments:
            ctx: Redis context to be used.
            oid: OID of VT from which to get the VT tags.
        Returns:
            A dictionary with the VT tags.
        """
        tag = OpenvasDB.get_single_item(
            self.ctx,
            f"nvt:{oid}",
            index=NVT_META_FIELDS.index('NVT_TAGS_POS'),
        )
        # NOTE(review): if the OID is unknown `tag` may be None here and
        # split() would raise AttributeError — confirm callers only pass
        # known OIDs.
        tags = tag.split('|')
        return dict([item.split('=', 1) for item in tags])

    def get_nvt_files_count(self) -> int:
        # Number of "filename:*" keys, i.e. loaded VT files.
        return OpenvasDB.get_key_count(self.ctx, "filename:*")

    def get_nvt_count(self) -> int:
        # Number of "nvt:*" keys, i.e. cached VTs.
        return OpenvasDB.get_key_count(self.ctx, "nvt:*")

    def add_vt_to_cache(self, vt_id: str, vt: List[str]):
        """Store a VT metadata list under vt_id and record its load time.

        Raises:
            RequiredArgument: if vt_id or vt is missing.
            OspdOpenvasError: if vt is not a 15-element list.
        """
        if not vt_id:
            raise RequiredArgument('add_vt_to_cache', 'vt_id')
        if not vt:
            raise RequiredArgument('add_vt_to_cache', 'vt')
        if not isinstance(vt, list) or len(vt) != 15:
            raise OspdOpenvasError(f'Error trying to load the VT {vt} in cache')
        OpenvasDB.add_single_list(self.ctx, vt_id, vt)
        # vt[0] is the VT filename; remember when it was loaded.
        OpenvasDB.add_single_item(self.ctx, f'filename:{vt[0]}', [int(time())])

    def get_file_checksum(self, file_abs_path: Path) -> str:
        """Get file sha256 checksum or md5 checksum
        Arguments:
            file_abs_path: File to get the checksum
        Returns:
            The checksum
        """
        # Try to get first sha256 checksum
        sha256sum = OpenvasDB.get_single_item(
            self.ctx,
            f'sha256sums:{file_abs_path}',
        )
        if sha256sum:
            return sha256sum
        # Search for md5 checksum
        md5sum = OpenvasDB.get_single_item(
            self.ctx,
            f'md5sums:{file_abs_path}',
        )
        if md5sum:
            return md5sum
        # NOTE(review): implicitly returns None when neither checksum is
        # present, despite the `-> str` annotation — confirm callers handle
        # a missing checksum.
ospd-openvas-22.9.0/ospd_openvas/openvas.py 0000664 0000000 0000000 00000013246 15011310720 0020743 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
from typing import Optional, Dict, Any
import logging
import subprocess
import psutil
logger = logging.getLogger(__name__)
_BOOL_DICT = {'no': 0, 'yes': 1}
class NASLCli:
    """Class for calling nasl-cli executable"""

    @staticmethod
    def load_vts_into_redis(signature_check: bool) -> bool:
        """Loads all VTs into the redis database

        Runs `nasl-cli feed update`, adding `-x` when the feed signature
        should be checked. Returns True on success and False on failure.
        """
        cmd = ['nasl-cli', 'feed', 'update']
        if signature_check:
            cmd.append('-x')
        try:
            subprocess.check_call(cmd, stdout=subprocess.DEVNULL)
            return True
        except (subprocess.SubprocessError, OSError) as err:
            logger.error('nasl-cli failed to load VTs. %s', err)
            return False
class Openvas:
    """Class for calling the openvas executable"""

    @staticmethod
    def _get_version_output() -> Optional[str]:
        """Return the raw output of `openvas -V`, or None on failure."""
        try:
            result = subprocess.check_output(
                ['openvas', '-V'], stderr=subprocess.STDOUT
            )
            return result.decode('ascii')
        except (subprocess.SubprocessError, OSError) as e:
            # Bug fix: log message previously read "Is was not possible".
            logger.debug(
                'It was not possible to call openvas to get the version '
                'information. Reason %s',
                e,
            )
            return None

    @staticmethod
    def check() -> bool:
        """Checks that openvas command line tool is found and
        is executable.
        """
        try:
            subprocess.check_call(['openvas', '-V'], stdout=subprocess.DEVNULL)
            return True
        except (subprocess.SubprocessError, OSError) as e:
            logger.debug(
                'It was not possible to call the openvas executable. Reason %s',
                e,
            )
            return False

    @staticmethod
    def check_sudo() -> bool:
        """Checks if openvas can be run with sudo"""
        try:
            subprocess.check_call(
                ['sudo', '-n', 'openvas', '-s'], stdout=subprocess.DEVNULL
            )
            return True
        except (subprocess.SubprocessError, OSError) as e:
            logger.debug(
                'It was not possible to call openvas with sudo. '
                'The scanner will run as non-root user. Reason %s',
                e,
            )
            return False

    @classmethod
    def get_version(cls) -> Optional[str]:
        """Returns the version string of the openvas executable"""
        result = cls._get_version_output()
        if result is None:
            return None
        version = result.split('\n')
        # The first output line must contain "OpenVAS"; otherwise the
        # binary is not what we expect.
        if version[0].find('OpenVAS') < 0:
            return None
        return version[0]

    @staticmethod
    def get_settings() -> Dict[str, Any]:
        """Parses the current settings of the openvas executable

        Returns a dict of setting name to value, with 'yes'/'no' values
        converted to 1/0 via _BOOL_DICT. Empty on failure.
        """
        param_list = dict()
        try:
            result = subprocess.check_output(['openvas', '-s'])
            result = result.decode('ascii')
        except (subprocess.SubprocessError, OSError, UnicodeDecodeError) as e:
            logger.warning('Could not gather openvas settings. Reason %s', e)
            return param_list
        # Each line has the form "key = value".
        for conf in result.split('\n'):
            if not conf:
                continue
            try:
                key, value = conf.split('=', 1)
            except ValueError:
                logger.warning("Could not parse openvas setting '%s'", conf)
                continue
            key = key.strip()
            value = value.strip()
            if value:
                value = _BOOL_DICT.get(value, value)
                param_list[key] = value
        return param_list

    @staticmethod
    def load_vts_into_redis() -> bool:
        """Loads all VTs into the redis database"""
        logger.debug('Loading VTs into Redis DB...')
        try:
            subprocess.check_call(
                ['openvas', '--update-vt-info'], stdout=subprocess.DEVNULL
            )
            logger.debug('Finished loading VTs into Redis DB')
            return True
        except (subprocess.SubprocessError, OSError) as err:
            logger.error('OpenVAS Scanner failed to load VTs. %s', err)
            return False

    @staticmethod
    def start_scan(
        scan_id: str,
        sudo: bool = False,
        niceness: int = None,
    ) -> Optional[psutil.Popen]:
        """Calls openvas to start a scan process

        Arguments:
            scan_id: ID of the scan to start.
            sudo: Run openvas via `sudo -n` when True.
            niceness: Optional nice value for the scan process.

        Returns the spawned process, or None when it could not be started.
        """
        cmd = []

        if niceness:
            # Bug fix: argv entries must be strings; an integer niceness
            # previously ended up in the command list and would raise a
            # TypeError when spawning the process.
            cmd += ['nice', '-n', str(niceness)]
            logger.debug("Starting scan with niceness %s", niceness)

        if sudo:
            cmd += ['sudo', '-n']

        cmd += ['openvas', '--scan-start', scan_id]

        try:
            return psutil.Popen(cmd, shell=False)
        except (psutil.Error, OSError) as e:
            # the command is not available; FileNotFoundError is an OSError
            # subclass, so it is covered here as well.
            logger.warning("Could not start scan process. Reason %s", e)
            return None

    @staticmethod
    def stop_scan(scan_id: str, sudo: bool = False) -> bool:
        """Calls openvas to stop a scan process"""
        cmd = []
        if sudo:
            cmd += ['sudo', '-n']
        cmd += ['openvas', '--scan-stop', scan_id]
        try:
            subprocess.check_call(cmd)
            return True
        except (subprocess.SubprocessError, OSError) as e:
            # the command is not available
            logger.warning(
                'Not possible to stop scan: %s. Reason %s',
                scan_id,
                e,
            )
            return False
ospd-openvas-22.9.0/ospd_openvas/preferencehandler.py 0000664 0000000 0000000 00000073424 15011310720 0022750 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
# pylint: disable=too-many-lines
"""Prepare the preferences to be used by OpenVAS. Get the data from the scan
collection and store the data in a redis KB in the right format to be used by
OpenVAS."""
import logging
import binascii
from enum import IntEnum
from typing import Callable, Optional, Dict, List, Tuple
from base64 import b64decode
from ospd.scan import ScanCollection, ScanStatus
from ospd.ospd import BASE_SCANNER_PARAMS
from ospd.network import valid_port_list
from ospd_openvas.openvas import Openvas
from ospd_openvas.db import KbDB
from ospd_openvas.nvticache import NVTICache
from ospd_openvas.vthelper import VtHelper
logger = logging.getLogger(__name__)
# OIDs of well-known NVTs whose preferences are set from scan data.
# (Only OID_PING_HOST is used in this module's visible code; presumably the
# auth OIDs are used by credential handling elsewhere — confirm in file.)
OID_SSH_AUTH = "1.3.6.1.4.1.25623.1.0.103591"
OID_SMB_AUTH = "1.3.6.1.4.1.25623.1.0.90023"
OID_ESXI_AUTH = "1.3.6.1.4.1.25623.1.0.105058"
OID_SNMP_AUTH = "1.3.6.1.4.1.25623.1.0.105076"
OID_PING_HOST = "1.3.6.1.4.1.25623.1.0.100315"
OID_KRB5_AUTH = "1.3.6.1.4.1.25623.1.0.102114"
# Scan preference keys consumed by the Boreas alive-host scanner.
BOREAS_ALIVE_TEST = "ALIVE_TEST"
BOREAS_ALIVE_TEST_PORTS = "ALIVE_TEST_PORTS"
# openvas setting that enables Boreas (see prepare_boreas_alive_test).
BOREAS_SETTING_NAME = "test_alive_hosts_only"
class AliveTest(IntEnum):
    """Alive Tests.

    Individual methods are bit-field values and can be OR-ed together
    into a single alive-test mask (combined valid range is 1..31).
    """

    ALIVE_TEST_SCAN_CONFIG_DEFAULT = 0
    ALIVE_TEST_TCP_ACK_SERVICE = 1
    ALIVE_TEST_ICMP = 2
    ALIVE_TEST_ARP = 4
    ALIVE_TEST_CONSIDER_ALIVE = 8
    ALIVE_TEST_TCP_SYN_SERVICE = 16
def alive_test_methods_to_bit_field(
    icmp: bool, tcp_syn: bool, tcp_ack: bool, arp: bool, consider_alive: bool
) -> int:
    """Internally a bit field is used as alive test. This function creates
    such a bit field out of the supplied alive test methods.
    """
    bit_field = 0
    if icmp:
        bit_field |= AliveTest.ALIVE_TEST_ICMP
    if tcp_syn:
        bit_field |= AliveTest.ALIVE_TEST_TCP_SYN_SERVICE
    if tcp_ack:
        bit_field |= AliveTest.ALIVE_TEST_TCP_ACK_SERVICE
    if arp:
        bit_field |= AliveTest.ALIVE_TEST_ARP
    if consider_alive:
        bit_field |= AliveTest.ALIVE_TEST_CONSIDER_ALIVE
    return bit_field
def _from_bool_to_str(value: int) -> str:
"""The OpenVAS scanner use yes and no as boolean values, whereas ospd
uses 1 and 0."""
return 'yes' if value == 1 else 'no'
class PreferenceHandler:
def __init__(
self,
scan_id: str,
kbdb: KbDB,
scan_collection: ScanCollection,
nvticache: NVTICache,
is_handled_by_notus: Callable[[str], bool],
):
self.scan_id = scan_id
self.kbdb = kbdb
self.scan_collection = scan_collection
self._target_options = None
self._nvts_params = None
self.nvti = nvticache
if is_handled_by_notus:
self.is_handled_by_notus = is_handled_by_notus
else:
self.is_handled_by_notus = lambda _: False
self.errors = []
    def prepare_scan_id_for_openvas(self):
        """Create the openvas scan id and store it in the redis kb.
        Return the openvas scan_id.
        """
        # NOTE(review): despite the docstring, nothing is returned here;
        # the scan id is only registered in the kb.
        self.kbdb.add_scan_id(self.scan_id)
def get_error_messages(self) -> List:
"""Returns the Error List and reset it"""
ret = self.errors
self.errors = []
return ret
@property
def target_options(self) -> Dict:
"""Return target options from Scan collection"""
if self._target_options is not None:
return self._target_options
self._target_options = self.scan_collection.get_target_options(
self.scan_id
)
return self._target_options
def _get_vts_in_groups(
self,
filters: List[str],
) -> List[str]:
"""Return a list of vts which match with the given filter.
Arguments:
filters A list of filters. Each filter has key, operator and
a value. They are separated by a space.
Supported keys: family
Returns a list of vt oids which match with the given filter.
"""
vts_list = list()
families = dict()
# Only get vts from NVTICache. Does not include Notus advisories,
# since they are handled by Notus.
oids = self.nvti.get_oids()
# Same here. Only check for families in NVT Cache.
# If necessary, consider to call get_advisory_famaly from
# Notus class
for _, oid in oids:
family = self.nvti.get_nvt_family(oid)
if family not in families:
families[family] = list()
families[family].append(oid)
for elem in filters:
key, value = elem.split('=')
if key == 'family' and value in families:
vts_list.extend(families[value])
return vts_list
def _get_vt_param_type(self, vt: Dict, vt_param_id: str) -> Optional[str]:
"""Return the type of the vt parameter from the vts dictionary."""
vt_params_list = vt.get("vt_params")
if vt_params_list.get(vt_param_id):
return vt_params_list[vt_param_id]["type"]
return None
def _get_vt_param_name(self, vt: Dict, vt_param_id: str) -> Optional[str]:
"""Return the type of the vt parameter from the vts dictionary."""
vt_params_list = vt.get("vt_params")
if vt_params_list.get(vt_param_id):
return vt_params_list[vt_param_id]["name"]
return None
@staticmethod
def check_param_type(vt_param_value: str, param_type: str) -> Optional[int]:
"""Check if the value of a vt parameter matches with
the type founded.
"""
if param_type in [
'entry',
'password',
'radio',
'sshlogin',
] and isinstance(vt_param_value, str):
return None
elif param_type == 'checkbox' and (
vt_param_value == '0' or vt_param_value == '1'
):
return None
elif param_type == 'file':
try:
b64decode(vt_param_value.encode())
except (binascii.Error, AttributeError, TypeError):
return 1
return None
elif param_type == 'integer':
try:
int(vt_param_value)
except ValueError:
return 1
return None
return 1
    def _process_vts(
        self,
        vts: Dict[str, Dict[str, str]],
    ) -> Tuple[List[str], Dict[str, str]]:
        """Add single VTs and their parameters.

        Arguments:
            vts: Mapping of VT OID to its parameter dict; may also contain
                a 'vt_groups' entry with family filters (it is popped).

        Returns:
            Tuple of (VT OIDs to schedule, preference strings keyed by
            'oid:param_id:type:name').
        """
        vts_list = []
        vts_params = {}
        vtgroups = vts.pop('vt_groups')

        vthelper = VtHelper(self.nvti, None)

        # This get vt groups which are not Notus Family.
        # Since Notus advisories are handled by notus, they
        # are not sent to Openvas-scanner
        if vtgroups:
            vts_list = self._get_vts_in_groups(vtgroups)

        counter = 0
        for vtid, vt_params in vts.items():
            counter += 1
            # Periodically check whether the client stopped the scan so a
            # large VT set does not keep us busy needlessly.
            if counter % 500 == 0:
                if (
                    self.scan_collection.get_status(self.scan_id)
                    == ScanStatus.STOPPED
                ):
                    break
            # Remove oids handled by notus
            if self.is_handled_by_notus(vtid):
                logger.debug('The VT %s is handled by notus. Ignoring.', vtid)
                continue
            vt = vthelper.get_single_vt(vtid)
            if not vt:
                logger.warning(
                    'The VT %s was not found and it will not be added to the '
                    'plugin scheduler.',
                    vtid,
                )
                continue
            vts_list.append(vtid)
            for vt_param_id, vt_param_value in vt_params.items():
                param_type = self._get_vt_param_type(vt, vt_param_id)
                param_name = self._get_vt_param_name(vt, vt_param_id)
                # Param id '0' is special-cased below (forced to integer,
                # presumably the script timeout — confirm); every other
                # param needs both a type and a name from the metadata.
                if vt_param_id > '0' and (not param_type or not param_name):
                    logger.debug(
                        'Missing type or name for VT parameter %s of %s. '
                        'This VT parameter will not be set.',
                        vt_param_id,
                        vtid,
                    )
                    continue
                if vt_param_id == '0':
                    type_aux = 'integer'
                else:
                    type_aux = param_type
                # check_param_type returns 1 (truthy) on mismatch.
                if self.check_param_type(vt_param_value, type_aux):
                    logger.debug(
                        'The VT parameter %s for %s could not be set. '
                        'Expected %s type for parameter value %s',
                        vt_param_id,
                        vtid,
                        type_aux,
                        str(vt_param_value),
                    )
                    continue
                if type_aux == 'checkbox':
                    vt_param_value = _from_bool_to_str(int(vt_param_value))
                vts_params[
                    f'{vtid}:{vt_param_id}:{param_type}:{param_name}'
                ] = str(vt_param_value)
        return vts_list, vts_params
def prepare_plugins_for_openvas(self) -> bool:
"""Get the plugin list and it preferences from the Scan Collection.
The plugin list is immediately stored in the kb.
"""
nvts = self.scan_collection.get_vts(self.scan_id)
if nvts:
nvts_list, self._nvts_params = self._process_vts(nvts)
# Add nvts list
separ = ';'
plugin_list = f'plugin_set|||{separ.join(nvts_list)}'
self.kbdb.add_scan_preferences(self.scan_id, [plugin_list])
return True
return False
def prepare_nvt_preferences(self):
"""Prepare the vts preferences. Store the data in the kb."""
items_list = []
counter = 0
for key, val in self._nvts_params.items():
items_list.append(f'{key}|||{val}')
counter += 1
if counter % 500 == 0:
if (
self.scan_collection.get_status(self.scan_id)
== ScanStatus.STOPPED
):
break
if items_list:
self.kbdb.add_scan_preferences(self.scan_id, items_list)
    @staticmethod
    def build_alive_test_opt_as_prefs(
        target_options: Dict[str, str],
    ) -> Dict[str, str]:
        """Parse the target options dictionary.
        Arguments:
            target_options: Dictionary with the target options.
        Return:
            A dict with the target options related to alive test method
            in string format to be added to the redis KB.
        """
        target_opt_prefs_list = {}
        alive_test = None

        if target_options:
            # Alive test specified as bit field.
            alive_test = target_options.get('alive_test')
            # Alive test specified as individual methods.
            alive_test_methods = target_options.get('alive_test_methods')
            # alive_test takes precedence over alive_test_methods
            if alive_test is None and alive_test_methods:
                alive_test = alive_test_methods_to_bit_field(
                    icmp=target_options.get('icmp') == '1',
                    tcp_syn=target_options.get('tcp_syn') == '1',
                    tcp_ack=target_options.get('tcp_ack') == '1',
                    arp=target_options.get('arp') == '1',
                    consider_alive=target_options.get('consider_alive') == '1',
                )

        if target_options and alive_test:
            try:
                alive_test = int(alive_test)
            except ValueError:
                logger.debug(
                    'Alive test settings not applied. '
                    'Invalid alive test value %s',
                    target_options.get('alive_test'),
                )
                return target_opt_prefs_list

            # No alive test or wrong value, uses the default
            # preferences sent by the client.
            if alive_test < 1 or alive_test > 31:
                return target_opt_prefs_list

            # TCP ping is enabled when either TCP method bit is set.
            if (
                alive_test & AliveTest.ALIVE_TEST_TCP_ACK_SERVICE
                or alive_test & AliveTest.ALIVE_TEST_TCP_SYN_SERVICE
            ):
                value = "yes"
            else:
                value = "no"
            target_opt_prefs_list[
                f'{OID_PING_HOST}:1:checkbox:Do a TCP ping'
            ] = value

            # Both TCP bits set -> additionally try TCP-SYN ping.
            if (
                alive_test & AliveTest.ALIVE_TEST_TCP_SYN_SERVICE
                and alive_test & AliveTest.ALIVE_TEST_TCP_ACK_SERVICE
            ):
                value = "yes"
            else:
                value = "no"
            target_opt_prefs_list[
                f'{OID_PING_HOST}:2:checkbox:TCP ping tries also TCP-SYN ping'
            ] = value

            # Only SYN (and not ACK) -> SYN-only ping.
            if (alive_test & AliveTest.ALIVE_TEST_TCP_SYN_SERVICE) and not (
                alive_test & AliveTest.ALIVE_TEST_TCP_ACK_SERVICE
            ):
                value = "yes"
            else:
                value = "no"
            target_opt_prefs_list[
                f'{OID_PING_HOST}:7:checkbox:TCP ping tries only TCP-SYN ping'
            ] = value

            if alive_test & AliveTest.ALIVE_TEST_ICMP:
                value = "yes"
            else:
                value = "no"
            target_opt_prefs_list[
                f'{OID_PING_HOST}:3:checkbox:Do an ICMP ping'
            ] = value

            if alive_test & AliveTest.ALIVE_TEST_ARP:
                value = "yes"
            else:
                value = "no"
            target_opt_prefs_list[f'{OID_PING_HOST}:4:checkbox:Use ARP'] = value

            # CONSIDER_ALIVE inverts this preference: hosts are never
            # marked dead when they should be considered alive.
            if alive_test & AliveTest.ALIVE_TEST_CONSIDER_ALIVE:
                value = "no"
            else:
                value = "yes"
            # NOTE(review): "unrechable" looks misspelled but this string is
            # a runtime preference key; presumably it must match the NVT's
            # preference name exactly — do not "fix" without checking the
            # scanner side.
            target_opt_prefs_list[
                f'{OID_PING_HOST}:5:checkbox:Mark unrechable Hosts '
                'as dead (not scanning)'
            ] = value

        return target_opt_prefs_list
def prepare_alive_test_option_for_openvas(self):
"""Set alive test option. Overwrite the scan config settings."""
settings = Openvas.get_settings()
if settings and (
self.target_options.get('alive_test')
or self.target_options.get('alive_test_methods')
):
alive_test_opt = self.build_alive_test_opt_as_prefs(
self.target_options
)
self._nvts_params.update(alive_test_opt)
def prepare_boreas_alive_test(self):
    """Set alive_test for Boreas if boreas scanner config
    (BOREAS_SETTING_NAME) was set"""
    # Boreas must be enabled in the scanner settings, otherwise there
    # is nothing to configure.
    settings = Openvas.get_settings()
    if not settings or not settings.get(BOREAS_SETTING_NAME):
        return

    target_options = self.target_options
    alive_test = None
    alive_test_ports = None
    if target_options:
        alive_test_ports = target_options.get('alive_test_ports')
        # A bit-field 'alive_test' value takes precedence over the
        # individually specified methods.
        alive_test = target_options.get('alive_test')
        if alive_test is None and target_options.get('alive_test_methods'):
            alive_test = alive_test_methods_to_bit_field(
                icmp=target_options.get('icmp') == '1',
                tcp_syn=target_options.get('tcp_syn') == '1',
                tcp_ack=target_options.get('tcp_ack') == '1',
                arp=target_options.get('arp') == '1',
                consider_alive=target_options.get('consider_alive') == '1',
            )

    if alive_test is None:
        # Use default alive test if no alive_test was provided.
        alive_test = AliveTest.ALIVE_TEST_SCAN_CONFIG_DEFAULT
    else:
        try:
            alive_test = int(alive_test)
        except ValueError:
            logger.debug(
                'Alive test preference for Boreas not set. '
                'Invalid alive test value %s.',
                alive_test,
            )
            # Use default alive test as fall back
            alive_test = AliveTest.ALIVE_TEST_SCAN_CONFIG_DEFAULT

    # If a valid alive_test was set then the bit mask
    # has value between 31 (11111) and 1 (10000)
    if 1 <= alive_test <= 31:
        self.kbdb.add_scan_preferences(
            self.scan_id, [f"{BOREAS_ALIVE_TEST}|||{alive_test}"]
        )
    if alive_test == AliveTest.ALIVE_TEST_SCAN_CONFIG_DEFAULT:
        alive_test = AliveTest.ALIVE_TEST_ICMP
        self.kbdb.add_scan_preferences(
            self.scan_id, [f"{BOREAS_ALIVE_TEST}|||{alive_test}"]
        )

    # Add portlist if present. Validity is checked on Boreas side.
    if alive_test_ports is not None:
        self.kbdb.add_scan_preferences(
            self.scan_id,
            [f"{BOREAS_ALIVE_TEST_PORTS}|||{alive_test_ports}"],
        )
def prepare_reverse_lookup_opt_for_openvas(self):
    """Set reverse lookup options in the kb"""
    if not self.target_options:
        return
    prefs = []
    # Both options default to '0' (disabled) when not provided.
    for option in ('reverse_lookup_only', 'reverse_lookup_unify'):
        raw_value = int(self.target_options.get(option, '0'))
        prefs.append(f'{option}|||{_from_bool_to_str(raw_value)}')
    self.kbdb.add_scan_preferences(self.scan_id, prefs)
def prepare_target_for_openvas(self):
    """Get the target from the scan collection and set the target
    in the kb"""
    host_list = self.scan_collection.get_host_list(self.scan_id)
    self.kbdb.add_scan_preferences(self.scan_id, [f'TARGET|||{host_list}'])
def prepare_ports_for_openvas(self) -> 'str | bool':
    """Get the port list from the scan collection and store the list
    in the kb.

    Return:
        The port list string on success, or False when the port list
        fails validation via valid_port_list().
    """
    ports = self.scan_collection.get_ports(self.scan_id)
    if not valid_port_list(ports):
        # NOTE(review): returns False (not an empty string) on an
        # invalid port list; callers must treat any falsy result as
        # failure.
        return False
    port_range = f'port_range|||{ports}'
    self.kbdb.add_scan_preferences(self.scan_id, [port_range])
    return ports
def prepare_host_options_for_openvas(self):
    """Get the excluded and finished hosts from the scan collection and
    stores the list of hosts that must not be scanned in the kb."""
    excluded = self.scan_collection.get_exclude_hosts(self.scan_id)
    if excluded:
        self.kbdb.add_scan_preferences(
            self.scan_id, [f'exclude_hosts|||{excluded}']
        )
def prepare_scan_params_for_openvas(self, ospd_params: Dict[str, Dict]):
    """Get the scan parameters from the scan collection and store them
    in the kb.

    Arguments:
        ospd_params: Dictionary with the OSPD Params.
    """
    # Options which were supplied via the XML element.
    options = self.scan_collection.get_options(self.scan_id)
    prefs = []
    for key, value in options.items():
        item_type = ''
        if key in ospd_params:
            item_type = ospd_params[key].get('type')
        elif key not in BASE_SCANNER_PARAMS:
            # Unknown, scanner-side setting: warn but still forward it.
            logger.debug(
                "%s is a scanner only setting and should not be set "
                "by the client. Setting needs to be included in "
                "OpenVAS configuration file instead.",
                key,
            )
        converted = (
            _from_bool_to_str(value) if item_type == 'boolean' else str(value)
        )
        prefs.append(key + "|||" + converted)
    if prefs:
        self.kbdb.add_scan_preferences(self.scan_id, prefs)
def disable_message(self, disabled: str) -> str:
    """Return a string with the message for exclusive services."""
    return (
        f"Disabled {disabled}: KRB5 and SMB credentials "
        "are mutually exclusive."
    )
def build_credentials_as_prefs(self, credentials: Dict) -> List[str]:
    """Parse the credential dictionary.

    Translates each service entry (ssh, smb, krb5, esxi, snmp) into the
    `OID:id:type:name|||value` preference strings understood by the
    scanner. Validation failures are collected in self.errors and the
    offending credential is skipped.

    Arguments:
        credentials: Dictionary with the credentials.
    Return:
        A list with the credentials in string format to be
        added to the redis KB.
    """
    cred_prefs_list = []
    # Track which of the mutually exclusive services was seen first.
    krb5_set = False
    smb_set = False
    for credential in credentials.items():
        service = credential[0]
        cred_params = credentials.get(service)
        if not cred_params:
            logger.warning(
                "No credentials parameter found for service %s", service
            )
            continue
        cred_type = cred_params.get('type', '')
        username = cred_params.get('username', '')
        password = cred_params.get('password', '')
        # Check service ssh
        if service == 'ssh':
            # For ssh check the Port; empty/missing falls back to 22,
            # non-numeric or out-of-range ports reject the credential.
            port = cred_params.get('port', '22')
            priv_username = cred_params.get('priv_username', '')
            priv_password = cred_params.get('priv_password', '')
            if not port:
                port = '22'
                warning = (
                    "Missing port number for ssh credentials. "
                    "Using default port 22."
                )
                logger.warning(warning)
            elif not port.isnumeric():
                self.errors.append(
                    f"Port for SSH '{port}' is not a valid number."
                )
                continue
            elif int(port) > 65535 or int(port) < 1:
                self.errors.append(
                    f"Port for SSH is out of range (1-65535): {port}"
                )
                continue
            # For ssh check the credential type: 'up' is
            # username/password, 'usk' is username/SSH key.
            if cred_type == 'up':
                cred_prefs_list.append(
                    f'{OID_SSH_AUTH}:3:password:SSH password '
                    f'(unsafe!):|||{password}'
                )
            elif cred_type == 'usk':
                private = cred_params.get('private', '')
                cred_prefs_list.append(
                    f'{OID_SSH_AUTH}:2:password:SSH key passphrase:|||'
                    f'{password}'
                )
                cred_prefs_list.append(
                    f'{OID_SSH_AUTH}:4:file:SSH private key:|||'
                    f'{private}'
                )
            elif cred_type:
                self.errors.append(
                    f"Unknown Credential Type for SSH: {cred_type}. "
                    "Use 'up' for Username + Password or 'usk' for "
                    "Username + SSH Key."
                )
                continue
            else:
                self.errors.append(
                    "Missing Credential Type for SSH. Use 'up' for "
                    "Username + Password or 'usk' for Username + SSH Key."
                )
                continue
            # Common SSH preferences shared by both credential types.
            cred_prefs_list.append(f'auth_port_ssh|||{port}')
            cred_prefs_list.append(
                f'{OID_SSH_AUTH}:1:entry:SSH login name:|||{username}'
            )
            cred_prefs_list.append(
                f'{OID_SSH_AUTH}:7:entry:SSH privilege login name:'
                f'|||{priv_username}'
            )
            cred_prefs_list.append(
                f'{OID_SSH_AUTH}:8:password:SSH privilege password:'
                f'|||{priv_password}'
            )
        # Check service smb (rejected if krb5 was already configured)
        elif service == 'smb':
            if krb5_set:
                self.errors.append(self.disable_message("SMB"))
                continue
            smb_set = True
            cred_prefs_list.append(
                f'{OID_SMB_AUTH}:1:entry:SMB login:|||{username}'
            )
            cred_prefs_list.append(
                f'{OID_SMB_AUTH}:2:password:SMB password:|||{password}'
            )
        # Check service krb5 (rejected if smb was already configured;
        # requires both realm and kdc)
        elif service == 'krb5':
            if smb_set:
                self.errors.append(self.disable_message("KRB5"))
                continue
            krb5_set = True
            realm = cred_params.get('realm', '')
            if not realm:
                self.errors.append(
                    "Missing realm for Kerberos authentication."
                )
                continue
            kdc = cred_params.get('kdc', '')
            if not kdc:
                self.errors.append(
                    "Missing KDC for Kerberos authentication."
                )
                continue
            cred_prefs_list.append(
                f'{OID_KRB5_AUTH}:1:entry:KRB5 login:|||{username}'
            )
            cred_prefs_list.append(
                f'{OID_KRB5_AUTH}:2:password:KRB5 password:|||{password}'
            )
            cred_prefs_list.append(
                f'{OID_KRB5_AUTH}:3:entry:KRB5 realm:|||{realm}'
            )
            cred_prefs_list.append(
                f'{OID_KRB5_AUTH}:4:entry:KRB5 kdc:|||{kdc}'
            )
        # Check service esxi
        elif service == 'esxi':
            cred_prefs_list.append(
                f'{OID_ESXI_AUTH}:1:entry:ESXi login name:|||{username}'
            )
            cred_prefs_list.append(
                f'{OID_ESXI_AUTH}:2:password:ESXi login password:|||'
                f'{password}'
            )
        # Check service snmp (privacy algorithm is optional; auth
        # algorithm is mandatory and must be 'md5' or 'sha1')
        elif service == 'snmp':
            community = cred_params.get('community', '')
            auth_algorithm = cred_params.get('auth_algorithm', '')
            privacy_password = cred_params.get('privacy_password', '')
            privacy_algorithm = cred_params.get('privacy_algorithm', '')
            if not privacy_algorithm:
                # No privacy algorithm: a privacy password makes no
                # sense and is rejected.
                if privacy_password:
                    self.errors.append(
                        "When no privacy algorithm is used, the privacy"
                        + " password also has to be empty."
                    )
                    continue
            elif (
                not privacy_algorithm == "aes"
                and not privacy_algorithm == "des"
            ):
                self.errors.append(
                    "Unknown privacy algorithm used: "
                    + privacy_algorithm
                    + ". Use 'aes', 'des' or '' (none)."
                )
                continue
            if not auth_algorithm:
                self.errors.append(
                    "Missing authentication algorithm for SNMP."
                    + " Use 'md5' or 'sha1'."
                )
                continue
            elif (
                not auth_algorithm == "md5" and not auth_algorithm == "sha1"
            ):
                self.errors.append(
                    "Unknown authentication algorithm: "
                    + auth_algorithm
                    + ". Use 'md5' or 'sha1'."
                )
                continue
            cred_prefs_list.append(
                f'{OID_SNMP_AUTH}:1:password:SNMP Community:|||{community}'
            )
            cred_prefs_list.append(
                f'{OID_SNMP_AUTH}:2:entry:SNMPv3 Username:|||{username}'
            )
            cred_prefs_list.append(
                f'{OID_SNMP_AUTH}:3:password:SNMPv3 Password:|||{password}'
            )
            cred_prefs_list.append(
                f'{OID_SNMP_AUTH}:4:radio:SNMPv3 Authentication Algorithm:'
                f'|||{auth_algorithm}'
            )
            cred_prefs_list.append(
                f'{OID_SNMP_AUTH}:5:password:SNMPv3 Privacy Password:|||'
                f'{privacy_password}'
            )
            cred_prefs_list.append(
                f'{OID_SNMP_AUTH}:6:radio:SNMPv3 Privacy Algorithm:|||'
                f'{privacy_algorithm}'
            )
        elif service:
            self.errors.append(
                f"Unknown service type for credential: {service}"
            )
        else:
            self.errors.append("Missing service type for credential.")
    return cred_prefs_list
def prepare_credentials_for_openvas(self) -> bool:
    """Get the credentials from the scan collection and store them
    in the kb.

    Return:
        False when credentials were supplied but none could be
        translated into preferences, True otherwise.
    """
    logger.debug("Looking for given Credentials...")
    credentials = self.scan_collection.get_credentials(self.scan_id)
    if not credentials:
        logger.debug("No credentials found.")
        return True
    cred_prefs = self.build_credentials_as_prefs(credentials)
    if not cred_prefs:
        return False
    self.kbdb.add_credentials_to_scan_preferences(self.scan_id, cred_prefs)
    logger.debug("Credentials added to the kb.")
    return True
def prepare_main_kbindex_for_openvas(self):
    """Store main_kbindex as global preference in the
    kb, used by OpenVAS"""
    self.kbdb.add_scan_preferences(
        self.scan_id, [f'ov_maindbid|||{self.kbdb.index}']
    )
ospd-openvas-22.9.0/ospd_openvas/vthelper.py 0000664 0000000 0000000 00000020247 15011310720 0021120 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
"""Provide functions to handle VT Info."""
from hashlib import sha256
import logging
from typing import Any, Optional, Dict, List, Tuple, Iterator
from itertools import chain
from ospd.cvss import CVSS
from ospd_openvas.nvticache import NVTICache
from ospd_openvas.notus import Notus
logger = logging.getLogger(__name__)
class VtHelper:
    """Assemble VT (vulnerability test) metadata dictionaries from the
    Redis NVTi cache and, when available, the Notus cache."""

    def __init__(self, nvticache: NVTICache, notus: Optional[Notus] = None):
        """Arguments:
        nvticache: Cache holding NASL based VT metadata.
        notus: Optional cache holding advisory (Notus) based metadata.
        """
        self.nvti = nvticache
        self.notus = notus

    def get_single_vt(self, vt_id: str, oids=None) -> Optional[Dict[str, Any]]:
        """Build the metadata dictionary for a single VT.

        Arguments:
            vt_id: OID of the VT.
            oids: Optional mapping from VT filename to OID, used to
                resolve dependency names into OIDs.

        Return:
            Dictionary with the VT metadata, or None if the VT is
            unknown or has no severity vector.
        """
        nr = None
        if self.notus:
            nr = self.notus.get_nvt_metadata(vt_id)
        # Prefer the Notus metadata and fall back to the NVTi cache.
        custom = nr if nr else self.nvti.get_nvt_metadata(vt_id)
        if not custom:
            return None

        vt_params = custom.pop('vt_params')
        vt_refs = custom.pop('refs')
        name = custom.pop('name')
        vt_creation_time = custom.pop('creation_date')
        vt_modification_time = custom.pop('last_modification')

        if oids:
            vt_dependencies = []
            if 'dependencies' in custom:
                deps = custom.pop('dependencies')
                for dep_name in deps.split(', '):
                    # this will bug out on notus since notus does contain
                    # multiple oids per advisory; however luckily they don't
                    # have dependencies; otherwise it must be treated as a
                    # list
                    dep_oid = oids.get(dep_name)
                    # Keep the raw name when no OID is known for it.
                    vt_dependencies.append(dep_oid if dep_oid else dep_name)
        else:
            vt_dependencies = None

        # Optional descriptive fields; absent keys simply stay None and
        # are omitted from the resulting dictionary below.
        summary = custom.pop('summary', None)
        impact = custom.pop('impact', None)
        affected = custom.pop('affected', None)
        insight = custom.pop('insight', None)
        solution = custom.pop('solution', None)
        solution_t = custom.pop('solution_type', None)
        solution_m = custom.pop('solution_method', None)
        vuldetect = custom.pop('vuldetect', None)
        qod_t = None
        qod_v = None
        # 'qod_type' takes precedence over a plain numeric 'qod'.
        if 'qod_type' in custom:
            qod_t = custom.pop('qod_type')
        elif 'qod' in custom:
            qod_v = custom.pop('qod')

        severity = dict()
        if 'severity_vector' in custom:
            severity_vector = custom.pop('severity_vector')
        else:
            severity_vector = custom.pop('cvss_base_vector', None)
        if not severity_vector:
            logger.warning("no severity_vector in %s found.", vt_id)
            # when there is no severity than we return None; alternatively
            # we could set it to an empty dict and continue
            return None

        severity['severity_base_vector'] = severity_vector
        # CVSSv3 vectors are prefixed with "CVSS:3"; everything else is
        # treated as CVSSv2.
        if "CVSS:3" in severity_vector:
            severity['severity_type'] = 'cvss_base_v3'
        else:
            severity['severity_type'] = 'cvss_base_v2'
        if 'severity_date' in custom:
            severity['severity_date'] = custom.pop('severity_date')
        else:
            # Fall back to the creation date when no severity date exists.
            severity['severity_date'] = vt_creation_time
        if 'severity_origin' in custom:
            severity['severity_origin'] = custom.pop('severity_origin')

        if name is None:
            name = ''
        vt = {'name': name}
        # Whatever remains in 'custom' after the pops above is kept as-is.
        if custom is not None:
            vt["custom"] = custom
        if vt_params is not None:
            vt["vt_params"] = vt_params
        if vt_refs is not None:
            vt["vt_refs"] = vt_refs
        if vt_dependencies is not None:
            vt["vt_dependencies"] = vt_dependencies
        if vt_creation_time is not None:
            vt["creation_time"] = vt_creation_time
        if vt_modification_time is not None:
            vt["modification_time"] = vt_modification_time
        if summary is not None:
            vt["summary"] = summary
        if impact is not None:
            vt["impact"] = impact
        if affected is not None:
            vt["affected"] = affected
        if insight is not None:
            vt["insight"] = insight
        if solution is not None:
            vt["solution"] = solution
        if solution_t is not None:
            vt["solution_type"] = solution_t
        if solution_m is not None:
            vt["solution_method"] = solution_m
        if vuldetect is not None:
            vt["detection"] = vuldetect
        if qod_t is not None:
            vt["qod_type"] = qod_t
        elif qod_v is not None:
            vt["qod"] = qod_v
        vt["severities"] = severity
        return vt

    def get_vt_iterator(
        self, vt_selection: Optional[List[str]] = None, details: bool = True
    ) -> Iterator[Tuple[str, Dict]]:
        """Yield the vts from the Redis NVTicache.

        Arguments:
            vt_selection: Optional list of OIDs to yield. When empty or
                None, all known OIDs are used.
            details: Whether to build the filename->OID mapping needed
                for dependency resolution in get_single_vt().
        """
        oids = None
        if not vt_selection or details:
            # notus contains multiple oids per advisory therefore unlike
            # nasl they share the filename.
            # The vt collection is taken from both Caches.
            # BUGFIX: materialize into a list, because the collection may
            # be consumed twice below (once to build the selection, once
            # for the dependency dict); a plain chain() iterator would be
            # exhausted after the first pass, leaving 'oids' empty.
            if self.notus:
                vt_collection = list(
                    chain(self.notus.get_oids(), self.nvti.get_oids())
                )
            else:
                vt_collection = list(self.nvti.get_oids())
            if not vt_selection:
                vt_selection = [v for _, v in vt_collection]
            if details:
                # luckily notus doesn't have dependency therefore we can
                # treat oids for dependency lookup as a dict
                oids = dict(vt_collection)
        # Sort for a deterministic iteration order (also required for a
        # reproducible verification hash).
        vt_selection.sort()
        for vt_id in vt_selection:
            vt = self.get_single_vt(vt_id, oids)
            if vt:
                yield (vt_id, vt)

    def vt_verification_string_iter(self) -> Iterator[bytes]:
        """Yield one UTF-8 encoded chunk per VT for the collection hash.

        For a reproducible hash calculation the vts must already be
        sorted (get_vt_iterator sorts its selection).
        """
        for vt_id, vt in self.get_vt_iterator(details=False):
            param_chain = ""
            vt_params = vt.get('vt_params')
            if vt_params:
                # Parameters are folded in sorted order so the hash does
                # not depend on dictionary ordering.
                for _, param in sorted(vt_params.items()):
                    if param:
                        param_chain += (
                            param.get('id')
                            + param.get('name')
                            + param.get('default')
                        )
            yield (
                (vt_id + vt.get('modification_time')).encode('utf-8')
                + param_chain.encode('utf-8')
            )

    def calculate_vts_collection_hash(self) -> str:
        """Calculate the vts collection sha256 hash."""
        m = sha256()  # pylint: disable=invalid-name
        for chunk in self.vt_verification_string_iter():
            m.update(chunk)
        return m.hexdigest()

    def get_severity_score(self, vt_aux: dict) -> Optional[float]:
        """Return the severity score for the given oid.

        Arguments:
            vt_aux: VT element from which to get the severity vector

        Returns:
            The calculated cvss base value. None if there is no severity
            vector or severity type is not cvss base version 2 or 3.
        """
        if vt_aux:
            severity_type = vt_aux['severities'].get('severity_type')
            severity_vector = vt_aux['severities'].get('severity_base_vector')
            if severity_type == "cvss_base_v2" and severity_vector:
                return CVSS.cvss_base_v2_value(severity_vector)
            elif severity_type == "cvss_base_v3" and severity_vector:
                return CVSS.cvss_base_v3_value(severity_vector)
        return None
ospd-openvas-22.9.0/poetry.lock 0000664 0000000 0000000 00000174366 15011310720 0016425 0 ustar 00root root 0000000 0000000 # This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand.
[[package]]
name = "anyio"
version = "4.3.0"
description = "High level compatibility layer for multiple asynchronous event loop implementations"
optional = false
python-versions = ">=3.8"
groups = ["dev"]
files = [
{file = "anyio-4.3.0-py3-none-any.whl", hash = "sha256:048e05d0f6caeed70d731f3db756d35dcc1f35747c8c403364a8332c630441b8"},
{file = "anyio-4.3.0.tar.gz", hash = "sha256:f75253795a87df48568485fd18cdd2a3fa5c4f7c5be8e5e36637733fce06fed6"},
]
[package.dependencies]
exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""}
idna = ">=2.8"
sniffio = ">=1.1"
typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""}
[package.extras]
doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17) ; platform_python_implementation == \"CPython\" and platform_system != \"Windows\""]
trio = ["trio (>=0.23)"]
[[package]]
name = "astroid"
version = "3.3.8"
description = "An abstract syntax tree for Python with inference support."
optional = false
python-versions = ">=3.9.0"
groups = ["dev"]
files = [
{file = "astroid-3.3.8-py3-none-any.whl", hash = "sha256:187ccc0c248bfbba564826c26f070494f7bc964fd286b6d9fff4420e55de828c"},
{file = "astroid-3.3.8.tar.gz", hash = "sha256:a88c7994f914a4ea8572fac479459f4955eeccc877be3f2d959a33273b0cf40b"},
]
[package.dependencies]
typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""}
[[package]]
name = "async-timeout"
version = "4.0.3"
description = "Timeout context manager for asyncio programs"
optional = false
python-versions = ">=3.7"
groups = ["main"]
markers = "python_full_version < \"3.11.3\""
files = [
{file = "async-timeout-4.0.3.tar.gz", hash = "sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f"},
{file = "async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028"},
]
[[package]]
name = "autohooks"
version = "24.2.0"
description = "Library for managing git hooks"
optional = false
python-versions = ">=3.9,<4.0"
groups = ["dev"]
files = [
{file = "autohooks-24.2.0-py3-none-any.whl", hash = "sha256:a2097de2e092fa7b78a17b63b73980d3cc42b01065feb2caf0d41e2bb506621b"},
{file = "autohooks-24.2.0.tar.gz", hash = "sha256:ee929a9f7ff68e61c73201de817b679df168b4465be1026550a71956b419200b"},
]
[package.dependencies]
pontos = ">=22.8.0"
rich = ">=12.5.1"
shtab = ">=1.7.0"
tomlkit = ">=0.5.11"
[[package]]
name = "autohooks-plugin-black"
version = "23.10.0"
description = "An autohooks plugin for python code formatting via black"
optional = false
python-versions = ">=3.9,<4.0"
groups = ["dev"]
files = [
{file = "autohooks_plugin_black-23.10.0-py3-none-any.whl", hash = "sha256:88d648251df749586af9ea5be3105daa4358ed916b61aee738d0727387214470"},
{file = "autohooks_plugin_black-23.10.0.tar.gz", hash = "sha256:8415b5f566d861236bde2b0973699f64a8b861208af4fa05fe04a1f923ea3ef6"},
]
[package.dependencies]
autohooks = ">=21.6.0"
black = ">=20.8"
[[package]]
name = "autohooks-plugin-pylint"
version = "23.10.0"
description = "An autohooks plugin for python code linting via pylint"
optional = false
python-versions = ">=3.9,<4.0"
groups = ["dev"]
files = [
{file = "autohooks_plugin_pylint-23.10.0-py3-none-any.whl", hash = "sha256:49e1e60b81f48ca17d55a6660bff9aae0e9bd8cda2ec6f4ef1237b2f86475682"},
{file = "autohooks_plugin_pylint-23.10.0.tar.gz", hash = "sha256:ed2f82ba89d28f772562794734eb37ae630b7a3011526e9cdfc13240865b0359"},
]
[package.dependencies]
autohooks = ">=2.2.0"
pylint = ">=2.8.3"
[[package]]
name = "black"
version = "25.1.0"
description = "The uncompromising code formatter."
optional = false
python-versions = ">=3.9"
groups = ["dev"]
files = [
{file = "black-25.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:759e7ec1e050a15f89b770cefbf91ebee8917aac5c20483bc2d80a6c3a04df32"},
{file = "black-25.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e519ecf93120f34243e6b0054db49c00a35f84f195d5bce7e9f5cfc578fc2da"},
{file = "black-25.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:055e59b198df7ac0b7efca5ad7ff2516bca343276c466be72eb04a3bcc1f82d7"},
{file = "black-25.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:db8ea9917d6f8fc62abd90d944920d95e73c83a5ee3383493e35d271aca872e9"},
{file = "black-25.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a39337598244de4bae26475f77dda852ea00a93bd4c728e09eacd827ec929df0"},
{file = "black-25.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:96c1c7cd856bba8e20094e36e0f948718dc688dba4a9d78c3adde52b9e6c2299"},
{file = "black-25.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bce2e264d59c91e52d8000d507eb20a9aca4a778731a08cfff7e5ac4a4bb7096"},
{file = "black-25.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:172b1dbff09f86ce6f4eb8edf9dede08b1fce58ba194c87d7a4f1a5aa2f5b3c2"},
{file = "black-25.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4b60580e829091e6f9238c848ea6750efed72140b91b048770b64e74fe04908b"},
{file = "black-25.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1e2978f6df243b155ef5fa7e558a43037c3079093ed5d10fd84c43900f2d8ecc"},
{file = "black-25.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3b48735872ec535027d979e8dcb20bf4f70b5ac75a8ea99f127c106a7d7aba9f"},
{file = "black-25.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:ea0213189960bda9cf99be5b8c8ce66bb054af5e9e861249cd23471bd7b0b3ba"},
{file = "black-25.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8f0b18a02996a836cc9c9c78e5babec10930862827b1b724ddfe98ccf2f2fe4f"},
{file = "black-25.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:afebb7098bfbc70037a053b91ae8437c3857482d3a690fefc03e9ff7aa9a5fd3"},
{file = "black-25.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:030b9759066a4ee5e5aca28c3c77f9c64789cdd4de8ac1df642c40b708be6171"},
{file = "black-25.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:a22f402b410566e2d1c950708c77ebf5ebd5d0d88a6a2e87c86d9fb48afa0d18"},
{file = "black-25.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a1ee0a0c330f7b5130ce0caed9936a904793576ef4d2b98c40835d6a65afa6a0"},
{file = "black-25.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f3df5f1bf91d36002b0a75389ca8663510cf0531cca8aa5c1ef695b46d98655f"},
{file = "black-25.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d9e6827d563a2c820772b32ce8a42828dc6790f095f441beef18f96aa6f8294e"},
{file = "black-25.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:bacabb307dca5ebaf9c118d2d2f6903da0d62c9faa82bd21a33eecc319559355"},
{file = "black-25.1.0-py3-none-any.whl", hash = "sha256:95e8176dae143ba9097f351d174fdaf0ccd29efb414b362ae3fd72bf0f710717"},
{file = "black-25.1.0.tar.gz", hash = "sha256:33496d5cd1222ad73391352b4ae8da15253c5de89b93a80b3e2c8d9a19ec2666"},
]
[package.dependencies]
click = ">=8.0.0"
mypy-extensions = ">=0.4.3"
packaging = ">=22.0"
pathspec = ">=0.9.0"
platformdirs = ">=2"
tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
typing-extensions = {version = ">=4.0.1", markers = "python_version < \"3.11\""}
[package.extras]
colorama = ["colorama (>=0.4.3)"]
d = ["aiohttp (>=3.10)"]
jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"]
uvloop = ["uvloop (>=0.15.2)"]
[[package]]
name = "certifi"
version = "2024.7.4"
description = "Python package for providing Mozilla's CA Bundle."
optional = false
python-versions = ">=3.6"
groups = ["dev"]
files = [
{file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"},
{file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"},
]
[[package]]
name = "click"
version = "8.1.7"
description = "Composable command line interface toolkit"
optional = false
python-versions = ">=3.7"
groups = ["dev"]
files = [
{file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"},
{file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"},
]
[package.dependencies]
colorama = {version = "*", markers = "platform_system == \"Windows\""}
[[package]]
name = "colorama"
version = "0.4.6"
description = "Cross-platform colored terminal text."
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
groups = ["dev"]
markers = "platform_system == \"Windows\" or sys_platform == \"win32\""
files = [
{file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
{file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
]
[[package]]
name = "colorful"
version = "0.5.6"
description = "Terminal string styling done right, in Python."
optional = false
python-versions = "*"
groups = ["dev"]
files = [
{file = "colorful-0.5.6-py2.py3-none-any.whl", hash = "sha256:eab8c1c809f5025ad2b5238a50bd691e26850da8cac8f90d660ede6ea1af9f1e"},
{file = "colorful-0.5.6.tar.gz", hash = "sha256:b56d5c01db1dac4898308ea889edcb113fbee3e6ec5df4bacffd61d5241b5b8d"},
]
[package.dependencies]
colorama = {version = "*", markers = "platform_system == \"Windows\""}
[[package]]
name = "defusedxml"
version = "0.7.1"
description = "XML bomb protection for Python stdlib modules"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
groups = ["main"]
files = [
{file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"},
{file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"},
]
[[package]]
name = "deprecated"
version = "1.2.18"
description = "Python @deprecated decorator to deprecate old python classes, functions or methods."
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7"
groups = ["main"]
files = [
{file = "Deprecated-1.2.18-py2.py3-none-any.whl", hash = "sha256:bd5011788200372a32418f888e326a09ff80d0214bd961147cfed01b5c018eec"},
{file = "deprecated-1.2.18.tar.gz", hash = "sha256:422b6f6d859da6f2ef57857761bfb392480502a64c3028ca9bbe86085d72115d"},
]
[package.dependencies]
wrapt = ">=1.10,<2"
[package.extras]
dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "setuptools ; python_version >= \"3.12\"", "tox"]
[[package]]
name = "dill"
version = "0.3.8"
description = "serialize all of Python"
optional = false
python-versions = ">=3.8"
groups = ["dev"]
files = [
{file = "dill-0.3.8-py3-none-any.whl", hash = "sha256:c36ca9ffb54365bdd2f8eb3eff7d2a21237f8452b57ace88b1ac615b7e815bd7"},
{file = "dill-0.3.8.tar.gz", hash = "sha256:3ebe3c479ad625c4553aca177444d89b486b1d84982eeacded644afc0cf797ca"},
]
[package.extras]
graph = ["objgraph (>=1.7.2)"]
profile = ["gprof2dot (>=2022.7.29)"]
[[package]]
name = "exceptiongroup"
version = "1.2.0"
description = "Backport of PEP 654 (exception groups)"
optional = false
python-versions = ">=3.7"
groups = ["dev"]
markers = "python_version < \"3.11\""
files = [
{file = "exceptiongroup-1.2.0-py3-none-any.whl", hash = "sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14"},
{file = "exceptiongroup-1.2.0.tar.gz", hash = "sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68"},
]
[package.extras]
test = ["pytest (>=6)"]
[[package]]
name = "h11"
version = "0.14.0"
description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
optional = false
python-versions = ">=3.7"
groups = ["dev"]
files = [
{file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"},
{file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"},
]
[[package]]
name = "h2"
version = "4.1.0"
description = "HTTP/2 State-Machine based protocol implementation"
optional = false
python-versions = ">=3.6.1"
groups = ["dev"]
files = [
{file = "h2-4.1.0-py3-none-any.whl", hash = "sha256:03a46bcf682256c95b5fd9e9a99c1323584c3eec6440d379b9903d709476bc6d"},
{file = "h2-4.1.0.tar.gz", hash = "sha256:a83aca08fbe7aacb79fec788c9c0bac936343560ed9ec18b82a13a12c28d2abb"},
]
[package.dependencies]
hpack = ">=4.0,<5"
hyperframe = ">=6.0,<7"
[[package]]
name = "hpack"
version = "4.0.0"
description = "Pure-Python HPACK header compression"
optional = false
python-versions = ">=3.6.1"
groups = ["dev"]
files = [
{file = "hpack-4.0.0-py3-none-any.whl", hash = "sha256:84a076fad3dc9a9f8063ccb8041ef100867b1878b25ef0ee63847a5d53818a6c"},
{file = "hpack-4.0.0.tar.gz", hash = "sha256:fc41de0c63e687ebffde81187a948221294896f6bdc0ae2312708df339430095"},
]
[[package]]
name = "httpcore"
version = "1.0.4"
description = "A minimal low-level HTTP client."
optional = false
python-versions = ">=3.8"
groups = ["dev"]
files = [
{file = "httpcore-1.0.4-py3-none-any.whl", hash = "sha256:ac418c1db41bade2ad53ae2f3834a3a0f5ae76b56cf5aa497d2d033384fc7d73"},
{file = "httpcore-1.0.4.tar.gz", hash = "sha256:cb2839ccfcba0d2d3c1131d3c3e26dfc327326fbe7a5dc0dbfe9f6c9151bb022"},
]
[package.dependencies]
certifi = "*"
h11 = ">=0.13,<0.15"
[package.extras]
asyncio = ["anyio (>=4.0,<5.0)"]
http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]
trio = ["trio (>=0.22.0,<0.25.0)"]
[[package]]
name = "httpx"
version = "0.27.0"
description = "The next generation HTTP client."
optional = false
python-versions = ">=3.8"
groups = ["dev"]
files = [
{file = "httpx-0.27.0-py3-none-any.whl", hash = "sha256:71d5465162c13681bff01ad59b2cc68dd838ea1f10e51574bac27103f00c91a5"},
{file = "httpx-0.27.0.tar.gz", hash = "sha256:a0cb88a46f32dc874e04ee956e4c2764aba2aa228f650b06788ba6bda2962ab5"},
]
[package.dependencies]
anyio = "*"
certifi = "*"
h2 = {version = ">=3,<5", optional = true, markers = "extra == \"http2\""}
httpcore = "==1.*"
idna = "*"
sniffio = "*"
[package.extras]
brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""]
cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"]
http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]
[[package]]
name = "hyperframe"
version = "6.0.1"
description = "HTTP/2 framing layer for Python"
optional = false
python-versions = ">=3.6.1"
groups = ["dev"]
files = [
{file = "hyperframe-6.0.1-py3-none-any.whl", hash = "sha256:0ec6bafd80d8ad2195c4f03aacba3a8265e57bc4cff261e802bf39970ed02a15"},
{file = "hyperframe-6.0.1.tar.gz", hash = "sha256:ae510046231dc8e9ecb1a6586f63d2347bf4c8905914aa84ba585ae85f28a914"},
]
[[package]]
name = "idna"
version = "3.7"
description = "Internationalized Domain Names in Applications (IDNA)"
optional = false
python-versions = ">=3.5"
groups = ["dev"]
files = [
{file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"},
{file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"},
]
[[package]]
name = "isort"
version = "5.13.2"
description = "A Python utility / library to sort Python imports."
optional = false
python-versions = ">=3.8.0"
groups = ["dev"]
files = [
{file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"},
{file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"},
]
[package.extras]
colors = ["colorama (>=0.4.6)"]
[[package]]
name = "lxml"
version = "5.4.0"
description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API."
optional = false
python-versions = ">=3.6"
groups = ["main", "dev"]
files = [
{file = "lxml-5.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e7bc6df34d42322c5289e37e9971d6ed114e3776b45fa879f734bded9d1fea9c"},
{file = "lxml-5.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6854f8bd8a1536f8a1d9a3655e6354faa6406621cf857dc27b681b69860645c7"},
{file = "lxml-5.4.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:696ea9e87442467819ac22394ca36cb3d01848dad1be6fac3fb612d3bd5a12cf"},
{file = "lxml-5.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ef80aeac414f33c24b3815ecd560cee272786c3adfa5f31316d8b349bfade28"},
{file = "lxml-5.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b9c2754cef6963f3408ab381ea55f47dabc6f78f4b8ebb0f0b25cf1ac1f7609"},
{file = "lxml-5.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7a62cc23d754bb449d63ff35334acc9f5c02e6dae830d78dab4dd12b78a524f4"},
{file = "lxml-5.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f82125bc7203c5ae8633a7d5d20bcfdff0ba33e436e4ab0abc026a53a8960b7"},
{file = "lxml-5.4.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:b67319b4aef1a6c56576ff544b67a2a6fbd7eaee485b241cabf53115e8908b8f"},
{file = "lxml-5.4.0-cp310-cp310-manylinux_2_28_ppc64le.whl", hash = "sha256:a8ef956fce64c8551221f395ba21d0724fed6b9b6242ca4f2f7beb4ce2f41997"},
{file = "lxml-5.4.0-cp310-cp310-manylinux_2_28_s390x.whl", hash = "sha256:0a01ce7d8479dce84fc03324e3b0c9c90b1ece9a9bb6a1b6c9025e7e4520e78c"},
{file = "lxml-5.4.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:91505d3ddebf268bb1588eb0f63821f738d20e1e7f05d3c647a5ca900288760b"},
{file = "lxml-5.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a3bcdde35d82ff385f4ede021df801b5c4a5bcdfb61ea87caabcebfc4945dc1b"},
{file = "lxml-5.4.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:aea7c06667b987787c7d1f5e1dfcd70419b711cdb47d6b4bb4ad4b76777a0563"},
{file = "lxml-5.4.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:a7fb111eef4d05909b82152721a59c1b14d0f365e2be4c742a473c5d7372f4f5"},
{file = "lxml-5.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:43d549b876ce64aa18b2328faff70f5877f8c6dede415f80a2f799d31644d776"},
{file = "lxml-5.4.0-cp310-cp310-win32.whl", hash = "sha256:75133890e40d229d6c5837b0312abbe5bac1c342452cf0e12523477cd3aa21e7"},
{file = "lxml-5.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:de5b4e1088523e2b6f730d0509a9a813355b7f5659d70eb4f319c76beea2e250"},
{file = "lxml-5.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:98a3912194c079ef37e716ed228ae0dcb960992100461b704aea4e93af6b0bb9"},
{file = "lxml-5.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0ea0252b51d296a75f6118ed0d8696888e7403408ad42345d7dfd0d1e93309a7"},
{file = "lxml-5.4.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b92b69441d1bd39f4940f9eadfa417a25862242ca2c396b406f9272ef09cdcaa"},
{file = "lxml-5.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20e16c08254b9b6466526bc1828d9370ee6c0d60a4b64836bc3ac2917d1e16df"},
{file = "lxml-5.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7605c1c32c3d6e8c990dd28a0970a3cbbf1429d5b92279e37fda05fb0c92190e"},
{file = "lxml-5.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ecf4c4b83f1ab3d5a7ace10bafcb6f11df6156857a3c418244cef41ca9fa3e44"},
{file = "lxml-5.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0cef4feae82709eed352cd7e97ae062ef6ae9c7b5dbe3663f104cd2c0e8d94ba"},
{file = "lxml-5.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:df53330a3bff250f10472ce96a9af28628ff1f4efc51ccba351a8820bca2a8ba"},
{file = "lxml-5.4.0-cp311-cp311-manylinux_2_28_ppc64le.whl", hash = "sha256:aefe1a7cb852fa61150fcb21a8c8fcea7b58c4cb11fbe59c97a0a4b31cae3c8c"},
{file = "lxml-5.4.0-cp311-cp311-manylinux_2_28_s390x.whl", hash = "sha256:ef5a7178fcc73b7d8c07229e89f8eb45b2908a9238eb90dcfc46571ccf0383b8"},
{file = "lxml-5.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:d2ed1b3cb9ff1c10e6e8b00941bb2e5bb568b307bfc6b17dffbbe8be5eecba86"},
{file = "lxml-5.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:72ac9762a9f8ce74c9eed4a4e74306f2f18613a6b71fa065495a67ac227b3056"},
{file = "lxml-5.4.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f5cb182f6396706dc6cc1896dd02b1c889d644c081b0cdec38747573db88a7d7"},
{file = "lxml-5.4.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:3a3178b4873df8ef9457a4875703488eb1622632a9cee6d76464b60e90adbfcd"},
{file = "lxml-5.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e094ec83694b59d263802ed03a8384594fcce477ce484b0cbcd0008a211ca751"},
{file = "lxml-5.4.0-cp311-cp311-win32.whl", hash = "sha256:4329422de653cdb2b72afa39b0aa04252fca9071550044904b2e7036d9d97fe4"},
{file = "lxml-5.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:fd3be6481ef54b8cfd0e1e953323b7aa9d9789b94842d0e5b142ef4bb7999539"},
{file = "lxml-5.4.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b5aff6f3e818e6bdbbb38e5967520f174b18f539c2b9de867b1e7fde6f8d95a4"},
{file = "lxml-5.4.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:942a5d73f739ad7c452bf739a62a0f83e2578afd6b8e5406308731f4ce78b16d"},
{file = "lxml-5.4.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:460508a4b07364d6abf53acaa0a90b6d370fafde5693ef37602566613a9b0779"},
{file = "lxml-5.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:529024ab3a505fed78fe3cc5ddc079464e709f6c892733e3f5842007cec8ac6e"},
{file = "lxml-5.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ca56ebc2c474e8f3d5761debfd9283b8b18c76c4fc0967b74aeafba1f5647f9"},
{file = "lxml-5.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a81e1196f0a5b4167a8dafe3a66aa67c4addac1b22dc47947abd5d5c7a3f24b5"},
{file = "lxml-5.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00b8686694423ddae324cf614e1b9659c2edb754de617703c3d29ff568448df5"},
{file = "lxml-5.4.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:c5681160758d3f6ac5b4fea370495c48aac0989d6a0f01bb9a72ad8ef5ab75c4"},
{file = "lxml-5.4.0-cp312-cp312-manylinux_2_28_ppc64le.whl", hash = "sha256:2dc191e60425ad70e75a68c9fd90ab284df64d9cd410ba8d2b641c0c45bc006e"},
{file = "lxml-5.4.0-cp312-cp312-manylinux_2_28_s390x.whl", hash = "sha256:67f779374c6b9753ae0a0195a892a1c234ce8416e4448fe1e9f34746482070a7"},
{file = "lxml-5.4.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:79d5bfa9c1b455336f52343130b2067164040604e41f6dc4d8313867ed540079"},
{file = "lxml-5.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3d3c30ba1c9b48c68489dc1829a6eede9873f52edca1dda900066542528d6b20"},
{file = "lxml-5.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:1af80c6316ae68aded77e91cd9d80648f7dd40406cef73df841aa3c36f6907c8"},
{file = "lxml-5.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:4d885698f5019abe0de3d352caf9466d5de2baded00a06ef3f1216c1a58ae78f"},
{file = "lxml-5.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:aea53d51859b6c64e7c51d522c03cc2c48b9b5d6172126854cc7f01aa11f52bc"},
{file = "lxml-5.4.0-cp312-cp312-win32.whl", hash = "sha256:d90b729fd2732df28130c064aac9bb8aff14ba20baa4aee7bd0795ff1187545f"},
{file = "lxml-5.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:1dc4ca99e89c335a7ed47d38964abcb36c5910790f9bd106f2a8fa2ee0b909d2"},
{file = "lxml-5.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:773e27b62920199c6197130632c18fb7ead3257fce1ffb7d286912e56ddb79e0"},
{file = "lxml-5.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ce9c671845de9699904b1e9df95acfe8dfc183f2310f163cdaa91a3535af95de"},
{file = "lxml-5.4.0-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9454b8d8200ec99a224df8854786262b1bd6461f4280064c807303c642c05e76"},
{file = "lxml-5.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cccd007d5c95279e529c146d095f1d39ac05139de26c098166c4beb9374b0f4d"},
{file = "lxml-5.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0fce1294a0497edb034cb416ad3e77ecc89b313cff7adbee5334e4dc0d11f422"},
{file = "lxml-5.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:24974f774f3a78ac12b95e3a20ef0931795ff04dbb16db81a90c37f589819551"},
{file = "lxml-5.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:497cab4d8254c2a90bf988f162ace2ddbfdd806fce3bda3f581b9d24c852e03c"},
{file = "lxml-5.4.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:e794f698ae4c5084414efea0f5cc9f4ac562ec02d66e1484ff822ef97c2cadff"},
{file = "lxml-5.4.0-cp313-cp313-manylinux_2_28_ppc64le.whl", hash = "sha256:2c62891b1ea3094bb12097822b3d44b93fc6c325f2043c4d2736a8ff09e65f60"},
{file = "lxml-5.4.0-cp313-cp313-manylinux_2_28_s390x.whl", hash = "sha256:142accb3e4d1edae4b392bd165a9abdee8a3c432a2cca193df995bc3886249c8"},
{file = "lxml-5.4.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:1a42b3a19346e5601d1b8296ff6ef3d76038058f311902edd574461e9c036982"},
{file = "lxml-5.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4291d3c409a17febf817259cb37bc62cb7eb398bcc95c1356947e2871911ae61"},
{file = "lxml-5.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4f5322cf38fe0e21c2d73901abf68e6329dc02a4994e483adbcf92b568a09a54"},
{file = "lxml-5.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:0be91891bdb06ebe65122aa6bf3fc94489960cf7e03033c6f83a90863b23c58b"},
{file = "lxml-5.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:15a665ad90054a3d4f397bc40f73948d48e36e4c09f9bcffc7d90c87410e478a"},
{file = "lxml-5.4.0-cp313-cp313-win32.whl", hash = "sha256:d5663bc1b471c79f5c833cffbc9b87d7bf13f87e055a5c86c363ccd2348d7e82"},
{file = "lxml-5.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:bcb7a1096b4b6b24ce1ac24d4942ad98f983cd3810f9711bcd0293f43a9d8b9f"},
{file = "lxml-5.4.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:7be701c24e7f843e6788353c055d806e8bd8466b52907bafe5d13ec6a6dbaecd"},
{file = "lxml-5.4.0-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fb54f7c6bafaa808f27166569b1511fc42701a7713858dddc08afdde9746849e"},
{file = "lxml-5.4.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:97dac543661e84a284502e0cf8a67b5c711b0ad5fb661d1bd505c02f8cf716d7"},
{file = "lxml-5.4.0-cp36-cp36m-manylinux_2_28_x86_64.whl", hash = "sha256:c70e93fba207106cb16bf852e421c37bbded92acd5964390aad07cb50d60f5cf"},
{file = "lxml-5.4.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:9c886b481aefdf818ad44846145f6eaf373a20d200b5ce1a5c8e1bc2d8745410"},
{file = "lxml-5.4.0-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:fa0e294046de09acd6146be0ed6727d1f42ded4ce3ea1e9a19c11b6774eea27c"},
{file = "lxml-5.4.0-cp36-cp36m-win32.whl", hash = "sha256:61c7bbf432f09ee44b1ccaa24896d21075e533cd01477966a5ff5a71d88b2f56"},
{file = "lxml-5.4.0-cp36-cp36m-win_amd64.whl", hash = "sha256:7ce1a171ec325192c6a636b64c94418e71a1964f56d002cc28122fceff0b6121"},
{file = "lxml-5.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:795f61bcaf8770e1b37eec24edf9771b307df3af74d1d6f27d812e15a9ff3872"},
{file = "lxml-5.4.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:29f451a4b614a7b5b6c2e043d7b64a15bd8304d7e767055e8ab68387a8cacf4e"},
{file = "lxml-5.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4aa412a82e460571fad592d0f93ce9935a20090029ba08eca05c614f99b0cc92"},
{file = "lxml-5.4.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:c5d32f5284012deaccd37da1e2cd42f081feaa76981f0eaa474351b68df813c5"},
{file = "lxml-5.4.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:31e63621e073e04697c1b2d23fcb89991790eef370ec37ce4d5d469f40924ed6"},
{file = "lxml-5.4.0-cp37-cp37m-win32.whl", hash = "sha256:be2ba4c3c5b7900246a8f866580700ef0d538f2ca32535e991027bdaba944063"},
{file = "lxml-5.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:09846782b1ef650b321484ad429217f5154da4d6e786636c38e434fa32e94e49"},
{file = "lxml-5.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:eaf24066ad0b30917186420d51e2e3edf4b0e2ea68d8cd885b14dc8afdcf6556"},
{file = "lxml-5.4.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b31a3a77501d86d8ade128abb01082724c0dfd9524f542f2f07d693c9f1175f"},
{file = "lxml-5.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e108352e203c7afd0eb91d782582f00a0b16a948d204d4dec8565024fafeea5"},
{file = "lxml-5.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a11a96c3b3f7551c8a8109aa65e8594e551d5a84c76bf950da33d0fb6dfafab7"},
{file = "lxml-5.4.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:ca755eebf0d9e62d6cb013f1261e510317a41bf4650f22963474a663fdfe02aa"},
{file = "lxml-5.4.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:4cd915c0fb1bed47b5e6d6edd424ac25856252f09120e3e8ba5154b6b921860e"},
{file = "lxml-5.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:226046e386556a45ebc787871d6d2467b32c37ce76c2680f5c608e25823ffc84"},
{file = "lxml-5.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:b108134b9667bcd71236c5a02aad5ddd073e372fb5d48ea74853e009fe38acb6"},
{file = "lxml-5.4.0-cp38-cp38-win32.whl", hash = "sha256:1320091caa89805df7dcb9e908add28166113dcd062590668514dbd510798c88"},
{file = "lxml-5.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:073eb6dcdf1f587d9b88c8c93528b57eccda40209cf9be549d469b942b41d70b"},
{file = "lxml-5.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bda3ea44c39eb74e2488297bb39d47186ed01342f0022c8ff407c250ac3f498e"},
{file = "lxml-5.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9ceaf423b50ecfc23ca00b7f50b64baba85fb3fb91c53e2c9d00bc86150c7e40"},
{file = "lxml-5.4.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:664cdc733bc87449fe781dbb1f309090966c11cc0c0cd7b84af956a02a8a4729"},
{file = "lxml-5.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67ed8a40665b84d161bae3181aa2763beea3747f748bca5874b4af4d75998f87"},
{file = "lxml-5.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9b4a3bd174cc9cdaa1afbc4620c049038b441d6ba07629d89a83b408e54c35cd"},
{file = "lxml-5.4.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:b0989737a3ba6cf2a16efb857fb0dfa20bc5c542737fddb6d893fde48be45433"},
{file = "lxml-5.4.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:dc0af80267edc68adf85f2a5d9be1cdf062f973db6790c1d065e45025fa26140"},
{file = "lxml-5.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:639978bccb04c42677db43c79bdaa23785dc7f9b83bfd87570da8207872f1ce5"},
{file = "lxml-5.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5a99d86351f9c15e4a901fc56404b485b1462039db59288b203f8c629260a142"},
{file = "lxml-5.4.0-cp39-cp39-win32.whl", hash = "sha256:3e6d5557989cdc3ebb5302bbdc42b439733a841891762ded9514e74f60319ad6"},
{file = "lxml-5.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:a8c9b7f16b63e65bbba889acb436a1034a82d34fa09752d754f88d708eca80e1"},
{file = "lxml-5.4.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:1b717b00a71b901b4667226bba282dd462c42ccf618ade12f9ba3674e1fabc55"},
{file = "lxml-5.4.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:27a9ded0f0b52098ff89dd4c418325b987feed2ea5cc86e8860b0f844285d740"},
{file = "lxml-5.4.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b7ce10634113651d6f383aa712a194179dcd496bd8c41e191cec2099fa09de5"},
{file = "lxml-5.4.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:53370c26500d22b45182f98847243efb518d268374a9570409d2e2276232fd37"},
{file = "lxml-5.4.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c6364038c519dffdbe07e3cf42e6a7f8b90c275d4d1617a69bb59734c1a2d571"},
{file = "lxml-5.4.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:b12cb6527599808ada9eb2cd6e0e7d3d8f13fe7bbb01c6311255a15ded4c7ab4"},
{file = "lxml-5.4.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:5f11a1526ebd0dee85e7b1e39e39a0cc0d9d03fb527f56d8457f6df48a10dc0c"},
{file = "lxml-5.4.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48b4afaf38bf79109bb060d9016fad014a9a48fb244e11b94f74ae366a64d252"},
{file = "lxml-5.4.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de6f6bb8a7840c7bf216fb83eec4e2f79f7325eca8858167b68708b929ab2172"},
{file = "lxml-5.4.0-pp37-pypy37_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:5cca36a194a4eb4e2ed6be36923d3cffd03dcdf477515dea687185506583d4c9"},
{file = "lxml-5.4.0-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b7c86884ad23d61b025989d99bfdd92a7351de956e01c61307cb87035960bcb1"},
{file = "lxml-5.4.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:53d9469ab5460402c19553b56c3648746774ecd0681b1b27ea74d5d8a3ef5590"},
{file = "lxml-5.4.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:56dbdbab0551532bb26c19c914848d7251d73edb507c3079d6805fa8bba5b706"},
{file = "lxml-5.4.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:14479c2ad1cb08b62bb941ba8e0e05938524ee3c3114644df905d2331c76cd57"},
{file = "lxml-5.4.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:32697d2ea994e0db19c1df9e40275ffe84973e4232b5c274f47e7c1ec9763cdd"},
{file = "lxml-5.4.0-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:24f6df5f24fc3385f622c0c9d63fe34604893bc1a5bdbb2dbf5870f85f9a404a"},
{file = "lxml-5.4.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:151d6c40bc9db11e960619d2bf2ec5829f0aaffb10b41dcf6ad2ce0f3c0b2325"},
{file = "lxml-5.4.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:4025bf2884ac4370a3243c5aa8d66d3cb9e15d3ddd0af2d796eccc5f0244390e"},
{file = "lxml-5.4.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:9459e6892f59ecea2e2584ee1058f5d8f629446eab52ba2305ae13a32a059530"},
{file = "lxml-5.4.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:47fb24cc0f052f0576ea382872b3fc7e1f7e3028e53299ea751839418ade92a6"},
{file = "lxml-5.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50441c9de951a153c698b9b99992e806b71c1f36d14b154592580ff4a9d0d877"},
{file = "lxml-5.4.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:ab339536aa798b1e17750733663d272038bf28069761d5be57cb4a9b0137b4f8"},
{file = "lxml-5.4.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:9776af1aad5a4b4a1317242ee2bea51da54b2a7b7b48674be736d463c999f37d"},
{file = "lxml-5.4.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:63e7968ff83da2eb6fdda967483a7a023aa497d85ad8f05c3ad9b1f2e8c84987"},
{file = "lxml-5.4.0.tar.gz", hash = "sha256:d12832e1dbea4be280b22fd0ea7c9b87f0d8fc51ba06e92dc62d52f804f78ebd"},
]
[package.extras]
cssselect = ["cssselect (>=0.7)"]
html-clean = ["lxml_html_clean"]
html5 = ["html5lib"]
htmlsoup = ["BeautifulSoup4"]
source = ["Cython (>=3.0.11,<3.1.0)"]
[[package]]
name = "markdown-it-py"
version = "3.0.0"
description = "Python port of markdown-it. Markdown parsing, done right!"
optional = false
python-versions = ">=3.8"
groups = ["dev"]
files = [
{file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"},
{file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"},
]
[package.dependencies]
mdurl = ">=0.1,<1.0"
[package.extras]
benchmarking = ["psutil", "pytest", "pytest-benchmark"]
code-style = ["pre-commit (>=3.0,<4.0)"]
compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"]
linkify = ["linkify-it-py (>=1,<3)"]
plugins = ["mdit-py-plugins"]
profiling = ["gprof2dot"]
rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"]
testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"]
[[package]]
name = "mccabe"
version = "0.7.0"
description = "McCabe checker, plugin for flake8"
optional = false
python-versions = ">=3.6"
groups = ["dev"]
files = [
{file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"},
{file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"},
]
[[package]]
name = "mdurl"
version = "0.1.2"
description = "Markdown URL utilities"
optional = false
python-versions = ">=3.7"
groups = ["dev"]
files = [
{file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"},
{file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"},
]
[[package]]
name = "mypy-extensions"
version = "1.0.0"
description = "Type system extensions for programs checked with the mypy type checker."
optional = false
python-versions = ">=3.5"
groups = ["dev"]
files = [
{file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"},
{file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"},
]
[[package]]
name = "packaging"
version = "25.0"
description = "Core utilities for Python packages"
optional = false
python-versions = ">=3.8"
groups = ["main", "dev"]
files = [
{file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"},
{file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"},
]
[[package]]
name = "paho-mqtt"
version = "1.6.1"
description = "MQTT version 5.0/3.1.1 client class"
optional = false
python-versions = "*"
groups = ["main"]
files = [
{file = "paho-mqtt-1.6.1.tar.gz", hash = "sha256:2a8291c81623aec00372b5a85558a372c747cbca8e9934dfe218638b8eefc26f"},
]
[package.extras]
proxy = ["PySocks"]
[[package]]
name = "pathspec"
version = "0.12.1"
description = "Utility library for gitignore style pattern matching of file paths."
optional = false
python-versions = ">=3.8"
groups = ["dev"]
files = [
{file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"},
{file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"},
]
[[package]]
name = "platformdirs"
version = "4.2.0"
description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"."
optional = false
python-versions = ">=3.8"
groups = ["dev"]
files = [
{file = "platformdirs-4.2.0-py3-none-any.whl", hash = "sha256:0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068"},
{file = "platformdirs-4.2.0.tar.gz", hash = "sha256:ef0cc731df711022c174543cb70a9b5bd22e5a9337c8624ef2c2ceb8ddad8768"},
]
[package.extras]
docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"]
test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"]
[[package]]
name = "pontos"
version = "25.4.0"
description = "Common utilities and tools maintained by Greenbone Networks"
optional = false
python-versions = "<4.0,>=3.9"
groups = ["dev"]
files = [
{file = "pontos-25.4.0-py3-none-any.whl", hash = "sha256:3559c2066bd8c8dda1360cbe05b29af41ac2e949e684a0196f4ca040a192e912"},
{file = "pontos-25.4.0.tar.gz", hash = "sha256:04f19a044fba60c6a72116b463de1d8fba17e688763b52b2fce3b3a935515fef"},
]
[package.dependencies]
colorful = ">=0.5.4"
httpx = {version = ">=0.23", extras = ["http2"]}
lxml = ">=4.9.0"
packaging = ">=20.3"
python-dateutil = ">=2.8.2"
rich = ">=12.4.4"
semver = ">=2.13"
shtab = ">=1.7.0"
tomlkit = ">=0.5.11"
[[package]]
name = "psutil"
version = "7.0.0"
description = "Cross-platform lib for process and system monitoring in Python. NOTE: the syntax of this script MUST be kept compatible with Python 2.7."
optional = false
python-versions = ">=3.6"
groups = ["main"]
files = [
{file = "psutil-7.0.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:101d71dc322e3cffd7cea0650b09b3d08b8e7c4109dd6809fe452dfd00e58b25"},
{file = "psutil-7.0.0-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:39db632f6bb862eeccf56660871433e111b6ea58f2caea825571951d4b6aa3da"},
{file = "psutil-7.0.0-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1fcee592b4c6f146991ca55919ea3d1f8926497a713ed7faaf8225e174581e91"},
{file = "psutil-7.0.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b1388a4f6875d7e2aff5c4ca1cc16c545ed41dd8bb596cefea80111db353a34"},
{file = "psutil-7.0.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5f098451abc2828f7dc6b58d44b532b22f2088f4999a937557b603ce72b1993"},
{file = "psutil-7.0.0-cp36-cp36m-win32.whl", hash = "sha256:84df4eb63e16849689f76b1ffcb36db7b8de703d1bc1fe41773db487621b6c17"},
{file = "psutil-7.0.0-cp36-cp36m-win_amd64.whl", hash = "sha256:1e744154a6580bc968a0195fd25e80432d3afec619daf145b9e5ba16cc1d688e"},
{file = "psutil-7.0.0-cp37-abi3-win32.whl", hash = "sha256:ba3fcef7523064a6c9da440fc4d6bd07da93ac726b5733c29027d7dc95b39d99"},
{file = "psutil-7.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:4cf3d4eb1aa9b348dec30105c55cd9b7d4629285735a102beb4441e38db90553"},
{file = "psutil-7.0.0.tar.gz", hash = "sha256:7be9c3eba38beccb6495ea33afd982a44074b78f28c434a1f51cc07fd315c456"},
]
[package.extras]
dev = ["abi3audit", "black (==24.10.0)", "check-manifest", "coverage", "packaging", "pylint", "pyperf", "pypinfo", "pytest", "pytest-cov", "pytest-xdist", "requests", "rstcheck", "ruff", "setuptools", "sphinx", "sphinx_rtd_theme", "toml-sort", "twine", "virtualenv", "vulture", "wheel"]
test = ["pytest", "pytest-xdist", "setuptools"]
[[package]]
name = "pygments"
version = "2.17.2"
description = "Pygments is a syntax highlighting package written in Python."
optional = false
python-versions = ">=3.7"
groups = ["dev"]
files = [
{file = "pygments-2.17.2-py3-none-any.whl", hash = "sha256:b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c"},
{file = "pygments-2.17.2.tar.gz", hash = "sha256:da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367"},
]
[package.extras]
plugins = ["importlib-metadata ; python_version < \"3.8\""]
windows-terminal = ["colorama (>=0.4.6)"]
[[package]]
name = "pylint"
version = "3.3.7"
description = "python code static checker"
optional = false
python-versions = ">=3.9.0"
groups = ["dev"]
files = [
{file = "pylint-3.3.7-py3-none-any.whl", hash = "sha256:43860aafefce92fca4cf6b61fe199cdc5ae54ea28f9bf4cd49de267b5195803d"},
{file = "pylint-3.3.7.tar.gz", hash = "sha256:2b11de8bde49f9c5059452e0c310c079c746a0a8eeaa789e5aa966ecc23e4559"},
]
[package.dependencies]
astroid = ">=3.3.8,<=3.4.0.dev0"
colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""}
dill = [
{version = ">=0.2", markers = "python_version < \"3.11\""},
{version = ">=0.3.7", markers = "python_version >= \"3.12\""},
{version = ">=0.3.6", markers = "python_version == \"3.11\""},
]
isort = ">=4.2.5,<5.13 || >5.13,<7"
mccabe = ">=0.6,<0.8"
platformdirs = ">=2.2"
tomli = {version = ">=1.1", markers = "python_version < \"3.11\""}
tomlkit = ">=0.10.1"
typing-extensions = {version = ">=3.10", markers = "python_version < \"3.10\""}
[package.extras]
spelling = ["pyenchant (>=3.2,<4.0)"]
testutils = ["gitpython (>3)"]
[[package]]
name = "python-dateutil"
version = "2.8.2"
description = "Extensions to the standard Python datetime module"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
groups = ["dev"]
files = [
{file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"},
{file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"},
]
[package.dependencies]
six = ">=1.5"
[[package]]
name = "python-gnupg"
version = "0.5.4"
description = "A wrapper for the Gnu Privacy Guard (GPG or GnuPG)"
optional = false
python-versions = "*"
groups = ["main"]
files = [
{file = "python-gnupg-0.5.4.tar.gz", hash = "sha256:f2fdb5fb29615c77c2743e1cb3d9314353a6e87b10c37d238d91ae1c6feae086"},
{file = "python_gnupg-0.5.4-py2.py3-none-any.whl", hash = "sha256:40ce25cde9df29af91fe931ce9df3ce544e14a37f62b13ca878c897217b2de6c"},
]
[[package]]
name = "pytoolconfig"
version = "1.3.1"
description = "Python tool configuration"
optional = false
python-versions = ">=3.8"
groups = ["dev"]
files = [
{file = "pytoolconfig-1.3.1-py3-none-any.whl", hash = "sha256:5d8cea8ae1996938ec3eaf44567bbc5ef1bc900742190c439a44a704d6e1b62b"},
{file = "pytoolconfig-1.3.1.tar.gz", hash = "sha256:51e6bd1a6f108238ae6aab6a65e5eed5e75d456be1c2bf29b04e5c1e7d7adbae"},
]
[package.dependencies]
packaging = ">=23.2"
platformdirs = {version = ">=3.11.0", optional = true, markers = "extra == \"global\""}
tomli = {version = ">=2.0.1", markers = "python_version < \"3.11\""}
[package.extras]
doc = ["sphinx (>=7.1.2)", "tabulate (>=0.9.0)"]
gendocs = ["pytoolconfig[doc]", "sphinx (>=7.1.2)", "sphinx-autodoc-typehints (>=1.25.2)", "sphinx-rtd-theme (>=2.0.0)"]
global = ["platformdirs (>=3.11.0)"]
validation = ["pydantic (>=2.5.3)"]
[[package]]
name = "redis"
version = "5.2.1"
description = "Python client for Redis database and key-value store"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "redis-5.2.1-py3-none-any.whl", hash = "sha256:ee7e1056b9aea0f04c6c2ed59452947f34c4940ee025f5dd83e6a6418b6989e4"},
{file = "redis-5.2.1.tar.gz", hash = "sha256:16f2e22dff21d5125e8481515e386711a34cbec50f0e44413dd7d9c060a54e0f"},
]
[package.dependencies]
async-timeout = {version = ">=4.0.3", markers = "python_full_version < \"3.11.3\""}
[package.extras]
hiredis = ["hiredis (>=3.0.0)"]
ocsp = ["cryptography (>=36.0.1)", "pyopenssl (==23.2.1)", "requests (>=2.31.0)"]
[[package]]
name = "rich"
version = "13.7.1"
description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal"
optional = false
python-versions = ">=3.7.0"
groups = ["dev"]
files = [
{file = "rich-13.7.1-py3-none-any.whl", hash = "sha256:4edbae314f59eb482f54e9e30bf00d33350aaa94f4bfcd4e9e3110e64d0d7222"},
{file = "rich-13.7.1.tar.gz", hash = "sha256:9be308cb1fe2f1f57d67ce99e95af38a1e2bc71ad9813b0e247cf7ffbcc3a432"},
]
[package.dependencies]
markdown-it-py = ">=2.2.0"
pygments = ">=2.13.0,<3.0.0"
[package.extras]
jupyter = ["ipywidgets (>=7.5.1,<9)"]
[[package]]
name = "rope"
version = "1.13.0"
description = "a python refactoring library..."
optional = false
python-versions = ">=3.8"
groups = ["dev"]
files = [
{file = "rope-1.13.0-py3-none-any.whl", hash = "sha256:b435a0c0971244fdcd8741676a9fae697ae614c20cc36003678a7782f25c0d6c"},
{file = "rope-1.13.0.tar.gz", hash = "sha256:51437d2decc8806cd5e9dd1fd9c1306a6d9075ecaf78d191af85fc1dfface880"},
]
[package.dependencies]
pytoolconfig = {version = ">=1.2.2", extras = ["global"]}
[package.extras]
dev = ["build (>=0.7.0)", "pre-commit (>=2.20.0)", "pytest (>=7.0.1)", "pytest-cov (>=4.1.0)", "pytest-timeout (>=2.1.0)"]
doc = ["pytoolconfig[doc]", "sphinx (>=4.5.0)", "sphinx-autodoc-typehints (>=1.18.1)", "sphinx-rtd-theme (>=1.0.0)"]
release = ["pip-tools (>=6.12.1)", "toml (>=0.10.2)", "twine (>=4.0.2)"]
[[package]]
name = "semver"
version = "3.0.2"
description = "Python helper for Semantic Versioning (https://semver.org)"
optional = false
python-versions = ">=3.7"
groups = ["dev"]
files = [
{file = "semver-3.0.2-py3-none-any.whl", hash = "sha256:b1ea4686fe70b981f85359eda33199d60c53964284e0cfb4977d243e37cf4bf4"},
{file = "semver-3.0.2.tar.gz", hash = "sha256:6253adb39c70f6e51afed2fa7152bcd414c411286088fb4b9effb133885ab4cc"},
]
[[package]]
name = "shtab"
version = "1.7.0"
description = "Automagic shell tab completion for Python CLI applications"
optional = false
python-versions = ">=3.7"
groups = ["dev"]
files = [
{file = "shtab-1.7.0-py3-none-any.whl", hash = "sha256:0824f6f965cf5b6466d8870289c15a8973518820231a84f4422a0b8377261375"},
{file = "shtab-1.7.0.tar.gz", hash = "sha256:6661c2835d0214e259ab74d09bdb9a863752e898bcf2e75ad8cf7ebd7c35bc7e"},
]
[package.extras]
dev = ["pytest (>=6)", "pytest-cov", "pytest-timeout"]
[[package]]
name = "six"
version = "1.16.0"
description = "Python 2 and 3 compatibility utilities"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*"
groups = ["dev"]
files = [
{file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"},
{file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"},
]
[[package]]
name = "sniffio"
version = "1.3.1"
description = "Sniff out which async library your code is running under"
optional = false
python-versions = ">=3.7"
groups = ["dev"]
files = [
{file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"},
{file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"},
]
[[package]]
name = "tomli"
version = "2.0.1"
description = "A lil' TOML parser"
optional = false
python-versions = ">=3.7"
groups = ["dev"]
markers = "python_version < \"3.11\""
files = [
{file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"},
{file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"},
]
[[package]]
name = "tomlkit"
version = "0.12.4"
description = "Style preserving TOML library"
optional = false
python-versions = ">=3.7"
groups = ["dev"]
files = [
{file = "tomlkit-0.12.4-py3-none-any.whl", hash = "sha256:5cd82d48a3dd89dee1f9d64420aa20ae65cfbd00668d6f094d7578a78efbb77b"},
{file = "tomlkit-0.12.4.tar.gz", hash = "sha256:7ca1cfc12232806517a8515047ba66a19369e71edf2439d0f5824f91032b6cc3"},
]
[[package]]
name = "typing-extensions"
version = "4.10.0"
description = "Backported and Experimental Type Hints for Python 3.8+"
optional = false
python-versions = ">=3.8"
groups = ["dev"]
markers = "python_version < \"3.11\""
files = [
{file = "typing_extensions-4.10.0-py3-none-any.whl", hash = "sha256:69b1a937c3a517342112fb4c6df7e72fc39a38e7891a5730ed4985b5214b5475"},
{file = "typing_extensions-4.10.0.tar.gz", hash = "sha256:b0abd7c89e8fb96f98db18d86106ff1d90ab692004eb746cf6eda2682f91b3cb"},
]
[[package]]
name = "wrapt"
version = "1.16.0"
description = "Module for decorators, wrappers and monkey patching."
optional = false
python-versions = ">=3.6"
groups = ["main"]
files = [
{file = "wrapt-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ffa565331890b90056c01db69c0fe634a776f8019c143a5ae265f9c6bc4bd6d4"},
{file = "wrapt-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e4fdb9275308292e880dcbeb12546df7f3e0f96c6b41197e0cf37d2826359020"},
{file = "wrapt-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb2dee3874a500de01c93d5c71415fcaef1d858370d405824783e7a8ef5db440"},
{file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2a88e6010048489cda82b1326889ec075a8c856c2e6a256072b28eaee3ccf487"},
{file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac83a914ebaf589b69f7d0a1277602ff494e21f4c2f743313414378f8f50a4cf"},
{file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:73aa7d98215d39b8455f103de64391cb79dfcad601701a3aa0dddacf74911d72"},
{file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:807cc8543a477ab7422f1120a217054f958a66ef7314f76dd9e77d3f02cdccd0"},
{file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bf5703fdeb350e36885f2875d853ce13172ae281c56e509f4e6eca049bdfb136"},
{file = "wrapt-1.16.0-cp310-cp310-win32.whl", hash = "sha256:f6b2d0c6703c988d334f297aa5df18c45e97b0af3679bb75059e0e0bd8b1069d"},
{file = "wrapt-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:decbfa2f618fa8ed81c95ee18a387ff973143c656ef800c9f24fb7e9c16054e2"},
{file = "wrapt-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1a5db485fe2de4403f13fafdc231b0dbae5eca4359232d2efc79025527375b09"},
{file = "wrapt-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:75ea7d0ee2a15733684badb16de6794894ed9c55aa5e9903260922f0482e687d"},
{file = "wrapt-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a452f9ca3e3267cd4d0fcf2edd0d035b1934ac2bd7e0e57ac91ad6b95c0c6389"},
{file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:43aa59eadec7890d9958748db829df269f0368521ba6dc68cc172d5d03ed8060"},
{file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72554a23c78a8e7aa02abbd699d129eead8b147a23c56e08d08dfc29cfdddca1"},
{file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d2efee35b4b0a347e0d99d28e884dfd82797852d62fcd7ebdeee26f3ceb72cf3"},
{file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:6dcfcffe73710be01d90cae08c3e548d90932d37b39ef83969ae135d36ef3956"},
{file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:eb6e651000a19c96f452c85132811d25e9264d836951022d6e81df2fff38337d"},
{file = "wrapt-1.16.0-cp311-cp311-win32.whl", hash = "sha256:66027d667efe95cc4fa945af59f92c5a02c6f5bb6012bff9e60542c74c75c362"},
{file = "wrapt-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:aefbc4cb0a54f91af643660a0a150ce2c090d3652cf4052a5397fb2de549cd89"},
{file = "wrapt-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5eb404d89131ec9b4f748fa5cfb5346802e5ee8836f57d516576e61f304f3b7b"},
{file = "wrapt-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9090c9e676d5236a6948330e83cb89969f433b1943a558968f659ead07cb3b36"},
{file = "wrapt-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94265b00870aa407bd0cbcfd536f17ecde43b94fb8d228560a1e9d3041462d73"},
{file = "wrapt-1.16.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2058f813d4f2b5e3a9eb2eb3faf8f1d99b81c3e51aeda4b168406443e8ba809"},
{file = "wrapt-1.16.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98b5e1f498a8ca1858a1cdbffb023bfd954da4e3fa2c0cb5853d40014557248b"},
{file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:14d7dc606219cdd7405133c713f2c218d4252f2a469003f8c46bb92d5d095d81"},
{file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:49aac49dc4782cb04f58986e81ea0b4768e4ff197b57324dcbd7699c5dfb40b9"},
{file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:418abb18146475c310d7a6dc71143d6f7adec5b004ac9ce08dc7a34e2babdc5c"},
{file = "wrapt-1.16.0-cp312-cp312-win32.whl", hash = "sha256:685f568fa5e627e93f3b52fda002c7ed2fa1800b50ce51f6ed1d572d8ab3e7fc"},
{file = "wrapt-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:dcdba5c86e368442528f7060039eda390cc4091bfd1dca41e8046af7c910dda8"},
{file = "wrapt-1.16.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:d462f28826f4657968ae51d2181a074dfe03c200d6131690b7d65d55b0f360f8"},
{file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a33a747400b94b6d6b8a165e4480264a64a78c8a4c734b62136062e9a248dd39"},
{file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3646eefa23daeba62643a58aac816945cadc0afaf21800a1421eeba5f6cfb9c"},
{file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ebf019be5c09d400cf7b024aa52b1f3aeebeff51550d007e92c3c1c4afc2a40"},
{file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:0d2691979e93d06a95a26257adb7bfd0c93818e89b1406f5a28f36e0d8c1e1fc"},
{file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:1acd723ee2a8826f3d53910255643e33673e1d11db84ce5880675954183ec47e"},
{file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:bc57efac2da352a51cc4658878a68d2b1b67dbe9d33c36cb826ca449d80a8465"},
{file = "wrapt-1.16.0-cp36-cp36m-win32.whl", hash = "sha256:da4813f751142436b075ed7aa012a8778aa43a99f7b36afe9b742d3ed8bdc95e"},
{file = "wrapt-1.16.0-cp36-cp36m-win_amd64.whl", hash = "sha256:6f6eac2360f2d543cc875a0e5efd413b6cbd483cb3ad7ebf888884a6e0d2e966"},
{file = "wrapt-1.16.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a0ea261ce52b5952bf669684a251a66df239ec6d441ccb59ec7afa882265d593"},
{file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7bd2d7ff69a2cac767fbf7a2b206add2e9a210e57947dd7ce03e25d03d2de292"},
{file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9159485323798c8dc530a224bd3ffcf76659319ccc7bbd52e01e73bd0241a0c5"},
{file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a86373cf37cd7764f2201b76496aba58a52e76dedfaa698ef9e9688bfd9e41cf"},
{file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:73870c364c11f03ed072dda68ff7aea6d2a3a5c3fe250d917a429c7432e15228"},
{file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:b935ae30c6e7400022b50f8d359c03ed233d45b725cfdd299462f41ee5ffba6f"},
{file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:db98ad84a55eb09b3c32a96c576476777e87c520a34e2519d3e59c44710c002c"},
{file = "wrapt-1.16.0-cp37-cp37m-win32.whl", hash = "sha256:9153ed35fc5e4fa3b2fe97bddaa7cbec0ed22412b85bcdaf54aeba92ea37428c"},
{file = "wrapt-1.16.0-cp37-cp37m-win_amd64.whl", hash = "sha256:66dfbaa7cfa3eb707bbfcd46dab2bc6207b005cbc9caa2199bcbc81d95071a00"},
{file = "wrapt-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1dd50a2696ff89f57bd8847647a1c363b687d3d796dc30d4dd4a9d1689a706f0"},
{file = "wrapt-1.16.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:44a2754372e32ab315734c6c73b24351d06e77ffff6ae27d2ecf14cf3d229202"},
{file = "wrapt-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e9723528b9f787dc59168369e42ae1c3b0d3fadb2f1a71de14531d321ee05b0"},
{file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dbed418ba5c3dce92619656802cc5355cb679e58d0d89b50f116e4a9d5a9603e"},
{file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:941988b89b4fd6b41c3f0bfb20e92bd23746579736b7343283297c4c8cbae68f"},
{file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6a42cd0cfa8ffc1915aef79cb4284f6383d8a3e9dcca70c445dcfdd639d51267"},
{file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1ca9b6085e4f866bd584fb135a041bfc32cab916e69f714a7d1d397f8c4891ca"},
{file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5e49454f19ef621089e204f862388d29e6e8d8b162efce05208913dde5b9ad6"},
{file = "wrapt-1.16.0-cp38-cp38-win32.whl", hash = "sha256:c31f72b1b6624c9d863fc095da460802f43a7c6868c5dda140f51da24fd47d7b"},
{file = "wrapt-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:490b0ee15c1a55be9c1bd8609b8cecd60e325f0575fc98f50058eae366e01f41"},
{file = "wrapt-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9b201ae332c3637a42f02d1045e1d0cccfdc41f1f2f801dafbaa7e9b4797bfc2"},
{file = "wrapt-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2076fad65c6736184e77d7d4729b63a6d1ae0b70da4868adeec40989858eb3fb"},
{file = "wrapt-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5cd603b575ebceca7da5a3a251e69561bec509e0b46e4993e1cac402b7247b8"},
{file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b47cfad9e9bbbed2339081f4e346c93ecd7ab504299403320bf85f7f85c7d46c"},
{file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8212564d49c50eb4565e502814f694e240c55551a5f1bc841d4fcaabb0a9b8a"},
{file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5f15814a33e42b04e3de432e573aa557f9f0f56458745c2074952f564c50e664"},
{file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db2e408d983b0e61e238cf579c09ef7020560441906ca990fe8412153e3b291f"},
{file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:edfad1d29c73f9b863ebe7082ae9321374ccb10879eeabc84ba3b69f2579d537"},
{file = "wrapt-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed867c42c268f876097248e05b6117a65bcd1e63b779e916fe2e33cd6fd0d3c3"},
{file = "wrapt-1.16.0-cp39-cp39-win_amd64.whl", hash = "sha256:eb1b046be06b0fce7249f1d025cd359b4b80fc1c3e24ad9eca33e0dcdb2e4a35"},
{file = "wrapt-1.16.0-py3-none-any.whl", hash = "sha256:6906c4100a8fcbf2fa735f6059214bb13b97f75b1a61777fcf6432121ef12ef1"},
{file = "wrapt-1.16.0.tar.gz", hash = "sha256:5f370f952971e7d17c7d1ead40e49f32345a7f7a5373571ef44d800d06b1899d"},
]
[metadata]
lock-version = "2.1"
python-versions = "^3.9"
content-hash = "f6e1105f1cf70545c954799440199fcf63045ddcd879cb73ea9e8f838abe4140"
ospd-openvas-22.9.0/poetry.toml 0000664 0000000 0000000 00000000040 15011310720 0016421 0 ustar 00root root 0000000 0000000 [virtualenvs]
in-project = true
ospd-openvas-22.9.0/pyproject.toml 0000664 0000000 0000000 00000004375 15011310720 0017135 0 ustar 00root root 0000000 0000000 [build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
[tool.poetry]
name = "ospd-openvas"
version = "22.9.0"
description = "ospd based scanner for openvas"
authors = ["Greenbone AG <info@greenbone.net>"]
license = "AGPL-3.0-or-later"
readme = "README.md"
homepage = "https://github.com/greenbone/ospd-openvas"
repository = "https://github.com/greenbone/ospd-openvas"
# Full list: https://pypi.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
]
keywords = [
"openvas",
"Greenbone Vulnerability Management",
"Vulnerability Scanning",
"OSP",
"Open Scanner Protocol",
]
packages = [
{ include = "ospd_openvas"},
{ include = "ospd"},
{ include = "docs/ospd-openvas.8", format = "sdist"},
{ include = "config/ospd-openvas.service", format = "sdist"},
{ include = "config/ospd-openvas.conf", format = "sdist"},
{ include = "tests", format = "sdist" },
{ include = "CHANGELOG.md", format = "sdist"},
{ include = "COPYING", format = "sdist"},
{ include = "poetry.lock", format = "sdist"},
{ include = "poetry.toml", format = "sdist"},
]
[tool.poetry.dependencies]
python = "^3.9"
redis = ">=4.5.0"
psutil = ">=5.5.1,<8.0.0"
packaging = ">=20.4,<26.0"
lxml = ">=4.5.2,<6.0.0"
defusedxml = ">=0.6,<0.8"
deprecated = "^1.2.10"
paho-mqtt = ">=1.6,<3"
python-gnupg = ">=0.4.8,<0.6.0"
[tool.poetry.dev-dependencies]
pylint = "^3.3.7"
rope = "^1.13.0"
autohooks-plugin-pylint = ">=21.6.0"
autohooks-plugin-black = ">=22.7.0"
pontos = ">=22.8.0"
black = ">=22.6.0"
[tool.poetry.scripts]
ospd-openvas = "ospd_openvas.daemon:main"
[tool.black]
line-length = 80
target-version = ['py39']
skip-string-normalization = true
exclude = '''
/(
\.git
| \.hg
| \.venv
| \.circleci
| \.github
| \.vscode
| _build
| build
| dist
| docs
)/
'''
[tool.autohooks]
mode = "poetry"
pre-commit = ['autohooks.plugins.black']
[tool.pontos.version]
version-module-file = "ospd_openvas/__version__.py"
ospd-openvas-22.9.0/smoketest/ 0000775 0000000 0000000 00000000000 15011310720 0016226 5 ustar 00root root 0000000 0000000 ospd-openvas-22.9.0/smoketest/Dockerfile 0000664 0000000 0000000 00000005243 15011310720 0020224 0 ustar 00root root 0000000 0000000 FROM registry.community.greenbone.net/community/vulnerability-tests AS nasl
# use latest version
RUN mv `ls -d /var/lib/openvas/* | sort -r | head -n 1`/vt-data/nasl /nasl
# Stage: build the Go smoketest helper binaries (test runner, feed preparer, scan CLI).
FROM golang AS binaries
# NOTE(review): --chmod=7777 (world-writable, all special bits) looks like test
# convenience — confirm a tighter mode is not sufficient.
COPY --chmod=7777 smoketest /usr/local/src
WORKDIR /usr/local/src
RUN make build-cmds
# Final stage: openvas-scanner plus the services the smoketests need
# (redis, mosquitto, sshd) and a Python toolchain to install ospd-openvas.
FROM registry.community.greenbone.net/community/openvas-scanner:edge
RUN apt-get update && apt-get install --no-install-recommends --no-install-suggests -y \
    mosquitto \
    redis \
    gcc \
    python3-dev \
    python3 \
    python3-pip \
    openssh-server &&\
    apt-get remove --purge --auto-remove -y &&\
    rm -rf /var/lib/apt/lists/*
COPY --chmod=7777 . /usr/local/src/ospd-openvas
COPY smoketest/redis.conf /etc/redis/redis.conf
# Drop the full feed; a trimmed plugin set is installed further below.
RUN rm -rf /var/lib/openvas/plugins/*
RUN cp -r /usr/local/src/ospd-openvas/smoketest/data/notus /var/lib/notus
# Unprivileged test user; member of the redis group so it can use the
# redis/ospd runtime directories created below.
RUN useradd -rm -s /bin/bash -g redis -u 1000 gvm
RUN mkdir /run/redis
RUN chown gvm:redis /run/redis
RUN mkdir -p /var/run/ospd/
RUN chown gvm:redis /var/run/ospd
RUN touch /etc/openvas/openvas_log.conf
RUN chown gvm:redis /etc/openvas/openvas_log.conf
WORKDIR /usr/local/src/ospd-openvas
# Install ospd-openvas system-wide (PEP 668 managed environment override).
RUN python3 -m pip install --break-system-packages .
RUN chown gvm:redis /var/log/gvm
RUN mkdir /run/mosquitto
# Minimal mosquitto broker configuration; openvas is pointed at it via
# mqtt_server_uri below.
RUN echo "allow_anonymous true" >> /etc/mosquitto.conf
RUN echo "pid_file /tmp/mosquitto.pid" >> /etc/mosquitto.conf
RUN echo "log_dest file /tmp/mosquitto.log" >> /etc/mosquitto.conf
RUN echo "persistence_location = /tmp/" >> /etc/mosquitto.conf
RUN echo "persistence true" >> /etc/mosquitto.conf
RUN echo "mqtt_server_uri = localhost:1883" >> /etc/openvas/openvas.conf
RUN chown mosquitto:mosquitto /run/mosquitto
RUN mkdir -p /var/log/mosquitto/
RUN chown mosquitto:mosquitto /var/log/mosquitto
RUN chmod 774 /var/log/mosquitto
COPY --from=binaries /usr/local/src/bin/* /usr/local/bin/
RUN mv /usr/local/src/ospd-openvas/smoketest/run-tests.sh /usr/local/bin/run
COPY --from=nasl --chmod=7777 /nasl /var/lib/openvas/plugins
RUN mkdir -p /usr/local/src/policies
COPY smoketest/gatherpackagelist-c18bb781-3740-44c2-aa01-1b73a00066e8.xml /usr/local/src/policies
# Trim the feed: keep only the plugins referenced by the policies, then
# replace the full plugin dir with the trimmed set plus the test fixtures.
RUN ospd-policy-feed -p /usr/local/src/policies -t /usr/local/src/plugins
RUN rm -rf /var/lib/openvas/plugins
RUN mv /usr/local/src/plugins /var/lib/openvas/plugins
RUN cp -r /usr/local/src/ospd-openvas/smoketest/data/plugins/* /var/lib/openvas/plugins
RUN rm -rf /usr/local/src/ospd-openvas
# pip was only needed to install ospd-openvas.
RUN apt-get remove --purge --auto-remove -y python3-pip
RUN chown -R gvm:redis /var/lib/openvas/plugins/
RUN mkdir /run/sshd
# make gvm capable of running sshd
RUN chown -R gvm:redis /etc/ssh
RUN echo 'gvm:test' | chpasswd
RUN sed -i 's/#PidFile/Pidfile/' /etc/ssh/sshd_config
USER gvm
WORKDIR /home/gvm
# NOTE(review): shell-form CMD runs via /bin/sh -c, so the runner is not PID 1
# and will not receive SIGTERM directly; exec form would be preferable.
CMD /usr/local/bin/run
ospd-openvas-22.9.0/smoketest/Makefile 0000664 0000000 0000000 00000004100 15011310720 0017661 0 ustar 00root root 0000000 0000000 # SPDX-FileCopyrightText: 2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
.PHONY: build run
# Absolute directory of this Makefile; used to mount the repo into helper containers.
MAKEFILE_PATH := $(dir $(realpath $(firstword $(MAKEFILE_LIST))))
# Hostnames resolved to localhost inside the test container so scans can
# address the same target under several names.
ALTERNATIVE_HOSTS := smoketest.localdomain smoke.localdomain and.localdomain mirrors.localdomain addhostname.localdomain
# Expands to one `--add-host <name>:127.0.0.1` flag per alternative host.
ADD_HOST := $(addprefix --add-host ,$(addsuffix :127.0.0.1, ${ALTERNATIVE_HOSTS}))
# Base `docker run` arguments; privileged is presumably needed for the
# scanner's raw-socket capabilities — TODO confirm.
RUN_PARAM := run --rm --privileged ${ADD_HOST}
VARIANT := community
# Feed images: scan-configs/policies and NASL vulnerability tests.
DATA_OBJECTS := ghcr.io/greenbone/data-objects:${VARIANT}-staging
NASL := ghcr.io/greenbone/vulnerability-tests:${VARIANT}-staging
ifndef GO
GO := go
endif
# Static linux/amd64 builds so the binaries run inside the scanner image.
GO_BUILD := CGO_ENABLED=0 GOOS=linux GOARCH=amd64 ${GO} build -o
ifndef SORT
SORT := sort -r
endif
ifndef NASL_ROOT
# Newest feed directory below /var/lib/openvas (reverse-sorted, first entry).
NASL_ROOT := `ls -d /var/lib/openvas/* | ${SORT} | head -n 1`
endif
ifndef POLICY_ROOT
POLICY_ROOT := `find /var/lib/gvm/data-objects/gvmd/*/*configs -type d | ${SORT} | head -n 1`
endif
all: build run
# Compile the smoketest helper binaries into bin/.
build-cmds:
	- mkdir bin || true
	${GO_BUILD} bin/ospd-openvas-smoketests cmd/test/main.go
	${GO_BUILD} bin/ospd-policy-feed cmd/feed-preparer/main.go
	${GO_BUILD} bin/ospd-scans cmd/scans/main.go
# Build the smoketest image from the repository root; the build log is only
# shown (and kept) when the build fails.
build:
	cd .. && DOCKER_BUILDKIT=1 docker build -t greenbone/ospd-openvas-smoketests -f smoketest/Dockerfile . 2>build.log && rm build.log || (cat build.log && false)
run:
	docker ${RUN_PARAM} greenbone/ospd-openvas-smoketests
# Start an interactive shell in the smoketest image for debugging.
interactive:
	docker ${RUN_PARAM} --name ospd-st-ia -it greenbone/ospd-openvas-smoketests bash
# Pull the NASL feed image. The hint is single-quoted: the original
# double-quoted printf let the shell command-substitute `read:packages`
# via the embedded backticks, and "ghr.io" was a typo for "ghcr.io".
update-nasl-image:
	- docker pull ${NASL} || ( printf 'are you logged in to ghcr.io within docker with an access token that has the read:packages scope?\n' && false )
# Copy the newest NASL feed out of the feed image into ./.nasl for local runs.
fetch-nasl: update-nasl-image
	- docker run -it -v ${MAKEFILE_PATH}:/mnt --rm ${NASL} sh -c 'cp -rv ${NASL_ROOT}/vt-data/nasl /mnt/.nasl && chmod -R 777 /mnt/.nasl'
# Pull the data-objects (scan-config/policy) image. Single-quoted hint for the
# same reason as update-nasl-image: backticks in a double-quoted recipe string
# are executed by the shell as a command substitution.
update-data-objects-image:
	- docker pull ${DATA_OBJECTS} || ( printf 'are you logged in to ghcr.io within docker with an access token that has the read:packages scope?\n' && false )
# Copy the newest scan-configs/policies out of the data-objects image into ./.scan-config.
fetch-scan-configs: update-data-objects-image
	- docker run -it -v ${MAKEFILE_PATH}:/mnt --rm ${DATA_OBJECTS} sh -c 'install -D -v -m 777 ${POLICY_ROOT}/* -t /mnt/.scan-config'
ospd-openvas-22.9.0/smoketest/README.md 0000664 0000000 0000000 00000003210 15011310720 0017501 0 ustar 00root root 0000000 0000000 # smoke-test
Contains a small subset of functionality tests for ospd-openvas within a controlled environment.
To build and run the tests a Makefile is provided:
- make build-cmds - creates the go binaries within bin/
- make fetch-nasl - fetches the newest community feed into `.nasl/`
- make fetch-scan-configs - fetches the newest scan-configs/policies into `.scan-configs/`
- make build - builds the image `greenbone/ospd-openvas-smoketests`
- make run - runs the image `greenbone/ospd-openvas-smoketests`
- make - builds and run the image `greenbone/ospd-openvas-smoketests`
Unfortunately the community images are not deployed into docker hub yet.
You have to login within ghcr.io:
```
echo <TOKEN> | docker login ghcr.io -u <USERNAME> --password-stdin
```
To verify your local environment you need to have `go` installed:
```
OSPD_SOCKET=$PATH_TO_YOUR_OSPD_SOCKET go run cmd/test/main.go
```
Be aware that you need to have the nasl files within `./data/plugins` within your feed dir and the notus advisories `./data/notus/advisories` installed in your notus advisory dir.
To run the policy tests you also need to have the dependent scripts installed. To prepare that you can run:
```
go run cmd/feed-preparer/main.go -p .scan-configs -s .nasl -t /var/lib/openvas/plugins
```
This will parse the given scan configs and copy the necessary plugins from the source directory (`-s`) to the target directory (`-t`).
On top of that you need to have a local ssh-server running and populate user credentials via `USERNAME` and `PASSWORD`; otherwise the policy test will fail because they're unable to connect to ssh.
## Usage
```
make build run
```
ospd-openvas-22.9.0/smoketest/cmd/ 0000775 0000000 0000000 00000000000 15011310720 0016771 5 ustar 00root root 0000000 0000000 ospd-openvas-22.9.0/smoketest/cmd/feed-preparer/ 0000775 0000000 0000000 00000000000 15011310720 0021512 5 ustar 00root root 0000000 0000000 ospd-openvas-22.9.0/smoketest/cmd/feed-preparer/main.go 0000664 0000000 0000000 00000002652 15011310720 0022772 0 ustar 00root root 0000000 0000000 // SPDX-FileCopyrightText: 2023 Greenbone AG
//
// SPDX-License-Identifier: AGPL-3.0-or-later
/*
This cmd is
parsing the scan-configs,
looking within a source dir for nvts with either oid or family
to copy them to a target dir.
It is mainly used to prevent an unnecessarily large feed within the smoketest image when testing the policies.
*/
package main
import (
"flag"
"fmt"
"github.com/greenbone/ospd-openvas/smoketest/feed"
"github.com/greenbone/ospd-openvas/smoketest/nasl"
"github.com/greenbone/ospd-openvas/smoketest/policies"
)
// main prepares a trimmed-down feed: it reads the scan-configs (policies),
// resolves the NASL plugins they reference and copies those plugins from the
// source directory into the target directory.
func main() {
	source := flag.String("s", "/var/lib/openvas/plugins", "A path to existing plugins to copy from.")
	target := flag.String("t", "", "A path to prepare the new plugins layout.")
	policy := flag.String("p", "", "Path to scan-configs / plugins.")
	flag.Parse()
	// All three paths are mandatory; show usage and stop otherwise.
	if *source == "" || *target == "" || *policy == "" {
		flag.Usage()
		return
	}
	// Setup errors are unrecoverable for this tool.
	must := func(err error) {
		if err != nil {
			panic(err)
		}
	}
	fmt.Print("Initializing caches")
	naslCache, err := nasl.InitCache(*source)
	must(err)
	policyCache, err := policies.InitCache(*policy)
	must(err)
	// Named `loaded` to avoid shadowing the policies package.
	loaded := policyCache.Get()
	fmt.Printf(" found %d plugins and %d policies\n", len(naslCache.Get()), len(loaded))
	p := feed.NewPreparer(naslCache, policyCache, *source, *target)
	fmt.Printf("Preparing feed structure %s\n", *target)
	must(p.Run())
	p.Wait()
	fmt.Printf("Prepared feed structure %s\n", *target)
}
ospd-openvas-22.9.0/smoketest/cmd/scans/ 0000775 0000000 0000000 00000000000 15011310720 0020100 5 ustar 00root root 0000000 0000000 ospd-openvas-22.9.0/smoketest/cmd/scans/README.md 0000664 0000000 0000000 00000003112 15011310720 0021354 0 ustar 00root root 0000000 0000000 # ospd-scans
Is a small utility to make direct OSP commands easier to handle.
To function it needs to have
- the vt feed
- scan-configs from data-object feed
- as well as an running OSPD in either TCP or UNIX mode
It reads the scan-config (in our lingo from now on policy) and creates an OSP start scan command and sends it to OSPD.
## Usage
```
Usage of bin/ospd-scans:
-a string
(optional, when set it will NOT use unix socket but TCP) a target address (e.g. 10.42.0.81:4242)
-alive-method int
which alive method to use; 1. bit is for ICMP, 2. bit is for TCPSYN, 3. bit is for TCPACK, 4. bit is for ARP and 5. bit is to consider alive. (default 15)
-cert-path string
(only require when port is set ) path to the certificate used by ospd.
-certkey-path string
(only required when port is set) path to certificate key used by ospd.
-cmd string
Can either be start,get,start-finish.
-host string
host to scan
-id string
id of a scan
-oid string
comma separated list of oid of a plugin to execute
-password string
password of user (when using credentials)
-policies string
comma separated list of policies.
-policy-path string
path to policies. (default "/usr/local/src/policies")
-ports string
comma separated list of ports. (default "22,80,443,8080,513")
-u string
path the ospd unix socket (default "/run/ospd/ospd-openvas.sock")
-user string
user of host (when using credentials)
-v Enables or disables verbose.
-vt-dir string
A path to existing plugins. (default "/var/lib/openvas/plugins")
```
ospd-openvas-22.9.0/smoketest/cmd/scans/main.go 0000664 0000000 0000000 00000011466 15011310720 0021363 0 ustar 00root root 0000000 0000000 // SPDX-FileCopyrightText: 2023 Greenbone AG
//
// SPDX-License-Identifier: AGPL-3.0-or-later
package main
import (
"encoding/xml"
"flag"
"fmt"
"io/ioutil"
"os"
"strings"
"github.com/greenbone/ospd-openvas/smoketest/connection"
"github.com/greenbone/ospd-openvas/smoketest/nasl"
"github.com/greenbone/ospd-openvas/smoketest/policies"
"github.com/greenbone/ospd-openvas/smoketest/scan"
"github.com/greenbone/ospd-openvas/smoketest/usecases"
)
var DefaultScannerParams = []scan.ScannerParam{
{},
}
type PrintResponses struct{}
// Each renders the progress of an intermediate get_scans response on a
// single terminal line, overwritten via carriage return.
func (pr PrintResponses) Each(r scan.GetScansResponse) {
	progress := r.Scan.Progress
	fmt.Printf("\rprogress: %d", progress)
}
// Last handles the final get_scans response: it marshals the full report as
// indented XML into a temporary file and prints progress, status and the
// report location.
//
// Fixes over the original: the TempFile error is checked via err (the old
// code tested tmp == nil, which can miss the error), the file is closed via
// defer even on the panic path, and the '*' in the pattern keeps the .xml
// suffix at the end of the generated name (TempFile otherwise appends its
// random component after ".xml").
func (pr PrintResponses) Last(r scan.GetScansResponse) {
	xr, err := xml.MarshalIndent(r, "", " ")
	if err != nil {
		panic(err)
	}
	tmp, err := ioutil.TempFile(os.TempDir(), fmt.Sprintf("result-%s-*.xml", r.Scan.ID))
	if err != nil {
		panic(err)
	}
	defer tmp.Close()
	fmt.Printf("\rprogress: %d; status: %s; report: %s\n", r.Scan.Progress, r.Scan.Status, tmp.Name())
	if _, err := tmp.Write(xr); err != nil {
		panic(err)
	}
}
// main is a small OSP client: it builds either a get-scans or a start-scan
// command from the CLI flags and sends it to ospd-openvas over a unix socket
// (default) or TCP.
func main() {
	vtDIR := flag.String("vt-dir", "/var/lib/openvas/plugins", "A path to existing plugins.")
	tps := flag.String("policies", "", "comma separated list of policies.")
	policyPath := flag.String("policy-path", "/usr/local/src/policies", "path to policies.")
	host := flag.String("host", "", "host to scan")
	oids := flag.String("oid", "", "comma separated list of oid of a plugin to execute")
	ports := flag.String("ports", "22,80,443,8080,513", "comma separated list of ports.")
	username := flag.String("user", "", "user of host (when using credentials)")
	password := flag.String("password", "", "password of user (when using credentials)")
	scan_id := flag.String("id", "", "id of a scan")
	alivemethod := flag.Int("alive-method", 15, "which alive method to use; 1. bit is for ICMP, 2. bit is for TCPSYN, 3. bit is for TCPACK, 4. bit is for ARP and 5. bit is to consider alive. Use 16 to disable alive check.")
	cmd := flag.String("cmd", "", "Can either be start,get,start-finish.")
	ospdSocket := flag.String("u", "/run/ospd/ospd-openvas.sock", "path the ospd unix socket")
	tcpAddress := flag.String("a", "", "(optional) a target address, will set usage from UNIX to TCP protocoll (e.g. 10.42.0.81:4242)")
	certPath := flag.String("cert-path", "", "(only required when 'a' is set ) path to the certificate used by ospd.")
	certKeyPath := flag.String("certkey-path", "", "(only required when 'a' is set) path to certificate key used by ospd.")
	debug := flag.Bool("v", false, "Enables or disables verbose.")
	flag.Parse()
	// tillFinished switches from fire-and-forget to polling until the scan ends.
	tillFinished := false
	naslCache, err := nasl.InitCache(*vtDIR)
	if err != nil {
		panic(err)
	}
	var ospdCMD interface{}
	if flag.Parsed() {
		switch *cmd {
		case "get":
			// Query the status/results of an existing scan by id.
			ospdCMD = scan.GetScans{
				ID: *scan_id,
			}
		case "start-finish":
			// Like "start", but afterwards poll until the scan is finished.
			tillFinished = true
			fallthrough
		case "start":
			// Decompose the alive-method bitmask into the individual methods.
			alive := scan.AliveTestMethods{
				ICMP:          *alivemethod >> 0 & 1,
				TCPSYN:        *alivemethod >> 1 & 1,
				TCPACK:        *alivemethod >> 2 & 1,
				ARP:           *alivemethod >> 3 & 1,
				ConsiderAlive: *alivemethod >> 4 & 1,
			}
			target := scan.Target{
				Hosts:            *host,
				Ports:            *ports,
				AliveTestMethods: alive,
			}
			// Optional SSH user/password credentials for authenticated checks.
			if *username != "" {
				credential := scan.Credential{
					Type:     "up",
					Service:  "ssh",
					Username: *username,
					Password: *password,
				}
				target.Credentials = scan.Credentials{
					Credentials: []scan.Credential{credential},
				}
			}
			selection := scan.VTSelection{
				Single: make([]scan.VTSingle, 0),
				Group:  make([]scan.VTGroup, 0),
			}
			policyCache, err := policies.InitCache(*policyPath)
			if err != nil {
				panic(err)
			}
			// Resolve each named policy into VT group/single selections.
			if *tps != "" {
				for _, policy := range strings.Split(*tps, ",") {
					ps := policyCache.ByName(policy).AsVTSelection(naslCache)
					selection.Group = append(selection.Group, ps.Group...)
					selection.Single = append(selection.Single, ps.Single...)
				}
			}
			// Additionally select explicitly requested single OIDs.
			if *oids != "" {
				for _, oid := range strings.Split(*oids, ",") {
					selection.Single = append(selection.Single, scan.VTSingle{
						ID: oid,
					})
				}
			}
			ospdCMD = scan.Start{
				Targets:       scan.Targets{Targets: []scan.Target{target}},
				VTSelection:   []scan.VTSelection{selection},
				ScannerParams: DefaultScannerParams,
			}
		default:
			flag.Usage()
			os.Exit(1)
		}
	}
	// Default to the unix socket; switch to TCP when an address is given.
	protocoll := "unix"
	address := *ospdSocket
	if *tcpAddress != "" {
		address = *tcpAddress
		protocoll = "tcp"
	}
	co := connection.New(protocoll, address, *certPath, *certKeyPath, *debug)
	if !tillFinished {
		// Send the command once and print the raw XML response.
		b, err := co.SendRaw(ospdCMD)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s\n", b)
	} else {
		// Start the scan and keep polling, printing progress until it ends.
		resp := usecases.StartScanGetLastStatus(ospdCMD.(scan.Start), co, PrintResponses{})
		if resp.Failure != nil {
			panic(fmt.Errorf(resp.Failure.Description))
		}
	}
}
ospd-openvas-22.9.0/smoketest/cmd/test/ 0000775 0000000 0000000 00000000000 15011310720 0017750 5 ustar 00root root 0000000 0000000 ospd-openvas-22.9.0/smoketest/cmd/test/main.go 0000664 0000000 0000000 00000005715 15011310720 0021233 0 ustar 00root root 0000000 0000000 // SPDX-FileCopyrightText: 2023 Greenbone AG
//
// SPDX-License-Identifier: AGPL-3.0-or-later
package main
import (
"flag"
"fmt"
"os"
"github.com/greenbone/ospd-openvas/smoketest/connection"
"github.com/greenbone/ospd-openvas/smoketest/policies"
"github.com/greenbone/ospd-openvas/smoketest/vt"
"github.com/greenbone/ospd-openvas/smoketest/usecases"
"github.com/greenbone/ospd-openvas/smoketest/usecases/notus"
"github.com/greenbone/ospd-openvas/smoketest/usecases/policy"
"github.com/greenbone/ospd-openvas/smoketest/usecases/scan"
)
var username string
var password string
const protocoll = "unix"
// init seeds the smoketest credentials from the environment, falling back
// to the defaults used by the test images ("gvm" / "test").
func init() {
	if username = os.Getenv("USERNAME"); username == "" {
		username = "gvm"
	}
	if password = os.Getenv("PASSWORD"); password == "" {
		password = "test"
	}
}
// getVTs fetches all VTs known to OSPD; it panics on transport errors
// because the smoketest cannot continue without a VT list.
func getVTs(co connection.OSPDSender) vt.GetVTsResponse {
	response := vt.GetVTsResponse{}
	err := co.SendCommand(vt.Get{}, &response)
	if err != nil {
		panic(err)
	}
	return response
}
// retryUntilPluginsAreLoaded polls OSPD until at least one VT is reported,
// which signals that the plugin feed has finished loading.
// NOTE(review): this busy-polls without any delay between requests.
func retryUntilPluginsAreLoaded(co connection.OSPDSender) vt.GetVTsResponse {
	for {
		if r := getVTs(co); len(r.VTs.VT) != 0 {
			return r
		}
	}
}
// PrintFailures prints the test group title followed by one line per
// failed use case; nothing is printed when every response succeeded.
func PrintFailures(uc usecases.Tests, resp []usecases.Response) {
	printedHeader := false
	for i, r := range resp {
		if r.Success {
			continue
		}
		if !printedHeader {
			fmt.Printf("%s Failures:\n", uc.Title)
			printedHeader = true
		}
		fmt.Printf("\t%s:\n\t\t%s\n", uc.UseCases[i].Title, r.Description)
	}
}
// main wires up the smoketest runner: it parses CLI flags, loads the
// policy cache, connects to ospd-openvas (unix socket by default, TCP+TLS
// when -a is given), waits until the VT feed is loaded and then runs the
// selected (or all) use-case groups, printing any failures at the end.
func main() {
	ospdSocket := flag.String("u", "/run/ospd/ospd-openvas.sock", "(optional, default: /run/ospd/ospd-openvas.sock) path the ospd unix socket")
	tcpAddress := flag.String("a", "", "(optional, when set it will NOT use unix socket but TCP) a target address (e.g. 10.42.0.81:4242)")
	policyPath := flag.String("policy-path", "/usr/local/src/policies", "(optional, default: /usr/local/src/policies) path to policies.")
	certPath := flag.String("cert-path", "", "(only require when port is set ) path to the certificate used by ospd.")
	certKeyPath := flag.String("certkey-path", "", "(only required when port is set) path to certificate key used by ospd.")
	tg := flag.String("t", "", "(optional) Name of testgroup. If set it just tests given testgroup t.")
	flag.Parse()
	fmt.Printf("Initializing policy cache (%s)\n", *policyPath)
	policyCache, err := policies.InitCache(*policyPath)
	if err != nil {
		panic(err)
	}
	// default to the unix socket; -a switches transport to TCP (with TLS,
	// see connection.New)
	protocoll := "unix"
	address := *ospdSocket
	if *tcpAddress != "" {
		protocoll = "tcp"
		address = *tcpAddress
	}
	co := connection.New(protocoll, address, *certPath, *certKeyPath, false)
	fmt.Print("Trying to connect\n")
	// blocks until OSPD reports at least one VT (feed loaded)
	response := retryUntilPluginsAreLoaded(co)
	ucs := []usecases.Tests{
		notus.Create(username, password),
		scan.Create(),
		policy.Create(policyCache, username, password),
	}
	resps := make([][]usecases.Response, len(ucs))
	fmt.Printf("OSPD loaded %d vts\n", len(response.VTs.VT))
	// run either all groups or just the one selected via -t
	for i, t := range ucs {
		if *tg == "" || *tg == t.Title {
			resps[i] = t.Run(co)
		}
	}
	for i, t := range ucs {
		PrintFailures(t, resps[i])
	}
}
ospd-openvas-22.9.0/smoketest/connection/ 0000775 0000000 0000000 00000000000 15011310720 0020365 5 ustar 00root root 0000000 0000000 ospd-openvas-22.9.0/smoketest/connection/init.go 0000664 0000000 0000000 00000005040 15011310720 0021656 0 ustar 00root root 0000000 0000000 // SPDX-FileCopyrightText: 2023 Greenbone AG
//
// SPDX-License-Identifier: AGPL-3.0-or-later
package connection
import (
"crypto/tls"
"encoding/xml"
"errors"
"fmt"
"io"
"net"
"reflect"
)
//TODO move this to smoketest
// OSPDSender sends given commands to OSPD
type OSPDSender interface {
// SendCommand sends a given cmd and parses to result into given interface{}
SendCommand(cmd, v interface{}) error
// SendRaw sends a given cmd and returns the result in []bytes
SendRaw(cmd interface{}) ([]byte, error)
}
// ospdCon represents the connection used to connect to OSPD
type ospdCon struct {
	Protocol string // The protocol to be used; OSPD supports tcp (with TLS) and unix
	Address  string // Either a path to a unix socket or a host:port combination
	CertPath string // Path to certificate used by OSPD when not a UNIX socket, default: "/var/lib/gvm/CA/servercert.pem"
	KeyPath  string // Path to keyfile of certificate used by OSPD when not a UNIX socket, default: "/var/lib/gvm/private/CA/serverkey.pem"
	Debug    bool   // when true the sent requests and received responses are printed
}
// New creates an OSPDSender for the given protocol ("unix" or "tcp"),
// address and, for TCP, the client certificate/key pair; debug enables
// request/response logging.
func New(
	protocol string,
	address string,
	certPath string,
	keyPath string,
	debug bool,
) OSPDSender {
	con := ospdCon{
		Protocol: protocol,
		Address:  address,
		CertPath: certPath,
		KeyPath:  keyPath,
		Debug:    debug,
	}
	return &con
}
// SendRaw sends the given cmd as XML to OSPD and returns the raw response.
//
// For "tcp" a TLS connection with a client certificate (and no server
// verification) is used; any other protocol value is passed verbatim to
// net.Dial, in practice a unix domain socket.
func (con *ospdCon) SendRaw(cmd interface{}) ([]byte, error) {
	var c net.Conn
	var err error
	if con.Protocol == "tcp" {
		// BUGFIX: the previous version shadowed err inside this block, so
		// a failing tls.Dial was silently ignored and a nil connection was
		// used afterwards.
		cer, kerr := tls.LoadX509KeyPair(con.CertPath, con.KeyPath)
		if kerr != nil {
			return nil, kerr
		}
		conf := &tls.Config{
			Certificates:       []tls.Certificate{cer},
			InsecureSkipVerify: true,
		}
		c, err = tls.Dial("tcp", con.Address, conf)
	} else {
		c, err = net.Dial(con.Protocol, con.Address)
	}
	if err != nil {
		return nil, err
	}
	defer c.Close()
	b, err := xml.Marshal(cmd)
	if err != nil {
		return nil, err
	}
	if con.Debug {
		fmt.Printf("request: %s\n", b)
	}
	n, err := c.Write(b)
	if err != nil {
		return nil, err
	}
	if n != len(b) {
		return nil, fmt.Errorf("%d bytes were not send", len(b)-n)
	}
	return io.ReadAll(c)
}
// SendCommand sends given cmd to OSP (protocol, address) and unmarshals the result into v
func (con *ospdCon) SendCommand(cmd, v interface{}) error {
	// reject non-pointer targets early; xml.Unmarshal would fail anyway
	if reflect.ValueOf(v).Kind() != reflect.Ptr {
		return errors.New("non-pointer passed to Unmarshal")
	}
	incoming, err := con.SendRaw(cmd)
	if err != nil {
		return err
	}
	if con.Debug {
		fmt.Printf("response: %s\n", incoming)
	}
	// NOTE(review): an untyped nil v never reaches this point (its Kind is
	// Invalid, not Ptr), so this branch appears unreachable.
	if v == nil {
		return nil
	}
	return xml.Unmarshal(incoming, v)
}
ospd-openvas-22.9.0/smoketest/data/ 0000775 0000000 0000000 00000000000 15011310720 0017137 5 ustar 00root root 0000000 0000000 ospd-openvas-22.9.0/smoketest/data/notus/ 0000775 0000000 0000000 00000000000 15011310720 0020307 5 ustar 00root root 0000000 0000000 ospd-openvas-22.9.0/smoketest/data/notus/advisories/ 0000775 0000000 0000000 00000000000 15011310720 0022457 5 ustar 00root root 0000000 0000000 ospd-openvas-22.9.0/smoketest/data/notus/advisories/my test family.notus 0000664 0000000 0000000 00000003522 15011310720 0026402 0 ustar 00root root 0000000 0000000 {
"version": "1.0",
"advisories": [
{
"oid": "1.3.6.1.4.1.25623.1.0.90022",
"title": "NOTUS: should be overriden in get_nvts",
"creation_date": 1623250709,
"last_modification": 1629339952,
"advisory_id": "Example",
"advisory_xref": "https://www.greenbone.net",
"cves": [
"CVE-2009-5029"
],
"summary": "Sad, sad robot",
"insight": "His steely skin is covered by centuries of dust. Once he was a great one, now he's dull and rust.",
"affected": "The robot, I guess.",
"impact": "No one, he's not a great one anymore.",
"xrefs": [],
"severity": {
"origin": "NVD",
"date": 1367584740,
"cvss_v2": "AV:N/AC:M/Au:N/C:P/I:P/A:P",
"cvss_v3": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:L/I:N/A:N"
}
},
{
"oid": "1.3.6.1.4.1.25623.1.0.42",
"title": "I am also here",
"creation_date": 1623250710,
"last_modification": 1629339962,
"advisory_id": "Example",
"advisory_xref": "https://www.greenbone.net",
"cves": [
"CVE-2009-5029"
],
"summary": "Sad, sad robot",
"insight": "His steely skin is covered by centuries of dust. Once he was a great one, now he's dull and rust.",
"affected": "The robot, I guess.",
"impact": "No one, he's not a great one anymore.",
"xrefs": [],
"severity": {
"origin": "NVD",
"date": 1367584740,
"cvss_v2": "AV:N/AC:M/Au:N/C:P/I:P/A:P",
"cvss_v3": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:L/I:N/A:N"
}
}
]
}
ospd-openvas-22.9.0/smoketest/data/plugins/ 0000775 0000000 0000000 00000000000 15011310720 0020620 5 ustar 00root root 0000000 0000000 ospd-openvas-22.9.0/smoketest/data/plugins/add_host_name.nasl 0000664 0000000 0000000 00000004326 15011310720 0024271 0 ustar 00root root 0000000 0000000 # SPDX-FileCopyrightText: 2023 Greenbone AG
# Some text descriptions might be excerpted from (a) referenced
# source(s), and are Copyright (C) by the respective right holder(s).
#
# SPDX-License-Identifier: AGPL-3.0-or-later
if(description)
{
script_oid("0.0.0.0.0.0.0.0.0.3");
script_version("2019-11-10T15:30:28+0000");
script_name("test-add-host-name");
script_category(ACT_SCANNER);
script_family("my test family");
script_tag(name:"some", value:"tag");
script_tag(name:"last_modification", value:"2019-11-10 15:30:28 +0000 (Tue, 10 Nov 2020)");
script_tag(name:"creation_date", value:"2015-03-27 12:00:00 +0100 (Fri, 27 Mar 2015)");
script_tag(name:"cvss_base", value:"0.0");
script_tag(name:"cvss_base_vector", value:"AV:N/AC:L/Au:N/C:N/I:N/A:N");
script_tag(name:"qod_type", value:"remote_app");
script_tag(name:"qod", value:"0");
script_version("2021-08-19T02:25:52+0000");
script_cve_id("CVE-0000-0000", "CVE-0000-0001");
script_tag(name:"severity_vector", value:"CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H");
script_tag(name:"severity_origin", value:"NVD");
script_tag(name:"severity_date", value:"2020-08-07 19:36:00 +0000 (Fri, 07 Aug 2020)");
script_xref(name:"Example", value:"GB-Test-1");
script_xref(name:"URL", value:"https://www.greenbone.net");
script_add_preference(name:"example", type:"entry", value:"a default string value");
script_tag(name:"vuldetect", value:"Describes what this plugin is doing to detect a vulnerability.");
script_tag(name:"summary", value:"A short description of the problem");
script_tag(name:"insight", value:"Some detailed insights of the problem");
script_tag(name:"impact", value:"Some detailed about what is impacted");
script_tag(name:"affected", value:"Affected programs, operation system, ...");
script_tag(name:"solution", value:"Solution description");
script_tag(name:"solution_type", value:"Type of solution (e.g. mitigation, vendor fix)");
script_tag(name:"solution_method", value:"how to solve it (e.g. debian apt upgrade)");
script_tag(name:"qod_type", value:"package");
exit(0);
}
add_host_name(hostname: "addhostname.localdomain", source: "magic entry");
hn = get_host_name();
log_msg = hn;
log_message(data:log_msg);
exit(0);
ospd-openvas-22.9.0/smoketest/data/plugins/plugin_feed_info.inc 0000664 0000000 0000000 00000000547 15011310720 0024615 0 ustar 00root root 0000000 0000000 # SPDX-FileCopyrightText: 2023 Greenbone AG
# Some text descriptions might be excerpted from (a) referenced
# source(s), and are Copyright (C) by the respective right holder(s).
#
# SPDX-License-Identifier: AGPL-3.0-or-later
PLUGIN_SET = "202001010002";
PLUGIN_FEED = "Greenbone SCM Feed";
FEED_VENDOR = "Greenbone AG";
FEED_HOME = "N/A";
FEED_NAME = "SCM";
ospd-openvas-22.9.0/smoketest/data/plugins/slowtest.nasl 0000664 0000000 0000000 00000004153 15011310720 0023366 0 ustar 00root root 0000000 0000000 # SPDX-FileCopyrightText: 2023 Greenbone AG
# Some text descriptions might be excerpted from (a) referenced
# source(s), and are Copyright (C) by the respective right holder(s).
#
# SPDX-License-Identifier: AGPL-3.0-or-later
if(description)
{
script_oid("0.0.0.0.0.0.0.0.0.2");
script_version("2019-11-10T15:30:28+0000");
script_name("test");
script_category(ACT_SCANNER);
script_family("my test family");
script_tag(name:"some", value:"tag");
script_tag(name:"last_modification", value:"2019-11-10 15:30:28 +0000 (Tue, 10 Nov 2020)");
script_tag(name:"creation_date", value:"2015-03-27 12:00:00 +0100 (Fri, 27 Mar 2015)");
script_tag(name:"cvss_base", value:"0.0");
script_tag(name:"cvss_base_vector", value:"AV:N/AC:L/Au:N/C:N/I:N/A:N");
script_tag(name:"qod_type", value:"remote_app");
script_tag(name:"qod", value:"0");
script_version("2021-08-19T02:25:52+0000");
script_cve_id("CVE-0000-0000", "CVE-0000-0001");
script_tag(name:"severity_vector", value:"CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H");
script_tag(name:"severity_origin", value:"NVD");
script_tag(name:"severity_date", value:"2020-08-07 19:36:00 +0000 (Fri, 07 Aug 2020)");
script_xref(name:"Example", value:"GB-Test-1");
script_xref(name:"URL", value:"https://www.greenbone.net");
script_add_preference(name:"example", type:"entry", value:"a default string value");
script_tag(name:"vuldetect", value:"Describes what this plugin is doing to detect a vulnerability.");
script_tag(name:"summary", value:"A short description of the problem");
script_tag(name:"insight", value:"Some detailed insights of the problem");
script_tag(name:"impact", value:"Some detailed about what is impacted");
script_tag(name:"affected", value:"Affected programs, operation system, ...");
script_tag(name:"solution", value:"Solution description");
script_tag(name:"solution_type", value:"Type of solution (e.g. mitigation, vendor fix)");
script_tag(name:"solution_method", value:"how to solve it (e.g. debian apt upgrade)");
script_tag(name:"qod_type", value:"package");
exit(0);
}
sleep(60);
log_message(data: "waking up");
exit(0);
ospd-openvas-22.9.0/smoketest/data/plugins/test.nasl 0000664 0000000 0000000 00000004411 15011310720 0022456 0 ustar 00root root 0000000 0000000 # SPDX-FileCopyrightText: 2023 Greenbone AG
# Some text descriptions might be excerpted from (a) referenced
# source(s), and are Copyright (C) by the respective right holder(s).
#
# SPDX-License-Identifier: AGPL-3.0-or-later
if(description)
{
script_oid("0.0.0.0.0.0.0.0.0.1");
script_version("2019-11-10T15:30:28+0000");
script_name("test");
script_category(ACT_SCANNER);
script_family("my test family");
script_tag(name:"some", value:"tag");
script_tag(name:"last_modification", value:"2019-11-10 15:30:28 +0000 (Tue, 10 Nov 2020)");
script_tag(name:"creation_date", value:"2015-03-27 12:00:00 +0100 (Fri, 27 Mar 2015)");
script_tag(name:"cvss_base", value:"0.0");
script_tag(name:"cvss_base_vector", value:"AV:N/AC:L/Au:N/C:N/I:N/A:N");
script_tag(name:"qod_type", value:"remote_app");
script_tag(name:"qod", value:"0");
script_version("2021-08-19T02:25:52+0000");
script_cve_id("CVE-0000-0000", "CVE-0000-0001");
script_tag(name:"severity_vector", value:"CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H");
script_tag(name:"severity_origin", value:"NVD");
script_tag(name:"severity_date", value:"2020-08-07 19:36:00 +0000 (Fri, 07 Aug 2020)");
script_xref(name:"Example", value:"GB-Test-1");
script_xref(name:"URL", value:"https://www.greenbone.net");
script_add_preference(name:"example", type:"entry", value:"a default string value");
script_tag(name:"vuldetect", value:"Describes what this plugin is doing to detect a vulnerability.");
script_tag(name:"summary", value:"A short description of the problem");
script_tag(name:"insight", value:"Some detailed insights of the problem");
script_tag(name:"impact", value:"Some detailed about what is impacted");
script_tag(name:"affected", value:"Affected programs, operation system, ...");
script_tag(name:"solution", value:"Solution description");
script_tag(name:"solution_type", value:"Type of solution (e.g. mitigation, vendor fix)");
script_tag(name:"solution_method", value:"how to solve it (e.g. debian apt upgrade)");
script_tag(name:"qod_type", value:"package");
exit(0);
}
sec_msg = "this is a security message";
log_msg = "this is a log message";
err_msg = "this is a error message";
security_message(data:sec_msg);
log_message(data:log_msg);
error_message(data:err_msg);
exit(0);
ospd-openvas-22.9.0/smoketest/feed/ 0000775 0000000 0000000 00000000000 15011310720 0017131 5 ustar 00root root 0000000 0000000 ospd-openvas-22.9.0/smoketest/feed/preparer.go 0000664 0000000 0000000 00000010773 15011310720 0021310 0 ustar 00root root 0000000 0000000 // SPDX-FileCopyrightText: 2023 Greenbone AG
//
// SPDX-License-Identifier: AGPL-3.0-or-later
package feed
import (
"errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/greenbone/ospd-openvas/smoketest/file"
"github.com/greenbone/ospd-openvas/smoketest/nasl"
"github.com/greenbone/ospd-openvas/smoketest/policies"
"github.com/greenbone/ospd-openvas/smoketest/scan"
)
const familyPrefixLen int = len("family = \"")
// copier tracks which files have already been copied from source to target
// so concurrent goroutines do not duplicate work.
type copier struct {
	sync.RWMutex          // guards copied
	copied []string       // source paths that were copied successfully
	source string         // root directory paths are copied from
	target string         // root directory paths are copied to
}
// Append records path as copied; safe for concurrent use.
func (c *copier) Append(path string) {
	c.Lock()
	defer c.Unlock()
	c.copied = append(c.copied, path)
}
// IsCopied reports whether path was already recorded via Append.
func (c *copier) IsCopied(path string) bool {
	c.RLock()
	defer c.RUnlock()
	for _, known := range c.copied {
		if known == path {
			return true
		}
	}
	return false
}
// newCopier returns a copier that rewrites paths from source to target.
func newCopier(source, target string) *copier {
	c := copier{
		copied: make([]string, 0),
		source: source,
		target: target,
	}
	return &c
}
// Copy copies fpath (a path below c.source) to the corresponding location
// below c.target, creating missing parent directories (0740). Files that
// were already copied are skipped. On a copy failure the partial target
// file is removed (best effort). It returns the number of bytes copied.
func (c *copier) Copy(fpath string) (int64, error) {
	if c.IsCopied(fpath) {
		return 0, nil
	}
	// map the source path onto the target tree
	npath := strings.Replace(fpath, c.source, c.target, 1)
	bdir := filepath.Dir(npath)
	if _, err := os.Stat(bdir); errors.Is(err, os.ErrNotExist) {
		if err := os.MkdirAll(bdir, 0740); err != nil {
			return 0, err
		}
	}
	// file.Retry works around transient "too many open files" errors
	fin, err := file.Retry(fpath, os.Open)
	if err != nil {
		return 0, err
	}
	fout, err := file.Retry(npath, os.Create)
	if err != nil {
		fin.Close()
		return 0, err
	}
	blen, err := io.Copy(fout, fin)
	fin.Close()
	fout.Close()
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to copy %s into %s: %s\n", fpath, npath, err)
		// clean up the incomplete target file
		if err := os.Remove(npath); err != nil {
			fmt.Fprintf(os.Stderr, "failed to remove %s: %s\n", npath, err)
		}
	} else {
		c.Append(fpath)
	}
	return blen, err
}
// preparer copies every plugin referenced by the cached policies
// (including include files and script dependencies) from feedsource into
// feedtarget and writes a matching plugin_feed_info.inc.
type preparer struct {
	wg          sync.WaitGroup  // tracks the copy goroutines spawned by Run
	naslCache   *nasl.Cache     // parsed plugins of the source feed
	policyCache *policies.Cache // policies whose VTs should be copied
	feedsource  string          // root of the full feed
	feedtarget  string          // root of the reduced feed to generate
	copier      *copier         // deduplicating file copier
}
// NewPreparer creates a preparer that copies all plugins required by the
// cached policies from source into target.
func NewPreparer(naslCache *nasl.Cache, policyCache *policies.Cache, source, target string) *preparer {
	return &preparer{
		naslCache:   naslCache,
		policyCache: policyCache,
		feedsource:  source,
		feedtarget:  target,
		copier:      newCopier(source, target),
	}
}
// feedInfo creates feedtarget (0740) if needed and writes a minimal
// plugin_feed_info.inc into it unless one already exists. PLUGIN_SET is
// derived from the current time so each generated feed looks newer.
func (p *preparer) feedInfo() error {
	fip := filepath.Join(p.feedtarget, "plugin_feed_info.inc")
	if _, err := os.Stat(p.feedtarget); errors.Is(err, os.ErrNotExist) {
		if err := os.MkdirAll(p.feedtarget, 0740); err != nil {
			return err
		}
	}
	if _, err := os.Stat(fip); errors.Is(err, os.ErrNotExist) {
		fout, err := os.Create(fip)
		if err != nil {
			return err
		}
		// NOTE(review): write errors of Fprintf/Close are ignored here.
		fmt.Fprintf(fout, "PLUGIN_SET = \"%d\"\n", time.Now().UnixMilli())
		fmt.Fprintf(fout, "PLUGIN_FEED = \"%s\"\n", "Policy Plugins Only")
		fmt.Fprintf(fout, "FEED_VENDOR = \"%s\"\n", "Greenbone AG")
		fmt.Fprintf(fout, "FEED_HOME = \"%s\"\n", "N/A")
		fmt.Fprintf(fout, "FEED_NAME = \"%s\"\n", "PPO")
		fout.Close()
	}
	return nil
}
// copyPlugin copies the given plugin, its include files and, recursively,
// all of its script dependencies into the target feed. Dependencies that
// are not in the cache are reported on stderr but do not abort the copy.
// NOTE(review): there is no cycle guard; circular script dependencies
// would recurse forever -- confirm the feed cannot contain them.
func (p *preparer) copyPlugin(n *nasl.Plugin) error {
	// cp copies a single plugin file plus its include files
	cp := func(np *nasl.Plugin) error {
		if _, err := p.copier.Copy(np.Path); err != nil {
			return err
		}
		for _, inc := range np.Plugins {
			if _, err := p.copier.Copy(inc); err != nil {
				return err
			}
		}
		return nil
	}
	if err := cp(n); err != nil {
		return err
	}
	for _, sdp := range n.ScriptDependencies {
		if sd := p.naslCache.ByPath(sdp); sd != nil {
			if err := p.copyPlugin(sd); err != nil {
				return err
			}
		} else {
			fmt.Fprintf(os.Stderr, "%s dependency %s not found\n", n.OID, sdp)
		}
	}
	return nil
}
// Run writes the feed info file and then copies, concurrently, every
// plugin referenced by one of the cached policies - either directly by OID
// or via a family filter of the form `family = "<name>"`. Use Wait to
// block until all copy goroutines are done.
func (p *preparer) Run() error {
	policies := p.policyCache.Get()
	if err := p.feedInfo(); err != nil {
		return err
	}
	for _, policy := range policies {
		s := policy.AsVTSelection(p.naslCache)
		p.wg.Add(1)
		go func(s []scan.VTSingle) {
			defer p.wg.Done()
			for _, i := range s {
				p.wg.Add(1)
				go func(oid string) {
					defer p.wg.Done()
					if n := p.naslCache.ByOID(oid); n != nil {
						if err := p.copyPlugin(n); err != nil {
							fmt.Fprintf(os.Stderr, "Unable to copy %s: %s\n", n.OID, err)
						}
					} else {
						fmt.Fprintf(os.Stderr, "%s not found\n", oid)
					}
				}(i.ID)
			}
		}(s.Single)
		p.wg.Add(1)
		go func(g []scan.VTGroup) {
			defer p.wg.Done()
			for _, i := range g {
				p.wg.Add(1)
				go func(filter string) {
					defer p.wg.Done()
					// BUGFIX: the filter was previously ignored (fam was
					// always empty, so every known plugin got copied).
					// Extract the family name from a `family = "<name>"`
					// filter; nasl.Cache stores family keys lowercased.
					var fam string
					if len(filter) > familyPrefixLen &&
						strings.HasPrefix(filter, "family = \"") &&
						strings.HasSuffix(filter, "\"") {
						fam = strings.ToLower(filter[familyPrefixLen : len(filter)-1])
					}
					for _, j := range p.naslCache.ByFamily(fam) {
						if err := p.copyPlugin(j); err != nil {
							fmt.Fprintf(os.Stderr, "Unable to copy %s: %s\n", j.OID, err)
						}
					}
				}(i.Filter)
			}
		}(s.Group)
	}
	return nil
}
// Wait blocks until every copy goroutine started by Run has finished.
func (p *preparer) Wait() {
	p.wg.Wait()
}
ospd-openvas-22.9.0/smoketest/file/ 0000775 0000000 0000000 00000000000 15011310720 0017145 5 ustar 00root root 0000000 0000000 ospd-openvas-22.9.0/smoketest/file/walker.go 0000664 0000000 0000000 00000002262 15011310720 0020763 0 ustar 00root root 0000000 0000000 // SPDX-FileCopyrightText: 2023 Greenbone AG
//
// SPDX-License-Identifier: AGPL-3.0-or-later
package file
import (
"io/fs"
"os"
"path/filepath"
"strings"
"sync"
"time"
)
// Walker walks a directory tree and runs Handler concurrently for every
// file whose name ends in Suffix.
type Walker struct {
	Suffix  string               // file name suffix to match, e.g. ".nasl"
	Handler func(*os.File) error // called per matching file; the file is closed afterwards
	wg      sync.WaitGroup       // tracks the handler goroutines
}
// Wait blocks until all handler goroutines spawned by Walk have finished.
func (fw *Walker) Wait() {
	fw.wg.Wait()
}
func Retry(path string, fo func(string) (*os.File, error)) (*os.File, error) {
open:
f, err := fo(path)
if pe, ok := err.(*fs.PathError); ok {
if pe.Err.Error() == "too many open files" {
// wait for a bit and retry
time.Sleep(1 * time.Second)
goto open
}
}
return f, err
}
// Walk is a filepath.WalkFunc: directories hit by a transient "too many
// open files" error are re-walked after a short sleep; every other walk
// error panics (an unreadable feed is fatal for the smoketest). Files
// matching Suffix are handled concurrently; use Wait to join.
func (fw *Walker) Walk(p string, i os.FileInfo, e error) error {
	if e != nil {
		// BUGFIX: i may be nil when e is non-nil (see filepath.Walk
		// docs); check it before calling IsDir to avoid a nil dereference.
		if pe, ok := e.(*fs.PathError); ok && i != nil && i.IsDir() {
			if pe.Err.Error() == "too many open files" {
				// wait for a bit and retry
				time.Sleep(1 * time.Second)
				return filepath.Walk(p, fw.Walk)
			}
		}
		panic(e)
	}
	if !i.IsDir() && strings.HasSuffix(p, fw.Suffix) {
		fw.wg.Add(1)
		go func(path string) {
			defer fw.wg.Done()
			f, err := Retry(path, os.Open)
			if err != nil {
				panic(err)
			}
			if err := fw.Handler(f); err != nil {
				panic(err)
			}
			f.Close()
		}(p)
	}
	return nil
}
ospd-openvas-22.9.0/smoketest/gatherpackagelist-c18bb781-3740-44c2-aa01-1b73a00066e8.xml 0000664 0000000 0000000 00000000626 15011310720 0027024 0 ustar 00root root 0000000 0000000
GatherPackageList
A simple policy containing only gather package list.
0
scan
1
2
1.3.6.1.4.1.25623.1.0.50282
ospd-openvas-22.9.0/smoketest/go.mod 0000664 0000000 0000000 00000000540 15011310720 0017333 0 ustar 00root root 0000000 0000000 module github.com/greenbone/ospd-openvas/smoketest
go 1.18
require (
github.com/eclipse/paho.golang v0.10.0
github.com/google/uuid v1.3.0
)
require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/google/go-cmp v0.5.6 // indirect
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
gopkg.in/yaml.v3 v3.0.0 // indirect
)
ospd-openvas-22.9.0/smoketest/go.sum 0000664 0000000 0000000 00000004204 15011310720 0017361 0 ustar 00root root 0000000 0000000 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/eclipse/paho.golang v0.10.0 h1:oUGPjRwWcZQRgDD9wVDV7y7i7yBSxts3vcvcNJo8B4Q=
github.com/eclipse/paho.golang v0.10.0/go.mod h1:rhrV37IEwauUyx8FHrvmXOKo+QRKng5ncoN1vJiJMcs=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA=
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
ospd-openvas-22.9.0/smoketest/gpg-key-definition 0000664 0000000 0000000 00000000411 15011310720 0021636 0 ustar 00root root 0000000 0000000 %echo Generating a basic OpenPGP key
Key-Type: DSA
Key-Length: 1024
Subkey-Type: ELG-E
Subkey-Length: 1024
Name-Real: Joe Tester
Name-Comment: Only use in test images
Name-Email: test@greenbone.net
Expire-Date: 0
Passphrase: abc
%pubring foo.pub
%commit
%echo done
ospd-openvas-22.9.0/smoketest/mqtt/ 0000775 0000000 0000000 00000000000 15011310720 0017213 5 ustar 00root root 0000000 0000000 ospd-openvas-22.9.0/smoketest/mqtt/mqtt.go 0000664 0000000 0000000 00000006175 15011310720 0020540 0 ustar 00root root 0000000 0000000 // SPDX-FileCopyrightText: 2023 Greenbone AG
//
// SPDX-License-Identifier: AGPL-3.0-or-later
package mqtt
import (
"context"
"encoding/json"
"fmt"
"net"
"github.com/eclipse/paho.golang/paho"
)
// TopicData is a tuple for Topic and Message.
type TopicData struct {
Topic string
Message []byte
}
// MQTT is the connection type used to talk to an MQTT broker via paho.
type MQTT struct {
	client            *paho.Client
	connectProperties *paho.Connect
	qos               byte
	incoming          chan *TopicData // Is used to send response messages of a handler downwards
}
// Incoming returns the channel on which subscribed messages are delivered.
func (m MQTT) Incoming() <-chan *TopicData {
	return m.incoming
}
// Close disconnects from the broker and then closes the incoming channel.
//
// BUGFIX: previously the channel was closed before disconnecting, so a
// message delivered in between made the subscription handler panic with
// "send on closed channel".
func (m MQTT) Close() error {
	err := m.client.Disconnect(&paho.Disconnect{ReasonCode: 0})
	close(m.incoming)
	return err
}
// register installs a router handler that forwards each publication on
// topic into m.incoming and subscribes to the topic with the configured
// QoS.
func (m MQTT) register(topic string) error {
	m.client.Router.RegisterHandler(topic, func(p *paho.Publish) {
		m.incoming <- &TopicData{Topic: topic, Message: p.Payload}
	})
	_, err := m.client.Subscribe(context.Background(), &paho.Subscribe{
		// we need NoLocal otherwise we would consume our own messages
		// again and ack them.
		Subscriptions: map[string]paho.SubscribeOptions{
			topic: {QoS: m.qos, NoLocal: true},
		},
	},
	)
	return err
}
// Subscribe registers a handler and broker subscription for every given
// topic, stopping at the first subscription that fails.
func (m MQTT) Subscribe(topics ...string) error {
	for _, topic := range topics {
		err := m.register(topic)
		if err != nil {
			return err
		}
	}
	return nil
}
// Publish marshals message as JSON and publishes it on topic with the
// configured QoS.
func (m MQTT) Publish(topic string, message interface{}) error {
	payload, err := json.Marshal(message)
	if err != nil {
		return err
	}
	pb := paho.Publish{
		Topic:      topic,
		QoS:        m.qos,
		Payload:    payload,
		Properties: &paho.PublishProperties{},
	}
	_, err = m.client.Publish(context.Background(), &pb)
	return err
}
// Connect establishes the MQTT session using the stored connect
// properties; a non-zero CONNACK reason code is turned into an error.
func (m MQTT) Connect() error {
	ca, err := m.client.Connect(context.Background(), m.connectProperties)
	if err != nil {
		return err
	}
	if ca.ReasonCode != 0 {
		return fmt.Errorf(
			"failed to connect to %s : %d - %s",
			m.client.Conn.RemoteAddr().String(),
			ca.ReasonCode,
			ca.Properties.ReasonString,
		)
	}
	return nil
}
// Configuration holds information for MQTT
type Configuration struct {
ClientID string // The ID to be used when connecting to a broker
Username string // Username to be used as authentication; empty for anonymous
Password string // Password to be used as authentication with Username
CleanStart bool // CleanStart when false and SessionExpiry set to > 1 it will reuse a session
SessionExpiry uint64 // Amount of seconds a session is valid; WARNING when set to 0 it is effectively a cleanstart.
QOS byte
KeepAlive uint16
Inflight uint
}
// New builds an MQTT wrapper around the given net.Conn using cfg; the
// username/password flags are only set when the corresponding values are
// non-empty. The session is not established until Connect is called.
func New(conn net.Conn,
	cfg Configuration,
) (*MQTT, error) {
	c := paho.NewClient(paho.ClientConfig{
		Router: paho.NewStandardRouter(),
		Conn:   conn,
	})
	cp := &paho.Connect{
		KeepAlive:  cfg.KeepAlive,
		ClientID:   cfg.ClientID,
		CleanStart: cfg.CleanStart,
		Username:   cfg.Username,
		Password:   []byte(cfg.Password),
	}
	if cfg.Username != "" {
		cp.UsernameFlag = true
	}
	if cfg.Password != "" {
		cp.PasswordFlag = true
	}
	// incoming is buffered with cfg.Inflight to decouple handler delivery
	return &MQTT{
		client:            c,
		connectProperties: cp,
		qos:               cfg.QOS,
		incoming:          make(chan *TopicData, cfg.Inflight),
	}, nil
}
ospd-openvas-22.9.0/smoketest/nasl/ 0000775 0000000 0000000 00000000000 15011310720 0017163 5 ustar 00root root 0000000 0000000 ospd-openvas-22.9.0/smoketest/nasl/cache.go 0000664 0000000 0000000 00000004533 15011310720 0020562 0 ustar 00root root 0000000 0000000 // SPDX-FileCopyrightText: 2023 Greenbone AG
//
// SPDX-License-Identifier: AGPL-3.0-or-later
package nasl
import (
"os"
"path/filepath"
"strings"
"sync"
"github.com/greenbone/ospd-openvas/smoketest/file"
)
// Cache is a thread-safe in-memory index of parsed NASL plugins, keyed by
// OID, family (lowercased) and file path.
type Cache struct {
	sync.RWMutex
	plugins  []Plugin             // owning storage; the maps point into this slice
	byOID    map[string]*Plugin   // lookup by script OID
	byFamily map[string][]*Plugin // lookup by lowercased script family
	byPath   map[string]*Plugin   // lookup by file path
}
// NewCache returns an empty, ready-to-use plugin cache.
func NewCache() *Cache {
	c := Cache{
		plugins:  make([]Plugin, 0),
		byOID:    make(map[string]*Plugin),
		byFamily: make(map[string][]*Plugin),
		byPath:   make(map[string]*Plugin),
	}
	return &c
}
// Append stores p and indexes it by OID, path and (lowercased) family.
// Safe for concurrent use.
func (c *Cache) Append(p Plugin) {
	c.Lock()
	c.plugins = append(c.plugins, p)
	// point to the copy of c.plugins instead of given instance
	// to make it a bit easier for the garbage collector to not
	// have to track of the original
	// NOTE(review): a later append may reallocate c.plugins, leaving older
	// stored pointers at stale (but content-identical) copies.
	ptr := &c.plugins[len(c.plugins)-1]
	c.byOID[p.OID] = ptr
	c.byPath[p.Path] = ptr
	if p.Family != "" {
		// family keys are lowercased; lookups must match this
		fam := strings.ToLower(p.Family)
		if f, ok := c.byFamily[fam]; ok {
			c.byFamily[fam] = append(f, ptr)
		} else {
			c.byFamily[fam] = []*Plugin{ptr}
		}
	}
	// not using defer to speed things up
	c.Unlock()
}
// Get returns all cached plugins.
func (c *Cache) Get() []Plugin {
	c.RLock()
	plugins := c.plugins
	c.RUnlock()
	return plugins
}
// ByOID returns the plugin with the given OID, or nil when unknown.
func (c *Cache) ByOID(oid string) *Plugin {
	c.RLock()
	defer c.RUnlock()
	// the map's zero value is already a nil *Plugin
	return c.byOID[oid]
}
// ByPath returns the plugin parsed from the given file path, or nil.
func (c *Cache) ByPath(path string) *Plugin {
	c.RLock()
	defer c.RUnlock()
	// the map's zero value is already a nil *Plugin
	return c.byPath[path]
}
// ByFamily returns the plugins of the given family; an empty family name
// returns every plugin that has any family set.
//
// BUGFIX: Append stores family keys lowercased, so the lookup key is
// lowercased here as well - previously a mixed-case query silently
// returned no results. Lowercase callers are unaffected.
func (c *Cache) ByFamily(family string) []*Plugin {
	c.RLock()
	defer c.RUnlock()
	if family == "" {
		result := make([]*Plugin, 0)
		for _, v := range c.byFamily {
			result = append(result, v...)
		}
		return result
	}
	if r, ok := c.byFamily[strings.ToLower(family)]; ok {
		return r
	}
	return []*Plugin{}
}
// CacheFileWalkerHandler parses walked .nasl files into a Cache.
type CacheFileWalkerHandler struct {
	cache  *Cache // destination for parsed plugins
	source string // feed root, passed through to Parse
}

// fh parses the already opened file, closes it and appends the result to
// the cache; it is used as file.Walker handler.
func (fwh *CacheFileWalkerHandler) fh(f *os.File) error {
	p := Parse(fwh.source, f.Name(), f)
	f.Close()
	fwh.cache.Append(p)
	return nil
}
// NewCacheFileWalker returns a file.Walker that parses every .nasl file
// below source into c.
func NewCacheFileWalker(source string, c *Cache) *file.Walker {
	handler := CacheFileWalkerHandler{
		source: source,
		cache:  c,
	}
	return &file.Walker{
		Handler: handler.fh,
		Suffix:  ".nasl",
	}
}
// InitCache walks source, parses all .nasl files concurrently and returns
// the filled cache once every parser goroutine has finished.
func InitCache(source string) (*Cache, error) {
	cache := NewCache()
	fw := NewCacheFileWalker(source, cache)
	if err := filepath.Walk(source, fw.Walk); err != nil {
		return cache, err
	}
	fw.Wait()
	return cache, nil
}
ospd-openvas-22.9.0/smoketest/nasl/parser.go 0000664 0000000 0000000 00000012703 15011310720 0021011 0 ustar 00root root 0000000 0000000 // SPDX-FileCopyrightText: 2023 Greenbone AG
//
// SPDX-License-Identifier: AGPL-3.0-or-later
package nasl
import (
"bufio"
"bytes"
"io"
"path/filepath"
"strings"
"sync"
)
// Plugin is the subset of a NASL script's metadata the smoketest cares
// about.
type Plugin struct {
	Path               string   // path of the .nasl file
	OID                string   // value of script_oid(...)
	Family             string   // value of script_family(...)
	Plugins            []string // referenced plugin paths, resolved relative to the feed root -- TODO confirm against Parse (cut off in this view)
	ScriptDependencies []string // script dependency paths, resolved relative to the feed root
}
// Token classifies a lexeme produced by Scanner.Scan.
type Token int

const (
	UNKNOWN Token = iota
	EOF
	WS      // whitespace: space, \t, \n
	QT      // quote: " or '
	LP      // (
	RP      // )
	LB      // [
	RB      // ]
	CLB     // {
	CRB     // }
	C       // ,
	SC      // ;
	DP      // :
	KEYWORD // keywords are non special character runs: [a-zA-Z-_]+
)

// eof is the sentinel rune returned by Scanner.read at end of input.
var eof = rune(0)
// isWhitespace reports whether ch is a space, tab or newline.
func isWhitespace(ch rune) bool {
	switch ch {
	case ' ', '\t', '\n':
		return true
	default:
		return false
	}
}
// isKeywordComp reports whether ch may be part of a keyword: ASCII
// letters, '-' and '_'.
func isKeywordComp(ch rune) bool {
	switch {
	case 'a' <= ch && ch <= 'z':
		return true
	case 'A' <= ch && ch <= 'Z':
		return true
	default:
		return ch == '-' || ch == '_'
	}
}
// Scanner is a minimal rune-level lexer over an io.Reader.
type Scanner struct {
	r *bufio.Reader // buffered source; allows a single-rune unread
}

// NewScanner wraps r into a Scanner.
func NewScanner(r io.Reader) *Scanner {
	return &Scanner{r: bufio.NewReader(r)}
}
// read returns the next rune, or the eof sentinel on any read error.
func (s *Scanner) read() rune {
	ch, _, err := s.r.ReadRune()
	if err == nil {
		return ch
	}
	return eof
}
// unread pushes the last read rune back onto the reader.
func (s *Scanner) unread() { _ = s.r.UnreadRune() }
// skip consumes the current rune unconditionally plus every following rune
// accepted by verify, returning the given token and the consumed text. It
// stops (and unreads) at the first rejected rune, or stops at end of
// input.
func (s *Scanner) skip(result Token, verify func(rune) bool) (Token, string) {
	var buf bytes.Buffer
	// the first rune is taken as-is; callers already classified it
	buf.WriteRune(s.read())
	for {
		if ch := s.read(); ch == eof {
			break
		} else if !verify(ch) {
			s.unread()
			break
		} else {
			buf.WriteRune(ch)
		}
	}
	return result, buf.String()
}
// skipWS consumes a run of whitespace and returns it as a WS token.
func (s *Scanner) skipWS() (Token, string) {
	return s.skip(WS, isWhitespace)
}

// skipNonSpecial consumes a run of keyword runes as a KEYWORD token.
func (s *Scanner) skipNonSpecial() (Token, string) {
	return s.skip(KEYWORD, isKeywordComp)
}
// Scan returns the next token and its literal text. Whitespace and
// keyword runs are collapsed into single WS/KEYWORD tokens; each special
// character is returned as its own token.
func (s *Scanner) Scan() (Token, string) {
	ch := s.read()
	if isWhitespace(ch) {
		s.unread()
		return s.skip(WS, isWhitespace)
	}
	if isKeywordComp(ch) {
		s.unread()
		return s.skip(KEYWORD, isKeywordComp)
	}
	switch ch {
	case eof:
		return EOF, ""
	case '\'':
		return QT, string(ch)
	case '"':
		return QT, string(ch)
	case '(':
		return LP, string(ch)
	case ')':
		return RP, string(ch)
	case '[':
		return LB, string(ch)
	case ']':
		return RB, string(ch)
	case '{':
		return CLB, string(ch)
	case '}':
		return CRB, string(ch)
	case ',':
		return C, string(ch)
	case ';':
		return SC, string(ch)
	case ':':
		return DP, string(ch)
	default:
		return UNKNOWN, string(ch)
	}
}
// skipWS drains WS tokens from scanner and returns the first non-WS token
// together with its literal text.
func skipWS(scanner *Scanner) (Token, string) {
	for {
		if t, lit := scanner.Scan(); t != WS {
			return t, lit
		}
	}
}
// PluginCache is a list of plugin paths safe for concurrent use;
// the embedded RWMutex guards plugins.
type PluginCache struct {
	sync.RWMutex
	plugins []string
}
// Append adds the given plugin paths to the cache under the write lock.
func (pc *PluginCache) Append(s ...string) {
	pc.Lock()
	defer pc.Unlock()
	pc.plugins = append(pc.plugins, s...)
}
// Get returns a snapshot of the cached plugin paths.
//
// A copy is returned instead of the internal slice: handing out the backing
// array would let callers read it concurrently with Append, which writes into
// (or reallocates) the same array outside the lock held here — a data race
// the original implementation allowed.
func (pc *PluginCache) Get() []string {
	pc.RLock()
	defer pc.RUnlock()
	snapshot := make([]string, len(pc.plugins))
	copy(snapshot, pc.plugins)
	return snapshot
}
// StringArgument reads one quoted string literal from the scanner, skipping
// leading whitespace. It returns the unquoted content and true on success,
// or "" and false when the next token is not a quote or the input ends
// before the closing quote.
//
// Fix: the string now ends only at a quote matching the OPENING quote
// character. The original returned on any QT token, so a literal like
// "it's" was truncated at the apostrophe.
func StringArgument(scanner *Scanner) (string, bool) {
	t, open := skipWS(scanner)
	if t != QT {
		return "", false
	}
	var buf bytes.Buffer
	for {
		t, lit := scanner.Scan()
		if t == EOF {
			// Unterminated literal.
			return "", false
		}
		if t == QT && lit == open {
			return buf.String(), true
		}
		// A mismatched quote character is part of the string content.
		buf.WriteString(lit)
	}
}
// singleAnonStringArgumentFunction parses the tail of a call of the shape
// ("some string"); — an opening parenthesis, one quoted string, a closing
// parenthesis and a semicolon, with arbitrary whitespace in between.
// It returns the string and true, or "" and false on any mismatch.
func singleAnonStringArgumentFunction(scanner *Scanner) (string, bool) {
	if t, _ := skipWS(scanner); t != LP {
		return "", false
	}
	arg, ok := StringArgument(scanner)
	if !ok {
		return "", false
	}
	if t, _ := skipWS(scanner); t != RP {
		return "", false
	}
	if t, _ := skipWS(scanner); t != SC {
		return "", false
	}
	return arg, true
}
// multipleAnonStringArgumentFunction parses the tail of a call of the shape
// ("a", "b", ...); — a comma-separated list of quoted strings in parentheses
// followed by a semicolon. It returns the collected strings and true on a
// complete match; on any mismatch it returns what was collected so far and
// false.
func multipleAnonStringArgumentFunction(scanner *Scanner) ([]string, bool) {
	args := make([]string, 0)
	if t, _ := skipWS(scanner); t != LP {
		return args, false
	}
	for {
		arg, ok := StringArgument(scanner)
		if !ok {
			return args, false
		}
		t, _ := skipWS(scanner)
		if t == RP {
			// Accept the argument only once the terminating ';' is seen,
			// mirroring the original's ordering.
			t, _ = skipWS(scanner)
			if t == SC {
				return append(args, arg), true
			}
		}
		if t != C {
			return args, false
		}
		args = append(args, arg)
	}
}
// Parse scans a NASL script and extracts the metadata the smoketest needs:
// the script OID, its family, the paths of include()d files and the paths of
// its script_dependencies. Relative plugin paths are joined onto source;
// path is stored verbatim as the plugin's own location.
//
// The parser assumes each script declares its metadata inside a plain
// `if (description) { ... }` block, so no &&/|| handling is done, and that
// script_oid/script_family are called with string literals only (true for
// the feed as of 2022-06-03), so no variable lookup is needed.
func Parse(source, path string, input io.Reader) Plugin {
	var (
		oid          string
		family       string
		includes     = make([]string, 0)
		dependencies = make([]string, 0)
	)
	scanner := NewScanner(input)
	resolve := func(arg string, into *[]string) {
		*into = append(*into, filepath.Join(source, arg))
	}
	for {
		token, word := scanner.Scan()
		if token == EOF {
			break
		}
		if token != KEYWORD {
			continue
		}
		switch word {
		case "script_oid":
			if arg, ok := singleAnonStringArgumentFunction(scanner); ok {
				// TODO check if already parsed via cache and return
				oid = arg
			}
		case "script_family":
			if arg, ok := singleAnonStringArgumentFunction(scanner); ok {
				family = arg
			}
		case "script_dependencies":
			if args, ok := multipleAnonStringArgumentFunction(scanner); ok {
				for _, raw := range args {
					// Some scripts call script_dependencies("a.nasl, b.nasl")
					// instead of script_dependencies("a.nasl", "b.nasl"), so
					// each argument is additionally split on commas.
					for _, dep := range strings.Split(raw, ",") {
						resolve(strings.Trim(dep, " "), &dependencies)
					}
				}
			}
		case "include":
			if arg, ok := singleAnonStringArgumentFunction(scanner); ok {
				resolve(arg, &includes)
			}
		}
	}
	return Plugin{
		Path:               path,
		OID:                oid,
		Family:             family,
		Plugins:            includes,
		ScriptDependencies: dependencies,
	}
}
ospd-openvas-22.9.0/smoketest/notus/ 0000775 0000000 0000000 00000000000 15011310720 0017376 5 ustar 00root root 0000000 0000000 ospd-openvas-22.9.0/smoketest/notus/notus.go 0000664 0000000 0000000 00000001731 15011310720 0021077 0 ustar 00root root 0000000 0000000 // SPDX-FileCopyrightText: 2023 Greenbone AG
//
// SPDX-License-Identifier: AGPL-3.0-or-later
package notus
type Severity struct {
Origin string `json:"origin"`
Date int64 `json:"date"`
CVSSV2 string `json:"cvss_v2"`
CVSSV3 string `json:"cvss_v3"`
}
type Advisory struct {
OID string `json:"oid"`
Title string `json:"title"`
CreationDate int64 `json:"creation_date"`
LastModification int64 `json:"last_modification"`
AdvisoryId string `json:"advisory_id"`
AdvisoryXref string `json:"advisory_xref"`
Cves []string `json:"cves"`
Summary string `json:"summary"`
Insight string `json:"insight"`
Affected string `json:"affected"`
Impact string `json:"impact"`
Xrefs []string `json:"xrefs"`
Severity Severity `json:"seveerity"`
}
// Advisories is the top-level structure of a notus advisories feed file:
// a format version plus the list of contained advisories.
type Advisories struct {
	Version string `json:"version"`
	Advisories []Advisory `json:"advisories"`
}
ospd-openvas-22.9.0/smoketest/policies/ 0000775 0000000 0000000 00000000000 15011310720 0020035 5 ustar 00root root 0000000 0000000 ospd-openvas-22.9.0/smoketest/policies/cache.go 0000664 0000000 0000000 00000002626 15011310720 0021435 0 ustar 00root root 0000000 0000000 // SPDX-FileCopyrightText: 2023 Greenbone AG
//
// SPDX-License-Identifier: AGPL-3.0-or-later
package policies
import (
"encoding/xml"
"os"
"path/filepath"
"strings"
"sync"
"github.com/greenbone/ospd-openvas/smoketest/file"
)
// Cache is a concurrency-safe store of scan configs, keyed by the
// lower-cased config name; the embedded RWMutex guards cache.
type Cache struct {
	cache map[string]ScanConfig
	sync.RWMutex
}
// Append stores s under its lower-cased name, replacing any existing entry.
func (c *Cache) Append(s ScanConfig) {
	c.Lock()
	defer c.Unlock()
	c.cache[strings.ToLower(s.Name)] = s
}
// Get returns all cached scan configs in unspecified (map iteration) order.
func (c *Cache) Get() []ScanConfig {
	c.RLock()
	defer c.RUnlock()
	configs := make([]ScanConfig, 0, len(c.cache))
	for _, cfg := range c.cache {
		configs = append(configs, cfg)
	}
	return configs
}
// ByName looks up a scan config case-insensitively by name; a missing entry
// yields the ScanConfig zero value, exactly like the map's own zero value.
func (c *Cache) ByName(name string) ScanConfig {
	c.RLock()
	defer c.RUnlock()
	return c.cache[strings.ToLower(name)]
}
// NewCache returns an empty, ready-to-use scan-config cache.
func NewCache() *Cache {
	c := Cache{cache: map[string]ScanConfig{}}
	return &c
}
// FileWalkerHandler feeds scan-config XML files found by a file.Walker
// into a Cache.
type FileWalkerHandler struct {
	cache *Cache
}
// fh decodes a single scan-config XML file and stores the result in the
// cache. The decode error, if any, is returned unchanged.
//
// Fix: the file is now always closed via defer. The original closed it only
// on the success path, leaking the descriptor whenever decoding failed.
func (fw *FileWalkerHandler) fh(f *os.File) error {
	defer f.Close()
	var cfg ScanConfig
	if err := xml.NewDecoder(f).Decode(&cfg); err != nil {
		return err
	}
	fw.cache.Append(cfg)
	return nil
}
// NewFileWalker builds a file.Walker that parses every ".xml" file it
// visits into the given cache.
func NewFileWalker(cache *Cache) *file.Walker {
	handler := FileWalkerHandler{cache: cache}
	return &file.Walker{
		Suffix:  ".xml",
		Handler: handler.fh,
	}
}
// InitCache walks source recursively, parses every scan-config XML file
// found and returns the populated cache. On a walk error the cache built so
// far is returned together with the error; otherwise it waits for all
// handlers to finish before returning.
func InitCache(source string) (*Cache, error) {
	cache := NewCache()
	walker := NewFileWalker(cache)
	if err := filepath.Walk(source, walker.Walk); err != nil {
		return cache, err
	}
	walker.Wait()
	return cache, nil
}
ospd-openvas-22.9.0/smoketest/policies/init.go 0000664 0000000 0000000 00000003516 15011310720 0021334 0 ustar 00root root 0000000 0000000 // SPDX-FileCopyrightText: 2023 Greenbone AG
//
// SPDX-License-Identifier: AGPL-3.0-or-later
package policies
import (
"encoding/xml"
"fmt"
"github.com/greenbone/ospd-openvas/smoketest/nasl"
"github.com/greenbone/ospd-openvas/smoketest/scan"
)
// NVTSelector selects NVTs for a scan config, either everything, a whole
// family, or a single NVT, depending on Type.
type NVTSelector struct {
	Include int `xml:"include"` // NOTE(review): parsed but not referenced in this file — confirm semantics upstream
	Type int `xml:"type"` // 0 = Disabled, 1 = Family, 2 = NVT
	Filter string `xml:"family_or_nvt"` // family name (Type 1) or NVT OID (Type 2)
}

// NVTSelectors mirrors the <nvt_selectors> element of a scan-config
// XML document.
type NVTSelectors struct {
	XMLName xml.Name `xml:"nvt_selectors"`
	Selectors []NVTSelector `xml:"nvt_selector"`
}
// AsScanSelector translates a selector into the scan-request form: Type 1
// becomes a VTGroup with a family filter, Type 2 a single VT by OID. For
// Type 0 every plugin returned by cache.ByFamily("") is selected when a
// cache is provided (presumably "all families" — confirm against the nasl
// cache implementation); with a nil cache nothing is selected.
func (s NVTSelector) AsScanSelector(cache *nasl.Cache) (group *scan.VTGroup, single []scan.VTSingle) {
	switch s.Type {
	case 0:
		if cache == nil {
			return
		}
		plugins := cache.ByFamily("")
		single = make([]scan.VTSingle, len(plugins))
		for idx, plugin := range plugins {
			single[idx] = scan.VTSingle{ID: plugin.OID}
		}
	case 1:
		group = &scan.VTGroup{
			Filter: fmt.Sprintf("family = \"%s\"", s.Filter),
		}
	case 2:
		single = []scan.VTSingle{{ID: s.Filter}}
	}
	return
}
// ScanConfig is a scan configuration (policy) loaded from a gvmd
// scan-config XML document.
type ScanConfig struct {
	XMLName xml.Name `xml:"config"`
	ID string `xml:"id,attr"`
	Name string `xml:"name"`
	Comment string `xml:"comment"`
	Type int `xml:"type"` // parsed but unused: we are just an ospd scanner
	Usage string `xml:"usage_type"` // parsed but unused: we don't differentiate between policy and scan
	Selectors NVTSelectors
}
// AsVTSelection flattens all of the config's NVT selectors into a single
// scan.VTSelection, collecting single-VT entries and family groups
// separately. Both slices are always non-nil.
func (c ScanConfig) AsVTSelection(cache *nasl.Cache) scan.VTSelection {
	sel := scan.VTSelection{
		Single: make([]scan.VTSingle, 0),
		Group:  make([]scan.VTGroup, 0),
	}
	for _, selector := range c.Selectors.Selectors {
		group, singles := selector.AsScanSelector(cache)
		if singles != nil {
			sel.Single = append(sel.Single, singles...)
		}
		if group != nil {
			sel.Group = append(sel.Group, *group)
		}
	}
	return sel
}
ospd-openvas-22.9.0/smoketest/redis.conf 0000664 0000000 0000000 00000133270 15011310720 0020211 0 ustar 00root root 0000000 0000000 # Redis configuration file example.
#
# Note that in order to read the configuration file, Redis must be
# started with the file path as first argument:
#
# ./redis-server /path/to/redis.conf
# Note on units: when memory size is needed, it is possible to specify
# it in the usual form of 1k 5GB 4M and so forth:
#
# 1k => 1000 bytes
# 1kb => 1024 bytes
# 1m => 1000000 bytes
# 1mb => 1024*1024 bytes
# 1g => 1000000000 bytes
# 1gb => 1024*1024*1024 bytes
#
# units are case insensitive so 1GB 1Gb 1gB are all the same.
################################## INCLUDES ###################################
# Include one or more other config files here. This is useful if you
# have a standard template that goes to all Redis servers but also need
# to customize a few per-server settings. Include files can include
# other files, so use this wisely.
#
# Notice option "include" won't be rewritten by command "CONFIG REWRITE"
# from admin or Redis Sentinel. Since Redis always uses the last processed
# line as value of a configuration directive, you'd better put includes
# at the beginning of this file to avoid overwriting config change at runtime.
#
# If instead you are interested in using includes to override configuration
# options, it is better to use include as the last line.
#
# include /path/to/local.conf
# include /path/to/other.conf
################################## NETWORK #####################################
# By default, if no "bind" configuration directive is specified, Redis listens
# for connections from all the network interfaces available on the server.
# It is possible to listen to just one or multiple selected interfaces using
# the "bind" configuration directive, followed by one or more IP addresses.
#
# Examples:
#
# bind 192.168.1.100 10.0.0.1
# bind 127.0.0.1 ::1
#
# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the
# internet, binding to all the interfaces is dangerous and will expose the
# instance to everybody on the internet. So by default we uncomment the
# following bind directive, that will force Redis to listen only into
# the IPv4 loopback interface address (this means Redis will be able to
# accept connections only from clients running into the same computer it
# is running).
#
# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES
# JUST COMMENT THE FOLLOWING LINE.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
bind 127.0.0.1
# Protected mode is a layer of security protection, in order to avoid that
# Redis instances left open on the internet are accessed and exploited.
#
# When protected mode is on and if:
#
# 1) The server is not binding explicitly to a set of addresses using the
# "bind" directive.
# 2) No password is configured.
#
# The server only accepts connections from clients connecting from the
# IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain
# sockets.
#
# By default protected mode is enabled. You should disable it only if
# you are sure you want clients from other hosts to connect to Redis
# even if no authentication is configured, nor a specific set of interfaces
# are explicitly listed using the "bind" directive.
protected-mode yes
# Accept connections on the specified port, default is 6379 (IANA #815344).
# If port 0 is specified Redis will not listen on a TCP socket.
port 0
# TCP listen() backlog.
#
# In high requests-per-second environments you need an high backlog in order
# to avoid slow clients connections issues. Note that the Linux kernel
# will silently truncate it to the value of /proc/sys/net/core/somaxconn so
# make sure to raise both the value of somaxconn and tcp_max_syn_backlog
# in order to get the desired effect.
tcp-backlog 511
# Unix socket.
#
# Specify the path for the Unix socket that will be used to listen for
# incoming connections. There is no default, so Redis will not listen
# on a unix socket when not specified.
#
unixsocket /run/redis/redis.sock
# You only live once, it is used for a non production environment
unixsocketperm 777
# Close the connection after a client is idle for N seconds (0 to disable)
timeout 0
# TCP keepalive.
#
# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
# of communication. This is useful for two reasons:
#
# 1) Detect dead peers.
# 2) Take the connection alive from the point of view of network
# equipment in the middle.
#
# On Linux, the specified value (in seconds) is the period used to send ACKs.
# Note that to close the connection the double of the time is needed.
# On other kernels the period depends on the kernel configuration.
#
# A reasonable value for this option is 300 seconds, which is the new
# Redis default starting with Redis 3.2.1.
tcp-keepalive 300
################################# GENERAL #####################################
# By default Redis does not run as a daemon. Use 'yes' if you need it.
# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
daemonize yes
# If you run Redis from upstart or systemd, Redis can interact with your
# supervision tree. Options:
# supervised no - no supervision interaction
# supervised upstart - signal upstart by putting Redis into SIGSTOP mode
# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET
# supervised auto - detect upstart or systemd method based on
# UPSTART_JOB or NOTIFY_SOCKET environment variables
# Note: these supervision methods only signal "process is ready."
# They do not enable continuous liveness pings back to your supervisor.
supervised no
# If a pid file is specified, Redis writes it where specified at startup
# and removes it at exit.
#
# When the server runs non daemonized, no pid file is created if none is
# specified in the configuration. When the server is daemonized, the pid file
# is used even if not specified, defaulting to "/var/run/redis.pid".
#
# Creating a pid file is best effort: if Redis is not able to create it
# nothing bad happens, the server will start and run normally.
# pidfile /run/redis/redis-server.pid
# Specify the server verbosity level.
# This can be one of:
# debug (a lot of information, useful for development/testing)
# verbose (many rarely useful info, but not a mess like the debug level)
# notice (moderately verbose, what you want in production probably)
# warning (only very important / critical messages are logged)
loglevel notice
# Specify the log file name. Also the empty string can be used to force
# Redis to log on the standard output. Note that if you use standard
# output for logging but daemonize, logs will be sent to /dev/null
# logfile ""
# To enable logging to the system logger, just set 'syslog-enabled' to yes,
# and optionally update the other syslog parameters to suit your needs.
# syslog-enabled yes
# Specify the syslog identity.
# syslog-ident redis
# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
# syslog-facility local0
# Set the number of databases. The default database is DB 0, you can select
# a different one on a per-connection basis using SELECT <dbid> where
# dbid is a number between 0 and 'databases'-1
databases 1025
################################ SNAPSHOTTING ################################
#
# Save the DB on disk:
#
# save <seconds> <changes>
#
# Will save the DB if both the given number of seconds and the given
# number of write operations against the DB occurred.
#
# In the example below the behaviour will be to save:
# after 900 sec (15 min) if at least 1 key changed
# after 300 sec (5 min) if at least 10 keys changed
# after 60 sec if at least 10000 keys changed
#
# Note: you can disable saving completely by commenting out all "save" lines.
#
# It is also possible to remove all the previously configured save
# points by adding a save directive with a single empty string argument
# like in the following example:
#
# save ""
# save 900 1
# save 300 10
# save 60 10000
# By default Redis will stop accepting writes if RDB snapshots are enabled
# (at least one save point) and the latest background save failed.
# This will make the user aware (in a hard way) that data is not persisting
# on disk properly, otherwise chances are that no one will notice and some
# disaster will happen.
#
# If the background saving process will start working again Redis will
# automatically allow writes again.
#
# However if you have setup your proper monitoring of the Redis server
# and persistence, you may want to disable this feature so that Redis will
# continue to work as usual even if there are problems with disk,
# permissions, and so forth.
stop-writes-on-bgsave-error yes
# Compress string objects using LZF when dump .rdb databases?
# For default that's set to 'yes' as it's almost always a win.
# If you want to save some CPU in the saving child set it to 'no' but
# the dataset will likely be bigger if you have compressible values or keys.
rdbcompression yes
# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
# This makes the format more resistant to corruption but there is a performance
# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
# for maximum performances.
#
# RDB files created with checksum disabled have a checksum of zero that will
# tell the loading code to skip the check.
rdbchecksum yes
# The filename where to dump the DB
dbfilename dump.rdb
# The working directory.
#
# The DB will be written inside this directory, with the filename specified
# above using the 'dbfilename' configuration directive.
#
# The Append Only File will also be created inside this directory.
#
# Note that you must specify a directory here, not a file name.
dir ./
################################# REPLICATION #################################
# Master-Slave replication. Use slaveof to make a Redis instance a copy of
# another Redis server. A few things to understand ASAP about Redis replication.
#
# 1) Redis replication is asynchronous, but you can configure a master to
# stop accepting writes if it appears to be not connected with at least
# a given number of slaves.
# 2) Redis slaves are able to perform a partial resynchronization with the
# master if the replication link is lost for a relatively small amount of
# time. You may want to configure the replication backlog size (see the next
# sections of this file) with a sensible value depending on your needs.
# 3) Replication is automatic and does not need user intervention. After a
# network partition slaves automatically try to reconnect to masters
# and resynchronize with them.
#
# slaveof <masterip> <masterport>
# If the master is password protected (using the "requirepass" configuration
# directive below) it is possible to tell the slave to authenticate before
# starting the replication synchronization process, otherwise the master will
# refuse the slave request.
#
# masterauth <master-password>
# When a slave loses its connection with the master, or when the replication
# is still in progress, the slave can act in two different ways:
#
# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
# still reply to client requests, possibly with out of date data, or the
# data set may just be empty if this is the first synchronization.
#
# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
# an error "SYNC with master in progress" to all the kind of commands
# but to INFO and SLAVEOF.
#
slave-serve-stale-data yes
# You can configure a slave instance to accept writes or not. Writing against
# a slave instance may be useful to store some ephemeral data (because data
# written on a slave will be easily deleted after resync with the master) but
# may also cause problems if clients are writing to it because of a
# misconfiguration.
#
# Since Redis 2.6 by default slaves are read-only.
#
# Note: read only slaves are not designed to be exposed to untrusted clients
# on the internet. It's just a protection layer against misuse of the instance.
# Still a read only slave exports by default all the administrative commands
# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
# security of read only slaves using 'rename-command' to shadow all the
# administrative / dangerous commands.
slave-read-only yes
# Replication SYNC strategy: disk or socket.
#
# -------------------------------------------------------
# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY
# -------------------------------------------------------
#
# New slaves and reconnecting slaves that are not able to continue the replication
# process just receiving differences, need to do what is called a "full
# synchronization". An RDB file is transmitted from the master to the slaves.
# The transmission can happen in two different ways:
#
# 1) Disk-backed: The Redis master creates a new process that writes the RDB
# file on disk. Later the file is transferred by the parent
# process to the slaves incrementally.
# 2) Diskless: The Redis master creates a new process that directly writes the
# RDB file to slave sockets, without touching the disk at all.
#
# With disk-backed replication, while the RDB file is generated, more slaves
# can be queued and served with the RDB file as soon as the current child producing
# the RDB file finishes its work. With diskless replication instead once
# the transfer starts, new slaves arriving will be queued and a new transfer
# will start when the current one terminates.
#
# When diskless replication is used, the master waits a configurable amount of
# time (in seconds) before starting the transfer in the hope that multiple slaves
# will arrive and the transfer can be parallelized.
#
# With slow disks and fast (large bandwidth) networks, diskless replication
# works better.
repl-diskless-sync no
# When diskless replication is enabled, it is possible to configure the delay
# the server waits in order to spawn the child that transfers the RDB via socket
# to the slaves.
#
# This is important since once the transfer starts, it is not possible to serve
# new slaves arriving, that will be queued for the next RDB transfer, so the server
# waits a delay in order to let more slaves arrive.
#
# The delay is specified in seconds, and by default is 5 seconds. To disable
# it entirely just set it to 0 seconds and the transfer will start ASAP.
repl-diskless-sync-delay 5
# Slaves send PINGs to server in a predefined interval. It's possible to change
# this interval with the repl_ping_slave_period option. The default value is 10
# seconds.
#
# repl-ping-slave-period 10
# The following option sets the replication timeout for:
#
# 1) Bulk transfer I/O during SYNC, from the point of view of slave.
# 2) Master timeout from the point of view of slaves (data, pings).
# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings).
#
# It is important to make sure that this value is greater than the value
# specified for repl-ping-slave-period otherwise a timeout will be detected
# every time there is low traffic between the master and the slave.
#
# repl-timeout 60
# Disable TCP_NODELAY on the slave socket after SYNC?
#
# If you select "yes" Redis will use a smaller number of TCP packets and
# less bandwidth to send data to slaves. But this can add a delay for
# the data to appear on the slave side, up to 40 milliseconds with
# Linux kernels using a default configuration.
#
# If you select "no" the delay for data to appear on the slave side will
# be reduced but more bandwidth will be used for replication.
#
# By default we optimize for low latency, but in very high traffic conditions
# or when the master and slaves are many hops away, turning this to "yes" may
# be a good idea.
repl-disable-tcp-nodelay no
# Set the replication backlog size. The backlog is a buffer that accumulates
# slave data when slaves are disconnected for some time, so that when a slave
# wants to reconnect again, often a full resync is not needed, but a partial
# resync is enough, just passing the portion of data the slave missed while
# disconnected.
#
# The bigger the replication backlog, the longer the time the slave can be
# disconnected and later be able to perform a partial resynchronization.
#
# The backlog is only allocated once there is at least a slave connected.
#
# repl-backlog-size 1mb
# After a master has no longer connected slaves for some time, the backlog
# will be freed. The following option configures the amount of seconds that
# need to elapse, starting from the time the last slave disconnected, for
# the backlog buffer to be freed.
#
# A value of 0 means to never release the backlog.
#
# repl-backlog-ttl 3600
# The slave priority is an integer number published by Redis in the INFO output.
# It is used by Redis Sentinel in order to select a slave to promote into a
# master if the master is no longer working correctly.
#
# A slave with a low priority number is considered better for promotion, so
# for instance if there are three slaves with priority 10, 100, 25 Sentinel will
# pick the one with priority 10, that is the lowest.
#
# However a special priority of 0 marks the slave as not able to perform the
# role of master, so a slave with priority of 0 will never be selected by
# Redis Sentinel for promotion.
#
# By default the priority is 100.
slave-priority 100
# It is possible for a master to stop accepting writes if there are less than
# N slaves connected, having a lag less or equal than M seconds.
#
# The N slaves need to be in "online" state.
#
# The lag in seconds, that must be <= the specified value, is calculated from
# the last ping received from the slave, that is usually sent every second.
#
# This option does not GUARANTEE that N replicas will accept the write, but
# will limit the window of exposure for lost writes in case not enough slaves
# are available, to the specified number of seconds.
#
# For example to require at least 3 slaves with a lag <= 10 seconds use:
#
# min-slaves-to-write 3
# min-slaves-max-lag 10
#
# Setting one or the other to 0 disables the feature.
#
# By default min-slaves-to-write is set to 0 (feature disabled) and
# min-slaves-max-lag is set to 10.
# A Redis master is able to list the address and port of the attached
# slaves in different ways. For example the "INFO replication" section
# offers this information, which is used, among other tools, by
# Redis Sentinel in order to discover slave instances.
# Another place where this info is available is in the output of the
# "ROLE" command of a master.
#
# The listed IP and address normally reported by a slave is obtained
# in the following way:
#
# IP: The address is auto detected by checking the peer address
# of the socket used by the slave to connect with the master.
#
# Port: The port is communicated by the slave during the replication
# handshake, and is normally the port that the slave is using to
# listen for connections.
#
# However when port forwarding or Network Address Translation (NAT) is
# used, the slave may be actually reachable via different IP and port
# pairs. The following two options can be used by a slave in order to
# report to its master a specific set of IP and port, so that both INFO
# and ROLE will report those values.
#
# There is no need to use both the options if you need to override just
# the port or the IP address.
#
# slave-announce-ip 5.5.5.5
# slave-announce-port 1234
################################## SECURITY ###################################
# Require clients to issue AUTH before processing any other
# commands. This might be useful in environments in which you do not trust
# others with access to the host running redis-server.
#
# This should stay commented out for backward compatibility and because most
# people do not need auth (e.g. they run their own servers).
#
# Warning: since Redis is pretty fast an outside user can try up to
# 150k passwords per second against a good box. This means that you should
# use a very strong password otherwise it will be very easy to break.
#
# requirepass foobared
# Command renaming.
#
# It is possible to change the name of dangerous commands in a shared
# environment. For instance the CONFIG command may be renamed into something
# hard to guess so that it will still be available for internal-use tools
# but not available for general clients.
#
# Example:
#
# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
#
# It is also possible to completely kill a command by renaming it into
# an empty string:
#
# rename-command CONFIG ""
#
# Please note that changing the name of commands that are logged into the
# AOF file or transmitted to slaves may cause problems.
################################### LIMITS ####################################
# Set the max number of connected clients at the same time. By default
# this limit is set to 10000 clients, however if the Redis server is not
# able to configure the process file limit to allow for the specified limit
# the max number of allowed clients is set to the current file limit
# minus 32 (as Redis reserves a few file descriptors for internal uses).
#
# Once the limit is reached Redis will close all the new connections sending
# an error 'max number of clients reached'.
#
maxclients 10000
# Don't use more memory than the specified amount of bytes.
# When the memory limit is reached Redis will try to remove keys
# according to the eviction policy selected (see maxmemory-policy).
#
# If Redis can't remove keys according to the policy, or if the policy is
# set to 'noeviction', Redis will start to reply with errors to commands
# that would use more memory, like SET, LPUSH, and so on, and will continue
# to reply to read-only commands like GET.
#
# This option is usually useful when using Redis as an LRU cache, or to set
# a hard memory limit for an instance (using the 'noeviction' policy).
#
# WARNING: If you have slaves attached to an instance with maxmemory on,
# the size of the output buffers needed to feed the slaves are subtracted
# from the used memory count, so that network problems / resyncs will
# not trigger a loop where keys are evicted, and in turn the output
# buffer of slaves is full with DELs of keys evicted triggering the deletion
# of more keys, and so forth until the database is completely emptied.
#
# In short... if you have slaves attached it is suggested that you set a lower
# limit for maxmemory so that there is some free RAM on the system for slave
# output buffers (but this is not needed if the policy is 'noeviction').
#
# maxmemory <bytes>
# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
# is reached. You can select among five behaviors:
#
# volatile-lru -> remove the key with an expire set using an LRU algorithm
# allkeys-lru -> remove any key according to the LRU algorithm
# volatile-random -> remove a random key with an expire set
# allkeys-random -> remove a random key, any key
# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
# noeviction -> don't expire at all, just return an error on write operations
#
# Note: with any of the above policies, Redis will return an error on write
# operations, when there are no suitable keys for eviction.
#
# At the date of writing these commands are: set setnx setex append
# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
# getset mset msetnx exec sort
#
# The default is:
#
# maxmemory-policy noeviction
# LRU and minimal TTL algorithms are not precise algorithms but approximated
# algorithms (in order to save memory), so you can tune it for speed or
# accuracy. For default Redis will check five keys and pick the one that was
# used less recently, you can change the sample size using the following
# configuration directive.
#
# The default of 5 produces good enough results. 10 Approximates very closely
# true LRU but costs a bit more CPU. 3 is very fast but not very accurate.
#
# maxmemory-samples 5
############################## APPEND ONLY MODE ###############################
# By default Redis asynchronously dumps the dataset on disk. This mode is
# good enough in many applications, but an issue with the Redis process or
# a power outage may result into a few minutes of writes lost (depending on
# the configured save points).
#
# The Append Only File is an alternative persistence mode that provides
# much better durability. For instance using the default data fsync policy
# (see later in the config file) Redis can lose just one second of writes in a
# dramatic event like a server power outage, or a single write if something
# wrong with the Redis process itself happens, but the operating system is
# still running correctly.
#
# AOF and RDB persistence can be enabled at the same time without problems.
# If the AOF is enabled on startup Redis will load the AOF, that is the file
# with the better durability guarantees.
#
# Please check http://redis.io/topics/persistence for more information.
appendonly no
# The name of the append only file (default: "appendonly.aof")
appendfilename "appendonly.aof"
# The fsync() call tells the Operating System to actually write data on disk
# instead of waiting for more data in the output buffer. Some OS will really flush
# data on disk, some other OS will just try to do it ASAP.
#
# Redis supports three different modes:
#
# no: don't fsync, just let the OS flush the data when it wants. Faster.
# always: fsync after every write to the append only log. Slow, Safest.
# everysec: fsync only one time every second. Compromise.
#
# The default is "everysec", as that's usually the right compromise between
# speed and data safety. It's up to you to understand if you can relax this to
# "no" that will let the operating system flush the output buffer when
# it wants, for better performances (but if you can live with the idea of
# some data loss consider the default persistence mode that's snapshotting),
# or on the contrary, use "always" that's very slow but a bit safer than
# everysec.
#
# More details please check the following article:
# http://antirez.com/post/redis-persistence-demystified.html
#
# If unsure, use "everysec".
# appendfsync always
appendfsync everysec
# appendfsync no
# When the AOF fsync policy is set to always or everysec, and a background
# saving process (a background save or AOF log background rewriting) is
# performing a lot of I/O against the disk, in some Linux configurations
# Redis may block too long on the fsync() call. Note that there is no fix for
# this currently, as even performing fsync in a different thread will block
# our synchronous write(2) call.
#
# In order to mitigate this problem it's possible to use the following option
# that will prevent fsync() from being called in the main process while a
# BGSAVE or BGREWRITEAOF is in progress.
#
# This means that while another child is saving, the durability of Redis is
# the same as "appendfsync none". In practical terms, this means that it is
# possible to lose up to 30 seconds of log in the worst scenario (with the
# default Linux settings).
#
# If you have latency problems turn this to "yes". Otherwise leave it as
# "no" that is the safest pick from the point of view of durability.
no-appendfsync-on-rewrite no
# Automatic rewrite of the append only file.
# Redis is able to automatically rewrite the log file implicitly calling
# BGREWRITEAOF when the AOF log size grows by the specified percentage.
#
# This is how it works: Redis remembers the size of the AOF file after the
# latest rewrite (if no rewrite has happened since the restart, the size of
# the AOF at startup is used).
#
# This base size is compared to the current size. If the current size is
# bigger than the specified percentage, the rewrite is triggered. Also
# you need to specify a minimal size for the AOF file to be rewritten, this
# is useful to avoid rewriting the AOF file even if the percentage increase
# is reached but it is still pretty small.
#
# Specify a percentage of zero in order to disable the automatic AOF
# rewrite feature.
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
# An AOF file may be found to be truncated at the end during the Redis
# startup process, when the AOF data gets loaded back into memory.
# This may happen when the system where Redis is running
# crashes, especially when an ext4 filesystem is mounted without the
# data=ordered option (however this can't happen when Redis itself
# crashes or aborts but the operating system still works correctly).
#
# Redis can either exit with an error when this happens, or load as much
# data as possible (the default now) and start if the AOF file is found
# to be truncated at the end. The following option controls this behavior.
#
# If aof-load-truncated is set to yes, a truncated AOF file is loaded and
# the Redis server starts emitting a log to inform the user of the event.
# Otherwise if the option is set to no, the server aborts with an error
# and refuses to start. When the option is set to no, the user requires
# to fix the AOF file using the "redis-check-aof" utility before to restart
# the server.
#
# Note that if the AOF file will be found to be corrupted in the middle
# the server will still exit with an error. This option only applies when
# Redis will try to read more data from the AOF file but not enough bytes
# will be found.
aof-load-truncated yes
################################ LUA SCRIPTING ###############################
# Max execution time of a Lua script in milliseconds.
#
# If the maximum execution time is reached Redis will log that a script is
# still in execution after the maximum allowed time and will start to
# reply to queries with an error.
#
# When a long running script exceeds the maximum execution time only the
# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
# used to stop a script that did not yet called write commands. The second
# is the only way to shut down the server in the case a write command was
# already issued by the script but the user doesn't want to wait for the natural
# termination of the script.
#
# Set it to 0 or a negative value for unlimited execution without warnings.
lua-time-limit 5000
################################ REDIS CLUSTER ###############################
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however
# in order to mark it as "mature" we need to wait for a non trivial percentage
# of users to deploy it in production.
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# Normal Redis instances can't be part of a Redis Cluster; only nodes that are
# started as cluster nodes can. In order to start a Redis instance as a
# cluster node enable the cluster support uncommenting the following:
#
# cluster-enabled yes
# Every cluster node has a cluster configuration file. This file is not
# intended to be edited by hand. It is created and updated by Redis nodes.
# Every Redis Cluster node requires a different cluster configuration file.
# Make sure that instances running in the same system do not have
# overlapping cluster configuration file names.
#
# cluster-config-file nodes-6379.conf
# Cluster node timeout is the amount of milliseconds a node must be unreachable
# for it to be considered in failure state.
# Most other internal time limits are multiple of the node timeout.
#
# cluster-node-timeout 15000
# A slave of a failing master will avoid to start a failover if its data
# looks too old.
#
# There is no simple way for a slave to actually have a exact measure of
# its "data age", so the following two checks are performed:
#
# 1) If there are multiple slaves able to failover, they exchange messages
# in order to try to give an advantage to the slave with the best
# replication offset (more data from the master processed).
# Slaves will try to get their rank by offset, and apply to the start
# of the failover a delay proportional to their rank.
#
# 2) Every single slave computes the time of the last interaction with
# its master. This can be the last ping or command received (if the master
# is still in the "connected" state), or the time that elapsed since the
# disconnection with the master (if the replication link is currently down).
# If the last interaction is too old, the slave will not try to failover
# at all.
#
# The point "2" can be tuned by user. Specifically a slave will not perform
# the failover if, since the last interaction with the master, the time
# elapsed is greater than:
#
# (node-timeout * slave-validity-factor) + repl-ping-slave-period
#
# So for example if node-timeout is 30 seconds, and the slave-validity-factor
# is 10, and assuming a default repl-ping-slave-period of 10 seconds, the
# slave will not try to failover if it was not able to talk with the master
# for longer than 310 seconds.
#
# A large slave-validity-factor may allow slaves with too old data to failover
# a master, while a too small value may prevent the cluster from being able to
# elect a slave at all.
#
# For maximum availability, it is possible to set the slave-validity-factor
# to a value of 0, which means, that slaves will always try to failover the
# master regardless of the last time they interacted with the master.
# (However they'll always try to apply a delay proportional to their
# offset rank).
#
# Zero is the only value able to guarantee that when all the partitions heal
# the cluster will always be able to continue.
#
# cluster-slave-validity-factor 10
# Cluster slaves are able to migrate to orphaned masters, that are masters
# that are left without working slaves. This improves the cluster ability
# to resist to failures as otherwise an orphaned master can't be failed over
# in case of failure if it has no working slaves.
#
# Slaves migrate to orphaned masters only if there are still at least a
# given number of other working slaves for their old master. This number
# is the "migration barrier". A migration barrier of 1 means that a slave
# will migrate only if there is at least 1 other working slave for its master
# and so forth. It usually reflects the number of slaves you want for every
# master in your cluster.
#
# Default is 1 (slaves migrate only if their masters remain with at least
# one slave). To disable migration just set it to a very large value.
# A value of 0 can be set but is useful only for debugging and dangerous
# in production.
#
# cluster-migration-barrier 1
# By default Redis Cluster nodes stop accepting queries if they detect there
# is at least an hash slot uncovered (no available node is serving it).
# This way if the cluster is partially down (for example a range of hash slots
# are no longer covered) all the cluster becomes, eventually, unavailable.
# It automatically returns available as soon as all the slots are covered again.
#
# However sometimes you want the subset of the cluster which is working,
# to continue to accept queries for the part of the key space that is still
# covered. In order to do so, just set the cluster-require-full-coverage
# option to no.
#
# cluster-require-full-coverage yes
# In order to setup your cluster make sure to read the documentation
# available at http://redis.io web site.
################################## SLOW LOG ###################################
# The Redis Slow Log is a system to log queries that exceeded a specified
# execution time. The execution time does not include the I/O operations
# like talking with the client, sending the reply and so forth,
# but just the time needed to actually execute the command (this is the only
# stage of command execution where the thread is blocked and can not serve
# other requests in the meantime).
#
# You can configure the slow log with two parameters: one tells Redis
# what is the execution time, in microseconds, to exceed in order for the
# command to get logged, and the other parameter is the length of the
# slow log. When a new command is logged the oldest one is removed from the
# queue of logged commands.
# The following time is expressed in microseconds, so 1000000 is equivalent
# to one second. Note that a negative number disables the slow log, while
# a value of zero forces the logging of every command.
slowlog-log-slower-than 10000
# There is no limit to this length. Just be aware that it will consume memory.
# You can reclaim memory used by the slow log with SLOWLOG RESET.
slowlog-max-len 128
################################ LATENCY MONITOR ##############################
# The Redis latency monitoring subsystem samples different operations
# at runtime in order to collect data related to possible sources of
# latency of a Redis instance.
#
# Via the LATENCY command this information is available to the user that can
# print graphs and obtain reports.
#
# The system only logs operations that were performed in a time equal or
# greater than the amount of milliseconds specified via the
# latency-monitor-threshold configuration directive. When its value is set
# to zero, the latency monitor is turned off.
#
# By default latency monitoring is disabled since it is mostly not needed
# if you don't have latency issues, and collecting data has a performance
# impact, that while very small, can be measured under big load. Latency
# monitoring can easily be enabled at runtime using the command
# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
latency-monitor-threshold 0
############################# EVENT NOTIFICATION ##############################
# Redis can notify Pub/Sub clients about events happening in the key space.
# This feature is documented at http://redis.io/topics/notifications
#
# For instance if keyspace events notification is enabled, and a client
# performs a DEL operation on key "foo" stored in the Database 0, two
# messages will be published via Pub/Sub:
#
# PUBLISH __keyspace@0__:foo del
# PUBLISH __keyevent@0__:del foo
#
# It is possible to select the events that Redis will notify among a set
# of classes. Every class is identified by a single character:
#
# K Keyspace events, published with __keyspace@<db>__ prefix.
# E Keyevent events, published with __keyevent@<db>__ prefix.
# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
# $ String commands
# l List commands
# s Set commands
# h Hash commands
# z Sorted set commands
# x Expired events (events generated every time a key expires)
# e Evicted events (events generated when a key is evicted for maxmemory)
# A Alias for g$lshzxe, so that the "AKE" string means all the events.
#
# The "notify-keyspace-events" takes as argument a string that is composed
# of zero or multiple characters. The empty string means that notifications
# are disabled.
#
# Example: to enable list and generic events, from the point of view of the
# event name, use:
#
# notify-keyspace-events Elg
#
# Example 2: to get the stream of the expired keys subscribing to channel
# name __keyevent@0__:expired use:
#
# notify-keyspace-events Ex
#
# By default all notifications are disabled because most users don't need
# this feature and the feature has some overhead. Note that if you don't
# specify at least one of K or E, no events will be delivered.
notify-keyspace-events ""
############################### ADVANCED CONFIG ###############################
# Hashes are encoded using a memory efficient data structure when they have a
# small number of entries, and the biggest entry does not exceed a given
# threshold. These thresholds can be configured using the following directives.
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
# Lists are also encoded in a special way to save a lot of space.
# The number of entries allowed per internal list node can be specified
# as a fixed maximum size or a maximum number of elements.
# For a fixed maximum size, use -5 through -1, meaning:
# -5: max size: 64 Kb <-- not recommended for normal workloads
# -4: max size: 32 Kb <-- not recommended
# -3: max size: 16 Kb <-- probably not recommended
# -2: max size: 8 Kb <-- good
# -1: max size: 4 Kb <-- good
# Positive numbers mean store up to _exactly_ that number of elements
# per list node.
# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size),
# but if your use case is unique, adjust the settings as necessary.
list-max-ziplist-size -2
# Lists may also be compressed.
# Compress depth is the number of quicklist ziplist nodes from *each* side of
# the list to *exclude* from compression. The head and tail of the list
# are always uncompressed for fast push/pop operations. Settings are:
# 0: disable all list compression
# 1: depth 1 means "don't start compressing until after 1 node into the list,
# going from either the head or tail"
# So: [head]->node->node->...->node->[tail]
# [head], [tail] will always be uncompressed; inner nodes will compress.
# 2: [head]->[next]->node->node->...->node->[prev]->[tail]
# 2 here means: don't compress head or head->next or tail->prev or tail,
# but compress all nodes between them.
# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail]
# etc.
list-compress-depth 0
# Sets have a special encoding in just one case: when a set is composed
# of just strings that happen to be integers in radix 10 in the range
# of 64 bit signed integers.
# The following configuration setting sets the limit in the size of the
# set in order to use this special memory saving encoding.
set-max-intset-entries 512
# Similarly to hashes and lists, sorted sets are also specially encoded in
# order to save a lot of space. This encoding is only used when the length and
# elements of a sorted set are below the following limits:
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
# HyperLogLog sparse representation bytes limit. The limit includes the
# 16 bytes header. When an HyperLogLog using the sparse representation crosses
# this limit, it is converted into the dense representation.
#
# A value greater than 16000 is totally useless, since at that point the
# dense representation is more memory efficient.
#
# The suggested value is ~ 3000 in order to have the benefits of
# the space efficient encoding without slowing down too much PFADD,
# which is O(N) with the sparse encoding. The value can be raised to
# ~ 10000 when CPU is not a concern, but space is, and the data set is
# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
hll-sparse-max-bytes 3000
# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
# order to help rehashing the main Redis hash table (the one mapping top-level
# keys to values). The hash table implementation Redis uses (see dict.c)
# performs a lazy rehashing: the more operation you run into a hash table
# that is rehashing, the more rehashing "steps" are performed, so if the
# server is idle the rehashing is never complete and some more memory is used
# by the hash table.
#
# The default is to use this millisecond 10 times every second in order to
# actively rehash the main dictionaries, freeing memory when possible.
#
# If unsure:
# use "activerehashing no" if you have hard latency requirements and it is
# not a good thing in your environment that Redis can reply from time to time
# to queries with 2 milliseconds delay.
#
# use "activerehashing yes" if you don't have such hard requirements but
# want to free memory asap when possible.
activerehashing yes
# The client output buffer limits can be used to force disconnection of clients
# that are not reading data from the server fast enough for some reason (a
# common reason is that a Pub/Sub client can't consume messages as fast as the
# publisher can produce them).
#
# The limit can be set differently for the three different classes of clients:
#
# normal -> normal clients including MONITOR clients
# slave -> slave clients
# pubsub -> clients subscribed to at least one pubsub channel or pattern
#
# The syntax of every client-output-buffer-limit directive is the following:
#
# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
#
# A client is immediately disconnected once the hard limit is reached, or if
# the soft limit is reached and remains reached for the specified number of
# seconds (continuously).
# So for instance if the hard limit is 32 megabytes and the soft limit is
# 16 megabytes / 10 seconds, the client will get disconnected immediately
# if the size of the output buffers reach 32 megabytes, but will also get
# disconnected if the client reaches 16 megabytes and continuously overcomes
# the limit for 10 seconds.
#
# By default normal clients are not limited because they don't receive data
# without asking (in a push way), but just after a request, so only
# asynchronous clients may create a scenario where data is requested faster
# than it can read.
#
# Instead there is a default limit for pubsub and slave clients, since
# subscribers and slaves receive data in a push fashion.
#
# Both the hard or the soft limit can be disabled by setting them to zero.
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
# Redis calls an internal function to perform many background tasks, like
# closing connections of clients in timeout, purging expired keys that are
# never requested, and so forth.
#
# Not all tasks are performed with the same frequency, but Redis checks for
# tasks to perform according to the specified "hz" value.
#
# By default "hz" is set to 10. Raising the value will use more CPU when
# Redis is idle, but at the same time will make Redis more responsive when
# there are many keys expiring at the same time, and timeouts may be
# handled with more precision.
#
# The range is between 1 and 500, however a value over 100 is usually not
# a good idea. Most users should use the default of 10 and raise this up to
# 100 only in environments where very low latency is required.
hz 10
# When a child rewrites the AOF file, if the following option is enabled
# the file will be fsync-ed every 32 MB of data generated. This is useful
# in order to commit the file to the disk more incrementally and avoid
# big latency spikes.
aof-rewrite-incremental-fsync yes
ospd-openvas-22.9.0/smoketest/run-tests.sh 0000664 0000000 0000000 00000001674 15011310720 0020536 0 ustar 00root root 0000000 0000000 #!/usr/bin/env bash
# SPDX-FileCopyrightText: 2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
# Is a convenience script to start redis, ospd-openvas and execute smoketests
shutdown() {
kill $(cat /run/ospd/ospd.pid) || true
kill $(cat /tmp/mosquitto.pid) || true
kill $(grep -o "Pidfile.*" /etc/ssh/sshd_config | awk '{printf $2}') || true
redis-cli -s /run/redis/redis.sock SHUTDOWN
}
trap shutdown EXIT
set -e
mosquitto -c /etc/mosquitto.conf &
redis-server /etc/redis/redis.conf
/usr/sbin/sshd
ospd-openvas --disable-notus-hashsum-verification True \
-u /run/ospd/ospd-openvas.sock \
-l /var/log/gvm/ospd.log
wait_turn=0
while [ ! -S /run/ospd/ospd-openvas.sock ]; do
if [ $wait_turn -eq 10 ]; then
printf "too many attempts to find ospd-openvas.sock\n"
exit 1
fi
printf "waiting for ospd-openvas.socket ($wait_turn)\n"
sleep 1
wait_turn=$(($wait_turn + 1))
done
/usr/local/bin/ospd-openvas-smoketests
ospd-openvas-22.9.0/smoketest/scan/ 0000775 0000000 0000000 00000000000 15011310720 0017152 5 ustar 00root root 0000000 0000000 ospd-openvas-22.9.0/smoketest/scan/init.go 0000664 0000000 0000000 00000011566 15011310720 0020455 0 ustar 00root root 0000000 0000000 // SPDX-FileCopyrightText: 2023 Greenbone AG
//
// SPDX-License-Identifier: AGPL-3.0-or-later
package scan
import "encoding/xml"
type ScannerParam struct {
TargetPort string `xml:"target_port,omitempty"`
UseHttps int `xml:"use_https,omitempty"`
Profile string `xml:"profile,omitempty"`
TableDrivenLSC string `xml:"table_driven_lsc,omitempty"`
}
var DefaultScannerParams = []ScannerParam{
{},
}
var DisableNotus = []ScannerParam{
{TableDrivenLSC: "0"},
}
type VTValue struct {
ID string `xml:"id,attr,omitempty"`
Value string `xml:",chardata"`
}
type VTSingle struct {
ID string `xml:"id,attr,omitempty"`
Values []VTValue `xml:"vt_value,omitempty"`
}
type VTGroup struct {
Filter string `xml:"filter,attr,omitempty"`
}
type VTSelection struct {
Single []VTSingle `xml:"vt_single,omitempty"`
Group []VTGroup `xml:"vt_group,omitempty"`
}
type Credential struct {
Type string `xml:"type,attr,omitempty"`
Service string `xml:"service,attr,omitempty"`
Port string `xml:"port,attr,omitempty"`
Username string `xml:"username,omitempty"`
Password string `xml:"password,omitempty"`
}
type Credentials struct {
XMLName xml.Name `xml:"credentials"`
Credentials []Credential `xml:"credential"`
}
type AliveTestMethods struct {
ICMP int `xml:"icmp,omitempty"`
TCPSYN int `xml:"tcp_syn,omitempty"`
TCPACK int `xml:"tcp_ack,omitempty"`
ARP int `xml:"arp,omitempty"`
ConsiderAlive int `xml:"consider_alive,omitempty"`
}
var ConsiderAlive AliveTestMethods = AliveTestMethods{
ConsiderAlive: 1,
}
var Alive = AliveTestMethods{
ICMP: 1,
TCPSYN: 1,
TCPACK: 1,
ARP: 1,
ConsiderAlive: 0,
}
type Target struct {
XMLName xml.Name `xml:"target"`
Hosts string `xml:"hosts,omitempty"`
Ports string `xml:"ports,omitempty"`
Credentials Credentials `xml:"credentials,omitempty"`
ExcludedHosts string `xml:"excluded_hosts,omitempty"`
FinishedHosts string `xml:"finished_hosts,omitempty"`
AliveTestPorts string `xml:"alive_test_ports,omitempty"`
AliveTest int `xml:"alive_test,omitempty"`
AliveTestMethods AliveTestMethods `xml:"alive_test_methods,omitempty"`
ReverseLookupUnify bool `xml:"reverse_lookup_unify,omitempty"`
ReverseLookupOnly bool `xml:"reverse_lookup_only,omitempty"`
}
type Targets struct {
XMLName xml.Name `xml:"targets"`
Targets []Target
}
type Start struct {
XMLName xml.Name `xml:"start_scan"`
Target string `xml:"target,attr,omitempty"`
Ports string `xml:"ports,attr,omitempty"`
ID string `xml:"scan_id,attr,omitempty"`
Parallel int `xml:"parallel,attr,omitempty"`
ScannerParams []ScannerParam `xml:"scanner_params"`
VTSelection []VTSelection `xml:"vt_selection,omitempty"`
Targets Targets `xml:"targets"`
}
type StatusCodeResponse struct {
Text string `xml:"status_text,attr,omitempty"`
Code string `xml:"status,attr,omitempty"`
}
type StartResponse struct {
XMLName xml.Name `xml:"start_scan_response"`
ID string `xml:"id,omitempty"`
StatusCodeResponse
}
type Delete struct {
XMLName xml.Name `xml:"delete_scan"`
ID string `xml:"scan_id,attr,omitempty"`
}
type DeleteResponse struct {
XMLName xml.Name `xml:"delete_scan_response"`
StatusCodeResponse
}
type Stop struct {
XMLName xml.Name `xml:"stop_scan"`
ID string `xml:"scan_id,attr,omitempty"`
}
type StopResponse struct {
XMLName xml.Name `xml:"stop_scan_response"`
StatusCodeResponse
}
type GetScans struct {
XMLName xml.Name `xml:"get_scans"`
ID string `xml:"scan_id,attr,omitempty"`
Details bool `xml:"details,attr,omitempty"`
Progress bool `xml:"progress,attr,omitempty"`
PopResults bool `xml:"pop_results,attr,omitempty"`
MaxResults int `xml:"max_results,attr,omitempty"`
}
type Result struct {
Host string `xml:"host,attr,omitempty"`
HostName string `xml:"hostname,attr,omitempty"`
Severity string `xml:"severity,attr,omitempty"`
QOD string `xml:"qod,attr,omitempty"`
Port string `xml:"port,attr,omitempty"`
TestID string `xml:"test_id,attr,omitempty"`
Name string `xml:"name,attr,omitempty"`
Type string `xml:"type,attr,omitempty"`
Value string `xml:",chardata"`
}
type Results struct {
Results []Result `xml:"result,omitempty"`
}
type Scan struct {
ID string `xml:"id,attr"`
Target string `xml:"target,attr"`
StartTime string `xml:"start_time,attr"`
EndTime string `xml:"end_time,attr"`
Progress int `xml:"progress,attr"`
Status string `xml:"status,attr"`
Results Results `xml:"results,omitempty"`
}
type GetScansResponse struct {
XMLName xml.Name `xml:"get_scans_response"`
StatusCodeResponse
Scan Scan `xml:"scan"`
}
ospd-openvas-22.9.0/smoketest/usecases/ 0000775 0000000 0000000 00000000000 15011310720 0020041 5 ustar 00root root 0000000 0000000 ospd-openvas-22.9.0/smoketest/usecases/init.go 0000664 0000000 0000000 00000010351 15011310720 0021333 0 ustar 00root root 0000000 0000000 // SPDX-FileCopyrightText: 2023 Greenbone AG
//
// SPDX-License-Identifier: AGPL-3.0-or-later
package usecases
import (
"fmt"
"github.com/greenbone/ospd-openvas/smoketest/connection"
"github.com/greenbone/ospd-openvas/smoketest/scan"
)
type Runner func(connection.OSPDSender) Response
type Test struct {
Title string
Run Runner
}
type Response struct {
Success bool
Description string
}
type Tests struct {
Title string
UseCases []Test
}
func (ouc Tests) Run(co connection.OSPDSender) []Response {
result := make([]Response, len(ouc.UseCases))
fmt.Printf("Testing %s\n", ouc.Title)
for i, t := range ouc.UseCases {
fmt.Printf("\t%s\t", t.Title)
result[i] = t.Run(co)
if !result[i].Success {
fmt.Printf("\x1B[31mX\x1B[0m\n")
} else {
fmt.Printf("\x1B[32m✓\x1B[0m\n")
}
}
return result
}
func WrongStatusCodeResponse(response scan.StatusCodeResponse) *Response {
return &Response{
Success: false,
Description: fmt.Sprintf("Wrong status code(%s): %s", response.Code, response.Text),
}
}
func WrongScanStatus(expected, got string) *Response {
return &Response{
Success: false,
Description: fmt.Sprintf("Expected %s but got %s as a Scan.Status", expected, got),
}
}
func ScanStatusFinished(status string) bool {
switch status {
case "interrupted", "finished", "stopped", "failed":
return true
default:
return false
}
}
type GetScanResponseFailure struct {
Resp scan.GetScansResponse
Failure *Response
}
func VerifyGet(get scan.GetScans, co connection.OSPDSender, status string) GetScanResponseFailure {
var result GetScanResponseFailure
if err := co.SendCommand(get, &result.Resp); err != nil {
panic(err)
}
if result.Resp.Code != "200" {
result.Failure = WrongStatusCodeResponse(result.Resp.StatusCodeResponse)
return result
}
if result.Resp.Scan.Status != status {
result.Failure = WrongScanStatus(status, result.Resp.Scan.Status)
}
return result
}
func VerifyTillNextState(get scan.GetScans, co connection.OSPDSender, status string) GetScanResponseFailure {
if r := VerifyGet(get, co, status); r.Failure != nil {
return r
}
return TillNextState(get, co, status)
}
func TillNextState(get scan.GetScans, co connection.OSPDSender, status string) GetScanResponseFailure {
var result GetScanResponseFailure
result.Resp.Scan.Status = status
for result.Resp.Scan.Status == status {
result.Resp = scan.GetScansResponse{}
if err := co.SendCommand(get, &result.Resp); err != nil {
panic(err)
}
if result.Resp.Code != "200" {
result.Failure = WrongStatusCodeResponse(result.Resp.StatusCodeResponse)
break
}
}
return result
}
func TillState(get scan.GetScans, co connection.OSPDSender, status string) GetScanResponseFailure {
var result GetScanResponseFailure
result.Resp.Scan.Status = status
for !ScanStatusFinished(result.Resp.Scan.Status) && result.Resp.Scan.Status != status {
result.Resp = scan.GetScansResponse{}
if err := co.SendCommand(get, &result.Resp); err != nil {
panic(err)
}
if result.Resp.Code != "200" {
result.Failure = WrongStatusCodeResponse(result.Resp.StatusCodeResponse)
break
}
}
if result.Failure == nil && result.Resp.Scan.Status != status {
result.Failure = WrongScanStatus(status, result.Resp.Scan.Status)
}
return result
}
type MessageHandler interface {
Each(scan.GetScansResponse)
Last(scan.GetScansResponse)
}
func StartScanGetLastStatus(start scan.Start, co connection.OSPDSender, mhs ...MessageHandler) GetScanResponseFailure {
var result GetScanResponseFailure
var startR scan.StartResponse
if err := co.SendCommand(start, &startR); err != nil {
panic(err)
}
if startR.Code != "200" {
result.Failure = WrongStatusCodeResponse(startR.StatusCodeResponse)
return result
}
get := scan.GetScans{ID: startR.ID}
for !ScanStatusFinished(result.Resp.Scan.Status) {
// reset to not contain previous results
result.Resp = scan.GetScansResponse{}
if err := co.SendCommand(get, &result.Resp); err != nil {
panic(err)
}
for _, mh := range mhs {
if mh != nil {
mh.Each(result.Resp)
}
}
if result.Resp.Code != "200" {
result.Failure = WrongStatusCodeResponse(result.Resp.StatusCodeResponse)
return result
}
}
for _, mh := range mhs {
if mh != nil {
mh.Last(result.Resp)
}
}
return result
}
ospd-openvas-22.9.0/smoketest/usecases/notus/ 0000775 0000000 0000000 00000000000 15011310720 0021211 5 ustar 00root root 0000000 0000000 ospd-openvas-22.9.0/smoketest/usecases/notus/init.go 0000664 0000000 0000000 00000007122 15011310720 0022505 0 ustar 00root root 0000000 0000000 // SPDX-FileCopyrightText: 2023 Greenbone AG
//
// SPDX-License-Identifier: AGPL-3.0-or-later
/*
Package notus is testing ospd-openvas functionality specific to enable notus.
*/
package notus
import (
"fmt"
"strings"
"github.com/greenbone/ospd-openvas/smoketest/connection"
"github.com/greenbone/ospd-openvas/smoketest/scan"
uc "github.com/greenbone/ospd-openvas/smoketest/usecases"
"github.com/greenbone/ospd-openvas/smoketest/vt"
)
// includeNotusAdvisory verifies that a Notus advisory is part of the get_vts
// response and occurs exactly once.
func includeNotusAdvisory() uc.Test {
	const oid = "1.3.6.1.4.1.25623.1.0.42"
	return uc.Test{
		Title: "Advisories are included within GetVTs",
		Run: func(co connection.OSPDSender) uc.Response {
			response := vt.GetVTsResponse{}
			if err := co.SendCommand(vt.Get{ID: oid}, &response); err != nil {
				panic(err)
			}
			found := len(response.VTs.VT)
			return uc.Response{
				Success:     found == 1,
				Description: fmt.Sprintf("Expected to find %s once but found %d times", oid, found),
			}
		},
	}
}
// overridesNVTS verifies that a Notus advisory takes precedence over the
// NASL script with the same OID when VT details are fetched.
func overridesNVTS() uc.Test {
	return uc.Test{
		Title: "Advisories override NASL",
		Run: func(co connection.OSPDSender) uc.Response {
			const oid = "1.3.6.1.4.1.25623.1.0.90022"
			// Note: the spelling "overriden" matches the name delivered by
			// the test feed; do not "fix" it here.
			const expected = "NOTUS: should be overriden in get_nvts"
			response := vt.GetVTsResponse{}
			if err := co.SendCommand(vt.Get{ID: oid}, &response); err != nil {
				panic(err)
			}
			if got := len(response.VTs.VT); got != 1 {
				return uc.Response{
					Success:     false,
					Description: fmt.Sprintf("Expected to find '%s' once but found %d times", oid, got),
				}
			}
			name := response.VTs.VT[0].Name
			return uc.Response{
				Success:     name == expected,
				Description: fmt.Sprintf("Expected '%s' to be '%s'", name, expected),
			}
		},
	}
}
// containNotusResults runs an authenticated scan against localhost while a
// simulated Notus instance is listening on MQTT and verifies that at least
// one Notus result ("Vulnerable package ...") shows up in the scan results.
func containNotusResults(username, password string) uc.Test {
	return uc.Test{
		Title: "contain results",
		Run: func(co connection.OSPDSender) uc.Response {
			// gatherpackagelist is a dependency for notus
			const oid = "1.3.6.1.4.1.25623.1.0.50282"
			fail := func(err error) uc.Response {
				return uc.Response{
					Description: err.Error(),
					Success:     false,
				}
			}
			server, err := NewServer("localhost:1883")
			if err != nil {
				return fail(err)
			}
			if err = server.Connect(); err != nil {
				return fail(err)
			}
			defer server.Close()
			selection := scan.VTSelection{
				Single: []scan.VTSingle{{ID: oid}},
			}
			target := scan.Target{
				Hosts:            "localhost",
				Ports:            "22",
				AliveTestMethods: scan.ConsiderAlive,
				Credentials: scan.Credentials{
					Credentials: []scan.Credential{{
						Type:     "up",
						Service:  "ssh",
						Username: username,
						Password: password,
					}},
				},
			}
			start := scan.Start{
				Targets:       scan.Targets{Targets: []scan.Target{target}},
				VTSelection:   []scan.VTSelection{selection},
				ScannerParams: scan.DefaultScannerParams,
			}
			resp := uc.StartScanGetLastStatus(start, co)
			if resp.Failure != nil {
				return *resp.Failure
			}
			seen := ""
			for _, r := range resp.Resp.Scan.Results.Results {
				if strings.HasPrefix(r.Value, "Vulnerable package") {
					return uc.Response{
						Description: fmt.Sprintf("results contained Notus result: %s", r.Value),
						Success:     true,
					}
				}
				// Collect every inspected value for the failure message.
				seen = fmt.Sprintf("%s,%s", r.Value, seen)
			}
			return uc.Response{
				Description: fmt.Sprintf("no indicator for Notus results found in: %s\n", seen),
				Success:     false,
			}
		},
	}
}
// Create assembles all Notus related use cases.
func Create(user, pass string) uc.Tests {
	cases := []uc.Test{
		includeNotusAdvisory(),
		overridesNVTS(),
		containNotusResults(user, pass),
	}
	return uc.Tests{
		Title:    "Notus",
		UseCases: cases,
	}
}
ospd-openvas-22.9.0/smoketest/usecases/notus/server.go 0000664 0000000 0000000 00000006261 15011310720 0023053 0 ustar 00root root 0000000 0000000 // SPDX-FileCopyrightText: 2023 Greenbone AG
//
// SPDX-License-Identifier: AGPL-3.0-or-later
package notus
import (
"bytes"
"encoding/json"
"fmt"
"net"
"os"
"time"
"github.com/google/uuid"
"github.com/greenbone/ospd-openvas/smoketest/mqtt"
)
// Message carries the envelope fields shared by every Notus MQTT message.
type Message struct {
	MessageId string `json:"message_id"` //UUID(data.get("message_id")),
	GroupId   string `json:"group_id"`   //data.get("group_id"),
	Created   int64  `json:"created"`    //datetime.fromtimestamp(
	Type      string `json:"message_type"` //result.scan
}

// newMessage builds a Message envelope with fresh UUIDs, the current unix
// timestamp and the fixed message type "result.scan".
func newMessage() Message {
	return Message{
		MessageId: uuid.NewString(),
		GroupId:   uuid.NewString(),
		Created:   time.Now().Unix(),
		Type:      "result.scan",
	}
}

// ScanStartMessage is the command a scanner publishes to ask Notus to check
// a host's package list.
type ScanStartMessage struct {
	MessageType string   `json:"message_type"`
	ID          string   `json:"scan_id"`
	HostIP      string   `json:"host_ip"`
	HostName    string   `json:"host_name"`
	OSRelease   string   `json:"os_release"`
	PackageList []string `json:"package_list"`
}

// ScanStatusMessage reports the state of a Notus scan for a single host.
type ScanStatusMessage struct {
	ID     string `json:"scan_id"`
	Host   string `json:"host_ip"`
	Status string `json:"status"`
}

// ScanResultMessage is a single scan result published by Notus; it embeds
// the common Message envelope.
type ScanResultMessage struct {
	Message
	ID         string `json:"scan_id"`
	HostIP     string `json:"host_ip"`
	HostName   string `json:"host_name"`
	OID        string `json:"oid"`
	Value      string `json:"value"`
	Port       string `json:"port"`
	Uri        string `json:"uri"`
	ResultType string `json:"result_type"` // ALARM
}
// check waits for one incoming message on the subscribed command topic and
// answers it like a Notus scanner would: it publishes a "running" status,
// one hard coded ALARM result and a "finished" status for the requested
// host.
//
// It returns true as long as the incoming channel is still open and false
// once it got closed (or when a nil message was received), so it can be
// used as the condition of a polling loop.
func (s *Server) check() bool {
	select {
	case in, open := <-s.client.Incoming():
		if in != nil {
			var start ScanStartMessage
			if err := json.NewDecoder(bytes.NewReader(in.Message)).Decode(&start); err != nil {
				// Unparsable commands are logged and skipped; the loop keeps
				// running while the channel is open.
				fmt.Fprintf(os.Stderr, "Unable to parse %s to ScanStartMessage: %s", string(in.Message), err)
				return open
			}
			running := ScanStatusMessage{
				ID:     start.ID,
				Host:   start.HostIP,
				Status: "running",
			}
			s.client.Publish("scanner/status", running)
			// Fixed fake finding so the use case can assert on the
			// "Vulnerable package" prefix.
			vulr := fmt.Sprintf("Vulnerable package: %s\nInstalled version: %s\nFixed version: %s\n", "a", "0.0.1", "0.0.2")
			resultMSG := ScanResultMessage{
				Message:    newMessage(),
				ID:         start.ID,
				HostIP:     start.HostIP,
				HostName:   start.HostName,
				OID:        "1.3.6.1.4.1.25623.1.0.90022",
				Value:      vulr,
				Port:       "package",
				Uri:        "",
				ResultType: "ALARM",
			}
			s.client.Publish("scanner/scan/info", resultMSG)
			running.Status = "finished"
			s.client.Publish("scanner/status", running)
			return open
		}
	}
	// Reached when the channel delivered a nil message (closed).
	return false
}
// Server simulates a notus instance: it answers scan commands received via
// MQTT with canned status and result messages.
type Server struct {
	address string     // broker address the server was dialed against
	client  *mqtt.MQTT // underlying MQTT session
}
// NewServer dials the MQTT broker at address and prepares a simulated Notus
// instance on top of that connection. Call Connect to start serving.
func NewServer(address string) (*Server, error) {
	socket, err := net.Dial("tcp", address)
	if err != nil {
		return nil, err
	}
	client, err := mqtt.New(socket, mqtt.Configuration{})
	if err != nil {
		return nil, err
	}
	server := &Server{
		address: address,
		client:  client,
	}
	return server, nil
}
// Connect establishes the MQTT session, subscribes to the Notus command
// topic and starts a goroutine that answers incoming scan commands until
// the incoming channel gets closed.
func (s *Server) Connect() error {
	if err := s.client.Connect(); err != nil {
		return err
	}
	if err := s.client.Subscribe("scanner/package/cmd/notus"); err != nil {
		return err
	}
	go func() {
		// keep answering until check reports a closed channel
		for s.check() {
		}
	}()
	return nil
}
// Close shuts down the underlying MQTT client connection.
func (s *Server) Close() error {
	return s.client.Close()
}
ospd-openvas-22.9.0/smoketest/usecases/policy/ 0000775 0000000 0000000 00000000000 15011310720 0021340 5 ustar 00root root 0000000 0000000 ospd-openvas-22.9.0/smoketest/usecases/policy/init.go 0000664 0000000 0000000 00000004171 15011310720 0022635 0 ustar 00root root 0000000 0000000 // SPDX-FileCopyrightText: 2023 Greenbone AG
//
// SPDX-License-Identifier: AGPL-3.0-or-later
package policy
import (
"fmt"
"github.com/greenbone/ospd-openvas/smoketest/connection"
"github.com/greenbone/ospd-openvas/smoketest/policies"
"github.com/greenbone/ospd-openvas/smoketest/scan"
uc "github.com/greenbone/ospd-openvas/smoketest/usecases"
)
// discoveryAuthenticated runs the GatherPackageList scan config against
// localhost with the given SSH credentials and verifies that authenticated
// checks got enabled.
func discoveryAuthenticated(cache *policies.Cache, username, password string) uc.Test {
	return uc.Test{
		Title: "GatherPackageList - enable authenticated checks",
		Run: func(co connection.OSPDSender) uc.Response {
			pol := "GatherPackageList"
			selection := cache.ByName(pol).AsVTSelection(nil)
			if len(selection.Single) == 0 && len(selection.Group) == 0 {
				return uc.Response{
					Success:     false,
					Description: fmt.Sprintf("Config %s not found\n", pol),
				}
			}
			target := scan.Target{
				Hosts:            "localhost",
				Ports:            "22",
				AliveTestMethods: scan.ConsiderAlive,
				Credentials: scan.Credentials{
					Credentials: []scan.Credential{{
						Type:     "up",
						Service:  "ssh",
						Username: username,
						Password: password,
					}},
				},
			}
			ospdCMD := scan.Start{
				Targets:       scan.Targets{Targets: []scan.Target{target}},
				VTSelection:   []scan.VTSelection{selection},
				ScannerParams: scan.DisableNotus,
			}
			result := uc.StartScanGetLastStatus(ospdCMD, co)
			if result.Failure != nil {
				return *result.Failure
			}
			// Message emitted by the SSH authentication check on success.
			ssh_success_msg := "It was possible to login using the provided SSH credentials. Hence authenticated checks are enabled.\n"
			for _, rs := range result.Resp.Scan.Results.Results {
				if rs.Value == ssh_success_msg {
					return uc.Response{
						Success:     true,
						Description: "ssh login with given credentials was successful.",
					}
				}
			}
			return uc.Response{
				Success:     false,
				Description: "failed to find ssh success message",
			}
		},
	}
}
// Create assembles all policy (scan config) related use cases.
func Create(cache *policies.Cache, username, password string) uc.Tests {
	cases := []uc.Test{
		discoveryAuthenticated(cache, username, password),
	}
	return uc.Tests{
		Title:    "Policy/Scan-Config",
		UseCases: cases,
	}
}
ospd-openvas-22.9.0/smoketest/usecases/scan/ 0000775 0000000 0000000 00000000000 15011310720 0020765 5 ustar 00root root 0000000 0000000 ospd-openvas-22.9.0/smoketest/usecases/scan/init.go 0000664 0000000 0000000 00000014042 15011310720 0022260 0 ustar 00root root 0000000 0000000 // SPDX-FileCopyrightText: 2023 Greenbone AG
//
// SPDX-License-Identifier: AGPL-3.0-or-later
package scan
import (
"fmt"
"github.com/greenbone/ospd-openvas/smoketest/connection"
"github.com/greenbone/ospd-openvas/smoketest/scan"
"github.com/greenbone/ospd-openvas/smoketest/usecases"
)
// DefaultTargets addresses the smoketest hosts without alive detection.
var DefaultTargets = Targets(scan.ConsiderAlive)

// DefaultSelection selects a single fast test VT.
var DefaultSelection = []scan.VTSelection{
	{Single: []scan.VTSingle{{
		ID: "0.0.0.0.0.0.0.0.0.1",
	}}},
}

// DefaultStart is a ready-to-use start_scan command with Notus disabled.
var DefaultStart = scan.Start{
	ScannerParams: scan.DisableNotus,
	Targets:       DefaultTargets,
	VTSelection:   DefaultSelection,
}
// Targets builds the default smoketest target list with the given alive
// test methods applied.
func Targets(alive scan.AliveTestMethods) scan.Targets {
	target := scan.Target{
		Hosts:            "localhost,smoketest.localdomain,smoke.localdomain,and.localdomain,mirrors.localdomain",
		Ports:            "8080,443",
		AliveTestMethods: alive,
	}
	return scan.Targets{
		Targets: []scan.Target{target},
	}
}
// addHostName runs the VT exercising add_host_name and verifies the added
// host name shows up in the scan results.
func addHostName() usecases.Test {
	return usecases.Test{
		Title: "Add Host Name Function",
		Run: func(o connection.OSPDSender) usecases.Response {
			startScan := scan.Start{
				ScannerParams: scan.DefaultScannerParams,
				Targets:       Targets(scan.Alive),
				VTSelection: []scan.VTSelection{
					{Single: []scan.VTSingle{{
						ID: "0.0.0.0.0.0.0.0.0.3",
					}}},
				},
			}
			result := usecases.StartScanGetLastStatus(startScan, o)
			if result.Resp.Scan.Status != "finished" {
				return *usecases.WrongScanStatus("finished", result.Resp.Scan.Status)
			}
			for _, entry := range result.Resp.Scan.Results.Results {
				if entry.HostName == "addhostname.localdomain" {
					return usecases.Response{
						Success: true,
					}
				}
			}
			return usecases.Response{
				Description: fmt.Sprintf("addhost not found in %+v", result.Resp.Scan.Results.Results),
				Success:     false,
			}
		},
	}
}
// stopDeleteWhenNoResults walks a scan through
// queued->init->running->stopped->deleted before it produced results.
//
// Bug fix: on a failing stop command this previously reported the stale
// get_scans response (r.Resp) instead of the actual stop response.
func stopDeleteWhenNoResults() usecases.Test {
	return usecases.Test{
		Title: "When no results: Queue->Init->Running->Stop->Delete",
		Run: func(co connection.OSPDSender) usecases.Response {
			// A deliberately slow VT so the scan can be stopped while it is
			// still running.
			startScan := scan.Start{
				ScannerParams: scan.DefaultScannerParams,
				Targets:       Targets(scan.Alive),
				VTSelection: []scan.VTSelection{
					{Single: []scan.VTSingle{{
						ID: "0.0.0.0.0.0.0.0.0.2",
					}}},
				},
			}
			var startR scan.StartResponse
			if err := co.SendCommand(startScan, &startR); err != nil {
				panic(err)
			}
			if startR.Code != "200" {
				return *usecases.WrongStatusCodeResponse(startR.StatusCodeResponse)
			}
			get := scan.GetScans{ID: startR.ID}
			r := usecases.TillState(get, co, "running")
			if r.Failure != nil {
				return *r.Failure
			}
			// NOTE(review): despite the title the check tolerates a single
			// early result; only more than one result fails the use case.
			if len(r.Resp.Scan.Results.Results) > 1 {
				return usecases.Response{
					Success:     false,
					Description: fmt.Sprintf("Expected to have 0 results but got %d", len(r.Resp.Scan.Results.Results)),
				}
			}
			var stopR scan.StopResponse
			if err := co.SendCommand(scan.Stop{ID: get.ID}, &stopR); err != nil {
				panic(err)
			}
			if stopR.Code != "200" {
				return *usecases.WrongStatusCodeResponse(stopR.StatusCodeResponse)
			}
			var deleteR scan.DeleteResponse
			co.SendCommand(scan.Delete{ID: get.ID}, &deleteR)
			if deleteR.Code != "200" {
				return *usecases.WrongStatusCodeResponse(deleteR.StatusCodeResponse)
			}
			return usecases.Response{
				Success:     true,
				Description: "",
			}
		},
	}
}
// transitionQueueToRunning follows the workflow GVMD uses: the scan gets
// queued, initialized and running; it is then stopped, deleted and resumed
// under the same ID until it finishes.
//
// Bug fix: on a failing stop command this previously reported the stale
// get_scans response (r.Resp) instead of the actual stop response.
func transitionQueueToRunning() usecases.Test {
	return usecases.Test{
		Title: "GVMD Workflow: Queue->Init->Running->Stop->Delete-Start->Finish",
		Run: func(co connection.OSPDSender) usecases.Response {
			var startR scan.StartResponse
			if err := co.SendCommand(DefaultStart, &startR); err != nil {
				panic(err)
			}
			if startR.Code != "200" {
				return *usecases.WrongStatusCodeResponse(startR.StatusCodeResponse)
			}
			get := scan.GetScans{ID: startR.ID}
			if r := usecases.VerifyTillNextState(get, co, "queued"); r.Failure == nil {
				if r.Resp.Scan.Status != "init" {
					// on some slower machines it can happen that the call to get the state
					// is taking too long for the init phase and it is already running.
					// On this case we just skip forward.
					if r.Resp.Scan.Status == "running" {
						goto is_running
					}
					return *usecases.WrongScanStatus("init", r.Resp.Scan.Status)
				}
				r = usecases.TillNextState(get, co, "init")
				if r.Failure != nil {
					return *r.Failure
				}
				if r.Resp.Scan.Status != "running" {
					return *usecases.WrongScanStatus("running", r.Resp.Scan.Status)
				}
			is_running:
				var stopR scan.StopResponse
				if err := co.SendCommand(scan.Stop{ID: get.ID}, &stopR); err != nil {
					panic(err)
				}
				if stopR.Code != "200" {
					return *usecases.WrongStatusCodeResponse(stopR.StatusCodeResponse)
				}
				r = usecases.VerifyGet(get, co, "stopped")
				if r.Failure != nil {
					return *r.Failure
				}
				var deleteR scan.DeleteResponse
				co.SendCommand(scan.Delete{ID: get.ID}, &deleteR)
				if deleteR.Code != "200" {
					return *usecases.WrongStatusCodeResponse(deleteR.StatusCodeResponse)
				}
				// Resume the deleted scan under the same ID and wait until it
				// finished.
				resume := DefaultStart
				resume.ID = get.ID
				r = usecases.StartScanGetLastStatus(resume, co)
				if r.Resp.Scan.Status != "finished" {
					return *usecases.WrongScanStatus("finished", r.Resp.Scan.Status)
				}
			} else {
				return *r.Failure
			}
			return usecases.Response{
				Success:     true,
				Description: "",
			}
		},
	}
}
// startScan starts the default scan and expects it to reach the "finished"
// state.
//
// Bug fix: corrected the typo "Espected" in the emitted description.
func startScan() usecases.Test {
	return usecases.Test{
		Title: "start",
		Run: func(co connection.OSPDSender) usecases.Response {
			r := usecases.StartScanGetLastStatus(DefaultStart, co)
			if r.Resp.Scan.Status != "finished" {
				return *usecases.WrongScanStatus("finished", r.Resp.Scan.Status)
			}
			return usecases.Response{
				// Always true here; the non-finished case returned above.
				Success: r.Resp.Scan.Status == "finished",
				Description: fmt.Sprintf("Expected status of %s to be finished but was %s",
					r.Resp.Scan.ID, r.Resp.Scan.Status),
			}
		},
	}
}
// Create assembles all scan workflow related use cases.
func Create() usecases.Tests {
	cases := []usecases.Test{
		addHostName(),
		startScan(),
		transitionQueueToRunning(),
		stopDeleteWhenNoResults(),
	}
	return usecases.Tests{
		Title:    "Scan",
		UseCases: cases,
	}
}
ospd-openvas-22.9.0/smoketest/vt/ 0000775 0000000 0000000 00000000000 15011310720 0016657 5 ustar 00root root 0000000 0000000 ospd-openvas-22.9.0/smoketest/vt/vts.go 0000664 0000000 0000000 00000006110 15011310720 0020020 0 ustar 00root root 0000000 0000000 // SPDX-FileCopyrightText: 2023 Greenbone AG
//
// SPDX-License-Identifier: AGPL-3.0-or-later
package vt
import "encoding/xml"
// Parameter describes a single configurable VT parameter.
type Parameter struct {
	ID      string `xml:"id,attr,omitempty"`
	Type    string `xml:"type,attr,omitempty"`
	Name    string `xml:"name,omitempty"`
	Default string `xml:"default,omitempty"`
}

// Reference is a cross reference (e.g. CVE, URL) of a VT.
type Reference struct {
	ID   string `xml:"id,attr,omitempty"`
	Type string `xml:"type,attr,omitempty"`
}

// Dependency points to another VT this VT depends on.
type Dependency struct {
	ID string `xml:"vt_id,attr,omitempty"`
}

// Solution describes how a finding can be remediated.
type Solution struct {
	Type     string `xml:"type,attr,omitempty"`
	Method   string `xml:"method,attr,omitempty"`
	Solution string `xml:",chardata"`
}

// Detection carries the quality-of-detection information of a VT.
type Detection struct {
	Type      string `xml:"qod_type,attr,omitempty"`
	Detection string `xml:",chardata"`
}

// Severity is a single severity entry of a VT.
type Severity struct {
	Type   string `xml:"type,attr,omitempty"`
	Value  string `xml:"value,omitempty"`
	Origin string `xml:"origin,omitempty"`
	Date   int64  `xml:"date,omitempty"`
}

// Custom carries scanner specific metadata of a VT.
type Custom struct {
	Family           string `xml:"family,omitempty"`
	FileName         string `xml:"filename,omitempty"`
	RequiredKeys     string `xml:"required_keys,omitempty"`
	ExcludedKeys     string `xml:"excluded_keys,omitempty"`
	MandatoryKeys    string `xml:"mandatory_keys,omitempty"`
	CreationDate     string `xml:"creation_date,omitempty"`
	CvssBase         string `xml:"cvss_base,omitempty"`
	CvssBaseVector   string `xml:"cvss_base_vector,omitempty"`
	Deprecated       string `xml:"deprecated,omitempty"`
	LastModification string `xml:"last_modification,omitempty"`
	Qod              string `xml:"qod,omitempty"`
	QodType          string `xml:"qod_type,omitempty"`
	Vuldetect        string `xml:"vuldetect,omitempty"`
}

// VT is a single vulnerability test entry of a get_vts response.
type VT struct {
	ID           string        `xml:"id,attr,omitempty"`
	Name         string        `xml:"name,omitempty"`
	Parameter    *[]Parameter  `xml:"params>param,omitempty"`
	References   *[]Reference  `xml:"refs>ref,omitempty"`
	Dependencies *[]Dependency `xml:"dependencies>dependency,omitempty"`
	Created      int64         `xml:"creation_time,omitempty"`
	Modified     int64         `xml:"modification_time,omitempty"`
	Summary      string        `xml:"summary,omitempty"`
	Impact       string        `xml:"impact,omitempty"`
	Affected     string        `xml:"affected,omitempty"`
	Insight      string        `xml:"insight,omitempty"`
	Solution     *Solution     `xml:"solution,omitempty"`
	Detection    *Detection    `xml:"detection,omitempty"`
	Severities   *[]Severity   `xml:"severities>severity,omitempty"`
	Custom       []Custom      `xml:"custom,omitempty"`
}

// VTs is the collection element wrapping all VT entries.
type VTs struct {
	Version string `xml:"vts_version,attr,omitempty"`
	Total   string `xml:"total,attr,omitempty"`
	Hash    string `xml:"sha256_hash,attr,omitempty"`
	VT      []VT   `xml:"vt,omitempty"`
}

// GetVTsResponse is the parsed response of a get_vts command.
type GetVTsResponse struct {
	Status     string `xml:"status,attr,omitempty"`
	StatusText string `xml:"status_text,attr,omitempty"`
	VTs        VTs    `xml:"vts,omitempty"`
}

// Get is the get_vts OSP command.
type Get struct {
	XMLName xml.Name `xml:"get_vts"`
	ID      string   `xml:"vt_id,attr,omitempty"`
	Filter  string   `xml:"filter,attr,omitempty"`
	Details string   `xml:"details,attr,omitempty"`
}
ospd-openvas-22.9.0/tests/ 0000775 0000000 0000000 00000000000 15011310720 0015352 5 ustar 00root root 0000000 0000000 ospd-openvas-22.9.0/tests/__init__.py 0000664 0000000 0000000 00000000170 15011310720 0017461 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
ospd-openvas-22.9.0/tests/command/ 0000775 0000000 0000000 00000000000 15011310720 0016770 5 ustar 00root root 0000000 0000000 ospd-openvas-22.9.0/tests/command/__init__.py 0000664 0000000 0000000 00000000170 15011310720 0021077 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
ospd-openvas-22.9.0/tests/command/test_command.py 0000664 0000000 0000000 00000003752 15011310720 0022026 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
from unittest import TestCase
from ospd.command.registry import get_commands, remove_command
from ospd.command.command import BaseCommand
class BaseCommandTestCase(TestCase):
    """Tests for the auto-registration and metadata helpers of BaseCommand."""

    def test_auto_register(self):
        # Subclassing BaseCommand registers the class in the global command
        # registry; verify it shows up exactly once.
        commands = get_commands()
        before = len(commands)

        class Foo(BaseCommand):
            name = "foo"

            def handle_xml(self, xml):
                pass

        after = len(commands)
        try:
            self.assertEqual(before + 1, after)
            c_dict = {c.name: c for c in commands}
            self.assertIn('foo', c_dict)
            self.assertIs(c_dict['foo'], Foo)
        finally:
            # Always deregister to avoid leaking state into other tests.
            remove_command(Foo)

    def test_basic_properties(self):
        # The class attributes are exposed via the get_* accessors.
        class Foo(BaseCommand):
            name = "foo"
            attributes = {'lorem': 'ipsum'}
            elements = {'foo': 'bar'}
            description = 'bar'

            def handle_xml(self, xml):
                pass

        try:
            f = Foo({})
            self.assertEqual(f.get_name(), 'foo')
            self.assertEqual(f.get_description(), 'bar')
            self.assertEqual(f.get_attributes(), {'lorem': 'ipsum'})
            self.assertEqual(f.get_elements(), {'foo': 'bar'})
        finally:
            remove_command(Foo)

    def test_as_dict(self):
        # as_dict mirrors the same metadata as a plain dictionary.
        class Foo(BaseCommand):
            name = "foo"
            attributes = {'lorem': 'ipsum'}
            elements = {'foo': 'bar'}
            description = 'bar'

            def handle_xml(self, xml):
                pass

        try:
            f = Foo({})
            f_dict = f.as_dict()
            self.assertEqual(f_dict['name'], 'foo')
            self.assertEqual(f_dict['description'], 'bar')
            self.assertEqual(f_dict['attributes'], {'lorem': 'ipsum'})
            self.assertEqual(f_dict['elements'], {'foo': 'bar'})
        finally:
            remove_command(Foo)
ospd-openvas-22.9.0/tests/command/test_commands.py 0000664 0000000 0000000 00000041177 15011310720 0022214 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
import time
from unittest import TestCase
from unittest.mock import patch, MagicMock
from xml.etree import ElementTree as et
from ospd.command.command import (
CheckFeed,
GetPerformance,
StartScan,
StopScan,
GetMemoryUsage,
)
from ospd.errors import OspdCommandError, OspdError
from ospd.misc import create_process
from ..helper import (
DummyWrapper,
assert_called_once,
FakeStream,
FakeDataManager,
)
class CheckFeedTestCase(TestCase):
    """Tests for the check_feed OSP command handler.

    NOTE(review): the XML payloads in this module's string literals appear to
    have been stripped to '' by text extraction — verify against upstream.
    """

    def test_check_feed_fail(self):
        # A daemon whose feed self test yields None must make the command
        # fail with an OspdCommandError.
        daemon = DummyWrapper([])
        daemon.check_feed_self_test = MagicMock(return_value=None)
        cmd = CheckFeed(daemon)
        request = et.fromstring('')
        with self.assertRaises(OspdCommandError):
            cmd.handle_xml(request)

    def test_check_feed(self):
        # A successful self test is answered with status 200.
        daemon = DummyWrapper([])
        daemon.check_feed_self_test = MagicMock(return_value={'a': '1'})
        cmd = CheckFeed(daemon)
        response = et.fromstring(cmd.handle_xml(et.fromstring('')))
        self.assertEqual(response.get('status'), '200')
        self.assertEqual(response.tag, 'check_feed_response')
class GetPerformanceTestCase(TestCase):
    """Tests for the get_performance OSP command handler.

    NOTE(review): the XML payloads in the string literals appear stripped to
    '' by text extraction — verify against upstream.
    """

    @patch('ospd.command.command.subprocess')
    def test_get_performance(self, mock_subproc):
        # The external performance tool is mocked; a successful invocation is
        # answered with status 200.
        cmd = GetPerformance(None)
        mock_subproc.check_output.return_value = b'foo'
        response = et.fromstring(
            cmd.handle_xml(
                et.fromstring(
                    ''
                )
            )
        )
        self.assertEqual(response.get('status'), '200')
        self.assertEqual(response.tag, 'get_performance_response')

    def test_get_performance_fail_int(self):
        # Per the test name: a non-integer time value must be rejected.
        cmd = GetPerformance(None)
        request = et.fromstring(
            ''
        )
        with self.assertRaises(OspdCommandError):
            cmd.handle_xml(request)

    def test_get_performance_fail_regex(self):
        # Per the test name: a title not matching the allowed pattern must
        # be rejected.
        cmd = GetPerformance(None)
        request = et.fromstring(
            ''
        )
        with self.assertRaises(OspdCommandError):
            cmd.handle_xml(request)

    def test_get_performance_fail_cmd(self):
        # Per the test name: values unusable as a command argument must be
        # rejected.
        cmd = GetPerformance(None)
        request = et.fromstring(
            ''
        )
        with self.assertRaises(OspdCommandError):
            cmd.handle_xml(request)
class StartScanTestCase(TestCase):
    """Tests for the start_scan OSP command handler.

    NOTE(review): the XML request payloads in the string literals of this
    class appear stripped to '' by text extraction — verify against upstream.
    """

    def test_scan_with_vts_empty_vt_list(self):
        # An empty vt_selection must be rejected.
        daemon = DummyWrapper([])
        cmd = StartScan(daemon)
        request = et.fromstring(
            ''
            ''
            ''
            'localhost'
            '80, 443'
            ''
            ''
            ''
            ''
        )
        with self.assertRaises(OspdCommandError):
            cmd.handle_xml(request)

    @patch("ospd.ospd.create_process")
    def test_scan_with_vts(self, mock_create_process):
        daemon = DummyWrapper([])
        cmd = StartScan(daemon)
        request = et.fromstring(
            ''
            ''
            ''
            'localhost'
            '80, 443'
            ''
            ''
            ''
            ''
            ''
            ''
            ''
        )
        # With one vt, without params
        response = et.fromstring(cmd.handle_xml(request))
        daemon.start_queued_scans()
        scan_id = response.findtext('id')
        vts_collection = daemon.get_scan_vts(scan_id)
        self.assertEqual(vts_collection, {'1.2.3.4': {}, 'vt_groups': []})
        self.assertNotEqual(vts_collection, {'1.2.3.6': {}})
        daemon.start_queued_scans()
        assert_called_once(mock_create_process)

    def test_scan_pop_vts(self):
        # get_scan_vts consumes the stored VTs; a second call must raise.
        daemon = DummyWrapper([])
        cmd = StartScan(daemon)
        request = et.fromstring(
            ''
            ''
            ''
            'localhost'
            '80, 443'
            ''
            ''
            ''
            ''
            ''
            ''
            ''
        )
        # With one vt, without params
        response = et.fromstring(cmd.handle_xml(request))
        scan_id = response.findtext('id')
        daemon.start_queued_scans()
        vts_collection = daemon.get_scan_vts(scan_id)
        self.assertEqual(vts_collection, {'1.2.3.4': {}, 'vt_groups': []})
        self.assertRaises(KeyError, daemon.get_scan_vts, scan_id)

    def test_scan_pop_ports(self):
        # get_ports consumes the stored port list; a second call must raise.
        daemon = DummyWrapper([])
        cmd = StartScan(daemon)
        request = et.fromstring(
            ''
            ''
            ''
            'localhost'
            '80, 443'
            ''
            ''
            ''
            ''
            ''
            ''
            ''
        )
        # With one vt, without params
        response = et.fromstring(cmd.handle_xml(request))
        daemon.start_queued_scans()
        scan_id = response.findtext('id')
        ports = daemon.scan_collection.get_ports(scan_id)
        self.assertEqual(ports, '80, 443')
        self.assertRaises(KeyError, daemon.scan_collection.get_ports, scan_id)

    @patch("ospd.ospd.create_process")
    def test_scan_without_vts(self, mock_create_process):
        daemon = DummyWrapper([])
        cmd = StartScan(daemon)
        # With out vts
        request = et.fromstring(
            ''
            ''
            ''
            'localhost'
            '80, 443'
            ''
            ''
            ''
            ''
        )
        response = et.fromstring(cmd.handle_xml(request))
        daemon.start_queued_scans()
        scan_id = response.findtext('id')
        self.assertEqual(daemon.get_scan_vts(scan_id), {})
        assert_called_once(mock_create_process)

    def test_scan_with_vts_and_param_missing_vt_param_id(self):
        daemon = DummyWrapper([])
        cmd = StartScan(daemon)
        # Raise because no vt_param id attribute
        request = et.fromstring(
            ''
            ''
            ''
            'localhost'
            '80, 443'
            ''
            ''
            ''
            ''
            '200'
            ''
            ''
        )
        with self.assertRaises(OspdError):
            cmd.handle_xml(request)

    @patch("ospd.ospd.create_process")
    def test_scan_with_vts_and_param(self, mock_create_process):
        daemon = DummyWrapper([])
        cmd = StartScan(daemon)
        # No error
        request = et.fromstring(
            ''
            ''
            ''
            'localhost'
            '80, 443'
            ''
            ''
            ''
            ''
            ''
            '200'
            ''
            ''
            ''
        )
        response = et.fromstring(cmd.handle_xml(request))
        daemon.start_queued_scans()
        scan_id = response.findtext('id')
        self.assertEqual(
            daemon.get_scan_vts(scan_id),
            {'1234': {'ABC': '200'}, 'vt_groups': []},
        )
        daemon.start_queued_scans()
        assert_called_once(mock_create_process)

    def test_scan_with_vts_and_param_missing_vt_group_filter(self):
        daemon = DummyWrapper([])
        cmd = StartScan(daemon)
        # Raise because no vtgroup filter attribute
        request = et.fromstring(
            ''
            ''
            ''
            'localhost'
            '80, 443'
            ''
            ''
            ''
            ''
            ''
        )
        daemon.start_queued_scans()
        with self.assertRaises(OspdError):
            cmd.handle_xml(request)

    @patch("ospd.ospd.create_process")
    def test_scan_with_vts_and_param_with_vt_group_filter(
        self, mock_create_process
    ):
        daemon = DummyWrapper([])
        cmd = StartScan(daemon)
        # No error
        request = et.fromstring(
            ''
            ''
            ''
            'localhost'
            '80, 443'
            ''
            ''
            ''
            ''
            ''
            ''
            ''
        )
        response = et.fromstring(cmd.handle_xml(request))
        daemon.start_queued_scans()
        scan_id = response.findtext('id')
        self.assertEqual(daemon.get_scan_vts(scan_id), {'vt_groups': ['a']})
        assert_called_once(mock_create_process)

    @patch("ospd.ospd.create_process")
    @patch("ospd.command.command.logger")
    def test_scan_ignore_multi_target(self, mock_logger, mock_create_process):
        # Multiple targets are not supported; a warning is logged and the
        # scan still starts.
        daemon = DummyWrapper([])
        cmd = StartScan(daemon)
        request = et.fromstring(
            ''
            ''
            ''
            'localhosts'
            '22'
            ''
            ''
            ''
            ''
        )
        cmd.handle_xml(request)
        daemon.start_queued_scans()
        assert_called_once(mock_logger.warning)
        assert_called_once(mock_create_process)

    def test_max_queued_scans_reached(self):
        # With max_queued_scans == 1 a second start_scan must be rejected.
        daemon = DummyWrapper([])
        daemon.max_queued_scans = 1
        cmd = StartScan(daemon)
        request = et.fromstring(
            ''
            ''
            ''
            'localhosts'
            '22'
            ''
            ''
            ''
            ''
        )
        # create first scan
        response = et.fromstring(cmd.handle_xml(request))
        scan_id_1 = response.findtext('id')
        with self.assertRaises(OspdCommandError):
            cmd.handle_xml(request)
        daemon.scan_collection.remove_file_pickled_scan_info(scan_id_1)

    @patch("ospd.ospd.create_process")
    @patch("ospd.command.command.logger")
    def test_scan_use_legacy_target_and_port(
        self, mock_logger, mock_create_process
    ):
        # Legacy target/port attributes are still honored but a deprecation
        # warning is logged.
        daemon = DummyWrapper([])
        daemon.scan_collection.datamanager = FakeDataManager()
        cmd = StartScan(daemon)
        request = et.fromstring(
            ''
            ''
            ''
        )
        response = et.fromstring(cmd.handle_xml(request))
        daemon.start_queued_scans()
        scan_id = response.findtext('id')
        self.assertIsNotNone(scan_id)
        self.assertEqual(daemon.get_scan_host(scan_id), 'localhost')
        self.assertEqual(daemon.get_scan_ports(scan_id), '22')
        assert_called_once(mock_logger.warning)
        assert_called_once(mock_create_process)
class StopCommandTestCase(TestCase):
    """Tests for the stop_scan OSP command handler.

    NOTE(review): the XML payloads in the string literals appear stripped to
    '' by text extraction — verify against upstream.
    """

    @patch("ospd.ospd.os")
    @patch("ospd.ospd.create_process")
    def test_stop_scan(self, mock_create_process, mock_os):
        # Start a scan with a mocked scan process, then stop it and verify
        # the process got terminated via its process group.
        mock_process = mock_create_process.return_value
        mock_process.is_alive.return_value = True
        mock_process.pid = "foo"
        fs = FakeStream()
        daemon = DummyWrapper([])
        daemon.scan_collection.datamanager = FakeDataManager()
        request = (
            ''
            ''
            ''
            'localhosts'
            '22'
            ''
            ''
            ''
            ''
        )
        daemon.handle_command(request, fs)
        response = fs.get_response()
        daemon.start_queued_scans()
        assert_called_once(mock_create_process)
        assert_called_once(mock_process.start)
        scan_id = response.findtext('id')
        request = et.fromstring(f'')
        cmd = StopScan(daemon)
        cmd.handle_xml(request)
        assert_called_once(mock_process.terminate)
        mock_os.getpgid.assert_called_with('foo')

    def test_unknown_scan_id(self):
        # Stopping a scan id the daemon does not know must fail.
        daemon = DummyWrapper([])
        cmd = StopScan(daemon)
        request = et.fromstring('')
        with self.assertRaises(OspdCommandError):
            cmd.handle_xml(request)

    def test_missing_scan_id(self):
        # A stop_scan request without a scan id must fail.
        request = et.fromstring('')
        cmd = StopScan(None)
        with self.assertRaises(OspdCommandError):
            cmd.handle_xml(request)
class GetMemoryUsageTestCase(TestCase):
    """Tests for the get_memory_usage OSP command handler.

    NOTE(review): `self.assertTrue(len(x), N)` passes whenever len(x) is
    truthy — the second argument is the failure message, not an expected
    value. assertEqual was probably intended; confirm before changing.
    """

    def test_with_main_process_only(self):
        cmd = GetMemoryUsage(None)
        request = et.fromstring('')
        response = et.fromstring(cmd.handle_xml(request))
        processes_element = response.find('processes')
        process_elements = processes_element.findall('process')
        self.assertTrue(len(process_elements), 1)
        main_process_element = process_elements[0]
        # Every process entry must report rss, vms and shared memory.
        rss_element = main_process_element.find('rss')
        vms_element = main_process_element.find('vms')
        shared_element = main_process_element.find('shared')
        self.assertIsNotNone(rss_element)
        self.assertIsNotNone(rss_element.text)
        self.assertIsNotNone(vms_element)
        self.assertIsNotNone(vms_element.text)
        self.assertIsNotNone(shared_element)
        self.assertIsNotNone(shared_element.text)

    def test_with_subprocess(self):
        cmd = GetMemoryUsage(None)

        def foo():  # pylint: disable=blacklisted-name
            time.sleep(60)

        create_process(foo, args=[])
        request = et.fromstring('')
        response = et.fromstring(cmd.handle_xml(request))
        processes_element = response.find('processes')
        process_elements = processes_element.findall('process')
        self.assertTrue(len(process_elements), 2)
        for process_element in process_elements:
            rss_element = process_element.find('rss')
            vms_element = process_element.find('vms')
            shared_element = process_element.find('shared')
            self.assertIsNotNone(rss_element)
            self.assertIsNotNone(rss_element.text)
            self.assertIsNotNone(vms_element)
            self.assertIsNotNone(vms_element.text)
            self.assertIsNotNone(shared_element)
            self.assertIsNotNone(shared_element.text)

    def test_with_subsubprocess(self):
        cmd = GetMemoryUsage(None)

        def bar():  # pylint: disable=blacklisted-name
            create_process(foo, args=[])

        def foo():  # pylint: disable=blacklisted-name
            time.sleep(60)

        create_process(bar, args=[])
        request = et.fromstring('')
        response = et.fromstring(cmd.handle_xml(request))
        processes_element = response.find('processes')
        process_elements = processes_element.findall('process')
        # sub-sub-processes aren't listed
        self.assertTrue(len(process_elements), 2)
        for process_element in process_elements:
            rss_element = process_element.find('rss')
            vms_element = process_element.find('vms')
            shared_element = process_element.find('shared')
            self.assertIsNotNone(rss_element)
            self.assertIsNotNone(rss_element.text)
            self.assertIsNotNone(vms_element)
            self.assertIsNotNone(vms_element.text)
            self.assertIsNotNone(shared_element)
            self.assertIsNotNone(shared_element.text)
ospd-openvas-22.9.0/tests/command/test_registry.py 0000664 0000000 0000000 00000002617 15011310720 0022257 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
from unittest import TestCase
from ospd.command.registry import get_commands, register_command, remove_command
# Names of all commands shipped with ospd, in their registration order; the
# registry test below compares against this list.
COMMAND_NAMES = [
    "help",
    "check_feed",
    "get_version",
    "get_performance",
    "get_scanner_details",
    "delete_scan",
    "get_vts",
    "stop_scan",
    "get_scans",
    "start_scan",
    "get_memory_usage",
]
class RegistryTestCase(TestCase):
    """Tests for the global command registry."""

    def test_available_commands(self):
        # The registry must contain exactly the built-in commands, in order.
        commands = get_commands()
        self.assertEqual(len(COMMAND_NAMES), len(commands))
        c_list = [c.name for c in commands]
        self.assertListEqual(COMMAND_NAMES, c_list)

    def test_register_command(self):
        # Registering adds the class; removing it restores the registry.
        commands = get_commands()
        before = len(commands)

        class Foo:
            name = 'foo'

        register_command(Foo)
        commands = get_commands()
        after = len(commands)
        try:
            self.assertEqual(before + 1, after)
            c_dict = {c.name: c for c in commands}
            self.assertIn('foo', c_dict)
            self.assertIs(c_dict['foo'], Foo)
        finally:
            remove_command(Foo)

        commands = get_commands()
        after2 = len(commands)
        self.assertEqual(before, after2)
        c_dict = {c.name: c for c in commands}
        self.assertNotIn('foo', c_dict)
ospd-openvas-22.9.0/tests/dummydaemon.py 0000664 0000000 0000000 00000012627 15011310720 0020253 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
from unittest.mock import patch, MagicMock
from xml.etree import ElementTree as et
from ospd_openvas.daemon import OSPDopenvas
class FakeDataManager:
    """Stand-in for the scan collection's data manager used in tests."""

    def __init__(self):
        # No shared state or processes are needed for unit tests.
        pass

    def dict(self):
        # Hand back a plain dict instead of a managed proxy dict.
        return {}
class DummyDaemon(OSPDopenvas):
    """OSPDopenvas daemon with Redis/NVTI access fully mocked for tests."""

    # Canned VT metadata for a single NASL script ("Mantis Detection"),
    # keyed by OID, matching the structure the daemon builds from the
    # NVTI cache.
    VTS = {
        '1.3.6.1.4.1.25623.1.0.100061': {
            'creation_time': '1237458156',
            'custom': {
                'category': '3',
                'excluded_keys': 'Settings/disable_cgi_scanning',
                'family': 'Product detection',
                'filename': 'mantis_detect.nasl',
                'required_ports': 'Services/www, 80',
                'timeout': '0',
            },
            'modification_time': '1533906565',
            'name': 'Mantis Detection',
            'qod_type': 'remote_banner',
            'insight': 'some insight',
            'severities': {
                'severity_base_vector': 'AV:N/AC:L/Au:N/C:N/I:N/A:N',
                'severity_type': 'cvss_base_v2',
                'severity_date': '1237458156',
                'severity_origin': 'Greenbone',
            },
            'solution': 'some solution',
            'solution_type': 'WillNotFix',
            'solution_method': 'DebianAPTUpgrade',
            'impact': 'some impact',
            'summary': 'some summary',
            'affected': 'some affection',
            'vt_dependencies': [],
            'vt_params': {
                '1': {
                    'id': '1',
                    'default': '',
                    'description': 'Description',
                    'name': 'Data length :',
                    'type': 'entry',
                },
                '2': {
                    'id': '2',
                    'default': 'no',
                    'description': 'Description',
                    'name': (
                        'Do not randomize the order in which ports are'
                        ' scanned'
                    ),
                    'type': 'checkbox',
                },
            },
            'vt_refs': {
                'bid': [''],
                'cve': [''],
                'xref': ['URL:http://www.mantisbt.org/'],
            },
        }
    }

    @patch('ospd_openvas.daemon.NVTICache')
    @patch('ospd_openvas.daemon.MainDB')
    def __init__(
        self, _MainDBClass: MagicMock = None, NvtiClass: MagicMock = None
    ):
        """Instantiate the daemon with MainDB and NVTICache patched.

        The class-level ``@patch`` decorators inject MagicMocks for both
        classes, so no Redis connection is required. The asserts below
        guard against calling this constructor without the decorators
        applied (e.g. directly with default ``None`` arguments).
        """
        assert _MainDBClass
        assert NvtiClass

        nvti = NvtiClass.return_value
        # One known plugin: [filename, OID] pair used by get_oids().
        oids = [['mantis_detect.nasl', '1.3.6.1.4.1.25623.1.0.100061']]
        nvti.notus = None
        nvti.get_oids.return_value = oids
        # Per-VT data mirrors the VTS class constant above.
        nvti.get_nvt_params.return_value = {
            '1': {
                'id': '1',
                'default': '',
                'description': 'Description',
                'name': 'Data length :',
                'type': 'entry',
            },
            '2': {
                'id': '2',
                'default': 'no',
                'description': 'Description',
                'name': (
                    'Do not randomize the order in which ports are scanned'
                ),
                'type': 'checkbox',
            },
        }
        nvti.get_nvt_refs.return_value = {
            'bid': [''],
            'cve': [''],
            'xref': ['URL:http://www.mantisbt.org/'],
        }
        nvti.get_nvt_metadata.return_value = {
            'category': '3',
            'creation_date': '1237458156',
            'cvss_base_vector': 'AV:N/AC:L/Au:N/C:N/I:N/A:N',
            'excluded_keys': 'Settings/disable_cgi_scanning',
            'family': 'Product detection',
            'filename': 'mantis_detect.nasl',
            'last_modification': '1533906565',
            'name': 'Mantis Detection',
            'qod_type': 'remote_banner',
            'required_ports': 'Services/www, 80',
            'solution': 'some solution',
            'solution_type': 'WillNotFix',
            'solution_method': 'DebianAPTUpgrade',
            'impact': 'some impact',
            'insight': 'some insight',
            'summary': 'some summary',
            'affected': 'some affection',
            'timeout': '0',
            'vt_params': {
                '1': {
                    'id': '1',
                    'default': '',
                    'description': 'Description',
                    'name': 'Data length :',
                    'type': 'entry',
                },
                '2': {
                    'id': '2',
                    'default': 'no',
                    'description': 'Description',
                    'name': (
                        'Do not randomize the order in which ports are'
                        ' scanned'
                    ),
                    'type': 'checkbox',
                },
            },
            'refs': {
                'bid': [''],
                'cve': [''],
                'xref': ['URL:http://www.mantisbt.org/'],
            },
        }
        nvti.get_feed_version.return_value = '123'

        # Run the real OSPDopenvas initialization on top of the mocks.
        super().__init__(
            niceness=10, lock_file_dir='/tmp', mqtt_broker_address=""
        )

        # Replace the multiprocessing data manager with an in-process fake.
        self.scan_collection.data_manager = FakeDataManager()

    def create_xml_target(self) -> et.Element:
        """Build a target element for start_scan requests.

        NOTE(review): the string below is not well-formed XML (it looks
        like the markup was stripped, e.g. during extraction) and
        ``et.fromstring`` would raise on it as-is -- confirm against the
        original helper source.
        """
        target = et.fromstring(
            "192.168.0.180,443"
        )
        return target
ospd-openvas-22.9.0/tests/helper.py 0000664 0000000 0000000 00000014424 15011310720 0017210 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
# pylint: disable=unused-argument
# pylint: disable=disallowed-name
import time
from unittest.mock import Mock
from xml.etree import ElementTree as et
from ospd.ospd import OSPDaemon
def assert_called_once(mock: Mock):
    """Assert that ``mock`` was called exactly once.

    Delegates to the stdlib ``assert_called_once`` when available
    (Python >= 3.6); otherwise performs an equivalent manual
    ``call_count`` check with a matching error message.
    """
    if hasattr(mock, 'assert_called_once'):
        return mock.assert_called_once()

    if mock.call_count != 1:
        # pylint: disable=protected-access
        name = mock._mock_name or 'mock'
        raise AssertionError(
            f"Expected '{name}' to have "
            f"been called once. Called {mock.call_count} "
            f"times.{mock._calls_repr()}"
        )
def assert_called(mock: Mock):
    """assert that the mock was called at least once"""
    if mock.call_count:
        return
    # pylint: disable=protected-access
    raise AssertionError(
        f"Expected '{mock._mock_name or 'mock'}' to have been called."
    )
class FakePsutil:
    """Minimal psutil-style object exposing only an ``available`` value."""

    def __init__(self, available=None):
        # Reported amount of available memory; None when unspecified.
        self.available = available
class FakeStream:
    """Collects bytes written by the daemon and parses them back as XML."""

    def __init__(self, return_value=True):
        # Accumulated bytes written so far.
        self.response = b''
        # Result reported to callers of write().
        self.return_value = return_value

    def write(self, data):
        """Append ``data`` to the buffer and report the configured result."""
        self.response += data
        return self.return_value

    def get_response(self):
        """Parse everything written so far as an XML element."""
        return et.fromstring(self.response)
class FakeDataManager:
    """Context-manager stand-in for the scan collection's data manager/lock."""

    def __init__(self):
        # Nothing to set up for unit tests.
        pass

    def dict(self):
        # Plain dict instead of a managed proxy dict.
        return {}

    def __enter__(self):
        # Acquiring the "lock" is a no-op in tests.
        pass

    def __exit__(self, foo=None, bar=None, bar1=None, foo1=None):
        # Releasing the "lock" is a no-op in tests.
        pass
class DummyXML:
    """Static stand-ins for OSPDaemon's VT-to-XML formatting hooks.

    NOTE(review): the payload strings below read like XML that lost its
    markup (likely mangled during extraction) -- confirm their exact
    content against the original helper before relying on it.
    """

    @staticmethod
    def get_custom_vt_as_xml_str(vt_id, custom):
        return 'static test'

    @staticmethod
    def get_params_vt_as_xml_str(vt_id, vt_params):
        return (
            ''
            'ABCTest ABC'
            'yes'
            ''
            'DEFTest DEF'
            'no'
        )

    @staticmethod
    def get_refs_vt_as_xml_str(vt_id, vt_refs):
        return (
            ''
            ''
        )

    @staticmethod
    def get_dependencies_vt_as_xml_str(vt_id, vt_dependencies):
        return (
            ''
            ''
            ''
            ''
        )

    @staticmethod
    def get_severities_vt_as_xml_str(vt_id, severities):
        return (
            'AV:N/AC:L/Au:N/C:N/I:N/'
            'A:P'
        )

    @staticmethod
    def get_detection_vt_as_xml_str(
        vt_id, detection=None, qod_type=None, qod=None
    ):
        return 'some detection'

    @staticmethod
    def get_summary_vt_as_xml_str(vt_id, summary):
        return 'Some summary'

    @staticmethod
    def get_affected_vt_as_xml_str(vt_id, affected):
        return 'Some affected'

    @staticmethod
    def get_impact_vt_as_xml_str(vt_id, impact):
        return 'Some impact'

    @staticmethod
    def get_insight_vt_as_xml_str(vt_id, insight):
        return 'Some insight'

    @staticmethod
    def get_solution_vt_as_xml_str(
        vt_id, solution, solution_type=None, solution_method=None
    ):
        return 'Some solution'

    @staticmethod
    def get_creation_time_vt_as_xml_str(
        vt_id, vt_creation_time
    ):  # pylint: disable=arguments-differ
        return f'{vt_creation_time}'

    @staticmethod
    def get_modification_time_vt_as_xml_str(
        vt_id, vt_modification_time
    ):  # pylint: disable=arguments-differ
        return f'{vt_modification_time}'
class DummyWrapper(OSPDaemon):
    """Minimal OSPDaemon implementation that replays pre-baked results.

    ``exec_scan`` walks the supplied result records and forwards each one
    to the matching ``add_scan_*`` method based on its ``result_type``.
    """

    def __init__(self, results, checkresult=True):
        super().__init__()
        # Value returned by check(); lets tests simulate a failing daemon.
        self.checkresult = checkresult
        # Iterable of fake result records replayed by exec_scan().
        self.results = results
        self.initialized = True
        # Replace multiprocessing helpers with in-process fakes.
        self.scan_collection.data_manager = FakeDataManager()
        self.scan_collection.file_storage_dir = '/tmp'
        self.scan_collection.scan_collection_lock = FakeDataManager()

    def check(self):
        return self.checkresult

    def exec_scan(self, scan_id):
        time.sleep(0.01)
        for res in self.results:
            # BUGFIX: the 'error' branch used to start a second, independent
            # if/elif chain, so a 'log' result (handled above) still fell
            # into the final else and raised ValueError('log'). A single
            # elif chain dispatches each result exactly once.
            if res.result_type == 'log':
                self.add_scan_log(
                    scan_id,
                    res.host,
                    res.hostname,
                    res.name,
                    res.value,
                    res.port,
                )
            elif res.result_type == 'error':
                self.add_scan_error(
                    scan_id,
                    res.host,
                    res.hostname,
                    res.name,
                    res.value,
                    res.port,
                )
            elif res.result_type == 'host-detail':
                self.add_scan_host_detail(
                    scan_id,
                    res.host,
                    res.hostname,
                    res.name,
                    res.value,
                )
            elif res.result_type == 'alarm':
                self.add_scan_alarm(
                    scan_id,
                    res.host,
                    res.hostname,
                    res.name,
                    res.value,
                    res.port,
                    res.test_id,
                    res.severity,
                    res.qod,
                )
            else:
                # Unknown result types are a programming error in the test.
                raise ValueError(res.result_type)
ospd-openvas-22.9.0/tests/messages/ 0000775 0000000 0000000 00000000000 15011310720 0017161 5 ustar 00root root 0000000 0000000 ospd-openvas-22.9.0/tests/messages/__init__.py 0000664 0000000 0000000 00000000170 15011310720 0021270 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2021-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
ospd-openvas-22.9.0/tests/messages/test_message.py 0000664 0000000 0000000 00000010157 15011310720 0022222 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2021-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
from datetime import datetime, timezone
from uuid import UUID
from unittest import TestCase
from ospd_openvas.messages.message import Message, MessageType
class MessageTestCase(TestCase):
    """Tests for the base Message (de)serialization behavior."""

    def test_default_constructor(self):
        """A bare Message auto-generates id, group id and creation time."""
        message = Message()

        self.assertIsInstance(message.message_id, UUID)
        self.assertIsInstance(message.group_id, str)
        self.assertIsInstance(message.created, datetime)

    def test_serialize(self):
        """serialize() emits epoch seconds and string ids."""
        # NOTE(review): fromtimestamp() yields a naive local-time datetime;
        # the assertion below assumes serialize() converts it back to the
        # epoch value 1628512774.0 -- confirm in Message.serialize.
        created = datetime.fromtimestamp(1628512774)
        message_id = UUID('63026767-029d-417e-9148-77f4da49f49a')
        group_id = '866350e8-1492-497e-b12b-c079287d51dd'

        message = Message(
            message_id=message_id, group_id=group_id, created=created
        )

        serialized = message.serialize()

        self.assertEqual(serialized['created'], 1628512774.0)
        self.assertEqual(
            serialized['message_id'], '63026767-029d-417e-9148-77f4da49f49a'
        )
        self.assertEqual(
            serialized['group_id'], '866350e8-1492-497e-b12b-c079287d51dd'
        )
        # The abstract base Message carries no concrete message type.
        self.assertIsNone(message.message_type)

    def test_deserialize(self):
        """deserialize() restores ids and a UTC-aware creation time."""
        data = {
            'message_id': '63026767-029d-417e-9148-77f4da49f49a',
            'group_id': '866350e8-1492-497e-b12b-c079287d51dd',
            'created': 1628512774.0,
            'message_type': 'scan.start',
        }
        # Temporarily give the abstract base class a concrete type so the
        # payload's message_type validates; reset below.
        Message.message_type = MessageType.SCAN_START  # hack a message type
        message = Message.deserialize(data)
        self.assertEqual(
            message.message_id, UUID('63026767-029d-417e-9148-77f4da49f49a')
        )
        self.assertEqual(
            message.group_id, '866350e8-1492-497e-b12b-c079287d51dd'
        )
        self.assertEqual(
            message.created,
            datetime.fromtimestamp(1628512774.0, tz=timezone.utc),
        )

        Message.message_type = None

    def test_deserialize_no_message_type(self):
        """A payload without message_type is rejected."""
        data = {
            'message_id': '63026767-029d-417e-9148-77f4da49f49a',
            'group_id': '866350e8-1492-497e-b12b-c079287d51dd',
            'created': 1628512774.0,
        }
        with self.assertRaisesRegex(
            ValueError, "None is not a valid MessageType"
        ):
            Message.deserialize(data)

    def test_deserialize_unknown_message_type(self):
        """A payload with an unknown message_type is rejected."""
        data = {
            'message_id': '63026767-029d-417e-9148-77f4da49f49a',
            'group_id': '866350e8-1492-497e-b12b-c079287d51dd',
            'created': 1628512774.0,
            'message_type': 'foo',
        }
        with self.assertRaisesRegex(
            ValueError, "'foo' is not a valid MessageType"
        ):
            Message.deserialize(data)

    def test_to_str(self):
        """str(message) renders the serialized payload as JSON."""
        created = datetime.fromtimestamp(1628512774)
        message_id = UUID('63026767-029d-417e-9148-77f4da49f49a')
        group_id = '866350e8-1492-497e-b12b-c079287d51dd'

        message = Message(
            message_id=message_id, group_id=group_id, created=created
        )

        self.assertEqual(
            str(message),
            '{"message_id": "63026767-029d-417e-9148-77f4da49f49a", '
            '"message_type": null, '
            '"group_id": "866350e8-1492-497e-b12b-c079287d51dd", '
            '"created": 1628512774.0}',
        )

    def test_load(self):
        """load() parses a JSON payload into a Message."""
        payload = (
            '{"message_id": "63026767-029d-417e-9148-77f4da49f49a", '
            '"message_type": "scan.start", '
            '"group_id": "866350e8-1492-497e-b12b-c079287d51dd", '
            '"created": 1628512774.0}'
        )
        # Same class-attribute hack as in test_deserialize; reset below.
        Message.message_type = MessageType.SCAN_START  # hack a message type
        message = Message.load(payload)
        self.assertEqual(
            message.message_id, UUID('63026767-029d-417e-9148-77f4da49f49a')
        )
        self.assertEqual(
            message.group_id, '866350e8-1492-497e-b12b-c079287d51dd'
        )
        self.assertEqual(
            message.created,
            datetime.fromtimestamp(1628512774.0, tz=timezone.utc),
        )

        Message.message_type = None
ospd-openvas-22.9.0/tests/messages/test_result.py 0000664 0000000 0000000 00000013366 15011310720 0022121 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2021-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
from datetime import datetime, timezone
from uuid import UUID
from unittest import TestCase
from ospd_openvas.messages.message import MessageType
from ospd_openvas.messages.result import ResultMessage, ResultType
class ResultMessageTestCase(TestCase):
    """Tests for ResultMessage construction and (de)serialization."""

    def test_constructor(self):
        """Keyword construction fills fields and type/topic defaults."""
        message = ResultMessage(
            scan_id='scan_1',
            host_ip='1.1.1.1',
            host_name='foo',
            oid='1.2.3.4.5',
            value='A Vulnerability has been found',
            uri='file://foo/bar',
        )

        self.assertIsInstance(message.message_id, UUID)
        self.assertIsInstance(message.group_id, str)
        self.assertIsInstance(message.created, datetime)

        self.assertEqual(message.message_type, MessageType.RESULT)
        self.assertEqual(message.topic, 'scanner/scan/info')

        self.assertEqual(message.scan_id, 'scan_1')
        self.assertEqual(message.host_ip, '1.1.1.1')
        self.assertEqual(message.host_name, 'foo')
        self.assertEqual(message.oid, '1.2.3.4.5')
        self.assertEqual(message.value, 'A Vulnerability has been found')
        # Defaults when not passed: ALARM result on the 'package' port.
        self.assertEqual(message.result_type, ResultType.ALARM)
        self.assertEqual(message.port, 'package')
        self.assertEqual(message.uri, 'file://foo/bar')

    def test_serialize(self):
        """serialize() emits all fields as JSON-ready primitives."""
        created = datetime.fromtimestamp(1628512774)
        message_id = UUID('63026767-029d-417e-9148-77f4da49f49a')
        group_id = UUID('866350e8-1492-497e-b12b-c079287d51dd')

        message = ResultMessage(
            created=created,
            message_id=message_id,
            group_id=group_id,
            scan_id='scan_1',
            host_ip='1.1.1.1',
            host_name='foo',
            oid='1.2.3.4.5',
            value='A Vulnerability has been found',
            uri='file://foo/bar',
        )

        serialized = message.serialize()

        self.assertEqual(serialized['created'], 1628512774.0)
        self.assertEqual(
            serialized['message_id'], '63026767-029d-417e-9148-77f4da49f49a'
        )
        self.assertEqual(
            serialized['group_id'], '866350e8-1492-497e-b12b-c079287d51dd'
        )
        self.assertEqual(serialized['message_type'], 'result.scan')
        self.assertEqual(serialized['scan_id'], 'scan_1')
        self.assertEqual(serialized['host_ip'], '1.1.1.1')
        self.assertEqual(serialized['host_name'], 'foo')
        self.assertEqual(serialized['oid'], '1.2.3.4.5')
        self.assertEqual(serialized['value'], 'A Vulnerability has been found')
        self.assertEqual(serialized['uri'], 'file://foo/bar')
        self.assertEqual(serialized['port'], 'package')
        self.assertEqual(serialized['result_type'], 'ALARM')

    def test_deserialize(self):
        """deserialize() restores every field, including enums and UTC time."""
        data = {
            'message_id': '63026767-029d-417e-9148-77f4da49f49a',
            'group_id': '866350e8-1492-497e-b12b-c079287d51dd',
            'created': 1628512774.0,
            'message_type': 'result.scan',
            'scan_id': 'scan_1',
            'host_ip': '1.1.1.1',
            'host_name': 'foo',
            'oid': '1.2.3.4.5',
            'value': 'A Vulnerability has been found',
            'uri': 'file://foo/bar',
            'port': 'package',
            'result_type': 'ALARM',
        }

        message = ResultMessage.deserialize(data)
        self.assertEqual(
            message.message_id, UUID('63026767-029d-417e-9148-77f4da49f49a')
        )
        self.assertEqual(
            message.group_id, '866350e8-1492-497e-b12b-c079287d51dd'
        )
        self.assertEqual(
            message.created,
            datetime.fromtimestamp(1628512774.0, tz=timezone.utc),
        )
        self.assertEqual(message.message_type, MessageType.RESULT)

        self.assertEqual(message.scan_id, 'scan_1')
        self.assertEqual(message.host_ip, '1.1.1.1')
        self.assertEqual(message.host_name, 'foo')
        self.assertEqual(message.oid, '1.2.3.4.5')
        self.assertEqual(message.value, 'A Vulnerability has been found')
        self.assertEqual(message.uri, 'file://foo/bar')
        self.assertEqual(message.port, 'package')
        self.assertEqual(message.result_type, ResultType.ALARM)

    def test_deserialize_invalid_message_type(self):
        """A non-result message_type in the payload is rejected."""
        data = {
            'message_id': '63026767-029d-417e-9148-77f4da49f49a',
            'group_id': '866350e8-1492-497e-b12b-c079287d51dd',
            'created': 1628512774.0,
            'message_type': 'scan.status',
            'scan_id': 'scan_1',
            'host_ip': '1.1.1.1',
            'host_name': 'foo',
            'oid': '1.2.3.4.5',
            'value': 'A Vulnerability has been found',
            'uri': 'file://foo/bar',
            'port': 'package',
            'result_type': 'ALARM',
        }
        with self.assertRaisesRegex(
            ValueError,
            "Invalid message type MessageType.SCAN_STATUS for "
            "ResultMessage. Must be MessageType.RESULT.",
        ):
            ResultMessage.deserialize(data)

    def test_deserialize_invalid_result_type(self):
        """An unknown result_type in the payload is rejected."""
        data = {
            'message_id': '63026767-029d-417e-9148-77f4da49f49a',
            'group_id': '866350e8-1492-497e-b12b-c079287d51dd',
            'created': 1628512774.0,
            'message_type': 'result.scan',
            'scan_id': 'scan_1',
            'host_ip': '1.1.1.1',
            'host_name': 'foo',
            'oid': '1.2.3.4.5',
            'value': 'A Vulnerability has been found',
            'uri': 'file://foo/bar',
            'port': 'package',
            'result_type': 'foo',
        }
        with self.assertRaisesRegex(
            ValueError, "'foo' is not a valid ResultType"
        ):
            ResultMessage.deserialize(data)
ospd-openvas-22.9.0/tests/messaging/ 0000775 0000000 0000000 00000000000 15011310720 0017327 5 ustar 00root root 0000000 0000000 ospd-openvas-22.9.0/tests/messaging/__init__.py 0000664 0000000 0000000 00000000170 15011310720 0021436 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2021-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
ospd-openvas-22.9.0/tests/messaging/test_mqtt.py 0000664 0000000 0000000 00000005723 15011310720 0021734 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2021-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
import time
from datetime import datetime
from uuid import UUID
from unittest import TestCase, mock
from ospd_openvas.messages.result import ResultMessage
from ospd_openvas.messaging.mqtt import (
MQTTDaemon,
MQTTPublisher,
MQTTSubscriber,
)
class MQTTPublisherTestCase(TestCase):
    """Tests for MQTTPublisher against a mocked MQTT client."""

    def test_publish(self):
        """publish() sends the serialized message on the result topic."""
        # The publisher only needs an object exposing publish(); mock it.
        client = mock.MagicMock()
        publisher = MQTTPublisher(client)

        # NOTE(review): fromtimestamp() yields a naive local-time datetime;
        # the expected payload below contains the raw epoch 1628512774.0 --
        # assumes serialization converts it back via timestamp(). Confirm.
        created = datetime.fromtimestamp(1628512774)
        message_id = UUID('63026767-029d-417e-9148-77f4da49f49a')
        group_id = UUID('866350e8-1492-497e-b12b-c079287d51dd')

        message = ResultMessage(
            created=created,
            message_id=message_id,
            group_id=group_id,
            scan_id='scan_1',
            host_ip='1.1.1.1',
            host_name='foo',
            oid='1.2.3.4.5',
            value='A Vulnerability has been found',
            port='42',
            uri='file://foo/bar',
        )

        publisher.publish(message)

        # Full JSON payload published on the fixed topic with qos=1.
        client.publish.assert_called_with(
            'scanner/scan/info',
            '{"message_id": "63026767-029d-417e-9148-77f4da49f49a", '
            '"message_type": "result.scan", '
            '"group_id": "866350e8-1492-497e-b12b-c079287d51dd", '
            '"created": 1628512774.0, '
            '"scan_id": "scan_1", '
            '"host_ip": "1.1.1.1", '
            '"host_name": "foo", '
            '"oid": "1.2.3.4.5", '
            '"value": "A Vulnerability has been found", '
            '"port": "42", '
            '"uri": "file://foo/bar", '
            '"result_type": "ALARM"}',
            qos=1,
        )
class MQTTSubscriberTestCase(TestCase):
    """Tests for MQTTSubscriber against a mocked MQTT client."""

    def test_subscribe(self):
        fake_client = mock.MagicMock()
        on_message = mock.MagicMock()
        # The subscriber reads the callback's __name__; give the mock one.
        on_message.__name__ = "callback_name"

        subscriber = MQTTSubscriber(fake_client)
        result = ResultMessage(
            scan_id='scan_1',
            host_ip='1.1.1.1',
            host_name='foo',
            oid='1.2.3.4.5',
            value='A Vulnerability has been found',
            uri='file://foo/bar',
        )

        subscriber.subscribe(result, on_message)

        fake_client.subscribe.assert_called_with('scanner/scan/info', qos=1)
class MQTTDaemonTestCase(TestCase):
    """Tests for MQTTDaemon construction and startup."""

    def test_connect(self):
        fake_client = mock.MagicMock()
        # pylint: disable=unused-variable
        daemon = MQTTDaemon(fake_client)

    def test_run(self):
        fake_client = mock.MagicMock(side_effect=1)
        daemon = MQTTDaemon(fake_client)

        started = time.time()
        daemon.run()

        # In some systems the spawn of the thread can take longer than
        # expected, so poll until the client has been touched or ten
        # seconds have elapsed.
        while not fake_client.mock_calls and time.time() - started < 10:
            time.sleep(1)

        fake_client.connect.assert_called_with()
        fake_client.loop_start.assert_called_with()
ospd-openvas-22.9.0/tests/test_argument_parser.py 0000664 0000000 0000000 00000011227 15011310720 0022164 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
"""Test module for command line arguments."""
import unittest
from unittest.mock import patch
from io import StringIO
from pathlib import Path
from typing import List
from ospd.parser import (
DEFAULT_MQTT_BROKER_ADDRESS,
DEFAULT_MQTT_BROKER_PORT,
Arguments,
DEFAULT_ADDRESS,
DEFAULT_PORT,
DEFAULT_KEY_FILE,
DEFAULT_NICENESS,
DEFAULT_SCANINFO_STORE_TIME,
DEFAULT_UNIX_SOCKET_PATH,
DEFAULT_PID_PATH,
DEFAULT_LOCKFILE_DIR_PATH,
)
from ospd_openvas.notus import NotusParser
here = Path(__file__).absolute().parent
class ArgumentParserTestCase(unittest.TestCase):
    """Tests for NotusParser command-line argument handling."""

    def setUp(self):
        self.parser = NotusParser()

    def parse_args(self, args: List[str]) -> Arguments:
        """Helper: run the parser over an argv-style list."""
        return self.parser.parse_arguments(args)

    @patch('sys.stderr', new_callable=StringIO)
    def test_port_interval(self, _mock_stderr):
        """Ports outside 1-65535 exit with an error; valid ports parse."""
        with self.assertRaises(SystemExit):
            self.parse_args(['--port=65536'])

        with self.assertRaises(SystemExit):
            self.parse_args(['--port=0'])

        args = self.parse_args(['--port=3353'])
        self.assertEqual(3353, args.port)

    @patch('sys.stderr', new_callable=StringIO)
    def test_port_as_string(self, _mock_stderr):
        """A non-numeric port exits with an error."""
        with self.assertRaises(SystemExit):
            self.parse_args(['--port=abcd'])

    def test_address_param(self):
        args = self.parse_args('-b 1.2.3.4'.split())
        self.assertEqual('1.2.3.4', args.address)

    def test_correct_lower_case_log_level(self):
        # Log levels are normalized to upper case.
        args = self.parse_args('-L error'.split())
        self.assertEqual('ERROR', args.log_level)

    def test_correct_upper_case_log_level(self):
        args = self.parse_args('-L INFO'.split())
        self.assertEqual('INFO', args.log_level)

    @patch('sys.stderr', new_callable=StringIO)
    def test_correct_log_level(self, _mock_stderr):
        """An unknown log level exits with an error."""
        with self.assertRaises(SystemExit):
            self.parse_args('-L blah'.split())

    def test_non_existing_key(self):
        # The key file path is accepted as-is, even if it does not exist.
        args = self.parse_args('-k foo'.split())
        self.assertEqual('foo', args.key_file)

    def test_existing_key(self):
        args = self.parse_args('-k /etc/passwd'.split())
        self.assertEqual('/etc/passwd', args.key_file)

    def test_disable_notus_hashsum_verification(self):
        args = self.parse_args(
            '--disable-notus-hashsum-verification true'.split()
        )
        self.assertEqual(args.disable_notus_hashsum_verification, True)

    def test_defaults(self):
        """With no arguments, every option falls back to its default."""
        args = self.parse_args([])

        self.assertIsNone(args.config)
        self.assertEqual(args.key_file, DEFAULT_KEY_FILE)
        self.assertEqual(args.niceness, DEFAULT_NICENESS)
        self.assertEqual(args.log_level, 'INFO')
        self.assertEqual(args.address, DEFAULT_ADDRESS)
        self.assertEqual(args.port, DEFAULT_PORT)
        self.assertEqual(args.scaninfo_store_time, DEFAULT_SCANINFO_STORE_TIME)
        self.assertEqual(args.unix_socket, DEFAULT_UNIX_SOCKET_PATH)
        self.assertEqual(args.pid_file, DEFAULT_PID_PATH)
        self.assertEqual(args.lock_file_dir, DEFAULT_LOCKFILE_DIR_PATH)
        self.assertEqual(args.mqtt_broker_address, DEFAULT_MQTT_BROKER_ADDRESS)
        self.assertEqual(args.mqtt_broker_port, DEFAULT_MQTT_BROKER_PORT)
        self.assertEqual(args.disable_notus_hashsum_verification, False)
class ArgumentParserConfigTestCase(unittest.TestCase):
    """Tests for loading parser defaults from a config file."""

    def setUp(self):
        self.parser = NotusParser()

    def parse_args(self, args: List[str]) -> Arguments:
        """Helper: run the parser over an argv-style list."""
        return self.parser.parse_arguments(args)

    def test_using_config(self):
        """Values from tests/testing.conf override the built-in defaults."""
        config_file = str(here / 'testing.conf')
        args = self.parse_args(['--config', config_file])

        self.assertEqual(args.key_file, '/foo/key.pem')
        self.assertEqual(args.niceness, 666)
        self.assertEqual(args.log_level, 'DEBUG')
        self.assertEqual(args.address, '6.6.6.6')
        self.assertEqual(args.port, 6666)
        self.assertEqual(args.scaninfo_store_time, 123)
        self.assertEqual(args.config, config_file)
        self.assertEqual(args.unix_socket, '/foo/ospd-openvas.sock')
        self.assertEqual(args.pid_file, '/foo/ospd-openvas.pid')
        self.assertEqual(args.lock_file_dir, '/foo/openvas')
        self.assertEqual(args.mqtt_broker_address, 'foo.bar.com')
        self.assertEqual(args.mqtt_broker_port, 1234)
        self.assertEqual(args.notus_feed_dir, '/foo/advisories')

    @patch('sys.stderr', new_callable=StringIO)
    def test_not_existing_config(self, _mock):
        """A missing config file exits with an error."""
        with self.assertRaises(SystemExit):
            self.parse_args(['--config', 'foo.conf'])
ospd-openvas-22.9.0/tests/test_cvss.py 0000664 0000000 0000000 00000001755 15011310720 0017751 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
"""Test module for cvss scoring calculation"""
import unittest
from ospd.cvss import CVSS
class CvssTestCase(unittest.TestCase):
    """Checks CVSS v2/v3 base-score computation for full and partial vectors."""

    def test_cvssv2(self):
        score = CVSS.cvss_base_v2_value('AV:A/AC:L/Au:S/C:P/I:P/A:P')
        self.assertEqual(score, 5.2)

    def test_cvssv3(self):
        score = CVSS.cvss_base_v3_value(
            'CVSS:3.0/AV:N/AC:L/PR:H/UI:N/S:U/C:L/I:L/A:N'
        )
        self.assertEqual(score, 3.8)

    def test_cvssv2_optional_metrics(self):
        # A v2 vector carrying an extra optional metric yields no base score.
        score = CVSS.cvss_base_v2_value('AV:A/AC:L/Au:S/C:P/I:P/A:P/E:F')
        self.assertEqual(score, None)

    def test_cvssv3_optional_metrics(self):
        # Same for a v3 vector with an extra optional metric.
        score = CVSS.cvss_base_v3_value(
            'CVSS:3.0/AV:N/AC:L/PR:H/UI:N/S:U/C:L/I:L/A:N/E:X'
        )
        self.assertEqual(score, None)
ospd-openvas-22.9.0/tests/test_daemon.py 0000664 0000000 0000000 00000056412 15011310720 0020236 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
# pylint: disable=invalid-name,line-too-long,no-value-for-parameter
"""Unit Test for ospd-openvas"""
import io
import logging
from pathlib import Path
from unittest import TestCase
from unittest.mock import patch, Mock, MagicMock
from ospd.protocol import OspRequest
from tests.dummydaemon import DummyDaemon
from tests.helper import assert_called_once
from ospd_openvas.daemon import (
OSPD_PARAMS,
OpenVasVtsFilter,
)
from ospd_openvas.openvas import Openvas
from ospd_openvas.notus import Notus, hashsum_verificator
OSPD_PARAMS_OUT = {
'auto_enable_dependencies': {
'type': 'boolean',
'name': 'auto_enable_dependencies',
'default': 1,
'mandatory': 1,
'visible_for_client': True,
'description': 'Automatically enable the plugins that are depended on',
},
'cgi_path': {
'type': 'string',
'name': 'cgi_path',
'default': '/cgi-bin:/scripts',
'mandatory': 1,
'visible_for_client': True,
'description': 'Look for default CGIs in /cgi-bin and /scripts',
},
'checks_read_timeout': {
'type': 'integer',
'name': 'checks_read_timeout',
'default': 5,
'mandatory': 1,
'visible_for_client': True,
'description': (
'Number of seconds that the security checks will '
+ 'wait for when doing a recv()'
),
},
'non_simult_ports': {
'type': 'string',
'name': 'non_simult_ports',
'default': '139, 445, 3389, Services/irc',
'mandatory': 1,
'visible_for_client': True,
'description': (
'Prevent to make two connections on the same given '
+ 'ports at the same time.'
),
},
'open_sock_max_attempts': {
'type': 'integer',
'name': 'open_sock_max_attempts',
'default': 5,
'mandatory': 0,
'visible_for_client': True,
'description': (
'Number of unsuccessful retries to open the socket '
+ 'before to set the port as closed.'
),
},
'timeout_retry': {
'type': 'integer',
'name': 'timeout_retry',
'default': 5,
'mandatory': 0,
'visible_for_client': True,
'description': (
'Number of retries when a socket connection attempt ' + 'timesout.'
),
},
'optimize_test': {
'type': 'boolean',
'name': 'optimize_test',
'default': 1,
'mandatory': 0,
'visible_for_client': True,
'description': (
'By default, optimize_test is enabled which means openvas does '
+ 'trust the remote host banners and is only launching plugins '
+ 'against the services they have been designed to check. '
+ 'For example it will check a web server claiming to be IIS only '
+ 'for IIS related flaws but will skip plugins testing for Apache '
+ 'flaws, and so on. This default behavior is used to optimize '
+ 'the scanning performance and to avoid false positives. '
+ 'If you are not sure that the banners of the remote host '
+ 'have been tampered with, you can disable this option.'
),
},
'plugins_timeout': {
'type': 'integer',
'name': 'plugins_timeout',
'default': 5,
'mandatory': 0,
'visible_for_client': True,
'description': 'This is the maximum lifetime, in seconds of a plugin.',
},
'report_host_details': {
'type': 'boolean',
'name': 'report_host_details',
'default': 1,
'mandatory': 1,
'visible_for_client': True,
'description': '',
},
'safe_checks': {
'type': 'boolean',
'name': 'safe_checks',
'default': 1,
'mandatory': 1,
'visible_for_client': True,
'description': (
'Disable the plugins with potential to crash '
+ 'the remote services'
),
},
'scanner_plugins_timeout': {
'type': 'integer',
'name': 'scanner_plugins_timeout',
'default': 36000,
'mandatory': 1,
'visible_for_client': True,
'description': 'Like plugins_timeout, but for ACT_SCANNER plugins.',
},
'time_between_request': {
'type': 'integer',
'name': 'time_between_request',
'default': 0,
'mandatory': 0,
'visible_for_client': True,
'description': (
'Allow to set a wait time between two actions '
+ '(open, send, close).'
),
},
'unscanned_closed': {
'type': 'boolean',
'name': 'unscanned_closed',
'default': 1,
'mandatory': 1,
'visible_for_client': True,
'description': '',
},
'unscanned_closed_udp': {
'type': 'boolean',
'name': 'unscanned_closed_udp',
'default': 1,
'mandatory': 1,
'visible_for_client': True,
'description': '',
},
'expand_vhosts': {
'type': 'boolean',
'name': 'expand_vhosts',
'default': 1,
'mandatory': 0,
'visible_for_client': True,
'description': 'Whether to expand the target hosts '
+ 'list of vhosts with values gathered from sources '
+ 'such as reverse-lookup queries and VT checks '
+ 'for SSL/TLS certificates.',
},
'test_empty_vhost': {
'type': 'boolean',
'name': 'test_empty_vhost',
'default': 0,
'mandatory': 0,
'visible_for_client': True,
'description': 'If set to yes, the scanner will '
+ 'also test the target by using empty vhost value '
+ 'in addition to the targets associated vhost values.',
},
'max_hosts': {
'type': 'integer',
'name': 'max_hosts',
'default': 30,
'mandatory': 0,
'visible_for_client': False,
'description': (
'The maximum number of hosts to test at the same time which '
+ 'should be given to the client (which can override it). '
+ 'This value must be computed given your bandwidth, '
+ 'the number of hosts you want to test, your amount of '
+ 'memory and the performance of your processor(s).'
),
},
'max_checks': {
'type': 'integer',
'name': 'max_checks',
'default': 10,
'mandatory': 0,
'visible_for_client': False,
'description': (
'The number of plugins that will run against each host being '
+ 'tested. Note that the total number of process will be max '
+ 'checks x max_hosts so you need to find a balance between '
+ 'these two options. Note that launching too many plugins at '
+ 'the same time may disable the remote host, either temporarily '
+ '(ie: inetd closes its ports) or definitely (the remote host '
+ 'crash because it is asked to do too many things at the '
+ 'same time), so be careful.'
),
},
'port_range': {
'type': 'string',
'name': 'port_range',
'default': '',
'mandatory': 0,
'visible_for_client': False,
'description': (
'This is the default range of ports that the scanner plugins will '
+ 'probe. The syntax of this option is flexible, it can be a '
+ 'single range ("1-1500"), several ports ("21,23,80"), several '
+ 'ranges of ports ("1-1500,32000-33000"). Note that you can '
+ 'specify UDP and TCP ports by prefixing each range by T or U. '
+ 'For instance, the following range will make openvas scan UDP '
+ 'ports 1 to 1024 and TCP ports 1 to 65535 : '
+ '"T:1-65535,U:1-1024".'
),
},
'alive_test_ports': {
'type': 'string',
'name': 'alive_test_ports',
'default': '21-23,25,53,80,110-111,135,139,143,443,445,'
+ '993,995,1723,3306,3389,5900,8080',
'mandatory': 0,
'visible_for_client': True,
'description': ('Port list used for host alive detection.'),
},
'test_alive_hosts_only': {
'type': 'boolean',
'name': 'test_alive_hosts_only',
'default': 0,
'mandatory': 0,
'visible_for_client': False,
'description': (
'If this option is set, openvas will scan the target list for '
+ 'alive hosts in a separate process while only testing those '
+ 'hosts which are identified as alive. This boosts the scan '
+ 'speed of target ranges with a high amount of dead hosts '
+ 'significantly.'
),
},
'test_alive_wait_timeout': {
'type': 'integer',
'name': 'test_alive_wait_timeout',
'default': 1,
'mandatory': 0,
'visible_for_client': True,
'description': (
'This is the default timeout to wait for replies after last '
+ 'packet was sent.'
),
},
'hosts_allow': {
'type': 'string',
'name': 'hosts_allow',
'default': '',
'mandatory': 0,
'visible_for_client': False,
'description': (
'Comma-separated list of the only targets that are authorized '
+ 'to be scanned. Supports the same syntax as the list targets. '
+ 'Both target hostnames and the address to which they resolve '
+ 'are checked. Hostnames in hosts_allow list are not resolved '
+ 'however.'
),
},
'hosts_deny': {
'type': 'string',
'name': 'hosts_deny',
'default': '',
'mandatory': 0,
'visible_for_client': False,
'description': (
'Comma-separated list of targets that are not authorized to '
+ 'be scanned. Supports the same syntax as the list targets. '
+ 'Both target hostnames and the address to which they resolve '
+ 'are checked. Hostnames in hosts_deny list are not '
+ 'resolved however.'
),
},
'results_per_host': {
'type': 'integer',
'name': 'results_per_host',
'default': 10,
'mandatory': 0,
'visible_for_client': True,
'description': (
'Amount of fake results generated per each host in the target '
+ 'list for a dry run scan.'
),
},
'table_driven_lsc': {
'type': 'boolean',
'name': 'table_driven_lsc',
'default': 1,
'mandatory': 0,
'visible_for_client': True,
'description': (
'If this option is enabled a scanner for table_driven_lsc will '
+ 'scan package results.'
),
},
'max_mem_kb': {
'type': 'integer',
'name': 'max_mem_kb',
'default': 0,
'mandatory': 0,
'visible_for_client': True,
'description': (
'Maximum amount of memory (in MB) allowed to use for a single '
+ 'script. If this value is set, the amount of memory put into '
+ 'redis is tracked for every Script. If the amount of memory '
+ 'exceeds this limit, the script is not able to set more kb '
+ 'items. The tracked the value written into redis is only '
+ 'estimated, as it does not check, if a value was replaced or '
+ 'appended. The size of the key is also not tracked. If this '
+ 'value is not set or <= 0, the maximum amount is unlimited '
+ '(Default).'
),
},
}
class TestOspdOpenvas(TestCase):
    """Tests for the OSPDopenvas daemon wrapper.

    NOTE(review): relies on ``DummyDaemon`` and helpers imported earlier in
    this file; external collaborators (the ``Openvas`` command wrapper, the
    redis-backed DBs and the result lists) are replaced by mocks throughout.
    """

    def test_return_disabled_verifier(self):
        # With the disable flag set, hashsum_verificator returns a verifier
        # that accepts any path without checking the feed hashsums.
        verifier = hashsum_verificator(Path('/tmp'), True)
        self.assertEqual(verifier(Path('/tmp')), True)

    @patch('ospd_openvas.daemon.Openvas')
    def test_set_params_from_openvas_settings(self, mock_openvas: Openvas):
        # Settings reported by the openvas binary are merged into the
        # module-level OSPD_PARAMS and into the daemon's scan-only params.
        mock_openvas.get_settings.return_value = {
            'non_simult_ports': '139, 445, 3389, Services/irc',
            'plugins_folder': '/foo/bar',
        }
        w = DummyDaemon()
        w.set_params_from_openvas_settings()
        self.assertEqual(mock_openvas.get_settings.call_count, 1)
        self.assertEqual(OSPD_PARAMS, OSPD_PARAMS_OUT)
        self.assertEqual(w.scan_only_params.get('plugins_folder'), '/foo/bar')

    @patch('ospd_openvas.daemon.Openvas')
    def test_sudo_available(self, mock_openvas):
        # When not running as root, sudo availability is resolved lazily
        # through Openvas.check_sudo().
        mock_openvas.check_sudo.return_value = True
        w = DummyDaemon()
        w._sudo_available = None  # pylint: disable=protected-access
        w._is_running_as_root = False  # pylint: disable=protected-access
        self.assertTrue(w.sudo_available)

    def test_update_vts(self):
        # update_vts() must reload the Notus cache exactly once.
        daemon = DummyDaemon()
        daemon.notus = MagicMock(spec=Notus)
        daemon.update_vts()
        self.assertEqual(daemon.notus.reload_cache.call_count, 1)

    @patch('ospd_openvas.daemon.Path.exists')
    @patch('ospd_openvas.daemon.Path.open')
    def test_get_feed_info(
        self,
        mock_path_open: MagicMock,
        mock_path_exists: MagicMock,
    ):
        # A plugin_feed_info style file is parsed into a key/value dict.
        read_data = 'PLUGIN_SET = "1235";'
        mock_path_exists.return_value = True
        mock_read = MagicMock(name='Path open context manager')
        mock_read.__enter__ = MagicMock(return_value=io.StringIO(read_data))
        mock_path_open.return_value = mock_read
        w = DummyDaemon()
        # Return True
        w.scan_only_params['plugins_folder'] = '/foo/bar'
        ret = w.get_feed_info()
        self.assertEqual(ret, {"PLUGIN_SET": "1235"})
        self.assertEqual(mock_path_exists.call_count, 1)
        self.assertEqual(mock_path_open.call_count, 1)

    @patch('ospd_openvas.daemon.Path.exists')
    @patch('ospd_openvas.daemon.OSPDopenvas.set_params_from_openvas_settings')
    def test_get_feed_info_none(
        self, mock_set_params: MagicMock, mock_path_exists: MagicMock
    ):
        # If the feed info file does not exist, an empty dict is returned
        # and the openvas settings are (re)fetched.
        w = DummyDaemon()
        w.scan_only_params['plugins_folder'] = '/foo/bar'
        # Return None
        mock_path_exists.return_value = False
        ret = w.get_feed_info()
        self.assertEqual(ret, {})
        self.assertEqual(mock_set_params.call_count, 1)
        self.assertEqual(mock_path_exists.call_count, 1)

    @patch('ospd_openvas.daemon.Path.exists')
    @patch('ospd_openvas.daemon.Path.open')
    def test_feed_is_outdated_true(
        self,
        mock_path_open: MagicMock,
        mock_path_exists: MagicMock,
    ):
        # Feed version 1235 on disk is newer than the cached 1234.
        read_data = 'PLUGIN_SET = "1235";'
        mock_path_exists.return_value = True
        mock_read = MagicMock(name='Path open context manager')
        mock_read.__enter__ = MagicMock(return_value=io.StringIO(read_data))
        mock_path_open.return_value = mock_read
        w = DummyDaemon()
        # Return True
        w.scan_only_params['plugins_folder'] = '/foo/bar'
        ret = w.feed_is_outdated('1234')
        self.assertTrue(ret)
        self.assertEqual(mock_path_exists.call_count, 1)
        self.assertEqual(mock_path_open.call_count, 1)

    @patch('ospd_openvas.daemon.Path.exists')
    @patch('ospd_openvas.daemon.Path.open')
    def test_feed_is_outdated_false(
        self,
        mock_path_open: MagicMock,
        mock_path_exists: MagicMock,
    ):
        # Identical feed versions must not be reported as outdated.
        mock_path_exists.return_value = True
        read_data = 'PLUGIN_SET = "1234"'
        mock_path_exists.return_value = True
        mock_read = MagicMock(name='Path open context manager')
        mock_read.__enter__ = MagicMock(return_value=io.StringIO(read_data))
        mock_path_open.return_value = mock_read
        w = DummyDaemon()
        w.scan_only_params['plugins_folder'] = '/foo/bar'
        ret = w.feed_is_outdated('1234')
        self.assertFalse(ret)
        self.assertEqual(mock_path_exists.call_count, 1)
        self.assertEqual(mock_path_open.call_count, 1)

    def test_check_feed_cache_unavailable(self):
        # NOTE(review): this test never invokes a daemon method after the
        # setup, so it only asserts that the fresh Mock has not been called.
        # Verify whether a call such as a feed check was intended here.
        w = DummyDaemon()
        w.vts.is_cache_available = False
        w.feed_is_outdated = Mock()
        w.feed_is_outdated.assert_not_called()

    @patch('ospd_openvas.daemon.BaseDB')
    @patch('ospd_openvas.daemon.ResultList.add_scan_log_to_list')
    def test_get_openvas_result(self, mock_add_scan_log_to_list, MockDBClass):
        # A LOG result line from the KB is split into its fields and
        # forwarded to ResultList.add_scan_log_to_list.
        w = DummyDaemon()
        target_element = w.create_xml_target()
        targets = OspRequest.process_target_element(target_element)
        w.create_scan('123-456', targets, None, [])
        results = [
            "LOG|||192.168.0.1|||localhost|||general/Host_Details||||||Host"
            " dead",
        ]
        MockDBClass.get_result.return_value = results
        mock_add_scan_log_to_list.return_value = None
        w.report_openvas_results(MockDBClass, '123-456')
        mock_add_scan_log_to_list.assert_called_with(
            host='192.168.0.1',
            hostname='localhost',
            name='',
            port='general/Host_Details',
            qod='',
            test_id='',
            uri='',
            value='Host dead',
        )

    @patch('ospd_openvas.daemon.BaseDB')
    @patch('ospd_openvas.daemon.ResultList.add_scan_error_to_list')
    def test_get_openvas_result_host_deny(
        self, mock_add_scan_error_to_list, MockDBClass
    ):
        # An ERRMSG result is reported as a scan error.
        w = DummyDaemon()
        target_element = w.create_xml_target()
        targets = OspRequest.process_target_element(target_element)
        w.create_scan('123-456', targets, None, [])
        results = [
            "ERRMSG|||127.0.0.1|||localhost|||||||||Host access denied.",
        ]
        MockDBClass.get_result.return_value = results
        mock_add_scan_error_to_list.return_value = None
        w.report_openvas_results(MockDBClass, '123-456')
        mock_add_scan_error_to_list.assert_called_with(
            host='127.0.0.1',
            hostname='localhost',
            name='',
            port='',
            test_id='',
            uri='',
            value='Host access denied.',
        )

    @patch('ospd_openvas.daemon.BaseDB')
    def test_get_openvas_result_dead_hosts(self, MockDBClass):
        # A DEADHOST result updates the scan collection's dead host count.
        w = DummyDaemon()
        target_element = w.create_xml_target()
        targets = OspRequest.process_target_element(target_element)
        w.create_scan('123-456', targets, None, [])
        results = [
            "DEADHOST||| ||| ||| ||| |||4",
        ]
        MockDBClass.get_result.return_value = results
        w.scan_collection.set_amount_dead_hosts = MagicMock()
        w.report_openvas_results(MockDBClass, '123-456')
        w.scan_collection.set_amount_dead_hosts.assert_called_with(
            '123-456',
            total_dead=4,
        )

    @patch('ospd_openvas.daemon.BaseDB')
    @patch('ospd_openvas.daemon.ResultList.add_scan_log_to_list')
    def test_get_openvas_result_host_start(
        self, mock_add_scan_log_to_list, MockDBClass
    ):
        # A HOST_START result is logged with the start timestamp as value.
        w = DummyDaemon()
        target_element = w.create_xml_target()
        targets = OspRequest.process_target_element(target_element)
        w.create_scan('123-456', targets, None, [])
        results = [
            "HOST_START|||192.168.10.124||| ||| ||||||today 1",
        ]
        MockDBClass.get_result.return_value = results
        mock_add_scan_log_to_list.return_value = None
        w.report_openvas_results(MockDBClass, '123-456')
        mock_add_scan_log_to_list.assert_called_with(
            host='192.168.10.124',
            name='HOST_START',
            value='today 1',
        )

    @patch('ospd_openvas.daemon.BaseDB')
    def test_get_openvas_result_hosts_count(self, MockDBClass):
        # A HOSTS_COUNT result sets the total amount of hosts for the scan.
        w = DummyDaemon()
        target_element = w.create_xml_target()
        targets = OspRequest.process_target_element(target_element)
        w.create_scan('123-456', targets, None, [])
        results = [
            "HOSTS_COUNT||| ||| ||| ||| |||4",
        ]
        MockDBClass.get_result.return_value = results
        w.set_scan_total_hosts = MagicMock()
        w.report_openvas_results(MockDBClass, '123-456')
        w.set_scan_total_hosts.assert_called_with(
            '123-456',
            4,
        )

    @patch('ospd_openvas.daemon.BaseDB')
    @patch('ospd_openvas.daemon.ResultList.add_scan_alarm_to_list')
    def test_result_without_vt_oid(
        self, mock_add_scan_alarm_to_list, MockDBClass
    ):
        # An ALARM result without a VT OID must only produce a warning.
        # NOTE(review): logging.Logger.warning is monkeypatched globally and
        # never restored, which can leak into other tests in the same run.
        w = DummyDaemon()
        logging.Logger.warning = Mock()
        target_element = w.create_xml_target()
        targets = OspRequest.process_target_element(target_element)
        w.create_scan('123-456', targets, None, [])
        w.scan_collection.scans_table['123-456']['results'] = list()
        results = ["ALARM||| ||| ||| ||| |||some alarm|||path", None]
        MockDBClass.get_result.return_value = results
        mock_add_scan_alarm_to_list.return_value = None
        w.report_openvas_results(MockDBClass, '123-456')
        assert_called_once(logging.Logger.warning)

    @patch('psutil.Popen')
    def test_openvas_is_alive_already_stopped(self, mock_process):
        # A process reported as running is considered alive.
        w = DummyDaemon()
        mock_process.is_running.return_value = True
        ret = w.is_openvas_process_alive(mock_process)
        self.assertTrue(ret)

    @patch('psutil.Popen')
    def test_openvas_is_alive_still(self, mock_process):
        # A process no longer running is considered dead.
        w = DummyDaemon()
        mock_process.is_running.return_value = False
        ret = w.is_openvas_process_alive(mock_process)
        self.assertFalse(ret)

    @patch('ospd_openvas.daemon.OSPDaemon.set_scan_progress_batch')
    @patch('ospd_openvas.daemon.OSPDaemon.sort_host_finished')
    @patch('ospd_openvas.db.KbDB')
    def test_report_openvas_scan_status(
        self, mock_db, mock_sort_host_finished, mock_set_scan_progress_batch
    ):
        # Status strings 'host/launched/total' are converted into host
        # progress percentages; finished (100%) and dead (-1) hosts are
        # additionally passed to sort_host_finished.
        w = DummyDaemon()
        mock_set_scan_progress_batch.return_value = None
        mock_sort_host_finished.return_value = None
        mock_db.get_scan_status.return_value = [
            '192.168.0.1/15/1000',
            '192.168.0.2/15/0',
            '192.168.0.3/15/-1',
            '192.168.0.4/1500/1500',
        ]
        target_element = w.create_xml_target()
        targets = OspRequest.process_target_element(target_element)
        w.create_scan('123-456', targets, None, [])
        w.report_openvas_scan_status(mock_db, '123-456')
        mock_set_scan_progress_batch.assert_called_with(
            '123-456',
            host_progress={
                '192.168.0.1': 1,
                '192.168.0.3': -1,
                '192.168.0.4': 100,
            },
        )
        mock_sort_host_finished.assert_called_with(
            '123-456', ['192.168.0.3', '192.168.0.4']
        )
class TestFilters(TestCase):
    """Tests for the OpenVAS specific VT filter."""

    def test_format_vt_modification_time(self):
        # Epoch seconds are rendered as a YYYYMMDDhhmmss timestamp string.
        vts_filter = OpenVasVtsFilter(None, None)
        formatted = vts_filter.format_vt_modification_time('1517443741')
        self.assertEqual(formatted, "20180201000901")

    def test_get_filtered_vts_false(self):
        # A filter that matches nothing drops the known OID from the result.
        daemon = DummyDaemon()
        vts_filter = OpenVasVtsFilter(daemon.nvti, None)
        filtered = vts_filter.get_filtered_vts_list(
            ['1234', '1.3.6.1.4.1.25623.1.0.100061'], "modification_time<10"
        )
        self.assertNotIn('1.3.6.1.4.1.25623.1.0.100061', filtered)

    def test_get_filtered_vts_true(self):
        # A matching filter keeps the known OID in the result.
        daemon = DummyDaemon()
        vts_filter = OpenVasVtsFilter(daemon.nvti, None)
        filtered = vts_filter.get_filtered_vts_list(
            ['1234', '1.3.6.1.4.1.25623.1.0.100061'], "modification_time>10"
        )
        self.assertIn('1.3.6.1.4.1.25623.1.0.100061', filtered)
ospd-openvas-22.9.0/tests/test_datapickler.py 0000664 0000000 0000000 00000007061 15011310720 0021252 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
import pickle
from pathlib import Path
from hashlib import sha256
from unittest import TestCase
from unittest.mock import patch
from ospd.errors import OspdCommandError
from ospd.datapickler import DataPickler
from .helper import assert_called_once
class DataPecklerTestCase(TestCase):
    """Behavior tests for the DataPickler persistence helper."""

    def test_store_data(self):
        # Storing returns the sha256 hexdigest of the pickled payload.
        payload = {'foo', 'bar'}
        expected = sha256()
        expected.update(pickle.dumps(payload))
        pickler = DataPickler('/tmp')
        digest = pickler.store_data('scan_info_1', payload)
        self.assertEqual(digest, expected.hexdigest())
        pickler.remove_file('scan_info_1')

    def test_store_data_failed(self):
        # Writing below an unwritable directory raises OspdCommandError.
        pickler = DataPickler('/root')
        self.assertRaises(
            OspdCommandError, pickler.store_data, 'scan_info_1', {'foo', 'bar'}
        )

    def test_store_data_check_permission(self):
        # Stored files must be readable/writable by the owner only (0600).
        expected_mode = '0o100600'
        pickler = DataPickler('/tmp')
        pickler.store_data('scan_info_1', {'foo', 'bar'})
        stored = (
            Path(pickler._storage_path)  # pylint: disable=protected-access
            / 'scan_info_1'
        )
        self.assertEqual(oct(stored.stat().st_mode), expected_mode)
        pickler.remove_file('scan_info_1')

    def test_load_data(self):
        # Loading with the matching checksum round-trips the payload.
        pickler = DataPickler('/tmp')
        payload = {'foo', 'bar'}
        hasher = sha256()
        hasher.update(pickle.dumps(payload))
        checksum = hasher.hexdigest()
        self.assertEqual(pickler.store_data('scan_info_1', payload), checksum)
        restored = pickler.load_data('scan_info_1', checksum)
        self.assertIsNotNone(restored)
        self.assertIn('foo', restored)

    @patch("ospd.datapickler.logger")
    def test_remove_file_failed(self, mock_logger):
        # Removing a missing file logs an error instead of raising.
        DataPickler('/root').remove_file('inenxistent_file')
        assert_called_once(mock_logger.error)

    @patch("ospd.datapickler.logger")
    def test_load_data_no_file(self, mock_logger):
        # Loading a file that was never stored logs an error and yields None.
        pickler = DataPickler('/tmp')
        loaded = pickler.load_data('scan_info_1', "1234")
        assert_called_once(mock_logger.error)
        self.assertIsNone(loaded)
        pickler.remove_file('scan_info_1')

    def test_load_data_corrupted(self):
        # A checksum mismatch after on-disk corruption must yield None.
        pickler = DataPickler('/tmp')
        payload = {'foo', 'bar'}
        hasher = sha256()
        hasher.update(pickle.dumps(payload))
        checksum = hasher.hexdigest()
        self.assertEqual(pickler.store_data('scan_info_1', payload), checksum)
        # Append junk bytes so the stored pickle no longer matches its hash.
        victim = (
            Path(pickler._storage_path)  # pylint: disable=protected-access
            / 'scan_info_1'
        )
        with victim.open('ab') as handle:
            handle.write(b'bar2')
        self.assertIsNone(pickler.load_data('scan_info_1', checksum))
        pickler.remove_file('scan_info_1')
ospd-openvas-22.9.0/tests/test_db.py 0000664 0000000 0000000 00000060103 15011310720 0017350 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
# pylint: disable=unused-argument
"""Unit Test for ospd-openvas"""
import logging
from unittest import TestCase
from unittest.mock import patch, MagicMock
from redis.exceptions import ConnectionError as RCE
from ospd.errors import RequiredArgument
from ospd_openvas.db import OpenvasDB, MainDB, ScanDB, KbDB, DBINDEX_NAME, time
from ospd_openvas.errors import OspdOpenvasError
from tests.helper import assert_called
@patch('ospd_openvas.db.redis.Redis')
class TestOpenvasDB(TestCase):
    """Tests for the low level OpenvasDB redis helper.

    The class level patch replaces the redis client; every test method
    therefore receives ``mock_redis`` as its *last* positional argument,
    after any method level patch arguments.
    """

    @patch('ospd_openvas.db.Openvas')
    def test_get_db_connection(
        self, mock_openvas: MagicMock, mock_redis: MagicMock
    ):
        # Reset the cached address so the lookup logic is exercised.
        OpenvasDB._db_address = None  # pylint: disable=protected-access
        mock_settings = mock_openvas.get_settings.return_value
        mock_settings.get.return_value = None
        self.assertIsNone(OpenvasDB.get_database_address())
        # set the first time
        mock_openvas.get_settings.return_value = {'db_address': '/foo/bar'}
        self.assertEqual(OpenvasDB.get_database_address(), "unix:///foo/bar")
        self.assertEqual(mock_openvas.get_settings.call_count, 2)
        # should cache address
        self.assertEqual(OpenvasDB.get_database_address(), "unix:///foo/bar")
        self.assertEqual(mock_openvas.get_settings.call_count, 2)

    @patch('ospd_openvas.db.Openvas')
    def test_create_context_fail(self, mock_openvas: MagicMock, mock_redis):
        # A redis connection error must be logged and end in SystemExit.
        mock_redis.from_url.side_effect = RCE
        mock_check = mock_openvas.check.return_value
        mock_check.get.return_value = True
        OpenvasDB._db_address = None  # pylint: disable=protected-access
        mock_settings = mock_openvas.get_settings.return_value
        mock_settings.get.return_value = None
        logging.Logger.error = MagicMock()
        # Skip the retry sleeps to keep the test fast.
        with patch.object(time, 'sleep', return_value=None):
            with self.assertRaises(SystemExit):
                OpenvasDB.create_context()
        logging.Logger.error.assert_called_with(  # pylint: disable=no-member
            'Redis Error: Not possible to connect to the kb.'
        )

    @patch('ospd_openvas.db.Openvas')
    def test_create_context_success(self, mock_openvas: MagicMock, mock_redis):
        # On success the context from redis.from_url is returned as-is.
        ctx = mock_redis.from_url.return_value
        mock_check = mock_openvas.check.return_value
        mock_check.get.return_value = True
        OpenvasDB._db_address = None  # pylint: disable=protected-access
        mock_settings = mock_openvas.get_settings.return_value
        mock_settings.get.return_value = None
        ret = OpenvasDB.create_context()
        self.assertIs(ret, ctx)

    def test_select_database_error(self, mock_redis):
        # Both the context and the database index are mandatory.
        with self.assertRaises(RequiredArgument):
            OpenvasDB.select_database(None, 1)
        with self.assertRaises(RequiredArgument):
            OpenvasDB.select_database(mock_redis, None)

    def test_select_database(self, mock_redis):
        # Selection is issued as a raw redis SELECT command.
        mock_redis.execute_command.return_value = mock_redis
        OpenvasDB.select_database(mock_redis, 1)
        mock_redis.execute_command.assert_called_with('SELECT 1')

    def test_get_list_item_error(self, mock_redis):
        ctx = mock_redis.from_url.return_value
        with self.assertRaises(RequiredArgument):
            OpenvasDB.get_list_item(None, 'foo')
        with self.assertRaises(RequiredArgument):
            OpenvasDB.get_list_item(ctx, None)

    def test_get_list_item(self, mock_redis):
        # get_list_item is backed by LRANGE.
        ctx = mock_redis.from_url.return_value
        ctx.lrange.return_value = ['1234']
        ret = OpenvasDB.get_list_item(ctx, 'name')
        self.assertEqual(ret, ['1234'])
        assert_called(ctx.lrange)

    def test_get_last_list_item(self, mock_redis):
        # get_last_list_item pops from the tail via RPOP.
        ctx = mock_redis.from_url.return_value
        ctx.rpop.return_value = 'foo'
        ret = OpenvasDB.get_last_list_item(ctx, 'name')
        self.assertEqual(ret, 'foo')
        ctx.rpop.assert_called_with('name')

    def test_get_last_list_item_error(self, mock_redis):
        ctx = mock_redis.from_url.return_value
        with self.assertRaises(RequiredArgument):
            OpenvasDB.get_last_list_item(ctx, None)
        with self.assertRaises(RequiredArgument):
            OpenvasDB.get_last_list_item(None, 'name')

    def test_remove_list_item(self, mock_redis):
        # All occurrences (count=0) of the value are removed via LREM.
        ctx = mock_redis.from_url.return_value
        ctx.lrem.return_value = 1
        OpenvasDB.remove_list_item(ctx, 'name', '1234')
        ctx.lrem.assert_called_once_with('name', count=0, value='1234')

    def test_remove_list_item_error(self, mock_redis):
        ctx = mock_redis.from_url.return_value
        with self.assertRaises(RequiredArgument):
            OpenvasDB.remove_list_item(None, '1', 'bar')
        with self.assertRaises(RequiredArgument):
            OpenvasDB.remove_list_item(ctx, None, 'bar')
        with self.assertRaises(RequiredArgument):
            OpenvasDB.remove_list_item(ctx, '1', None)

    def test_get_single_item_error(self, mock_redis):
        ctx = mock_redis.from_url.return_value
        with self.assertRaises(RequiredArgument):
            OpenvasDB.get_single_item(None, 'foo')
        with self.assertRaises(RequiredArgument):
            OpenvasDB.get_single_item(ctx, None)

    def test_get_single_item(self, mock_redis):
        # A single item is the head of the list (LINDEX 0).
        ctx = mock_redis.from_url.return_value
        ctx.lindex.return_value = 'a'
        value = OpenvasDB.get_single_item(ctx, 'a')
        self.assertEqual(value, 'a')
        ctx.lindex.assert_called_once_with('a', 0)

    def test_add_single_list(self, mock_redis):
        # The key is deleted and re-pushed atomically through a pipeline;
        # duplicates in the input list are preserved.
        ctx = mock_redis.from_url.return_value
        pipeline = ctx.pipeline.return_value
        pipeline.delete.return_value = None
        pipeline.execute.return_value = (None, 0)
        OpenvasDB.add_single_list(ctx, 'a', ['12', '11', '12'])
        pipeline.delete.assert_called_once_with('a')
        pipeline.rpush.assert_called_once_with('a', '12', '11', '12')
        assert_called(pipeline.execute)

    def test_add_single_item(self, mock_redis):
        # Duplicate values are collapsed before the RPUSH.
        ctx = mock_redis.from_url.return_value
        ctx.rpush.return_value = 1
        OpenvasDB.add_single_item(ctx, 'a', ['12', '12'])
        ctx.rpush.assert_called_once_with('a', '12')

    def test_add_single_item_error(self, mock_redis):
        ctx = mock_redis.from_url.return_value
        with self.assertRaises(RequiredArgument):
            OpenvasDB.add_single_item(None, '1', ['12'])
        with self.assertRaises(RequiredArgument):
            OpenvasDB.add_single_item(ctx, None, ['12'])
        with self.assertRaises(RequiredArgument):
            OpenvasDB.add_single_item(ctx, '1', None)

    def test_set_single_item_error(self, mock_redis):
        ctx = mock_redis.from_url.return_value
        with self.assertRaises(RequiredArgument):
            OpenvasDB.set_single_item(None, '1', ['12'])
        with self.assertRaises(RequiredArgument):
            OpenvasDB.set_single_item(ctx, None, ['12'])
        with self.assertRaises(RequiredArgument):
            OpenvasDB.set_single_item(ctx, '1', None)

    def test_pop_list_items_no_results(self, mock_redis):
        # An empty LRANGE result yields an empty list.
        ctx = mock_redis.from_url.return_value
        pipeline = ctx.pipeline.return_value
        pipeline.lrange.return_value = None
        pipeline.delete.return_value = None
        pipeline.execute.return_value = (None, 0)
        ret = OpenvasDB.pop_list_items(ctx, 'foo')
        self.assertEqual(ret, [])
        pipeline.lrange.assert_called_once_with('foo', 0, -1)
        pipeline.delete.assert_called_once_with('foo')
        assert_called(pipeline.execute)

    def test_pop_list_items_with_results(self, mock_redis):
        # Items come back reversed relative to the stored list order.
        ctx = mock_redis.from_url.return_value
        pipeline = ctx.pipeline.return_value
        pipeline.lrange.return_value = None
        pipeline.delete.return_value = None
        pipeline.execute.return_value = [['c', 'b', 'a'], 2]
        ret = OpenvasDB.pop_list_items(ctx, 'results')
        # reversed list
        self.assertEqual(ret, ['a', 'b', 'c'])
        pipeline.lrange.assert_called_once_with('results', 0, -1)
        pipeline.delete.assert_called_once_with('results')
        assert_called(pipeline.execute)

    def test_set_single_item(self, mock_redis):
        # Setting replaces the key: DELETE followed by RPUSH in a pipeline.
        ctx = mock_redis.from_url.return_value
        pipeline = ctx.pipeline.return_value
        pipeline.delete.return_value = None
        pipeline.rpush.return_value = None
        pipeline.execute.return_value = None
        OpenvasDB.set_single_item(ctx, 'foo', ['bar'])
        pipeline.delete.assert_called_once_with('foo')
        pipeline.rpush.assert_called_once_with('foo', 'bar')
        assert_called(pipeline.execute)

    def test_get_pattern(self, mock_redis):
        # Each key matching the pattern is paired with its full list.
        ctx = mock_redis.from_url.return_value
        ctx.keys.return_value = ['a', 'b']
        ctx.lrange.return_value = [1, 2, 3]
        ret = OpenvasDB.get_pattern(ctx, 'a')
        self.assertEqual(ret, [['a', [1, 2, 3]], ['b', [1, 2, 3]]])

    def test_get_pattern_error(self, mock_redis):
        ctx = mock_redis.from_url.return_value
        with self.assertRaises(RequiredArgument):
            OpenvasDB.get_pattern(None, 'a')
        with self.assertRaises(RequiredArgument):
            OpenvasDB.get_pattern(ctx, None)

    def test_get_filenames_and_oids_error(self, mock_redis):
        with self.assertRaises(RequiredArgument):
            OpenvasDB.get_filenames_and_oids(None, None, None)

    def test_get_filenames_and_oids(self, mock_redis):
        # The parser callback extracts the OID from each 'nvt:<oid>' key.
        def _pars(item):
            return item[4:]
        ctx = mock_redis.from_url.return_value
        ctx.keys.return_value = ['nvt:1', 'nvt:2']
        ctx.lindex.side_effect = ['aa', 'ab']
        ret = OpenvasDB.get_filenames_and_oids(ctx, "nvt:*", _pars)
        self.assertEqual(list(ret), [('aa', '1'), ('ab', '2')])

    def test_get_keys_by_pattern_error(self, mock_redis):
        ctx = mock_redis.from_url.return_value
        with self.assertRaises(RequiredArgument):
            OpenvasDB.get_keys_by_pattern(None, 'a')
        with self.assertRaises(RequiredArgument):
            OpenvasDB.get_keys_by_pattern(ctx, None)

    def test_get_keys_by_pattern(self, mock_redis):
        ctx = mock_redis.from_url.return_value
        ctx.keys.return_value = ['nvt:2', 'nvt:1']
        ret = OpenvasDB.get_keys_by_pattern(ctx, 'nvt:*')
        # Return sorted list
        self.assertEqual(ret, ['nvt:1', 'nvt:2'])

    def test_get_key_count(self, mock_redis):
        ctx = mock_redis.from_url.return_value
        ctx.keys.return_value = ['aa', 'ab']
        ret = OpenvasDB.get_key_count(ctx, "foo")
        self.assertEqual(ret, 2)
        ctx.keys.assert_called_with('foo')

    def test_get_key_count_with_default_pattern(self, mock_redis):
        # Without an explicit pattern all keys ('*') are counted.
        ctx = mock_redis.from_url.return_value
        ctx.keys.return_value = ['aa', 'ab']
        ret = OpenvasDB.get_key_count(ctx)
        self.assertEqual(ret, 2)
        ctx.keys.assert_called_with('*')

    def test_get_key_count_error(self, mock_redis):
        with self.assertRaises(RequiredArgument):
            OpenvasDB.get_key_count(None)

    @patch('ospd_openvas.db.Openvas')
    def test_find_database_by_pattern_none(
        self, mock_openvas: MagicMock, mock_redis
    ):
        # No database contains the pattern: both return values are None.
        ctx = mock_redis.from_url.return_value
        ctx.keys.return_value = None
        mock_check = mock_openvas.check.return_value
        mock_check.get.return_value = True
        OpenvasDB._db_address = None  # pylint: disable=protected-access
        mock_settings = mock_openvas.get_settings.return_value
        mock_settings.get.return_value = None
        new_ctx, index = OpenvasDB.find_database_by_pattern('foo*', 123)
        self.assertIsNone(new_ctx)
        self.assertIsNone(index)

    @patch('ospd_openvas.db.Openvas')
    def test_find_database_by_pattern(
        self, mock_openvas: MagicMock, mock_redis
    ):
        ctx = mock_redis.from_url.return_value
        mock_check = mock_openvas.check.return_value
        mock_check.get.return_value = True
        OpenvasDB._db_address = None  # pylint: disable=protected-access
        mock_settings = mock_openvas.get_settings.return_value
        mock_settings.get.return_value = None
        # keys is called twice per iteration
        ctx.keys.side_effect = [None, None, None, None, True, True]
        new_ctx, index = OpenvasDB.find_database_by_pattern('foo*', 123)
        self.assertEqual(new_ctx, ctx)
        self.assertEqual(index, 2)
@patch('ospd_openvas.db.OpenvasDB')
class ScanDBTestCase(TestCase):
    """Tests for ScanDB with the OpenvasDB helper mocked at class level."""

    @patch('ospd_openvas.db.redis.Redis')
    def setUp(self, mock_redis):  # pylint: disable=arguments-differ
        self.ctx = mock_redis.from_url.return_value
        self.db = ScanDB(10, self.ctx)

    def test_get_result(self, mock_openvas_db):
        # Results are popped from the scan's 'internal/results' list.
        mock_openvas_db.pop_list_items.return_value = ['some result']
        self.assertEqual(self.db.get_result(), ['some result'])
        mock_openvas_db.pop_list_items.assert_called_with(
            self.ctx, 'internal/results'
        )

    def test_get_status(self, mock_openvas_db):
        # The per-host status lives under 'internal/<host>'.
        mock_openvas_db.get_single_item.return_value = 'some status'
        self.assertEqual(self.db.get_status('foo'), 'some status')
        mock_openvas_db.get_single_item.assert_called_with(
            self.ctx, 'internal/foo'
        )

    def test_select(self, mock_openvas_db):
        # select() switches the redis database and returns self for chaining.
        selected = self.db.select(11)
        self.assertIs(selected, self.db)
        self.assertEqual(self.db.index, 11)
        mock_openvas_db.select_database.assert_called_with(self.ctx, 11)

    def test_flush(self, mock_openvas_db):
        # flush() clears the whole selected redis database.
        self.db.flush()
        self.ctx.flushdb.assert_called_with()
@patch('ospd_openvas.db.OpenvasDB')
class KbDBTestCase(TestCase):
    """Tests for KbDB; the OpenvasDB helper is mocked at class level, so
    every test method receives ``mock_openvas_db`` as its last argument."""

    @patch('ospd_openvas.db.redis.Redis')
    def setUp(self, mock_redis):  # pylint: disable=arguments-differ
        self.ctx = mock_redis.from_url.return_value
        self.db = KbDB(10, self.ctx)

    def test_get_result(self, mock_openvas_db):
        # Results are popped from the shared 'internal/results' list.
        mock_openvas_db.pop_list_items.return_value = [
            'some results',
        ]
        ret = self.db.get_result()
        self.assertEqual(
            ret,
            [
                'some results',
            ],
        )
        mock_openvas_db.pop_list_items.assert_called_with(
            self.ctx, 'internal/results'
        )

    def test_get_status(self, mock_openvas_db):
        # Per-host status is read from 'internal/<host>'.
        mock_openvas_db.get_single_item.return_value = 'some status'
        ret = self.db.get_status('foo')
        self.assertEqual(ret, 'some status')
        mock_openvas_db.get_single_item.assert_called_with(
            self.ctx, 'internal/foo'
        )

    def test_get_scan_status(self, mock_openvas_db):
        # Raw 'host/launched/total' strings are returned unparsed.
        status = [
            '192.168.0.1/10/120',
            '192.168.0.2/35/120',
        ]
        mock_openvas_db.pop_list_items.return_value = status
        ret = self.db.get_scan_status()
        self.assertEqual(ret, status)
        mock_openvas_db.pop_list_items.assert_called_with(
            self.ctx, 'internal/status'
        )

    def test_flush(self, mock_openvas_db):
        self.db.flush()
        self.ctx.flushdb.assert_called_with()

    def test_add_scan_id(self, mock_openvas_db):
        # Two keys are written: the scan's own status entry and the
        # 'internal/scanid' registry.
        self.db.add_scan_id('bar')
        calls = mock_openvas_db.add_single_item.call_args_list
        # NOTE(review): call[0] is the *positional* args tuple; the local
        # name 'kwargs' is misleading but behaviorally harmless.
        call = calls[0]
        kwargs = call[0]
        self.assertEqual(kwargs[1], 'internal/bar')
        self.assertEqual(kwargs[2], ['new'])
        call = calls[1]
        kwargs = call[0]
        self.assertEqual(kwargs[1], 'internal/scanid')
        self.assertEqual(kwargs[2], ['bar'])

    def test_add_scan_preferences(self, mock_openvas_db):
        prefs = ['foo', 'bar']
        self.db.add_scan_preferences('foo', prefs)
        mock_openvas_db.add_single_item.assert_called_with(
            self.ctx, 'internal/foo/scanprefs', prefs
        )

    @patch('ospd_openvas.db.OpenvasDB')
    def test_add_credentials_to_scan_preferences(
        self, mock_redis, mock_openvas_db
    ):
        # NOTE(review): both the method level and the class level patch
        # target ospd_openvas.db.OpenvasDB; the first parameter (named
        # 'mock_redis') actually receives the method level OpenvasDB mock.
        # Consider renaming the parameters for clarity.
        prefs = ['foo', 'bar']
        ctx = mock_redis.from_url.return_value
        mock_openvas_db.create_context.return_value = ctx
        self.db.add_credentials_to_scan_preferences('scan_id', prefs)
        mock_openvas_db.create_context.assert_called_with(
            self.db.index, encoding='utf-8'
        )
        mock_openvas_db.add_single_item.assert_called_with(
            ctx, 'internal/scan_id/scanprefs', prefs
        )

    def test_add_scan_process_id(self, mock_openvas_db):
        self.db.add_scan_process_id(123)
        mock_openvas_db.add_single_item.assert_called_with(
            self.ctx, 'internal/ovas_pid', [123]
        )

    def test_get_scan_process_id(self, mock_openvas_db):
        mock_openvas_db.get_single_item.return_value = '123'
        ret = self.db.get_scan_process_id()
        self.assertEqual(ret, '123')
        mock_openvas_db.get_single_item.assert_called_with(
            self.ctx, 'internal/ovas_pid'
        )

    def test_remove_scan_database(self, mock_openvas_db):
        # Removing a scan DB unregisters its index from 'internal/dbindex'.
        scan_db = MagicMock(spec=ScanDB)
        scan_db.index = 123
        self.db.remove_scan_database(scan_db)
        mock_openvas_db.remove_list_item.assert_called_with(
            self.ctx, 'internal/dbindex', 123
        )

    def test_target_is_finished_false(self, mock_openvas_db):
        # Status 'new' means the target is still being processed.
        mock_openvas_db.get_single_item.side_effect = ['new']
        ret = self.db.target_is_finished('bar')
        self.assertFalse(ret)
        calls = mock_openvas_db.get_single_item.call_args_list
        call = calls[0]
        args = call[0]
        self.assertEqual(args[1], 'internal/bar')

    def test_target_is_finished_true(self, mock_openvas_db):
        mock_openvas_db.get_single_item.side_effect = ['finished']
        ret = self.db.target_is_finished('bar')
        self.assertTrue(ret)
        calls = mock_openvas_db.get_single_item.call_args_list
        call = calls[0]
        args = call[0]
        self.assertEqual(args[1], 'internal/bar')

    def test_stop_scan(self, mock_openvas_db):
        # Stopping overwrites the scan status with 'stop_all'.
        self.db.stop_scan('foo')
        mock_openvas_db.set_single_item.assert_called_with(
            self.ctx, 'internal/foo', ['stop_all']
        )

    def test_scan_is_stopped_false(self, mock_openvas_db):
        mock_openvas_db.get_single_item.return_value = 'new'
        ret = self.db.scan_is_stopped('foo')
        self.assertFalse(ret)
        mock_openvas_db.get_single_item.assert_called_with(
            self.ctx, 'internal/foo'
        )

    def test_scan_is_stopped_true(self, mock_openvas_db):
        mock_openvas_db.get_single_item.return_value = 'stop_all'
        ret = self.db.scan_is_stopped('foo')
        self.assertTrue(ret)
        mock_openvas_db.get_single_item.assert_called_with(
            self.ctx, 'internal/foo'
        )

    def test_get_scan_databases(self, mock_openvas_db):
        # The generator yields a ScanDB per registered index, skipping the
        # KbDB's own index.
        mock_openvas_db.get_list_item.return_value = [
            '4',
            self.db.index,
            '7',
            '11',
        ]
        scan_dbs = self.db.get_scan_databases()
        scan_db = next(scan_dbs)
        self.assertEqual(scan_db.index, '4')
        scan_db = next(scan_dbs)
        self.assertEqual(scan_db.index, '7')
        scan_db = next(scan_dbs)
        self.assertEqual(scan_db.index, '11')
        with self.assertRaises(StopIteration):
            next(scan_dbs)
@patch('ospd_openvas.db.redis.Redis')
class MainDBTestCase(TestCase):
def test_max_database_index_fail(self, mock_redis):
ctx = mock_redis.from_url.return_value
ctx.config_get.return_value = {}
maindb = MainDB(ctx)
with self.assertRaises(OspdOpenvasError):
max_db = ( # pylint: disable=unused-variable
maindb.max_database_index
)
ctx.config_get.assert_called_with('databases')
def test_max_database_index(self, mock_redis):
ctx = mock_redis.from_url.return_value
ctx.config_get.return_value = {'databases': '123'}
maindb = MainDB(ctx)
max_db = maindb.max_database_index
self.assertEqual(max_db, 123)
ctx.config_get.assert_called_with('databases')
def test_try_database_success(self, mock_redis):
ctx = mock_redis.from_url.return_value
ctx.hsetnx.return_value = 1
maindb = MainDB(ctx)
ret = maindb.try_database(1)
self.assertEqual(ret, True)
ctx.hsetnx.assert_called_with(DBINDEX_NAME, 1, 1)
def test_try_database_false(self, mock_redis):
ctx = mock_redis.from_url.return_value
ctx.hsetnx.return_value = 0
maindb = MainDB(ctx)
ret = maindb.try_database(1)
self.assertEqual(ret, False)
ctx.hsetnx.assert_called_with(DBINDEX_NAME, 1, 1)
def test_try_db_index_error(self, mock_redis):
ctx = mock_redis.from_url.return_value
ctx.hsetnx.side_effect = Exception
maindb = MainDB(ctx)
with self.assertRaises(OspdOpenvasError):
maindb.try_database(1)
def test_release_database_by_index(self, mock_redis):
ctx = mock_redis.from_url.return_value
ctx.hdel.return_value = 1
maindb = MainDB(ctx)
maindb.release_database_by_index(3)
ctx.hdel.assert_called_once_with(DBINDEX_NAME, 3)
def test_release_database(self, mock_redis):
ctx = mock_redis.from_url.return_value
ctx.hdel.return_value = 1
db = MagicMock()
db.index = 3
maindb = MainDB(ctx)
maindb.release_database(db)
ctx.hdel.assert_called_once_with(DBINDEX_NAME, 3)
db.flush.assert_called_with()
def test_release(self, mock_redis):
ctx = mock_redis.from_url.return_value
maindb = MainDB(ctx)
maindb.release()
ctx.hdel.assert_called_with(DBINDEX_NAME, maindb.index)
ctx.flushdb.assert_called_with()
@patch('ospd_openvas.db.Openvas')
def test_get_new_kb_database(self, mock_openvas: MagicMock, mock_redis):
ctx = mock_redis.from_url.return_value
mock_check = mock_openvas.check.return_value
mock_check.get.return_value = True
OpenvasDB._db_address = None # pylint: disable=protected-access
mock_settings = mock_openvas.get_settings.return_value
mock_settings.get.return_value = None
maindb = MainDB(ctx)
maindb._max_dbindex = 123 # pylint: disable=protected-access
ctx.hsetnx.side_effect = [0, 0, 1]
kbdb = maindb.get_new_kb_database()
self.assertEqual(kbdb.index, 3)
ctx.flushdb.assert_called_once_with()
def test_get_new_kb_database_none(self, mock_redis):
ctx = mock_redis.from_url.return_value
maindb = MainDB(ctx)
maindb._max_dbindex = 3 # pylint: disable=protected-access
ctx.hsetnx.side_effect = [0, 0, 0]
kbdb = maindb.get_new_kb_database()
self.assertIsNone(kbdb)
ctx.flushdb.assert_not_called()
    @patch('ospd_openvas.db.OpenvasDB')
    def test_find_kb_database_by_scan_id_none(
        self, mock_openvas_db, mock_redis
    ):
        """No KB database is found when no db holds the scan-id key."""
        ctx = mock_redis.from_url.return_value
        new_ctx = 'bar'  # just some object to compare
        mock_openvas_db.create_context.return_value = new_ctx
        # every inspected database reports no 'internal/foo' key
        mock_openvas_db.get_key_count.return_value = None
        maindb = MainDB(ctx)
        maindb._max_dbindex = 2  # pylint: disable=protected-access
        kbdb = maindb.find_kb_database_by_scan_id('foo')
        mock_openvas_db.get_key_count.assert_called_once_with(
            new_ctx, 'internal/foo'
        )
        self.assertIsNone(kbdb)
    @patch('ospd_openvas.db.OpenvasDB')
    def test_find_kb_database_by_scan_id(self, mock_openvas_db, mock_redis):
        """The KB database holding the scan-id key is found by index.

        get_key_count yields 0 for the first inspected db and 1 for the
        second, so the match must be the database at index 2 using the
        freshly created context.
        """
        ctx = mock_redis.from_url.return_value
        new_ctx = 'foo'  # just some object to compare
        mock_openvas_db.create_context.return_value = new_ctx
        mock_openvas_db.get_key_count.side_effect = [0, 1]
        maindb = MainDB(ctx)
        maindb._max_dbindex = 3  # pylint: disable=protected-access
        kbdb = maindb.find_kb_database_by_scan_id('foo')
        mock_openvas_db.get_key_count.assert_called_with(
            new_ctx, 'internal/foo'
        )
        self.assertEqual(kbdb.index, 2)
        self.assertIs(kbdb.ctx, new_ctx)
ospd-openvas-22.9.0/tests/test_errors.py 0000664 0000000 0000000 00000003363 15011310720 0020304 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
"""Test module for OspdCommandError class"""
import unittest
from ospd.errors import OspdError, OspdCommandError, RequiredArgument
class OspdCommandErrorTestCase(unittest.TestCase):
    """Tests for the OspdCommandError exception type."""

    def test_is_ospd_error(self):
        # OspdCommandError must be catchable as the base OspdError
        e = OspdCommandError('message')
        self.assertIsInstance(e, OspdError)

    def test_default_params(self):
        # status defaults to 400 and command to 'osp'
        e = OspdCommandError('message')
        self.assertEqual('message', e.message)
        self.assertEqual(400, e.status)
        self.assertEqual('osp', e.command)

    def test_constructor(self):
        e = OspdCommandError('message', 'command', 304)
        self.assertEqual('message', e.message)
        self.assertEqual('command', e.command)
        self.assertEqual(304, e.status)

    def test_string_conversion(self):
        e = OspdCommandError('message foo bar', 'command', 304)
        self.assertEqual('message foo bar', str(e))

    def test_as_xml(self):
        e = OspdCommandError('message')
        # NOTE(review): the expected XML payload below looks truncated
        # (empty bytes literal) — confirm against the actual
        # OspdCommandError.as_xml() output before relying on this test.
        self.assertEqual(
            b'', e.as_xml()
        )
class RequiredArgumentTestCase(unittest.TestCase):
    """Tests for the RequiredArgument error type."""

    def test_raise_exception(self):
        """Constructor arguments are exposed as attributes."""
        with self.assertRaises(RequiredArgument) as cm:
            raise RequiredArgument('foo', 'bar')
        error = cm.exception
        self.assertEqual(error.function, 'foo')
        self.assertEqual(error.argument, 'bar')

    def test_string_conversion(self):
        """str() renders function and argument in a fixed message."""
        error = RequiredArgument('foo', 'bar')
        self.assertEqual(str(error), 'foo: Argument bar is required')

    def test_is_ospd_error(self):
        """RequiredArgument must be catchable as the base OspdError."""
        error = RequiredArgument('foo', 'bar')
        self.assertIsInstance(error, OspdError)
ospd-openvas-22.9.0/tests/test_gpg.py 0000664 0000000 0000000 00000005570 15011310720 0017547 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2021-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
from pathlib import Path
from unittest import TestCase
from unittest.mock import Mock, patch
from typing import Dict, Optional
from ospd_openvas.gpg_sha_verifier import (
ReloadConfiguration,
create_verify,
gpg_sha256sums,
reload_sha256sums,
)
class GpgTest(TestCase):
    """Tests for the gpg based sha256sums verification helpers."""

    # NOTE(review): @patch decorators apply bottom-up, so the first
    # injected mock corresponds to pathlib.Path and the second to
    # gnupg.GPG; the parameter names below appear swapped relative to
    # that order. Both are generic mocks used consistently within each
    # test, so the tests still work — confirm before renaming.
    @patch("gnupg.GPG")
    @patch("pathlib.Path")
    def test_reload(self, gmock, pathmock: Path):
        def on_failure(_: Optional[Dict[str, str]]) -> Dict[str, str]:
            # raised when gpg verification of the sums file fails
            raise Exception(  # pylint: disable=broad-exception-raised
                "verification_failed"
            )

        # Fake file object usable as a context manager.
        omock = Mock()
        emock = Mock()
        omock.__enter__ = Mock(return_value=emock)
        omock.__exit__ = Mock()
        pathmock.open.return_value = omock
        # First read of the sums file yields "h hi", a later one "g gude".
        emock.readlines.side_effect = [["h hi\n"], ["g gude\n"]]
        emock.read.side_effect = [b"hi", b"", b"hi", b"", b"ih", b""]
        load = reload_sha256sums(
            ReloadConfiguration(
                hash_file=pathmock,
                on_verification_failure=on_failure,
                gpg=gmock,
            )
        )
        # Same content on the second call -> same parsed sums; changed
        # content on the third call -> newly parsed sums.
        self.assertDictEqual(load(), {"h": "hi"})
        self.assertDictEqual(load(), {"h": "hi"})
        self.assertDictEqual(load(), {"g": "gude"})
        # A failing gpg verification triggers the on_failure callback.
        gmock.verify_file.side_effect = [False]
        with self.assertRaises(Exception):
            load()

    @patch("gnupg.GPG")
    @patch("pathlib.Path")
    def test_verifying(self, gmock, pathmock: Path):
        omock = Mock()
        emock = Mock()
        omock.__enter__ = Mock(return_value=emock)
        omock.__exit__ = Mock()
        pathmock.open.return_value = omock
        emock.readlines.side_effect = [["h hi\n", "g gude\n"]]
        # Successful verification returns the parsed checksum mapping.
        success_result = gpg_sha256sums(pathmock, gmock)
        self.assertIsNotNone(success_result)
        self.assertDictEqual(success_result, {"h": "hi", "g": "gude"})
        # Failed verification returns None instead of a mapping.
        gmock.verify_file.side_effect = [False]
        self.assertIsNone(gpg_sha256sums(pathmock, gmock))

    @patch("pathlib.Path")
    def test_verify_closure(self, pathmock):
        # sha256 hex digest of b"hi\n" — must match the fake file content
        # used below for the success case.
        shas = (
            "98ea6e4f216f2fb4b69fff9b3a44842c38686ca685f3f55dc48c5d3fb1107be4"
        )
        vsuccess = create_verify(lambda: {shas: "hi.txt"})
        omock = Mock()
        emock = Mock()
        omock.__enter__ = Mock(return_value=emock)
        omock.__exit__ = Mock()
        pathmock.open.return_value = omock
        # matching name and matching content -> verified
        emock.read.side_effect = [bytes("hi\n", "utf-8"), ""]
        pathmock.name = "hi.txt"
        self.assertTrue(vsuccess(pathmock))
        # file name not present in the sums mapping -> rejected
        emock.read.side_effect = [bytes("hi\n", "utf-8"), ""]
        pathmock.name = "false.txt"
        self.assertFalse(vsuccess(pathmock))
        # matching name but different content (wrong hash) -> rejected
        emock.read.side_effect = [bytes("hin", "utf-8"), ""]
        pathmock.name = "hi.txt"
        self.assertFalse(vsuccess(pathmock))
ospd-openvas-22.9.0/tests/test_lock.py 0000664 0000000 0000000 00000005652 15011310720 0017723 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
import unittest
import shutil
import tempfile
from pathlib import Path, PosixPath
from unittest.mock import patch, MagicMock
from ospd_openvas.lock import LockFile
from .helper import assert_called_once, assert_called
class LockFileTestCase(unittest.TestCase):
    """Tests for LockFile using a real temporary directory."""

    def setUp(self):
        # fresh scratch directory per test; removed again in tearDown
        self.temp_dir = Path(tempfile.mkdtemp())

    def tearDown(self):
        shutil.rmtree(str(self.temp_dir))

    def test_acquire_lock(self):
        """Acquiring the lock creates the lock file on disk."""
        lock_file_path = self.temp_dir / "test.lock"
        lock_file = LockFile(lock_file_path)
        lock_file._acquire_lock()  # pylint: disable = protected-access
        self.assertTrue(lock_file.has_lock())
        self.assertTrue(lock_file_path.exists())
        lock_file._release_lock()  # pylint: disable = protected-access

    @patch("ospd_openvas.lock.logger")
    def test_already_locked(self, mock_logger):
        """A second LockFile on the same path cannot acquire the lock."""
        lock_file_path = self.temp_dir / "test.lock"
        lock_file_aux = LockFile(lock_file_path)
        lock_file_aux._acquire_lock()  # pylint: disable = protected-access
        self.assertTrue(lock_file_aux.has_lock())
        # second instance must fail and log a debug message
        lock_file = LockFile(lock_file_path)
        lock_file._acquire_lock()  # pylint: disable = protected-access
        self.assertFalse(lock_file.has_lock())
        assert_called(mock_logger.debug)
        lock_file_aux._release_lock()  # pylint: disable = protected-access

    def test_create_parent_dirs(self):
        """Missing parent directories are created on acquire."""
        lock_file_path = self.temp_dir / "foo" / "bar" / "test.lock"
        lock_file = LockFile(lock_file_path)
        lock_file._acquire_lock()  # pylint: disable = protected-access
        self.assertTrue(lock_file.has_lock())
        self.assertTrue(lock_file_path.exists())
        self.assertTrue(lock_file_path.parent.is_dir())
        self.assertTrue(lock_file_path.parent.parent.is_dir())
        lock_file._release_lock()  # pylint: disable = protected-access

    @patch("ospd_openvas.lock.logger")
    def test_create_paren_dirs_fail(self, mock_logger):
        """A PermissionError while creating parents is logged, not raised."""
        lock_file_path = MagicMock(spec=Path).return_value
        parent = MagicMock(spec=PosixPath)
        lock_file_path.parent = parent
        parent.mkdir.side_effect = PermissionError
        lock_file = LockFile(lock_file_path)
        lock_file._acquire_lock()  # pylint: disable = protected-access
        self.assertFalse(lock_file.has_lock())
        assert_called_once(mock_logger.error)

    def test_context_manager(self):
        """The lock is held inside the with-block and released after."""
        lock_file_path = self.temp_dir / "test.lock"
        lock_file = LockFile(lock_file_path)
        with lock_file:
            self.assertTrue(lock_file.has_lock())
            self.assertTrue(lock_file_path.is_file())
            lock_file._release_lock()  # pylint: disable = protected-access
        # The file is not removed
        self.assertFalse(lock_file.has_lock())
        self.assertTrue(lock_file_path.is_file())
ospd-openvas-22.9.0/tests/test_notus.py 0000664 0000000 0000000 00000015237 15011310720 0020143 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2021-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
import logging
import threading
from unittest import TestCase, mock
from typing import Dict, Optional, Iterator
from ospd_openvas.messages.result import ResultMessage
from ospd_openvas.notus import Cache, Notus, NotusResultHandler
class CacheFake(Cache):
    """In-memory stand-in for the redis backed advisory Cache.

    Stores advisories in a plain dict so tests can run without a redis
    server. Mirrors the Cache interface used by Notus.
    """

    # pylint: disable=super-init-not-called
    def __init__(self):
        # oid -> advisory dict
        self.db = {}
        self.ctx = 'foo'

    def store_advisory(self, oid: str, value: Dict[str, str]):
        """Store an advisory under its oid."""
        self.db[oid] = value

    def exists(self, oid: str) -> bool:
        """Return True if an advisory is stored for the given oid."""
        return self.db.get(oid) is not None

    def get_advisory(self, oid: str) -> Optional[Dict[str, str]]:
        """Return the stored advisory or None when unknown."""
        return self.db.get(oid, None)

    def get_keys(self) -> Iterator[str]:
        """Yield all stored oids.

        Bug fix: iterating a dict yields plain keys, so the previous
        ``for key, _ in self.db`` unpacked each oid *string* — it only
        worked by accident for two-character oids and raised ValueError
        otherwise.
        """
        yield from self.db
class NotusTestCase(TestCase):
    """Tests for the Notus advisory loader and the NotusResultHandler."""

    @mock.patch('ospd_openvas.notus.OpenvasDB')
    def test_notus_retrieve(self, mock_openvasdb):
        """get_oids yields the (filename, oid) pairs stored in the cache."""
        path_mock = mock.MagicMock()
        redis_mock = mock.MagicMock()
        mock_openvasdb.find_database_by_pattern.return_value = ('foo', 1)
        mock_openvasdb.get_filenames_and_oids.return_value = [
            ('filename', '1.2.3')
        ]
        notus = Notus(path_mock, Cache(redis_mock))
        notus._verifier = lambda _: True  # pylint: disable=protected-access
        oids = [x for x in notus.get_oids()]
        self.assertEqual(len(oids), 1)

    @mock.patch('ospd_openvas.notus.OpenvasDB')
    def test_notus_init(self, mock_openvasdb):
        """The cache redis context is looked up once on construction."""
        mock_openvasdb.find_database_by_pattern.return_value = ('foo', 1)
        redis_mock = mock.MagicMock()
        path_mock = mock.MagicMock()
        notus = Notus(path_mock, Cache(redis_mock))
        self.assertEqual(mock_openvasdb.find_database_by_pattern.call_count, 1)
        self.assertEqual(notus.cache.ctx, 'foo')

    @mock.patch('ospd_openvas.notus.OpenvasDB')
    def test_notus_reload(self, mock_openvasdb):
        """reload_cache only stores advisories whose signature verifies."""
        path_mock = mock.MagicMock()
        adv_path = mock.MagicMock()
        adv_path.name = "hi"
        adv_path.stem = "family"
        path_mock.glob.return_value = [adv_path]
        adv_path.read_bytes.return_value = b'''
        {
            "family": "family",
            "qod_type": "remote_app",
            "advisories": [
                { "oid": "12", "file_name": "aha.txt" }
            ]
        }'''
        redis_mock = mock.MagicMock()
        mock_openvasdb.find_database_by_pattern.return_value = ('foo', 1)
        # verifier accepts the file -> one advisory is written to redis
        load_into_redis = Notus(path_mock, Cache(redis_mock))
        # pylint: disable=protected-access
        load_into_redis._verifier = lambda _: True
        load_into_redis.reload_cache()
        self.assertEqual(mock_openvasdb.set_single_item.call_count, 1)
        mock_openvasdb.reset_mock()
        # verifier rejects the file -> nothing is written to redis
        do_not_load_into_redis = Notus(path_mock, Cache(redis_mock))
        # pylint: disable=protected-access
        do_not_load_into_redis._verifier = lambda _: False
        do_not_load_into_redis.reload_cache()
        self.assertEqual(mock_openvasdb.set_single_item.call_count, 0)

    def test_notus_qod_type(self):
        """A per-advisory qod_type ends up in the vt metadata."""
        path_mock = mock.MagicMock()
        adv_path = mock.MagicMock()
        adv_path.name = "hi"
        adv_path.stem = "family"
        path_mock.glob.return_value = [adv_path]
        adv_path.read_bytes.return_value = b'''
        {
            "family": "family",
            "advisories": [
                {
                    "oid": "12",
                    "qod_type": "package_unreliable",
                    "severity": {
                        "origin": "NVD",
                        "date": 1505784960,
                        "cvss_v2": "AV:N/AC:M/Au:N/C:C/I:C/A:C",
                        "cvss_v3": null
                    }
                }
            ]
        }'''
        cache_fake = CacheFake()
        notus = Notus(path_mock, cache_fake)
        notus._verifier = lambda _: True  # pylint: disable=protected-access
        notus.reload_cache()
        nm = notus.get_nvt_metadata("12")
        assert nm
        self.assertEqual("package_unreliable", nm.get("qod_type", ""))

    def test_notus_cvss_v2_v3_none(self):
        """With cvss_v3 null, the v2 vector is used as severity vector."""
        path_mock = mock.MagicMock()
        adv_path = mock.MagicMock()
        adv_path.name = "hi"
        adv_path.stem = "family"
        path_mock.glob.return_value = [adv_path]
        adv_path.read_bytes.return_value = b'''
        {
            "family": "family",
            "advisories": [
                {
                    "oid": "12",
                    "severity": {
                        "origin": "NVD",
                        "date": 1505784960,
                        "cvss_v2": "AV:N/AC:M/Au:N/C:C/I:C/A:C",
                        "cvss_v3": null
                    }
                }
            ]
        }'''
        cache_fake = CacheFake()
        notus = Notus(path_mock, cache_fake)
        notus._verifier = lambda _: True  # pylint: disable=protected-access
        notus.reload_cache()
        nm = notus.get_nvt_metadata("12")
        assert nm
        self.assertEqual(
            "AV:N/AC:M/Au:N/C:C/I:C/A:C", nm.get("severity_vector", "")
        )

    def test_notus_fail_cases(self):
        """A warning is logged when reporting notus results fails."""
        def start(self):
            # run the Timer callback synchronously instead of spawning a
            # background thread
            self.function(*self.args, **self.kwargs)

        mock_report_func = mock.MagicMock(return_value=False)
        # NOTE(review): patching logging.Logger.warning on the class is
        # global and never restored — consider mock.patch.object instead.
        logging.Logger.warning = mock.MagicMock()
        notus = NotusResultHandler(mock_report_func)
        res_msg = ResultMessage(
            scan_id='scan_1',
            host_ip='1.1.1.1',
            host_name='foo',
            oid='1.2.3.4.5',
            value='A Vulnerability has been found',
            port="42",
            uri='file://foo/bar',
        )
        with mock.patch.object(threading.Timer, 'start', start):
            notus.result_handler(res_msg)
        logging.Logger.warning.assert_called_with(  # pylint: disable=no-member
            "Unable to report %d notus results for scan id %s.", 1, "scan_1"
        )

    def test_notus_success_case(self):
        """No warning is logged when reporting notus results succeeds."""
        def start(self):
            # run the Timer callback synchronously instead of spawning a
            # background thread
            self.function(*self.args, **self.kwargs)

        mock_report_func = mock.MagicMock(return_value=True)
        logging.Logger.warning = mock.MagicMock()
        notus = NotusResultHandler(mock_report_func)
        res_msg = ResultMessage(
            scan_id='scan_1',
            host_ip='1.1.1.1',
            host_name='foo',
            oid='1.2.3.4.5',
            value='A Vulnerability has been found',
            port="42",
            uri='file://foo/bar',
        )
        with mock.patch.object(threading.Timer, 'start', start):
            notus.result_handler(res_msg)
        logging.Logger.warning.assert_not_called()  # pylint: disable=no-member
ospd-openvas-22.9.0/tests/test_nvti_cache.py 0000664 0000000 0000000 00000024742 15011310720 0021077 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
# pylint: disable=unused-argument, protected-access, invalid-name
"""Unit Test for ospd-openvas"""
import logging
from unittest import TestCase
from unittest.mock import patch, Mock, PropertyMock
from pathlib import Path
from ospd_openvas.nvticache import NVTICache, NVTI_CACHE_NAME
from tests.helper import assert_called
@patch('ospd_openvas.nvticache.OpenvasDB')
class TestNVTICache(TestCase):
    """Tests for NVTICache against a mocked OpenvasDB backend."""

    @patch('ospd_openvas.db.MainDB')
    def setUp(self, MockMainDB):  # pylint: disable=arguments-differ
        self.db = MockMainDB()
        self.nvti = NVTICache(self.db)
        # pre-seed the redis context so most tests skip the lookup
        self.nvti._ctx = 'foo'

    def test_set_index(self, MockOpenvasDB):
        """Accessing ctx lazily resolves context and index from redis."""
        self.nvti._ctx = None
        MockOpenvasDB.find_database_by_pattern.return_value = ('foo', 22)
        ctx = self.nvti.ctx
        self.assertIsNotNone(ctx)
        self.assertEqual(ctx, 'foo')
        self.assertEqual(self.nvti.index, 22)

    def test_get_feed_version(self, MockOpenvasDB):
        """The feed version is read from the nvti cache key."""
        MockOpenvasDB.get_single_item.return_value = '1234'
        resp = self.nvti.get_feed_version()
        self.assertEqual(resp, '1234')
        MockOpenvasDB.get_single_item.assert_called_with('foo', NVTI_CACHE_NAME)

    def test_get_feed_version_not_available(self, MockOpenvasDB):
        """None is returned when no nvti cache database exists yet."""
        pmock = PropertyMock(return_value=123)
        type(self.db).max_database_index = pmock
        self.nvti._ctx = None
        MockOpenvasDB.find_database_by_pattern.return_value = (None, None)
        resp = self.nvti.get_feed_version()
        self.assertIsNone(resp)
        MockOpenvasDB.find_database_by_pattern.assert_called_with(
            NVTI_CACHE_NAME, 123
        )

    def test_get_oids(self, MockOpenvasDB):
        """get_oids passes through the (filename, oid) pairs."""
        MockOpenvasDB.get_filenames_and_oids.return_value = [
            ('filename', 'oid')
        ]
        resp = self.nvti.get_oids()
        self.assertEqual([x for x in resp], [('filename', 'oid')])

    def test_parse_metadata_tag_missing_value(self, MockOpenvasDB):
        """A tag without '=' yields an empty dict and logs an error."""
        logging.Logger.error = Mock()
        tags = 'tag1'
        ret = (
            NVTICache._parse_metadata_tags(  # pylint: disable=protected-access
                tags, '1.2.3'
            )
        )
        self.assertEqual(ret, {})
        assert_called(logging.Logger.error)

    def test_parse_metadata_tag(self, MockOpenvasDB):
        """A single key=value tag is parsed into a one-entry dict."""
        tags = 'tag1=value1'
        ret = (
            NVTICache._parse_metadata_tags(  # pylint: disable=protected-access
                tags, '1.2.3'
            )
        )
        self.assertEqual(ret, {'tag1': 'value1'})

    def test_parse_metadata_tags(self, MockOpenvasDB):
        """Multiple tags separated by '|' are all parsed."""
        tags = 'tag1=value1|foo=bar'
        ret = (
            NVTICache._parse_metadata_tags(  # pylint: disable=protected-access
                tags, '1.2.3'
            )
        )
        self.assertEqual(ret, {'tag1': 'value1', 'foo': 'bar'})

    def test_get_nvt_params(self, MockOpenvasDB):
        """VT preferences are parsed; a missing default becomes ''."""
        # full pref, pref with empty default, pref with no default field
        prefs1 = ['1|||dns-fuzz.timelimit|||entry|||default']
        prefs2 = ['1|||dns-fuzz.timelimit|||entry|||']
        prefs3 = ['1|||dns-fuzz.timelimit|||entry']
        out_dict1 = {
            '1': {
                'id': '1',
                'type': 'entry',
                'default': 'default',
                'name': 'dns-fuzz.timelimit',
                'description': 'Description',
            },
        }
        out_dict2 = {
            '1': {
                'id': '1',
                'type': 'entry',
                'default': '',
                'name': 'dns-fuzz.timelimit',
                'description': 'Description',
            },
        }
        MockOpenvasDB.get_list_item.return_value = prefs1
        resp = self.nvti.get_nvt_params('1.2.3.4')
        self.assertEqual(resp, out_dict1)
        MockOpenvasDB.get_list_item.return_value = prefs2
        resp = self.nvti.get_nvt_params('1.2.3.4')
        self.assertEqual(resp, out_dict2)
        MockOpenvasDB.get_list_item.return_value = prefs3
        resp = self.nvti.get_nvt_params('1.2.3.4')
        self.assertEqual(resp, out_dict2)

    def test_get_nvt_metadata(self, MockOpenvasDB):
        """The raw redis metadata list is assembled into the custom dict."""
        # list entries in the fixed positional layout used by the cache
        metadata = [
            'mantis_detect.nasl',
            '',
            '',
            'Settings/disable_cgi_scanning',
            '',
            'Services/www, 80',
            'find_service.nasl, http_version.nasl',
            'cvss_base_vector=AV:N/AC:L/Au:N/C:N/I:N'
            '/A:N|last_modification=1533906565'
            '|creation_date=1237458156'
            '|summary=Detects the ins'
            'talled version of\n Mantis a free popular web-based '
            'bugtracking system.\n\n This script sends HTTP GET r'
            'equest and try to get the version from the\n respons'
            'e, and sets the result in KB.|qod_type=remote_banner',
            '',
            '',
            'URL:http://www.mantisbt.org/',
            '3',
            'Product detection',
            'Mantis Detection',
        ]
        custom = {
            'category': '3',
            'creation_date': '1237458156',
            'cvss_base_vector': 'AV:N/AC:L/Au:N/C:N/I:N/A:N',
            'dependencies': 'find_service.nasl, http_version.nasl',
            'excluded_keys': 'Settings/disable_cgi_scanning',
            'family': 'Product detection',
            'filename': 'mantis_detect.nasl',
            'last_modification': ('1533906565'),
            'name': 'Mantis Detection',
            'qod_type': 'remote_banner',
            'refs': {'xref': ['URL:http://www.mantisbt.org/']},
            'required_ports': 'Services/www, 80',
            'summary': (
                'Detects the installed version of\n Mantis a '
                'free popular web-based bugtracking system.\n'
                '\n This script sends HTTP GET request and t'
                'ry to get the version from the\n response, '
                'and sets the result in KB.'
            ),
            'vt_params': {
                '0': {
                    'id': '0',
                    'type': 'entry',
                    'name': 'timeout',
                    'description': 'Description',
                    'default': '10',
                },
                '1': {
                    'id': '1',
                    'type': 'entry',
                    'name': 'dns-fuzz.timelimit',
                    'description': 'Description',
                    'default': 'default',
                },
            },
        }
        prefs1 = [
            '0|||timeout|||entry|||10',
            '1|||dns-fuzz.timelimit|||entry|||default',
        ]
        # first call returns metadata, second call the preferences
        MockOpenvasDB.get_list_item.side_effect = [metadata, prefs1]
        resp = self.nvti.get_nvt_metadata('1.2.3.4')
        self.maxDiff = None
        self.assertEqual(resp, custom)

    def test_get_nvt_metadata_fail(self, MockOpenvasDB):
        """None is returned when the cache has no metadata for the oid."""
        MockOpenvasDB.get_list_item.return_value = []
        resp = self.nvti.get_nvt_metadata('1.2.3.4')
        self.assertIsNone(resp)

    def test_get_nvt_refs(self, MockOpenvasDB):
        """The refs list is split into cve, bid and xref entries."""
        refs = ['', '', 'URL:http://www.mantisbt.org/']
        out_dict = {
            'cve': [''],
            'bid': [''],
            'xref': ['URL:http://www.mantisbt.org/'],
        }
        MockOpenvasDB.get_list_item.return_value = refs
        resp = self.nvti.get_nvt_refs('1.2.3.4')
        self.assertEqual(resp, out_dict)

    def test_get_nvt_refs_fail(self, MockOpenvasDB):
        """None is returned when the cache has no refs for the oid."""
        MockOpenvasDB.get_list_item.return_value = []
        resp = self.nvti.get_nvt_refs('1.2.3.4')
        self.assertIsNone(resp)

    def test_get_nvt_prefs(self, MockOpenvasDB):
        """Preferences are returned verbatim from the cache."""
        prefs = ['dns-fuzz.timelimit|||entry|||default']
        MockOpenvasDB.get_list_item.return_value = prefs
        resp = self.nvti.get_nvt_prefs('1.2.3.4')
        self.assertEqual(resp, prefs)

    def test_get_nvt_tags(self, MockOpenvasDB):
        """The '|'-separated tag string is parsed into a dict."""
        tag = (
            'last_modification=1533906565'
            '|creation_date=1517443741|cvss_bas'
            'e_vector=AV:N/AC:L/Au:N/C:P/I:P/A:P|solution_type=V'
            'endorFix|qod_type=package|affected=rubygems on Debi'
            'an Linux|solution_method=DebianAPTUpgrade'
        )
        out_dict = {
            'last_modification': '1533906565',
            'creation_date': '1517443741',
            'cvss_base_vector': 'AV:N/AC:L/Au:N/C:P/I:P/A:P',
            'solution_type': 'VendorFix',
            'qod_type': 'package',
            'affected': 'rubygems on Debian Linux',
            'solution_method': 'DebianAPTUpgrade',
        }
        MockOpenvasDB.get_single_item.return_value = tag
        resp = self.nvti.get_nvt_tags('1.2.3.4')
        self.assertEqual(out_dict, resp)

    def test_get_nvt_files_count(self, MockOpenvasDB):
        MockOpenvasDB.get_key_count.return_value = 20
        self.assertEqual(self.nvti.get_nvt_files_count(), 20)
        MockOpenvasDB.get_key_count.assert_called_with('foo', 'filename:*')

    def test_get_nvt_count(self, MockOpenvasDB):
        MockOpenvasDB.get_key_count.return_value = 20
        self.assertEqual(self.nvti.get_nvt_count(), 20)
        MockOpenvasDB.get_key_count.assert_called_with('foo', 'nvt:*')

    def test_flush(self, _MockOpenvasDB):
        """flush delegates to the redis context's flushdb."""
        self.nvti._ctx = Mock()
        self.nvti.flush()
        self.nvti._ctx.flushdb.assert_called_with()

    def test_add_vt(self, MockOpenvasDB):
        """add_vt_to_cache forwards oid and value list unchanged."""
        MockOpenvasDB.add_single_list = Mock()
        self.nvti.add_vt_to_cache(
            '1234',
            [
                'a',
                'b',
                'c',
                'd',
                'e',
                'f',
                'g',
                'h',
                'i',
                'j',
                'k',
                'l',
                'm',
                'n',
                'o',
            ],
        )
        MockOpenvasDB.add_single_list.assert_called_with(
            'foo',
            '1234',
            [
                'a',
                'b',
                'c',
                'd',
                'e',
                'f',
                'g',
                'h',
                'i',
                'j',
                'k',
                'l',
                'm',
                'n',
                'o',
            ],
        )

    def test_get_file_checksum(self, MockOpenvasDB):
        """The sha256 checksum of a feed file is read from the cache."""
        MockOpenvasDB.get_single_item.return_value = '123456'
        path = Path("/tmp/foo.csv")
        resp = self.nvti.get_file_checksum(path)
        self.assertEqual(resp, '123456')
        MockOpenvasDB.get_single_item.assert_called_with(
            'foo', "sha256sums:/tmp/foo.csv"
        )
ospd-openvas-22.9.0/tests/test_openvas.py 0000664 0000000 0000000 00000024032 15011310720 0020437 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
import subprocess
from unittest import TestCase
from unittest.mock import patch, MagicMock
import psutil
from ospd_openvas.openvas import Openvas
class OpenvasCommandTestCase(TestCase):
    """Tests for the Openvas command wrapper (subprocess/psutil based)."""

    @patch('ospd_openvas.openvas.subprocess.check_output')
    def test_get_version(self, mock_check_output: MagicMock):
        """'openvas -V' output starting with 'OpenVAS' yields the version."""
        mock_check_output.return_value = b"OpenVAS 20.08"
        self.assertEqual(Openvas.get_version(), 'OpenVAS 20.08')
        mock_check_output.assert_called_with(
            ['openvas', '-V'], stderr=subprocess.STDOUT
        )

    @patch('ospd_openvas.openvas.subprocess.check_output')
    def test_get_version_not_found(self, mock_check_output: MagicMock):
        """Unexpected -V output yields None."""
        mock_check_output.return_value = b"Foo 20.08"
        self.assertIsNone(Openvas.get_version())
        mock_check_output.assert_called_with(
            ['openvas', '-V'], stderr=subprocess.STDOUT
        )

    @patch('ospd_openvas.openvas.subprocess.check_output')
    def test_get_version_with_error(self, mock_check_output: MagicMock):
        """Subprocess and OS errors both yield None instead of raising."""
        mock_check_output.side_effect = subprocess.SubprocessError('foo')
        self.assertIsNone(Openvas.get_version())
        mock_check_output.assert_called_with(
            ['openvas', '-V'], stderr=subprocess.STDOUT
        )
        mock_check_output.reset_mock()
        mock_check_output.side_effect = OSError('foo')
        self.assertIsNone(Openvas.get_version())
        mock_check_output.assert_called_with(
            ['openvas', '-V'], stderr=subprocess.STDOUT
        )

    @patch('ospd_openvas.openvas.subprocess.check_call')
    def test_check(self, mock_check_call: MagicMock):
        """check() is True when 'openvas -V' exits successfully."""
        self.assertTrue(Openvas.check())
        mock_check_call.assert_called_with(
            ['openvas', '-V'], stdout=subprocess.DEVNULL
        )

    @patch('ospd_openvas.openvas.subprocess.check_call')
    def test_check_with_error(self, mock_check_call: MagicMock):
        """check() is False on subprocess or OS errors."""
        mock_check_call.side_effect = subprocess.SubprocessError('foo')
        self.assertFalse(Openvas.check())
        mock_check_call.assert_called_with(
            ['openvas', '-V'], stdout=subprocess.DEVNULL
        )
        mock_check_call.reset_mock()
        mock_check_call.side_effect = OSError('foo')
        self.assertFalse(Openvas.check())
        mock_check_call.assert_called_with(
            ['openvas', '-V'], stdout=subprocess.DEVNULL
        )

    @patch('ospd_openvas.openvas.subprocess.check_call')
    def test_check_sudo(self, mock_check_call: MagicMock):
        """check_sudo() probes passwordless sudo with 'sudo -n'."""
        self.assertTrue(Openvas.check_sudo())
        mock_check_call.assert_called_with(
            ['sudo', '-n', 'openvas', '-s'], stdout=subprocess.DEVNULL
        )

    @patch('ospd_openvas.openvas.subprocess.check_call')
    def test_check_sudo_with_error(self, mock_check_call: MagicMock):
        """check_sudo() is False on subprocess or OS errors."""
        mock_check_call.side_effect = subprocess.SubprocessError('foo')
        self.assertFalse(Openvas.check_sudo())
        mock_check_call.assert_called_with(
            ['sudo', '-n', 'openvas', '-s'], stdout=subprocess.DEVNULL
        )
        mock_check_call.reset_mock()
        mock_check_call.side_effect = OSError('foo')
        self.assertFalse(Openvas.check_sudo())
        mock_check_call.assert_called_with(
            ['sudo', '-n', 'openvas', '-s'], stdout=subprocess.DEVNULL
        )

    @patch('ospd_openvas.openvas.logger')
    @patch('ospd_openvas.openvas.subprocess.check_call')
    def test_load_vts_into_redis(self, mock_check_call, mock_logger):
        """VT loading shells out to 'openvas --update-vt-info'."""
        Openvas.load_vts_into_redis()
        mock_check_call.assert_called_with(
            ['openvas', '--update-vt-info'], stdout=subprocess.DEVNULL
        )
        mock_logger.error.assert_not_called()

    @patch('ospd_openvas.openvas.logger')
    @patch('ospd_openvas.openvas.subprocess.check_call')
    def test_load_vts_into_redis_with_error(
        self, mock_check_call: MagicMock, mock_logger: MagicMock
    ):
        """A failing VT update is logged as an error, not raised."""
        mock_check_call.side_effect = subprocess.SubprocessError('foo')
        Openvas.load_vts_into_redis()
        mock_check_call.assert_called_with(
            ['openvas', '--update-vt-info'], stdout=subprocess.DEVNULL
        )
        self.assertEqual(mock_logger.error.call_count, 1)

    @patch('ospd_openvas.openvas.logger')
    @patch('ospd_openvas.openvas.subprocess.check_output')
    def test_get_settings(
        self, mock_check_output: MagicMock, _mock_logger: MagicMock
    ):
        """'openvas -s' output is parsed; yes/no become 1/0, junk dropped."""
        mock_check_output.return_value = (
            b'non_simult_ports = 22 \n plugins_folder = /foo/bar\nfoo = yes\n'
            b'bar=no\nipsum= \nlorem\n'
        )
        settings = Openvas.get_settings()
        mock_check_output.assert_called_with(['openvas', '-s'])
        self.assertEqual(settings['non_simult_ports'], '22')
        self.assertEqual(settings['plugins_folder'], '/foo/bar')
        self.assertEqual(settings['foo'], 1)
        self.assertEqual(settings['bar'], 0)
        # empty value and '='-less line must both be skipped
        self.assertFalse('ipsum' in settings)
        self.assertFalse('lorem' in settings)

    @patch('ospd_openvas.openvas.logger')
    @patch('ospd_openvas.openvas.subprocess.check_output')
    def test_get_settings_with_error(
        self, mock_check_output: MagicMock, _mock_logger: MagicMock
    ):
        """Any failure while reading settings yields an empty dict."""
        mock_check_output.side_effect = subprocess.SubprocessError('foo')
        settings = Openvas.get_settings()
        mock_check_output.assert_called_with(['openvas', '-s'])
        self.assertFalse(settings)  # settings dict is empty
        mock_check_output.reset_mock()
        mock_check_output.side_effect = OSError('foo')
        settings = Openvas.get_settings()
        mock_check_output.assert_called_with(['openvas', '-s'])
        self.assertFalse(settings)  # settings dict is empty
        mock_check_output.reset_mock()
        # https://gehrcke.de/2015/12/how-to-raise-unicodedecodeerror-in-python-3/
        mock_check_output.side_effect = UnicodeDecodeError(
            'funnycodec', b'\x00\x00', 1, 2, 'This is just a fake reason!'
        )
        settings = Openvas.get_settings()
        mock_check_output.assert_called_with(['openvas', '-s'])
        self.assertFalse(settings)  # settings dict is empty

    @patch('ospd_openvas.openvas.psutil.Popen')
    def test_start_scan(self, mock_popen: MagicMock):
        """A scan is started via 'openvas --scan-start <id>'."""
        proc = Openvas.start_scan('scan_1')
        mock_popen.assert_called_with(
            ['openvas', '--scan-start', 'scan_1'], shell=False
        )
        self.assertIsNotNone(proc)

    @patch('ospd_openvas.openvas.psutil.Popen')
    def test_start_scan_with_sudo(self, mock_popen: MagicMock):
        """sudo=True prefixes the command with 'sudo -n'."""
        proc = Openvas.start_scan('scan_1', sudo=True)
        mock_popen.assert_called_with(
            ['sudo', '-n', 'openvas', '--scan-start', 'scan_1'], shell=False
        )
        self.assertIsNotNone(proc)

    @patch('ospd_openvas.openvas.psutil.Popen')
    def test_start_scan_with_niceness(self, mock_popen: MagicMock):
        """A niceness prefixes the command with 'nice -n <level>'."""
        proc = Openvas.start_scan('scan_1', niceness=4)
        mock_popen.assert_called_with(
            ['nice', '-n', 4, 'openvas', '--scan-start', 'scan_1'], shell=False
        )
        self.assertIsNotNone(proc)

    @patch('ospd_openvas.openvas.psutil.Popen')
    def test_start_scan_with_niceness_and_sudo(self, mock_popen: MagicMock):
        """nice comes first, then sudo, then the openvas command."""
        proc = Openvas.start_scan('scan_1', niceness=4, sudo=True)
        mock_popen.assert_called_with(
            [
                'nice',
                '-n',
                4,
                'sudo',
                '-n',
                'openvas',
                '--scan-start',
                'scan_1',
            ],
            shell=False,
        )
        self.assertIsNotNone(proc)

    @patch('ospd_openvas.openvas.logger')
    @patch('ospd_openvas.openvas.psutil.Popen')
    def test_start_scan_error(
        self, mock_popen: MagicMock, mock_logger: MagicMock
    ):
        """psutil and OS errors yield None and a single warning."""
        mock_popen.side_effect = psutil.Error('foo')
        proc = Openvas.start_scan('scan_1')
        mock_popen.assert_called_with(
            ['openvas', '--scan-start', 'scan_1'], shell=False
        )
        self.assertIsNone(proc)
        self.assertEqual(mock_logger.warning.call_count, 1)
        mock_popen.reset_mock()
        mock_logger.reset_mock()
        mock_popen.side_effect = OSError('foo')
        proc = Openvas.start_scan('scan_1')
        mock_popen.assert_called_with(
            ['openvas', '--scan-start', 'scan_1'], shell=False
        )
        self.assertIsNone(proc)
        self.assertEqual(mock_logger.warning.call_count, 1)

    @patch('ospd_openvas.openvas.logger')
    @patch('ospd_openvas.openvas.subprocess.check_call')
    def test_stop_scan(
        self, mock_check_call: MagicMock, _mock_logger: MagicMock
    ):
        """A scan is stopped via 'openvas --scan-stop <id>'."""
        success = Openvas.stop_scan('scan_1')
        mock_check_call.assert_called_with(['openvas', '--scan-stop', 'scan_1'])
        self.assertTrue(success)

    @patch('ospd_openvas.openvas.logger')
    @patch('ospd_openvas.openvas.subprocess.check_call')
    def test_stop_scan_with_sudo(
        self, mock_check_call: MagicMock, _mock_logger: MagicMock
    ):
        """sudo=True prefixes the stop command with 'sudo -n'."""
        success = Openvas.stop_scan('scan_1', sudo=True)
        mock_check_call.assert_called_with(
            ['sudo', '-n', 'openvas', '--scan-stop', 'scan_1']
        )
        self.assertTrue(success)

    @patch('ospd_openvas.openvas.logger')
    @patch('ospd_openvas.openvas.subprocess.check_call')
    def test_stop_scan_with_error(
        self, mock_check_call: MagicMock, mock_logger: MagicMock
    ):
        """Stop failures yield False and a single warning."""
        mock_check_call.side_effect = subprocess.SubprocessError('foo')
        success = Openvas.stop_scan('scan_1')
        mock_check_call.assert_called_with(['openvas', '--scan-stop', 'scan_1'])
        self.assertFalse(success)
        self.assertEqual(mock_logger.warning.call_count, 1)
        mock_check_call.reset_mock()
        mock_logger.reset_mock()
        mock_check_call.side_effect = OSError('foo')
        success = Openvas.stop_scan('scan_1')
        mock_check_call.assert_called_with(['openvas', '--scan-stop', 'scan_1'])
        self.assertFalse(success)
        self.assertEqual(mock_logger.warning.call_count, 1)
ospd-openvas-22.9.0/tests/test_port_convert.py 0000664 0000000 0000000 00000155757 15011310720 0021533 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
"""Test suites for Port manipulation."""
import unittest
import logging
from ospd.network import (
ports_as_list,
get_udp_port_list,
get_tcp_port_list,
port_list_compress,
valid_port_list,
)
PORT_LISTS = {
"ALL_IANA_ASSIGNED_TCP_2020_02_12": "T:1-50,52-80,82-99,101-113,115-224,242-248,256-257,259-271,280-284,286-287,308-324,333,344-584,586-658,660-702,704-707,709-715,729-731,741-742,744,747-754,758-765,767,769-777,780,800-802,810,828-833,847-848,853-854,860-862,873,886-888,900-903,910-913,953,989-1001,1010,1021-1027,1029,1033-1108,1110-1490,1492-1782,1784-2193,2197-2258,2260-2368,2370-2377,2379-2681,2683-2793,2795-2824,2826-2872,2874-2924,2926-3091,3093-3125,3127-3300,3302-3321,3326-3366,3372-3402,3405-3545,3547-3693,3695-3993,3995-4047,4049-4143,4145-4193,4197,4199,4300-4314,4316,4320-4323,4325-4336,4340-4362,4366,4368-4379,4389-4396,4400-4423,4425-4433,4441-4458,4484-4488,4500,4534-4538,4545-4559,4563,4566-4570,4573,4590-4605,4621,4646,4658-4692,4700-4704,4711,4725-4733,4737-4747,4749-4756,4774,4784-4791,4800-4804,4827,4837-4851,4867-4871,4876-4885,4888-4889,4894,4899-4902,4912-4915,4936-4937,4940-4942,4949-4953,4969-4971,4980,4984-4991,4999-5015,5020-5034,5042-5075,5078-5087,5092-5094,5099-5107,5111-5112,5114-5117,5120,5133-5137,5145-5146,5150-5157,5161-5168,5172,5190-5197,5200-5203,5209,5215,5221-5237,5245-5254,5264-5265,5269-5272,5280-5282,5298-5310,5312-5318,5320-5321,5343-5344,5349-5364,5397-5437,5443,5445,5450,5453-5456,5461-5465,5470-5475,5500-5507,5550,5553-5557,5565-5569,5573-5575,5579-5586,5597-5605,5618,5627-5639,5646,5666,5670-5684,5687-5689,5693,5696,5700,5705,5713-5730,5741-5748,5750,5755,5757,5766-5771,5777,5780-5787,5793-5794,5813-5814,5841-5842,5859,5863,5868,5883,5900,5910-5913,5963,5968-5969,5984-5993,5999,6064-6066,6068-6077,6080-6088,6099-6118,6121-6124,6130,6133,6140-6149,6159-6163,6200-6201,6209,6222,6241-6244,6251-6253,6267-6269,6300-6301,6306,6315-6317,6320-6322,6324-6326,6343-6344,6346-6347,6350,6355,6360,6363,6370,6379,6382,6389-6390,6417-6421,6432,6442-6446,6455-6456,6464,6471,6480-6489,6500-6503,6505-6511,6513-6515,6543-6544,6547-6551,6558,6566,6568,6579-6583,6600-6602,6619-6629,6632-6636,6640,6653,6655-6657,6670-6673,6678-6679,6687-6690
,6696-6697,6701-6706,6714-6716,6767-6771,6777-6778,6784-6791,6801,6817,6831,6841-6842,6850,6868,6888,6900-6901,6924,6935-6936,6946,6951,6961-6966,6969-6970,6997-7026,7030-7031,7040,7070-7073,7080,7088,7095,7099-7101,7107,7117,7121,7128-7129,7161-7174,7181,7200-7202,7215-7216,7227-7229,7235-7237,7244,7262,7272-7283,7365,7391-7395,7397,7400-7402,7410-7411,7420-7421,7426-7431,7437,7443,7471,7473-7474,7478,7491,7500-7501,7508-7511,7542-7551,7560,7563,7566,7569-7570,7574,7588,7606,7624,7626-7631,7633,7648,7663,7672-7677,7680,7683,7687,7689,7697,7700-7701,7707-7708,7720,7724-7728,7734,7738,7741-7744,7747,7775,7777-7779,7781,7784,7786-7787,7789,7794,7797-7802,7810,7845-7847,7869-7872,7878,7880,7887,7900-7903,7913,7932-7933,7962,7967,7979-7982,7997-8009,8015-8016,8019-8023,8025-8026,8032-8034,8040-8044,8051-8060,8066-8067,8070,8074,8077,8080-8084,8086-8088,8090-8091,8097,8100-8102,8111,8115-8118,8121-8122,8128-8132,8140,8148-8149,8153,8160-8162,8181-8184,8190-8192,8194-8195,8199-8202,8204-8208,8211,8230-8232,8243,8266,8270,8276,8280,8282,8292-8294,8300-8301,8313,8320-8322,8351,8376-8380,8383-8384,8400-8405,8415-8417,8423,8442-8445,8450,8457,8470-8474,8500-8503,8554-8555,8567,8600,8609-8615,8665-8666,8675,8686,8688,8699,8710-8711,8732-8733,8750,8763-8770,8778,8786-8787,8793,8800,8804-8805,8807-8809,8873,8880-8881,8883,8888-8894,8899-8901,8910-8913,8937,8953-8954,8980-8981,8989-8991,8997-9002,9005,9007-9011,9020-9026,9050-9051,9060,9080-9081,9083-9093,9100-9107,9111,9119,9122-9123,9131,9160-9164,9191,9200-9217,9222,9255,9277-9287,9292-9295,9300,9306,9312,9318,9321,9339,9343-9346,9374,9380,9387-9390,9396-9397,9400-9402,9418,9443-9445,9450,9500,9522,9535-9536,9555,9592-9600,9612,9614,9616-9618,9628-9632,9640,9666-9668,9694-9695,9700,9747,9750,9753,9762,9800-9802,9875-9878,9888-9889,9898-9900,9903,9909,9911,9925,9950-9956,9966,9978-9979,9981,9987-9988,9990-10010,10020,10023,10050-10051,10055,10080-10081,10100-10104,10107,10110-10111,10113-10117,10125,10128-10129,10160-10162,1020
0-10201,10252-10253,10260-10261,10288,10321,10439,10443,10500,10540-10544,10548,10631,10800,10805,10809-10810,10860,10880,10933,10990,11000-11001,11095,11103-11106,11108-11112,11161-11165,11171-11175,11201-11202,11208,11211,11319-11321,11367,11371,11430,11489,11600,11623,11720,11723,11751,11796,11876-11877,11967,11971,12000-12010,12012-12013,12109,12121,12168,12172,12300,12302,12321-12322,12345,12753,12865,13160,13216-13218,13223-13224,13400,13720-13722,13724,13782-13783,13785-13786,13818-13823,13894,13929-13930,14000-14002,14033-14034,14141-14143,14145,14149-14150,14154,14250,14414,14500,14936-14937,15000,15002,15118,15345,15363,15555,15660,15740,15998-16003,16020-16021,16161-16162,16309-16311,16360-16361,16367-16368,16384-16385,16619,16665-16666,16789,16900,16950,16991-16995,17007,17184-17185,17219-17225,17234-17235,17500,17555,17729,17754-17756,17777,18000,18104,18136,18181-18187,18241-18243,18262,18463,18634-18635,18668,18769,18881,18888,19000,19007,19020,19191,19194,19220,19283,19315,19398,19410-19412,19539-19541,19788,19998-20003,20005,20012-20014,20034,20046,20048-20049,20057,20167,20202,20222,20480,20670,20999-21000,21010,21212-21213,21221,21553-21554,21590,21800,21845-21849,22000-22005,22125,22128,22222,22273,22305,22335,22343,22347,22350-22351,22537,22555,22763,22800,22951,23000-23005,23053,23272,23294,23333,23400-23402,23456-23457,23546,24000-24006,24242,24249,24321-24323,24386,24465,24554,24577,24666,24676-24678,24680,24754,24850,24922,25000-25009,25576,25604,25793,25900-25903,25954-25955,26000,26133,26208,26257,26260-26263,26486-26487,26489,27010,27017,27345,27442,27504,27782,27876,27999-28001,28010,28119,28200,28240,28589,29118,29167-29168,29999-30004,30100,30260,30400,30832,30999,31016,31020,31029,31400,31416,31457,31620,31685,31765,31948-31949,32034,32249,32400,32483,32635-32636,32767-32777,32801,32811,32896,33000,33060,33123,33331,33333-33334,33434-33435,33656,34249,34378-34379,34567,34962-34964,34980,35000-35006,35100,35354-35357,36001,36411-36412,
36422,36462,36524,36602,36700,36865,37475,37483,37601,37654,38000-38002,38201-38203,38412,38422,38462,38472,38800,38865,39681,40000,40023,40404,40841-40843,40853,41111,41121,41230,41794-41797,42508-42510,43000,43188-43191,43210,43438-43441,44123,44321-44323,44444-44445,44544,44553,44600,44818,44900,45000-45002,45045,45054,45514,45678,45824-45825,45966,46336,46998-47001,47100,47557,47624,47806,47808-47809,48000-48005,48048-48050,48128-48129,48556,48619,48653,49000-49001,49150,6000-6063,6665-6669", # pylint: disable=C0301
"ALL_PRIVILEGED_TCP": "T:1-1023",
"ALL_PRIVILEGED_TCP_AND_UDP": "T:1-1023, U:1-1023",
"ALL_TCP": "T:1-65535",
"OPENVAS_DEFAULT": "T:1-5,7-7,9-9,11-11,13-13,15-15,17-25,27-27,29-29,31-31,33-33,35-35,37-39,41-59,61-224,242-248,256-268,280-287,308-322,333-333,344-700,702-702,704-707,709-711,721-721,723-723,729-731,740-742,744-744,747-754,758-765,767-767,769-777,780-783,786-787,799-801,808-808,810-810,828-829,847-848,860-860,871-871,873-873,886-888,898-898,900-904,911-913,927-927,950-950,953-953,975-975,989-1002,1005-1005,1008-1008,1010-1010,1023-1027,1029-1036,1040-1040,1042-1042,1045-1045,1047-1112,1114-1117,1119-1120,1122-1127,1139-1139,1154-1155,1161-1162,1168-1170,1178-1178,1180-1181,1183-1188,1194-1194,1199-1231,1233-1286,1288-1774,1776-2028,2030-2030,2032-2035,2037-2038,2040-2065,2067-2083,2086-2087,2089-2152,2155-2155,2159-2167,2170-2177,2180-2181,2190-2191,2199-2202,2213-2213,2220-2223,2232-2246,2248-2255,2260-2260,2273-2273,2279-2289,2294-2311,2313-2371,2381-2425,2427-2681,2683-2824,2826-2854,2856-2924,2926-3096,3098-3299,3302-3321,3326-3366,3372-3403,3405-3545,3547-3707,3709-3765,3767-3770,3772-3800,3802-3802,3845-3871,3875-3876,3885-3885,3900-3900,3928-3929,3939-3939,3959-3959,3970-3971,3984-3987,3999-4036,4040-4042,4045-4045,4080-4080,4096-4100,4111-4111,4114-4114,4132-4134,4138-4138,4141-4145,4154-4154,4160-4160,4199-4200,4242-4242,4300-4300,4321-4321,4333-4333,4343-4351,4353-4358,4369-4369,4400-4400,4442-4457,4480-4480,4500-4500,4545-4547,4555-4555,4557-4557,4559-4559,4567-4568,4600-4601,4658-4662,4672-4672,4752-4752,4800-4802,4827-4827,4837-4839,4848-4849,4868-4869,4885-4885,4894-4894,4899-4899,4950-4950,4983-4983,4987-4989,4998-4998,5000-5011,5020-5025,5031-5031,5042-5042,5050-5057,5060-5061,5064-5066,5069-5069,5071-5071,5081-5081,5093-5093,5099-5102,5137-5137,5145-5145,5150-5152,5154-5154,5165-5165,5190-5193,5200-5203,5222-5222,5225-5226,5232-5232,5236-5236,5250-5251,5264-5265,5269-5269,5272-5272,5282-5282,5300-5311,5314-5315,5351-5355,5400-5432,5435-5435,5454-5456,5461-5463,5465-5465,5500-5504,5510-5510,5520-5521,5530-5530,5540-5540,5550-5550,5553-5556,5566-5
566,5569-5569,5595-5605,5631-5632,5666-5666,5673-5680,5688-5688,5690-5690,5713-5717,5720-5720,5729-5730,5741-5742,5745-5746,5755-5755,5757-5757,5766-5768,5771-5771,5800-5803,5813-5813,5858-5859,5882-5882,5888-5889,5900-5903,5968-5969,5977-5979,5987-5991,5997-6010,6050-6051,6064-6073,6085-6085,6100-6112,6123-6123,6141-6150,6175-6177,6200-6200,6253-6253,6255-6255,6270-6270,6300-6300,6321-6322,6343-6343,6346-6347,6373-6373,6382-6382,6389-6389,6400-6400,6455-6456,6471-6471,6500-6503,6505-6510,6543-6543,6547-6550,6558-6558,6566-6566,6580-6582,6588-6588,6620-6621,6623-6623,6628-6628,6631-6631,6665-6670,6672-6673,6699-6701,6714-6714,6767-6768,6776-6776,6788-6790,6831-6831,6841-6842,6850-6850,6881-6889,6891-6891,6901-6901,6939-6939,6961-6966,6969-6970,6998-7015,7020-7021,7030-7030,7070-7070,7099-7100,7121-7121,7161-7161,7170-7170,7174-7174,7200-7201,7210-7210,7269-7269,7273-7273,7280-7281,7283-7283,7300-7300,7320-7320,7326-7326,7391-7392,7395-7395,7426-7431,7437-7437,7464-7464,7491-7491,7501-7501,7510-7511,7544-7545,7560-7560,7566-7566,7570-7570,7575-7575,7588-7588,7597-7597,7624-7624,7626-7627,7633-7634,7648-7649,7666-7666,7674-7676,7743-7743,7775-7779,7781-7781,7786-7786,7797-7798,7800-7801,7845-7846,7875-7875,7902-7902,7913-7913,7932-7933,7967-7967,7979-7980,7999-8005,8007-8010,8022-8022,8032-8033,8044-8044,8074-8074,8080-8082,8088-8089,8098-8098,8100-8100,8115-8116,8118-8118,8121-8122,8130-8132,8160-8161,8181-8194,8199-8201,8204-8208,8224-8225,8245-8245,8311-8311,8351-8351,8376-8380,8400-8403,8416-8417,8431-8431,8443-8444,8450-8450,8473-8473,8554-8555,8649-8649,8733-8733,8763-8765,8786-8787,8804-8804,8863-8864,8875-8875,8880-8880,8888-8894,8900-8901,8910-8911,8954-8954,8989-8989,8999-9002,9006-9006,9009-9009,9020-9026,9080-9080,9090-9091,9100-9103,9110-9111,9131-9131,9152-9152,9160-9164,9200-9207,9210-9211,9217-9217,9281-9285,9287-9287,9292-9292,9321-9321,9343-9344,9346-9346,9374-9374,9390-9390,9396-9397,9400-9400,9418-9418,9495-9495,9500-9500,9535-9537,9593-9595,9600-9
600,9612-9612,9704-9704,9747-9747,9753-9753,9797-9797,9800-9802,9872-9872,9875-9876,9888-9889,9898-9901,9909-9909,9911-9911,9950-9952,9990-10005,10007-10008,10012-10012,10080-10083,10101-10103,10113-10116,10128-10128,10252-10252,10260-10260,10288-10288,10607-10607,10666-10666,10752-10752,10990-10990,11000-11001,11111-11111,11201-11201,11223-11223,11319-11321,11367-11367,11371-11371,11600-11600,11720-11720,11751-11751,11965-11965,11967-11967,11999-12006,12076-12076,12109-12109,12168-12168,12172-12172,12223-12223,12321-12321,12345-12346,12361-12362,12468-12468,12701-12701,12753-12753,13160-13160,13223-13224,13701-13702,13705-13706,13708-13718,13720-13722,13724-13724,13782-13783,13818-13822,14001-14001,14033-14034,14141-14141,14145-14145,14149-14149,14194-14194,14237-14237,14936-14937,15000-15000,15126-15126,15345-15345,15363-15363,16360-16361,16367-16368,16384-16384,16660-16661,16959-16959,16969-16969,16991-16991,17007-17007,17185-17185,17219-17219,17300-17300,17770-17772,18000-18000,18181-18187,18190-18190,18241-18241,18463-18463,18769-18769,18888-18888,19191-19191,19194-19194,19283-19283,19315-19315,19398-19398,19410-19412,19540-19541,19638-19638,19726-19726,20000-20001,20005-20005,20011-20012,20034-20034,20200-20200,20202-20203,20222-20222,20670-20670,20999-21000,21490-21490,21544-21544,21590-21590,21800-21800,21845-21849,22000-22001,22222-22222,22273-22273,22289-22289,22305-22305,22321-22321,22370-22370,22555-22555,22800-22800,22951-22951,23456-23456,24000-24006,24242-24242,24249-24249,24345-24347,24386-24386,24554-24554,24677-24678,24922-24922,25000-25009,25378-25378,25544-25544,25793-25793,25867-25867,25901-25901,25903-25903,26000-26000,26208-26208,26260-26264,27000-27010,27345-27345,27374-27374,27504-27504,27665-27665,27999-27999,28001-28001,29559-29559,29891-29891,30001-30002,30100-30102,30303-30303,30999-30999,31337-31337,31339-31339,31416-31416,31457-31457,31554-31554,31556-31556,31620-31620,31765-31765,31785-31787,32261-32261,32666-32666,32768-32780,32786-3
2787,32896-32896,33270-33270,33331-33331,33434-33434,33911-33911,34249-34249,34324-34324,34952-34952,36865-36865,37475-37475,37651-37651,38037-38037,38201-38201,38292-38293,39681-39681,40412-40412,40841-40843,41111-41111,41508-41508,41794-41795,42508-42510,43118-43118,43188-43190,44321-44322,44333-44334,44442-44443,44818-44818,45000-45000,45054-45054,45678-45678,45966-45966,47000-47000,47557-47557,47624-47624,47806-47806,47808-47808,47891-47891,48000-48003,48556-48556,49400-49400,50000-50004,50505-50505,50776-50776,51210-51210,53001-53001,54320-54321,57341-57341,59595-59595,60177-60177,60179-60179,61439-61441,61446-61446,65000-65000,65301-65301", # pylint: disable=C0301
"ALL_IANA_ASSIGNED_TCP_AND_UDP_2020_02_12": "T:1-50,52-80,82-99,101-113,115-224,242-248,256-257,259-271,280-284,286-287,308-324,333,344-584,586-658,660-702,704-707,709-715,729-731,741-742,744,747-754,758-765,767,769-777,780,800-802,810,828-833,847-848,853-854,860-862,873,886-888,900-903,910-913,953,989-1001,1010,1021-1027,1029,1033-1108,1110-1490,1492-1782,1784-2193,2197-2258,2260-2368,2370-2377,2379-2681,2683-2793,2795-2824,2826-2872,2874-2924,2926-3091,3093-3125,3127-3300,3302-3321,3326-3366,3372-3402,3405-3545,3547-3693,3695-3993,3995-4047,4049-4143,4145-4193,4197,4199,4300-4314,4316,4320-4323,4325-4336,4340-4362,4366,4368-4379,4389-4396,4400-4423,4425-4433,4441-4458,4484-4488,4500,4534-4538,4545-4559,4563,4566-4570,4573,4590-4605,4621,4646,4658-4692,4700-4704,4711,4725-4733,4737-4747,4749-4756,4774,4784-4791,4800-4804,4827,4837-4851,4867-4871,4876-4885,4888-4889,4894,4899-4902,4912-4915,4936-4937,4940-4942,4949-4953,4969-4971,4980,4984-4991,4999-5015,5020-5034,5042-5075,5078-5087,5092-5094,5099-5107,5111-5112,5114-5117,5120,5133-5137,5145-5146,5150-5157,5161-5168,5172,5190-5197,5200-5203,5209,5215,5221-5237,5245-5254,5264-5265,5269-5272,5280-5282,5298-5310,5312-5318,5320-5321,5343-5344,5349-5364,5397-5437,5443,5445,5450,5453-5456,5461-5465,5470-5475,5500-5507,5550,5553-5557,5565-5569,5573-5575,5579-5586,5597-5605,5618,5627-5639,5646,5666,5670-5684,5687-5689,5693,5696,5700,5705,5713-5730,5741-5748,5750,5755,5757,5766-5771,5777,5780-5787,5793-5794,5813-5814,5841-5842,5859,5863,5868,5883,5900,5910-5913,5963,5968-5969,5984-5993,5999,6064-6066,6068-6077,6080-6088,6099-6118,6121-6124,6130,6133,6140-6149,6159-6163,6200-6201,6209,6222,6241-6244,6251-6253,6267-6269,6300-6301,6306,6315-6317,6320-6322,6324-6326,6343-6344,6346-6347,6350,6355,6360,6363,6370,6379,6382,6389-6390,6417-6421,6432,6442-6446,6455-6456,6464,6471,6480-6489,6500-6503,6505-6511,6513-6515,6543-6544,6547-6551,6558,6566,6568,6579-6583,6600-6602,6619-6629,6632-6636,6640,6653,6655-6657,6670-6673,6678-6679,6
687-6690,6696-6697,6701-6706,6714-6716,6767-6771,6777-6778,6784-6791,6801,6817,6831,6841-6842,6850,6868,6888,6900-6901,6924,6935-6936,6946,6951,6961-6966,6969-6970,6997-7026,7030-7031,7040,7070-7073,7080,7088,7095,7099-7101,7107,7117,7121,7128-7129,7161-7174,7181,7200-7202,7215-7216,7227-7229,7235-7237,7244,7262,7272-7283,7365,7391-7395,7397,7400-7402,7410-7411,7420-7421,7426-7431,7437,7443,7471,7473-7474,7478,7491,7500-7501,7508-7511,7542-7551,7560,7563,7566,7569-7570,7574,7588,7606,7624,7626-7631,7633,7648,7663,7672-7677,7680,7683,7687,7689,7697,7700-7701,7707-7708,7720,7724-7728,7734,7738,7741-7744,7747,7775,7777-7779,7781,7784,7786-7787,7789,7794,7797-7802,7810,7845-7847,7869-7872,7878,7880,7887,7900-7903,7913,7932-7933,7962,7967,7979-7982,7997-8009,8015-8016,8019-8023,8025-8026,8032-8034,8040-8044,8051-8060,8066-8067,8070,8074,8077,8080-8084,8086-8088,8090-8091,8097,8100-8102,8111,8115-8118,8121-8122,8128-8132,8140,8148-8149,8153,8160-8162,8181-8184,8190-8192,8194-8195,8199-8202,8204-8208,8211,8230-8232,8243,8266,8270,8276,8280,8282,8292-8294,8300-8301,8313,8320-8322,8351,8376-8380,8383-8384,8400-8405,8415-8417,8423,8442-8445,8450,8457,8470-8474,8500-8503,8554-8555,8567,8600,8609-8615,8665-8666,8675,8686,8688,8699,8710-8711,8732-8733,8750,8763-8770,8778,8786-8787,8793,8800,8804-8805,8807-8809,8873,8880-8881,8883,8888-8894,8899-8901,8910-8913,8937,8953-8954,8980-8981,8989-8991,8997-9002,9005,9007-9011,9020-9026,9050-9051,9060,9080-9081,9083-9093,9100-9107,9111,9119,9122-9123,9131,9160-9164,9191,9200-9217,9222,9255,9277-9287,9292-9295,9300,9306,9312,9318,9321,9339,9343-9346,9374,9380,9387-9390,9396-9397,9400-9402,9418,9443-9445,9450,9500,9522,9535-9536,9555,9592-9600,9612,9614,9616-9618,9628-9632,9640,9666-9668,9694-9695,9700,9747,9750,9753,9762,9800-9802,9875-9878,9888-9889,9898-9900,9903,9909,9911,9925,9950-9956,9966,9978-9979,9981,9987-9988,9990-10010,10020,10023,10050-10051,10055,10080-10081,10100-10104,10107,10110-10111,10113-10117,10125,10128-10129,10160-10
162,10200-10201,10252-10253,10260-10261,10288,10321,10439,10443,10500,10540-10544,10548,10631,10800,10805,10809-10810,10860,10880,10933,10990,11000-11001,11095,11103-11106,11108-11112,11161-11165,11171-11175,11201-11202,11208,11211,11319-11321,11367,11371,11430,11489,11600,11623,11720,11723,11751,11796,11876-11877,11967,11971,12000-12010,12012-12013,12109,12121,12168,12172,12300,12302,12321-12322,12345,12753,12865,13160,13216-13218,13223-13224,13400,13720-13722,13724,13782-13783,13785-13786,13818-13823,13894,13929-13930,14000-14002,14033-14034,14141-14143,14145,14149-14150,14154,14250,14414,14500,14936-14937,15000,15002,15118,15345,15363,15555,15660,15740,15998-16003,16020-16021,16161-16162,16309-16311,16360-16361,16367-16368,16384-16385,16619,16665-16666,16789,16900,16950,16991-16995,17007,17184-17185,17219-17225,17234-17235,17500,17555,17729,17754-17756,17777,18000,18104,18136,18181-18187,18241-18243,18262,18463,18634-18635,18668,18769,18881,18888,19000,19007,19020,19191,19194,19220,19283,19315,19398,19410-19412,19539-19541,19788,19998-20003,20005,20012-20014,20034,20046,20048-20049,20057,20167,20202,20222,20480,20670,20999-21000,21010,21212-21213,21221,21553-21554,21590,21800,21845-21849,22000-22005,22125,22128,22222,22273,22305,22335,22343,22347,22350-22351,22537,22555,22763,22800,22951,23000-23005,23053,23272,23294,23333,23400-23402,23456-23457,23546,24000-24006,24242,24249,24321-24323,24386,24465,24554,24577,24666,24676-24678,24680,24754,24850,24922,25000-25009,25576,25604,25793,25900-25903,25954-25955,26000,26133,26208,26257,26260-26263,26486-26487,26489,27010,27017,27345,27442,27504,27782,27876,27999-28001,28010,28119,28200,28240,28589,29118,29167-29168,29999-30004,30100,30260,30400,30832,30999,31016,31020,31029,31400,31416,31457,31620,31685,31765,31948-31949,32034,32249,32400,32483,32635-32636,32767-32777,32801,32811,32896,33000,33060,33123,33331,33333-33334,33434-33435,33656,34249,34378-34379,34567,34962-34964,34980,35000-35006,35100,35354-35357,36001,3641
1-36412,36422,36462,36524,36602,36700,36865,37475,37483,37601,37654,38000-38002,38201-38203,38412,38422,38462,38472,38800,38865,39681,40000,40023,40404,40841-40843,40853,41111,41121,41230,41794-41797,42508-42510,43000,43188-43191,43210,43438-43441,44123,44321-44323,44444-44445,44544,44553,44600,44818,44900,45000-45002,45045,45054,45514,45678,45824-45825,45966,46336,46998-47001,47100,47557,47624,47806,47808-47809,48000-48005,48048-48050,48128-48129,48556,48619,48653,49000-49001,49150,6000-6063,6665-6669, U:1-50,52-80,82-99,101-113,115-224,242-248,256-257,259-271,280-284,286-287,308-324,333,344-584,586-658,660-702,704-707,709-716,729-731,741-742,744,747-754,758-765,767,769-777,780,800-802,810,828-833,847-848,853-854,860-862,873,886-888,900-903,910-913,953,989-1001,1008,1010,1021-1027,1029,1033-1108,1110-1490,1492-1782,1784-2193,2197-2258,2260-2368,2370-2375,2377,2379-2681,2683-2793,2795-2824,2826-2872,2874-2924,2926-3091,3093-3125,3127-3300,3302-3321,3326-3366,3372-3402,3405-3545,3547-3693,3695-3993,3995-4047,4049-4143,4145-4193,4197,4199,4300-4314,4316,4320-4323,4325-4336,4340-4362,4366,4368-4379,4389-4396,4400-4423,4425-4433,4441-4458,4484-4488,4500,4534-4538,4545-4559,4563,4566-4570,4573,4590-4605,4621,4646,4658-4692,4700-4704,4711,4725-4733,4737-4747,4749-4756,4774,4784-4791,4800-4804,4827,4837-4851,4867-4871,4876-4885,4888-4889,4894,4899-4902,4912,4914-4915,4936-4937,4940-4942,4949-4953,4969-4971,4980,4984-4991,4999-5015,5020-5034,5042-5075,5078-5087,5092-5094,5099-5107,5111-5112,5114-5117,5120,5133-5137,5145-5146,5150-5157,5161-5168,5172,5190-5197,5200-5203,5209,5215,5221-5237,5245-5254,5264-5265,5269-5272,5280-5282,5298-5310,5312-5318,5320-5321,5343-5344,5349-5364,5397-5437,5443,5445,5450,5453-5456,5461-5465,5470-5475,5500-5507,5550,5553-5557,5565-5569,5573-5575,5579-5586,5597-5605,5618,5627-5639,5646,5666,5670-5684,5687-5689,5693,5696,5700,5705,5713-5730,5741-5748,5750,5755,5757,5766-5771,5777,5780-5787,5793-5794,5813-5814,5841-5842,5859,5863,5868,5900,5910-59
13,5963,5968-5969,5984-5993,5999,6064-6066,6068-6077,6080-6088,6099-6118,6121-6124,6130,6133,6140-6149,6159-6163,6200-6201,6209,6222,6241-6244,6251-6253,6267-6269,6300-6301,6306,6315-6317,6320-6322,6324-6326,6343-6344,6346-6347,6350,6355,6360,6363,6370,6379,6382,6389-6390,6417-6421,6432,6442-6446,6455-6456,6464,6471,6480-6489,6500-6503,6505-6511,6513-6515,6543-6544,6547-6551,6558,6566,6568,6579-6583,6600-6602,6619-6629,6632-6636,6640,6653,6655-6657,6670-6673,6678-6679,6687-6690,6696-6697,6701-6706,6714-6716,6767-6771,6777-6778,6784-6791,6801,6817,6831,6841-6842,6850,6868,6888,6900-6901,6924,6935-6936,6946,6951,6961-6966,6969-6970,6997-7026,7030-7031,7040,7070-7073,7080,7088,7095,7099-7101,7107,7117,7121,7128-7129,7161-7174,7181,7200-7201,7215-7216,7227-7229,7235-7237,7244,7262,7272-7283,7365,7391-7395,7397,7400-7402,7410-7411,7420-7421,7426-7431,7437,7443,7471,7473-7474,7478,7491,7500-7501,7508-7511,7542-7551,7560,7563,7566,7569-7570,7574,7588,7606,7624,7626-7631,7633,7648,7663,7672-7677,7680,7683,7687,7689,7697,7700-7702,7707-7708,7720,7724-7728,7734,7738,7741-7744,7747,7775,7777-7779,7781,7784,7786-7787,7789,7794,7797-7802,7810,7845-7847,7869-7872,7878,7880,7887,7900-7903,7913,7932-7933,7962,7967,7979-7982,7997-8009,8015-8016,8019-8023,8025-8026,8032-8034,8040-8044,8051-8060,8066-8067,8070,8074,8077,8080-8084,8086-8088,8090-8091,8097,8100-8102,8111,8115-8118,8121-8122,8128-8132,8140,8148-8149,8153,8160-8162,8181-8184,8190-8192,8194-8195,8199-8202,8204-8208,8211,8230-8232,8243,8266,8270,8276,8280,8282,8292-8294,8300-8301,8313,8320-8322,8351,8376-8380,8383-8384,8400-8405,8415-8417,8423,8442-8445,8450,8457,8470-8474,8500-8503,8554-8555,8567,8600,8609-8615,8665-8666,8675,8686,8688,8699,8710-8711,8732-8733,8750,8763-8770,8778,8786-8787,8793,8800,8804-8805,8807-8809,8873,8880-8881,8883,8888-8894,8899-8901,8910-8913,8937,8953-8954,8980-8981,8989-8991,8997-9002,9005,9007-9011,9020-9026,9050-9051,9060,9080-9081,9083-9093,9100-9107,9111,9119,9122-9123,9131,9160-9164,9191,92
00-9217,9222,9255,9277-9287,9292-9295,9300,9306,9312,9318,9321,9339,9343-9346,9374,9380,9387-9390,9396-9397,9400-9402,9418,9443-9445,9450,9500,9522,9535-9536,9555,9592-9600,9612,9614,9616-9618,9628-9632,9640,9666-9668,9694-9695,9700,9747,9750,9753,9762,9800-9802,9875,9877-9878,9888-9889,9898-9901,9903,9909,9911,9925,9950-9956,9966,9978-9979,9981,9987-9988,9990-10010,10020,10023,10050-10051,10055,10080-10081,10100-10104,10107,10110-10111,10113-10117,10125,10128-10129,10160-10162,10200-10201,10252-10253,10260-10261,10288,10321,10439,10443,10500,10540-10544,10548,10631,10800,10805,10809-10810,10860,10880,10933,10990,11000-11001,11095,11103-11106,11108-11112,11161-11165,11171-11175,11201-11202,11208,11211,11319-11321,11367,11371,11430,11489,11600,11623,11720,11723,11751,11796,11876-11877,11967,11971,12000-12010,12012-12013,12109,12121,12168,12172,12300,12302,12321-12322,12345,12753,12865,13160,13216-13218,13223-13224,13400,13720-13722,13724,13782-13783,13785-13786,13818-13823,13894,13929-13930,14000-14002,14033-14034,14141-14143,14145,14149-14150,14154,14250,14414,14500,14936-14937,15000,15002,15118,15345,15363,15555,15660,15740,15998-16003,16020-16021,16161-16162,16309-16311,16360-16361,16367-16368,16384-16385,16619,16665-16666,16789,16900,16950,16991-16995,17007,17184-17185,17219-17225,17234-17235,17500,17555,17729,17754-17756,17777,18000,18104,18136,18181-18187,18241-18243,18262,18463,18634-18635,18668,18769,18881,18888,19000,19007,19020,19191,19194,19220,19283,19315,19398,19410-19412,19539-19541,19788,19998-20003,20005,20012-20014,20034,20046,20048-20049,20057,20167,20202,20222,20480,20670,20999-21000,21010,21212-21213,21221,21553-21554,21590,21800,21845-21849,22000-22005,22125,22128,22222,22273,22305,22335,22343,22347,22350-22351,22537,22555,22763,22800,22951,23000-23005,23053,23272,23294,23333,23400-23402,23456-23457,23546,24000-24006,24242,24249,24321-24323,24386,24465,24554,24577,24666,24676-24678,24680,24754,24850,24922,25000-25009,25576,25604,25793,25900-25903
,25954-25955,26000,26133,26208,26257,26260-26263,26486-26487,26489,27010,27017,27345,27442,27504,27782,27876,27999-28001,28010,28119,28200,28240,28589,29118,29167-29168,29999-30004,30100,30260,30400,30832,30999,31016,31020,31029,31400,31416,31457,31620,31685,31765,31948-31949,32034,32249,32400,32483,32635-32636,32767-32777,32801,32811,32896,33000,33060,33123,33331,33333-33334,33434-33435,33656,34249,34378-34379,34567,34962-34964,34980,35000-35006,35100,35354-35357,36001,36411-36412,36422,36462,36524,36602,36700,36865,37475,37483,37601,37654,38000-38002,38201-38203,38412,38422,38462,38472,38800,38865,39681,40000,40023,40404,40841-40843,40853,41111,41121,41230,41794-41797,42508-42510,43000,43188-43191,43210,43438-43441,44123,44321-44323,44444-44445,44544,44553,44600,44818,44900,45000-45002,45045,45054,45514,45678,45824-45825,45966,46336,46998-47001,47100,47557,47624,47806,47808-47809,48000-48005,48048-48050,48128-48129,48556,48619,48653,49000-49001,49150,6000-6063,6665-6669", # pylint: disable=C0301
"ALL_TCP_AND_NMAP_5_51_TOP_100_UDP": "T:1-65535, U:7-7,9-9,17-17,19-19,49-49,53-53,67-69,80-80,88-88,111-111,120-120,123-123,135-139,158-158,161-162,177-177,427-427,443-443,445-445,497-497,500-500,514-515,518-518,520-520,593-593,623-623,626-626,631-631,996-999,1022-1023,1025-1030,1433-1434,1645-1646,1701-1701,1718-1719,1812-1813,1900-1900,2000-2000,2048-2049,2222-2223,3283-3283,3456-3456,3703-3703,4444-4444,4500-4500,5000-5000,5060-5060,5353-5353,5632-5632,9200-9200,10000-10000,17185-17185,20031-20031,30718-30718,31337-31337,32768-32769,32771-32771,32815-32815,33281-33281,49152-49154,49156-49156,49181-49182,49185-49186,49188-49188,49190-49194,49200-49201", # pylint: disable=C0301
"ALL_TCP_AND_NMAP_5_51_TOP_1000_UDP": "T:1-65535, U:2-3,7-7,9-9,13-13,17-17,19-23,37-38,42-42,49-49,53-53,67-69,80-80,88-88,111-113,120-120,123-123,135-139,158-158,161-162,177-177,192-192,199-199,207-207,217-217,363-363,389-389,402-402,407-407,427-427,434-434,443-443,445-445,464-464,497-497,500-500,502-502,512-515,517-518,520-520,539-539,559-559,593-593,623-623,626-626,631-631,639-639,643-643,657-657,664-664,682-689,764-764,767-767,772-776,780-782,786-786,789-789,800-800,814-814,826-826,829-829,838-838,902-903,944-944,959-959,965-965,983-983,989-990,996-1001,1007-1008,1012-1014,1019-1051,1053-1060,1064-1070,1072-1072,1080-1081,1087-1088,1090-1090,1100-1101,1105-1105,1124-1124,1200-1200,1214-1214,1234-1234,1346-1346,1419-1419,1433-1434,1455-1455,1457-1457,1484-1485,1524-1524,1645-1646,1701-1701,1718-1719,1761-1761,1782-1782,1804-1804,1812-1813,1885-1886,1900-1901,1993-1993,2000-2000,2002-2002,2048-2049,2051-2051,2148-2148,2160-2161,2222-2223,2343-2343,2345-2345,2362-2362,2967-2967,3052-3052,3130-3130,3283-3283,3296-3296,3343-3343,3389-3389,3401-3401,3456-3457,3659-3659,3664-3664,3702-3703,4000-4000,4008-4008,4045-4045,4444-4444,4500-4500,4666-4666,4672-4672,5000-5003,5010-5010,5050-5050,5060-5060,5093-5093,5351-5351,5353-5353,5355-5355,5500-5500,5555-5555,5632-5632,6000-6002,6004-6004,6050-6050,6346-6347,6970-6971,7000-7000,7938-7938,8000-8001,8010-8010,8181-8181,8193-8193,8900-8900,9000-9001,9020-9020,9103-9103,9199-9200,9370-9370,9876-9877,9950-9950,10000-10000,10080-10080,11487-11487,16086-16086,16402-16402,16420-16420,16430-16430,16433-16433,16449-16449,16498-16498,16503-16503,16545-16545,16548-16548,16573-16573,16674-16674,16680-16680,16697-16697,16700-16700,16708-16708,16711-16711,16739-16739,16766-16766,16779-16779,16786-16786,16816-16816,16829-16829,16832-16832,16838-16839,16862-16862,16896-16896,16912-16912,16918-16919,16938-16939,16947-16948,16970-16970,16972-16972,16974-16974,17006-17006,17018-17018,17077-17077,17091-17091,17101-17101,17146-17146,17184-171
85,17205-17205,17207-17207,17219-17219,17236-17237,17282-17282,17302-17302,17321-17321,17331-17332,17338-17338,17359-17359,17417-17417,17423-17424,17455-17455,17459-17459,17468-17468,17487-17487,17490-17490,17494-17494,17505-17505,17533-17533,17549-17549,17573-17573,17580-17580,17585-17585,17592-17592,17605-17605,17615-17616,17629-17629,17638-17638,17663-17663,17673-17674,17683-17683,17726-17726,17754-17754,17762-17762,17787-17787,17814-17814,17823-17824,17836-17836,17845-17845,17888-17888,17939-17939,17946-17946,17989-17989,18004-18004,18081-18081,18113-18113,18134-18134,18156-18156,18228-18228,18234-18234,18250-18250,18255-18255,18258-18258,18319-18319,18331-18331,18360-18360,18373-18373,18449-18449,18485-18485,18543-18543,18582-18582,18605-18605,18617-18617,18666-18666,18669-18669,18676-18676,18683-18683,18807-18807,18818-18818,18821-18821,18830-18830,18832-18832,18835-18835,18869-18869,18883-18883,18888-18888,18958-18958,18980-18980,18985-18985,18987-18987,18991-18991,18994-18994,18996-18996,19017-19017,19022-19022,19039-19039,19047-19047,19075-19075,19096-19096,19120-19120,19130-19130,19140-19141,19154-19154,19161-19161,19165-19165,19181-19181,19193-19193,19197-19197,19222-19222,19227-19227,19273-19273,19283-19283,19294-19294,19315-19315,19322-19322,19332-19332,19374-19374,19415-19415,19482-19482,19489-19489,19500-19500,19503-19504,19541-19541,19600-19600,19605-19605,19616-19616,19624-19625,19632-19632,19639-19639,19647-19647,19650-19650,19660-19660,19662-19663,19682-19683,19687-19687,19695-19695,19707-19707,19717-19719,19722-19722,19728-19728,19789-19789,19792-19792,19933-19933,19935-19936,19956-19956,19995-19995,19998-19998,20003-20004,20019-20019,20031-20031,20082-20082,20117-20117,20120-20120,20126-20126,20129-20129,20146-20146,20154-20154,20164-20164,20206-20206,20217-20217,20249-20249,20262-20262,20279-20279,20288-20288,20309-20309,20313-20313,20326-20326,20359-20360,20366-20366,20380-20380,20389-20389,20409-20409,20411-20411,20423-20425,20445-20445,20449
-20449,20464-20465,20518-20518,20522-20522,20525-20525,20540-20540,20560-20560,20665-20665,20678-20679,20710-20710,20717-20717,20742-20742,20752-20752,20762-20762,20791-20791,20817-20817,20842-20842,20848-20848,20851-20851,20865-20865,20872-20872,20876-20876,20884-20884,20919-20919,21000-21000,21016-21016,21060-21060,21083-21083,21104-21104,21111-21111,21131-21131,21167-21167,21186-21186,21206-21207,21212-21212,21247-21247,21261-21261,21282-21282,21298-21298,21303-21303,21318-21318,21320-21320,21333-21333,21344-21344,21354-21354,21358-21358,21360-21360,21364-21364,21366-21366,21383-21383,21405-21405,21454-21454,21468-21468,21476-21476,21514-21514,21524-21525,21556-21556,21566-21566,21568-21568,21576-21576,21609-21609,21621-21621,21625-21625,21644-21644,21649-21649,21655-21655,21663-21663,21674-21674,21698-21698,21702-21702,21710-21710,21742-21742,21780-21780,21784-21784,21800-21800,21803-21803,21834-21834,21842-21842,21847-21847,21868-21868,21898-21898,21902-21902,21923-21923,21948-21948,21967-21967,22029-22029,22043-22043,22045-22045,22053-22053,22055-22055,22105-22105,22109-22109,22123-22124,22341-22341,22692-22692,22695-22695,22739-22739,22799-22799,22846-22846,22914-22914,22986-22986,22996-22996,23040-23040,23176-23176,23354-23354,23531-23531,23557-23557,23608-23608,23679-23679,23781-23781,23965-23965,23980-23980,24007-24007,24279-24279,24511-24511,24594-24594,24606-24606,24644-24644,24854-24854,24910-24910,25003-25003,25157-25157,25240-25240,25280-25280,25337-25337,25375-25375,25462-25462,25541-25541,25546-25546,25709-25709,25931-25931,26407-26407,26415-26415,26720-26720,26872-26872,26966-26966,27015-27015,27195-27195,27444-27444,27473-27473,27482-27482,27707-27707,27892-27892,27899-27899,28122-28122,28369-28369,28465-28465,28493-28493,28543-28543,28547-28547,28641-28641,28840-28840,28973-28973,29078-29078,29243-29243,29256-29256,29810-29810,29823-29823,29977-29977,30263-30263,30303-30303,30365-30365,30544-30544,30656-30656,30697-30697,30704-30704,30718-30718,3
0975-30975,31059-31059,31073-31073,31109-31109,31189-31189,31195-31195,31335-31335,31337-31337,31365-31365,31625-31625,31681-31681,31731-31731,31891-31891,32345-32345,32385-32385,32528-32528,32768-32780,32798-32798,32815-32815,32818-32818,32931-32931,33030-33030,33249-33249,33281-33281,33354-33355,33459-33459,33717-33717,33744-33744,33866-33866,33872-33872,34038-34038,34079-34079,34125-34125,34358-34358,34422-34422,34433-34433,34555-34555,34570-34570,34577-34580,34758-34758,34796-34796,34855-34855,34861-34862,34892-34892,35438-35438,35702-35702,35777-35777,35794-35794,36108-36108,36206-36206,36384-36384,36458-36458,36489-36489,36669-36669,36778-36778,36893-36893,36945-36945,37144-37144,37212-37212,37393-37393,37444-37444,37602-37602,37761-37761,37783-37783,37813-37813,37843-37843,38037-38037,38063-38063,38293-38293,38412-38412,38498-38498,38615-38615,39213-39213,39217-39217,39632-39632,39683-39683,39714-39714,39723-39723,39888-39888,40019-40019,40116-40116,40441-40441,40539-40539,40622-40622,40708-40708,40711-40711,40724-40724,40732-40732,40805-40805,40847-40847,40866-40866,40915-40915,41058-41058,41081-41081,41308-41308,41370-41370,41446-41446,41524-41524,41638-41638,41702-41702,41774-41774,41896-41896,41967-41967,41971-41971,42056-42056,42172-42172,42313-42313,42431-42431,42434-42434,42508-42508,42557-42557,42577-42577,42627-42627,42639-42639,43094-43094,43195-43195,43370-43370,43514-43514,43686-43686,43824-43824,43967-43967,44101-44101,44160-44160,44179-44179,44185-44185,44190-44190,44253-44253,44334-44334,44508-44508,44923-44923,44946-44946,44968-44968,45247-45247,45380-45380,45441-45441,45685-45685,45722-45722,45818-45818,45928-45928,46093-46093,46532-46532,46836-46836,47624-47624,47765-47765,47772-47772,47808-47808,47915-47915,47981-47981,48078-48078,48189-48189,48255-48255,48455-48455,48489-48489,48761-48761,49152-49163,49165-49182,49184-49202,49204-49205,49207-49216,49220-49220,49222-49222,49226-49226,49259-49259,49262-49262,49306-49306,49350-49350,49360-493
60,49393-49393,49396-49396,49503-49503,49640-49640,49968-49968,50099-50099,50164-50164,50497-50497,50612-50612,50708-50708,50919-50919,51255-51255,51456-51456,51554-51554,51586-51586,51690-51690,51717-51717,51905-51905,51972-51972,52144-52144,52225-52225,52503-52503,53006-53006,53037-53037,53571-53571,53589-53589,53838-53838,54094-54094,54114-54114,54281-54281,54321-54321,54711-54711,54807-54807,54925-54925,55043-55043,55544-55544,55587-55587,56141-56141,57172-57172,57409-57410,57813-57813,57843-57843,57958-57958,57977-57977,58002-58002,58075-58075,58178-58178,58419-58419,58631-58631,58640-58640,58797-58797,59193-59193,59207-59207,59765-59765,59846-59846,60172-60172,60381-60381,60423-60423,61024-61024,61142-61142,61319-61319,61322-61322,61370-61370,61412-61412,61481-61481,61550-61550,61685-61685,61961-61961,62154-62154,62287-62287,62575-62575,62677-62677,62699-62699,62958-62958,63420-63420,63555-63555,64080-64080,64481-64481,64513-64513,64590-64590,64727-64727", # pylint: disable=C0301
"NMAP_5_51_TOP_2000_TCP_AND_TOP_100_UDP": "T:1-1,3-4,6-7,9-9,13-13,17-17,19-27,30-30,32-33,37-37,42-43,49-49,53-53,55-55,57-57,59-59,70-70,77-77,79-90,98-100,102-102,106-106,109-111,113-113,119-119,123-123,125-125,127-127,135-135,139-139,143-144,146-146,157-157,161-161,163-163,179-179,199-199,210-212,220-220,222-223,225-225,250-252,254-257,259-259,264-264,280-280,301-301,306-306,311-311,333-333,340-340,366-366,388-389,406-407,411-411,416-417,419-419,425-425,427-427,441-445,447-447,458-458,464-465,475-475,481-481,497-497,500-500,502-502,512-515,523-524,540-541,543-545,548-548,554-557,563-563,587-587,593-593,600-600,602-602,606-606,610-610,616-617,621-621,623-623,625-625,631-631,636-636,639-639,641-641,646-646,648-648,655-655,657-657,659-660,666-669,674-674,683-684,687-687,690-691,700-701,705-705,709-711,713-715,720-720,722-722,725-726,728-732,740-740,748-749,754-754,757-758,765-765,777-778,780-780,782-783,786-787,790-790,792-792,795-795,800-803,805-806,808-808,822-823,825-825,829-829,839-840,843-843,846-846,856-856,859-859,862-862,864-864,873-874,878-878,880-880,888-888,898-898,900-905,911-913,918-918,921-922,924-924,928-928,930-931,943-943,953-953,969-969,971-971,980-981,987-987,990-990,992-993,995-996,998-1002,1004-1015,1020-1114,1116-1119,1121-1128,1130-1132,1134-1138,1141-1141,1143-1145,1147-1154,1156-1159,1162-1169,1173-1176,1179-1180,1182-1188,1190-1192,1194-1196,1198-1201,1204-1204,1207-1213,1215-1218,1220-1223,1228-1229,1233-1234,1236-1236,1239-1241,1243-1244,1247-1251,1259-1259,1261-1262,1264-1264,1268-1268,1270-1272,1276-1277,1279-1279,1282-1282,1287-1287,1290-1291,1296-1297,1299-1303,1305-1311,1314-1319,1321-1322,1324-1324,1327-1328,1330-1331,1334-1334,1336-1337,1339-1340,1347-1347,1350-1353,1357-1357,1413-1414,1417-1417,1433-1434,1443-1443,1455-1455,1461-1461,1494-1494,1500-1501,1503-1503,1516-1516,1521-1522,1524-1526,1533-1533,1547-1547,1550-1550,1556-1556,1558-1560,1565-1566,1569-1569,1580-1580,1583-1584,1592-1592,1594-1594,1598-1598,1600-1600,1605-1605
,1607-1607,1615-1615,1620-1620,1622-1622,1632-1632,1635-1635,1638-1638,1641-1641,1645-1645,1658-1658,1666-1666,1677-1677,1683-1683,1687-1688,1691-1691,1694-1694,1699-1701,1703-1703,1707-1709,1711-1713,1715-1715,1717-1723,1730-1730,1735-1736,1745-1745,1750-1750,1752-1753,1755-1755,1761-1761,1782-1783,1791-1792,1799-1801,1805-1808,1811-1812,1823-1823,1825-1825,1835-1835,1839-1840,1858-1858,1861-1864,1871-1871,1875-1875,1900-1901,1911-1912,1914-1914,1918-1918,1924-1924,1927-1927,1935-1935,1947-1947,1954-1954,1958-1958,1971-1976,1981-1981,1984-1984,1998-2013,2020-2022,2025-2025,2030-2031,2033-2035,2038-2038,2040-2049,2062-2062,2065-2065,2067-2070,2080-2083,2086-2087,2095-2096,2099-2101,2103-2107,2111-2112,2115-2115,2119-2119,2121-2121,2124-2124,2126-2126,2134-2135,2142-2142,2144-2144,2148-2148,2150-2150,2160-2161,2170-2170,2179-2179,2187-2187,2190-2191,2196-2197,2200-2201,2203-2203,2222-2222,2224-2224,2232-2232,2241-2241,2250-2251,2253-2253,2260-2262,2265-2265,2269-2271,2280-2280,2288-2288,2291-2292,2300-2302,2304-2304,2312-2313,2323-2323,2325-2326,2330-2330,2335-2335,2340-2340,2366-2366,2371-2372,2381-2383,2391-2391,2393-2394,2399-2399,2401-2401,2418-2418,2425-2425,2433-2433,2435-2436,2438-2439,2449-2449,2456-2456,2463-2463,2472-2472,2492-2492,2500-2501,2505-2505,2522-2522,2525-2525,2531-2532,2550-2551,2557-2558,2567-2567,2580-2580,2583-2584,2598-2598,2600-2602,2604-2608,2622-2623,2628-2628,2631-2631,2638-2638,2644-2644,2691-2691,2700-2702,2706-2706,2710-2712,2717-2718,2723-2723,2725-2725,2728-2728,2734-2734,2800-2800,2804-2804,2806-2806,2809-2809,2811-2812,2847-2847,2850-2850,2869-2869,2875-2875,2882-2882,2888-2889,2898-2898,2901-2903,2908-2910,2920-2920,2930-2930,2957-2958,2967-2968,2973-2973,2984-2984,2987-2988,2991-2991,2997-2998,3000-3003,3005-3007,3011-3011,3013-3014,3017-3017,3023-3023,3025-3025,3030-3031,3050-3050,3052-3052,3057-3057,3062-3063,3071-3071,3077-3077,3080-3080,3089-3089,3102-3103,3118-3119,3121-3121,3128-3128,3146-3146,3162-3162,3167-3168,3190-3190
,3200-3200,3210-3211,3220-3221,3240-3240,3260-3261,3263-3263,3268-3269,3280-3281,3283-3283,3291-3291,3299-3301,3304-3304,3306-3307,3310-3311,3319-3319,3322-3325,3333-3334,3351-3351,3362-3363,3365-3365,3367-3372,3374-3374,3376-3376,3388-3390,3396-3396,3399-3400,3404-3404,3410-3410,3414-3415,3419-3419,3425-3425,3430-3430,3439-3439,3443-3443,3456-3456,3476-3476,3479-3479,3483-3483,3485-3486,3493-3493,3497-3497,3503-3503,3505-3506,3511-3511,3513-3515,3517-3517,3519-3520,3526-3527,3530-3530,3532-3532,3546-3546,3551-3551,3577-3577,3580-3580,3586-3586,3599-3600,3602-3603,3621-3622,3632-3632,3636-3637,3652-3653,3656-3656,3658-3659,3663-3663,3669-3670,3672-3672,3680-3681,3683-3684,3689-3690,3697-3697,3700-3700,3703-3703,3712-3712,3728-3728,3731-3731,3737-3737,3742-3742,3749-3749,3765-3766,3784-3784,3787-3788,3790-3790,3792-3793,3795-3796,3798-3801,3803-3803,3806-3806,3808-3814,3817-3817,3820-3820,3823-3828,3830-3831,3837-3837,3839-3839,3842-3842,3846-3853,3856-3856,3859-3860,3863-3863,3868-3872,3876-3876,3878-3880,3882-3882,3888-3890,3897-3897,3899-3899,3901-3902,3904-3909,3911-3911,3913-3916,3918-3920,3922-3923,3928-3931,3935-3937,3940-3941,3943-3946,3948-3949,3952-3952,3956-3957,3961-3964,3967-3969,3971-3972,3975-3975,3979-3983,3986-3986,3989-4007,4009-4010,4016-4016,4020-4020,4022-4022,4024-4025,4029-4029,4035-4036,4039-4040,4045-4045,4056-4056,4058-4058,4065-4065,4080-4080,4087-4087,4090-4090,4096-4096,4100-4101,4111-4113,4118-4121,4125-4126,4129-4129,4135-4135,4141-4141,4143-4143,4147-4147,4158-4158,4161-4161,4164-4164,4174-4174,4190-4190,4192-4192,4200-4200,4206-4206,4220-4220,4224-4224,4234-4234,4242-4242,4252-4252,4262-4262,4279-4279,4294-4294,4297-4298,4300-4300,4302-4302,4321-4321,4325-4325,4328-4328,4333-4333,4342-4343,4355-4358,4369-4369,4374-4376,4384-4384,4388-4388,4401-4401,4407-4407,4414-4415,4418-4418,4430-4430,4433-4433,4442-4447,4449-4449,4454-4454,4464-4464,4471-4471,4476-4476,4516-4517,4530-4530,4534-4534,4545-4545,4550-4550,4555-4555,4558-4559,4567-4567
,4570-4570,4599-4602,4606-4606,4609-4609,4644-4644,4649-4649,4658-4658,4662-4662,4665-4665,4687-4687,4689-4689,4700-4700,4712-4712,4745-4745,4760-4760,4767-4767,4770-4771,4778-4778,4793-4793,4800-4800,4819-4819,4848-4848,4859-4860,4875-4877,4881-4881,4899-4900,4903-4903,4912-4912,4931-4931,4949-4949,4998-5005,5009-5017,5020-5021,5023-5023,5030-5030,5033-5033,5040-5040,5050-5055,5060-5061,5063-5063,5066-5066,5070-5070,5074-5074,5080-5081,5087-5088,5090-5090,5095-5096,5098-5098,5100-5102,5111-5111,5114-5114,5120-5122,5125-5125,5133-5133,5137-5137,5147-5147,5151-5152,5190-5190,5200-5202,5212-5212,5214-5214,5219-5219,5221-5223,5225-5226,5233-5235,5242-5242,5250-5250,5252-5252,5259-5259,5261-5261,5269-5269,5279-5280,5291-5291,5298-5298,5339-5339,5347-5347,5353-5353,5357-5357,5370-5370,5377-5377,5405-5405,5414-5414,5423-5423,5431-5433,5440-5442,5444-5444,5457-5458,5473-5473,5475-5475,5500-5502,5510-5510,5520-5520,5544-5544,5550-5550,5552-5555,5557-5557,5560-5560,5566-5566,5631-5631,5633-5633,5666-5666,5678-5680,5718-5718,5730-5730,5800-5803,5807-5807,5810-5812,5815-5815,5818-5818,5822-5823,5825-5825,5850-5850,5859-5859,5862-5862,5868-5869,5877-5877,5899-5907,5909-5911,5914-5915,5918-5918,5922-5922,5925-5925,5938-5938,5940-5940,5950-5950,5952-5952,5959-5963,5968-5968,5981-5981,5987-5989,5998-6009,6017-6017,6025-6025,6050-6051,6059-6060,6068-6068,6100-6101,6103-6103,6106-6106,6112-6112,6123-6123,6129-6129,6156-6156,6203-6203,6222-6222,6247-6247,6346-6346,6389-6389,6481-6481,6500-6500,6502-6502,6504-6504,6510-6510,6520-6520,6543-6543,6547-6547,6550-6550,6565-6567,6580-6580,6600-6600,6646-6646,6662-6662,6666-6670,6689-6689,6692-6692,6699-6699,6711-6711,6732-6732,6779-6779,6788-6789,6792-6792,6839-6839,6881-6881,6896-6896,6901-6901,6969-6969,7000-7004,7007-7007,7010-7010,7019-7019,7024-7025,7050-7051,7070-7070,7080-7080,7100-7100,7103-7103,7106-7106,7123-7123,7200-7201,7241-7241,7272-7272,7278-7278,7281-7281,7402-7402,7435-7435,7438-7438,7443-7443,7496-7496,7512-7512,7625-7625
,7627-7627,7676-7676,7725-7725,7741-7741,7744-7744,7749-7749,7770-7770,7777-7778,7800-7800,7878-7878,7900-7900,7911-7911,7913-7913,7920-7921,7929-7929,7937-7938,7999-8002,8007-8011,8015-8016,8019-8019,8021-8022,8031-8031,8042-8042,8045-8045,8050-8050,8080-8090,8093-8093,8095-8095,8097-8100,8118-8118,8180-8181,8189-8189,8192-8194,8200-8200,8222-8222,8254-8254,8290-8294,8300-8300,8333-8333,8383-8383,8385-8385,8400-8400,8402-8402,8443-8443,8481-8481,8500-8500,8540-8540,8600-8600,8648-8649,8651-8652,8654-8654,8675-8676,8686-8686,8701-8701,8765-8766,8800-8800,8873-8873,8877-8877,8888-8889,8899-8899,8987-8987,8994-8994,8996-8996,9000-9003,9009-9011,9040-9040,9050-9050,9071-9071,9080-9081,9090-9091,9098-9103,9110-9111,9152-9152,9191-9191,9197-9198,9200-9200,9207-9207,9220-9220,9290-9290,9409-9409,9415-9415,9418-9418,9443-9444,9485-9485,9500-9503,9535-9535,9575-9575,9593-9595,9600-9600,9618-9618,9621-9621,9643-9643,9666-9666,9673-9673,9815-9815,9876-9878,9898-9898,9900-9900,9914-9914,9917-9917,9929-9929,9941-9941,9943-9944,9968-9968,9988-9988,9992-9992,9998-10005,10008-10012,10022-10025,10034-10034,10058-10058,10082-10083,10160-10160,10180-10180,10215-10215,10243-10243,10566-10566,10616-10617,10621-10621,10626-10626,10628-10629,10778-10778,10873-10873,11110-11111,11967-11967,12000-12000,12006-12006,12021-12021,12059-12059,12174-12174,12215-12215,12262-12262,12265-12265,12345-12346,12380-12380,12452-12452,13456-13456,13722-13722,13724-13724,13782-13783,14000-14000,14238-14238,14441-14442,15000-15004,15402-15402,15660-15660,15742-15742,16000-16001,16012-16012,16016-16016,16018-16018,16080-16080,16113-16113,16705-16705,16800-16800,16851-16851,16992-16993,17595-17595,17877-17877,17988-17988,18000-18000,18018-18018,18040-18040,18101-18101,18264-18264,18988-18988,19101-19101,19283-19283,19315-19315,19350-19350,19780-19780,19801-19801,19842-19842,19900-19900,20000-20000,20002-20002,20005-20005,20031-20031,20221-20222,20828-20828,21571-21571,21792-21792,22222-22222,22939-22939,2305
2-23052,23502-23502,23796-23796,24444-24444,24800-24800,25734-25735,26000-26000,26214-26214,26470-26470,27000-27000,27352-27353,27355-27357,27715-27715,28201-28201,28211-28211,29672-29672,29831-29831,30000-30000,30005-30005,30704-30704,30718-30718,30951-30951,31038-31038,31337-31337,31727-31727,32768-32785,32791-32792,32803-32803,32816-32816,32822-32822,32835-32835,33354-33354,33453-33453,33554-33554,33899-33899,34571-34573,35500-35500,35513-35513,37839-37839,38037-38037,38185-38185,38188-38188,38292-38292,39136-39136,39376-39376,39659-39659,40000-40000,40193-40193,40811-40811,40911-40911,41064-41064,41511-41511,41523-41523,42510-42510,44176-44176,44334-44334,44442-44443,44501-44501,44709-44709,45100-45100,46200-46200,46996-46996,47544-47544,48080-48080,49152-49161,49163-49165,49167-49168,49171-49171,49175-49176,49186-49186,49195-49195,49236-49236,49400-49401,49999-50003,50006-50006,50050-50050,50300-50300,50389-50389,50500-50500,50636-50636,50800-50800,51103-51103,51191-51191,51413-51413,51493-51493,52660-52660,52673-52673,52710-52710,52735-52735,52822-52822,52847-52851,52853-52853,52869-52869,53211-53211,53313-53314,53535-53535,54045-54045,54328-54328,55020-55020,55055-55056,55555-55555,55576-55576,55600-55600,56737-56738,57294-57294,57665-57665,57797-57797,58001-58002,58080-58080,58630-58630,58632-58632,58838-58838,59110-59110,59200-59202,60020-60020,60123-60123,60146-60146,60443-60443,60642-60642,61532-61532,61613-61613,61900-61900,62078-62078,63331-63331,64623-64623,64680-64680,65000-65000,65129-65129,65310-65310, 
U:7-7,9-9,17-17,19-19,49-49,53-53,67-69,80-80,88-88,111-111,120-120,123-123,135-139,158-158,161-162,177-177,427-427,443-443,445-445,497-497,500-500,514-515,518-518,520-520,593-593,623-623,626-626,631-631,996-999,1022-1023,1025-1030,1433-1434,1645-1646,1701-1701,1718-1719,1812-1813,1900-1900,2000-2000,2048-2049,2222-2223,3283-3283,3456-3456,3703-3703,4444-4444,4500-4500,5000-5000,5060-5060,5353-5353,5632-5632,9200-9200,10000-10000,17185-17185,20031-20031,30718-30718,31337-31337,32768-32769,32771-32771,32815-32815,33281-33281,49152-49154,49156-49156,49181-49182,49185-49186,49188-49188,49190-49194,49200-49201", # pylint: disable=C0301
"WEB_SERVICES": "T:80-80,443-443",
}
# Suppress all log output while the tests run; disable(CRITICAL) mutes every
# level up to and including CRITICAL for the whole process.
logging.disable(logging.CRITICAL)
class ValidatePortList(unittest.TestCase):
    """Acceptance and rejection cases for the valid_port_list helper."""

    def test_valid_port_list_no_range(self):
        """A missing or empty port specification is rejected."""
        for spec in (None, ""):
            self.assertFalse(valid_port_list(spec))

    def test_valid_port_list_0_end(self):
        """A NUL byte anywhere in the specification makes it invalid."""
        for spec in ("\0", "T:1-5,7,9,U:1-3,5,7,9,\\0"):
            self.assertFalse(valid_port_list(spec))

    def test_valid_port_list_newline_between_range(self):
        """A newline splitting a range apart invalidates the list."""
        self.assertFalse(valid_port_list("\nT:1-\n5,7,9,\nU:1-3,5\n,7,9\n"))

    def test_valid_port_out_of_range(self):
        """Single ports outside the valid port number range are rejected."""
        for spec in ("0", "-9", "1,0,6,7", "2,-9,4", "90000"):
            self.assertFalse(valid_port_list(spec))

    def test_valid_port_illegal_ranges(self):
        """Malformed, inverted or out-of-bounds ranges are rejected."""
        bad_specs = (
            "T:-",
            "T:-9",
            "T:0-",
            "T:0-9",
            "T:90000-",
            "T:90000-90010",
            "T:9-\\0",
            "T:9-0",
            "T:9-90000",
            "T:100-9",
            "0-",
            "0-9",
            "9-",
            "9-\\0",
            "9-8",
            "90000-90010",
            "100-9",
            "T,U",
            "T :\n: 1-2,U",
            "T :: 1-2,U",
            "T:2=2",
            "T:1.2-5,4.5",
        )
        for spec in bad_specs:
            self.assertFalse(valid_port_list(spec))

    def test_valid_port_legal_ports(self):
        """Well-formed lists, duplicates and empty entries are accepted."""
        good_specs = (
            "6,6,6,6,10,20",
            "T:7, U:7",
            "T:7, U:9",
            "9",
            "U:,T:",
            "1,2,,,,,,,\n\n\n\n\n\n,,,5",
            "T:1-5,7,9,U:1-3,5,7,9",
            "6-9,7,7,10-20,20",
        )
        for spec in good_specs:
            self.assertTrue(valid_port_list(spec))

    def test_valid_port_new_lines_as_commas(self):
        """Newlines between entries are treated like commas."""
        for spec in ("1,2,\n,\n4,6", "T:1-5,7,9,\nU:1-3,5\n,7,9"):
            self.assertTrue(valid_port_list(spec))

    def test_valid_port_allow_white_spaces(self):
        """Whitespace sprinkled through the specification is tolerated."""
        self.assertTrue(
            valid_port_list(" T: 1 -5, 7 ,9, \nU :1- 3,5 \n,7,9")
        )

    def test_valid_port_some_standard_port_lists(self):
        """Every shipped standard port list validates successfully."""
        standard_list_names = (
            "ALL_IANA_ASSIGNED_TCP_2020_02_12",
            "ALL_PRIVILEGED_TCP",
            "ALL_PRIVILEGED_TCP_AND_UDP",
            "ALL_TCP",
            "OPENVAS_DEFAULT",
            "ALL_IANA_ASSIGNED_TCP_AND_UDP_2020_02_12",
            "ALL_TCP_AND_NMAP_5_51_TOP_100_UDP",
            "ALL_TCP_AND_NMAP_5_51_TOP_1000_UDP",
            "NMAP_5_51_TOP_2000_TCP_AND_TOP_100_UDP",
            "WEB_SERVICES",
        )
        for name in standard_list_names:
            self.assertTrue(valid_port_list(PORT_LISTS[name]))
class ConvertPortTestCase(unittest.TestCase):
    """Tests for ports_as_list, get_tcp_port_list, get_udp_port_list and
    port_list_compress.
    """

    def test_tcp_ports(self):
        """Test only tcp ports."""
        tports, uports = ports_as_list('T:1-10,30,31')
        self.assertIsNotNone(tports)
        self.assertEqual(len(uports), 0)
        self.assertEqual(len(tports), 12)
        # range() excludes its upper bound: use 11 so the range end (port 10)
        # is actually verified; the original loop silently stopped at 9.
        for i in range(1, 11):
            self.assertIn(i, tports)
        self.assertIn(30, tports)
        self.assertIn(31, tports)

    def test_udp_ports(self):
        """Test only udp ports."""
        tports, uports = ports_as_list('U:1-10')
        self.assertIsNotNone(uports)
        self.assertEqual(len(tports), 0)
        self.assertEqual(len(uports), 10)
        # include port 10 (upper bound of the declared range) in the check
        for i in range(1, 11):
            self.assertIn(i, uports)

    def test_both_ports(self):
        """Test tcp and udp ports."""
        tports, uports = ports_as_list('T:1-10, U:1-10')
        self.assertIsNotNone(tports)
        self.assertIsNotNone(uports)
        self.assertEqual(len(tports), 10)
        self.assertEqual(len(uports), 10)
        for i in range(1, 11):
            self.assertIn(i, tports)
            self.assertIn(i, uports)
        self.assertNotIn(0, uports)

    def test_both_ports_udp_first(self):
        """Test tcp and udp ports, but udp listed first."""
        tports, uports = ports_as_list('U:20-30, T:1-10')
        self.assertIsNotNone(tports)
        self.assertIsNotNone(uports)
        self.assertEqual(len(tports), 10)
        self.assertEqual(len(uports), 11)
        for i in range(1, 11):
            self.assertIn(i, tports)
        # verify the full declared udp range 20-30 inclusive
        for i in range(20, 31):
            self.assertIn(i, uports)

    def test_not_spec_type_ports(self):
        """Test port list without explicit protocol type (defaults to tcp)."""
        tports, uports = ports_as_list('51-60')
        self.assertIsNotNone(tports)
        self.assertEqual(len(uports), 0)
        self.assertEqual(len(tports), 10)
        for i in range(51, 61):
            self.assertIn(i, tports)

    def test_invalid_char_port(self):
        """Test list with an invalid protocol prefix character."""
        tports, uports = ports_as_list('R:51-60')
        self.assertIsNone(tports)
        self.assertIsNone(uports)

    def test_empty_port(self):
        """Test an empty port list."""
        tports, uports = ports_as_list('')
        self.assertIsNone(tports)
        self.assertIsNone(uports)

    def test_get_spec_type_ports(self):
        """Test extracting only the ports of one protocol type."""
        uports = get_udp_port_list('U:9392,9393T:22')
        self.assertEqual(len(uports), 2)
        self.assertIn(9392, uports)
        # both declared udp ports must be present, not just the first one
        self.assertIn(9393, uports)
        tports = get_tcp_port_list('U:9392T:80,22,443')
        self.assertEqual(len(tports), 3)
        self.assertIn(22, tports)
        self.assertIn(80, tports)
        self.assertIn(443, tports)

    def test_malformed_port_string(self):
        """Test different malformed port lists."""
        malformed = ('TU:1-2', 'U1-2', 'U:1-2t:22', 'U1-2,T22', 'U:1-2,U:22')
        for spec in malformed:
            tports, uports = ports_as_list(spec)
            self.assertIsNone(tports)
            self.assertIsNone(uports)

    def test_compress_list(self):
        """Test compressing a port list into range notation."""
        port_list = [1, 2, 3, 4, 5, 8, 9, 10, 22, 24, 29, 30]
        string = port_list_compress(port_list)
        self.assertEqual(string, '1-5,8-10,22,24,29-30')
ospd-openvas-22.9.0/tests/test_preferencehandler.py 0000664 0000000 0000000 00000141535 15011310720 0022450 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
# pylint: disable = too-many-lines
import logging
from unittest import TestCase
from unittest.mock import call, patch, Mock, MagicMock
from tests.dummydaemon import DummyDaemon
from tests.helper import assert_called_once
from ospd_openvas.openvas import Openvas
from ospd_openvas.preferencehandler import (
AliveTest,
BOREAS_SETTING_NAME,
BOREAS_ALIVE_TEST,
BOREAS_ALIVE_TEST_PORTS,
PreferenceHandler,
alive_test_methods_to_bit_field,
)
class PreferenceHandlerTestCase(TestCase):
@patch('ospd_openvas.db.KbDB')
def test_process_vts_not_found(self, mock_kb):
dummy = DummyDaemon()
logging.Logger.warning = Mock()
vts = {
'1.3.6.1.4.1.25623.1.0.100065': {'3': 'new value'},
'vt_groups': ['family=debian', 'family=general'],
}
p_handler = PreferenceHandler(
'1234-1234', mock_kb, dummy.scan_collection, dummy.nvti, None
)
dummy.nvti.get_nvt_metadata.return_value = None
p_handler._process_vts(vts) # pylint: disable = protected-access
assert_called_once(logging.Logger.warning)
def test_process_vts_bad_param_id(self):
dummy = DummyDaemon()
vts = {
'1.3.6.1.4.1.25623.1.0.100061': {'3': 'new value'},
'vt_groups': ['family=debian', 'family=general'],
}
p_handler = PreferenceHandler(
'1234-1234', None, dummy.scan_collection, dummy.nvti, None
)
ret = p_handler._process_vts(vts) # pylint: disable = protected-access
self.assertFalse(ret[1])
def test_not_append_notus_oids(self):
dummy = DummyDaemon()
vts = {
'1.3.6.1.4.1.25623.1.0.100061': {'1': 'new value'},
'vt_groups': ['family=debian', 'family=general'],
}
p_handler = PreferenceHandler(
'1234-1234', None, dummy.scan_collection, dummy.nvti, lambda _: True
)
re = p_handler._process_vts(vts) # pylint: disable = protected-access
self.assertEqual(re[0], [])
self.assertEqual(re[1], {})
def test_process_vts(self):
dummy = DummyDaemon()
vts = {
'1.3.6.1.4.1.25623.1.0.100061': {'1': 'new value'},
'vt_groups': ['family=debian', 'family=general'],
}
vt_out = (
['1.3.6.1.4.1.25623.1.0.100061'],
{'1.3.6.1.4.1.25623.1.0.100061:1:entry:Data length :': 'new value'},
)
p_handler = PreferenceHandler(
'1234-1234', None, dummy.scan_collection, dummy.nvti, None
)
ret = p_handler._process_vts(vts) # pylint: disable = protected-access
self.assertEqual(ret, vt_out)
@patch('ospd_openvas.db.KbDB')
def test_set_plugins_false(self, mock_kb):
dummy = DummyDaemon()
dummy.scan_collection.get_vts = Mock()
dummy.scan_collection.get_vts.return_value = {}
p_handler = PreferenceHandler(
'1234-1234', mock_kb, dummy.scan_collection, dummy.nvti, None
)
p_handler.kbdb.add_scan_preferences = Mock()
ret = p_handler.prepare_plugins_for_openvas()
self.assertFalse(ret)
@patch('ospd_openvas.db.KbDB')
def test_set_plugins_true(self, mock_kb):
dummy = DummyDaemon()
vts = {
'1.3.6.1.4.1.25623.1.0.100061': {'3': 'new value'},
'vt_groups': ['family=debian', 'family=general'],
}
dummy.scan_collection.get_vts = Mock()
dummy.scan_collection.get_vts.return_value = vts
p_handler = PreferenceHandler(
'1234-1234', mock_kb, dummy.scan_collection, dummy.nvti, None
)
p_handler.kbdb.add_scan_preferences = Mock()
ret = p_handler.prepare_plugins_for_openvas()
self.assertTrue(ret)
def test_build_credentials_ssh_up(self):
dummy = DummyDaemon()
cred_out = [
'auth_port_ssh|||22',
'1.3.6.1.4.1.25623.1.0.103591:1:entry:SSH login name:|||username',
'1.3.6.1.4.1.25623.1.0.103591:3:'
'password:SSH password (unsafe!):|||pass',
'1.3.6.1.4.1.25623.1.0.103591:7:entry:SSH privilege login name:|||',
'1.3.6.1.4.1.25623.1.0.103591:8:'
'password:SSH privilege password:|||',
]
cred_dict = {
'ssh': {
'type': 'up',
'port': '22',
'username': 'username',
'password': 'pass',
}
}
p_handler = PreferenceHandler(
'1234-1234', None, dummy.scan_collection, None, None
)
ret = p_handler.build_credentials_as_prefs(cred_dict)
self.assertCountEqual(ret, cred_out)
    def test_build_credentials(self):
        """All four credential services (ssh usk, smb, esxi, snmp) at once.

        Note: ``cred_out`` is only compared by length; the individual
        entries are checked with a few spot ``assertIn`` calls below.
        """
        dummy = DummyDaemon()
        cred_out = [
            '1.3.6.1.4.1.25623.1.0.105058:1:entry:ESXi login name:|||username',
            '1.3.6.1.4.1.25623.1.0.105058:2:password:'
            'ESXi login password:|||pass',
            'auth_port_ssh|||22',
            '1.3.6.1.4.1.25623.1.0.103591:1:entry:SSH login name:|||username',
            '1.3.6.1.4.1.25623.1.0.103591:2:'
            'password:SSH key passphrase:|||pass',
            '1.3.6.1.4.1.25623.1.0.103591:4:file:SSH private key:|||',
            '1.3.6.1.4.1.25623.1.0.103591:7:'
            'entry:SSH privilege login name:|||',
            '1.3.6.1.4.1.25623.1.0.103591:8:'
            'password:SSH privilege password:|||',
            '1.3.6.1.4.1.25623.1.0.90023:1:entry:SMB login:|||username',
            # NOTE(review): 'password]' looks like a typo, but the entry is
            # only used for the length comparison below — confirm against
            # build_credentials_as_prefs before changing it.
            '1.3.6.1.4.1.25623.1.0.90023:2:password]:SMB password :|||pass',
            '1.3.6.1.4.1.25623.1.0.105076:1:'
            'password:SNMP Community:some comunity',
            '1.3.6.1.4.1.25623.1.0.105076:2:entry:SNMPv3 Username:username',
            '1.3.6.1.4.1.25623.1.0.105076:3:password:SNMPv3 Password:pass',
            '1.3.6.1.4.1.25623.1.0.105076:4:radio:SNMPv3'
            ' Authentication Algorithm:some auth algo',
            '1.3.6.1.4.1.25623.1.0.105076:5:password:'
            'SNMPv3 Privacy Password:privacy pass',
            '1.3.6.1.4.1.25623.1.0.105076:6:radio:'
            'SNMPv3 Privacy Algorithm:privacy algo',
        ]
        cred_dict = {
            # 'usk' = username + ssh key based login
            'ssh': {
                'type': 'usk',
                'port': '22',
                'username': 'username',
                'password': 'pass',
                'private': 'some key',
                'priv_username': 'su_user',
                'priv_password': 'su_pass',
            },
            'smb': {'type': 'up', 'username': 'username', 'password': 'pass'},
            'esxi': {
                'type': 'up',
                'username': 'username',
                'password': 'pass',
            },
            'snmp': {
                'type': 'snmp',
                'username': 'username',
                'password': 'pass',
                'community': 'some comunity',
                'auth_algorithm': 'md5',
                'privacy_password': 'privacy pass',
                'privacy_algorithm': 'aes',
            },
        }
        p_handler = PreferenceHandler(
            '1234-1234', None, dummy.scan_collection, None, None
        )
        ret = p_handler.build_credentials_as_prefs(cred_dict)
        # one preference string per expected entry
        self.assertEqual(len(ret), len(cred_out))
        self.assertIn('auth_port_ssh|||22', ret)
        self.assertIn(
            '1.3.6.1.4.1.25623.1.0.90023:1:entry:SMB login:|||username',
            ret,
        )
        # the privilege password from cred_dict must end up in the prefs
        self.assertIn(
            '1.3.6.1.4.1.25623.1.0.103591:8:'
            'password:SSH privilege password:|||su_pass',
            ret,
        )
def test_build_alive_test_opt_empty(self):
dummy = DummyDaemon()
target_options_dict = {'alive_test': '0'}
p_handler = PreferenceHandler(
'1234-1234', None, dummy.scan_collection, None, None
)
ret = p_handler.build_alive_test_opt_as_prefs(target_options_dict)
self.assertEqual(ret, {})
# alive test was supplied via separate xml element
dummy = DummyDaemon()
target_options_dict = {'alive_test_methods': '1', 'icmp': '0'}
p_handler = PreferenceHandler(
'1234-1234', None, dummy.scan_collection, None, None
)
ret = p_handler.build_alive_test_opt_as_prefs(target_options_dict)
self.assertEqual(ret, {})
    def test_build_alive_test_opt(self):
        """An ICMP alive test maps to the expected checkbox preferences.

        Alive test value '2' corresponds to ICMP ping; the same result must
        be produced for both ways of supplying the option.
        """
        dummy = DummyDaemon()
        alive_test_out = {
            "1.3.6.1.4.1.25623.1.0.100315:1:checkbox:Do a TCP ping": "no",
            "1.3.6.1.4.1.25623.1.0.100315:2:checkbox:"
            "TCP ping tries also TCP-SYN ping": "no",
            "1.3.6.1.4.1.25623.1.0.100315:7:checkbox:"
            "TCP ping tries only TCP-SYN ping": "no",
            "1.3.6.1.4.1.25623.1.0.100315:3:checkbox:Do an ICMP ping": "yes",
            "1.3.6.1.4.1.25623.1.0.100315:4:checkbox:Use ARP": "no",
            (
                "1.3.6.1.4.1.25623.1.0.100315:5:checkbox:"
                "Mark unrechable Hosts as dead (not scanning)"
            ): "yes",
        }
        target_options_dict = {'alive_test': '2'}
        p_handler = PreferenceHandler(
            '1234-1234', None, dummy.scan_collection, None, None
        )
        ret = p_handler.build_alive_test_opt_as_prefs(target_options_dict)
        self.assertEqual(ret, alive_test_out)
        # alive test was supplied via separate xml element
        dummy = DummyDaemon()
        target_options_dict = {'alive_test_methods': '1', 'icmp': '1'}
        p_handler = PreferenceHandler(
            '1234-1234', None, dummy.scan_collection, None, None
        )
        ret = p_handler.build_alive_test_opt_as_prefs(target_options_dict)
        self.assertEqual(ret, alive_test_out)
def test_build_alive_test_opt_fail_1(self):
dummy = DummyDaemon()
logging.Logger.debug = Mock()
target_options_dict = {'alive_test': 'a'}
p_handler = PreferenceHandler(
'1234-1234', None, dummy.scan_collection, None, None
)
target_options = p_handler.build_alive_test_opt_as_prefs(
target_options_dict
)
assert_called_once(logging.Logger.debug)
self.assertEqual(len(target_options), 0)
@patch('ospd_openvas.db.KbDB')
def test_set_target(self, mock_kb):
dummy = DummyDaemon()
dummy.scan_collection.get_host_list = MagicMock(
return_value='192.168.0.1'
)
p_handler = PreferenceHandler(
'1234-1234', mock_kb, dummy.scan_collection, None, None
)
p_handler.scan_id = '456-789'
p_handler.kbdb.add_scan_preferences = MagicMock()
p_handler.prepare_target_for_openvas()
p_handler.kbdb.add_scan_preferences.assert_called_with(
p_handler.scan_id,
['TARGET|||192.168.0.1'],
)
@patch('ospd_openvas.db.KbDB')
def test_set_ports(self, mock_kb):
dummy = DummyDaemon()
dummy.scan_collection.get_ports = MagicMock(return_value='80,443')
p_handler = PreferenceHandler(
'1234-1234', mock_kb, dummy.scan_collection, None, None
)
p_handler.scan_id = '456-789'
p_handler.kbdb.add_scan_preferences = MagicMock()
p_handler.prepare_ports_for_openvas()
p_handler.kbdb.add_scan_preferences.assert_called_with(
p_handler.scan_id,
['port_range|||80,443'],
)
@patch('ospd_openvas.db.KbDB')
def test_set_ports_invalid(self, mock_kb):
dummy = DummyDaemon()
dummy.scan_collection.get_ports = MagicMock(return_value='2,-9,4')
p_handler = PreferenceHandler(
'1234-1234', mock_kb, dummy.scan_collection, None, None
)
p_handler.scan_id = '456-789'
p_handler.kbdb.add_scan_preferences = MagicMock()
self.assertFalse(p_handler.prepare_ports_for_openvas())
@patch('ospd_openvas.db.KbDB')
def test_set_main_kbindex(self, mock_kb):
dummy = DummyDaemon()
p_handler = PreferenceHandler(
'1234-1234', mock_kb, dummy.scan_collection, None, None
)
p_handler.kbdb.add_scan_preferences = Mock()
p_handler.kbdb.index = 2
p_handler.prepare_main_kbindex_for_openvas()
p_handler.kbdb.add_scan_preferences.assert_called_with(
p_handler.scan_id,
['ov_maindbid|||2'],
)
    @patch('ospd_openvas.db.KbDB')
    def test_set_credentials(self, mock_kb):
        """A full, valid credential set is handed to the kb exactly once."""
        dummy = DummyDaemon()
        # one credential per supported service type
        creds = {
            'ssh': {
                'type': 'up',
                'port': '22',
                'username': 'username',
                'password': 'pass',
                'priv_username': "privuser",
                'priv_password': "privpass",
            },
            'smb': {'type': 'up', 'username': 'username', 'password': 'pass'},
            'esxi': {
                'type': 'up',
                'username': 'username',
                'password': 'pass',
            },
            'snmp': {
                'type': 'snmp',
                'username': 'username',
                'password': 'pass',
                'community': 'some comunity',
                'auth_algorithm': 'md5',
                'privacy_password': 'privacy pass',
                'privacy_algorithm': 'aes',
            },
        }
        dummy.scan_collection.get_credentials = MagicMock(return_value=creds)
        p_handler = PreferenceHandler(
            '1234-1234', mock_kb, dummy.scan_collection, None, None
        )
        p_handler.scan_id = '456-789'
        p_handler.kbdb.add_credentials_to_scan_preferences = MagicMock()
        ret = p_handler.prepare_credentials_for_openvas()
        self.assertTrue(ret)
        assert_called_once(p_handler.kbdb.add_credentials_to_scan_preferences)
@patch('ospd_openvas.db.KbDB')
def test_set_bad_service_credentials(self, mock_kb):
    """An unknown credential service name is rejected with an error."""
    daemon = DummyDaemon()
    # 'shh' is a deliberate typo for the supported 'ssh' service.
    credentials = {
        'shh': {
            'type': 'up',
            'port': '22',
            'username': 'username',
            'password': 'pass',
        },
    }
    daemon.scan_collection.get_credentials = MagicMock(
        return_value=credentials
    )

    handler = PreferenceHandler(
        '1234-1234', mock_kb, daemon.scan_collection, None, None
    )
    handler.scan_id = '456-789'
    handler.kbdb.add_scan_preferences = MagicMock()

    success = handler.prepare_credentials_for_openvas()

    self.assertFalse(success)
    self.assertIn(
        "Unknown service type for credential: shh",
        handler.get_error_messages(),
    )
@patch('ospd_openvas.db.KbDB')
def test_set_bad_ssh_port_credentials(self, mock_kb):
    """A non-numeric SSH port is rejected with an error message."""
    daemon = DummyDaemon()
    credentials = {
        'ssh': {
            'type': 'up',
            'port': 'ab',
            'username': 'username',
            'password': 'pass',
        },
    }
    daemon.scan_collection.get_credentials = MagicMock(
        return_value=credentials
    )

    handler = PreferenceHandler(
        '1234-1234', mock_kb, daemon.scan_collection, None, None
    )
    handler.scan_id = '456-789'
    handler.kbdb.add_scan_preferences = MagicMock()

    success = handler.prepare_credentials_for_openvas()

    self.assertFalse(success)
    self.assertIn(
        "Port for SSH 'ab' is not a valid number.",
        handler.get_error_messages(),
    )
@patch('ospd_openvas.db.KbDB')
def test_missing_ssh_port_credentials(self, mock_kb):
    """Omitting the SSH port is allowed; preparation still succeeds."""
    daemon = DummyDaemon()
    credentials = {
        'ssh': {
            'type': 'up',
            'username': 'username',
            'password': 'pass',
        },
    }
    daemon.scan_collection.get_credentials = MagicMock(
        return_value=credentials
    )

    handler = PreferenceHandler(
        '1234-1234', mock_kb, daemon.scan_collection, None, None
    )
    handler.scan_id = '456-789'
    handler.kbdb.add_scan_preferences = MagicMock()

    self.assertTrue(handler.prepare_credentials_for_openvas())
@patch('ospd_openvas.db.KbDB')
def test_ssh_port_out_of_range_credentials(self, mock_kb):
    """An SSH port above 65535 is rejected with a range error."""
    daemon = DummyDaemon()
    credentials = {
        'ssh': {
            'type': 'up',
            'port': '65536',
            'username': 'username',
            'password': 'pass',
        },
    }
    daemon.scan_collection.get_credentials = MagicMock(
        return_value=credentials
    )

    handler = PreferenceHandler(
        '1234-1234', mock_kb, daemon.scan_collection, None, None
    )
    handler.scan_id = '456-789'
    handler.kbdb.add_scan_preferences = MagicMock()

    success = handler.prepare_credentials_for_openvas()

    self.assertFalse(success)
    self.assertIn(
        "Port for SSH is out of range (1-65535): 65536",
        handler.get_error_messages(),
    )
@patch('ospd_openvas.db.KbDB')
def test_bad_type_for_ssh_credentials(self, mock_kb):
    """An unsupported SSH credential type ('ups') is rejected."""
    daemon = DummyDaemon()
    credentials = {
        'ssh': {
            'type': 'ups',
            'port': '22',
            'username': 'username',
            'password': 'pass',
        },
    }
    daemon.scan_collection.get_credentials = MagicMock(
        return_value=credentials
    )

    handler = PreferenceHandler(
        '1234-1234', mock_kb, daemon.scan_collection, None, None
    )
    handler.scan_id = '456-789'
    handler.kbdb.add_scan_preferences = MagicMock()

    success = handler.prepare_credentials_for_openvas()

    self.assertFalse(success)
    self.assertIn(
        "Unknown Credential Type for SSH: ups. Use 'up' for "
        "Username + Password or 'usk' for Username + SSH Key.",
        handler.get_error_messages(),
    )
@patch('ospd_openvas.db.KbDB')
def test_missing_type_for_ssh_credentials(self, mock_kb):
    """An SSH credential without a type is rejected with guidance."""
    daemon = DummyDaemon()
    credentials = {
        'ssh': {
            'port': '22',
            'username': 'username',
            'password': 'pass',
        },
    }
    daemon.scan_collection.get_credentials = MagicMock(
        return_value=credentials
    )

    handler = PreferenceHandler(
        '1234-1234', mock_kb, daemon.scan_collection, None, None
    )
    handler.scan_id = '456-789'
    handler.kbdb.add_scan_preferences = MagicMock()

    success = handler.prepare_credentials_for_openvas()

    self.assertFalse(success)
    self.assertIn(
        "Missing Credential Type for SSH. Use 'up' for "
        "Username + Password or 'usk' for Username + SSH Key.",
        handler.get_error_messages(),
    )
@patch('ospd_openvas.db.KbDB')
def test_snmp_no_priv_alg_but_pw_credentials(self, mock_kb):
    """A privacy password without a privacy algorithm is rejected."""
    daemon = DummyDaemon()
    credentials = {
        'snmp': {
            'type': 'snmp',
            'username': 'username',
            'password': 'pass',
            'community': 'some comunity',
            'auth_algorithm': 'sha1',
            'privacy_password': 'privacy pass',
        },
    }
    daemon.scan_collection.get_credentials = MagicMock(
        return_value=credentials
    )

    handler = PreferenceHandler(
        '1234-1234', mock_kb, daemon.scan_collection, None, None
    )
    handler.scan_id = '456-789'
    handler.kbdb.add_scan_preferences = MagicMock()

    success = handler.prepare_credentials_for_openvas()

    self.assertFalse(success)
    self.assertIn(
        "When no privacy algorithm is used, the privacy "
        "password also has to be empty.",
        handler.get_error_messages(),
    )
@patch('ospd_openvas.db.KbDB')
def test_snmp_unknown_priv_alg_credentials(self, mock_kb):
    """An unsupported SNMP privacy algorithm ('das') is rejected."""
    daemon = DummyDaemon()
    credentials = {
        'snmp': {
            'type': 'snmp',
            'username': 'username',
            'password': 'pass',
            'community': 'some comunity',
            'auth_algorithm': 'sha1',
            'privacy_password': 'privacy pass',
            'privacy_algorithm': 'das',
        },
    }
    daemon.scan_collection.get_credentials = MagicMock(
        return_value=credentials
    )

    handler = PreferenceHandler(
        '1234-1234', mock_kb, daemon.scan_collection, None, None
    )
    handler.scan_id = '456-789'
    handler.kbdb.add_scan_preferences = MagicMock()

    success = handler.prepare_credentials_for_openvas()

    self.assertFalse(success)
    self.assertIn(
        "Unknown privacy algorithm used: das. "
        "Use 'aes', 'des' or '' (none).",
        handler.get_error_messages(),
    )
@patch('ospd_openvas.db.KbDB')
def test_snmp_missing_auth_alg_credentials(self, mock_kb):
    """An SNMP credential without an auth algorithm is rejected."""
    daemon = DummyDaemon()
    credentials = {
        'snmp': {
            'type': 'snmp',
            'username': 'username',
            'password': 'pass',
            'community': 'some comunity',
        },
    }
    daemon.scan_collection.get_credentials = MagicMock(
        return_value=credentials
    )

    handler = PreferenceHandler(
        '1234-1234', mock_kb, daemon.scan_collection, None, None
    )
    handler.scan_id = '456-789'
    handler.kbdb.add_scan_preferences = MagicMock()

    success = handler.prepare_credentials_for_openvas()

    self.assertFalse(success)
    self.assertIn(
        "Missing authentication algorithm for SNMP. "
        "Use 'md5' or 'sha1'.",
        handler.get_error_messages(),
    )
@patch('ospd_openvas.db.KbDB')
def test_snmp_unknown_auth_alg_credentials(self, mock_kb):
    """An unsupported SNMP auth algorithm ('sha2') is rejected."""
    daemon = DummyDaemon()
    credentials = {
        'snmp': {
            'type': 'snmp',
            'username': 'username',
            'password': 'pass',
            'community': 'some comunity',
            'auth_algorithm': 'sha2',
        },
    }
    daemon.scan_collection.get_credentials = MagicMock(
        return_value=credentials
    )

    handler = PreferenceHandler(
        '1234-1234', mock_kb, daemon.scan_collection, None, None
    )
    handler.scan_id = '456-789'
    handler.kbdb.add_scan_preferences = MagicMock()

    success = handler.prepare_credentials_for_openvas()

    self.assertFalse(success)
    self.assertIn(
        "Unknown authentication algorithm: sha2. "
        "Use 'md5' or 'sha1'.",
        handler.get_error_messages(),
    )
@patch('ospd_openvas.db.KbDB')
def test_set_credentials_empty(self, mock_kb):
    """An empty credentials dict is valid and succeeds trivially."""
    daemon = DummyDaemon()
    daemon.scan_collection.get_credentials = MagicMock(return_value={})

    handler = PreferenceHandler(
        '1234-1234', mock_kb, daemon.scan_collection, None, None
    )
    handler.scan_id = '456-789'
    handler.kbdb.add_scan_preferences = MagicMock()

    self.assertTrue(handler.prepare_credentials_for_openvas())
@patch('ospd_openvas.db.KbDB')
def test_set_host_options(self, mock_kb):
    """Excluded hosts are forwarded as an exclude_hosts preference."""
    daemon = DummyDaemon()
    daemon.scan_collection.get_exclude_hosts = MagicMock(
        return_value='192.168.0.1'
    )

    handler = PreferenceHandler(
        '1234-1234', mock_kb, daemon.scan_collection, None, None
    )
    handler.scan_id = '456-789'
    handler.kbdb.add_scan_preferences = MagicMock()

    handler.prepare_host_options_for_openvas()

    handler.kbdb.add_scan_preferences.assert_called_with(
        handler.scan_id, ['exclude_hosts|||192.168.0.1']
    )
@patch('ospd_openvas.db.KbDB')
def test_set_host_options_none(self, mock_kb):
    """An empty exclude-hosts string stores no preference at all."""
    daemon = DummyDaemon()
    daemon.scan_collection.get_exclude_hosts = MagicMock(return_value='')

    handler = PreferenceHandler(
        '1234-1234', mock_kb, daemon.scan_collection, None, None
    )
    handler.scan_id = '456-789'
    handler.kbdb.add_scan_preferences = MagicMock()

    handler.prepare_host_options_for_openvas()

    handler.kbdb.add_scan_preferences.assert_not_called()
@patch('ospd_openvas.db.KbDB')
def test_set_scan_params(self, mock_kb):
    """A truthy boolean scanner option becomes a 'yes' preference."""
    daemon = DummyDaemon()
    param_definitions = {
        'drop_privileges': {
            'type': 'boolean',
            'name': 'drop_privileges',
            'default': 0,
            'mandatory': 1,
            'description': '',
        },
    }
    daemon.scan_collection.get_options = MagicMock(
        return_value={'drop_privileges': 1}
    )

    handler = PreferenceHandler(
        '1234-1234', mock_kb, daemon.scan_collection, None, None
    )
    handler.scan_id = '456-789'
    handler.kbdb.add_scan_preferences = MagicMock()

    handler.prepare_scan_params_for_openvas(param_definitions)

    handler.kbdb.add_scan_preferences.assert_called_with(
        handler.scan_id, ['drop_privileges|||yes']
    )
@patch('ospd_openvas.db.KbDB')
def test_set_reverse_lookup_opt(self, mock_kb):
    """reverse_lookup_only implies reverse_lookup_unify is disabled."""
    daemon = DummyDaemon()
    daemon.scan_collection.get_target_options = MagicMock(
        return_value={'reverse_lookup_only': 1}
    )

    handler = PreferenceHandler(
        '1234-1234', mock_kb, daemon.scan_collection, None, None
    )
    handler.scan_id = '456-789'
    handler.kbdb.add_scan_preferences = MagicMock()

    handler.prepare_reverse_lookup_opt_for_openvas()

    handler.kbdb.add_scan_preferences.assert_called_with(
        handler.scan_id,
        ['reverse_lookup_only|||yes', 'reverse_lookup_unify|||no'],
    )
@patch('ospd_openvas.db.KbDB')
def test_set_boreas_alive_test_with_settings(self, mock_kb):
    """Scenario battery for prepare_boreas_alive_test() when the alive
    test is supplied as an AliveTest enum value in the target options.

    Each scenario builds a fresh daemon/handler pair; the expected
    preference is BOREAS_ALIVE_TEST plus the numeric bit field.
    """
    # No Boreas config setting (BOREAS_SETTING_NAME) set:
    # the handler must not write any alive-test preference.
    dummy = DummyDaemon()
    ov_setting = {'not_the_correct_setting': 1}
    t_opt = {}
    dummy.scan_collection.get_target_options = MagicMock(return_value=t_opt)
    with patch.object(Openvas, 'get_settings', return_value=ov_setting):
        p_handler = PreferenceHandler(
            '1234-1234', mock_kb, dummy.scan_collection, None, None
        )
        p_handler.scan_id = '456-789'
        p_handler.kbdb.add_scan_preferences = MagicMock()
        p_handler.prepare_boreas_alive_test()
        p_handler.kbdb.add_scan_preferences.assert_not_called()
    # Boreas config setting set but invalid alive_test:
    # the assertion shows the value falls back to 2 (same as ICMP).
    dummy = DummyDaemon()
    t_opt = {'alive_test': "error"}
    dummy.scan_collection.get_target_options = MagicMock(return_value=t_opt)
    ov_setting = {BOREAS_SETTING_NAME: 1}
    with patch.object(Openvas, 'get_settings', return_value=ov_setting):
        p_handler = PreferenceHandler(
            '1234-1234', mock_kb, dummy.scan_collection, None, None
        )
        p_handler.scan_id = '456-789'
        p_handler.kbdb.add_scan_preferences = MagicMock()
        p_handler.prepare_boreas_alive_test()
        calls = [call(p_handler.scan_id, [BOREAS_ALIVE_TEST + '|||2'])]
        p_handler.kbdb.add_scan_preferences.assert_has_calls(calls)
    # ALIVE_TEST_TCP_SYN_SERVICE as alive test -> bit field 16.
    dummy = DummyDaemon()
    t_opt = {'alive_test': AliveTest.ALIVE_TEST_TCP_SYN_SERVICE}
    dummy.scan_collection.get_target_options = MagicMock(return_value=t_opt)
    ov_setting = {BOREAS_SETTING_NAME: 1}
    with patch.object(Openvas, 'get_settings', return_value=ov_setting):
        p_handler = PreferenceHandler(
            '1234-1234', mock_kb, dummy.scan_collection, None, None
        )
        p_handler.scan_id = '456-789'
        p_handler.kbdb.add_scan_preferences = MagicMock()
        p_handler.prepare_boreas_alive_test()
        calls = [call(p_handler.scan_id, [BOREAS_ALIVE_TEST + '|||16'])]
        p_handler.kbdb.add_scan_preferences.assert_has_calls(calls)
    # ICMP was chosen as alive test -> bit field 2.
    dummy = DummyDaemon()
    t_opt = {'alive_test': AliveTest.ALIVE_TEST_ICMP}
    dummy.scan_collection.get_target_options = MagicMock(return_value=t_opt)
    ov_setting = {BOREAS_SETTING_NAME: 1}
    with patch.object(Openvas, 'get_settings', return_value=ov_setting):
        p_handler = PreferenceHandler(
            '1234-1234', mock_kb, dummy.scan_collection, None, None
        )
        p_handler.scan_id = '456-789'
        p_handler.kbdb.add_scan_preferences = MagicMock()
        p_handler.prepare_boreas_alive_test()
        calls = [call(p_handler.scan_id, [BOREAS_ALIVE_TEST + '|||2'])]
        p_handler.kbdb.add_scan_preferences.assert_has_calls(calls)
    # "Scan Config Default" as alive_test -> also stored as 2.
    dummy = DummyDaemon()
    t_opt = {'alive_test': AliveTest.ALIVE_TEST_SCAN_CONFIG_DEFAULT}
    dummy.scan_collection.get_target_options = MagicMock(return_value=t_opt)
    ov_setting = {BOREAS_SETTING_NAME: 1}
    with patch.object(Openvas, 'get_settings', return_value=ov_setting):
        p_handler = PreferenceHandler(
            '1234-1234', mock_kb, dummy.scan_collection, None, None
        )
        p_handler.scan_id = '456-789'
        p_handler.kbdb.add_scan_preferences = MagicMock()
        p_handler.prepare_boreas_alive_test()
        calls = [call(p_handler.scan_id, [BOREAS_ALIVE_TEST + '|||2'])]
        p_handler.kbdb.add_scan_preferences.assert_has_calls(calls)
    # TCP-SYN alive test and dedicated port list for alive scan provided:
    # expect both the bit field (16) and the port-list preference.
    dummy = DummyDaemon()
    t_opt = {
        'alive_test_ports': "80,137",
        'alive_test': AliveTest.ALIVE_TEST_TCP_SYN_SERVICE,
    }
    dummy.scan_collection.get_target_options = MagicMock(return_value=t_opt)
    ov_setting = {BOREAS_SETTING_NAME: 1}
    with patch.object(Openvas, 'get_settings', return_value=ov_setting):
        p_handler = PreferenceHandler(
            '1234-1234', mock_kb, dummy.scan_collection, None, None
        )
        p_handler.scan_id = '456-789'
        p_handler.kbdb.add_scan_preferences = MagicMock()
        p_handler.prepare_boreas_alive_test()
        calls = [
            call(p_handler.scan_id, [BOREAS_ALIVE_TEST + '|||16']),
            call(
                p_handler.scan_id, [BOREAS_ALIVE_TEST_PORTS + '|||80,137']
            ),
        ]
        p_handler.kbdb.add_scan_preferences.assert_has_calls(calls)
@patch('ospd_openvas.db.KbDB')
def test_set_boreas_alive_test_not_as_enum(self, mock_kb):
    """Scenario battery for prepare_boreas_alive_test() when the alive
    test methods are supplied as individual flags ('icmp', 'tcp_syn',
    'tcp_ack', 'arp', 'consider_alive') instead of an AliveTest enum.
    """
    # No Boreas config setting (BOREAS_SETTING_NAME) set:
    # no preference must be written.
    dummy = DummyDaemon()
    ov_setting = {'not_the_correct_setting': 1}
    t_opt = {}
    dummy.scan_collection.get_target_options = MagicMock(return_value=t_opt)
    with patch.object(Openvas, 'get_settings', return_value=ov_setting):
        p_handler = PreferenceHandler(
            '1234-1234', mock_kb, dummy.scan_collection, None, None
        )
        p_handler.scan_id = '456-789'
        p_handler.kbdb.add_scan_preferences = MagicMock()
        p_handler.prepare_boreas_alive_test()
        p_handler.kbdb.add_scan_preferences.assert_not_called()
    # Boreas config setting set but invalid alive_test ('arp' is '-1'):
    # the assertion shows the fallback value 2 is stored.
    dummy = DummyDaemon()
    t_opt = {'alive_test_methods': "1", 'arp': '-1'}
    dummy.scan_collection.get_target_options = MagicMock(return_value=t_opt)
    ov_setting = {BOREAS_SETTING_NAME: 1}
    with patch.object(Openvas, 'get_settings', return_value=ov_setting):
        p_handler = PreferenceHandler(
            '1234-1234', mock_kb, dummy.scan_collection, None, None
        )
        p_handler.scan_id = '456-789'
        p_handler.kbdb.add_scan_preferences = MagicMock()
        p_handler.prepare_boreas_alive_test()
        calls = [call(p_handler.scan_id, [BOREAS_ALIVE_TEST + '|||2'])]
        p_handler.kbdb.add_scan_preferences.assert_has_calls(calls)
    # ICMP was chosen as alive test -> bit field 2.
    dummy = DummyDaemon()
    t_opt = {'alive_test_methods': "1", 'icmp': '1'}
    dummy.scan_collection.get_target_options = MagicMock(return_value=t_opt)
    ov_setting = {BOREAS_SETTING_NAME: 1}
    with patch.object(Openvas, 'get_settings', return_value=ov_setting):
        p_handler = PreferenceHandler(
            '1234-1234', mock_kb, dummy.scan_collection, None, None
        )
        p_handler.scan_id = '456-789'
        p_handler.kbdb.add_scan_preferences = MagicMock()
        p_handler.prepare_boreas_alive_test()
        calls = [call(p_handler.scan_id, [BOREAS_ALIVE_TEST + '|||2'])]
        p_handler.kbdb.add_scan_preferences.assert_has_calls(calls)
    # tcp_syn as alive test -> bit field 16.
    dummy = DummyDaemon()
    t_opt = {'alive_test_methods': "1", 'tcp_syn': '1'}
    dummy.scan_collection.get_target_options = MagicMock(return_value=t_opt)
    ov_setting = {BOREAS_SETTING_NAME: 1}
    with patch.object(Openvas, 'get_settings', return_value=ov_setting):
        p_handler = PreferenceHandler(
            '1234-1234', mock_kb, dummy.scan_collection, None, None
        )
        p_handler.scan_id = '456-789'
        p_handler.kbdb.add_scan_preferences = MagicMock()
        p_handler.prepare_boreas_alive_test()
        calls = [call(p_handler.scan_id, [BOREAS_ALIVE_TEST + '|||16'])]
        p_handler.kbdb.add_scan_preferences.assert_has_calls(calls)
    # tcp_ack as alive test -> bit field 1.
    dummy = DummyDaemon()
    t_opt = {'alive_test_methods': "1", 'tcp_ack': '1'}
    dummy.scan_collection.get_target_options = MagicMock(return_value=t_opt)
    ov_setting = {BOREAS_SETTING_NAME: 1}
    with patch.object(Openvas, 'get_settings', return_value=ov_setting):
        p_handler = PreferenceHandler(
            '1234-1234', mock_kb, dummy.scan_collection, None, None
        )
        p_handler.scan_id = '456-789'
        p_handler.kbdb.add_scan_preferences = MagicMock()
        p_handler.prepare_boreas_alive_test()
        calls = [call(p_handler.scan_id, [BOREAS_ALIVE_TEST + '|||1'])]
        p_handler.kbdb.add_scan_preferences.assert_has_calls(calls)
    # arp as alive test -> bit field 4.
    dummy = DummyDaemon()
    t_opt = {'alive_test_methods': "1", 'arp': '1'}
    dummy.scan_collection.get_target_options = MagicMock(return_value=t_opt)
    ov_setting = {BOREAS_SETTING_NAME: 1}
    with patch.object(Openvas, 'get_settings', return_value=ov_setting):
        p_handler = PreferenceHandler(
            '1234-1234', mock_kb, dummy.scan_collection, None, None
        )
        p_handler.scan_id = '456-789'
        p_handler.kbdb.add_scan_preferences = MagicMock()
        p_handler.prepare_boreas_alive_test()
        calls = [call(p_handler.scan_id, [BOREAS_ALIVE_TEST + '|||4'])]
        p_handler.kbdb.add_scan_preferences.assert_has_calls(calls)
    # consider_alive as alive test -> bit field 8.
    # (Original comment said 'arp' here — copy-paste slip.)
    dummy = DummyDaemon()
    t_opt = {'alive_test_methods': "1", 'consider_alive': '1'}
    dummy.scan_collection.get_target_options = MagicMock(return_value=t_opt)
    ov_setting = {BOREAS_SETTING_NAME: 1}
    with patch.object(Openvas, 'get_settings', return_value=ov_setting):
        p_handler = PreferenceHandler(
            '1234-1234', mock_kb, dummy.scan_collection, None, None
        )
        p_handler.scan_id = '456-789'
        p_handler.kbdb.add_scan_preferences = MagicMock()
        p_handler.prepare_boreas_alive_test()
        calls = [call(p_handler.scan_id, [BOREAS_ALIVE_TEST + '|||8'])]
        p_handler.kbdb.add_scan_preferences.assert_has_calls(calls)
    # all alive test methods -> 1|2|4|8|16 == 31.
    dummy = DummyDaemon()
    t_opt = {
        'alive_test_methods': "1",
        'icmp': '1',
        'tcp_ack': '1',
        'tcp_syn': '1',
        'arp': '1',
        'consider_alive': '1',
    }
    dummy.scan_collection.get_target_options = MagicMock(return_value=t_opt)
    ov_setting = {BOREAS_SETTING_NAME: 1}
    with patch.object(Openvas, 'get_settings', return_value=ov_setting):
        p_handler = PreferenceHandler(
            '1234-1234', mock_kb, dummy.scan_collection, None, None
        )
        p_handler.scan_id = '456-789'
        p_handler.kbdb.add_scan_preferences = MagicMock()
        p_handler.prepare_boreas_alive_test()
        calls = [call(p_handler.scan_id, [BOREAS_ALIVE_TEST + '|||31'])]
        p_handler.kbdb.add_scan_preferences.assert_has_calls(calls)
    # TCP-SYN alive test and dedicated port list for alive scan provided:
    # expect both the bit field (16) and the port-list preference.
    dummy = DummyDaemon()
    t_opt = {
        'alive_test_ports': "80,137",
        'alive_test_methods': "1",
        'tcp_syn': '1',
    }
    dummy.scan_collection.get_target_options = MagicMock(return_value=t_opt)
    ov_setting = {BOREAS_SETTING_NAME: 1}
    with patch.object(Openvas, 'get_settings', return_value=ov_setting):
        p_handler = PreferenceHandler(
            '1234-1234', mock_kb, dummy.scan_collection, None, None
        )
        p_handler.scan_id = '456-789'
        p_handler.kbdb.add_scan_preferences = MagicMock()
        p_handler.prepare_boreas_alive_test()
        calls = [
            call(p_handler.scan_id, [BOREAS_ALIVE_TEST + '|||16']),
            call(
                p_handler.scan_id, [BOREAS_ALIVE_TEST_PORTS + '|||80,137']
            ),
        ]
        p_handler.kbdb.add_scan_preferences.assert_has_calls(calls)
@patch('ospd_openvas.db.KbDB')
def test_set_boreas_alive_test_enum_has_precedence(self, mock_kb):
    """The alive_test enum wins over individual alive_test_methods."""
    daemon = DummyDaemon()
    target_options = {
        'alive_test_methods': "1",
        'consider_alive': '1',
        'alive_test': AliveTest.ALIVE_TEST_ICMP,
    }
    daemon.scan_collection.get_target_options = MagicMock(
        return_value=target_options
    )

    with patch.object(
        Openvas, 'get_settings', return_value={BOREAS_SETTING_NAME: 1}
    ):
        handler = PreferenceHandler(
            '1234-1234', mock_kb, daemon.scan_collection, None, None
        )
        handler.scan_id = '456-789'
        handler.kbdb.add_scan_preferences = MagicMock()

        handler.prepare_boreas_alive_test()

        # ICMP (2) is stored, not consider_alive (8).
        handler.kbdb.add_scan_preferences.assert_has_calls(
            [call(handler.scan_id, [BOREAS_ALIVE_TEST + '|||2'])]
        )
@patch('ospd_openvas.db.KbDB')
def test_set_boreas_alive_test_without_settings(self, mock_kb):
    """Without any scanner settings no alive-test preference is set."""
    daemon = DummyDaemon()
    daemon.scan_collection.get_target_options = MagicMock(
        return_value={'alive_test': 16}
    )

    with patch.object(Openvas, 'get_settings', return_value={}):
        handler = PreferenceHandler(
            '1234-1234', mock_kb, daemon.scan_collection, None, None
        )
        handler.scan_id = '456-789'
        handler.kbdb.add_scan_preferences = MagicMock()

        handler.prepare_boreas_alive_test()

        handler.kbdb.add_scan_preferences.assert_not_called()
@patch('ospd_openvas.db.KbDB')
def test_set_alive_no_setting(self, mock_kb):
    """No target options and no settings -> no preference is written."""
    daemon = DummyDaemon()
    daemon.scan_collection.get_target_options = MagicMock(return_value={})

    with patch.object(Openvas, 'get_settings', return_value={}):
        handler = PreferenceHandler(
            '1234-1234', mock_kb, daemon.scan_collection, None, None
        )
        handler.scan_id = '456-789'
        handler.kbdb.add_scan_preferences = MagicMock()

        handler.prepare_alive_test_option_for_openvas()

        handler.kbdb.add_scan_preferences.assert_not_called()
@patch('ospd_openvas.db.KbDB')
def test_set_alive_no_invalid_alive_test(self, mock_kb):
    """An invalid alive_test enum value adds no scan preference."""
    daemon = DummyDaemon()
    daemon.scan_collection.get_target_options = MagicMock(
        return_value={'alive_test': -1}
    )

    with patch.object(
        Openvas, 'get_settings', return_value={'some_setting': 1}
    ):
        handler = PreferenceHandler(
            '1234-1234', mock_kb, daemon.scan_collection, None, None
        )
        handler._nvts_params = {}  # pylint: disable = protected-access
        handler.scan_id = '456-789'
        handler.kbdb.add_scan_preferences = MagicMock()

        handler.prepare_alive_test_option_for_openvas()

        handler.kbdb.add_scan_preferences.assert_not_called()
@patch('ospd_openvas.db.KbDB')
def test_set_alive_no_invalid_alive_test_no_enum(self, mock_kb):
    """An invalid per-method flag value adds no scan preference."""
    daemon = DummyDaemon()
    daemon.scan_collection.get_target_options = MagicMock(
        return_value={'alive_test_methods': '1', 'icmp': '-1'}
    )

    with patch.object(
        Openvas, 'get_settings', return_value={'some_setting': 1}
    ):
        handler = PreferenceHandler(
            '1234-1234', mock_kb, daemon.scan_collection, None, None
        )
        handler._nvts_params = {}  # pylint: disable = protected-access
        handler.scan_id = '456-789'
        handler.kbdb.add_scan_preferences = MagicMock()

        handler.prepare_alive_test_option_for_openvas()

        handler.kbdb.add_scan_preferences.assert_not_called()
@patch('ospd_openvas.db.KbDB')
def test_set_alive_pinghost(self, mock_kb):
    """An ICMP alive test (2) sets the matching ping-host checkboxes."""
    expected_prefs = [
        "1.3.6.1.4.1.25623.1.0.100315:1:checkbox:Do a TCP ping|||no",
        "1.3.6.1.4.1.25623.1.0.100315:2:checkbox:"
        "TCP ping tries also TCP-SYN ping|||no",
        "1.3.6.1.4.1.25623.1.0.100315:7:checkbox:"
        "TCP ping tries only TCP-SYN ping|||no",
        "1.3.6.1.4.1.25623.1.0.100315:3:checkbox:Do an ICMP ping|||yes",
        "1.3.6.1.4.1.25623.1.0.100315:4:checkbox:Use ARP|||no",
        "1.3.6.1.4.1.25623.1.0.100315:5:checkbox:"
        "Mark unrechable Hosts as dead (not scanning)|||yes",
    ]
    daemon = DummyDaemon()
    daemon.scan_collection.get_target_options = MagicMock(
        return_value={'alive_test': 2}
    )

    with patch.object(
        Openvas, 'get_settings', return_value={'some_setting': 1}
    ):
        handler = PreferenceHandler(
            '1234-1234', mock_kb, daemon.scan_collection, None, None
        )
        handler._nvts_params = {}  # pylint: disable = protected-access
        handler.scan_id = '456-789'
        handler.kbdb.add_scan_preferences = MagicMock()

        handler.prepare_alive_test_option_for_openvas()

        # Every collected NVT parameter must be one of the expected
        # key|||value strings.
        # pylint: disable = protected-access
        for key, value in handler._nvts_params.items():
            self.assertIn(f"{key}|||{value}", expected_prefs)
@patch('ospd_openvas.db.KbDB')
def test_prepare_alive_test_not_supplied_as_enum(self, mock_kb):
    """Per-method icmp flag yields the same checkbox set as the enum."""
    expected_params = {
        "1.3.6.1.4.1.25623.1.0.100315:1:checkbox:Do a TCP ping": "no",
        "1.3.6.1.4.1.25623.1.0.100315:2:checkbox:"
        "TCP ping tries also TCP-SYN ping": "no",
        "1.3.6.1.4.1.25623.1.0.100315:7:checkbox:"
        "TCP ping tries only TCP-SYN ping": "no",
        "1.3.6.1.4.1.25623.1.0.100315:3:checkbox:Do an ICMP ping": "yes",
        "1.3.6.1.4.1.25623.1.0.100315:4:checkbox:Use ARP": "no",
        "1.3.6.1.4.1.25623.1.0.100315:5:checkbox:"
        "Mark unrechable Hosts as dead (not scanning)": "yes",
    }
    daemon = DummyDaemon()
    daemon.scan_collection.get_target_options = MagicMock(
        return_value={'alive_test_methods': '1', 'icmp': '1'}
    )

    with patch.object(
        Openvas, 'get_settings', return_value={'some_setting': 1}
    ):
        handler = PreferenceHandler(
            '1234-1234', mock_kb, daemon.scan_collection, None, None
        )
        handler._nvts_params = {}  # pylint: disable = protected-access
        handler.scan_id = '456-789'
        handler.kbdb.add_scan_preferences = MagicMock()

        handler.prepare_alive_test_option_for_openvas()

        self.assertEqual(
            handler._nvts_params,  # pylint: disable = protected-access
            expected_params,
        )
@patch('ospd_openvas.db.KbDB')
def test_prepare_alive_test_no_enum_no_alive_test(self, mock_kb):
    """A disabled icmp flag ('0') results in no preference at all."""
    daemon = DummyDaemon()
    daemon.scan_collection.get_target_options = MagicMock(
        return_value={'alive_test_methods': '1', 'icmp': '0'}
    )

    with patch.object(
        Openvas, 'get_settings', return_value={'some_setting': 1}
    ):
        handler = PreferenceHandler(
            '1234-1234', mock_kb, daemon.scan_collection, None, None
        )
        handler._nvts_params = {}  # pylint: disable = protected-access
        handler.scan_id = '456-789'
        handler.kbdb.add_scan_preferences = MagicMock()

        handler.prepare_alive_test_option_for_openvas()

        handler.kbdb.add_scan_preferences.assert_not_called()
def test_alive_test_methods_to_bit_field(self):
    """Each single method maps to its own flag; all methods OR up."""
    # One enabled method at a time must yield exactly that flag.
    single_method_cases = [
        (
            AliveTest.ALIVE_TEST_TCP_ACK_SERVICE,
            dict(
                icmp=False,
                tcp_ack=True,
                tcp_syn=False,
                arp=False,
                consider_alive=False,
            ),
        ),
        (
            AliveTest.ALIVE_TEST_ICMP,
            dict(
                icmp=True,
                tcp_ack=False,
                tcp_syn=False,
                arp=False,
                consider_alive=False,
            ),
        ),
        (
            AliveTest.ALIVE_TEST_ARP,
            dict(
                icmp=False,
                tcp_ack=False,
                tcp_syn=False,
                arp=True,
                consider_alive=False,
            ),
        ),
        (
            AliveTest.ALIVE_TEST_CONSIDER_ALIVE,
            dict(
                icmp=False,
                tcp_ack=False,
                tcp_syn=False,
                arp=False,
                consider_alive=True,
            ),
        ),
        (
            AliveTest.ALIVE_TEST_TCP_SYN_SERVICE,
            dict(
                icmp=False,
                tcp_ack=False,
                tcp_syn=True,
                arp=False,
                consider_alive=False,
            ),
        ),
    ]
    for expected, methods in single_method_cases:
        self.assertEqual(
            expected, alive_test_methods_to_bit_field(**methods)
        )

    # Enabling every method must produce the union of all flags.
    all_alive_test_methods = (
        AliveTest.ALIVE_TEST_SCAN_CONFIG_DEFAULT
        | AliveTest.ALIVE_TEST_TCP_ACK_SERVICE
        | AliveTest.ALIVE_TEST_ICMP
        | AliveTest.ALIVE_TEST_ARP
        | AliveTest.ALIVE_TEST_CONSIDER_ALIVE
        | AliveTest.ALIVE_TEST_TCP_SYN_SERVICE
    )
    self.assertEqual(
        all_alive_test_methods,
        alive_test_methods_to_bit_field(
            icmp=True,
            tcp_ack=True,
            tcp_syn=True,
            arp=True,
            consider_alive=True,
        ),
    )
@patch('ospd_openvas.db.KbDB')
def test_prepare_nvt_prefs(self, mock_kb):
    """Collected NVT parameters are flushed as key|||value strings."""
    daemon = DummyDaemon()

    handler = PreferenceHandler(
        '1234-1234', mock_kb, daemon.scan_collection, None, None
    )
    handler._nvts_params = {  # pylint: disable = protected-access
        "1.3.6.1.4.1.25623.1.0.100315:1:checkbox:Do a TCP ping": "no"
    }
    handler.kbdb.add_scan_preferences = MagicMock()

    handler.prepare_nvt_preferences()

    handler.kbdb.add_scan_preferences.assert_called_with(
        handler.scan_id,
        ["1.3.6.1.4.1.25623.1.0.100315:1:checkbox:Do a TCP ping|||no"],
    )
@patch('ospd_openvas.db.KbDB')
def test_prepare_nvt_prefs_no_prefs(self, mock_kb):
    """Without collected NVT parameters nothing is written to the KB."""
    daemon = DummyDaemon()

    handler = PreferenceHandler(
        '456-789', mock_kb, daemon.scan_collection, None, None
    )
    handler._nvts_params = {}  # pylint: disable = protected-access
    handler.kbdb.add_scan_preferences = MagicMock()

    handler.prepare_nvt_preferences()

    handler.kbdb.add_scan_preferences.assert_not_called()
ospd-openvas-22.9.0/tests/test_protocol.py 0000664 0000000 0000000 00000000704 15011310720 0020625 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
import unittest
from ospd.protocol import RequestParser
class RequestParserTestCase(unittest.TestCase):
    """Tests for ospd.protocol.RequestParser chunked-input handling."""

    def test_parse(self):
        # NOTE(review): the byte-string payloads below are all empty —
        # the original XML fragments (fed to the parser in chunks) appear
        # to have been stripped during archive extraction. Restore them
        # from upstream before relying on this test; as written it only
        # shows has_ended() returning False twice, then True.
        parser = RequestParser()
        self.assertFalse(parser.has_ended(b''))
        self.assertFalse(parser.has_ended(b''))
        self.assertTrue(parser.has_ended(b''))
ospd-openvas-22.9.0/tests/test_scan_and_result.py 0000664 0000000 0000000 00000152214 15011310720 0022134 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
# pylint: disable=too-many-lines
"""Test module for scan runs"""
import time
import unittest
from unittest.mock import patch, MagicMock, Mock
import logging
import xml.etree.ElementTree as ET
from defusedxml.common import EntitiesForbidden
from ospd.resultlist import ResultList
from ospd.errors import OspdCommandError
from ospd.scan import ScanStatus
from .helper import (
DummyWrapper,
assert_called_once,
FakeStream,
FakeDataManager,
FakePsutil,
DummyXML,
)
class FakeStartProcess:
    """Test double for ospd's process-start helper: records the target
    callable and its arguments instead of spawning a real process."""

    def __init__(self):
        # Sentinels returned by run() and __call__() so tests can assert
        # against them.
        self.run_mock = MagicMock()
        self.call_mock = MagicMock()
        self.func = None
        self.args = None
        self.kwargs = None

    def __call__(self, func, *, args=None, kwargs=None):
        # Capture the would-be process target for a later manual run().
        self.func = func
        self.args = args or []
        self.kwargs = kwargs or {}
        return self.call_mock

    def run(self):
        # Execute the captured target synchronously in this process.
        self.func(*self.args, **self.kwargs)
        return self.run_mock

    def __repr__(self):
        # NOTE(review): this f-string is empty — the original repr text
        # (presumably angle-bracketed and including func/args/kwargs)
        # was stripped during archive extraction; restore from upstream.
        return (
            f""
        )
class Result(object):
    """Lightweight stand-in for a scan result: stores the result type
    plus arbitrary attribute overrides given as keyword arguments."""

    def __init__(self, type_, **kwargs):
        self.result_type = type_
        # Default every known result field to the empty string, then let
        # the caller override any subset via keyword arguments.
        for field in (
            'host',
            'hostname',
            'name',
            'value',
            'port',
            'test_id',
            'severity',
            'qod',
            'uri',
        ):
            setattr(self, field, '')
        for name, value in kwargs.items():
            setattr(self, name, value)
class ScanTestCase(unittest.TestCase):
def setUp(self):
    # Fresh daemon per test, backed by an in-memory data manager and a
    # writable directory for per-scan file storage.
    self.daemon = DummyWrapper([])
    self.daemon.scan_collection.datamanager = FakeDataManager()
    self.daemon.scan_collection.file_storage_dir = '/tmp'
def test_get_default_scanner_params(self):
    """Scanner-details response must include a scanner_params element."""
    fs = FakeStream()
    # NOTE(review): the OSP XML command string is empty — the original
    # command (angle-bracketed XML) was stripped during extraction;
    # restore it from upstream before trusting this test.
    self.daemon.handle_command('', fs)
    response = fs.get_response()
    # The status of the response must be success (i.e. 200)
    self.assertEqual(response.get('status'), '200')
    # The response root element must have the correct name
    self.assertEqual(response.tag, 'get_scanner_details_response')
    # The response must contain a 'scanner_params' element
    self.assertIsNotNone(response.find('scanner_params'))
def test_get_default_help(self):
    """Both help command variants must answer with status 200."""
    # NOTE(review): both OSP XML command strings are empty — the
    # original angle-bracketed commands were stripped during
    # extraction; restore them from upstream.
    fs = FakeStream()
    self.daemon.handle_command('', fs)
    response = fs.get_response()
    self.assertEqual(response.get('status'), '200')

    fs = FakeStream()
    self.daemon.handle_command('', fs)
    response = fs.get_response()
    self.assertEqual(response.get('status'), '200')
    self.assertEqual(response.tag, 'help_response')
def test_get_default_scanner_version(self):
    """Version response must carry a protocol element."""
    fs = FakeStream()
    # NOTE(review): empty command string — original OSP XML stripped
    # during extraction; restore from upstream.
    self.daemon.handle_command('', fs)
    response = fs.get_response()
    self.assertEqual(response.get('status'), '200')
    self.assertIsNotNone(response.find('protocol'))
def test_get_vts_no_vt(self):
    """With no VTs loaded the response still has an empty vts element."""
    fs = FakeStream()
    # NOTE(review): empty command string — original OSP XML stripped
    # during extraction; restore from upstream.
    self.daemon.handle_command('', fs)
    response = fs.get_response()
    self.assertEqual(response.get('status'), '200')
    self.assertIsNotNone(response.find('vts'))
def test_get_vt_xml_no_dict(self):
    """A VT without a metadata dict yields XML lacking an 'id'."""
    vt_element = self.daemon.get_vt_xml(('1234', None))
    self.assertFalse(vt_element.get('id'))
def test_get_vts_single_vt(self):
    """A single registered VT appears in the vts response."""
    fs = FakeStream()
    self.daemon.add_vt('1.2.3.4', 'A vulnerability test')
    # NOTE(review): empty command string — original OSP XML stripped
    # during extraction; restore from upstream.
    self.daemon.handle_command('', fs)
    response = fs.get_response()
    self.assertEqual(response.get('status'), '200')

    vts = response.find('vts')
    self.assertIsNotNone(vts.find('vt'))

    vt = vts.find('vt')
    self.assertEqual(vt.get('id'), '1.2.3.4')
def test_get_vts_version(self):
    """The vts element carries the configured vts_version attribute."""
    fs = FakeStream()
    self.daemon.add_vt('1.2.3.4', 'A vulnerability test')
    self.daemon.set_vts_version('today')
    # NOTE(review): empty command string — original OSP XML stripped
    # during extraction; restore from upstream.
    self.daemon.handle_command('', fs)
    response = fs.get_response()
    self.assertEqual(response.get('status'), '200')

    vts_version = response.find('vts').attrib['vts_version']
    self.assertEqual(vts_version, self.daemon.get_vts_version())

    vts = response.find('vts')
    self.assertIsNotNone(vts.find('vt'))

    vt = vts.find('vt')
    self.assertEqual(vt.get('id'), '1.2.3.4')
def test_get_vts_version_only(self):
fs = FakeStream()
self.daemon.add_vt('1.2.3.4', 'A vulnerability test')
self.daemon.set_vts_version('today')
self.daemon.set_feed_vendor('Greenbone')
self.daemon.set_feed_name('GCF')
self.daemon.set_feed_home('www.greenbone.net')
self.daemon.handle_command('', fs)
response = fs.get_response()
self.assertEqual(response.get('status'), '200')
vts_version = response.find('vts').attrib['vts_version']
self.assertEqual(vts_version, self.daemon.get_vts_version())
feed_vendor = response.find('vts').attrib['feed_vendor']
self.assertEqual(feed_vendor, self.daemon.get_feed_vendor())
feed_home = response.find('vts').attrib['feed_home']
self.assertEqual(feed_home, self.daemon.get_feed_home())
feed_name = response.find('vts').attrib['feed_name']
self.assertEqual(feed_name, self.daemon.get_feed_name())
vts = response.find('vts')
self.assertIsNone(vts.find('vt'))
def test_get_vts_still_not_init(self):
fs = FakeStream()
self.daemon.initialized = False
self.daemon.handle_command('', fs)
response = fs.get_response()
self.assertEqual(response.get('status'), '400')
def test_get_help_still_not_init(self):
fs = FakeStream()
self.daemon.initialized = False
self.daemon.handle_command('', fs)
response = fs.get_response()
self.assertEqual(response.get('status'), '200')
def test_get_vts_filter_positive(self):
self.daemon.add_vt(
'1.2.3.4',
'A vulnerability test',
vt_modification_time='19000202',
)
fs = FakeStream()
self.daemon.handle_command(
'', fs
)
response = fs.get_response()
self.assertEqual(response.get('status'), '200')
vts = response.find('vts')
vt = vts.find('vt')
self.assertIsNotNone(vt)
self.assertEqual(vt.get('id'), '1.2.3.4')
modification_time = response.findall('vts/vt/modification_time')
self.assertEqual(
'19000202',
ET.tostring(modification_time[0]).decode('utf-8'),
)
def test_get_vts_filter_negative(self):
self.daemon.add_vt(
'1.2.3.4',
'A vulnerability test',
vt_modification_time='19000202',
)
fs = FakeStream()
self.daemon.handle_command(
'',
fs,
)
response = fs.get_response()
self.assertEqual(response.get('status'), '200')
vts = response.find('vts')
vt = vts.find('vt')
self.assertIsNotNone(vt)
self.assertEqual(vt.get('id'), '1.2.3.4')
modification_time = response.findall('vts/vt/modification_time')
self.assertEqual(
'19000202',
ET.tostring(modification_time[0]).decode('utf-8'),
)
def test_get_vts_bad_filter(self):
fs = FakeStream()
cmd = ''
self.assertRaises(OspdCommandError, self.daemon.handle_command, cmd, fs)
self.assertTrue(self.daemon.vts.is_cache_available)
def test_get_vtss_multiple_vts(self):
self.daemon.add_vt('1.2.3.4', 'A vulnerability test')
self.daemon.add_vt('1.2.3.5', 'Another vulnerability test')
self.daemon.add_vt('123456789', 'Yet another vulnerability test')
fs = FakeStream()
self.daemon.handle_command('', fs)
response = fs.get_response()
self.assertEqual(response.get('status'), '200')
vts = response.find('vts')
self.assertIsNotNone(vts.find('vt'))
@patch('ospd.ospd.XmlStringVTHelper')
def test_get_vts_multiple_vts_with_custom(self, mxml):
mxml.side_effect = DummyXML
self.daemon.add_vt('1.2.3.4', 'A vulnerability test', custom='b')
self.daemon.add_vt(
'4.3.2.1', 'Another vulnerability test with custom info', custom='b'
)
self.daemon.add_vt(
'123456789', 'Yet another vulnerability test', custom='b'
)
fs = FakeStream()
self.daemon.handle_command('', fs)
response = fs.get_response()
custom = response.findall('vts/vt/custom')
self.assertEqual(3, len(custom))
@patch('ospd.ospd.XmlStringVTHelper')
def test_get_vts_vts_with_params(self, mxml):
mxml.side_effect = DummyXML
self.daemon.add_vt(
'1.2.3.4', 'A vulnerability test', vt_params='a', custom='b'
)
fs = FakeStream()
self.daemon.handle_command('', fs)
response = fs.get_response()
# The status of the response must be success (i.e. 200)
self.assertEqual(response.get('status'), '200')
# The response root element must have the correct name
self.assertEqual(response.tag, 'get_vts_response')
# The response must contain a 'scanner_params' element
self.assertIsNotNone(response.find('vts'))
vt_params = response[0][0].findall('params')
self.assertEqual(1, len(vt_params))
custom = response[0][0].findall('custom')
self.assertEqual(1, len(custom))
params = response.findall('vts/vt/params/param')
self.assertEqual(2, len(params))
@patch('ospd.ospd.XmlStringVTHelper')
def test_get_vts_vts_with_refs(self, mxml):
mxml.side_effect = DummyXML
self.daemon.add_vt(
'1.2.3.4',
'A vulnerability test',
vt_params="a",
custom="b",
vt_refs="c",
)
fs = FakeStream()
self.daemon.handle_command('', fs)
response = fs.get_response()
# The status of the response must be success (i.e. 200)
self.assertEqual(response.get('status'), '200')
# The response root element must have the correct name
self.assertEqual(response.tag, 'get_vts_response')
# The response must contain a 'vts' element
self.assertIsNotNone(response.find('vts'))
vt_params = response[0][0].findall('params')
self.assertEqual(1, len(vt_params))
refs = response.findall('vts/vt/refs/ref')
self.assertEqual(2, len(refs))
@patch('ospd.ospd.XmlStringVTHelper')
def test_get_vts_vts_with_dependencies(self, mxml):
mxml.side_effect = DummyXML
self.daemon.add_vt(
'1.2.3.4',
'A vulnerability test',
vt_dependencies="c",
)
fs = FakeStream()
self.daemon.handle_command('', fs)
response = fs.get_response()
deps = response.findall('vts/vt/dependencies/dependency')
self.assertEqual(2, len(deps))
def test_get_vts_vts_with_severities(self):
self.daemon.add_vt(
'1.2.3.4',
'A vulnerability test',
severities="c",
)
fs = FakeStream()
self.daemon.handle_command('', fs)
response = fs.get_response()
severity = response.findall('vts/vt/severities/severity')
self.assertEqual(1, len(severity))
def test_get_vts_vts_with_detection_qodt(self):
self.daemon.add_vt(
'1.2.3.4',
'A vulnerability test',
detection="c",
qod_t="d",
)
fs = FakeStream()
self.daemon.handle_command('', fs)
response = fs.get_response()
detection = response.findall('vts/vt/detection')
self.assertEqual(1, len(detection))
def test_get_vts_vts_with_detection_qodv(self):
self.daemon.add_vt(
'1.2.3.4',
'A vulnerability test',
detection="c",
qod_v="d",
)
fs = FakeStream()
self.daemon.handle_command('', fs)
response = fs.get_response()
detection = response.findall('vts/vt/detection')
self.assertEqual(1, len(detection))
@patch('ospd.ospd.XmlStringVTHelper')
def test_get_vts_vts_with_summary(self, mxml):
mxml.side_effect = DummyXML
self.daemon.add_vt(
'1.2.3.4',
'A vulnerability test',
summary="a",
)
fs = FakeStream()
self.daemon.handle_command('', fs)
response = fs.get_response()
summary = response.findall('vts/vt/summary')
self.assertEqual(1, len(summary))
def test_get_vts_vts_with_impact(self):
self.daemon.add_vt(
'1.2.3.4',
'A vulnerability test',
impact="c",
)
fs = FakeStream()
self.daemon.handle_command('', fs)
response = fs.get_response()
impact = response.findall('vts/vt/impact')
self.assertEqual(1, len(impact))
def test_get_vts_vts_with_affected(self):
self.daemon.add_vt(
'1.2.3.4',
'A vulnerability test',
affected="c",
)
fs = FakeStream()
self.daemon.handle_command('', fs)
response = fs.get_response()
affect = response.findall('vts/vt/affected')
self.assertEqual(1, len(affect))
def test_get_vts_vts_with_insight(self):
self.daemon.add_vt(
'1.2.3.4',
'A vulnerability test',
insight="c",
)
fs = FakeStream()
self.daemon.handle_command('', fs)
response = fs.get_response()
insight = response.findall('vts/vt/insight')
self.assertEqual(1, len(insight))
def test_get_vts_vts_with_solution(self):
self.daemon.add_vt(
'1.2.3.4',
'A vulnerability test',
solution="c",
solution_t="d",
solution_m="e",
)
fs = FakeStream()
self.daemon.handle_command('', fs)
response = fs.get_response()
solution = response.findall('vts/vt/solution')
self.assertEqual(1, len(solution))
def test_get_vts_vts_with_ctime(self):
self.daemon.add_vt(
'1.2.3.4',
'A vulnerability test',
vt_creation_time='01-01-1900',
)
fs = FakeStream()
self.daemon.handle_command('', fs)
response = fs.get_response()
creation_time = response.findall('vts/vt/creation_time')
self.assertEqual(
'01-01-1900',
ET.tostring(creation_time[0]).decode('utf-8'),
)
def test_get_vts_vts_with_mtime(self):
self.daemon.add_vt(
'1.2.3.4',
'A vulnerability test',
vt_modification_time='02-01-1900',
)
fs = FakeStream()
self.daemon.handle_command('', fs)
response = fs.get_response()
modification_time = response.findall('vts/vt/modification_time')
self.assertEqual(
'02-01-1900',
ET.tostring(modification_time[0]).decode('utf-8'),
)
def test_clean_forgotten_scans(self):
fs = FakeStream()
self.daemon.handle_command(
'',
fs,
)
response = fs.get_response()
scan_id = response.findtext('id')
finished = False
self.daemon.start_queued_scans()
while not finished:
fs = FakeStream()
self.daemon.handle_command(
f'', fs
)
response = fs.get_response()
scans = response.findall('scan')
self.assertEqual(1, len(scans))
scan = scans[0]
if scan.get('end_time') != '0':
finished = True
else:
time.sleep(0.01)
fs = FakeStream()
self.daemon.handle_command(
f'', fs
)
response = fs.get_response()
self.assertEqual(
len(list(self.daemon.scan_collection.ids_iterator())), 1
)
# Set an old end_time
self.daemon.scan_collection.scans_table[scan_id]['end_time'] = 123456
# Run the check
self.daemon.clean_forgotten_scans()
# Not removed
self.assertEqual(
len(list(self.daemon.scan_collection.ids_iterator())), 1
)
# Set the max time and run again
self.daemon.scaninfo_store_time = 1
self.daemon.clean_forgotten_scans()
# Now is removed
self.assertEqual(
len(list(self.daemon.scan_collection.ids_iterator())), 0
)
def test_scan_with_error(self):
fs = FakeStream()
self.daemon.handle_command(
'',
fs,
)
response = fs.get_response()
scan_id = response.findtext('id')
finished = False
self.daemon.start_queued_scans()
self.daemon.add_scan_error(
scan_id, host='a', value='something went wrong'
)
while not finished:
fs = FakeStream()
self.daemon.handle_command(
f'', fs
)
response = fs.get_response()
scans = response.findall('scan')
self.assertEqual(1, len(scans))
scan = scans[0]
status = scan.get('status')
if status == "init" or status == "running":
self.assertEqual('0', scan.get('end_time'))
time.sleep(0.010)
else:
finished = True
fs = FakeStream()
self.daemon.handle_command(
f'', fs
)
response = fs.get_response()
self.assertEqual(
response.findtext('scan/results/result'), 'something went wrong'
)
fs = FakeStream()
self.daemon.handle_command(f'', fs)
response = fs.get_response()
self.assertEqual(response.get('status'), '200')
def test_get_scan_pop(self):
fs = FakeStream()
self.daemon.handle_command(
''
'',
fs,
)
self.daemon.start_queued_scans()
response = fs.get_response()
scan_id = response.findtext('id')
self.daemon.add_scan_host_detail(
scan_id, host='a', value='Some Host Detail'
)
time.sleep(1)
fs = FakeStream()
self.daemon.handle_command(f'', fs)
response = fs.get_response()
self.assertEqual(
response.findtext('scan/results/result'), 'Some Host Detail'
)
fs = FakeStream()
self.daemon.handle_command(
f'', fs
)
response = fs.get_response()
self.assertEqual(
response.findtext('scan/results/result'), 'Some Host Detail'
)
fs = FakeStream()
self.daemon.handle_command(
f'',
fs,
)
response = fs.get_response()
self.assertEqual(response.findtext('scan/results/result'), None)
def test_get_scan_pop_max_res(self):
fs = FakeStream()
self.daemon.handle_command(
''
'',
fs,
)
self.daemon.start_queued_scans()
response = fs.get_response()
scan_id = response.findtext('id')
self.daemon.add_scan_log(scan_id, host='a', name='a')
self.daemon.add_scan_log(scan_id, host='c', name='c')
self.daemon.add_scan_log(scan_id, host='b', name='b')
fs = FakeStream()
self.daemon.handle_command(
f'',
fs,
)
response = fs.get_response()
self.assertEqual(len(response.findall('scan/results/result')), 1)
fs = FakeStream()
self.daemon.handle_command(
f'', fs
)
response = fs.get_response()
self.assertEqual(len(response.findall('scan/results/result')), 2)
def test_get_scan_results_clean(self):
fs = FakeStream()
self.daemon.handle_command(
''
'',
fs,
)
self.daemon.start_queued_scans()
response = fs.get_response()
scan_id = response.findtext('id')
self.daemon.add_scan_log(scan_id, host='a', name='a')
self.daemon.add_scan_log(scan_id, host='c', name='c')
self.daemon.add_scan_log(scan_id, host='b', name='b')
fs = FakeStream()
self.daemon.handle_command(
f'',
fs,
)
res_len = len(
self.daemon.scan_collection.scans_table[scan_id]['results']
)
self.assertEqual(res_len, 0)
res_len = len(
self.daemon.scan_collection.scans_table[scan_id]['temp_results']
)
self.assertEqual(res_len, 0)
def test_get_scan_results_restore(self):
fs = FakeStream()
self.daemon.handle_command(
''
'',
fs,
)
self.daemon.start_queued_scans()
response = fs.get_response()
scan_id = response.findtext('id')
self.daemon.add_scan_log(scan_id, host='a', name='a')
self.daemon.add_scan_log(scan_id, host='c', name='c')
self.daemon.add_scan_log(scan_id, host='b', name='b')
fs = FakeStream(return_value=False)
self.daemon.handle_command(
f'',
fs,
)
res_len = len(
self.daemon.scan_collection.scans_table[scan_id]['results']
)
self.assertEqual(res_len, 3)
res_len = len(
self.daemon.scan_collection.scans_table[scan_id]['temp_results']
)
self.assertEqual(res_len, 0)
def test_billon_laughs(self):
lol = (
''
' '
' '
' '
' '
' '
' '
' '
' ]>'
)
fs = FakeStream()
self.assertRaises(
EntitiesForbidden, self.daemon.handle_command, lol, fs
)
def test_target_with_credentials(self):
fs = FakeStream()
self.daemon.handle_command(
''
''
''
''
'192.168.0.0/2422'
''
''
'scanuser'
'mypass'
''
'smbuser'
'mypass'
''
''
'',
fs,
)
self.daemon.start_queued_scans()
response = fs.get_response()
self.assertEqual(response.get('status'), '200')
cred_dict = {
'ssh': {
'type': 'up',
'password': 'mypass',
'port': '22',
'username': 'scanuser',
},
'smb': {'type': 'up', 'password': 'mypass', 'username': 'smbuser'},
}
scan_id = response.findtext('id')
response = self.daemon.get_scan_credentials(scan_id)
self.assertEqual(response, cred_dict)
def test_target_with_credential_empty_community(self):
fs = FakeStream()
self.daemon.handle_command(
''
''
''
''
'192.168.0.0/2422'
''
''
''
''
''
'',
fs,
)
self.daemon.start_queued_scans()
response = fs.get_response()
self.assertEqual(response.get('status'), '200')
cred_dict = {
'snmp': {'type': 'up', 'community': ''},
}
scan_id = response.findtext('id')
response = self.daemon.get_scan_credentials(scan_id)
self.assertEqual(response, cred_dict)
def test_scan_get_target(self):
fs = FakeStream()
self.daemon.handle_command(
''
''
''
''
'localhosts,192.168.0.0/24'
'80,443'
''
'',
fs,
)
self.daemon.start_queued_scans()
response = fs.get_response()
scan_id = response.findtext('id')
fs = FakeStream()
self.daemon.handle_command(f'', fs)
response = fs.get_response()
scan_res = response.find('scan')
self.assertEqual(scan_res.get('target'), 'localhosts,192.168.0.0/24')
def test_scan_get_target_options(self):
fs = FakeStream()
self.daemon.handle_command(
''
''
''
''
'192.168.0.1'
'220'
''
'',
fs,
)
self.daemon.start_queued_scans()
response = fs.get_response()
scan_id = response.findtext('id')
time.sleep(1)
target_options = self.daemon.get_scan_target_options(scan_id)
self.assertEqual(target_options, {'alive_test': '0'})
def test_scan_get_target_options_alive_test_methods(self):
fs = FakeStream()
self.daemon.handle_command(
''
''
''
''
'192.168.0.1'
'22'
''
'1'
'1'
'1'
'1'
'1'
''
''
''
'',
fs,
)
self.daemon.start_queued_scans()
response = fs.get_response()
scan_id = response.findtext('id')
time.sleep(1)
target_options = self.daemon.get_scan_target_options(scan_id)
self.assertEqual(
target_options,
{
'alive_test_methods': '1',
'icmp': '1',
'tcp_syn': '1',
'tcp_ack': '1',
'arp': '1',
'consider_alive': '1',
},
)
def test_scan_get_target_options_alive_test_methods_dont_add_empty_or_missing( # pylint: disable=line-too-long
self,
):
fs = FakeStream()
self.daemon.handle_command(
''
''
''
''
'192.168.0.1'
'22'
''
'1'
''
''
''
''
''
'',
fs,
)
self.daemon.start_queued_scans()
response = fs.get_response()
scan_id = response.findtext('id')
time.sleep(1)
target_options = self.daemon.get_scan_target_options(scan_id)
self.assertEqual(
target_options,
{
'alive_test_methods': '1',
'icmp': '1',
},
)
def test_progress(self):
fs = FakeStream()
self.daemon.handle_command(
''
''
''
'localhost1, localhost2'
'22'
''
'',
fs,
)
self.daemon.start_queued_scans()
response = fs.get_response()
scan_id = response.findtext('id')
self.daemon.set_scan_host_progress(scan_id, 'localhost1', 75)
self.daemon.set_scan_host_progress(scan_id, 'localhost2', 25)
self.assertEqual(
self.daemon.scan_collection.calculate_target_progress(scan_id), 50
)
def test_progress_all_host_dead(self):
fs = FakeStream()
self.daemon.handle_command(
''
''
''
'localhost1, localhost2'
'22'
''
'',
fs,
)
self.daemon.start_queued_scans()
response = fs.get_response()
scan_id = response.findtext('id')
self.daemon.set_scan_host_progress(scan_id, 'localhost1', -1)
self.daemon.set_scan_host_progress(scan_id, 'localhost2', -1)
self.daemon.sort_host_finished(scan_id, ['localhost1', 'localhost2'])
self.assertEqual(
self.daemon.scan_collection.calculate_target_progress(scan_id), 100
)
@patch('ospd.ospd.os')
def test_interrupted_scan(self, mock_os):
mock_os.setsid.return_value = None
fs = FakeStream()
self.daemon.handle_command(
''
''
''
'localhost1, localhost2, localhost3, localhost4'
'22'
''
'',
fs,
)
self.daemon.start_queued_scans()
response = fs.get_response()
scan_id = response.findtext('id')
self.daemon.exec_scan = Mock(return_value=None)
self.daemon.set_scan_host_progress(scan_id, 'localhost1', 5)
self.daemon.set_scan_host_progress(scan_id, 'localhost2', 14)
while self.daemon.get_scan_status(scan_id) == ScanStatus.INIT:
fs = FakeStream()
self.daemon.handle_command(
f'',
fs,
)
response = fs.get_response()
status = response.find('scan').attrib['status']
self.assertEqual(status, ScanStatus.INTERRUPTED.name.lower())
def test_sort_host_finished(self):
fs = FakeStream()
self.daemon.handle_command(
''
''
''
'localhost1, localhost2, localhost3, localhost4'
'22'
''
'',
fs,
)
self.daemon.start_queued_scans()
response = fs.get_response()
scan_id = response.findtext('id')
self.daemon.set_scan_host_progress(scan_id, 'localhost3', -1)
self.daemon.set_scan_host_progress(scan_id, 'localhost1', 75)
self.daemon.set_scan_host_progress(scan_id, 'localhost4', 100)
self.daemon.set_scan_host_progress(scan_id, 'localhost2', 25)
self.daemon.sort_host_finished(scan_id, ['localhost3', 'localhost4'])
rounded_progress = self.daemon.scan_collection.calculate_target_progress( # pylint: disable=line-too-long)
scan_id
)
self.assertEqual(rounded_progress, 66)
def test_set_status_interrupted(self):
fs = FakeStream()
self.daemon.handle_command(
''
''
''
'localhost1'
'22'
''
'',
fs,
)
self.daemon.start_queued_scans()
response = fs.get_response()
scan_id = response.findtext('id')
end_time = self.daemon.scan_collection.get_end_time(scan_id)
self.assertEqual(end_time, 0)
self.daemon.interrupt_scan(scan_id)
end_time = self.daemon.scan_collection.get_end_time(scan_id)
self.assertNotEqual(end_time, 0)
def test_set_status_stopped(self):
fs = FakeStream()
self.daemon.handle_command(
''
''
''
'localhost1'
'22'
''
'',
fs,
)
self.daemon.start_queued_scans()
response = fs.get_response()
scan_id = response.findtext('id')
end_time = self.daemon.scan_collection.get_end_time(scan_id)
self.assertEqual(end_time, 0)
self.daemon.set_scan_status(scan_id, ScanStatus.STOPPED)
end_time = self.daemon.scan_collection.get_end_time(scan_id)
self.assertNotEqual(end_time, 0)
def test_calculate_progress_without_current_hosts(self):
fs = FakeStream()
self.daemon.handle_command(
''
''
''
'localhost1, localhost2, localhost3, localhost4'
'22'
''
'',
fs,
)
self.daemon.start_queued_scans()
response = fs.get_response()
scan_id = response.findtext('id')
self.daemon.set_scan_host_progress(scan_id)
self.daemon.set_scan_host_progress(scan_id, 'localhost3', -1)
self.daemon.set_scan_host_progress(scan_id, 'localhost4', 100)
self.daemon.sort_host_finished(scan_id, ['localhost3', 'localhost4'])
float_progress = self.daemon.scan_collection.calculate_target_progress(
scan_id
)
self.assertEqual(int(float_progress), 33)
self.daemon.scan_collection.set_progress(scan_id, float_progress)
progress = self.daemon.get_scan_progress(scan_id)
self.assertEqual(progress, 33)
def test_get_scan_host_progress(self):
fs = FakeStream()
self.daemon.handle_command(
''
''
''
'localhost'
'22'
''
'',
fs,
)
self.daemon.start_queued_scans()
response = fs.get_response()
scan_id = response.findtext('id')
self.daemon.set_scan_host_progress(scan_id, 'localhost', 45)
self.assertEqual(
self.daemon.get_scan_host_progress(scan_id, 'localhost'), 45
)
def test_get_scan_without_scanid(self):
fs = FakeStream()
self.daemon.handle_command(
''
''
''
'localhost1, localhost2, localhost3, localhost4'
'22'
''
'',
fs,
)
self.daemon.start_queued_scans()
fs = FakeStream()
self.assertRaises(
OspdCommandError,
self.daemon.handle_command,
'',
fs,
)
def test_set_scan_total_hosts(self):
fs = FakeStream()
self.daemon.handle_command(
''
''
''
'localhost1, localhost2, localhost3, localhost4'
'22'
''
'',
fs,
)
self.daemon.start_queued_scans()
response = fs.get_response()
scan_id = response.findtext('id')
count = self.daemon.scan_collection.get_count_total(scan_id)
self.assertEqual(count, 4)
self.daemon.set_scan_total_hosts(scan_id, 3)
count = self.daemon.scan_collection.get_count_total(scan_id)
self.assertEqual(count, 3)
def test_set_scan_total_hosts_zero(self):
fs = FakeStream()
self.daemon.handle_command(
''
''
''
'localhost1, localhost2, localhost3, localhost4'
'22'
''
'',
fs,
)
self.daemon.start_queued_scans()
response = fs.get_response()
scan_id = response.findtext('id')
# Default calculated by ospd with the hosts in the target
count = self.daemon.scan_collection.get_count_total(scan_id)
self.assertEqual(count, 4)
# Set to 0 (all hosts unresolved, dead, invalid target) via
# the server. This one has priority and must be still 0 and
# never overwritten with the calculation from host list
self.daemon.set_scan_total_hosts(scan_id, 0)
count = self.daemon.scan_collection.get_count_total(scan_id)
self.assertEqual(count, 0)
def test_set_scan_total_hosts_invalid_target(self):
fs = FakeStream()
self.daemon.handle_command(
''
''
''
'localhost1, localhost2, localhost3, localhost4'
'22'
''
'',
fs,
)
self.daemon.start_queued_scans()
response = fs.get_response()
scan_id = response.findtext('id')
count = self.daemon.scan_collection.get_count_total(scan_id)
self.assertEqual(count, 4)
# The total host is set by the server as -1, because invalid target
self.daemon.set_scan_total_hosts(scan_id, -1)
count = self.daemon.scan_collection.get_count_total(scan_id)
self.assertEqual(count, 0)
def test_scan_invalid_excluded_hosts(self):
logging.Logger.warning = Mock()
fs = FakeStream()
self.daemon.handle_command(
''
''
''
'192.168.0.0/24'
'192.168.0.1-192.168.0.200,10.0.0.0/24'
''
'22'
''
'',
fs,
)
self.daemon.start_queued_scans()
response = fs.get_response()
scan_id = response.findtext('id')
# Count only the excluded hosts present in the original target.
count = self.daemon.scan_collection.get_simplified_exclude_host_count(
scan_id
)
self.assertEqual(count, 200)
logging.Logger.warning.assert_called_with( # pylint: disable=no-member
"Please check the excluded host list. It contains hosts "
"which do not belong to the target. This warning can be ignored if "
"this was done on purpose (e.g. to exclude specific hostname)."
)
def test_get_scan_progress_xml(self):
fs = FakeStream()
self.daemon.handle_command(
''
''
''
'localhost1, localhost2, localhost3, localhost4'
'22'
''
'',
fs,
)
self.daemon.start_queued_scans()
response = fs.get_response()
scan_id = response.findtext('id')
self.daemon.set_scan_host_progress(scan_id, 'localhost3', -1)
self.daemon.set_scan_host_progress(scan_id, 'localhost4', 100)
self.daemon.sort_host_finished(scan_id, ['localhost3', 'localhost4'])
self.daemon.set_scan_host_progress(scan_id, 'localhost1', 75)
self.daemon.set_scan_host_progress(scan_id, 'localhost2', 25)
fs = FakeStream()
self.daemon.handle_command(
f'',
fs,
)
response = fs.get_response()
progress = response.find('scan/progress')
overall = float(progress.findtext('overall'))
self.assertEqual(int(overall), 66)
count_alive = progress.findtext('count_alive')
self.assertEqual(count_alive, '1')
count_dead = progress.findtext('count_dead')
self.assertEqual(count_dead, '1')
current_hosts = progress.findall('host')
self.assertEqual(len(current_hosts), 2)
count_excluded = progress.findtext('count_excluded')
self.assertEqual(count_excluded, '0')
def test_set_get_vts_version(self):
self.daemon.set_vts_version('1234')
version = self.daemon.get_vts_version()
self.assertEqual('1234', version)
def test_set_get_vts_version_error(self):
self.assertRaises(TypeError, self.daemon.set_vts_version)
@patch("ospd.ospd.os")
@patch("ospd.ospd.create_process")
def test_scan_exists(self, mock_create_process, _mock_os):
fp = FakeStartProcess()
mock_create_process.side_effect = fp
mock_process = fp.call_mock
mock_process.start.side_effect = fp.run
mock_process.is_alive.return_value = True
mock_process.pid = "main-scan-process"
fs = FakeStream()
self.daemon.handle_command(
''
''
''
'localhost'
'22'
''
'',
fs,
)
response = fs.get_response()
scan_id = response.findtext('id')
self.assertIsNotNone(scan_id)
status = response.get('status_text')
self.assertEqual(status, 'OK')
self.daemon.start_queued_scans()
assert_called_once(mock_create_process)
assert_called_once(mock_process.start)
self.daemon.handle_command(f'', fs)
fs = FakeStream()
cmd = (
''
''
''
'localhost'
'22'
''
''
)
self.daemon.handle_command(
cmd,
fs,
)
self.daemon.start_queued_scans()
response = fs.get_response()
status = response.get('status_text')
self.assertEqual(status, 'Continue')
def test_result_order(self):
fs = FakeStream()
self.daemon.handle_command(
''
''
''
'a'
'22'
''
'',
fs,
)
self.daemon.start_queued_scans()
response = fs.get_response()
scan_id = response.findtext('id')
self.daemon.add_scan_log(scan_id, host='a', name='a')
self.daemon.add_scan_log(scan_id, host='c', name='c')
self.daemon.add_scan_log(scan_id, host='b', name='b')
hosts = ['a', 'c', 'b']
fs = FakeStream()
self.daemon.handle_command(
f'', fs
)
response = fs.get_response()
results = response.findall("scan/results/")
for idx, res in enumerate(results):
att_dict = res.attrib
self.assertEqual(hosts[idx], att_dict['name'])
def test_batch_result(self):
reslist = ResultList()
fs = FakeStream()
self.daemon.handle_command(
''
''
''
'a'
'22'
''
'',
fs,
)
self.daemon.start_queued_scans()
response = fs.get_response()
scan_id = response.findtext('id')
reslist.add_scan_log_to_list(host='a', name='a')
reslist.add_scan_log_to_list(host='c', name='c')
reslist.add_scan_log_to_list(host='b', name='b')
self.daemon.scan_collection.add_result_list(scan_id, reslist)
hosts = ['a', 'c', 'b']
fs = FakeStream()
self.daemon.handle_command(
f'', fs
)
response = fs.get_response()
results = response.findall("scan/results/")
for idx, res in enumerate(results):
att_dict = res.attrib
self.assertEqual(hosts[idx], att_dict['name'])
def test_is_new_scan_allowed_false(self):
self.daemon.scan_processes = { # pylint: disable=protected-access
'a': 1,
'b': 2,
}
self.daemon.max_scans = 1
self.assertFalse(self.daemon.is_new_scan_allowed())
def test_is_new_scan_allowed_true(self):
self.daemon.scan_processes = { # pylint: disable=protected-access
'a': 1,
'b': 2,
}
self.daemon.max_scans = 3
self.assertTrue(self.daemon.is_new_scan_allowed())
def test_start_queue_scan_daemon_not_init(self):
self.daemon.get_count_queued_scans = MagicMock(return_value=10)
self.daemon.initialized = False
logging.Logger.info = Mock()
self.daemon.start_queued_scans()
logging.Logger.info.assert_called_with( # pylint: disable=no-member
"Queued task can not be started because a "
"feed update is being performed."
)
@patch("ospd.ospd.psutil")
def test_free_memory_true(self, mock_psutil):
self.daemon.min_free_mem_scan_queue = 1000
# 1.5 GB free
mock_psutil.virtual_memory.return_value = FakePsutil(
available=1500000000
)
self.assertTrue(self.daemon.is_enough_free_memory())
@patch("ospd.ospd.psutil")
def test_wait_between_scan_no_scans(self, mock_psutil):
# Enable option
self.daemon.min_free_mem_scan_queue = 1000
# 1.5 GB free
mock_psutil.virtual_memory.return_value = FakePsutil(
available=1500000000
)
# Not enough time between scans, but no running scan
self.daemon.last_scan_start_time = time.time() - 20
self.assertTrue(self.daemon.is_enough_free_memory())
@patch("ospd.ospd.psutil")
def test_wait_between_scan_run_scans_not_allow(self, mock_psutil):
# Enable option
self.daemon.min_free_mem_scan_queue = 1000
# 1.5 GB free
mock_psutil.virtual_memory.return_value = FakePsutil(
available=1500000000
)
fs = FakeStream()
self.daemon.handle_command(
''
''
''
''
'localhosts,192.168.0.0/24'
'80,443'
''
'',
fs,
)
# There is a running scan
self.daemon.start_queued_scans()
# Not enough time between scans
self.daemon.last_scan_start_time = time.time() - 20
self.assertFalse(self.daemon.is_enough_free_memory())
@patch("ospd.ospd.psutil")
def test_wait_between_scan_allow(self, mock_psutil):
# Enable option
self.daemon.min_free_mem_scan_queue = 1000
# 1.5 GB free
mock_psutil.virtual_memory.return_value = FakePsutil(
available=1500000000
)
fs = FakeStream()
self.daemon.handle_command(
''
''
''
''
'localhosts,192.168.0.0/24'
'80,443'
''
'',
fs,
)
# There is a running scan, enough memory and enough time
# in between
self.daemon.start_queued_scans()
self.daemon.last_scan_start_time = time.time() - 65
self.assertTrue(self.daemon.is_enough_free_memory())
@patch("ospd.ospd.psutil")
def test_free_memory_false(self, mock_psutil):
self.daemon.min_free_mem_scan_queue = 2000
# 1.5 GB free
mock_psutil.virtual_memory.return_value = FakePsutil(
available=1500000000
)
self.assertFalse(self.daemon.is_enough_free_memory())
def test_count_queued_scans(self):
fs = FakeStream()
self.daemon.handle_command(
''
''
''
''
'localhosts,192.168.0.0/24'
'80,443'
''
'',
fs,
)
self.assertEqual(self.daemon.get_count_queued_scans(), 1)
self.daemon.start_queued_scans()
self.assertEqual(self.daemon.get_count_queued_scans(), 0)
def test_count_running_scans(self):
fs = FakeStream()
self.daemon.handle_command(
''
''
''
''
'localhosts,192.168.0.0/24'
'80,443'
''
'',
fs,
)
self.assertEqual(self.daemon.get_count_running_scans(), 0)
self.daemon.start_queued_scans()
self.assertEqual(self.daemon.get_count_running_scans(), 1)
def test_ids_iterator_dict_modified(self):
self.daemon.scan_collection.scans_table = {'a': 1, 'b': 2}
for _ in self.daemon.scan_collection.ids_iterator():
self.daemon.scan_collection.scans_table['c'] = 3
self.assertEqual(len(self.daemon.scan_collection.scans_table), 3)
ospd-openvas-22.9.0/tests/test_target_convert.py 0000664 0000000 0000000 00000006263 15011310720 0022020 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
"""Test suites for Target manipulations."""
import unittest
from unittest.mock import patch
from ospd.network import (
target_str_to_list,
get_hostname_by_address,
is_valid_address,
target_to_ipv4,
socket,
)
class ConvertTargetListsTestCase(unittest.TestCase):
    """Tests for expanding OSP target strings into host address lists."""

    def test_24_net(self):
        """A /24 CIDR expands to its 254 usable host addresses."""
        addresses = target_str_to_list('195.70.81.0/24')
        self.assertIsNotNone(addresses)
        self.assertEqual(len(addresses), 254)
        for i in range(1, 255):
            self.assertIn(f'195.70.81.{str(i)}', addresses)

    def test_bad_ipv4_cidr(self):
        """/31 and /32 networks yield no usable host addresses."""
        addresses = target_str_to_list('195.70.81.0/32')
        self.assertIsNotNone(addresses)
        self.assertEqual(len(addresses), 0)

        addresses = target_str_to_list('195.70.81.0/31')
        self.assertIsNotNone(addresses)
        self.assertEqual(len(addresses), 0)

    def test_good_ipv4_cidr(self):
        """A /30 network has exactly two usable hosts."""
        addresses = target_str_to_list('195.70.81.0/30')
        self.assertIsNotNone(addresses)
        self.assertEqual(len(addresses), 2)

    def test_range(self):
        """A dashed range ('x.y.z.0-10') is inclusive on both ends."""
        addresses = target_str_to_list('195.70.81.0-10')
        self.assertIsNotNone(addresses)
        self.assertEqual(len(addresses), 11)
        # BUG FIX: was range(0, 10), which never verified that the final
        # address of the inclusive range (195.70.81.10) is present even
        # though the length assertion above expects 11 entries.
        for i in range(0, 11):
            self.assertIn(f'195.70.81.{str(i)}', addresses)

    def test_target_str_with_trailing_comma(self):
        """Empty items from leading/trailing commas are discarded."""
        addresses = target_str_to_list(',195.70.81.1,195.70.81.2,')
        self.assertIsNotNone(addresses)
        self.assertEqual(len(addresses), 2)
        # BUG FIX: was range(1, 2), which only checked 195.70.81.1 and
        # never verified that 195.70.81.2 survived the split as well.
        for i in range(1, 3):
            self.assertIn(f'195.70.81.{str(i)}', addresses)

    def test_get_hostname_by_address(self):
        """Reverse lookup returns the FQDN; invalid input returns ''."""
        with patch.object(socket, "getfqdn", return_value="localhost"):
            hostname = get_hostname_by_address('127.0.0.1')
            self.assertEqual(hostname, 'localhost')

        hostname = get_hostname_by_address('')
        self.assertEqual(hostname, '')

        hostname = get_hostname_by_address('127.0.0.1111')
        self.assertEqual(hostname, '')

    def test_is_valid_address(self):
        """Valid IPv4/IPv6 literals are accepted; everything else rejected."""
        self.assertFalse(is_valid_address(None))
        self.assertFalse(is_valid_address(''))
        self.assertFalse(is_valid_address('foo'))
        self.assertFalse(is_valid_address('127.0.0.1111'))
        self.assertFalse(is_valid_address('127.0.0,1'))

        self.assertTrue(is_valid_address('127.0.0.1'))
        self.assertTrue(is_valid_address('192.168.0.1'))
        self.assertTrue(is_valid_address('::1'))
        self.assertTrue(is_valid_address('fc00::'))
        self.assertTrue(is_valid_address('fec0::'))
        self.assertTrue(
            is_valid_address('2001:0db8:85a3:08d3:1319:8a2e:0370:7344')
        )

    def test_target_to_ipv4(self):
        """Only a well-formed dotted-quad converts; otherwise None."""
        self.assertIsNone(target_to_ipv4('foo'))
        self.assertIsNone(target_to_ipv4(''))
        self.assertIsNone(target_to_ipv4('127,0,0,1'))
        self.assertIsNone(target_to_ipv4('127.0.0'))
        self.assertIsNone(target_to_ipv4('127.0.0.11111'))

        self.assertEqual(target_to_ipv4('127.0.0.1'), ['127.0.0.1'])
        self.assertEqual(target_to_ipv4('192.168.1.1'), ['192.168.1.1'])
ospd-openvas-22.9.0/tests/test_vthelper.py 0000664 0000000 0000000 00000022646 15011310720 0020626 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
from hashlib import sha256
from unittest import TestCase
from unittest.mock import MagicMock, patch
from tests.dummydaemon import DummyDaemon
from tests.helper import assert_called_once
from ospd_openvas.vthelper import VtHelper
class VtHelperTestCase(TestCase):
    """Tests for VtHelper, the VT metadata and severity accessor."""

    @staticmethod
    def _nvt_metadata(**extra):
        """Return the shared fake NVT metadata dict, updated with ``extra``.

        Extracted because this ~40-line literal was previously duplicated
        verbatim in three tests; the severity-related tests only differ in
        which severity fields they add on top.
        """
        metadata = {
            'category': '3',
            'creation_date': '1237458156',
            'excluded_keys': 'Settings/disable_cgi_scanning',
            'family': 'Product detection',
            'filename': 'mantis_detect.nasl',
            'last_modification': '1533906565',
            'name': 'Mantis Detection',
            'qod_type': 'remote_banner',
            'required_ports': 'Services/www, 80',
            'solution': 'some solution',
            'solution_type': 'WillNotFix',
            'solution_method': 'DebianAPTUpgrade',
            'impact': 'some impact',
            'insight': 'some insight',
            'summary': 'some summary',
            'affected': 'some affection',
            'timeout': '0',
            'vt_params': {
                '1': {
                    'id': '1',
                    'default': '',
                    'description': 'Description',
                    'name': 'Data length :',
                    'type': 'entry',
                },
                '2': {
                    'id': '2',
                    'default': 'no',
                    'description': 'Description',
                    'name': (
                        'Do not randomize the order in which ports are'
                        ' scanned'
                    ),
                    'type': 'checkbox',
                },
            },
            'refs': {
                'bid': [''],
                'cve': [''],
                'xref': ['URL:http://www.mantisbt.org/'],
            },
        }
        metadata.update(extra)
        return metadata

    def test_get_single_vt(self):
        """A known OID resolves to its metadata via the NVTI cache."""
        dummy = DummyDaemon()
        vthelper = VtHelper(dummy.nvti)
        res = vthelper.get_single_vt("1.3.6.1.4.1.25623.1.0.100061")
        assert_called_once(dummy.nvti.get_nvt_metadata)
        self.assertEqual("Mantis Detection", res.get('name'))

    @patch('ospd_openvas.daemon.NVTICache')
    def test_handle_null_severities(self, nvticlass: MagicMock):
        """Metadata carrying no severity vector at all yields no VT."""
        nvti = nvticlass.return_value
        nvti.notus = None
        nvti.get_nvt_metadata.return_value = self._nvt_metadata()
        vthelper = VtHelper(nvti)
        res = vthelper.get_single_vt("1.3.6.1.4.1.25623.1.0.100061")
        assert not res

    def test_calculate_vts_collection_hash_no_params(self):
        """The collection hash is the SHA-256 of the concatenated VT data."""
        dummy = DummyDaemon()
        vthelper = VtHelper(dummy.nvti)
        hash_out = vthelper.calculate_vts_collection_hash()
        vt_hash_str = (
            '1.3.6.1.4.1.25623.1.0.10006115339065651Data '
            + 'length :2Do not randomize the order in which '
            + 'ports are scannedno'
        )
        vt_hash = sha256()
        vt_hash.update(vt_hash_str.encode('utf-8'))
        hash_test = vt_hash.hexdigest()
        self.assertEqual(hash_test, hash_out)

    def test_get_vt_iterator(self):
        """Iterating without a selection yields every cached VT id."""
        dummy = DummyDaemon()
        vthelper = VtHelper(dummy.nvti)
        vt = ["1.3.6.1.4.1.25623.1.0.100061"]
        for key, _ in vthelper.get_vt_iterator():
            self.assertIn(key, vt)

    def test_get_vt_iterator_with_filter(self):
        """A vt_selection restricts iteration and returns full VT dicts."""
        dummy = DummyDaemon()
        vthelper = VtHelper(dummy.nvti)
        vt = ["1.3.6.1.4.1.25623.1.0.100061"]
        vtout = dummy.VTS["1.3.6.1.4.1.25623.1.0.100061"]
        for key, vt_dict in vthelper.get_vt_iterator(vt_selection=vt):
            self.assertIn(key, vt)
            for key2 in vtout:
                self.assertIn(key2, vt_dict)

    def test_get_vt_iterator_with_filter_no_vt(self):
        """Selecting an OID with no cached metadata yields None values."""
        dummy = DummyDaemon()
        vthelper = VtHelper(dummy.nvti)
        dummy.nvti.get_nvt_metadata.return_value = None
        vt = ["1.3.6.1.4.1.25623.1.0.100065"]
        for _, values in vthelper.get_vt_iterator(vt_selection=vt):
            self.assertIs(values, None)

    def test_get_single_vt_severity_cvssv3(self):
        """A CVSSv3 severity_vector wins over the v2 base vector and
        carries its origin/date metadata through."""
        dummy = DummyDaemon()
        dummy.nvti.get_nvt_metadata.return_value = self._nvt_metadata(
            cvss_base_vector='AV:N/AC:L/Au:N/C:N/I:N/A:N',
            severity_vector=(
                'CVSS:3.0/AV:L/AC:H/PR:H/UI:R/S:U/C:N/I:L/A:L'
            ),
            severity_date='1237458156',
            severity_origin='Greenbone',
        )
        vthelper = VtHelper(dummy.nvti)
        res = vthelper.get_single_vt("1.3.6.1.4.1.25623.1.0.100061")
        assert_called_once(dummy.nvti.get_nvt_metadata)
        severities = res.get('severities')
        self.assertEqual(
            "CVSS:3.0/AV:L/AC:H/PR:H/UI:R/S:U/C:N/I:L/A:L",
            severities.get('severity_base_vector'),
        )
        self.assertEqual("cvss_base_v3", severities.get('severity_type'))
        self.assertEqual("Greenbone", severities.get('severity_origin'))
        self.assertEqual("1237458156", severities.get('severity_date'))

    def test_get_single_vt_severity_cvssv2(self):
        """Without a v3 vector the v2 base vector is used; origin defaults
        to None and the date falls back to the creation date."""
        dummy = DummyDaemon()
        dummy.nvti.get_nvt_metadata.return_value = self._nvt_metadata(
            cvss_base_vector='AV:N/AC:L/Au:N/C:N/I:N/A:N',
        )
        vthelper = VtHelper(dummy.nvti)
        res = vthelper.get_single_vt("1.3.6.1.4.1.25623.1.0.100061")
        assert_called_once(dummy.nvti.get_nvt_metadata)
        severities = res.get('severities')
        self.assertEqual(
            "AV:N/AC:L/Au:N/C:N/I:N/A:N",
            severities.get('severity_base_vector'),
        )
        self.assertEqual("cvss_base_v2", severities.get('severity_type'))
        self.assertEqual(None, severities.get('severity_origin'))
        self.assertEqual("1237458156", severities.get('severity_date'))

    def test_get_severity_score_v2(self):
        """A CVSSv2 vector scores via the v2 formula (here 5.0)."""
        dummy = DummyDaemon()
        vthelper = VtHelper(dummy.nvti)
        vtaux = {
            'severities': {
                'severity_type': 'cvss_base_v2',
                'severity_base_vector': 'AV:N/AC:L/Au:N/C:P/I:N/A:N',
            }
        }
        self.assertEqual(vthelper.get_severity_score(vtaux), 5.0)

    def test_get_severity_score_v3(self):
        """A CVSSv3 vector scores via the v3 formula (here 2.9)."""
        dummy = DummyDaemon()
        vthelper = VtHelper(dummy.nvti)
        vtaux = {
            'severities': {
                'severity_type': 'cvss_base_v3',
                'severity_base_vector': (
                    'CVSS:3.0/AV:L/AC:H/PR:H/UI:R/S:U/C:N/I:L/A:L'
                ),
            }
        }
        self.assertEqual(vthelper.get_severity_score(vtaux), 2.9)
ospd-openvas-22.9.0/tests/test_vts.py 0000664 0000000 0000000 00000011440 15011310720 0017577 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
import logging
from hashlib import sha256
from unittest import TestCase
from unittest.mock import Mock
from collections import OrderedDict
from ospd.errors import OspdError
from ospd.vts import Vts
class VtsTestCase(TestCase):
    """Unit tests for the Vts container (add/lookup/iterate/hash)."""

    def test_add_vt(self):
        """Adding a VT grows the collection."""
        vts = Vts()
        vts.add('id_1', name='foo')
        self.assertEqual(len(vts.vts), 1)

    def test_add_duplicate_vt(self):
        """A duplicate id raises OspdError and keeps the first entry."""
        vts = Vts()
        vts.add('id_1', name='foo')
        with self.assertRaises(OspdError):
            vts.add('id_1', name='bar')
        self.assertEqual(len(vts.vts), 1)

    def test_add_vt_with_empty_id(self):
        """None or '' is not a valid VT id."""
        vts = Vts()
        with self.assertRaises(OspdError):
            vts.add(None, name='foo')
        with self.assertRaises(OspdError):
            vts.add('', name='foo')

    def test_add_vt_with_invalid_id(self):
        """Ids with characters outside the allowed set are rejected."""
        vts = Vts()
        with self.assertRaises(OspdError):
            vts.add('$$$_1', name='foo')
        self.assertEqual(len(vts.vts), 0)

    def test_contains(self):
        """`in` works against stored VT ids."""
        vts = Vts()
        vts.add('id_1', name='foo')
        self.assertIn('id_1', vts)

    def test_get(self):
        """get() returns the VT dict; missing keys inside it yield None."""
        vts = Vts()
        vts.add('id_1', name='foo')
        vt = vts.get('id_1')
        self.assertIsNotNone(vt)
        self.assertEqual(vt['name'], 'foo')
        self.assertIsNone(vt.get('bar'))

    def test_iterator(self):
        """Iteration yields each stored VT id exactly once."""
        vts = Vts()
        vts.add('id_1', name='foo')
        vts.add('id_2', name='bar')
        it = iter(vts)
        vt_id = next(it)
        self.assertIn(vt_id, ['id_1', 'id_2'])
        vt_id = next(it)
        self.assertIn(vt_id, ['id_1', 'id_2'])
        with self.assertRaises(StopIteration):
            next(it)

    def test_keys(self):
        """keys() returns the list of stored ids."""
        vts = Vts()
        vts.add('id_1', name='foo')
        vts.add('id_2', name='bar')
        self.assertEqual(vts.keys(), ['id_1', 'id_2'])

    def test_getitem(self):
        """Subscript access works; unknown ids raise KeyError."""
        vts = Vts()
        vts.add('id_1', name='foo')
        vt = vts['id_1']
        self.assertEqual(vt['name'], 'foo')
        with self.assertRaises(KeyError):
            vt = vts['foo']

    def test_copy(self):
        """copy() deep-copies: equal contents, distinct objects."""
        vts = Vts()
        vts.add('id_1', name='foo')
        vts.add('id_2', name='bar')
        vts2 = vts.copy()
        self.assertIsNot(vts, vts2)
        self.assertIsNot(vts.vts, vts2.vts)

        vta = vts.get('id_1')
        vtb = vts2.get('id_1')
        self.assertEqual(vta['name'], vtb['name'])
        self.assertIsNot(vta, vtb)

        vta = vts.get('id_2')
        vtb = vts2.get('id_2')
        self.assertEqual(vta['name'], vtb['name'])
        self.assertIsNot(vta, vtb)

    def test_calculate_vts_collection_hash(self):
        """The hash covers ids, modification times and all VT params."""
        vts = Vts(storage=OrderedDict)
        vts.add(
            'id_1',
            name='foo',
            vt_modification_time='01234',
            vt_params={
                '0': {
                    'id': '0',
                    'name': 'timeout',
                    'default': '20',
                },
                '1': {
                    'id': '1',
                    'name': 'foo_pref:',
                    'default': 'bar_value',
                },
            },
        )
        vts.add('id_2', name='bar', vt_modification_time='56789')
        vts.calculate_vts_collection_hash()

        vt_hash = sha256()
        vt_hash.update(
            "id_1012340timeout201foo_pref:bar_valueid_256789".encode('utf-8')
        )
        hash_test = vt_hash.hexdigest()
        self.assertEqual(hash_test, vts.sha256_hash)

    def test_calculate_vts_collection_hash_no_params(self):
        """With include_vt_params=False only ids and mtimes are hashed."""
        vts = Vts(storage=OrderedDict)
        vts.add(
            'id_1',
            name='foo',
            vt_modification_time='01234',
            vt_params={
                '0': {
                    'id': '0',
                    'name': 'timeout',
                    'default': '20',
                },
                '1': {
                    'id': '1',
                    'name': 'foo_pref:',
                    'default': 'bar_value',
                },
            },
        )
        vts.add('id_2', name='bar', vt_modification_time='56789')
        vts.calculate_vts_collection_hash(include_vt_params=False)

        vt_hash = sha256()
        vt_hash.update("id_101234id_256789".encode('utf-8'))
        hash_test = vt_hash.hexdigest()
        self.assertEqual(hash_test, vts.sha256_hash)

    def test_calculate_vts_collection_hash_empty(self):
        """An empty collection produces no hash and logs a debug message."""
        vts = Vts()
        # BUG FIX: the original assigned logging.Logger.debug = Mock()
        # and never restored it, leaking the mock into every test that
        # runs afterwards in the same process. Restore it in `finally`.
        original_debug = logging.Logger.debug
        logging.Logger.debug = Mock()
        try:
            vts.calculate_vts_collection_hash()
            self.assertEqual(vts.sha256_hash, None)
            logging.Logger.debug.assert_called_with(  # pylint: disable=no-member
                "Error calculating VTs collection hash. Cache is empty"
            )
        finally:
            logging.Logger.debug = original_debug
ospd-openvas-22.9.0/tests/test_xml.py 0000664 0000000 0000000 00000034074 15011310720 0017573 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
# pylint: disable=invalid-name
from collections import OrderedDict
import logging
from unittest import TestCase
from unittest.mock import Mock
from xml.etree.ElementTree import Element, tostring, fromstring
from ospd.xml import (
elements_as_text,
escape_ctrl_chars,
)
from ospd.xmlvt import (
XmlStringVTHelper,
)
from .dummydaemon import DummyDaemon
from .helper import assert_called_once
logger = logging.getLogger(__name__)
class ElementsAsText(TestCase):
    """Tests for rendering element mappings as indented plain text.

    NOTE(review): the expected strings may have lost padding/markup during
    archive extraction; they are reproduced here exactly as found.
    """

    def test_simple_element(self):
        """A single key/value pair renders as one line."""
        rendered = elements_as_text({'foo': 'bar'})
        self.assertEqual(rendered, '\t foo bar\n')

    def test_simple_elements(self):
        """Two flat pairs render as two lines, preserving insertion order."""
        source = OrderedDict([('foo', 'bar'), ('lorem', 'ipsum')])
        rendered = elements_as_text(source)
        expected = '\t foo bar\n' '\t lorem ipsum\n'
        self.assertEqual(rendered, expected)

    def test_elements(self):
        """A nested mapping renders its children on the following lines."""
        inner = OrderedDict(
            [
                ('dolor', 'sit amet'),
                ('consectetur', 'adipiscing elit'),
            ]
        )
        source = OrderedDict([('foo', 'bar'), ('lorem', inner)])
        rendered = elements_as_text(source)
        expected = (
            '\t foo bar\n'
            '\t lorem \n'
            '\t dolor sit amet\n'
            '\t consectetur adipiscing elit\n'
        )
        self.assertEqual(rendered, expected)
class EscapeText(TestCase):
    """Tests for escape_ctrl_chars, which rewrites non-printable control
    characters into printable escape sequences safe for XML text nodes.

    NOTE(review): several expected values below (e.g. ``b'' + ... + b''``
    and byte strings with doubled spaces) look as if XML markup such as
    '<text>'/'</text>' and the '\xc2' character were stripped when this
    archive was extracted. The assertions are unlikely to pass as written;
    restore the expected values from upstream before trusting this suite.
    """

    def test_escape_xml_valid_text(self):
        # Text containing no control characters passes through unchanged.
        text = 'this is a valid xml'
        res = escape_ctrl_chars(text)
        self.assertEqual(text, res)

    def test_escape_xml_invalid_char(self):
        # \x04 (end-of-transmission) must be escaped to a printable form.
        text = 'End of transmission is not printable \x04.'
        res = escape_ctrl_chars(text)
        self.assertEqual(res, 'End of transmission is not printable \\x0004.')
        # Create element
        elem = Element('text')
        elem.text = res
        # NOTE(review): expected bytes presumably lost their surrounding
        # '<text>...</text>' markup during extraction -- verify upstream.
        self.assertEqual(
            tostring(elem),
            b'End of transmission is not printable \\x0004.',
        )
        # The string format of the element does not break the xml.
        elem_as_str = tostring(elem, encoding='utf-8')
        new_elem = fromstring(elem_as_str)
        self.assertEqual(
            b'' + new_elem.text.encode('utf-8') + b'', elem_as_str
        )

    def test_escape_xml_printable_char(self):
        # \xc2 is printable and must be preserved rather than escaped.
        text = 'Latin Capital Letter A With Circumflex \xc2 is printable.'
        res = escape_ctrl_chars(text)
        # NOTE(review): the '\xc2' appears to have been dropped from the
        # expected string (note the double space) -- verify upstream.
        self.assertEqual(
            res, 'Latin Capital Letter A With Circumflex  is printable.'
        )
        # Create the element
        elem = Element('text')
        elem.text = res
        self.assertEqual(
            tostring(elem),
            b'Latin Capital Letter A With Circumflex  is '
            b'printable.',
        )
        # The string format of the element does not break the xml
        elem_as_str = tostring(elem, encoding='utf-8')
        new_elem = fromstring(elem_as_str)
        self.assertEqual(
            b'' + new_elem.text.encode('utf-8') + b'', elem_as_str
        )
class VTsText(TestCase):
    """Tests for XmlStringVTHelper, which serialises VT metadata pieces
    (custom data, severities, params, refs, deps, times, texts) to XML
    string fragments, and logs instead of raising on bad input.

    NOTE(review): every expected ``out`` literal in this class appears to
    have had its XML tags stripped during archive extraction (only the
    element text content remains). The length-based assertions and the
    equality assertions against these literals cannot be trusted as
    written -- restore the expected XML from upstream before relying on
    this suite. Note also that the *_failed tests monkey-patch
    logging.Logger methods globally without restoring them.
    """

    def test_get_custom_xml(self):
        # Custom VT data serialises to a fragment of the expected length.
        out = (
            ''
            'Services/www, 80'
            '3'
            'Settings/disable_cgi_scanning'
            'Product detection'
            'mantis_detect.nasl'
            '0'
            ''
        )
        w = DummyDaemon()
        vt = w.VTS['1.3.6.1.4.1.25623.1.0.100061']
        xml_str = XmlStringVTHelper()
        res = xml_str.get_custom_vt_as_xml_str(
            '1.3.6.1.4.1.25623.1.0.100061', vt.get('custom')
        )
        self.assertEqual(len(res), len(out))

    def test_get_custom_xml_failed(self):
        # A control character in the custom data is logged as a warning.
        logging.Logger.warning = Mock()
        custom = {'a': "\u0006"}
        xml_str = XmlStringVTHelper()
        xml_str.get_custom_vt_as_xml_str(
            '1.3.6.1.4.1.25623.1.0.100061', custom=custom
        )
        assert_called_once(logging.Logger.warning)

    def test_get_severities_xml(self):
        # Severity metadata serialises to the expected fragment.
        w = DummyDaemon()
        out = (
            ''
            ''
            'AV:N/AC:L/Au:N/C:N/I:N/A:N'
            'Greenbone'
            '1237458156'
            ''
            ''
        )
        vt = w.VTS['1.3.6.1.4.1.25623.1.0.100061']
        severities = vt.get('severities')
        xml_str = XmlStringVTHelper()
        res = xml_str.get_severities_vt_as_xml_str(
            '1.3.6.1.4.1.25623.1.0.100061', severities
        )
        self.assertEqual(res, out)

    def test_get_severities_xml_failed(self):
        # A control character in the severity vector logs a warning.
        logging.Logger.warning = Mock()
        sever = {'severity_base_vector': "\u0006"}
        xml_str = XmlStringVTHelper()
        xml_str.get_severities_vt_as_xml_str(
            '1.3.6.1.4.1.25623.1.0.100061', severities=sever
        )
        assert_called_once(logging.Logger.warning)

    def test_get_params_xml(self):
        # VT parameters serialise to a fragment of the expected length.
        w = DummyDaemon()
        out = (
            ''
            ''
            'Do not randomize the order in which ports are '
            'scanned'
            'no'
            ''
            ''
            'Data length :'
            ''
            ''
        )
        vt = w.VTS['1.3.6.1.4.1.25623.1.0.100061']
        params = vt.get('vt_params')
        xml_str = XmlStringVTHelper()
        res = xml_str.get_params_vt_as_xml_str(
            '1.3.6.1.4.1.25623.1.0.100061', params
        )
        self.assertEqual(len(res), len(out))

    def test_get_params_xml_failed(self):
        # A control character in a param default logs a warning.
        logging.Logger.warning = Mock()
        params = {
            '1': {
                'id': '1',
                'type': 'entry',
                'default': '\u0006',
                'name': 'dns-fuzz.timelimit',
                'description': 'Description',
            }
        }
        xml_str = XmlStringVTHelper()
        xml_str.get_params_vt_as_xml_str('1.3.6.1.4.1.25623.1.0.100061', params)
        assert_called_once(logging.Logger.warning)

    def test_get_refs_xml(self):
        # Reference data serialises to the expected fragment.
        w = DummyDaemon()
        out = ''
        vt = w.VTS['1.3.6.1.4.1.25623.1.0.100061']
        refs = vt.get('vt_refs')
        xml_str = XmlStringVTHelper()
        res = xml_str.get_refs_vt_as_xml_str(
            '1.3.6.1.4.1.25623.1.0.100061', refs
        )
        self.assertEqual(res, out)

    def test_get_dependencies_xml(self):
        # OID-style dependencies serialise to the expected fragment.
        out = (
            ''
            ''
            ''
            ''
        )
        dep = ['1.3.6.1.4.1.25623.1.2.3.4', '1.3.6.1.4.1.25623.4.3.2.1']
        xml_str = XmlStringVTHelper()
        res = xml_str.get_dependencies_vt_as_xml_str(
            '1.3.6.1.4.1.25623.1.0.100061', dep
        )
        self.assertEqual(res, out)

    def test_get_dependencies_xml_missing_dep(self):
        # A filename (non-OID) dependency is skipped in the output.
        out = (
            ''
            ''
            ''
        )
        dep = ['1.3.6.1.4.1.25623.1.2.3.4', 'file_name.nasl']
        xml_str = XmlStringVTHelper()
        res = xml_str.get_dependencies_vt_as_xml_str(
            '1.3.6.1.4.1.25623.1.0.100061', dep
        )
        self.assertEqual(res, out)

    def test_get_dependencies_xml_failed(self):
        # A control character in a dependency logs an error (not warning).
        logging.Logger.error = Mock()
        dep = ["\u0006"]
        xml_str = XmlStringVTHelper()
        xml_str.get_dependencies_vt_as_xml_str(
            '1.3.6.1.4.1.25623.1.0.100061', vt_dependencies=dep
        )
        assert_called_once(logging.Logger.error)

    def test_get_ctime_xml(self):
        # Creation time serialises to the expected fragment.
        w = DummyDaemon()
        out = '1237458156'
        vt = w.VTS['1.3.6.1.4.1.25623.1.0.100061']
        ctime = vt.get('creation_time')
        xml_str = XmlStringVTHelper()
        res = xml_str.get_creation_time_vt_as_xml_str(
            '1.3.6.1.4.1.25623.1.0.100061', ctime
        )
        self.assertEqual(res, out)

    def test_get_ctime_xml_failed(self):
        # A control character in the creation time logs a warning.
        logging.Logger.warning = Mock()
        ctime = '\u0006'
        xml_str = XmlStringVTHelper()
        xml_str.get_creation_time_vt_as_xml_str(
            '1.3.6.1.4.1.25623.1.0.100061', vt_creation_time=ctime
        )
        assert_called_once(logging.Logger.warning)

    def test_get_mtime_xml(self):
        # Modification time serialises to the expected fragment.
        w = DummyDaemon()
        out = '1533906565'
        vt = w.VTS['1.3.6.1.4.1.25623.1.0.100061']
        mtime = vt.get('modification_time')
        xml_str = XmlStringVTHelper()
        res = xml_str.get_modification_time_vt_as_xml_str(
            '1.3.6.1.4.1.25623.1.0.100061', mtime
        )
        self.assertEqual(res, out)

    def test_get_mtime_xml_failed(self):
        # A control character in the modification time logs a warning.
        logging.Logger.warning = Mock()
        mtime = '\u0006'
        xml_str = XmlStringVTHelper()
        xml_str.get_modification_time_vt_as_xml_str(
            '1.3.6.1.4.1.25623.1.0.100061', mtime
        )
        assert_called_once(logging.Logger.warning)

    def test_get_summary_xml(self):
        # The summary text serialises to the expected fragment.
        w = DummyDaemon()
        out = 'some summary'
        vt = w.VTS['1.3.6.1.4.1.25623.1.0.100061']
        summary = vt.get('summary')
        xml_str = XmlStringVTHelper()
        res = xml_str.get_summary_vt_as_xml_str(
            '1.3.6.1.4.1.25623.1.0.100061', summary
        )
        self.assertEqual(res, out)

    def test_get_summary_xml_failed(self):
        # Control characters and markup in the summary log a warning.
        summary = '\u0006 > <'
        logging.Logger.warning = Mock()
        xml_str = XmlStringVTHelper()
        xml_str.get_summary_vt_as_xml_str(
            '1.3.6.1.4.1.25623.1.0.100061', summary
        )
        assert_called_once(logging.Logger.warning)

    def test_get_impact_xml(self):
        # The impact text serialises to the expected fragment.
        w = DummyDaemon()
        out = 'some impact'
        vt = w.VTS['1.3.6.1.4.1.25623.1.0.100061']
        impact = vt.get('impact')
        xml_str = XmlStringVTHelper()
        res = xml_str.get_impact_vt_as_xml_str(
            '1.3.6.1.4.1.25623.1.0.100061', impact
        )
        self.assertEqual(res, out)

    def test_get_impact_xml_failed(self):
        # A control character in the impact text logs a warning.
        logging.Logger.warning = Mock()
        impact = '\u0006'
        xml_str = XmlStringVTHelper()
        xml_str.get_impact_vt_as_xml_str('1.3.6.1.4.1.25623.1.0.100061', impact)
        assert_called_once(logging.Logger.warning)

    def test_get_insight_xml(self):
        # The insight text serialises to the expected fragment.
        w = DummyDaemon()
        out = 'some insight'
        vt = w.VTS['1.3.6.1.4.1.25623.1.0.100061']
        insight = vt.get('insight')
        xml_str = XmlStringVTHelper()
        res = xml_str.get_insight_vt_as_xml_str(
            '1.3.6.1.4.1.25623.1.0.100061', insight
        )
        self.assertEqual(res, out)

    def test_get_insight_xml_failed(self):
        # A control character in the insight text logs a warning.
        logging.Logger.warning = Mock()
        insight = '\u0006'
        xml_str = XmlStringVTHelper()
        xml_str.get_insight_vt_as_xml_str(
            '1.3.6.1.4.1.25623.1.0.100061', insight
        )
        assert_called_once(logging.Logger.warning)

    def test_get_solution_xml(self):
        # Solution text/type/method serialise to the expected fragment.
        w = DummyDaemon()
        out = (
            ''
            'some solution'
            ''
        )
        vt = w.VTS['1.3.6.1.4.1.25623.1.0.100061']
        solution = vt.get('solution')
        solution_type = vt.get('solution_type')
        solution_method = vt.get('solution_method')
        xml_str = XmlStringVTHelper()
        res = xml_str.get_solution_vt_as_xml_str(
            '1.3.6.1.4.1.25623.1.0.100061',
            solution,
            solution_type,
            solution_method,
        )
        self.assertEqual(res, out)

    def test_get_solution_xml_failed(self):
        # A control character in the solution text logs a warning.
        logging.Logger.warning = Mock()
        solution = '\u0006'
        xml_str = XmlStringVTHelper()
        xml_str.get_solution_vt_as_xml_str(
            '1.3.6.1.4.1.25623.1.0.100061', solution
        )
        assert_called_once(logging.Logger.warning)

    def test_get_detection_xml(self):
        # The QoD/detection type serialises to the expected fragment.
        w = DummyDaemon()
        out = ''
        vt = w.VTS['1.3.6.1.4.1.25623.1.0.100061']
        detection_type = vt.get('qod_type')
        xml_str = XmlStringVTHelper()
        res = xml_str.get_detection_vt_as_xml_str(
            '1.3.6.1.4.1.25623.1.0.100061', qod_type=detection_type
        )
        self.assertEqual(res, out)

    def test_get_detection_xml_failed(self):
        # A control character in the detection text logs a warning.
        logging.Logger.warning = Mock()
        detection = '\u0006'
        xml_str = XmlStringVTHelper()
        xml_str.get_detection_vt_as_xml_str(
            '1.3.6.1.4.1.25623.1.0.100061', detection
        )
        assert_called_once(logging.Logger.warning)

    def test_get_affected_xml(self):
        # The affected text serialises to the expected fragment.
        w = DummyDaemon()
        out = 'some affection'
        vt = w.VTS['1.3.6.1.4.1.25623.1.0.100061']
        affected = vt.get('affected')
        xml_str = XmlStringVTHelper()
        res = xml_str.get_affected_vt_as_xml_str(
            '1.3.6.1.4.1.25623.1.0.100061', affected=affected
        )
        self.assertEqual(res, out)

    def test_get_affected_xml_failed(self):
        # A control character in the affected text logs a warning.
        logging.Logger.warning = Mock()
        affected = "\u0006" + "affected"
        xml_str = XmlStringVTHelper()
        xml_str.get_affected_vt_as_xml_str(
            '1.3.6.1.4.1.25623.1.0.100061', affected=affected
        )
        assert_called_once(logging.Logger.warning)
ospd-openvas-22.9.0/tests/testing.conf 0000664 0000000 0000000 00000000565 15011310720 0017704 0 ustar 00root root 0000000 0000000 [OSPD - openvas]
log_level = DEBUG
socket_mode = 0o770
unix_socket = /foo/ospd-openvas.sock
pid_file = /foo/ospd-openvas.pid
log_file = /foo/ospd-openvas.log
lock_file_dir = /foo/openvas
niceness = 666
key_file = /foo/key.pem
address = 6.6.6.6
port = 6666
mqtt_broker_address = foo.bar.com
mqtt_broker_port = 1234
notus_feed_dir = /foo/advisories
scaninfo_store_time = 123