GuestProxyAgent-1.0.30/.cargo/config.toml
[target.x86_64-unknown-linux-musl]
runner = 'sudo -E' # run with root permission but keep the current environment variables

[http]
check-revoke = false # Set to false to disable revocation checks

[target.aarch64-unknown-linux-musl]
runner = 'sudo -E' # run with root permission but keep the current environment variables

GuestProxyAgent-1.0.30/.devcontainer/devcontainer.json
{
    "name": "gpalinuxdev",
    "build": {
        "dockerfile": "../docker/linux/Dockerfile",
        "args": {
            "RUST_VERSION": "1.85.0"
        }
    },
    "customizations": {
        "jetbrains": {
            "settings": {
                "Git4Idea:app:Git-Application-Settings.use_credential_helper": true,
                "com.intellij:app:BaseRefactoringSettings.rename_search_in_comments_for_file": false,
                "Docker:app:DockerSettings.dockerComposePath": "C:\\Program Files\\Docker\\Docker\\resources\\bin\\docker.exe",
                "Docker:app:DockerSettings.dockerPath": "C:\\Program Files\\Docker\\Docker\\resources\\bin\\docker.exe"
            }
        },
        "vscode": {
            "extensions": [
                "bierner.markdown-mermaid",
                "rust-lang.rust-analyzer",
                "ms-vscode.cpptools",
                "ms-vscode.cpptools-extension-pack",
                "ms-vscode.cpptools-themes",
                "ms-vscode.cmake-tools",
                "ms-vscode-remote.remote-containers",
                "ms-azuretools.vscode-docker",
                "shd101wyy.markdown-preview-enhanced",
                "davidanson.vscode-markdownlint",
                "ms-vscode.powershell",
                "redhat.vscode-yaml"
            ]
        }
    }
}

GuestProxyAgent-1.0.30/.dockerignore
/out
/packages
/target

GuestProxyAgent-1.0.30/.gitattributes
# Set the default behavior, in case people don't have core.autocrlf set.
* text=auto

# Use text conventions for commonly used text extensions.
*.csv text
*.ini text
*.json text
*.txt text
*.xml text
*.md text
*.rs text
*.lock text

# Exceptions
Cargo.lock eol=lf
.run/*.xml eol=lf
*.sh eol=lf
*.spec eol=lf

# Denote all files that are truly binary and should not be modified.
*.dll binary *.exe binary *.gz binary *.ico binary *.jpg binary *.lib binary *.pdb binary *.pdf binary *.png binary *.wim binary *.zip binary GuestProxyAgent-1.0.30/.github/000077500000000000000000000000001500521614600162435ustar00rootroot00000000000000GuestProxyAgent-1.0.30/.github/actions/000077500000000000000000000000001500521614600177035ustar00rootroot00000000000000GuestProxyAgent-1.0.30/.github/actions/spelling/000077500000000000000000000000001500521614600215205ustar00rootroot00000000000000GuestProxyAgent-1.0.30/.github/actions/spelling/excludes.txt000066400000000000000000000002571500521614600241010ustar00rootroot00000000000000.github\actions\spelling\expect.txt Cargo.lock doc/GPA Arch Diagram.vsdx doc/GuestProxyAgent.png e2etest/GuestProxyAgentTest/Resources/GuestProxyAgentLoadedModulesBaseline.txtGuestProxyAgent-1.0.30/.github/actions/spelling/expect.txt000066400000000000000000000054751500521614600235640ustar00rootroot00000000000000aab AAFFBB aarch abe addrpair almalinux ATL ATLMFC autobuild autocrlf aya AZUREPUBLICCLOUD azuretools backcompat bierner binpath binskim bitflag bpf bpftool btf btrfs bufptr Bufs BUILDIN buildroot buildshell byos cacheline cbl ccbdee ccbf cicd cimv cla cmds codeofconduct codeql collectguestlogs commandline comspec consoleloggerparameters CPlat cplusplus cpptools crpteste CRYPTOAPI csum customout customoutput cvd czf DABC daddr datacenter davidanson DDCE debbuild DEBHELPER debian Debpf defattr deploymentid devcontainer distros dllmain dnf dockerenv dodce dodce dotnet doxygen dport dtolnay Dvm EAccess EAF ebpf ebpfapi EBPFCORE egor ele ent entriesread EStorage etest etestoutputs etestsharedstorage EToken EUID evt exampledatadiskname exampleosdiskname examplevmname exthandlers fde FFF FFFF FFFFFFFF fffi ffi FIXEDFILEINFO FOF FSETID FSO fsprogs fstorage fstype fwlink Fzpeng gaplugin getifaddrs goalstate gpa gpalinuxdev gpawindev guestproxyagentmsis guiddef hklm hlist hostga httpwg Iaa idstepsrun IEnumerable ieq iex ifaddrs ifindex IList imds imm immediateruncommandservice intellectualproperty Intelli intellij INVM Ioctl iusr jetbrains jobsjob joutvhu JScript keyonly kinvolk kotlin kprobe ktime kusto lgrui libbpf libbpfcc libloading linkid llvmorg logdir Loggerhas logon Lrs Lsa ltsc luid macikgo mcr MEMORYSTATUSEX metabuild MFC microsoftcblmariner microsoftlosangeles microsoftwindowsdesktop mnt msasn msp msrc multilib netapi netcoreapp netebpfext nethook Newtonsoft nic nifs nmake nocapture NOCONFIRMATION NOERRORUI NONINFRINGEMENT notjson norestart ntdll NTSTATUS onscreen onebranch oneshot opencode opensource PERCPU pgpkey pgrep pidof pkgversion portaddr portpair postinst pprev prandom predef prefmaxlen printk PROCESSINFOCLASS proxyagent proxyagentextensionvalidation proxyagentvalidation ptrace pwstr rcv RDFE redhat Redist refcnt registrykey relativeurl resf reuseport rgr rgs rhel RINGBUF rockylinux rolename rootdir rpmbuild RPMS rstr rul runthis Runtimes rustfmt rustup saddr sandboxing sas schtasks scm secauthz serice SETFCAP SETPCAP shd sids sigid SIO skc sku sles sln smp spellright splitn SRPMS SSRF stackoverflow stdbool stdint stdoutput subsecond substatus Substatuses SUIDSGID suse Swatinem SWbem sysinfoapi SYSLIB SYSTEMDRIVE taiki TASKKILL telemetrydata tensin testcasesetting testrg testurl tgid THH thiserror timedout timeup tlsv tmpfs tokio topdir totalentries transitioning UBR UBRSTRING udev uers uninstalls unistd unmark Unregistering unregisters unspec uzers vcruntime vflji vhd vmagentlog VMGA VMId vmlinux vmr vmrcs vms vns VTeam vtpm waagent waappagent 
walinuxagent wasecagentprov Wbem wdk wdksetup Werror westus WFP winapi windowsazureguestagent winget winmgmts winnt winres wireserver wireserverand wireserverandimds WMI WORKDIR WScript wsf Wsh WSL wstr wsum wyy xamarin xcopy XDP xfsprogs xsi xxxx xxxxxxxx xxxxxxxxxxx zipsas

GuestProxyAgent-1.0.30/.github/workflows/cd.yml
name: CD Release

on:
  push:
    branches: ["main"]
    tags:
      - '*'

jobs:
  release:
    uses: ./.github/workflows/reusable-build.yml
    with:
      generate_release: true
    permissions:
      actions: read
      contents: write
      deployments: read
      packages: write
      pull-requests: write
      security-events: write

GuestProxyAgent-1.0.30/.github/workflows/ci.yml
name: CI Build & Test

on:
  push:
    branches: ["main", "dev"]
  pull_request:
    branches: ["main", "dev"]

env:
  CARGO_TERM_COLOR: always

concurrency:
  group: cicd-${{ github.event.pull_request.number || github.sha }}
  cancel-in-progress: true

jobs:
  linux_lint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: Swatinem/rust-cache@v2
      - uses: dtolnay/rust-toolchain@stable
        with:
          components: rustfmt
      - run: cargo fmt --all -- --check -l
      - name: Clippy
        run: cargo clippy -- -D warnings
      - name: cargo-audit
        run: |
          cargo install cargo-audit
          cargo-audit audit

  win_lint:
    runs-on: windows-latest
    steps:
      - uses: actions/checkout@v4
      - uses: Swatinem/rust-cache@v2
      - uses: dtolnay/rust-toolchain@stable
      - name: Clippy
        run: cargo clippy -- -D warnings
      - name: cargo-audit
        run: |
          cargo install cargo-audit
          cargo-audit audit

  build:
    uses: ./.github/workflows/reusable-build.yml
    with:
      generate_release: false

GuestProxyAgent-1.0.30/.github/workflows/codeql.yml
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"

on:
  push:
    branches: [ "dev", "main" ]
  pull_request:
    branches: [ "dev", "main" ]
  schedule:
    - cron: '18 19 * * 0'

jobs:
  analyze:
    name: Analyze (${{ matrix.language }})
    # Runner size impacts CodeQL analysis time. To learn more, please see:
    #   - https://gh.io/recommended-hardware-resources-for-running-codeql
    #   - https://gh.io/supported-runners-and-hardware-resources
    #   - https://gh.io/using-larger-runners (GitHub.com only)
    # Consider using larger runners or machines with greater resources for possible analysis time improvements.
runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }} timeout-minutes: ${{ (matrix.language == 'swift' && 120) || 360 }} permissions: # required for all workflows security-events: write # required to fetch internal or private CodeQL packs packages: read # only required for workflows in private repositories actions: read contents: read strategy: fail-fast: false matrix: include: - language: c-cpp build-mode: manual - language: csharp build-mode: autobuild # CodeQL supports the following values keywords for 'language': 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'swift' # Use `c-cpp` to analyze code written in C, C++ or both # Use 'java-kotlin' to analyze code written in Java, Kotlin or both # Use 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both # To learn more about changing the languages that are analyzed or customizing the build mode for your analysis, # see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning. # If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how # your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages steps: - name: Checkout repository uses: actions/checkout@v4 - name: Install Clang uses: egor-tensin/setup-clang@v1 - name: Setup an eBPF Development Environment in Ubuntu run: | sudo apt update sudo apt install linux-headers-$(uname -r) \ libbpfcc-dev \ libbpf-dev \ llvm \ clang \ gcc-multilib \ build-essential \ linux-tools-$(uname -r) \ linux-tools-common \ linux-tools-generic \ rpm \ musl-tools \ sudo snap install dotnet-sdk --classic sudo chown -R root:root /var/lib # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL uses: github/codeql-action/init@v3 with: languages: ${{ matrix.language }} build-mode: ${{ matrix.build-mode }} # If you wish to specify custom queries, you can do so here or in a config file. # By default, queries listed here will override any specified in a config file. # Prefix the list here with "+" to use these queries and those in the config file. # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs # queries: security-extended,security-and-quality # If the analyze step fails for one of the languages you are analyzing with # "We were unable to automatically build your code", modify the matrix above # to set the build mode to "manual" for that language. Then modify this step # to build your code. # â„šī¸ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun - if: matrix.build-mode == 'manual' shell: bash run: | mkdir ./out clang -g -target bpf -Werror -O2 -D__TARGET_ARCH_x86 -c ./linux-ebpf/ebpf_cgroup.c -o ./out/ebpf_cgroup.o error_code=$? 
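# Propagate clang's exit code so a failed eBPF compile fails this manual CodeQL build step.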
if [ $error_code -ne 0 ] then echo "call clang failed with exit-code: $error_code" exit $error_code fi - name: Perform CodeQL Analysis uses: github/codeql-action/analyze@v3 with: category: "/language:${{matrix.language}}" GuestProxyAgent-1.0.30/.github/workflows/reusable-build.yml000066400000000000000000000452041500521614600237270ustar00rootroot00000000000000name: Reusable Build and Artifacts on: workflow_call: inputs: generate_release: required: true type: boolean env: CARGO_TERM_COLOR: always CARGO_LLVM_COV_TARGET_DIR: out jobs: create-release: runs-on: ubuntu-latest steps: - name: Create GitHub Release id: create_release if: inputs.generate_release == true uses: actions/create-release@v1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: tag_name: ${{ github.ref }} release_name: Release ${{ github.ref }} body: Release notes build-windows-amd64: runs-on: windows-latest needs: create-release steps: - name: Checkout Project uses: actions/checkout@v3.3.0 - name: Install Clang working-directory: ${{env.GITHUB_WORKSPACE}} run: | choco install -y llvm --version 11.0.1 --allow-downgrade - name: setup-msbuild uses: microsoft/setup-msbuild@v1.3.1 - name: rust-toolchain uses: actions-rs/toolchain@v1.0.6 with: toolchain: 1.69.0 - name: Install llvm Code Coverage uses: taiki-e/install-action@cargo-llvm-cov - name: Run Build.cmd Debug amd64 run: .\build.cmd debug amd64 - name: Upload debug amd64 build zip packages if: (success() || failure()) uses: actions/upload-artifact@v4 with: # Artifact name name: build-debug-windows-amd64.zip # A file, directory or wildcard pattern that describes what to upload path: out/x86_64-pc-windows-msvc/debug/package/ - name: Run Build.cmd Release amd64 if: (success() || failure()) run: .\build.cmd release amd64 - name: Upload release amd64 build zip package if: (success() || failure()) uses: actions/upload-artifact@v4 with: # Artifact name name: build-release-windows-amd64.zip # A file, directory or wildcard pattern that describes what to upload path: out/x86_64-pc-windows-msvc/release/package/ - name: Run Code Coverage for proxy_agent_shared run: | cargo llvm-cov --target x86_64-pc-windows-msvc --manifest-path ./proxy_agent_shared/Cargo.toml --output-path ./out/proxy_agent_shared_codeCov.txt --release type ./out/proxy_agent_shared_codeCov.txt - name: Parse Code Coverage for proxy_agent_shared run: | $threshold = 70 Get-Content out\proxy_agent_shared_codeCov.txt | ForEach-Object { if ($_ -match 'TOTAL') { $totalLine = $_ } } $coverageData = $totalLine -split '\s+' $totalLinesPercent = $coverageData[9] -replace '%', '' if ($totalLinesPercent -lt $threshold) { Write-Output "Code coverage below threshold: $totalLinesPercent. Failing the step." exit 1 } else { Write-Output "Percent of Lines Covered: $totalLinesPercent" } - name: Run Code Coverage for proxy_agent run: | cargo llvm-cov --target x86_64-pc-windows-msvc --manifest-path proxy_agent\Cargo.toml --output-path out\codeCov.txt --release --ignore-filename-regex "(proxy_agent_shared|main.rs|bpf_api.rs|bpf_obj.rs)" -- --test-threads=1 type out\codeCov.txt - name: Parse Code Coverage for proxy_agent run: | $threshold = 70 Get-Content out\codeCov.txt | ForEach-Object { if ($_ -match 'TOTAL') { $totalLine = $_ } } $coverageData = $totalLine -split '\s+' $totalLinesPercent = $coverageData[9] -replace '%', '' if ($totalLinesPercent -lt $threshold) { Write-Output "Code coverage below threshold: $totalLinesPercent. Failing the step." 
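# Exiting with code 1 fails this step, enforcing the 70% line-coverage threshold for proxy_agent_shared on Windows.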
exit 1 } else { Write-Output "Percent of Lines Covered: $totalLinesPercent" } - name: Run Code Coverage for proxy_agent_extension run: | cargo llvm-cov --target x86_64-pc-windows-msvc --manifest-path proxy_agent_extension\Cargo.toml --output-path out\proxy_agent_extension_codeCov.txt --release --ignore-filename-regex "(proxy_agent_shared|main.rs|service_ext.rs)" -- --test-threads=1 type out\proxy_agent_extension_codeCov.txt - name: Parse Code Coverage for proxy_agent_extension run: | $threshold = 70 Get-Content out\proxy_agent_extension_codeCov.txt | ForEach-Object { if ($_ -match 'TOTAL') { $totalLine = $_ } } $coverageData = $totalLine -split '\s+' $totalLinesPercent = $coverageData[9] -replace '%', '' if ($totalLinesPercent -lt $threshold) { Write-Output "Code coverage below threshold: $totalLinesPercent. Failing the step." exit 1 } else { Write-Output "Percent of Lines Covered: $totalLinesPercent" } - name: get current release id: get_current_release if: inputs.generate_release == true uses: joutvhu/get-release@v1.0.2 with: # do not push two tags/releases at the same time latest: true env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Upload debug amd64 build if: inputs.generate_release == true uses: actions/upload-release-asset@v1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: upload_url: ${{ steps.get_current_release.outputs.upload_url }} # Path to the asset asset_path: out/x86_64-pc-windows-msvc/debug/build-debug-windows-amd64.zip asset_name: build-debug-windows-amd64.zip # ID of the release to attach the asset to asset_content_type: application/zip - name: Upload release amd64 build if: inputs.generate_release == true uses: actions/upload-release-asset@v1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: upload_url: ${{ steps.get_current_release.outputs.upload_url }} # Path to the asset asset_path: out/x86_64-pc-windows-msvc/release/build-release-windows-amd64.zip asset_name: build-release-windows-amd64.zip # ID of the release to attach the asset to asset_content_type: application/zip build-windows-arm64: runs-on: windows-latest needs: create-release steps: - name: Checkout Project uses: actions/checkout@v3.3.0 - name: Install Clang working-directory: ${{env.GITHUB_WORKSPACE}} run: | choco install -y llvm --version 11.0.1 --allow-downgrade - name: setup-msbuild uses: microsoft/setup-msbuild@v1.3.1 - name: rust-toolchain uses: actions-rs/toolchain@v1.0.6 with: toolchain: 1.69.0 - name: Run Build.cmd Debug arm64 run: .\build.cmd debug arm64 - name: Upload debug arm64 build zip packages if: (success() || failure()) uses: actions/upload-artifact@v4 with: # Artifact name name: build-debug-windows-arm64.zip # A file, directory or wildcard pattern that describes what to upload path: out/aarch64-pc-windows-msvc/debug/package/ - name: Run Build.cmd Release arm64 if: (success() || failure()) run: .\build.cmd release arm64 - name: Upload release arm64 build zip package if: (success() || failure()) uses: actions/upload-artifact@v4 with: # Artifact name name: build-release-windows-arm64.zip # A file, directory or wildcard pattern that describes what to upload path: out/aarch64-pc-windows-msvc/release/package/ - name: get current release id: get_current_release if: inputs.generate_release == true uses: joutvhu/get-release@v1.0.2 with: # do not push two tags/releases at the same time latest: true env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Upload debug arm64 build if: inputs.generate_release == true uses: actions/upload-release-asset@v1 env: GITHUB_TOKEN: ${{ 
secrets.GITHUB_TOKEN }} with: upload_url: ${{ steps.get_current_release.outputs.upload_url }} # Path to the asset asset_path: out/aarch64-pc-windows-msvc/debug/build-debug-windows-arm64.zip asset_name: build-debug-windows-arm64.zip # ID of the release to attach the asset to asset_content_type: application/zip - name: Upload release arm64 build if: inputs.generate_release == true uses: actions/upload-release-asset@v1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: upload_url: ${{ steps.get_current_release.outputs.upload_url }} # Path to the asset asset_path: out/aarch64-pc-windows-msvc/release/build-release-windows-arm64.zip asset_name: build-release-windows-arm64.zip # ID of the release to attach the asset to asset_content_type: application/zip build-linux-amd64: runs-on: ubuntu-latest needs: create-release steps: - name: Checkout Project uses: actions/checkout@v3.3.0 - name: Install Clang uses: egor-tensin/setup-clang@v1 - name: rust-toolchain uses: actions-rs/toolchain@v1.0.6 with: toolchain: stable - name: Install Code Coverage uses: taiki-e/install-action@cargo-llvm-cov - name: Setup an eBPF Development Environment in Ubuntu run: | sudo apt update sudo apt install linux-headers-$(uname -r) \ libbpfcc-dev \ libbpf-dev \ llvm \ clang \ gcc-multilib \ build-essential \ linux-tools-$(uname -r) \ linux-tools-common \ linux-tools-generic \ rpm \ musl-tools \ sudo snap install dotnet-sdk --classic sudo chown -R root:root /var/lib - name: Run build-linux.sh Debug amd64 run: ./build-linux.sh debug amd64 - name: Upload debug build amd64 zip package uses: actions/upload-artifact@v4 with: # Artifact name name: build-debug-linux-amd64.zip # A file, directory or wildcard pattern that describes what to upload path: out/x86_64-unknown-linux-musl/debug/package/ - name: Run build-linux.sh Release amd64 run: ./build-linux.sh release - name: Upload release amd64 build zip package uses: actions/upload-artifact@v4 with: # Artifact name name: build-release-linux-amd64.zip # A file, directory or wildcard pattern that describes what to upload path: out/x86_64-unknown-linux-musl/release/package/ - name: Run Code Coverage for proxy_agent_shared run: | cargo llvm-cov --target x86_64-unknown-linux-musl --manifest-path ./proxy_agent_shared/Cargo.toml --output-path ./out/proxy_agent_shared_codeCov.txt --release cat ./out/proxy_agent_shared_codeCov.txt - name: Parse Code Coverage for proxy_agent_shared run: | threshold=70 while IFS= read -r line || [ -n "$line" ]; do if [[ $line == *"TOTAL"* ]]; then totalLine=$line fi done < ./out/proxy_agent_shared_codeCov.txt read -ra total_value_array <<< "$totalLine" value=${total_value_array[9]} totalLinesPercent=${value//%/} totalLinesPercent=$(echo "$totalLinesPercent" | bc -l) if [[ $totalLinesPercent < $threshold ]] then echo "Code coverage below threshold: $totalLinesPercent. Failing the step." 
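# Exiting non-zero fails this step, enforcing the 70% line-coverage threshold for proxy_agent_shared on Linux.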
exit 1 fi echo "Percent of Lines Covered: $totalLinesPercent" - name: Run Code Coverage for proxy_agent run: | cargo llvm-cov --target x86_64-unknown-linux-musl --manifest-path ./proxy_agent/Cargo.toml --output-path ./out/proxy_agent_codeCov.txt --release --ignore-filename-regex "(proxy_agent_shared|main.rs|linux.rs)" -- --test-threads=1 cat ./out/proxy_agent_codeCov.txt - name: Parse Code Coverage for proxy_agent run: | threshold=70 while IFS= read -r line || [ -n "$line" ]; do if [[ $line == *"TOTAL"* ]]; then totalLine=$line fi done < ./out/proxy_agent_codeCov.txt read -ra total_value_array <<< "$totalLine" value=${total_value_array[9]} totalLinesPercent=${value//%/} totalLinesPercent=$(echo "$totalLinesPercent" | bc -l) if [[ $totalLinesPercent < $threshold ]] then echo "Code coverage below threshold: $totalLinesPercent. Failing the step." exit 1 fi echo "Percent of Lines Covered: $totalLinesPercent" - name: Run Code Coverage for proxy_agent_extension run: | cargo llvm-cov --target x86_64-unknown-linux-musl --manifest-path ./proxy_agent_extension/Cargo.toml --output-path ./out/proxy_agent_extension_codeCov.txt --release --ignore-filename-regex "(proxy_agent_shared|main.rs)" -- --test-threads=1 cat ./out/proxy_agent_extension_codeCov.txt - name: Parse Code Coverage for proxy_agent_extension run: | threshold=70 while IFS= read -r line || [ -n "$line" ]; do if [[ $line == *"TOTAL"* ]]; then totalLine=$line fi done < ./out/proxy_agent_extension_codeCov.txt read -ra total_value_array <<< "$totalLine" value=${total_value_array[9]} totalLinesPercent=${value//%/} totalLinesPercent=$(echo "$totalLinesPercent" | bc -l) if [[ $totalLinesPercent < $threshold ]] then echo "Code coverage below threshold: $totalLinesPercent. Failing the step." exit 1 fi echo "Percent of Lines Covered: $totalLinesPercent" - name: get current release id: get_current_release if: inputs.generate_release == true uses: joutvhu/get-release@v1.0.2 with: # do not push two tags/releases at the same time latest: true env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Upload debug build if: inputs.generate_release == true uses: actions/upload-release-asset@v1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: upload_url: ${{ steps.get_current_release.outputs.upload_url }} # Path to the asset asset_path: out/x86_64-unknown-linux-musl/debug/build-debug-linux-amd64.zip asset_name: build-debug-linux-amd64.zip # ID of the release to attach the asset to asset_content_type: application/zip - name: Upload release build if: inputs.generate_release == true uses: actions/upload-release-asset@v1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: upload_url: ${{ steps.get_current_release.outputs.upload_url }} # Path to the asset asset_path: out/x86_64-unknown-linux-musl/release/build-release-linux-amd64.zip asset_name: build-release-linux-amd64.zip # ID of the release to attach the asset to asset_content_type: application/zip build-linux-arm64: runs-on: ubuntu-24.04-arm needs: create-release steps: - name: Checkout Project uses: actions/checkout@v3.3.0 - name: Install Clang uses: egor-tensin/setup-clang@v1 - name: rust-toolchain uses: actions-rs/toolchain@v1.0.6 with: toolchain: stable - name: Setup an eBPF Development Environment in Ubuntu run: | sudo apt update sudo apt install linux-headers-$(uname -r) \ libbpfcc-dev \ libbpf-dev \ llvm \ clang \ build-essential \ linux-tools-$(uname -r) \ linux-tools-common \ linux-tools-generic \ rpm \ musl-tools \ gcc-aarch64-linux-gnu \ sudo sudo apt-get install -y dotnet-sdk-8.0 - name: Run 
build-linux.sh Debug arm64 run: ./build-linux.sh debug arm64 - name: Upload debug arm64 build zip package uses: actions/upload-artifact@v4 with: # Artifact name name: build-debug-linux-arm64.zip # A file, directory or wildcard pattern that describes what to upload path: out/aarch64-unknown-linux-musl/debug/package/ - name: Run build-linux.sh Release arm64 run: ./build-linux.sh release arm64 - name: Upload release arm64 build zip package uses: actions/upload-artifact@v4 with: # Artifact name name: build-release-linux-arm64.zip # A file, directory or wildcard pattern that describes what to upload path: out/aarch64-unknown-linux-musl/release/package/ - name: get current release id: get_current_release if: inputs.generate_release == true uses: joutvhu/get-release@v1.0.2 with: # do not push two tags/releases at the same time latest: true env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Upload debug build if: inputs.generate_release == true uses: actions/upload-release-asset@v1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: upload_url: ${{ steps.get_current_release.outputs.upload_url }} # Path to the asset asset_path: out/aarch64-unknown-linux-musl/debug/build-debug-linux-arm64.zip asset_name: build-debug-linux-arm64.zip # ID of the release to attach the asset to asset_content_type: application/zip - name: Upload release build if: inputs.generate_release == true uses: actions/upload-release-asset@v1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: upload_url: ${{ steps.get_current_release.outputs.upload_url }} # Path to the asset asset_path: out/aarch64-unknown-linux-musl/release/build-release-linux-arm64.zip asset_name: build-release-linux-arm64.zip # ID of the release to attach the asset to asset_content_type: application/zip GuestProxyAgent-1.0.30/.github/workflows/spelling.yml000066400000000000000000000047751500521614600226550ustar00rootroot00000000000000name: Check Spelling on: push: branches: ["main", "dev"] pull_request: branches: ["main", "dev"] jobs: spelling: name: Check Spelling permissions: contents: read pull-requests: read actions: read security-events: write outputs: followup: ${{ steps.spelling.outputs.followup }} runs-on: ubuntu-latest if: ${{ contains(github.event_name, 'pull_request') || github.event_name == 'push' }} concurrency: group: spelling-${{ github.event.pull_request.number || github.ref }} # note: If you use only_check_changed_files, you do not want cancel-in-progress cancel-in-progress: true steps: - name: check-spelling id: spelling uses: check-spelling/check-spelling@main with: config: .github/actions/spelling suppress_push_for_open_pull_request: ${{ github.actor != 'dependabot[bot]' && 1 }} checkout: true spell_check_this: Azure/GuestProxyAgent@dev post_comment: 0 warnings: bad-regex,binary-file,deprecated-feature,large-file,limited-references,no-newline-at-eof,noisy-file,non-alpha-in-dictionary,token-is-substring,unexpected-line-ending,whitespace-in-dictionary,minified-file,unsupported-configuration,no-files-to-check experimental_apply_changes_via_bot: 1 use_sarif: ${{ (!github.event.pull_request || (github.event.pull_request.head.repo.full_name == github.repository)) && 1 }} extra_dictionary_limit: 20 extra_dictionaries: cspell:software-terms/dict/softwareTerms.txt cspell:cpp/src/stdlib-cpp.txt cspell:cpp/src/lang-keywords.txt cspell:node/dict/node.txt cspell:python/src/python/python-lib.txt cspell:golang/dict/go.txt cspell:rust/dict/rust.txt cspell:php/dict/php.txt cspell:dotnet/dict/dotnet.txt cspell:filetypes/filetypes.txt 
cspell:java/src/java.txt cspell:python/src/common/extra.txt cspell:python/src/python/python.txt cspell:k8s/dict/k8s.txt cspell:django/dict/django.txt cspell:npm/dict/npm.txt cspell:aws/aws.txt cspell:scala/dict/scala.txt cspell:fullstack/dict/fullstack.txt cspell:r/src/r.txt cspell:typescript/dict/typescript.txt cspell:cpp/src/ecosystem.txt cspell:powershell/dict/powershell.txt cspell:cpp/src/template-strings.txt cspell:csharp/csharp.txt cspell:cpp/src/stdlib-c.txt

GuestProxyAgent-1.0.30/.gitignore
# Generated by Cargo
# will have compiled files and executables
/target/
/proxy_agent/target/
/proxy_agent_shared/target/
/proxy_agent_extension/target/
/proxy_agent_setup/target/
/out/
/build/
/rpmbuild/SOURCES
/rpmbuild/BUILD/
/rpmbuild/RPMS/
/rpmbuild/SRPMS/
/debbuild/
.idea/

# ignore the nuget restore packages
/packages/

# ignore Redirector x64 build out
/Redirector/x64/
/proxy_agent/*.lib

# These are backup files generated by rustfmt
**/*.rs.bk

# Build results
[Bb]in/
[Oo]bj/
[Ll]og/

# Visual Studio cache/options directory
.vs/

GuestProxyAgent-1.0.30/.vscode/extensions.json
{
    "recommendations": [
        "ms-vscode-remote.remote-containers",
        "ms-azuretools.vscode-docker"
    ]
}

GuestProxyAgent-1.0.30/.vscode/launch.json
{
    // Use IntelliSense to learn about possible attributes.
    // Hover to view descriptions of existing attributes.
    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
    "version": "0.2.0",
    "configurations": [
        {
            "type": "lldb",
            "request": "launch",
            "name": "Debug",
            "program": "${workspaceFolder}/out/debug/GuestProxyAgent.exe ",
            "args": ["console"],
            "cwd": "${workspaceFolder}"
        }
    ]
}

GuestProxyAgent-1.0.30/.vscode/settings.json
{
    "files.associations": {
        "guiddef.h": "c",
        "socket.h": "c",
        "cstdint": "cpp",
        "type_traits": "cpp"
    },
    "spellright.language": [
        "en"
    ],
    "spellright.documentTypes": [
        "markdown",
        "latex",
        "plaintext",
        "cpp",
        "Log",
        "rust"
    ],
    "spellright.parserByClass": {
        "Log": {
            "parser": "plain"
        }
    },
    "rust-analyzer.linkedProjects": [
        "./proxy_agent/Cargo.toml",
        "./proxy_agent_shared/Cargo.toml",
        "./proxy_agent_extension/Cargo.toml",
        "./proxy_agent_setup/Cargo.toml"
    ],
    "rust-analyzer.cargo.features": [
        "test-with-root"
    ],
    "vscode-nmake-tools.workspaceBuildDirectories": [
        "."
] }GuestProxyAgent-1.0.30/.vscode/tasks.json000066400000000000000000000023651500521614600202720ustar00rootroot00000000000000{ "version": "2.0.0", "tasks": [ { "label": "build", "command": "dotnet", "type": "process", "args": [ "build", "${workspaceFolder}/e2etest/GuestProxyAgentE2ETest/GuestProxyAgentE2ETest.csproj", "/property:GenerateFullPaths=true", "/consoleloggerparameters:NoSummary" ], "problemMatcher": "$msCompile" }, { "label": "publish", "command": "dotnet", "type": "process", "args": [ "publish", "${workspaceFolder}/e2etest/GuestProxyAgentE2ETest/GuestProxyAgentE2ETest.csproj", "/property:GenerateFullPaths=true", "/consoleloggerparameters:NoSummary" ], "problemMatcher": "$msCompile" }, { "label": "watch", "command": "dotnet", "type": "process", "args": [ "watch", "run", "--project", "${workspaceFolder}/e2etest/GuestProxyAgentE2ETest/GuestProxyAgentE2ETest.csproj" ], "problemMatcher": "$msCompile" } ] }GuestProxyAgent-1.0.30/CODE_OF_CONDUCT.md000066400000000000000000000006741500521614600175110ustar00rootroot00000000000000# Microsoft Open Source Code of Conduct This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). Resources: - [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/) - [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) - Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns GuestProxyAgent-1.0.30/Cargo.lock000066400000000000000000001151611500521614600166150ustar00rootroot00000000000000# This file is automatically @generated by Cargo. # It is not intended for manual editing. version = 4 [[package]] name = "ProxyAgentExt" version = "1.0.30" dependencies = [ "clap", "ctor", "nix", "once_cell", "proxy_agent_shared", "serde", "serde_json", "static_vcruntime", "sysinfo", "thiserror", "tokio", "windows-service", "winres", ] [[package]] name = "addr2line" version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f5fb1d8e4442bd405fdfd1dacb42792696b0cf9cb15882e5d097b742a676d375" dependencies = [ "gimli", ] [[package]] name = "adler2" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" [[package]] name = "ahash" version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", "once_cell", "version_check", "zerocopy", ] [[package]] name = "aho-corasick" version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ "memchr", ] [[package]] name = "allocator-api2" version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "anstream" version = "0.6.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526" dependencies = [ "anstyle", "anstyle-parse", "anstyle-query", "anstyle-wincon", "colorchoice", "is_terminal_polyfill", "utf8parse", ] [[package]] name = "anstyle" version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" 
[[package]] name = "anstyle-parse" version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb47de1e80c2b463c735db5b217a0ddc39d612e7ac9e2e96a5aed1f57616c1cb" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a" dependencies = [ "windows-sys 0.52.0", ] [[package]] name = "anstyle-wincon" version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8" dependencies = [ "anstyle", "windows-sys 0.52.0", ] [[package]] name = "assert_matches" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" [[package]] name = "autocfg" version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "aya" version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d18bc4e506fbb85ab7392ed993a7db4d1a452c71b75a246af4a80ab8c9d2dd50" dependencies = [ "assert_matches", "aya-obj", "bitflags", "bytes", "libc", "log", "object", "once_cell", "thiserror", ] [[package]] name = "aya-obj" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c51b96c5a8ed8705b40d655273bc4212cbbf38d4e3be2788f36306f154523ec7" dependencies = [ "bytes", "core-error", "hashbrown 0.15.2", "log", "object", "thiserror", ] [[package]] name = "azure-proxy-agent" version = "1.0.30" dependencies = [ "aya", "bitflags", "clap", "ctor", "hex", "hmac-sha256", "http", "http-body-util", "hyper", "hyper-util", "itertools", "libc", "libloading", "nix", "once_cell", "proxy_agent_shared", "regex", "serde", "serde-xml-rs", "serde_derive", "serde_json", "static_vcruntime", "sysinfo", "thiserror", "tokio", "tokio-util", "tower", "tower-http", "uuid", "uzers", "winapi", "windows-acl", "windows-service", "windows-sys 0.42.0", "winres", ] [[package]] name = "backtrace" version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" dependencies = [ "addr2line", "cfg-if", "libc", "miniz_oxide", "object", "rustc-demangle", "windows-targets", ] [[package]] name = "bitflags" version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" [[package]] name = "byteorder" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" [[package]] name = "cfg-if" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "cfg_aliases" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" [[package]] name = "clap" version = "4.5.18" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "b0956a43b323ac1afaffc053ed5c4b7c1f1800bacd1683c353aabbb752515dd3" dependencies = [ "clap_builder", "clap_derive", ] [[package]] name = "clap_builder" version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4d72166dd41634086d5803a47eb71ae740e61d84709c36f3c34110173db3961b" dependencies = [ "anstream", "anstyle", "clap_lex", "strsim", ] [[package]] name = "clap_derive" version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" dependencies = [ "heck", "proc-macro2", "quote", "syn", ] [[package]] name = "clap_lex" version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" [[package]] name = "colorchoice" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3fd119d74b830634cea2a0f58bbd0d54540518a14397557951e79340abc28c0" [[package]] name = "concurrent-queue" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" dependencies = [ "crossbeam-utils", ] [[package]] name = "core-error" version = "0.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "efcdb2972eb64230b4c50646d8498ff73f5128d196a90c7236eec4cbe8619b8f" dependencies = [ "version_check", ] [[package]] name = "core-foundation-sys" version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "crc32fast" version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" dependencies = [ "cfg-if", ] [[package]] name = "crossbeam-deque" version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" dependencies = [ "crossbeam-epoch", "crossbeam-utils", ] [[package]] name = "crossbeam-epoch" version = "0.9.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" dependencies = [ "crossbeam-utils", ] [[package]] name = "crossbeam-utils" version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" [[package]] name = "ctor" version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21d960ecacd0a1bf55e73144b72de745e7bf275c7952c50e36e8af0a0cb7ab1f" dependencies = [ "ctor-proc-macro", ] [[package]] name = "ctor-proc-macro" version = "0.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c426d2ba3e525b39c1f0a9ba41b9fe61878dee11fa4e4a76b6ab440f46c5db5d" [[package]] name = "deranged" version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" dependencies = [ "powerfmt", ] [[package]] name = "either" version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" [[package]] name = "equivalent" version = "1.0.1" source 
= "registry+https://github.com/rust-lang/crates.io-index" checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "field-offset" version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38e2275cc4e4fc009b0669731a1e5ab7ebf11f469eaede2bab9309a5b4d6057f" dependencies = [ "memoffset", "rustc_version", ] [[package]] name = "fnv" version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "foldhash" version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a0d2fde1f7b3d48b8395d5f2de76c18a528bd6a9cdde438df747bfcba3e05d6f" [[package]] name = "futures-channel" version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" dependencies = [ "futures-core", ] [[package]] name = "futures-core" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-sink" version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" [[package]] name = "futures-task" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-util" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ "futures-core", "futures-task", "pin-project-lite", "pin-utils", ] [[package]] name = "getrandom" version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ "cfg-if", "libc", "wasi", ] [[package]] name = "gimli" version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32085ea23f3234fc7846555e85283ba4de91e21016dc0455a16286d87a292d64" [[package]] name = "hashbrown" version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ "ahash", ] [[package]] name = "hashbrown" version = "0.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" dependencies = [ "allocator-api2", "equivalent", "foldhash", ] [[package]] name = "hdrhistogram" version = "7.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "765c9198f173dd59ce26ff9f95ef0aafd0a0fe01fb9d72841bc5066a4c06511d" dependencies = [ "byteorder", "num-traits", ] [[package]] name = "heck" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" [[package]] name = "hermit-abi" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" [[package]] name = "hex" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hmac-sha256" version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3688e69b38018fec1557254f64c8dc2cc8ec502890182f395dbb0aa997aa5735" [[package]] name = "http" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" dependencies = [ "bytes", "fnv", "itoa", ] [[package]] name = "http-body" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", "http", ] [[package]] name = "http-body-util" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" dependencies = [ "bytes", "futures-util", "http", "http-body", "pin-project-lite", ] [[package]] name = "httparse" version = "1.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" [[package]] name = "httpdate" version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" dependencies = [ "bytes", "futures-channel", "futures-util", "http", "http-body", "httparse", "httpdate", "itoa", "pin-project-lite", "smallvec", "tokio", "want", ] [[package]] name = "hyper-util" version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da62f120a8a37763efb0cf8fdf264b884c7b8b9ac8660b900c8661030c00e6ba" dependencies = [ "bytes", "futures-util", "http", "http-body", "hyper", "pin-project-lite", "tokio", ] [[package]] name = "indexmap" version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652" dependencies = [ "equivalent", "hashbrown 0.15.2", ] [[package]] name = "is_terminal_polyfill" version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" [[package]] name = "itertools" version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" dependencies = [ "either", ] [[package]] name = "itoa" version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "libc" version = "0.2.171" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c19937216e9d3aa9956d9bb8dfc0b0c8beb6058fc4f7a4dc4d850edf86a237d6" [[package]] name = "libloading" version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" dependencies = [ "cfg-if", "windows-targets", ] [[package]] name = "log" version = "0.4.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30bde2b3dc3671ae49d8e2e9f044c7c005836e7a023ee57cffa25ab82764bb9e" [[package]] name = "memchr" version = "2.7.4" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] name = "memoffset" version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a" dependencies = [ "autocfg", ] [[package]] name = "miniz_oxide" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" dependencies = [ "adler2", ] [[package]] name = "mio" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" dependencies = [ "hermit-abi", "libc", "wasi", "windows-sys 0.52.0", ] [[package]] name = "nix" version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" dependencies = [ "bitflags", "cfg-if", "cfg_aliases", "libc", "memoffset", ] [[package]] name = "ntapi" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8a3895c6391c39d7fe7ebc444a87eb2991b2a0bc718fdabd071eec617fc68e4" dependencies = [ "winapi", ] [[package]] name = "num-conv" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" [[package]] name = "num-traits" version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", ] [[package]] name = "object" version = "0.36.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "084f1a5821ac4c651660a94a7153d27ac9d8a53736203f58b31945ded098070a" dependencies = [ "crc32fast", "hashbrown 0.14.5", "indexmap", "memchr", ] [[package]] name = "once_cell" version = "1.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" [[package]] name = "os_info" version = "3.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae99c7fa6dd38c7cafe1ec085e804f8f555a2f8659b0dbe03f1f9963a9b51092" dependencies = [ "log", "serde", "windows-sys 0.52.0", ] [[package]] name = "pin-project-lite" version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" [[package]] name = "pin-utils" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "powerfmt" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "ppv-lite86" version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" dependencies = [ "zerocopy", ] [[package]] name = "proc-macro2" version = "1.0.93" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99" dependencies = [ "unicode-ident", ] [[package]] name = "proxy_agent_setup" version = "1.0.30" dependencies 
= [ "clap", "proxy_agent_shared", "static_vcruntime", "thiserror", "tokio", "winres", ] [[package]] name = "proxy_agent_shared" version = "1.0.30" dependencies = [ "concurrent-queue", "ctor", "log", "once_cell", "os_info", "regex", "serde", "serde_derive", "serde_json", "thiserror", "thread-id", "time", "tokio", "windows-service", "windows-sys 0.42.0", "winreg", ] [[package]] name = "quote" version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" dependencies = [ "proc-macro2", ] [[package]] name = "rand" version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", "rand_chacha", "rand_core", ] [[package]] name = "rand_chacha" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", "rand_core", ] [[package]] name = "rand_core" version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ "getrandom", ] [[package]] name = "rayon" version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" dependencies = [ "either", "rayon-core", ] [[package]] name = "rayon-core" version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" dependencies = [ "crossbeam-deque", "crossbeam-utils", ] [[package]] name = "regex" version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", "regex-automata", "regex-syntax", ] [[package]] name = "regex-automata" version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" dependencies = [ "aho-corasick", "memchr", "regex-syntax", ] [[package]] name = "regex-syntax" version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "rustc-demangle" version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" [[package]] name = "rustc_version" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ "semver", ] [[package]] name = "ryu" version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] name = "semver" version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" [[package]] name = "serde" version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ "serde_derive", ] [[package]] name = 
"serde-xml-rs" version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb3aa78ecda1ebc9ec9847d5d3aba7d618823446a049ba2491940506da6e2782" dependencies = [ "log", "serde", "thiserror", "xml-rs", ] [[package]] name = "serde_derive" version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "serde_json" version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" dependencies = [ "itoa", "memchr", "ryu", "serde", ] [[package]] name = "slab" version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" dependencies = [ "autocfg", ] [[package]] name = "smallvec" version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "socket2" version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" dependencies = [ "libc", "windows-sys 0.52.0", ] [[package]] name = "static_vcruntime" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "954e3e877803def9dc46075bf4060147c55cd70db97873077232eae0269dc89b" [[package]] name = "strsim" version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "syn" version = "2.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d5d0adab1ae378d7f53bdebc67a39f1f151407ef230f0ce2883572f5d8985c80" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "sync_wrapper" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" [[package]] name = "sysinfo" version = "0.30.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0a5b4ddaee55fb2bea2bf0e5000747e5f5c0de765e5a5ff87f4cd106439f4bb3" dependencies = [ "cfg-if", "core-foundation-sys", "libc", "ntapi", "once_cell", "rayon", "windows", ] [[package]] name = "thiserror" version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "thread-id" version = "4.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfe8f25bbdd100db7e1d34acf7fd2dc59c4bf8f7483f505eaa7d4f12f76cc0ea" dependencies = [ "libc", "winapi", ] [[package]] name = "time" version = "0.3.36" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" dependencies = [ "deranged", "itoa", "num-conv", "powerfmt", "serde", "time-core", "time-macros", ] [[package]] name = "time-core" version = "0.1.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" dependencies = [ "num-conv", "time-core", ] [[package]] name = "tokio" version = "1.43.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "492a604e2fd7f814268a378409e6c92b5525d747d10db9a229723f55a417958c" dependencies = [ "backtrace", "libc", "mio", "pin-project-lite", "socket2", "tokio-macros", "windows-sys 0.52.0", ] [[package]] name = "tokio-macros" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "tokio-util" version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" dependencies = [ "bytes", "futures-core", "futures-sink", "pin-project-lite", "tokio", ] [[package]] name = "toml" version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" dependencies = [ "serde", ] [[package]] name = "tower" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" dependencies = [ "futures-core", "futures-util", "hdrhistogram", "indexmap", "pin-project-lite", "slab", "sync_wrapper", "tokio", "tokio-util", "tower-layer", "tower-service", "tracing", ] [[package]] name = "tower-http" version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "403fa3b783d4b626a8ad51d766ab03cb6d2dbfc46b1c5d4448395e6628dc9697" dependencies = [ "bitflags", "bytes", "http", "http-body", "http-body-util", "pin-project-lite", "tower-layer", "tower-service", ] [[package]] name = "tower-layer" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" [[package]] name = "tower-service" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ "pin-project-lite", "tracing-core", ] [[package]] name = "tracing-core" version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" dependencies = [ "once_cell", ] [[package]] name = "try-lock" version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "unicode-ident" version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" [[package]] name = "utf8parse" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" dependencies = [ "getrandom", "rand", "uuid-macro-internal", ] [[package]] name = "uuid-macro-internal" version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee1cd046f83ea2c4e920d6ee9f7c3537ef928d75dce5d84a87c2c5d6b3999a3a" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "uzers" version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4df81ff504e7d82ad53e95ed1ad5b72103c11253f39238bcc0235b90768a97dd" dependencies = [ "libc", "log", ] [[package]] name = "version_check" version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" [[package]] name = "want" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" dependencies = [ "try-lock", ] [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "widestring" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c168940144dd21fd8046987c16a46a33d5fc84eec29ef9dcddc2ac9e31526b7c" [[package]] name = "widestring" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311" [[package]] name = "winapi" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ "winapi-i686-pc-windows-gnu", "winapi-x86_64-pc-windows-gnu", ] [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" dependencies = [ "windows-core", "windows-targets", ] [[package]] name = "windows-acl" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "177b1723986bcb4c606058e77f6e8614b51c7f9ad2face6f6fd63dd5c8b3cec3" dependencies = [ "field-offset", "libc", "widestring 0.4.3", "winapi", ] [[package]] name = "windows-core" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ "windows-targets", ] [[package]] name = "windows-service" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d24d6bcc7f734a4091ecf8d7a64c5f7d7066f45585c1861eba06449909609c8a" dependencies = [ "bitflags", "widestring 1.1.0", "windows-sys 0.52.0", ] [[package]] name = "windows-sys" version = 
"0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" dependencies = [ "windows_aarch64_gnullvm 0.42.2", "windows_aarch64_msvc 0.42.2", "windows_i686_gnu 0.42.2", "windows_i686_msvc 0.42.2", "windows_x86_64_gnu 0.42.2", "windows_x86_64_gnullvm 0.42.2", "windows_x86_64_msvc 0.42.2", ] [[package]] name = "windows-sys" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ "windows-targets", ] [[package]] name = "windows-targets" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ "windows_aarch64_gnullvm 0.52.6", "windows_aarch64_msvc 0.52.6", "windows_i686_gnu 0.52.6", "windows_i686_gnullvm", "windows_i686_msvc 0.52.6", "windows_x86_64_gnu 0.52.6", "windows_x86_64_gnullvm 0.52.6", "windows_x86_64_msvc 0.52.6", ] [[package]] name = "windows_aarch64_gnullvm" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" [[package]] name = "windows_aarch64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_aarch64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_i686_gnu" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] name = "windows_i686_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_i686_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_x86_64_gnu" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" [[package]] name = "windows_x86_64_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnullvm" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" [[package]] name = "windows_x86_64_gnullvm" 
version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" [[package]] name = "windows_x86_64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winreg" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76a1a57ff50e9b408431e8f97d5456f2807f8eb2a2cd79b06068fc87f8ecf189" dependencies = [ "cfg-if", "winapi", ] [[package]] name = "winres" version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b68db261ef59e9e52806f688020631e987592bd83619edccda9c47d42cde4f6c" dependencies = [ "toml", ] [[package]] name = "xml-rs" version = "0.8.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af4e2e2f7cba5a093896c1e150fbfe177d1883e7448200efb81d40b9d339ef26" [[package]] name = "zerocopy" version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ "byteorder", "zerocopy-derive", ] [[package]] name = "zerocopy-derive" version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", "syn", ] GuestProxyAgent-1.0.30/Cargo.toml000066400000000000000000000002541500521614600166340ustar00rootroot00000000000000[workspace] resolver = "2" members = [ "proxy_agent_shared", "proxy_agent", "proxy_agent_extension", "proxy_agent_setup", ] exclude = [ "debbuild" ] GuestProxyAgent-1.0.30/LICENSE000066400000000000000000000021651500521614600157140ustar00rootroot00000000000000 MIT License Copyright (c) Microsoft Corporation. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE GuestProxyAgent-1.0.30/NuGet.config000066400000000000000000000011551500521614600171160ustar00rootroot00000000000000 GuestProxyAgent-1.0.30/README.md000066400000000000000000000143561500521614600161730ustar00rootroot00000000000000# Microsoft Azure Guest Proxy Agent ## Introduction This project introduces the Guest Proxy Agent (GPA) to enhance the security of the Azure Instance Metadata Service and Azure Wireserver endpoints (available in Azure IaaS VM/VMSS at `169.254.169.254` and `168.63.129.16` respectively). Analogous metadata servers are offered from the majority of cloud providers. These services are used for providing metadata and bootstrapping VM credentials. As a result, they are frequently targeted by threat actors. Common vectors include confused deputy attacks (e.g. SSRF) against in guest workloads and sandbox escapes, which are of particular concern for hosted-on-behalf-of workloads where untrusted code is intentionally loaded into the VM. With metadata services, the trust boundary is the VM itself. Any software within the guest is authorized to request secrets from IMDS. VM owners are responsible for carefully sandboxing any software they run inside the VM and ensuring that external actors can't exfiltrate data. This is achievable, but in practice the complexity of the problem leads to mistakes at scale which in turn lead to exploits. While numerous defense in depth strategies exist, providing secrets over an unauthenticated HTTP API carries inherent risk. This project closes many of the most common vulnerabilities by addressing the root cause of these attacks and introducing strong Authentication (AuthN) and Authorization (AuthZ) concepts to cloud metadata services. ## Implementation The GPA hardens against these types of attacks by: - Limiting metadata access to a subset of the VM (applying the principle of least privileged access). - Switching from a "default-open" to "default-closed" model. For instance, with nested virtualization a misconfigured L2 VM that has access to the L1 VM's vNIC can communicate with a metadata service as the L1. With the GPA, a misconfigured L2 would no longer be able to gain access, as it would be unable to authenticate with the service. At provisioning time the metadata service establishes a trusted delegate within the guest (the GPA). A long-lived secret is negotiated to authenticate with the trusted delegate, and all requests to the metadata service must be endorsed by the delegate using an [HMAC](https://en.wikipedia.org/wiki/HMAC). This establishes a point-to-point trust relationship with strong AuthN. The GPA leverages [eBPF](https://ebpf.io/what-is-ebpf/) to intercept HTTP requests to the metadata services only (not intercept any other http traffic). eBPF enables the GPA to authoritatively verify the identity of the in guest software that made the request without introducing an additional kernel module. Using this information, it compares the identity of the client against an allow list defined as a part of the VM model in the Azure Resource Manager (ARM) and endorses requests that are authorized by transparently adding a signature header. This means that the feature can be enabled on existing workloads without breaking changes. 
- By default, the existing authorization levels are enforced: IMDS is open to all users and Wireserver is root / admin only. - Today this restriction is accomplished with firewall rules in the guest. This is still a default-open mechanism, because if that rule can be disabled or bypassed for any reason the metadata service will accept the request. The AuthN mechanism enabled here is default-closed. Bypassing interception maliciously or by error does not grant access to the metadata service. - Advanced AuthZ configuration to authorize specific in-guest processes and users to access only specific endpoints is supported by defining a custom allow list with RBAC semantics. ## Compatibility eBPF is available in Linux kernels 5.15+ and on Windows VMs by installing [eBPF-for-Windows](https://github.com/microsoft/ebpf-for-windows). The design is not dependent on any modern security hardware, but it can be further enhanced by hardware like vTPM when available. This project supports Azure VMs running: - Windows 10 or later - Windows Server 2019 or later - Ubuntu 20.04+ - Redhat 9+ - Flatcar - Rocky-Linux9+ - SUSE 15 SP4+ ## Architectural Overview The following diagram shows the basic architecture of this project and related components: ![Architectural Overview](doc/GuestProxyAgent.png) ## Development Refer to the instructions that correspond to the Operating System you wish to *target*: - [Getting Started Guide - Linux](/doc/GettingStartedLinux.md) - [Getting Started Guide - Windows](/doc/GettingStartedWindows.md) ## Contributing This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com. When you submit a pull request, a CLA bot will automatically determine whether you need to provide a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA. This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information, see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. ## Telemetry The GPA can emit diagnostic and audit telemetry from the VM locally and/or to Azure for analysis. The collected events are labeled `Azure GuestProxyAgent logs`. Engineering teams and support professionals can use this telemetry to understand metadata service usage in their workload, investigate issues, and detect hostile actors. ## Trademarks This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft trademarks or logos is subject to and must follow [Microsoft's Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks/usage/general). Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. Any use of third-party trademarks or logos is subject to those third parties' policies. 
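To make the endorsement flow described under Implementation concrete, the following is a minimal sketch, not the GPA's actual code: it computes an HMAC-SHA256 over the request line with the negotiated long-lived secret and emits the result as a header value. The header name, the exact fields being signed, and the use of the `hmac`, `sha2`, and `hex` crates are assumptions made purely for illustration.

```rust
// Illustrative only. Assumed dependencies: hmac = "0.12", sha2 = "0.10", hex = "0.4".
use hmac::{Hmac, Mac};
use sha2::Sha256;

type HmacSha256 = Hmac<Sha256>;

/// Sign the request line with the negotiated secret and return a hex-encoded signature.
/// What the real GPA signs, and which header it adds, are not specified here.
fn endorse(secret: &[u8], method: &str, path: &str) -> String {
    let mut mac = HmacSha256::new_from_slice(secret).expect("HMAC accepts any key length");
    mac.update(method.as_bytes());
    mac.update(b" ");
    mac.update(path.as_bytes());
    hex::encode(mac.finalize().into_bytes())
}

fn main() {
    // Stand-in for the long-lived secret negotiated at provisioning time.
    let secret = b"example-negotiated-secret";
    let signature = endorse(secret, "GET", "/metadata/instance?api-version=2021-02-01");
    // A proxy would transparently attach the signature to the outgoing request,
    // e.g. as a hypothetical "x-proxy-agent-signature" header:
    println!("x-proxy-agent-signature: {signature}");
}
```

Because the signature header is added transparently before the request is forwarded, in-guest clients keep calling IMDS and Wireserver exactly as before; the metadata service can then validate the HMAC using the same negotiated secret.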
GuestProxyAgent-1.0.30/SECURITY.md000066400000000000000000000053051500521614600164770ustar00rootroot00000000000000 ## Security Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below. ## Reporting Security Issues **Please do not report security vulnerabilities through public GitHub issues.** Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report). If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey). You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) * Full paths of source file(s) related to the manifestation of the issue * The location of the affected source code (tag/branch/commit or direct URL) * Any special configuration required to reproduce the issue * Step-by-step instructions to reproduce the issue * Proof-of-concept or exploit code (if possible) * Impact of the issue, including how an attacker might exploit the issue This information will help us triage your report more quickly. If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs. ## Preferred Languages We prefer all communications to be in English. ## Policy Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd). GuestProxyAgent-1.0.30/SUPPORT.md000066400000000000000000000005711500521614600164040ustar00rootroot00000000000000# Support ## How to file issues and get help This project uses GitHub Issues to track bugs and feature requests. Please search the existing issues before filing new issues to avoid duplicates. For new issues, file your bug or feature request as a new Issue. ## Microsoft Support Policy Support for this **PROJECT or PRODUCT** is limited to the resources listed above. 
GuestProxyAgent-1.0.30/Setup/000077500000000000000000000000001500521614600160035ustar00rootroot00000000000000GuestProxyAgent-1.0.30/Setup/Windows/000077500000000000000000000000001500521614600174355ustar00rootroot00000000000000GuestProxyAgent-1.0.30/Setup/Windows/InstallOrUpdateProxyAgent.vbs000066400000000000000000000010321500521614600252400ustar00rootroot00000000000000Dim proxyAgent set proxyAgent = GetScriptObject(WScript, "ProxyAgent.wsf", "ProxyAgent") set proxyAgent.Script = WScript proxyAgent.Initialize "." proxyAgent.ConfigureProxyAgent() proxyAgent.PostInstallation() Function GetScriptObject(WScript, scriptPath, componentId) Dim FSO, scriptDir Set FSO = CreateObject("Scripting.FileSystemObject") scriptDir = FSO.GetParentFolderName(WScript.ScriptFullName) Set GetScriptObject = GetObject("script:" & FSO.BuildPath(scriptDir, scriptPath) & "#" & componentId) End Function GuestProxyAgent-1.0.30/Setup/Windows/InstallOrUpdateProxyAgentOnly.vbs000066400000000000000000000007771500521614600261210ustar00rootroot00000000000000Dim proxyAgent set proxyAgent = GetScriptObject(WScript, "ProxyAgent.wsf", "ProxyAgent") set proxyAgent.Script = WScript proxyAgent.Initialize "." proxyAgent.ConfigureProxyAgentOnly() Function GetScriptObject(WScript, scriptPath, componentId) Dim FSO, scriptDir Set FSO = CreateObject("Scripting.FileSystemObject") scriptDir = FSO.GetParentFolderName(WScript.ScriptFullName) Set GetScriptObject = GetObject("script:" & FSO.BuildPath(scriptDir, scriptPath) & "#" & componentId) End Function GuestProxyAgent-1.0.30/Setup/Windows/ProxyAgent.wsf000066400000000000000000001473301500521614600222660ustar00rootroot00000000000000īģŋ Configures the Proxy Agent Service. GuestProxyAgent-1.0.30/Setup/Windows/Tracing.wsf000066400000000000000000000056531500521614600215560ustar00rootroot00000000000000īģŋ GuestProxyAgent-1.0.30/Setup/Windows/UninstallProxyAgent.vbs000066400000000000000000000012141500521614600241410ustar00rootroot00000000000000Dim proxyAgent set proxyAgent = GetScriptObject(WScript, "ProxyAgent.wsf", "ProxyAgent") set proxyAgent.Script = WScript proxyAgent.Initialize "." ' Remove the GuestProxyAgent service only, ' Do not delete the ProxyAgent main folder to avoid delete the local Logs and Keys folder proxyAgent.UninstallProxyAgent "ServiceOnly" Function GetScriptObject(WScript, scriptPath, componentId) Dim FSO, scriptDir Set FSO = CreateObject("Scripting.FileSystemObject") scriptDir = FSO.GetParentFolderName(WScript.ScriptFullName) Set GetScriptObject = GetObject("script:" & FSO.BuildPath(scriptDir, scriptPath) & "#" & componentId) End FunctionGuestProxyAgent-1.0.30/Setup/Windows/UninstallProxyAgentOnly.vbs000066400000000000000000000012201500521614600250000ustar00rootroot00000000000000Dim proxyAgent set proxyAgent = GetScriptObject(WScript, "ProxyAgent.wsf", "ProxyAgent") set proxyAgent.Script = WScript proxyAgent.Initialize "." 
' Remove the GuestProxyAgent service only, ' Do not delete the ProxyAgent main folder to avoid delete the local Logs and Keys folder proxyAgent.UninstallProxyAgentOnly "ServiceOnly" Function GetScriptObject(WScript, scriptPath, componentId) Dim FSO, scriptDir Set FSO = CreateObject("Scripting.FileSystemObject") scriptDir = FSO.GetParentFolderName(WScript.ScriptFullName) Set GetScriptObject = GetObject("script:" & FSO.BuildPath(scriptDir, scriptPath) & "#" & componentId) End FunctionGuestProxyAgent-1.0.30/Setup/Windows/Utility.vbs000066400000000000000000000110171500521614600216140ustar00rootroot00000000000000Option Explicit Const TemporaryFolder = 2 Const ForReading = 1 ' execute the given command, collecting the results to an object Function ExecuteWithResults (strCommand) Dim FSO, WshShell Dim outFile, errFile Dim runCmd, oResults Set WshShell = CreateObject("WScript.Shell") Set FSO = CreateObject("Scripting.FileSystemObject") Set oResults = new ExecResults oResults.StdOut = "" oResults.StdErr = "" oResults.ExitCode = Null outFile = CreateTempFile(FSO) errFile = CreateTempFile(FSO) ' build a command that will capture the stdout, stderr runCmd = "%comspec% /c """ & strCommand & " > """ & outFile & """ 2> """ & errFile & """""" ' run the command oResults.ExitCode = WshShell.Run(runCmd, 0, True) ' read the result streams oResults.StdOut = ReadTempFile(FSO, outFile) oResults.StdErr = ReadTempFile(FSO, errFile) Set ExecuteWithResults = oResults End Function Function ExecuteAndTraceWithResults(strCommand, tracer) Dim oResults, commandElem, outputElem, errOutputElem, eventType Set oResults = ExecuteWithResults(strCommand) Set ExecuteAndTraceWithResults = oResults If oResults.ExitCode = 0 Then eventType = "INFO" Else eventType = "ERROR" Set oTraceEvent = tracer.CreateEvent(eventType) Set commandElem = oTraceEvent.ownerDocument.CreateElement("Command") commandElem.appendChild(oTraceEvent.ownerDocument.CreateTextNode(strCommand)) Set outputElem = oTraceEvent.ownerDocument.CreateElement("Output") If Not IsNull(oResults.StdOut) Then outputElem.appendChild(oTraceEvent.ownerDocument.CreateTextNode(CStr(oResults.StdOut))) Set errOutputElem = oTraceEvent.ownerDocument.CreateElement("ErrorOutput") If Not IsNull(oResults.StdErr) Then errOutputElem.appendChild(oTraceEvent.ownerDocument.CreateTextNode(CStr(oResults.StdErr))) With oTraceEvent.appendChild(oTraceEvent.ownerDocument.CreateElement("ExecuteAndTraceWithResults")) .appendChild(commandElem) .appendChild(outputElem) .appendChild(errOutputElem) End With tracer.TraceEvent oTraceEvent End Function Class ExecResults Dim StdOut Dim StdErr Dim ExitCode End Class Function CreateTempFile(FSO) Dim folder, file Set folder = FSO.GetSpecialFolder(TemporaryFolder) file = FSO.GetTempName CreateTempFile = FSO.BuildPath(folder, file) End Function Private Function ReadTempFile(FSO, file) Dim stream Dim str str = Null Set stream = FSO.OpenTextFile(file, ForReading, False) If Not stream.AtEndOfStream Then str = stream.ReadAll() End If stream.Close FSO.DeleteFile file ReadTempFile = str End Function Function GetScriptObject(WScript, scriptPath, componentId) Dim FSO, scriptDir Set FSO = CreateObject("Scripting.FileSystemObject") scriptDir = FSO.GetParentFolderName(WScript.ScriptFullName) Set GetScriptObject = GetObject("script:" & FSO.BuildPath(scriptDir, scriptPath) & "#" & componentId) End Function Function TraceError(objTrace, message) Dim oTraceEvent TraceError = Err.number If Err.number <> 0 Then Set oTraceEvent = objTrace.CreateEvent("ERROR") With 
oTraceEvent.appendChild(oTraceEvent.ownerDocument.createElement("UnhandledError")) With .appendChild(oTraceEvent.ownerDocument.createElement("Message")) .text = message End With With .appendChild(oTraceEvent.ownerDocument.createElement("Number")) .text = Err.number End With With .appendChild(oTraceEvent.ownerDocument.createElement("Description")) .text = Err.Description End With With .appendChild(oTraceEvent.ownerDocument.createElement("Source")) .text = Err.Source End With End With objTrace.TraceEvent oTraceEvent Err.Clear End If End Function ' gets the active operating system ' function not supported in specialize pass Function GetCurrentOperatingSystem Dim objWMIService, colOS, objItem Set GetCurrentOperatingSystem = Nothing Set objWMIService = GetObject("winmgmts:\\.\root\cimv2") Set colOS = objWMIService.ExecQuery ("Select * from Win32_OperatingSystem") For Each objItem in colOS Set GetCurrentOperatingSystem = objItem Exit Function Next End Function Function LeftPad( strText, intLen, chrPad ) 'LeftPad( "1234", 7, "x" ) = "xxx1234" 'LeftPad( "1234", 3, "x" ) = "234" LeftPad = Right( String( intLen, chrPad ) & strText, intLen ) End Function GuestProxyAgent-1.0.30/build-linux.sh000077500000000000000000000236421500521614600175050ustar00rootroot00000000000000#!/bin/bash # Copyright (c) Microsoft Corporation # SPDX-License-Identifier: MIT # Prints then runs the command based on: https://stackoverflow.com/questions/31656645/how-do-i-echo-directly-on-standard-output-inside-a-shell-function runthis(){ echo "$@" ## Run the command and redirect its error output "$@" >&2 } echo "======= Get the directory of the script" root_path="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" out_path=$root_path"/out" echo "Set out_path to: $out_path" echo "======= Set Build Configuration" Configuration=$1 if [ "$Configuration" != "release" ] then Configuration="debug" fi release_flag="" if [ "$Configuration" = "release" ] then release_flag="--release" fi echo "======= Set Build Target" Target=$2 rpm_target="x86_64" build_target="x86_64-unknown-linux-musl" gnu_target="x86_64-unknown-linux-gnu" if [ "$Target" == "arm64" ] then build_target="aarch64-unknown-linux-musl" rpm_target="aarch64" gnu_target="aarch64-unknown-linux-gnu" else Target="amd64" fi out_dir=$out_path/$build_target/$Configuration echo "The out_dir is: $out_dir" CleanBuild=$3 if [ "$CleanBuild" = "clean" ] then echo "======= delete old files" runthis rm -rf $out_dir fi BuildEnvironment=$4 if [ "$BuildEnvironment" = "" ] then BuildEnvironment="normal" fi echo "======= BuildEnvironment is $BuildEnvironment" echo "======= rustup update to a particular version" rustup_version=1.85.0 rustup update $rustup_version # This command sets a specific Rust toolchain version for the current directory. # It means that whenever you are in this directory, Rust commands will use the specified toolchain version, regardless of the global default. rustup override set $rustup_version rustup target install $build_target cargo install cargo-deb echo "======= cargo fmt & clippy" runthis rustup component add --toolchain $rustup_version-$gnu_target rustfmt cargo fmt --all runthis rustup component add --toolchain $rustup_version-$gnu_target clippy cargo clippy -- -D warnings error_code=$? 
if [ $error_code -ne 0 ] then echo "cargo clippy with exit-code: $error_code" exit $error_code fi echo "======= build proxy_agent_shared" cargo_toml=$root_path/proxy_agent_shared/Cargo.toml echo "Defined: cargo_toml=$cargo_toml" runthis cargo build $release_flag --manifest-path $cargo_toml --target-dir $out_path --target $build_target error_code=$? if [ $error_code -ne 0 ] then echo "cargo build proxy_agent_shared failed with exit-code: $error_code" exit $error_code fi echo "======= run rust proxy_agent_shared unit tests" runthis cargo test --all-features $release_flag --target $build_target --manifest-path $cargo_toml --target-dir $out_path -- --test-threads=1 error_code=$? if [ $error_code -ne 0 ] then echo "cargo test proxy_agent_shared with exit-code: $error_code" exit $error_code fi echo "======= build ebpf program after the proxy_agent_shared is built to let $out_dir created." ebpf_path=$root_path/linux-ebpf if [ "$Target" == "arm64" ] then runthis clang -g -target bpf -Werror -O2 -D__TARGET_ARCH_arm64 -I/usr/include/aarch64-linux-gnu -c $ebpf_path/ebpf_cgroup.c -o $out_dir/ebpf_cgroup.o else runthis clang -g -target bpf -Werror -O2 -D__TARGET_ARCH_x86 -c $ebpf_path/ebpf_cgroup.c -o $out_dir/ebpf_cgroup.o fi error_code=$? if [ $error_code -ne 0 ] then echo "call clang failed with exit-code: $error_code" exit $error_code fi llvm-objdump -h $out_dir/ebpf_cgroup.o ls -l $out_dir/ebpf_cgroup.o echo "======= build proxy_agent" cargo_toml=$root_path/proxy_agent/Cargo.toml echo "Defined: cargo_toml=$cargo_toml" runthis cargo build $release_flag --manifest-path $cargo_toml --target-dir $out_path --target $build_target error_code=$? if [ $error_code -ne 0 ] then echo "cargo build proxy_agent failed with exit-code: $error_code" exit $error_code fi echo "======= copy config file for Linux platform" cp -f -T $root_path/proxy_agent/config/GuestProxyAgent.linux.json $out_dir/proxy-agent.json echo "======= copy files for run/debug proxy_agent Unit test" runthis cp -f $out_dir/* $out_dir/deps/ runthis cp -f -r $out_dir/* $root_path/proxy_agent/target/$Configuration/ echo "======= run rust proxy_agent tests" Environment="$BuildEnvironment" runthis cargo test --all-features $release_flag --target $build_target --manifest-path $cargo_toml --target-dir $out_path -- --test-threads=1 error_code=$? if [ $error_code -ne 0 ] then echo "cargo test proxy_agent with exit-code: $error_code" exit $error_code fi echo "======= build proxy_agent_extension" cargo_toml=$root_path/proxy_agent_extension/Cargo.toml extension_src_path=$root_path/proxy_agent_extension/src/linux echo "Defined: cargo_toml=$cargo_toml" runthis cargo build $release_flag --manifest-path $cargo_toml --target-dir $out_path --target $build_target error_code=$? if [ $error_code -ne 0 ] then echo "cargo build proxy_agent_extension failed with exit-code: $error_code" exit $error_code fi echo "======= copy files for run/debug proxy_agent_extension Unit test" runthis cp -f $out_dir/* $out_dir/deps/ runthis cp -f -r $out_dir/* $root_path/proxy_agent_extension/target/$Configuration/ echo "======= run rust proxy_agent_extension tests" runthis cargo test --all-features $release_flag --target $build_target --manifest-path $cargo_toml --target-dir $out_path -- --test-threads=1 error_code=$? 
if [ $error_code -ne 0 ] then echo "cargo test proxy_agent_extension with exit-code: $error_code" exit $error_code fi echo "======= copy config file for Linux platform" cp -f -r $root_path/proxy_agent_setup/src/linux/* $out_dir/ echo "======= build proxy_agent_setup" cargo_toml=$root_path/proxy_agent_setup/Cargo.toml echo "Defined: cargo_toml=$cargo_toml" runthis cargo build $release_flag --manifest-path $cargo_toml --target-dir $out_path --target $build_target error_code=$? if [ $error_code -ne 0 ] then echo "cargo build proxy_agent_setup failed with exit-code: $error_code" exit $error_code fi echo "======= build e2e test solution" runthis dotnet build $root_path/e2etest/GuestProxyAgentTest.sln --configuration $Configuration -o $out_dir/e2etest -v normal error_code=$? if [ $error_code -ne 0 ] then echo "dotnet build failed with exit-code: $error_code" exit $error_code fi echo "======= prepare out-package folder structure" out_package_dir=$out_dir/package if [ ! -d $out_package_dir ]; then mkdir $out_package_dir fi echo "======= copy to package folder" cp -f $out_dir/proxy_agent_setup $out_package_dir/ cp -f $out_dir/azure-proxy-agent.service $out_package_dir/ out_package_proxyagent_dir=$out_package_dir/ProxyAgent if [ ! -d $out_package_proxyagent_dir ]; then mkdir $out_package_proxyagent_dir fi echo "======= copy to proxyagent folder" cp -f $out_dir/azure-proxy-agent $out_package_proxyagent_dir/ cp -f $out_dir/proxy-agent.json $out_package_proxyagent_dir/ cp -f $out_dir/ebpf_cgroup.o $out_package_proxyagent_dir/ echo "======= generate rpm package" echo "Generating rpm package -------------- " pkgversion=$($out_dir/azure-proxy-agent --version) echo "Package version: '$pkgversion'" rootdir=$(pwd) rm -rf build mkdir build pushd build mkdir azure-proxy-agent pushd azure-proxy-agent cp -rf $out_package_dir/ ./ popd mv azure-proxy-agent azure-proxy-agent_${pkgversion} tar -czf azure-proxy-agent_${pkgversion}.tar.gz azure-proxy-agent_${pkgversion} popd pushd rpmbuild mkdir SOURCES BUILD RPMS SRPMS cp ../build/azure-proxy-agent_${pkgversion}.tar.gz SOURCES/ rpmbuild --target $rpm_target --define "_topdir ${rootdir}/rpmbuild" --define "pkgversion ${pkgversion}" -ba SPECS/azure-proxy-agent.spec error_code=$? 
if [ $error_code -ne 0 ] then echo "rpmbuild failed with exit-code: $error_code" exit $error_code fi popd rm -rf build echo "======= copy rpm package file to Package folder" cp -f $rootdir/rpmbuild/RPMS/${rpm_target}/azure-proxy-agent-${pkgversion}-0.${rpm_target}.rpm $out_package_dir/ echo "======= generate deb package" echo "Generating deb package -------------- " rm -rf debbuild mkdir debbuild pushd debbuild mkdir -p DEBIAN src cp -rf $rootdir/pkg_debian/* ./DEBIAN/ cp -rf $rootdir/proxy_agent/Cargo.toml ./Cargo.toml cp -rf $rootdir/proxy_agent/src/* ./src/ # cargo deb --no-build command still requires ./src/main.rs cp -f $out_package_proxyagent_dir/azure-proxy-agent ./ cp -f $out_package_proxyagent_dir/proxy-agent.json ./ cp -f $out_package_proxyagent_dir/ebpf_cgroup.o ./ cp -f $out_package_dir/azure-proxy-agent.service ./DEBIAN/ sed -i "s/pkgversion/${pkgversion}/g" DEBIAN/control # replace pkgversion with actual version sed -i "s/pkgversion/${pkgversion}/g" DEBIAN/postinst # replace pkgversion with actual version sed -i "s/pkgversion/${pkgversion}/g" Cargo.toml # replace pkgversion with actual version echo cargo deb -v --manifest-path $rootdir/debbuild/Cargo.toml --no-build -o $out_package_dir --target $build_target cargo deb -v --manifest-path $rootdir/debbuild/Cargo.toml --no-build -o $out_package_dir --target $build_target error_code=$? if [ $error_code -ne 0 ] then echo "cargo deb: failed with exit-code: $error_code" exit $error_code fi popd rm -rf $rootdir/debbuild echo "======= copy to proxyagent extension folder" out_package_proxyagent_extension_dir=$out_package_dir/ProxyAgent_Extension if [ ! -d $out_package_proxyagent_extension_dir ]; then mkdir $out_package_proxyagent_extension_dir fi cp -f $extension_src_path/HandlerManifest.json $out_package_proxyagent_extension_dir/ for f in $extension_src_path/*.sh; do cp -f $f $out_package_proxyagent_extension_dir/ done cp -f $out_dir/ProxyAgentExt $out_package_proxyagent_extension_dir/ echo "======= copy e2e test project to Package folder" cp -rf $out_dir/e2etest/ $out_package_dir/e2etest/ echo "======= Generate build-configuration-linux-$Target.zip file with relative path within the zip file" cd $out_package_dir zip -r $out_dir/build-$Configuration-linux-$Target.zip . 
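# Quick reference for this script's positional arguments, as parsed at the top:
#   ./build-linux.sh [Configuration] [Target] [CleanBuild] [BuildEnvironment]
#     Configuration    "release" for a release build; any other value builds debug
#     Target           "arm64" for aarch64-unknown-linux-musl; otherwise amd64 (x86_64-unknown-linux-musl)
#     CleanBuild       "clean" removes the previous output directory for the target before building
#     BuildEnvironment  label passed as the Environment variable to the proxy_agent tests; defaults to "normal"
#   Example: ./build-linux.sh release amd64 clean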
GuestProxyAgent-1.0.30/build.cmd000066400000000000000000000311261500521614600164720ustar00rootroot00000000000000REM Copyright (c) Microsoft Corporation REM SPDX-License-Identifier: MIT @echo off SET root_path=%~dp0 set Configuration=%1 Set Target=%2 set CleanBuild=%3 set ContinueAtConvertBpfToNative=%4 set out_path=%5 if "%Configuration%"=="" (SET Configuration=debug) echo Configuration=%Configuration% SET eBPF_Platform="x64" Set build_target=x86_64-pc-windows-msvc if "%Target%"=="arm64" ( Set build_target=aarch64-pc-windows-msvc SET eBPF_Platform="arm64" ) else ( Set build_target=x86_64-pc-windows-msvc Set Target=amd64 ) if "%out_path%"=="" (SET out_path=%root_path%out) SET out_dir=%out_path%\%build_target%\%Configuration% echo out_path=%out_path% echo out_dir=%out_dir% REM Set the path to the eBPF-for-Windows binaries and include files, REM We build ARM64 binaries on x64 machine, so we need to set the path to the x64 binaries SET eBPF_for_Windows_bin_path=%root_path%packages\eBPF-for-Windows.x64.0.21.0\build\native\bin SET eBPF_for_Windows_inc_path=%root_path%packages\eBPF-for-Windows.%eBPF_Platform%.0.21.0\build\native\include SET bin_skim_path=%root_path%packages\Microsoft.CodeAnalysis.BinSkim.1.9.5\tools\netcoreapp3.1\win-x64 if "%CleanBuild%"=="clean" ( echo ======= delete old files echo RD /S /Q %out_dir% RD /S /Q %out_dir% echo RD /S /Q %root_path%packages RD /S /Q %root_path%packages ) echo ======= nuget restore call nuget restore if %ERRORLEVEL% NEQ 0 ( echo call nuget restore with exit-code: %errorlevel% exit /b %errorlevel% ) if "%Target%"=="arm64" ( REM Install the latest Windows Driver Kit (WDK) NuGet packages nuget.exe install Microsoft.Windows.WDK.arm64 -Version 10.0.26100.2454 ) else ( REM Install the latest Windows Driver Kit (WDK) NuGet packages nuget.exe install Microsoft.Windows.WDK.x64 -Version 10.0.26100.2454 ) echo ======= rustup update to a particular version of the Rust toolchain SET rustup_version=1.85.0 call rustup update %rustup_version% REM This command sets a specific Rust toolchain version for the current directory. REM It means that whenever you are in this directory, Rust commands will use the specified toolchain version, regardless of the global default. 
call rustup override set %rustup_version% call rustup target add %build_target% echo ======= create out path folder and subfolder if not exist "%out_path%" (md "%out_path%") if not exist "%out_dir%" (md "%out_dir%") echo ======= Prepare out-package folder structure SET out_package_dir=%out_dir%\package if not exist "%out_package_dir%" (md "%out_package_dir%") SET out_package_proxyagent_dir="%out_package_dir%"\ProxyAgent if not exist "%out_package_proxyagent_dir%" (md "%out_package_proxyagent_dir%") echo ======= copy VB Scripts to Package folder xcopy /Y %root_path%\Setup\Windows\*.* %out_package_dir%\ echo ======= build ebpf program SET ebpf_path=%root_path%\ebpf echo call clang -I"%ebpf_path%" -I "%eBPF_for_Windows_inc_path%" -target bpf -Werror -O2 -c %ebpf_path%\redirect.bpf.c -o %out_dir%\redirect.bpf.o call clang -I"%ebpf_path%" -I "%eBPF_for_Windows_inc_path%" -target bpf -Werror -O2 -c %ebpf_path%\redirect.bpf.c -o %out_dir%\redirect.bpf.o if %ERRORLEVEL% NEQ 0 ( echo call clang failed with exit-code: %errorlevel% exit /b %errorlevel% ) echo ======= copy redirect.bpf.o xcopy /Y %out_dir%\redirect.bpf.o %out_package_proxyagent_dir%\ echo ======= convert redirect.bpf.o to redirect.bpf.sys call %eBPF_for_Windows_bin_path%\export_program_info.exe --clear call %eBPF_for_Windows_bin_path%\export_program_info.exe echo call powershell.exe %eBPF_for_Windows_bin_path%\Convert-BpfToNative.ps1 -OutDir "%out_dir%" -FileName redirect.bpf.o -IncludeDir "%eBPF_for_Windows_inc_path%" -Platform %eBPF_Platform% -Packages "%root_path%packages" call powershell.exe %eBPF_for_Windows_bin_path%\Convert-BpfToNative.ps1 -OutDir "%out_dir%" -FileName redirect.bpf.o -IncludeDir "%eBPF_for_Windows_inc_path%" -Platform %eBPF_Platform% -Packages "%root_path%packages" if %ERRORLEVEL% NEQ 0 ( echo call Convert-BpfToNative.ps1 failed with exit-code: %errorlevel% if "%ContinueAtConvertBpfToNative%"=="" ( exit /b %errorlevel% ) echo Skip the error and continue to build other projects ) echo ======= copy redirect.bpf.sys xcopy /Y %out_dir%\redirect.bpf.sys %out_package_proxyagent_dir%\ xcopy /Y %out_dir%\redirect.bpf.pdb %out_package_proxyagent_dir%\ echo ======= cargo fmt and clippy echo call rustup component add --toolchain %rustup_version%-x86_64-pc-windows-msvc rustfmt call rustup component add --toolchain %rustup_version%-x86_64-pc-windows-msvc rustfmt echo call cargo fmt --all cargo fmt --all echo call rustup component add --toolchain %rustup_version%-x86_64-pc-windows-msvc clippy call rustup component add --toolchain %rustup_version%-x86_64-pc-windows-msvc clippy echo call cargo clippy -- -D warnings cargo clippy -- -D warnings if %ERRORLEVEL% NEQ 0 ( echo cargo clippy failed with exit-code: %errorlevel% exit /b %errorlevel% ) echo ======= build proxy_agent_shared set cargo_toml=%root_path%proxy_agent_shared\Cargo.toml SET release_flag= if "%Configuration%"=="release" (SET release_flag=--release) echo cargo_toml=%cargo_toml% echo call cargo build %release_flag% --manifest-path %cargo_toml% --target-dir %out_path% --target %build_target% call cargo build %release_flag% --manifest-path %cargo_toml% --target-dir %out_path% --target %build_target% if %ERRORLEVEL% NEQ 0 ( echo call cargo build proxy_agent_shared failed with exit-code: %errorlevel% exit /b %errorlevel% ) if "%Target%"=="arm64" ( echo ======= skip running proxy_agent_shared arm64 tests on amd64 machine ) else ( echo ======= copy files for run/debug proxy_agent_shared Unit test in VS Code echo xcopy /Y /C /Q %out_dir% %out_dir%\deps\ xcopy /Y /C /Q 
%out_dir% %out_dir%\deps\ echo xcopy /Y /S /C /Q %out_dir% %root_path%proxy_agent_shared\target\%Configuration%\ xcopy /Y /S /C /Q %out_dir% %root_path%proxy_agent_shared\target\%Configuration%\ echo ======= run rust proxy_agent_shared tests echo call cargo test --all-features %release_flag% --manifest-path %cargo_toml% --target-dir %out_path% --target %build_target% -- --test-threads=1 --nocapture call cargo test --all-features %release_flag% --manifest-path %cargo_toml% --target-dir %out_path% --target %build_target% -- --test-threads=1 --nocapture if %ERRORLEVEL% NEQ 0 ( echo call cargo test proxy_agent_shared with exit-code: %errorlevel% exit /b %errorlevel% ) ) echo ======= copy config file for windows platform REM Adding a wildcard (*) to the end of the destination will suppress this prompt and default to copying as a file: xcopy /Y %root_path%proxy_agent\config\GuestProxyAgent.windows.json %out_dir%\GuestProxyAgent.json* echo ======= build proxy_agent set cargo_toml=%root_path%proxy_agent\Cargo.toml SET release_flag= if "%Configuration%"=="release" (SET release_flag=--release) echo cargo_toml=%cargo_toml% echo call cargo build %release_flag% --manifest-path %cargo_toml% --target-dir %out_path% --target %build_target% call cargo build %release_flag% --manifest-path %cargo_toml% --target-dir %out_path% --target %build_target% if %ERRORLEVEL% NEQ 0 ( echo call cargo build proxy_agent failed with exit-code: %errorlevel% exit /b %errorlevel% ) if "%Target%"=="arm64" ( echo ======= skip running proxy_agent arm64 tests on amd64 machine ) else ( echo ======= copy files for run/debug proxy_agent Unit test echo xcopy /Y /C /Q %out_dir% %out_dir%\deps\ xcopy /Y /C /Q %out_dir% %out_dir%\deps\ echo xcopy /Y /S /C /Q %out_dir% %root_path%proxy_agent\target\%Configuration%\ xcopy /Y /S /C /Q %out_dir% %root_path%proxy_agent\target\%Configuration%\ echo ======= run rust proxy_agent tests echo call cargo test --all-features %release_flag% --manifest-path %cargo_toml% --target-dir %out_path% --target %build_target% -- --test-threads=1 --nocapture call cargo test --all-features %release_flag% --manifest-path %cargo_toml% --target-dir %out_path% --target %build_target% -- --test-threads=1 --nocapture if %ERRORLEVEL% NEQ 0 ( echo call cargo test proxy_agent with exit-code: %errorlevel% exit /b %errorlevel% ) ) echo ======= build proxy_agent_extension SET extension_root_path=%root_path%proxy_agent_extension SET extension_src_path=%root_path%proxy_agent_extension\src\windows set cargo_toml=%extension_root_path%\Cargo.toml echo cargo_toml=%cargo_toml% echo call cargo build %release_flag% --manifest-path %cargo_toml% --target-dir %out_path% --target %build_target% call cargo build %release_flag% --manifest-path %cargo_toml% --target-dir %out_path% --target %build_target% if %ERRORLEVEL% NEQ 0 ( echo call cargo build proxy_agent_extension failed with exit-code: %errorlevel% exit /b %errorlevel% ) if "%Target%"=="arm64" ( echo ======= skip running proxy_agent_extension arm64 tests on amd64 machine ) else ( echo ======= copy files for run/debug proxy_agent_extension Unit test echo xcopy /Y /C /Q %out_dir% %out_dir%\deps\ xcopy /Y /C /Q %out_dir% %out_dir%\deps\ echo xcopy /Y /S /C /Q %out_dir% %root_path%proxy_agent_extension\target\%Configuration%\ xcopy /Y /S /C /Q %out_dir% %root_path%proxy_agent_extension\target\%Configuration%\ echo ======= run rust proxy_agent_extension tests echo call cargo test --all-features %release_flag% --manifest-path %cargo_toml% --target-dir %out_path% --target %build_target% 
-- --test-threads=1 --nocapture call cargo test --all-features %release_flag% --manifest-path %cargo_toml% --target-dir %out_path% --target %build_target% -- --test-threads=1 --nocapture if %ERRORLEVEL% NEQ 0 ( echo call cargo test proxy_agent_extension with exit-code: %errorlevel% exit /b %errorlevel% ) ) echo ======= build proxy_agent_setup set cargo_toml=%root_path%proxy_agent_setup\Cargo.toml SET release_flag= if "%Configuration%"=="release" (SET release_flag=--release) echo cargo_toml=%cargo_toml% echo call cargo build %release_flag% --manifest-path %cargo_toml% --target-dir %out_path% --target %build_target% call cargo build %release_flag% --manifest-path %cargo_toml% --target-dir %out_path% --target %build_target% if %ERRORLEVEL% NEQ 0 ( echo call cargo build proxy_agent_setup failed with exit-code: %errorlevel% exit /b %errorlevel% ) echo ======= restore e2e test project dependencies echo call dotnet.exe restore %root_path%\e2etest\GuestProxyAgentTest.sln -v normal call dotnet.exe restore %root_path%\e2etest\GuestProxyAgentTest.sln if %ERRORLEVEL% NEQ 0 ( echo call dotnet.exe restore failed with exit-code: %errorlevel% exit /b %errorlevel% ) echo ======= build e2e test project SET out_e2etest_dir=%out_dir%\e2etest echo call dotnet.exe build %root_path%\e2etest\GuestProxyAgentTest.sln --no-restore --configuration %Configuration% -o %out_e2etest_dir% call dotnet.exe build %root_path%\e2etest\GuestProxyAgentTest.sln --no-restore --configuration %Configuration% -o %out_e2etest_dir% if %ERRORLEVEL% NEQ 0 ( echo call dotnet.exe build failed with exit-code: %errorlevel% exit /b %errorlevel% ) dir /S /B %out_e2etest_dir%\ echo ======= copy setup tool to Package folder xcopy /Y %out_dir%\proxy_agent_setup.exe %out_package_dir%\ xcopy /Y %out_dir%\proxy_agent_setup.pdb %out_package_dir%\ echo ======= copy to ProxyAgent folder xcopy /Y %out_dir%\azure-proxy-agent.exe %out_package_proxyagent_dir%\GuestProxyAgent.exe* xcopy /Y %out_dir%\azure_proxy_agent.pdb %out_package_proxyagent_dir%\GuestProxyAgent.pdb* xcopy /Y %out_dir%\GuestProxyAgent.json %out_package_proxyagent_dir%\ SET out_package_proxyagent_extension_dir=%out_package_dir%\ProxyAgent_Extension if not exist "%out_package_proxyagent_extension_dir%" (md "%out_package_proxyagent_extension_dir%") echo ======= copy ProxyAgent Extension files xcopy /Y %extension_src_path%\HandlerManifest.json %out_package_proxyagent_extension_dir%\ for %%F in (%extension_src_path%\*.cmd) do ( echo Found file: %%F xcopy /Y %%F %out_package_proxyagent_extension_dir%\ ) xcopy /Y %out_dir%\ProxyAgentExt.exe %out_package_proxyagent_extension_dir%\ echo ======= copy e2e test project to Package folder SET out_package_e2etest_dir=%out_package_dir%\e2etest echo xcopy /Y /S /C /Q %out_e2etest_dir% %out_package_e2etest_dir%\ xcopy /Y /S /C /Q %out_e2etest_dir% %out_package_e2etest_dir%\ echo ======= run binskim command call %bin_skim_path%\BinSkim.exe analyze %out_package_proxyagent_dir%\GuestProxyAgent.exe --output %out_package_proxyagent_dir%\GuestProxyAgent.exe.binskim.json --rich-return-code=true --force echo ======= Generate build-configuration.zip file call powershell.exe Compress-Archive -Path "%out_package_dir%" -DestinationPath "%out_dir%"\build-%Configuration%-windows-%Target%.zip" -ForceGuestProxyAgent-1.0.30/doc/000077500000000000000000000000001500521614600154505ustar00rootroot00000000000000GuestProxyAgent-1.0.30/doc/GPA Arch Diagram.vsdx000066400000000000000000001056621500521614600211620ustar00rootroot00000000000000PK!@žƒŪŖ[Content_Types].xml ĸ( 
GuestProxyAgent-1.0.30/doc/GettingStartedLinux.md

# Getting Started with Linux

Developing outside the container isn't recommended. See [Local Dev](#Local-Dev) for details.

## Dev Container

We use Ubuntu as the base image and validate two methods for consuming the build image. Ensure you have `docker-compose` installed and can download ~5GB on first run for the Linux container.

The repo provides a build image for Linux. Clone the repo locally, then launch the container. Your local repo will be mounted with `rw`.

> On a Windows host, install with `winget install -e --id Docker.DockerDesktop`

### With Dev Containers

Use `/.devcontainer/devcontainer.json`. Launch it however you prefer / whatever is appropriate for your choice of tools. If using VS Code with the repo already opened as your workspace, you can open the command palette and select `Dev Container: Reopen in Container`.

### Manually

```shell
cd docker/linux
docker-compose build
docker-compose up --detach
docker-compose exec gpalinuxdev /bin/bash
```

### Once Within Container

The full build, all tests, and all packaging can be run with:

```shell
chmod +x ./build-linux.sh
./build-linux.sh
```

> A `uname -r` matched linux headers package isn't available under WSL2, so the
> generic package will be used instead. If you're still having issues building, or generic doesn't work with a different
> base image, you can disable the WSL2 backend in Docker Desktop's settings (note this will come with a performance
> penalty).

## Local Dev

The dockerfile relies upon [this script](../docker/linux/install.sh) which you can try to run directly. Keep in mind it and the build script make changes to your system which may be undesired and your results may vary. If you choose to use the script, run it as root to set up your machine, then follow the [Once Within Container](#once-within-container) steps to build.
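If you do want to try the local route end to end, the flow described above can be sketched as a single shell session. This is an illustration rather than a supported path: it assumes an Ubuntu-like host, that the repo URL is the same one used in the Windows guide, and that `docker/linux/install.sh` and `build-linux.sh` behave as described here.

```shell
# Sketch of the "Local Dev" flow above (assumes an Ubuntu-like host).
# install.sh changes system state, so review it before running it as root.
git clone https://github.com/Azure/GuestProxyAgent.git
cd GuestProxyAgent
sudo ./docker/linux/install.sh   # set up the build dependencies (system-changing)
chmod +x ./build-linux.sh
./build-linux.sh                 # full build, tests, and packaging
```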
GuestProxyAgent-1.0.30/doc/GettingStartedWindows.md

# Getting Started with Windows

Required tools can be installed locally, or via a Docker Windows Container build image.

## Local Build Prerequisites

The following must be installed in order to build this project:

1. Git (e.g., [Git for Windows 64-bit](https://git-scm.com/download/win))
2. **Visual Studio 2022** - one of the following editions should be installed (once installed, upgrade to **v17.4.2 or later**):
   - [Download Visual Studio Community 2022](https://visualstudio.microsoft.com/thank-you-downloading-visual-studio/?sku=Community&rel=17) (free)
   - [Download Visual Studio Professional 2022](https://visualstudio.microsoft.com/thank-you-downloading-visual-studio/?sku=Professional&rel=17)
   - [Download Visual Studio Enterprise 2022](https://visualstudio.microsoft.com/thank-you-downloading-visual-studio/?sku=Enterprise&rel=17)

   During the installation, select the following features from the *Visual Studio Installer*:

   - `"Desktop development with C++"`
   - `"The Windows 10 or 11 SDK"`, including the following *Spectre* library, which must be selected from the "*Individual components*" tab in the Visual Studio Installer:
     - `"MSVC v143 - VS 2022 C++ x64/x86 Spectre-mitigated libs (latest)"`
3. [WDK for Windows 11, version 22H2](https://go.microsoft.com/fwlink/?linkid=2196230) (version **10.0.22621.x**), including the "*Windows Driver Kit Visual Studio extension*" (make sure the "*Install Windows Driver Kit Visual Studio Extension*" check box is checked before completing the installer).
   > Note: as multiple versions of WDKs cannot coexist side-by-side, you may be asked to uninstall previous versions.
4. [Clang for Windows 64-bit](https://github.com/llvm/llvm-project/releases/download/llvmorg-11.0.1/LLVM-11.0.1-win64.exe) (version **11.0.1**). Note: clang versions 12 and higher are NOT yet supported, as they perform program optimizations that are incompatible with the PREVAIL verifier.
5. [NuGet Windows x86 Command-line](https://www.nuget.org/downloads) (version **6.31 or higher**), which can be installed to a location such as "C:\Program Files (x86)\NuGet\".
6. [Installing rustup on Windows](https://www.rust-lang.org/tools/install): to start using Rust, download the installer, then run the program and follow the default onscreen instructions.

You should add the paths to `git.exe` and `nuget.exe` to the Windows PATH environment variable after the software packages above have been installed.

## How to clone and build the project

This section outlines the steps to clone, prepare, and build this project; a consolidated command sequence is sketched after the steps below.

### Cloning the project

1. ```git clone https://github.com/Azure/GuestProxyAgent.git```. By default this will clone the project under the `GuestProxyAgent` directory.

### Build the project

1. Launch `Developer Command Prompt for VS 2022` with administrator permissions.
2. Navigate to this repo root folder.
3. ```build.cmd```
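As referenced above, the clone-and-build steps can be collapsed into one short command sequence. This is only a sketch: it assumes an elevated `Developer Command Prompt for VS 2022` with `git.exe` already on the PATH, and that the prerequisites above are in place.

```shell
git clone https://github.com/Azure/GuestProxyAgent.git
cd GuestProxyAgent
build.cmd
```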
## Building with Docker

If you prefer an isolated build environment, you can use the Windows Container build image.

> Windows Containers don't have full Docker feature support. Additionally, the image is very large at ~30GB.
> If either of these is a concern, use the previous instructions.

```shell
cd docker/windows
docker-compose build
docker-compose up --detach
docker-compose exec gpawindev cmd
```

Within the attached container, the full build, all tests, and all packaging can be run with:

```shell
vs-init build
```

> In certain Windows environments you may notice inordinately slow downloads during
> the docker image build. If you are in one of these edge cases, consider disabling RSC, which [can potentially help](https://github.com/microsoft/Windows-Containers/issues/145):
> `powershell "Get-NetAdapterRSC | Disable-NetAdapterRSC"`

### Other tips

- If your setup is relying on OpenSSH for container communication, use a modern version of OpenSSH for better performance. On Windows, this can be installed with `winget install -e --id Microsoft.OpenSSH.Beta`.
- Docker Desktop can only manage Windows or Linux containers at one time. Right-click on the Docker icon in the system tray and select `Switch to windows containers...` or `Switch to linux containers...` as needed; a scripted alternative is sketched below.
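As mentioned in the last tip, the container-mode switch can also be scripted instead of using the tray menu. The snippet below is a sketch, not part of this repo's tooling: it assumes Docker Desktop is installed at its default path and that your release still ships `DockerCli.exe` with the `-SwitchDaemon` option. The first command reports which engine (`windows` or `linux`) is currently active; the second toggles it.

```shell
docker version --format "{{.Server.Os}}"
"C:\Program Files\Docker\Docker\DockerCli.exe" -SwitchDaemon
```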
GuestProxyAgent-1.0.30/doc/GuestProxyAgent.png
[binary PNG image content omitted]
Aü:ˆxøķJ88U pRŦ$øhĨúɓ‘ŽčGĐvQƒ/čĻ– ŠŧúÂöāW÷ĨÄŅqč7ēģ°IáHxę´á‚m]JD*éš+QâH׎%ß&׆hūr~Áũ•øÉãí×Ōp0ÍĶ{ āéG`ôČOž‚_c ~´å#û!‚ ĸ§hFâAēņŋ•QÔŽ$jŽäÖQÎA”TÁÁ Z2m‰ZmZ:AäJD*i›+uuÂ$t”đųK˛‚ ‚ ú AVˇ&’‘•ŊEíÖĄŲŋø<[{Ė–lã‰4a‰}G8§(ÃPtŖ-@Ģ ×ņ–+‰Šä°s%ˇ„pĸ¤ VВiKĐRÂucɡÉ9ˆæ¯D8¸?KÁ ~<üUN8§ĒA ŦĒđ#ĐĘô#SäGЖė‡ ‚ ˆ~‡WâÁúÖD2˛ģ•)ŗ*ņØ:ôØŸbk Æļ<ū—ü  s$ũ×qÚēĨ*Āĩ‚H**éÚUrO88ĸŦRA'O["€–ŽK>ƒä Ē?…p‚Aü:ˆxøĢo”ƒG+‰~ZŠ€~ōdd#ú´å#û!‚ ĸ_R)ņāt Ä íę”Yék?sŲŠėå9ą7}üd~¤…ŗūÄîûōŪííŠķŋÖ:Ú§õ¯īÄæ]w~ōzߗßŨŌúQŌQĮũé'“u°=séx­>ž<īĢŧü]Éļ åĪˆÔœ‡žõAvī[u[ÛŧkĪoF8 Ų0|Ü’}đčOfk ŪŌûa’€@"b‹)g˛uļČ&EĀŖ?ųdj{ôų‘>”XądÛę įō#­Ø¯==÷ą->ĶŌģ}z°}N¤åŊ0I"€G|TōēÕ˙fG9‘”ļ$6|ĪlÔ~GģčJ*éš+Ų'QVŠÔ ,m ;­=žu{|ū‹l‹‘ëŗ›ŋ9™ÕÁu„Ē'į šŋá(ū,D+øũũ­°ã/ē†qøūėŖģmĶ:â âŅÎĘ āTÕÏĀJ"€V* ™‚l$?‚ļ|d?9ṕÎ÷+¯ŧÂVŦXÁVŽ\ɖ,[Î^ūīŲŌ—_a+_{­e´ŠąU+ŲZ°—_[ŖūwEëu%{eEËö•Öû¯ą˙žú{aŲŲĘUĢ؊•Ģ؋Ë_Iô—ĩŽ/kŲČĖ[ō_ū.Ëâ—_e˙méæYklũA¯į{Y6Xįõ˙ŋŊ;¤Ŧī˙˙Ũ=f¯Ų›cX–åK@Ž”Ŧ jXĐ$jL šņJ4ŋ4‰å÷‹ū"(ų+ĸ!ņLT– €n0€ÂË!‡\ËŊ {Î^ŗ÷ŋ?5õÔÖÔTuWu×ĶĶŨķ~%ewUW}ŋÕ=ŗL=ß~ž§ŦĢr^Î¸ŅŖlRw˙5’ŽŅą˛÷¤q6ļōÚø1•×ĮŽŽ<ī˛qŖēlÂØĘļĘķí•ķļ‘•}GtYרQ6qÜX×Ũm“&ŒˇŽŽ.=ērĖØą6f˘Ęß´æūŒŲļjš=|əväe7ؘŗÃ­åč­´ žqÆöū[>b=ŗ§„[tßģũûčՋėō ÚģO܎BkQ áâÅËėĢ æŲĖX !Žf᥎?{•ƒ‚ÂÃĢĒGTxHėīͰōæk‚žÉų~ûšßĢ4ŪׄkŠɃdú‹{Š iz^uōĀÂCP(HūƒŅßøcėķÆķ*ũūB¤6-ģߞüÚGú‹'Ÿ|&üŨšļŊwā{tÅõh˜tø)ļ˙;öđįpYå3ŧ!\((<ŧeOΊú¤ü|Ģ˙Č3ä;¨ØõY'’zH]oh (DąXÅ •Ø%œjmą$^ķĨ÷’/GĐō,<n­Ą|ǍĒÄņĸ=¸b­Ŋ°nSGDīOīSī÷Ö'_˛Ÿ=ü|đ9čķĐįōĄĘįŖĪIŸ—>7}~úŋ|ũ¯ėģˇÜUų|īŗ;+Ÿķ¯+Ÿ÷’Ĩ÷Ÿ˙ʕ+ƒŸĮŽ]ģÂ,´žÂsnâČāũUū˙ˆŋ]d;6Ŧ Š n9øƒ˙Ôp.)ŸSū/&ßAÅ O$퐂q’ĸ¸™ JP‰ë3|$–Ākž”ā^ōåZJŪAČ×v‡š!JČãT Ubž¤AĄ=åqĸ|õåŅ ŋ}ėq[r×ŨļčW÷Ú˙ũŲŨAĄAC$ô­ûË×s2 õhÂL áĐĪéc?üĩŊëęÅöÅî˛˙ŧmi0 FwāĐĪ€Ą9šäWwĩ ĮõĖĀÉ%‹O M.yIėŽÔ>‰Ąųf\ꈕđôÕ˙Ûļ÷ŽNŊ#E}ÂsjüÔ*2‚ÔģöAíüsėQBœ„=“KNëŸ\˛ü ‰Ūōe.=_΀ įíđĐķ+íÆû{zøų—íg—zį´§îQ]ö–#÷ˇwŸ0ĪöŲk/;ėy•ŋ­ž'Ņ–˜\­‚É%ÛKC“KĻŌߨ!˙;•ī$Úŋ;~™Â€ ĮÍ8šŒÍÕÕ>H?Ãü?ĮÚņRĨRgŦ¸(Dƒq2ÅâúJˆŪHŋÄjy2—ž/gĀRōî 0kę$ģōĻ%öĨ˙úU´¨č z ļ]ßŋ\yã’`˙ŧšWt(áCÉux yœĒaJĖ7(Ŧ§ßNZp/ųr-%oõ įŸņ5˛úrõēö̆‚C5C•”'iP^Oyœ(_9yēēēŦĢjr÷˜p :ŲØQ][c#kü÷@ųÜžYj/G°ũÕš¯Ķ^gŧ´ēėŋ:-söÕOÄ{CURŠ3V\ĸ„X0N)á2‚ÔˇöAC[p(+):ŧÁ8™Âs üI$đ–+㍔ž/#ORÃyķå™;sĒũîņ‡…kéôēöKĶŧ‚ƒÔ™Įɝ¯Á­ū;ÖÖåîü UŠãGĄC–ēą˜(D ą"aœ†ÃeœSÆæęjTŦā …O"ãmŦ#V\ĸ„XŠÂ¸žÂG ŧåËė%_Ž€Ĩä- ˛ë‡ÎšޤËzŊ:T;7Kr^B§j˜ķÄ ë)…÷—gˍ.[úü*û§wžbxÂ<ëĄ÷CGQÁA?ׯŋįT{äÅĩ6ē‹Ū€æjËŋ§%Aۓw˛h^/‡B;§Ëĸ„,?ūõũáVt‚ļ*< mÁĄ`Ŧ¤čđãDÂs “¤î¸ĩĸpTM×WøHâ=x˗Č㔞/#O\Ž]j+$c׏œuJøl øv UÔ URž¤Ay=åqĸ|ƒ{õbīfûōâíÂģÍ[šŪ=ĮŽ}ß‚îųįŧzŽ2ŗĮēŠUŅDÍč ~NúyũëŸŧŪŪ~ˁöôę ö§ßšŨ.ģų{jUo¸'€N÷ž/;jĀkŅēķŠk~2āĩ3˙úĘđ•=Üņ÷=ų|¸Ĩ9Ž_ō }öŊgۚ~9X×94ŗĄĪ&ūy(wŗĪĄSĩná!v]SŦĄ*u\ĨĸuĊ‹B”+R‰SJ¸Œ uÅ­}BC[p(+):ŧÁ8™Âs üI$đ–/#péųrl8gÁ¯˛kZΆxOˆæ¤Îö öī•ŸË‡^DđsŌĪëßšŨū÷Oî˛˙¸īi[ģykx$€á⍋>k/Žé īnŅēk<¯XŗŪžüęčĩåĢ×(LČĸ;c'z ũđļĨá–æĐšĖž1%xūöלŸ‡J+œC§hŊ+…ÄĩVņ‚CÁ ĩÔCꈓ…(!V$ŒÕp¸ŒsĘØ\]íƒ(UÆõ>’Hā-_F`/ųr,%o9ķ%įrĐzÃŊrËy’Õä:ŧ„G}žú\õų~íŨ¯ >o}îGĪžüŽŊ뉠—Ę_ūįÁĪI?¯[š]&0é{šéī?<:ZĪj<_ôÖĶíž'ž ×úŋåWŅáĸˇf‹–ü&ܚNŊ\Ī “pâÛã=t~*rÄ_wŊ*ô|Åęõvá?^O´=ūēë}_”SKZ/q1\/W`q=:’ÛŽúémv÷ãĪDĮ$ĪAÜqnGį*ĸ÷€=ZĢđP÷ĩVj™‡Ô}{!2Ô!ŒUJȔuÅÍwĐĐޤ`œ¤(nf‚Tâú ‰%đš/%°—|9‚–’ˇ@‚ųâw¯ĐãŲĮŋ*x^XĄŧO2MŽ%äqdž*1OŌ Đžō8QxĪyJĸBįôÚ=ôœũË폍Ũ?øÆbûäĸ{ėĻG^°ŪžívĀÔ öļJcų’7ŋ:h$ëÛxM\ųwo;Ū>vÆQÁD‡jLĢĮÄŅŗĻßŪī5iܰÆĄ÷Š÷Ģ÷­ o9r˙āķøđŽ >ŸË˙äāķúáûØgŪz\đ9ęķÔįĒž Ÿũé}Áį­ĪũĘ_>ü4,F?XZi ĢĐˆëîxĀŽ?x˙ PĄB@Ö0×`w='Žúé­Áēļī;­'Úîļ9ęMqöüŖƒ×Ôķ⒰ņŽõYĶ'ÛÕņ^{čĒĪÛâTĐkÚ/(Œŧõ4ûĐÂ7„¯V§ĪEĮ}ëãįŁYĶ&ëZt>zzM1[ÛŋpūšáŅ{¨˜ā†‚hQĄBņœĪ\{Ŋ-ūâĮ‚×$^˜îZŖđ ëŒēŽ5ę80ķ:b%E!Œ Fq‘¤Ž¸ĩ*ÖËA;<‘ĖCꈕ^BœLal_á#‰÷ā-_"SzžŒ¯s˙åįÁÜ úõyęsUOÍÃY4TÁIöˆ7ãT0X8˙ÕáZaāN;>xŽÂ€ iÔKBŊ"W,Đņ=įw‚įĸ}â=*Ô°wƒsN9fĀ9gq=\¯ F4d$¯ËbEåŽTėXžj]¸–Í`âÅ*âīMëĮUÎMô™9ĮN7´…‡†Žĩę80õ†Nĸ_ĸ„X‘JœRÂeŠ+víƒÚžā„h0NĻ0A”Į—Doų2—ž/gĀRrŌ@> ŠxßëŗCö™<R(oƒŠĪ"÷ŽÕÕ SRž¤Aa=åqĸđžķ´Í1 ‰ U˜¸ųŅåÁ4ԘÖD–šGB í ˙ũ6;ûĒ›í=×Üôžøú¯ĩŨ˙´=¸bM°ŦŨŧ-ˆÕ5rDĨŅ?-X:+hĀkQƒ>kŅ™jôį]´Zˇ¸œozÕėč\Fęŋ¤RĪwÎ:˙oŪņXđ~4DEīOīSīWīû~ņ› ×‚>Ÿ=ü|đų¨7 s2¨×ėéũ=*ÅÍO Eë8\Aâ„CæD ņ 7@e_×xÖkÚ7zC¸×ôp­Ÿ+¸ãEûhßF¸xŽņ¯ 
ęĩP/õ\pī?īšŠ8˙|eÖô)Áü¨mÄîŠđyā•ÍÛ탋—ŲWŪpÍ?:ÜZ܆åëėÛoŧŌÎŋųƒÖ3Ģ‚HŽëŦmĢ—ÛŖ—žÍ˙ä÷lĖ´}­RĮEZæ! ^đE‡7g€0V)!S‚ÔˇöAÅ{ĮÖq"ЇÔõ†ŠB”+UˇÄđ§ūí5öäËkėāŊĻŲí{~¸5‘Ā×Û ¤¯‘īîûĶĢåÙž!Öˇ§ė¤OŸ‰Į_æĄŊūŖé¤hBž‚îZ˛$hcxPqå¤ųįšdÛĒåöđ%gڑ—Ũdcfˎ–ŖˇŌ6øÆWØûoųˆõĖN´ †fũ—_]ûÕ(OÎņ †ļ†`čÛz7ėAC ’4Oæ8HŌ‡äŠųš?Y8hģķn/|ú_="’įMâ1UÄXđɯDą×Q¯ CÅŨ #SÃ2TpIƐäû6Éķ‹˙ĖËĢœĢhõjP,}vę‰áŠ8ņךe@މTC¸xņ2ûę‚y63Ŗ†ĐüÂCÎuƒ uüsÍ<¤„úAˆâDÂXĨ„L RWÜ|+:Ôq"™‡ÔõĻöˆo0NU•ØÂ.<Ēø|;iÁsæS၆ ōEAáÁã/ķ Đ^˙áÄÂ{ÎS' à …dĄđāO3˙ë¯zŧa.y Ž‘,2¨A­9’û+Žžíwy´Ÿž'÷7Čë-<¨ŅÕ ˇ xOŽ+4¸â‚b¨CVá!~>îXÍÛ ĪFE =qį“<^Ÿ­ÛW´î>¯x\Ąđ0Pķ†Zč_[Ũ˙â ˜™ĢĄ“č…h0N$ ÅmDFēâÖ>¨í‡U”'SÛWøbIŧå ßORSŪ†§~š*˙‘\F†K´md¸ÄˇyXĸ˙Ûķ¯ŠŦ€87ąĄÅnŅ×XÎōĢG–‹d†āî)Ã-Ô Öü .‡&Œ4īÛŽ"„kŒ×Kį¤8.ĻW@Ņkš‹Bm¯5šĻæ{ФÚWEëč3RŅB¯Šp¤„&tį "DōķB:˙=~ž2Ô"‡=ö ˇæzVÂĨYĸĖËŧ0VÃ!3ÔˇöA•kčę|sЇÕ+. QBŦTa\_áC{<\ā1_Fā:ōŅãyõ÷x81\̟ü˙ƒÕļũ†ƒà =…ū ŋFū${"ˆūiC@†3z<4ƒū5§ū‹nđŸy73AÂXĨ„L PWÜ|y/:¤žFęÆbĸ%ÄJÆõ~€DoųR7åũÍĨo4Ô=´ĻĢNė¨!’œÜ­Ģũ ™ ĄZHŅá Æ‰„į>4&%H]qķTė‹Ã|1Č<¤`œ¤(nf‚„q}…ø|q)yš•(SâwVc>]×H×=ŗ*X¸xZ(^⇆jćuhX…†9¸yĐúÚģđÚ*Ą……(!V ŒSJ¸Œ uÅ­}ĐĐ ÆJŠo0NĻđà ŧåĘx#Ūōž¤ü*ĢH 1Ÿîļb̚u?ķŧ4a”Æ…ē˜ē *Ŗ Ķ0 ÷7WK­9+ĐZÚŗđÚ>JŨXLĸ„X‘0NÃá2Î)csuĩ*VpÂ'‘qˆ6Ö+. QBŦTa\_á#‰Ūōeö–đdĐīėž}Sŋ@ŅäYęļ§ î›Ír]” šLKT€ˆ÷ˆp]Bã9â=$´î !ÚWëŸüæO‚^q:ĻŅ‚ @ŗĩWá!ŗ!”ē1ŋ(nf‚:„ąJ ™ Ž¸ų*^p(x"ЇÔ') QBŦTa\_áˆ%đš/%°—|ŪŪĐoXí_bõN˜5}ĪäfēgšfãÖ7(ęēŠ[{5J3fkŌ+ÅTWP#4#ˇûĻF\ åÔ,ŲrIĐ;ã4ûâį3kģ"ˆ :Ī×1/XhíQxČŧ†Ŧ}qYStxƒq"á9…ÉRWÜÚ í°Š!2” Œë+|$ņŧåKäqJĪ—‘(Ë _ąÚŋoŽĮ@˛„[wˇŌrŊōŌlÛ įīš%—†aÄgÚÖ𮏾ķ;áZ˙íÉ\ åÔ9¨‡ƒŠ"îļcŠqŨĪŨmÎĪ ÚMëR¯!]i…(!V¤§”pAęŠ]û Ą-8Œ•…h0NĻ0A”Į—Doų2—ž¯ô€Ā@ƒ~ÅōũΊ˜ ž×~âüpKēYĶ'Û3/¯מИā†NœpȜĖû”ģ"Fŧh ™¸UāpTˆĐzüūãįœrLTœ¸~ɃŅ=ŌډˇÂƒŋ&Gƒ‘ŖkĶčI ÂX ‡Ë8§ŒÍÕÕ>¨XÁA ŸDÆ!ÚXGŦ¸(D ąR…q}…$x˗ØK>/oØcXū_b5ū5 ļ†?Ôę5 @ž[tÅ'—üÖĮĪ ˇæōÅ{Q¨°Ą‡ŖáęŅā†\ˆë1ĄáÃm˜ÅO/ŊĀV^ķéˡ?ōŽđÕ~xĶIƒöyôĘO„¯öûŸ5h-:4G酇ü—€ĩ$Ŗ”98ŧ„8‘0V)!SÔ7ßAÅ O$퐂q’ĸ¸™ JP‰ë3|$–Ākž”Ā^ōy}@ƝXūߡxŅ!>üÁQŖŪ ÁФ*¨X ŋz3Ä õRQáōëū;\3ģę†ÛĸĄĘŠû“ßô÷öĶēŖžūãĩÃn˜ÅԉãėŠ•klīķ?,—~į&;ë¸CƒB‚3wīiÁã9_üv´ŸÄ‹ŗ§õwÜënųúĪī ļZ†–ņˇ­Ŗ´ÂCyMŽd¤"G!Œ Fq‘¤Ž¸ĩjûaeÄÉÆö>’xŪō%ō8ĨįËČWzN +ƒ~ÅrüÎ%¸ŋđnhDüŽęš Ą ÚŽI!k Ũ‡Š 6áōkĸI ÍЅ•rj¨…\VŲĻuwÁĨá2܇Y¨P BĹΠˇ¤ûōĸ[ƒĸ=h Š_ūe)/R$ YfėJœRÂeŠ+víƒÚžā„h0NĻ0A”Į—Doų2—ž/gĀRsbXô+–ķw.…†A¸!nŅũĀEŊŽÂ€{Íõ,PzœŽÉ^‘õZüäîuÅÖēë‰áÖ]N7äŖĶ†Y$‡@$‡Q¤™:a\ø,ÛƒĪž>ĢzKčÜî¸ėâAįĻ×Üļda#9ėCCEâ\Üøå`8h¨đP^{ĸŧHčÚ4zR‚0V)áR‚„ዊ}Pą‚ƒ>‰ŒC´ąŽXqQˆbĨ ãú I$đš/%°—|9zɋacĀīÎđūeúámK;n˜…ā.8qĀ Ŗ¨V|PÃ^=~ŧäĄpKē…'<6R€ĐšŨōвāŧŽ^|wpn*¨7…ļ-]ļÜ>˙ž3ÃŊûĪíã OŪ‹Ū×ņķf"ŠûJīĻhŸƒöž–ĢāÁ†<ē^škIC$õÜŨzí­ŽÂCy—€.&ƒpeÆ c•2%H]qķTŧāPđD2)')Š›™ •¸>ÃGb ŧæK î%_Ž ^ōbøâ—ié“ĪÛņTt3Žšg7Ū÷¸Ũųøsá Öį2đ}Ēaîz¨Ąŋv㖚s3¨q¯!ņØââ¸ĨË§ūíÆāš{TÂåvŏ“= xÔöW}øKÁsQnĢ›_ÂQÜķŽø~đ\û¨€‘|Ī0\éÖŌŽW æZZüŏ č!ˆöV¨đP^{ĸv¤Âyĸ…ĖŒâ6"#H]qkÔöÃ* ÆÉžŖ¯đ‘0ã5_JđŌķ%ŪO¯īÃKÎßša@C@˛nĶŲŽTPpŊÜĸõ¤øä’ZÖnڒZ4¸î“įEqԘ?å’_Ų#GKŽžŗOølđĐõΨåūgVäÚ€v—ģđPÎå_ž ÉBšĸųbįÆi8\Æ9elŽŽöAŪ ’zHąâĸ%ÄJÆõ>’Hā=_Béųr,=/€N§Éb@ŧ×@šŋøf˙­F“CâC6Ūúųo†[›Į Ņ7Üy¨ĮčWŗđP^{ĸv”Bšĸ UCĢ”)ꊛī âÃ* J=ÔÅD!Jˆ•*Œë+üą^ķĨö’/G@/yt:5ĘkŨĸčŊ¨wFŅ[tę8 0˜&cPĨđP^{ĸv¤Âšĸ UExáCcR‚ÔˇöAC;ŦĸQÜĖ%ãú IŧoųyœŌķeä‰Ëą dųŅ’‡‚Éãw†Đ$‹ĩ&ZüĮ ÎŨŧ ­BFjøˆŖ;W¤ ĄˆO Šų!ôčŗ ĶÕ5šdYRÛ.Õ4ŅkÕv*"ŒSJ¸Œ uÅ­}BC[p(+):ŧÁ8™Âs üI$đ–¯JāRķå|á.Ÿūū/ėŽe/ô¯´°øíīܒŧ žÉ}’ˇÂ‹ß/ždɓ­G3jŸų×W†kíëĄį^˛õ›ûÂĩÖŖÂ†ZhÂH÷īcfĪ„hâEQO€øä’Zt;M ch5:oõxˆÎsâ¸`=I“K:kF°æĨˆOb @'ąģ"|xeķvûāâevų˛ãG‡[‹Û°|}ëWÚų?ŋØzfM ˇî1¨y“ذmÕrûíĨoĩÃ?õ}3}ßpkŽFQnaŦ†Cf¨+n탊 А:N$õēŪĐ@QˆbĨ ãú I$đ–/#peķŠsĩ=Yš¨=¸rQ~ûß]žĐˆo"ąËaģÜÖoŲj“Įĩk?ü{vŌŧũÂWŌŨ}ßũvöU7‡kÍŖ üøLō*2č[G wŗŪ̍ Ž_Žo"Õ(Đ$unŧ¸ģß~Ú¤ui˛ōj xŅîØÃÍõŊŲN:Ё"Møģ:"Īīu‚ŠgĪ?:˜Yģ øC^ņĨŸüũ•ßŗ?}ĶIö7ΎÉãģÃWŌŨĩdɐüûNTŒ\ōÄķŠ+C%ø76~¸ėĄëī‡/9ĶŽŧė&3cv¸ĩŊ•ļÁ7θÂŪËGŦgöāļA§+ū (.y=Đ,Ē!\ŧx™}uÁ<›™QChzũŖô¯ÚŋÄāĩÔŖęÆ*%dJ€ēâæ;¨XŅĄŽI=¤Ž8IQˆbĨ ãú ?@,×|)ŊäË´Æ.*>,üŋß Ų= J?ņēš‹~w¯˙4*H¨h nĐeQ^}ƒûöųG…[āEôĢVüwîž'ŸˇģĻíŠYÔãáŽģÕNüĢ+ƒBD+÷€­i…‡ÔļK毤âéÂdšr֒¤Ž¸ĩÚa Bd&(A×WøHâ=x˗Č㔞/#O\Ž]âŌ 
ƒÔ!9ĸÖpwßũZ–¯Y>+nčč<ŨųēsŸ›Û–ö𤠉ī§įņØî.zŨmĶâzr8ZWėøįꆝč5ˇ-yW!ũĒíų›öˇ˙úūāš$׏ēčŗÖxÛR[xō̃įÚŽ×?uÍO‚ã´čšãÖõč†f莝–xl‰ŋĻ"‡$qÛEųŨv ‘øųäB 5ī…‡=—€1Š}Ē$+%gFēb×>hh c%E!Œ“)Låņ%‘Ā[žŒĀĨį˰œųz@”à ‘pˇ¯Ķ0¯V|øķŗN kĢö1ÛŧÎU“ĐÅé|Ũų̧…+ ¨Į…ÛŽįúአzn¸ˆ^×Xy‰Åvˇ÷ĶûÕņŠãâjŅ9ŠĐ§Ø:OŊŽãÕķCįtËC˂m:Ũ:ĐĪŠ~Ĩ˙n̈p÷cĪĪ]­́ŋbõz{ûk ÖeҒߨ9§ŽYđúŠ5ëmÍŋl‹ŋø1ģꧡ ( hŊvĶß8Ø~á?^ė§mW˙Å{ƒuˇŋŠŊõ´čĩ˯ûīč­kûgß{ļŊ÷K×ûĢĀpÂ!s‚íZÔ CûëÜ6å-‚ÄĐŌPŽVfĀPhîP‹Áׅž… Ιqâ›ĢĢ}Pą‚ƒ>‰ŒC´ąŽXqQˆbĨ ãú I$đš/%įbb*HŨIDATpÎ|Ī­ZĖ÷ųFžå3ĩ— }ÛÂŗH×_€øw;ėc_ņV€PƒøË‹n ×,˜;Aē%‡3¨!힊×sÍŨPöŠ­ņØqɉî’ŊĒqŊžvãÁŖŖ†}œ #*xÄ(n˜ÆgŪųÆ`ũÔÃįëŽČāŠ(GĪŲ'xtT ĐgâhâôyiRŋ8}>.ŋŽW.ÅryÜ{Hæ+ËŊΝļīŨū@íåWzüM¸$^Ģ,û͜fKÆŋ *D¸õ_=˛,ęŨ Ž0/Dȡ>~^đ¨[zxčÁqÎįūdaølOo wë/ÅqûĢ` "ÆÎ?7zMqõšöq9U\Đ~ÚÖô)vĪĪÛ;Ų“âĄįVzīږ… U¨ŸûģĻ"ŊëáW6ô›1y´rč=éŊ` æŧ5ÚŌ„­ļœˇęRÔ7ßAÅ O$퐂q’ĸ¸™ JP‰ë3|$–Ākž”ā9ķ͘ÜßPÜļsg0ÉäP,ģÎK›É ~öđžoâËāz5ÄgÅ×ĸâ@Rŧ÷€’EÍBīb(Ļz$ŋT#=§ÖD“ņ‚‡rę˜xyđŲ—Âgũ4ã}ÚløÚænĪwûoŸžģnØC2VÚp‘øđ -iŸW^s8ļšŋšaŠ}ô›××^ŽÖ˛(sŲŧ}w0gƒ¨āđŅs~'ZWŖ^= œëîxĀÎßSˆHŗī´[ąz]¸6zF˚69\ëįöæåÕ6kúĀ×D¯é|ÜĐ -ŽŠ:ŋäŋëIĄíîBíg÷=6čŊk[-Žąāožn/o(ˇwŸøßFĶjäĮ˛ļXM=… }q ŋë數ĐUŸ ˇÂiNáÁ[Ã-)lĩålŧU—¤Ž¸ĩjûaeÄÉÆö>’xŪō%ō8ō}ëƒįډÍîj1TËȜŋ°ēëÅĸ˙õGö–#ũtËwà â‹ģEÔāŽСúņeÜâ.Yđ(›†M¨x aę9‘,j$ŠčāŠ*îœŌŠCí¯Ī<Ö.ŋāėÁ˅nY˜kšāMķƒŋčËW¯z-čBK…‡×1/ĖhļčÎߨ‰‡ŽĨ{qMoĐ!Š*>ÄšũÜkzГ!I¯é|ÜĐ ˇ¸ž ęĄu Ū÷åoÛÔ;BÛ\BŪrÜaƒŪģļÕĸ;]üÕ9§Ûâŋû€í5Šŋ¨h]jØĒÁŦĄyîoÆņ•ŋj$#?õ@ÔgWļŦ/Đ¯šC-ŧĒ4‚2ÚsÅdŠ+víƒÚžā„h0NĻ0A”Į—Doų2בoJwˇ]˙ŋŪÜJsđōū|Ëg[&u Ī&+8<ö•ÕŧÕf=ܡûÕîNŅŽTI녠m¯X<×P ‡ˆ5âC*˛¸!eU|:yî^öîSŲŗœ.§.ą×Ē,G°wЋáĒn‹z7čV™×/yĐfOŸ:`č‚.’Ã,Ä5øĩžÅˆ+âT´PņB…âöWĪåQ|wa¨}Wą´;Ļ^zŸiī=‹+8ÜũļOœûúšˇÚ´Í¤a}ņģ/Š­EwâŊ\‘ÂũŅß×KBĪõ7é3×^ÆŨ6Įí§%^ÔPīŨkŽG@r=ÎÍí—w8…â%c'ßWr]’ëIɞņƒä~qŠŠ/Üįé^×Ŗ;F‹ã~^îį’Œ×‰: đļÚ 6ÜŌĨ Óī ü)|‡ä;ˇĒĸ%ÄJÆõ>’Hā5_J`/ųr,%ov€A‡Rō Ļo÷ÕøÖˇũqBá†adŅ1úĻŋVĄđŲü"FŸđQĪĩÍ ũĐďnÂĮøRk˛G ŊˆŅPÜF†Z4EđģS˙/‘ ēpp…ר×7DNĩaęÉ  ]Lč&WŦHRŅB¯/øäWĸũ5Ҥsí'Î.bôšöҰÅRĪwŒw!ŋ¸QACŊâ{î|Šh•‚ƒ~Wkũ­FķĨ™_Ĩ4ú™’ÃķR#WĮē^ú›ĸb…ūލ0îļ‰ūÅ{TčoWŧP b…›HŲ5Ėãëʕ¤ÆĩæEŌ>úģ§ĩč Ytįáļéog|ÎĨäŖōë=eũŊNrīÍåŅįĸĪBBģ^‰Ú–ļ_œŠAzoîķtĮ¸‰¨ĩ(fŧāázCĻÅëDm\x/DëŋI RWÜ| m/‡Dq3” Œë+|$–ĀįÛI î%_Ž ĨäÍŌŦ‚Cœ†T¨øo|ëÎ ņo˙õ |Ž-:ĻÖü CEÅ INˆŠ^ ŽæxH Ņ{ŌЋjÔĶÁí§¸ķŲ?XoIŅīOcŋDnh‚ûvH"É?đjØ˙ÁiĮ‡ki?wÁ˙†IëÉzŨíĢ%~Ņãō&_sįį7&TwŦˆo—düøųTĶJ=4‰~wŨŋQ7įHŧŅ­Â˜ļ%īļâ¨čĻį*@"ÍY”WÚ$ĖĘįzėéjqį˙ÛŸ/IŊã…(Üē&“Ž5gSœ "*ž¸ķ˒wŋ8Í/•,ŠG„悎ÚŦđ^„6|-šqA›ąšēÚ+8Há“Č8D눅(!VĒ0ޝđ‘Doų2{ɗ#`)yĶŧķ”#Ķ  įCēûEÜ;æՒEļ:}ĢŖŪæŦ×Örs8¨Q­ÆĩzėdŅ  ˛Š|*\¸^:ŽđĨ"C’zķ´ĶŋW é´9k”KÃ,\ģ(×kNˆ÷~éšpë@j$Ģą§B‚†<4BųÜđø0Ä,Žā/Ä'nT@EõĐHÎŲ¤^ ŽwE^ŽwŖ n.‰4y÷stnÉB‘ÎÛõúnÚ¤đļfJiÔ¤¨+nžƒŠ žHę!uÄIŠB”+U×Wøb ŧæK ė%_Ž Ĩä­äsīz‡!ĸjĀé›YˇŦŨ”ŨXCqēhŅÅEģ:ę€}ZĻāā=gŸā1yÛWĮÍ9ōž+nČ.Ô;ĀQBE†8í§žņũ’4LÃũÛŅsQ/­‹æ‰ŋ&îuˇ$į™Đųk›õz|HōØ´a$*”{āŦp ;į”c‚!ņ9ô\ēn¸Ä%9&+TĄÁõ"× Į×ĘÛÅtßü˙đļĨÁc-ꍨÉ4MBĄáÚæælr“Ik{ÔĶ#Y„)JįvÕOo ×ú'šėÔ^–y´AáĄŌš)ĨQ“¤î¸ĩ*ÖËĄŽÉ<¤`œ¤(nf‚„q}…$ۃˇ|‰ŧ˛yģ}pņ2ģü ،ņŖÃ­ÅmXžÎžõÆ+킟_l=ŗ‹O‚˛mÕrûíĨoĩÃ/ũĄ™ļo¸ĩ­ĸēKĩš!R×( QBŦTa\_á#‰Ūōe.=_΀ į- āîwß{Ÿ}ÕÍáíú‹Ūl'͟ŽÕ¯ôŠ-lĀō:Üĩd‰÷Ÿę= †y|>5ø?˙ž3Ãĩū‚Zõ ĐüÉšԃāÆûļgíį¨Á¯^ņķˆS<âs˜¤ÅtįŽb‰ŠŠ›œVTœĐ­pãĮĻC­ķ*SY˙ÆĐytũũđ%gڑ—Ũdcfˎ–ŖˇŌ6øÆWØûoųH]mƒv7œū&ač4z=P/Õ.^ŧĖžē`žÍˍ!´áä’yéŸwĘ?ņŒÍÕÕ>¨X)|‡hcąâĸ%ÄJÆõ>’Hā-_F`/ųr,%oĨä€=Ôx×7ūÉŽųę đĩīˇėná†YT›GBíŖC|(E–Ŧ˜ŽGÅÜŊ÷ܒVī!IŖø]j´h= =ĒC )­ĸēKų*^p(x"ЇÔ') QBŦL•¸>ÃGb ŧæK ė%_Ž Ĩä-¤|#ę="ęRŠŽ•hÍí Fzr‚ŧ=œ<5mF|¸E­aŽz%¨ØĄ!*čXŸÜЍø’Öŗ!­p@'č°ÂCF˨Ž6O탊õrЎO$퐂qŌ!2” Œí+|$ņŧåKäqJĪ—‘'.Į.ĩŌ@>(: š[{Å'Đ*›&øRŽ8­ģ‚†ķˊŨ)4—B5**hȄz$ *Üōвpm Ô`×<ęųPëvŽŽS@ 6˛¸bGōîîŨõHđ˜Eį–gŌș=‚IbčDRxČhÕÕ`Ē}ĐĐ ÆJŠB4'S˜ ĘãK"ˇ|KĪ—3`Ã9 žxų(8 •i.ŨĩB“G5“rÆ'ĮN–¯YôLĐЅ,ĄB@|ŪMÎ(ņ9’TlPÁBņ͆cÄi8Gŧ°Ą^ņĸˆ ŗ§ œøĖDâįūņ…§Ã<˛&Ãt~´äĄ pĪŠ^ɞ:wz<:U›2Q›ĢĢ}Pą‚ƒ>‰ŒC´ąŽXqQˆbĨ ãú I$đš/%°—|9–’ˇ@€ōŅËíBŊÜmĮ\-G]ôŲ`[œz&¸×ŨĖßĸíę5‘ë[?/ȧûyëuõ‚Đ=Ču[4įē;(|[°VĻ‚@rXƒ“AÛT P5Č5C5.NŪ;Bh_ˇ$‹îļšZâÅ=¯vœ^Īʏ™vlŪaĐ*T4WŅČĢ -ŖēÚ;ĩjûaeÄÉÆö>’xŪō%ō8ĨįËȓÔpۜy¤ĀŽIĐîtᤞpÚņÁēîWžđäWĪE…†‹ŪvZ¸fA@Û4tCôڊ5ëƒįE)ŋÎÃ]Č)Į9§<īš§A…„ŧōNŲîĒMž ­FÅwÎŨß> 
¤/,\OH-ņŪ‹ņ–n‰™á¸iÅ7‡”[ÜÜQ­Ž -ŖēLĩō^pÔCęŒ…h0NĻ0A”Į—Doų2—ž/gĀRōĐ@. č$YP+V¯†M¸?đ"áƒōëBîW,‹†€¨ŌIôÍŋģEÕztÍņÍÅw‡kĐÚÔ;īėųG‡kHŖ/0\OÉŲͧĻ)ާ¤zGĒįd˛Ā ž—ē&øámKÃ-ũT¤Đ5‰;^ËŌĘąîēĄ•ĩfáaPÃ+Ĩq3hŸ<ōT|XEAЧ‘ēą˜(D ąR…q}…$x͗ØKžKÉ[ Hųčå€ádÖôÉÁ…û¯EC%|Ѕœ.čî~ė™Žf§ĄŪƒĄ(>`xĐ7Ķj€Æį Š7Õ¨tÛĩ8ÚŽcÜqiߐģ}÷­¸Ī˙†\߯gm×ēļ'ŋąWī<79ŗâfˇÄŋũŸŸžģøÉcĢ}âįččYŸI<_üŧ%ž/^Č:ĮdĪ'~>i?‹$ũmqMo¸6PÚ ĘĢĸƒz[.Z˛gč§\õĶ[íŗī={Ā—%ÎŲ_T´^áaP[&ąĄŽÆRžƒ†vXEĸ¸™ JÆõ>Kāķí¤÷’/GĐRōŌ@> čDúã­â‚ģāŌED|Ž4ßBQî5=xLë&)ŗĻOąåĢ׆kũ^wÄŧā‚NŊ*:m˜ ˙›l5DUÄVōĶ˙ē(ØŽÆf|Î ã‹7‚õˇáøĘß+ŊVôŽLŽqėŠįîîJНŋqnģūöÄ !Z×ö‡ŽúL¸Å‚oāãÃ%~ŪzÍ5ęõwUÃ]|ˇÍQüøIi´æ^Ō>úL.‰ŸüL[ |—OīÍ}†ú ÜûŅĸũô÷šÚ9ęg˙âAôZü3ËķŗĐ ûNë ×Ōyé$^HĐ5‡Ū—Š ęué~&:_­ëZĄĩNáa@c(ŖeTW{§öAC[p(+):ŧÁ8™Âs üI$đ–+㍔ž/#ORÃysæqęĖ×PÁĄā)CáÚOœ\čÛ ] čâÆQī] ¸o7ō~Ãá†N,øäW\<:ēXŅ„âš‹wŒ.B:m˜Ā‚˙ÆģÆĒû;ô"xė™ Ņî nŪGęŊķėSÄr´íŖįüN¸Ö?<@įá¨0’¤oā“…ņø\GzÍ̓¤!Ŋõôāš¨āĸmNü¸,:'÷÷°Ögĸs‹Īɤ÷æö×9Å˙ļĢ—€rW;G ‘ˆĸ/ ÜûĢF&wÍ ˜Éž’ē6Đk*Ú${8ęX7ī”ŪōˏZŸYĢúÂÀFIF %csuĩ*VpÂ'‘qˆ6Ö+. QBŦTa\_á#‰Ūōeö’/GĀRōĐ@ž†z84üôM…ģ˜Ņq÷í….Ft!ŋ˜Ņ6÷ēwŅčöuô<~qáŽĶŖč[Ŗøˇ#.^<†žI^„:“ĪĪŧŧ:˜˜8ŪXUÃ4‹ n?-ņ^ iôˇNnˇŋ¸ŪxŽŦ%ŪÛ/ËS­0ŽŪ~*ψzõŠaíâëyôŨ~ZŌ¸ˇ;÷$åuŊ %žŋž@PŅ ŠÚ9ęoˇûRÂ}A ŋáú­mÉĄq*čīģŠ7iī;>Gƒ .~˛„r%.YīŋÕy)<Œ¨´æĩT5 !4`e aĢ+ĻxÁĄā‰¤RGœ¤(D ąR…q}… –Ākž”Ā^ōåZJŪAČG/`hčƒa0<¸ÆōŦi“ŖÆj|I/–kÉĶCNníĢ„zṆmŧŦ%^OĘs›gQÔpõPÃ;ßâ“Ô ī—Æ5¸Ũš'š"Žß_EũĢ×ëqĩÎŅmSÂ^ôƒļ麈Ī‘FīKįå itG,×ËBŊ.]oČx!Dšõ>K“PˇŖŌ 5 2`—2['ĩcéôō´cÁķË<¤`œ¤(nf‚„q}…$ۃˇ|‰oŅ{qŸQ++­đPj/‡ÂjĮڂCÁXIŅá ÆÉžcøāO"ˇ|KĪ—3`Ã9 žxų(8CK]d:“‘jčęÛl5„5Įčŋ˙úæŨ}Ķ­%m~ ,:^l7t">oAüŽ ĘyYØĢAßėĢAė^Ķ’Õ€­VW/ —WCܐCõžPOƒxüjßü§Ņ{REĮĒ‘ÕcB\^—KÃ+Ü6õŧ˛yģ}pņ2ģü ،ņŖÃ­Ų˛Š –¯ŗk\aüübëŲ/>ž&_ëdÛĒåöÛŋ~‹ūé˙°1Ķö ˇ&Վ•ŋØāÔŅzJ=¤Ž8IQˆbĨ ãú I$đ–/#°—|9‚–’ˇ@ōÕ]pœ‡Ūĩô^;ûĒ›Ã5 ÛõŊŲNš??\kL)˙ [܀?âuēkÉū}#eūCgŅõ÷לiG^v“™1;ÜZŽŪJÛāg\aīŋå#Ö3{đXûN§ŋGjĐęÛíjįVĨⅸɥjˆëÛúäö2¸a y÷čWÆ5A=TC¸xņ2ûę‚y63Ŗ†Pw‡\=dĀ.e]*NíXŊųbyHÁ8IQÜĖ%¨Äõ>Kā5_J`/ųr-%o äSÁĄ9ŊęĖ4H€;}Ā7}ãîî´ÔŖŽy Q‡_\l=ŗ§†[ķËîņP;wņ‚CA™‡Ô+)QBœLalŸ)‰Ūōe.=_Ž€Ĩä,¤|ÍčáĐoĪÎw-Í?†āÛØæĸĮÃđBdĄĮƒ?ē"jįYčņĐZ†ę ‰<= rõpí)<|¨Ž˙¸ .<ÔÎŨū)!VĒ0ޝđ‘Doų2—ž/gĀRōŌ@žĄ(:Ô%÷á æqj†))OŌ °žō8QxĪyĐV(< /…ƒ?üÕE3´rá!×P‹ÜÃ*ŧPŪęšujCStĐÆ:bÅE!Jˆ•*Œë+|$‘Ākž”Ā^ōåXJŪAČ×?¨ĸîƒ ämā$\‡—ĮŠĻÄÅ ßOæ!ã$Eq3” ×gøH,×|)ÁŊäË´”ŧ‚4¯í 5C”ĮŠĒÄ÷’ũÃOū'xL{eđŌęęžĢÅPņ^pÔCęŒ…h0NĻ0A”Į—Doų2—ž/gĀRōĐ@Žē RčPíÜ@.Éux yœĒaJĖ7(Ŧ§ûØgĪ>ÁĻŽnA'RŅá˛sO˛é3f†[hŽ–(<äŋ„Žãb;ķ:b%E!Œ“)Låņ%‘Ā[žŒĀĨį˰áœOŧ|Í)8H')šķ>ąt5Ԕ'iPXOyœ(ŧį<ļ:pŽpø<ģúOˇ÷žtˆõt _A'PÁA?×kĪ{ƒXų9Ī;hnø Í1¤…‡b—Đu\l§R,kĒ(D ąR…q}…$x͗ØKžKÉ[ @ųîå['éä:ŧ„’xŪō%ō8ĨįËČ—c—Ú i _į¤Á×˙[ų|˙íüßąV>o}{ĒũYåįpækˇ“+?ũ|ôsŌĪK?7ZňŨáķĀ+›ˇÛ/ŗ+Θg3Æ×_!ß°|]ŗā ģā˛Éŗ§„[kˆ]ko[ĩÜųÔ[ėˆĪüČÆLÛ7ܚ"õúŧ„‹ö(„¯@×WøH"×|)ÁŊäË´”ŧ‚4¯3 %ŠĒÄĄÛÆeSƍąĻM˛Ŋ{ÆÛÔĘķIŨŖlL×HÛi#mô˜16qü8›Kâ5_Jp/ųr-%o äŖāPEÕP%æIÚc.‰Â{Î ąíÛˇËļmۂe{eéŨ´ŲļīØi[úúlįŽf•Ë‘Q#vÛŽĘUɖĘöŪžíÖˇ}§­Ü°%ˆą~ķ6[ŊŠ˙šŠ˛iÛÛ´u{đ|冞ā1Mī–mÖW‰™—ziôŒËžŗĮŪ“úū‚¨āųŒ Ũ6rä›:žÛĻNčŋ-éŪ“ÆY÷čJŦîŅAĖŽĘ?õģ+˙3b„ueŨcĮÚ˜ŅŖŦgÂø Ā0ϞhX„{šÂZ…‡öŌ…‡ŒëíĖÂCæõy îQˆbĨ ãú I$đ–/#péųrl8oÁuæk^ÁAüPrŪ`§f˜’ō$ ë)…÷œhC;wî´;v… ÷(zžĢōšlÚŌgÛļWÖwíŦŧžÃvėÚe•+ëģ-qYPƒ„åīIąģ˛÷ΔŨu7ld—uiŖULč<ĒWB°ļ‡E=jȃ š7ƒáhuĐ*(<´—Ö.<Ô¸ŪN-<¤S…{ĸ„XŠÂ¸žÂG ŧåË\zžœKÉ[ HųčåPEÕP%æ‰ÖS' ī9u đ€VAáĄŊä)<4j]oŊæN=Ļž@ QˆbĨ ãú ?@,×|)Ŋä˰”ŧ‚4O‡ē‹…ō6p’NŽ%äqdž*1OŌ€°ķ8QxĪy€ĶÜÂCi×Û ŌáAˆč‰a\_á#‰÷ā-_"SzžŒÃGb ŧæK î%_Ž Ĩä-¤| rÚĀI:šB”ĮŠĒĒHā5_J`/ųr,%o äëŧ^%äqdž*1OŌ€°ķ8Aø&ä:XŠ…‡ę—į ^ŧG‡7§Ē0ޝđ‘XŸo'-¸—|9‚–’ˇ@ōQp¨ĸf¨’ō$ Ęë)åķœFėŽŸ^ŲŧŨ>¸x™]qÆ<›1~t¸ĩē´KķŪåëėšW؅‹?l=ŗ§„[ķÛļjš=ōŠŗėāū‹™˛W¸Õ—đxoc$x˗¸ô|96œˇ`€:ķÕ]l šOĩ¤÷T3Œ§ĪnPXOyœ(ŧį<´ mkWÚ˙įOėČËn˛13f‡[ËĄļÁ7θÂŪËGęj`xųŪíØG¯^d—_¸ĐŪ}ę1áV´*Õ.^ŧĖžē`žÍˍ!4\xČēø'Ķ„s`8đ_xhB#žXŠ*{6á\N†vrÉ/8TŲ›‚ĨkÛÂCŠŠxŅv…‡bu  Ĩļ)</8dė],hˆŨáķĀ+›ˇÛ/ŗ+Θg3Įˇ×ģ|]ŗā {įwÎŗIûô„[‹+V#¨ą7 é6ŧÔkß˙ÃoŲûoųˆõĖžnŌ}īöėŖW/˛Ë/\hī>õ˜p+Z•j/^f_]]Cđ^xĄđ€<(<´—!-<ėÚšË6žÔŽåP 7Zø3hŌ‰ņ;Đ~&îĶc#ģÚúÆzh íeH šQp(Ŧ%? 
J@áĄŊä)< ]šQ-Ȝ­Čģv´–üšxRü@ûi~áĄ`C•ÆfáŦyštR-ûū55ˇđP õHcŗ…?ƒ&XËž@nÍ)<hAŌØėגŸA“~8ü@įđ[x(Ђ¤ąŲ¯%?‡&žŋĐYü 6TilūȚ§I'Õ˛īАō Z46[ø3hŌ‰ĩėû”ĸŧÂC$Í~-ų4é‡Ãī  ´ ilökÉĪĄ‰'Åī õ 6TilūȚ§I'Õ˛īāM}…‡­G›ũZō3hŌ‡ßžŠ ´ ilökÉĪĄI'ՒīĐTų [46[¸ŅŨ¤“âw ĩ Z-ÛØnĸ–ũ štb-ûūC"ģđP Icŗ_K~Múáđ;HSßä’!›ũZōshâIņ;ČRwáÆfSÛöÅ4ąāĀī šÂ…›-ü4éÄZöũZNîÍ~-ų4é‡Ãī ¨š…›ũZōshâIņ;¨ĮˆŨáķĀ+›ˇÛ/ŗŋ}í›Ū=*Ü:|ĩlƒ›‚€ô“;´K¯ŊÁ>˙ŪˇŲš'nEĢZŨˇÃ>ķĢgíĢ æŲĖņŖÃ­ehļ—ž}Æ[zˇvü‰ļΜíhu… ;+Ģkļė×hõxøëko°ŋ§ĮC[™6n”uHī3?¨đĀPųŪíØGŽ^dW\¸ĐŪ}ę1áV´ŗÂˇĶȋÂđ†Âđ†Âđ†Âđ†Âđ†Âđ†Âđ†Âđ†Âđ†Âđ†ÂđfÄîŠđ9…íÜšĶvėØaÛˇožoÛļÍvíÚe7oąÍ}}ļyëv šž•mfũMĐm;vÚČĘķJŖ4X—•õĨĪŊbŸûŲ}vɛą“įî­=ÂW+GŽa;+ģ=*ÜRymäHQŲ>~ėhßŨmÆu[WW—3ĻōŌČāqÔ¨QÁ6 €L*(ôõõEËęõlKåqgeûîŨģltĨíŋŖŌĒėÛžĶzûļŲĻm;mõĻ>[ĩase}ģ­Û˛-(2ČĒ}ļ+Ö]ģšōÚN#úõVöí ÷•îQ]Ö3nL¸f6Ļk¤Mŋg}äˆ6cbwđ|Leß)•}'Œe{õŒ¯lWy^9ž{Lå¸Áą:O1ŌēF˛qcĮÚô)=ÖŨŨmc+Īõ8zôč ĘEá†96oŪ6oŲbkÂâÂČŨ;mûÎ]ļnËv[žn“=ņō:{aíĻ ° ĸŠ Ģ*ĪÛŌ Ũ6ēk¤Í˜8ÖĻWžī;yŧ˛÷ÛoĘ›6~LđÚŽ]ÖŨ=ÖĻõL˛ ãĮE‰ &Ŋ&P 0Œh8Ć lãĻMöōęĩļĩ¯/â°rCŸ=ĩĒמŠ,/önļ×W–Ū-ļqëöđČÎ7qėhÛkŌ8Ûˇ§˛Lo͜ls§O˛Ŋ'vۈ‘#ll÷8›9mŠMš8Ņ&V ã@m ƒŠCoo¯Ŋŧf­õŽīĩ];wØĶk7ŲC/ŦļĮ_^oO­ÚPŨž=ãmÎô‰vč^“í¨ŲĶíāĘķ‘]ŖŦ§g’í5}šMš4ÉÆî8 ĐA4šŖz4ŧŧjĩ­ZŊÆļíØaŋ¸Ö–>ûŠ=ōŌē WĘ1gÚ$;bß)öšfÚĢgMĩąŖGÛôiSmī™3‚BZöŖđmNÍ:õjx~ŋļ~Ũ:{~ŨfģũÉmÉ3¯Øŗk6„{Á7Íqō{ŲŠ‡Î˛§N°I==ļ˙Ŧ}mōäÉÁ7†+ ĐĻtÛĘį—¯°_Zi¯lÜb×˙æYģõɗ‚ģK`hižˆĶŪĮÎ>zN0_Ä^3gÚûÍ&Ģn(<@›Ų´i“=ļėiÛ´qƒ-~l…-úÍŗöÂēMáĢh5ę ĄĂWÍ &Ĩûî=OĻÎépũEoļųķį‡kƒÂ´¨ŦƒsČĖ{ĶĢfÛiīc,_cŋ|âEģ˙ųÕÖˇcg¸†ŠzŖč™§ÍÛĮ^ŗ˙t[ōĖ+öŗGžˇĮVŽ÷ŒÂ ŠjŨáôƒ÷ą×Uš‡îÕc­Xkw>ķ˛=đÂ[ĩŠ/Ü žŠgÃ1ŗ§ŲÉs÷ —­Ú`ˇ>ņĸũúé—mãÖíá^Ų(<š*oá!NE}Ķ~✙Á7í›ļí°û_XôˆĐˇí É(OO÷;bß)vÔžSíØũĻ…‡{Ÿ_mKŸ[LY´į …@SÕSxHŌpŒ×ė?Ξ=-˜BßŧĢĄåŠUŊÁˇōyžî&Žms§O æ×8|Ÿ)ÁįÚ3nŒ=ņr¯=¸bŨûüĒĒÃ(ō đhĒ2 IsĻM˛ƒgjéŠ4ž'ۜJcZ…‡'+ čÖm –×oļ—7ô Ëaęĩ°oĪ8ÛkŌ8Ûoę›3UņIÁögÖl´ĮVŽŗ§Wm°'+‹ 7eĸđh*…‡4ûMŠ4°§M´š•öž=ãmīJŖ{¯IŨÁˇü+7l Š+Öo _ėí/JhČF;ÛPAC$ö<.xŸû„E†Y“ûß÷–í;‚;Oô_ļØĶĢ7Øŗk6U– a(<šĒY…‡,š/bīžņQ#}æÄîĘs5ĐģŖ|oß6ëÛž3hŦĢŅžĄo{PĐ6õ˜Đ›bC9zûvû%mŽė—6äCį ! IcēFVÎaĪö cGۄ1ŖlƄnëŨœß¤îŅ6nô¨ā|ĩ͝ooåW„……x1eÅēMCzG €ĻęÂCjāĢ80câØĒ§§{Ԁug|å8õ°HR! wËļpmm;wŲÚÍ{ļW+zŦÚXŲV‰Ķę=4(<šĒ (O§F†ĨŖđŧĄđŧĄđŧĄđŧĄđŧĄđŧĄđŧĄđŧĄđŧĄđŧĄđŧĄđŧĄđŧĄđŧĄđŧĄđŧĄđŧĄđmîäC°•×|:XZYü<ã‹ļŖsQx€6÷įgbO­\<˙Ÿ<ļĸŖįė<^ú›līķ?,K—-ˇë>y^°‰Â´šų‡ėoˇ<´,(>{āŦpk{øė~<ļrÁĄđmėo:ÉĻNg‹îz$(>?ovøJŋ´á ^ų‰đU ֓~mûöGŪŽY°ŋöŅcōĩ;.ģ8ŠĢĨŦaņ˜ņķuâ¯ë3ˆĶųÅ_˙éĨ„¯ôĶ9§mĶâčuˇ(Füĩdüø{ÖšÄ_KæŽ(<@;õđšAO‡;.(>Hŧ ínXƒYōÄķÁc.8Ņ~´äĄ ÆyW|?Øæã.öՋī†M)>üãgÛڍ[ėS˙vc¸Ĩŋ¨pã}GqõūâÅåÕ ÷ú•ssÔĐ?ë¸CŖ×´¨SOĀqã”Kū9xŽĸC<ž†č=ˆŠŸĪ™†’´÷´a_| đmLÃ,_ą*xŽ"ƒég5/XORŖY|W8(B ũxq@Å5Ē˙â›×‡[,x]ņ5įD5jœģÚ˙UūRøJŅ$yŽŽĄ^ŽgÃÔ ãė•ŪMÁsqEQą@8­k{ŅŪ:ˇ~ū›áZ?TqžūķģĸüoŸTđ9i›ŖbM˛ĘpCáڔfņĩīˇXP„PA ŲČÖēÍj×#ŪЗ…'<LJƒ+"hŅųÔâz¸‚@|¨„æ¨PŒxĖää“zz/z-~Ŧ{îz~8nŨMn™×ÚM[ÂgũÜgzûoŸ“ôšëũÄĪ]=E†; ĐĻô ģÄ˙jK˛×†¨7Dŧ×Bܐ‚ø’ˇG…ÎEį*!ږ×õ$ĐqZWīõžH›b¨Ä‡ˆÄ—áŒÂ´)}ģžÖĐUÃũĐY3ÂŊú‡/$‡E4ęéJ‰÷8¨‡&Č÷ĐPĪ ­įĄaį|ņÛŅ0ŒŸ})Øîzc8n=>ĸĘ"šW#zHÄ?wôŖđmČM fá$ķī˜TP p į8)ô瓎÷€ņ:öã OˇôĶņÉaÕ¸ŸyįƒG×[">!ŖâšķŠ?WTPŅAīOŊ ’ôŸ—AÃQâs>(WŪb‡âģ^%ĸ‚‡›d훋īâÄ'÷Ôķá>šäˆŨás@ Y˛d‰}ÕÍáÚ@ŽąŸ˜ŅQƒZÃ/ÔØÖ|ސ¤žŽĄ!Žæ_P1AwžpE5ôãëqjtĮí:>ĢgéîŽņ}ô^TˆIˆŸ“Ä_Sc>^\ȊįčsHž{üŧõúĖž AĪ 7Q¤bÄ×ã’ņãŸĨ{ŽŠ3i1Ō\Ņ›mūüųáZį đ-ĒZᝧS  ĩŪPxŪPxŪPxŪPxŪPxŪPxŪPxŪPxŪPx ōíŧËV^ķé`Éō7ísōĄ„[(<™Ų3ÁÖnÜ<˙Ÿ<&]°āÄh …@ĻĨ˖ÛGÍ ×öP‡ƒöžf?ZōP¸H7bwEøĐB–,Ybg_us¸Ö\?Ŋô‚ °đåEˇÚįßsĻķÅo۝?žÚ?ãĐY3뛋īôēŠ×}ōŧāš<ĩrrÉ?‡kfw\vą=žbUĐĢâøyŗƒm*pŧõķß ž;^ų ›:q\¸fļ÷ųŸ ŸõKž.ņ\ ĸss’9Üyč}čŊĻC3]Ņ›mūüųáZį Į Ķ×~W0œâ3ī|c¸ĨßYĮjˇ<´,\ÛÃT$p‹õ*TÄéøWz7¯ĢhĄD|H‡Š *z¸*(¨Pā¨0˛vĶ–¯§.ũÎMŅ>:§ķPņA¯eŅĄ“QxTĩä‰įŖž â ŸúˇƒĮ8õzP#>NõnˆSī‚ķŽø~đ\ĮhŸcœŦËĢ>üĨ čá¨80uÂžŪ *"h›Ŗ"HüõˇĪ?*ȏĄa!ņ÷!ĘëÎ~PxTõĩī]ÁAs>¨QŸ%~ˇ -*ä‘6Ąž q*hˆ„ŖsRG9UdˆĮ¸pÁ‰áĢ{09ĻUš jÜģI%œ1ФâpõâģƒžZtlQ*¨¸ábÜxßãá+{č<\QAŊâķHˆŽqĮĮ4…@MĘ †ū?^pvPHˆaˆ;õđšA/‚´ayŠx!ŸũÁ/‚Į4ęÍŸŋAC3âÔû!Ū#C‡Â &WHPņ!mRIgųšõÁ õŒM*ŠcŠxđŲ—‚Į…'<Ē‘jĄâ†zV¸Zâ“OęnĘŸ°RĪ““KÂ? 
€\ÜŧÕz3č5í§;[¨0˙ũĢΑFC;4TCs2(† ÉĄęu‘JĄBƒ+,¨G†zD¸Z4T„;W4߈Ũás@ Y˛d‰}ÕÍá7„nÊ"…Ŗę ŅŽÅ…ë/zŗÍŸ??\ëôx´%7CTŒP‡ûŸYnAĢ Į´(zô€āņęÅwÛŪį.Xnŧīqûü{Ό^“ŠÆ…ƒöž6`{ÜYĮė“´tŲōāü0´(<ęĸB€ ˇ˙öépËŽ'Ä7ßô<Č*ĝwÅ÷ƒĮ??ë”āŅQņBą’ÛÅåI+~Ü˙ˊāü0´(<ęrôœ}‚ĮŸ})xŒSO 4#ĢhP„ i='Ēõ¸Xt×#ÁcĢÍ?1ÜPxÔåÔÃįE…;.ܲ‡züxÉCÁķŦáIšB\·¸¯ŨxĮ 9TPPŊ–Ɲ×\z= ) €ēĖė™>Č܄“*@¨hÖķāÂ'FKjÍõFE0âs6ŧ}ūQÁļ´Â‡ŖÂČėi“Ã5  €RŠ8 ‰W€PĄ ÉM.yéwn z/Tģ Å-- öqķEhōJmĢ%Ģ@‚æ đ(›pREדA‹h['TŦxGJqÂŅ-2EķE¸…ÛVÍ+Ŋ›Âg uIkĐģI$Ũ-2Ũĸ RmĸĮjC2'4_„ ēũf-Ў|Íúp CÂ .jĐĢaŋUϊņaŽģģEÚp ĮísÁ‚Ã-ƒšâ„–ŦI%w^O¯\C’+NԚTRžtDđ还ĀĐąģ"|h!K–,ąŗ¯ē9\kMw\vqPxëįŋniē=§ !§\ōĪá–ÖvũEoļųķį‡kƒ€ēš;M´ĸŧwŊ€_uĶ]%ÔĢ Ú„CAwŊPOŒû‚]ēč{ąwsø ÚŨŒ ŨöŲŗO°W´ŋrđ<1bDøJgĄđmāĨ•+íɧžļ˙¸īiûū=Ël'MšļÕ5b„Ŋ㸚ö‡'Ėŗšs°Ųŗf…¯t& Đ&ļmÛf>ņ¤­YˇŪž{÷“ö_=gÛwî _EĢSÁaÁaŗė}¯}•Íœ:Ų^uČÁ6vėØđÕÎEáÚĖæÍ›íņeOŲúŪ ļč7Īˆĩ›ˇ†¯ĸÕL?ÖÎ<|?;÷Øm꤉6oîÖĶĶžÚų(<@›Ú´i“Ŋ°âE[Ŋzĩ=üŌ:ģū7ĪØũΝļž;Ã=0TFw n‘ųļŖæØŅŗĻÚ´iĶlŋYûÚĉÃ=† ĐævîÜžņ%Û˛yŗũöĨuöߏ-ˇ^XcĢ6õ…{Á7õl8fö4[đĒũėČ}§X÷¸ņÁ]IĻOŸn]]Ã÷Ž$ ƒ¨ą~ũz{îŗl͆ ļqë[ōôJģįšWėą•ë’Qĸžî1vÄžSė¸ũfØIδ)ãĮÚø m˙}÷ļŠS§ëbC…č`ŽąfíZ{é•Õļc[ŸõmßiŋÜk÷>÷Š=ĩĒז­Rqb{¸7˛L;ÚæNŸhÍčąæėeĪėąņcēŦkôXÛgæt›6uę°F‘…FļlŲb7n´ĩë{mõēõf;ˇň§×l´Į^\kĪ­Ũh/Žßl/oč–Ã44\bߞqļפqvĀ´‰vøžĶėĀĘã„1]ļ{ä(›:e˛M›ÜƏ…j(<Ā0§b„Ũ-cõē^ÛÜ×gģvlˇ‘•×zˇî°W6öŲŗĢ{íš5íÅŪūĸ„†l´ã° 4DbßÉãlī°¸0gÚ$ÛģgŧõŒej čeãĮŗ)=m℠Aa\eõĄđHĨų"ļmÛf}}}ļuëVëŨ´Éz7l žØŊËēF˜mÛšÛļīÚek7WöÛąĶVmėŗ5›ļښ[‚›ļí°MąĄŊ};lËöáÚ›+ûĨ ųčÕe=ãÆ„k{ŒéiSĮīŲ>aėh›0f”͘ĐmĶ&ŽŗiÆÚŒ‰ŨÁņÚoôȑ•cFXåtÍFvŲčŅclō¤ 6iÂxëîîļącĮ ķ2”Â nÛˇo zÜącGđ|׎]ļiKŸõmŨjÛļī°]ŽéYŲ>ÂvێĘcĐŊ ¤m*d$íŽŧ œĘ>ŖFŽŦf#+ĪĮŒeŨcĮڄqŨ6RE†1clÔ¨QA!AĪõ8zôč`4…āMy tf˙?Ņâ‘ÕĢÄT&IENDŽB`‚GuestProxyAgent-1.0.30/docker/000077500000000000000000000000001500521614600161525ustar00rootroot00000000000000GuestProxyAgent-1.0.30/docker/linux/000077500000000000000000000000001500521614600173115ustar00rootroot00000000000000GuestProxyAgent-1.0.30/docker/linux/Dockerfile000066400000000000000000000001751500521614600213060ustar00rootroot00000000000000FROM ubuntu:latest ARG RUST_VERSION COPY install.sh ./ RUN chmod +x install.sh RUN RUST_VERSION=$RUST_VERSION ./install.sh GuestProxyAgent-1.0.30/docker/linux/docker-compose.yml000066400000000000000000000004361500521614600227510ustar00rootroot00000000000000services: gpalinuxdev: build: context: . dockerfile: Dockerfile args: RUST_VERSION: 1.81.0 platform: linux/amd64 volumes: - ../../:/usr/root/GuestProxyAgent:rw working_dir: /usr/root/GuestProxyAgent command: /bin/bash tty: true GuestProxyAgent-1.0.30/docker/linux/install.sh000066400000000000000000000015721500521614600213200ustar00rootroot00000000000000if [ "$EUID" -ne 0 ] then echo "Please run as root" exit fi # Build Dependencies; # WSL2 won't work with linux headers, fallback to generic apt update && \ (apt install linux-headers-$(uname -r) linux-tools-$(uname -r) || (apt install -y linux-headers-generic && exit 0)) \ && apt install -y \ git \ libbpfcc-dev \ libbpf-dev \ llvm \ clang \ gcc-multilib \ build-essential \ linux-tools-common \ linux-tools-generic \ rpm \ musl-tools \ zip \ dotnet-sdk-8.0 \ sudo # Originally was grouped with install dotnet chown -R root:root /var/lib # Install Rust apt install curl (curl --proto '=https' --tlsv1.2 https://sh.rustup.rs -sSf | sh -s -- -y) PATH="/root/.cargo/bin:${PATH}" . 
"$HOME/.cargo/env" rustup update "$RUST_VERSION" rustup component add rust-std-x86_64-unknown-linux-musl rustup default "$RUST_VERSION" GuestProxyAgent-1.0.30/docker/windows/000077500000000000000000000000001500521614600176445ustar00rootroot00000000000000GuestProxyAgent-1.0.30/docker/windows/Dockerfile000066400000000000000000000053051500521614600216410ustar00rootroot00000000000000FROM mcr.microsoft.com/windows/server:ltsc2022 WORKDIR C:/Users/ContainerAdministrator ARG RUST_VERSION RUN curl -SL --output vs_community.exe https://aka.ms/vs/17/release/vs_community.exe # Install VS Build Tools with: # Desktop development with C++ (w/recommended) # MSVC v143 - VS 2022 C++ ARM64/ARM64EC Spectre-mitigated libs (Latest) # MSVC v143 - VS 2022 C++ x64/x86 Spectre-mitigated libs (Latest) # C++ ATL for latest v143 build tools with Spectre Mitigations (ARM64/ARM64EC) # C++ ATL for latest v143 build tools with Spectre Mitigations (x86 & x64) # C++ MFC for latest v143 build tools with Spectre Mitigations (ARM64/ARM64EC) # C++ MFC for latest v143 build tools with Spectre Mitigations (x86 & x64) # Windows Driver Kit # Windows 11 SDK (10.0.26100.0) RUN start /w vs_community.exe --quiet --wait --norestart --nocache \ --installPath "%ProgramFiles%\Microsoft Visual Studio\2022\Community" \ --includeRecommended \ --add Microsoft.VisualStudio.Workload.NativeDesktop \ --add Microsoft.VisualStudio.Component.VC.Runtimes.ARM64.Spectre \ --add Microsoft.VisualStudio.Component.VC.Runtimes.x86.x64.Spectre \ --add Microsoft.VisualStudio.Component.VC.ATL.ARM64.Spectre \ --add Microsoft.VisualStudio.Component.VC.ATL.Spectre \ --add Microsoft.VisualStudio.Component.VC.MFC.ARM64.Spectre \ --add Microsoft.VisualStudio.Component.VC.ATLMFC.Spectre \ --add Component.Microsoft.Windows.DriverKit \ --add Microsoft.VisualStudio.Component.Windows11SDK.26100 \ || IF "%ERRORLEVEL%"=="3010" EXIT 0 # WDK for Windows 11, version 10.0.26100.1) RUN curl -SL --output wdksetup.exe https://go.microsoft.com/fwlink/?linkid=2272234 RUN wdksetup.exe /quiet /norestart /log wdksetup.log RUN SET "PATH=C:\Program Files (x86)\Windows Kits\10\bin\x64\;%PATH%" # Install Chocolatey RUN powershell -Command "Set-ExecutionPolicy Bypass -Scope Process -Force; \ [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; \ iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))" # Install Clang RUN choco install -y llvm --version 11.0.1 --allow-downgrade RUN choco install nuget.commandline -y RUN choco install dotnet-sdk -y RUN choco install git -y # Rust RUN curl -SL --output rustup-init.exe https://static.rust-lang.org/rustup/dist/x86_64-pc-windows-msvc/rustup-init.exe RUN rustup-init.exe -y -q RUN SET "PATH=%USERPROFILE%\.cargo\bin;%PATH%" RUN rustup update %RUST_VERSION% RUN rustup default %RUST_VERSION% ENTRYPOINT ["C:\\Program Files\\Microsoft Visual Studio\\2022\\Community\\Common7\\Tools\\VsDevCmd.bat", \ "&&", "powershell.exe", "-NoLogo", "-ExecutionPolicy", "Bypass"] GuestProxyAgent-1.0.30/docker/windows/docker-compose.yml000066400000000000000000000005251500521614600233030ustar00rootroot00000000000000services: gpawindev: container_name: GpaWindowsDev build: context: . 
dockerfile: DockerFile args: RUST_VERSION: 1.85.0 platform: windows/amd64 volumes: - ../../:C:\Users\ContainerAdministrator\GuestProxyAgent:rw working_dir: C:\Users\ContainerAdministrator\GuestProxyAgent tty: true GuestProxyAgent-1.0.30/e2etest/000077500000000000000000000000001500521614600162565ustar00rootroot00000000000000GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest.sln000066400000000000000000000021621500521614600227450ustar00rootroot00000000000000īģŋ Microsoft Visual Studio Solution File, Format Version 12.00 # Visual Studio Version 17 VisualStudioVersion = 17.5.33530.505 MinimumVisualStudioVersion = 10.0.40219.1 Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "GuestProxyAgentTest", "GuestProxyAgentTest\GuestProxyAgentTest.csproj", "{A5602F49-7ADA-4203-A3B8-0936CE1F2862}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Any CPU = Debug|Any CPU Release|Any CPU = Release|Any CPU EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution {A5602F49-7ADA-4203-A3B8-0936CE1F2862}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {A5602F49-7ADA-4203-A3B8-0936CE1F2862}.Debug|Any CPU.Build.0 = Debug|Any CPU {A5602F49-7ADA-4203-A3B8-0936CE1F2862}.Release|Any CPU.ActiveCfg = Release|Any CPU {A5602F49-7ADA-4203-A3B8-0936CE1F2862}.Release|Any CPU.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE EndGlobalSection GlobalSection(ExtensibilityGlobals) = postSolution SolutionGuid = {75DDC28D-5045-4B2E-852C-2DABC2291200} EndGlobalSection EndGlobal GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/000077500000000000000000000000001500521614600222265ustar00rootroot00000000000000GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/Extensions/000077500000000000000000000000001500521614600243655ustar00rootroot00000000000000GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/Extensions/ExceptionExtensions.cs000066400000000000000000000020341500521614600307310ustar00rootroot00000000000000using GuestProxyAgentTest.TestCases; using GuestProxyAgentTest.Utilities; namespace GuestProxyAgentTest.Extensions { public static class ExceptionExtensions { public static void UpdateTestCaseResults(this Exception ex, List testCases, JunitTestResultBuilder junitTestResultBuilder, string testScenarioName) { foreach (var testCase in testCases) { if (testCase.Result == TestCaseResult.Running) { testCase.Result = TestCaseResult.Failed; junitTestResultBuilder.AddFailureTestResult(testScenarioName, testCase.TestCaseName, "", "Test case timed out.", ex.Message, 0); } else if (testCase.Result == TestCaseResult.NotStarted) { testCase.Result = TestCaseResult.Aborted; junitTestResultBuilder.AddAbortedTestResult(testScenarioName, testCase.TestCaseName, "Test case not started."); } } } } } GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/Extensions/ModelExtensions.cs000066400000000000000000000112421500521614600300340ustar00rootroot00000000000000īģŋ// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT using GuestProxyAgentTest.Utilities; using GuestProxyAgentTest.Models; using System.Net; using Newtonsoft.Json; namespace GuestProxyAgentTest.Extensions { /// /// Extension methods for data model, that translate one model to another. 
/// public static class ModelExtensions { public static TestCaseResultDetails ToTestResultDetails(this RunCommandOutputDetails runCommandOutputDetails, Action logger = null!, bool downloadContentFromBlob = true) { return new TestCaseResultDetails { StdOut = runCommandOutputDetails.StdOut, StdErr = runCommandOutputDetails.StdErr, Succeed = runCommandOutputDetails.Succeed, CustomOut = runCommandOutputDetails.CustomOut, FromBlob = downloadContentFromBlob }.DownloadContentIfFromBlob(logger); } public static TestCaseResultDetails DownloadContentIfFromBlob(this TestCaseResultDetails testCaseResultDetails, Action logger = null!) { if(!testCaseResultDetails.FromBlob) { return testCaseResultDetails; } testCaseResultDetails.FromBlob = false; var downloadSucceed = true; if (!string.IsNullOrEmpty(testCaseResultDetails.StdOut)) { var stdOutDownload = TestCommonUtilities.DownloadContentAsString(testCaseResultDetails.StdOut, logger); testCaseResultDetails.StdOut = stdOutDownload.Item2; if(!stdOutDownload.Item1) { downloadSucceed = false; } } if (!string.IsNullOrEmpty(testCaseResultDetails.StdErr)) { var stdErrDownload = TestCommonUtilities.DownloadContentAsString(testCaseResultDetails.StdErr, logger); testCaseResultDetails.StdErr = stdErrDownload.Item2; if(!stdErrDownload.Item1) { downloadSucceed = false; } } if (!string.IsNullOrEmpty(testCaseResultDetails.CustomOut)) { var custOutDownload = TestCommonUtilities.DownloadContentAsString(testCaseResultDetails.CustomOut, logger); testCaseResultDetails.CustomOut = custOutDownload.Item2; if(!custOutDownload.Item1) { downloadSucceed = false; } } if (!downloadSucceed) { // in case of any download failed, mark the test result as failed and set the std error as combined string from all the download result testCaseResultDetails.Succeed = false; testCaseResultDetails.StdErr = string.Format("Download content from blob failed: \n stdOut: {0}\n stdError: {1}\n customOut: {2}" , testCaseResultDetails.StdOut , testCaseResultDetails.StdErr , testCaseResultDetails.CustomOut); } return testCaseResultDetails; } public static void WriteJUnitTestResult(this TestCaseResultDetails testCaseResultDetails, JunitTestResultBuilder testResultBuilder, string testScenarioName, string testCaseName, long durationInMilliseconds = 0) { if(testCaseResultDetails.Succeed) { testResultBuilder.AddSuccessTestResult(testScenarioName, testCaseName, testCaseResultDetails.StdOut, testCaseResultDetails.CustomOut, durationInMilliseconds); } else { testResultBuilder.AddFailureTestResult(testScenarioName, testCaseName, testCaseResultDetails.StdOut, testCaseResultDetails.StdErr, testCaseResultDetails.CustomOut, durationInMilliseconds); } } /// /// Safely do json deserialize customout as the object /// In case of error, return null /// /// /// /// public static T SafeDeserializedCustomOutAs(this TestCaseResultDetails testCaseResultDetails) where T: class { try { return JsonConvert.DeserializeObject(testCaseResultDetails.CustomOut); } catch (Exception ex) { Console.WriteLine("Deserialized custom out json string failed with exception: " + ex.ToString()); } return null; } } } GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/Extensions/TaskExtensions.cs000066400000000000000000000013731500521614600277020ustar00rootroot00000000000000īģŋ// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT namespace GuestProxyAgentTest.Extensions { public static class TaskExtensions { public static async Task TimeoutAfter(this Task task, int timeoutMilliSeconds, CancellationTokenSource cancellationTokenSource = 
null!) { if (task == await Task.WhenAny(task, Task.Delay(timeoutMilliSeconds))) { await task; } else { if (cancellationTokenSource != null) { // Cancel the task cancellationTokenSource.Cancel(); } throw new TimeoutException("task time out."); } } } } GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/GuestProxyAgentScenarioTests.cs000066400000000000000000000141601500521614600303760ustar00rootroot00000000000000īģŋ// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT using GuestProxyAgentTest.Utilities; using GuestProxyAgentTest.Extensions; using GuestProxyAgentTest.Models; using GuestProxyAgentTest.Settings; using GuestProxyAgentTest.TestScenarios; namespace GuestProxyAgentTest { /// /// GuestProxyAgentScenarioTests class for running scenario tests /// public class GuestProxyAgentScenarioTests { /// /// Main function to start each scenario test /// /// /// public async Task StartAsync(List testScenarioList) { var groupTestResultBuilderMap = new Dictionary(); foreach(var testGroupName in testScenarioList.Select(x => x.testGroupName).ToHashSet()) { groupTestResultBuilderMap[testGroupName] = new JunitTestResultBuilder(TestSetting.Instance.testResultFolder, testGroupName); } var taskList = new List(); var testScenarioStatusList = new List(); foreach (var testScenario in testScenarioList) { var testScenarioStatusDetails = new TestScenarioStatusDetails() { ScenarioName = testScenario.testScenarioName, GroupName = testScenario.testGroupName, Status = ScenarioTestStatus.NotStarted, ErrorMessage = "", Result = ScenarioTestResult.Succeed, }; Task testScenarioTask = null!; try { if (Activator.CreateInstance(Type.GetType(testScenario.testScenarioClassName)!) is TestScenarioBase @scenario) { testScenarioTask = @scenario .TestScenarioSetting(testScenario) .JUnitTestResultBuilder(groupTestResultBuilderMap[testScenario.testGroupName]) .StartAsync(testScenarioStatusDetails); taskList.Add(testScenarioTask); } else { testScenarioStatusDetails.Result = ScenarioTestResult.Failed; testScenarioStatusDetails.Status = ScenarioTestStatus.Completed; testScenarioStatusDetails.ErrorMessage = "Failed to create the scenario class instance: " + testScenario.testScenarioClassName; } } catch (Exception ex) { testScenarioStatusDetails.Result = ScenarioTestResult.Failed; testScenarioStatusDetails.Status = ScenarioTestStatus.Completed; testScenarioStatusDetails.ErrorMessage = ex.Message; } finally { testScenarioStatusList.Add(testScenarioStatusDetails); if (testScenarioTask != null) { taskList.Add(testScenarioTask); } } } var stopMonitor = new ManualResetEvent(false); var monitoringTask = Task.Run(() => { while (!stopMonitor.WaitOne(5000)) { ConsolePrintTestScenariosStatusSummary(testScenarioStatusList); } }); try { await Task.WhenAll(taskList).TimeoutAfter(TestSetting.Instance.testMapTimeoutMilliseconds); } catch (Exception ex) { Console.WriteLine($"Test execution exception: {ex.Message}"); } stopMonitor.Set(); foreach (var groupName in groupTestResultBuilderMap.Keys) { Console.WriteLine("building test result report for test group: " + groupName); groupTestResultBuilderMap[groupName].Build(); } ConsolePrintTestScenariosStatusSummary(testScenarioStatusList); ConsolePrintTestScenariosDetailsSummary(testScenarioStatusList); } private void ConsolePrintTestScenariosStatusSummary(IEnumerable testScenarioStatusDetailsList) { var message = $"Test Running Summary: total {testScenarioStatusDetailsList.Count()}" + $", not started {testScenarioStatusDetailsList.Where(x => x.Status == 
ScenarioTestStatus.NotStarted).Count()}" + $", running {testScenarioStatusDetailsList.Where(x => x.Status == ScenarioTestStatus.Running).Count()}" + $", failed {testScenarioStatusDetailsList.Where(x => x.Status == ScenarioTestStatus.Completed && x.Result == ScenarioTestResult.Failed).Count()}" + $", success {testScenarioStatusDetailsList.Where(x => x.Status == ScenarioTestStatus.Completed && x.Result == ScenarioTestResult.Succeed).Count()}. "; Console.WriteLine(message); } private void ConsolePrintTestScenariosDetailsSummary(IEnumerable testScenariosStatusDetailsList) { var failedScenarios = testScenariosStatusDetailsList.Where(x => x.Status == ScenarioTestStatus.Completed && x.Result == ScenarioTestResult.Failed).ToList(); var message = $"Total Failed Scenarios: {failedScenarios.Count()}" + Environment.NewLine; int i = 1; foreach (var fc in failedScenarios) { message += $"Failed Scenario {i}/{failedScenarios.Count()}: " + Environment.NewLine + $"GroupName: {fc.GroupName}, ScenarioName: {fc.ScenarioName}" + Environment.NewLine + $"Scenario Level ErrorMessage: {fc.ErrorMessage}" + Environment.NewLine + $"Failed Test Cases Summary: {fc.TestCasesErrorMessage}" + Environment.NewLine; i++; } Console.WriteLine(message); } } } GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/GuestProxyAgentTest.csproj000066400000000000000000000040731500521614600274240ustar00rootroot00000000000000 Exe net8.0 enable enable CS8603,SYSLIB0028 PreserveNewest PreserveNewest PreserveNewest PreserveNewest GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/LinuxScripts/000077500000000000000000000000001500521614600246755ustar00rootroot00000000000000GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/LinuxScripts/CollectInVMGALog.sh000077500000000000000000000036371500521614600302360ustar00rootroot00000000000000#!/bin/bash # Copyright (c) Microsoft Corporation # SPDX-License-Identifier: MIT currentDir=$(pwd) echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - currentDir=$currentDir" zipFilePath=$currentDir/guest-proxy-agent-logs.zip decodedLogZipSas=$(echo $logZipSas | base64 -d) echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - detecting os and installing zip" #TODO: needs to be revisited if we support other distros os=$(hostnamectl | grep "Operating System") echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - os=$os" if [[ $os == *"Ubuntu"* ]]; then for i in {1..3}; do echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - start installing zip via apt-get $i" sudo apt update sudo apt-get install zip sleep 10 install=$(apt list --installed zip) echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - install=$install" if [[ $install == *"zip"* ]]; then echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - zip installed successfully" break fi done else for i in {1..3}; do echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - start installing zip via dnf $i" sudo dnf -y install zip sleep 10 install=$(dnf list --installed zip) echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - install=$install" if [[ $install == *"zip"* ]]; then echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - $(date -u +"%Y-%m-%dT%H:%M:%SZ") - zip installed successfully" break fi done fi echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - call zip -r $zipFilePath /var/log/azure-proxy-agent" cd /var/log/azure-proxy-agent zip -r $zipFilePath . echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - call zip -r $zipFilePath /var/log/azure" cd /var/log/azure zip -r $zipFilePath . 
ls -l $currentDir # upload log to blob echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - start uploading file $zipFilePath to blob" curl -X PUT -T $zipFilePath -H "x-ms-date: $(date -u)" -H "x-ms-blob-type: BlockBlob" "$decodedLogZipSas"GuestProxyAgentExtensionValidation.sh000066400000000000000000000162531500521614600342010ustar00rootroot00000000000000GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/LinuxScripts#!/bin/bash # Copyright (c) Microsoft Corporation # SPDX-License-Identifier: MIT customOutputJsonUrl=$(echo $customOutputJsonSAS | base64 -d) expectedProxyAgentVersion=$(echo $expectedProxyAgentVersion) echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - expectedProxyAgentVersion=$expectedProxyAgentVersion" currentDir=$(pwd) customOutputJsonPath=$currentDir/proxyagentextensionvalidation.json echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - Starting guest proxy agent extension validation script" echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - Get Extension Folder and Version" timeout=300 interval=5 elapsed=0 while :; do directories=$(find /var/lib/waagent -type d -name '*Microsoft.CPlat.ProxyAgent.ProxyAgentLinux*') found=0 if [ $(echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - $directories" | wc -l) -eq 1 ]; then for dir in $directories; do PIRExtensionFolderPath=$dir echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - PIR extension folder path=" $PIRExtensionFolderPath found=1 done if [ $found -eq 1 ]; then break fi fi ((elapsed += interval)) if [[ $elapsed -ge $timeout ]]; then echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - Timeout reached. Exiting the loop." break fi echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - Waiting for the extension folder to be created: $elapsed seconds elapsed" sleep $interval done PIRExtensionVersion=$(echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - $PIRExtensionFolderPath" | grep -oP '(\d+\.\d+\.\d+)$') echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - PIRExtensionVersion=$PIRExtensionVersion" echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - detecting os and installing jq" os=$(hostnamectl | grep "Operating System") echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - os=$os" if [[ $os == *"Ubuntu"* ]]; then for i in {1..3}; do echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - start installing jq via apt-get $i" sudo apt update sudo apt-get install -y jq sleep 10 install=$(apt list --installed jq) echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - install=$install" if [[ $install == *"jq"* ]]; then echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - jq installed successfully" break fi done else for i in {1..3}; do echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - start installing jq via dnf $i" sudo dnf -y install jq sleep 10 install=$(dnf list --installed jq) echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - install=$install" if [[ $install == *"jq"* ]]; then echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - jq installed successfully" break fi done fi echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - Check that status file is regenerated" timeout=900 elapsed=0 while :; do statusFolder=$(find "$PIRExtensionFolderPath" -type d -name 'status') statusFile=$(ls $statusFolder/*.status) if [ -f "$statusFile" ]; then echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - statusFile=$statusFile" echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - Contents of status file:" cat "$statusFile" statusExists=true break fi ((elapsed += 5)) if [[ $elapsed -ge $timeout ]]; then echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - Timeout reached. Exiting the loop, status file is not regenerated." 
statusExists=false break fi sleep 5 done echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - TEST: Check that status file is success with 5 minute timeout" guestProxyAgentExtensionStatusObjGenerated=false guestProxyAgentExtensionServiceStatus=false timeout=300 elapsed=0 if [[ "$statusExists" == "true" ]]; then while :; do extensionStatus=$(cat "$statusFile" | jq -r '.[0].status.status') if [[ "$extensionStatus" == "success" ]]; then guestProxyAgentExtensionStatusObjGenerated=true guestProxyAgentExtensionServiceStatus=true echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - The status is success." break fi ((elapsed += 5)) if [[ $elapsed -ge $timeout ]]; then echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - Timeout reached. Exiting the loop." break fi sleep 5 done fi echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - TEST: Check that process ProxyAgentExt is running" processId=$(pgrep ProxyAgentExt) echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - processId=$processId" if [ -z "$processId" ]; then echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - Process ProxyAgentExt is not running" guestProxyAgentExtensionServiceExist=false guestProxyAgentExtensionProcessExist=false else echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - Process ProxyAgentExt is running" guestProxyAgentExtensionServiceExist=true guestProxyAgentExtensionProcessExist=true fi echo Write-Output "TEST: ProxyAgent version running in VM is the same as expected version" proxyAgentVersion="$(eval "$PIRExtensionFolderPath/ProxyAgent/ProxyAgent/azure-proxy-agent --version")" echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - proxy agent version from extension folder: $proxyAgentVersion" guestProxyAgentExtensionVersion=true proxyAgentStatus=$(cat "$statusFile" | jq -r '.[0].status.substatus[1].formattedMessage.message') extractedVersion=$(echo $proxyAgentStatus | jq -r '.version') if [[ $proxyAgentVersion == $extractedVersion ]]; then echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - ProxyAgent version running in VM is the same as expected version" else echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - ProxyAgent version [$proxyAgentVersion] running in VM is not the same as expected version [$extractedVersion]" guestProxyAgentExtensionVersion=false fi if [ $expectedProxyAgentVersion != "0" ]; then if [[ $proxyAgentVersion == $expectedProxyAgentVersion ]]; then echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - After Update Version check: ProxyAgent version running in VM is the same as expected and extracted version" else echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - After Update Version check: ProxyAgent version [$proxyAgentVersion] running in VM is not the same as expected version [$expectedProxyAgentVersion]" guestProxyAgentExtensionVersion=false fi fi echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - TEST: Check that detailed status of the extension status to see if the Instance View is successful" guestProxyAgentExtensionInstanceView=false if [[ $proxyAgentStatus == *"SUCCESS"* ]]; then echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - Instance View is successful" guestProxyAgentExtensionInstanceView=true else echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - Instance View is not successful" fi jsonString='{"guestProxyAgentExtensionStatusObjGenerated": "'$guestProxyAgentExtensionStatusObjGenerated'", "guestProxyAgentExtensionProcessExist": "'$guestProxyAgentExtensionProcessExist'", "guestProxyAgentExtensionServiceExist": "'$guestProxyAgentExtensionServiceExist'", "guestProxyAgentExtensionVersion": "'$guestProxyAgentExtensionVersion'", "guestProxyAgentExtensionInstanceView": "'$guestProxyAgentExtensionInstanceView'", "guestProxyAgentExtensionServiceStatus": 
"'$guestProxyAgentExtensionServiceStatus'"}' echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - $jsonString" echo "$jsonString" > $customOutputJsonPath curl -X PUT -T $customOutputJsonPath -H "x-ms-date: $(date -u)" -H "x-ms-blob-type: BlockBlob" "$customOutputJsonUrl"GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/LinuxScripts/GuestProxyAgentValidation.sh000077500000000000000000000052261500521614600323640ustar00rootroot00000000000000#!/bin/bash # Copyright (c) Microsoft Corporation # SPDX-License-Identifier: MIT customOutputJsonUrl=$(echo $customOutputJsonSAS | base64 -d) echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - Start Guest Proxy Agent Validation" currentDir=$(pwd) customOutputJsonPath=$currentDir/proxyagentvalidation.json serviceName="azure-proxy-agent" guestProxyAgentServiceExist=$(systemctl list-unit-files | grep $serviceName | wc -l) guestProxyAgentServiceStatus="unknown" if [ $guestProxyAgentServiceExist -eq 0 ]; then guestProxyAgentServiceExist='false' guestProxyAgentServiceStatus="service not exists" else guestProxyAgentServiceExist='true' guestProxyAgentServiceStatus=$(systemctl is-enabled $serviceName) fi guestProxyProcessStarted=$(systemctl is-active $serviceName) # check guestProxyProcessStarted is 'active' if [ "$guestProxyProcessStarted" == "active" ]; then guestProxyProcessStarted='true' else guestProxyProcessStarted=$(ps -C azure-proxy-agent) if [[ $guestProxyProcessStarted == *"azure-proxy-agent"* ]]; then guestProxyProcessStarted='true' else guestProxyProcessStarted='false' fi fi logdir="/var/log/azure-proxy-agent" guestProxyAgentLogGenerated='false' if [ -d "$logdir" ]; then echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - logdir '$logdir' exists" ls -l $logdir # check if any log file is generated logFileCount=$(ls -l $logdir | grep -v ^l | wc -l) echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - logFileCount=$logFileCount" if [ $logFileCount -gt 0 ]; then guestProxyAgentLogGenerated='true' fi else echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - logdir does not exist" fi echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - guestProxyAgentServiceExist=$guestProxyAgentServiceExist" echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - guestProxyAgentServiceStatus=$guestProxyAgentServiceStatus" echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - guestProxyProcessStarted=$guestProxyProcessStarted" echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - guestProxyAgentLogGenerated=$guestProxyAgentLogGenerated" jsonString='{"guestProxyAgentServiceInstalled": "'$guestProxyAgentServiceExist'", "guestProxyAgentServiceStatus": "'$guestProxyAgentServiceStatus'", "guestProxyProcessStarted": "'$guestProxyProcessStarted'", "guestProxyAgentLogGenerated": "'$guestProxyAgentLogGenerated'"}' echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - $jsonString" # write to $customOutputJsonPath echo "$jsonString" > $customOutputJsonPath # upload $customOutputJsonPath to blob echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - start uploading file=@$customOutputJsonPath to blob" curl -X PUT -T $customOutputJsonPath -H "x-ms-date: $(date -u)" -H "x-ms-blob-type: BlockBlob" "$customOutputJsonUrl"GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/LinuxScripts/IMDSPingTest.sh000077500000000000000000000025251500521614600274520ustar00rootroot00000000000000#!/bin/bash # Copyright (c) Microsoft Corporation # SPDX-License-Identifier: MIT echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - imdsSecureChannelEnabled=$imdsSecureChannelEnabled" # make 10 requests if any failed, will failed the test for tcp port scalability config for i in {1..10}; do url="http://169.254.169.254/metadata/instance?api-version=2020-06-01" 
statusCode=$(curl -s -o /dev/null -w "%{http_code}" -H "Metadata:True" $url) if [ $statusCode -eq 200 ]; then echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - Response status code is OK (200)" else echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - Ping test failed. Response status code is $statusCode" exit -1 fi sleep 1 if [ "${imdsSecureChannelEnabled,,}" = "true" ] # case insensitive comparison then authorizationHeader=$(curl -s -I -H "Metadata:True" $url | grep -Fi "x-ms-azure-host-authorization") if [ "$authorizationHeader" = "" ]; then echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - Response authorization header not exist" exit -1 else echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - Response authorization header exists" fi sleep 1 else echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - IMDS secure channel is not enabled. Skipping x-ms-azure-host-authorization header validation" fi done exit 0GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/LinuxScripts/InstallGuestProxyAgent.sh000077500000000000000000000040371500521614600316770ustar00rootroot00000000000000#!/bin/bash # Copyright (c) Microsoft Corporation # SPDX-License-Identifier: MIT zipFile=$zipsas # zipsas is a variable set by RunCommand extension by os.Setenv(name, value) currentDir=$(pwd) echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - currentDir=$currentDir" echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - detecting os and installing unzip" #TODO: needs to be revisited if we support other distros os=$(hostnamectl | grep "Operating System") echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - os=$os" if [[ $os == *"Ubuntu"* ]]; then for i in {1..3}; do echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - start installing unzip via apt-get $i" sudo apt-get update echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - apt-get install unzip" sudo apt-get install unzip echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - apt-get install unzip with exit code=$?" 
sleep 10 install=$(apt list --installed unzip) echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - install=$install" if [[ $install == *"unzip"* ]]; then echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - unzip installed successfully" break fi done else for i in {1..3}; do echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - start installing unzip via dnf $i" sudo dnf -y install unzip sleep 10 install=$(dnf list --installed unzip) echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - install=$install" if [[ $install == *"unzip"* ]]; then echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - unzip installed successfully" break fi done fi zipFilePath=$currentDir/guest-proxy-agent.zip decodedUrl=$(echo $zipFile | base64 -d) echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - start downloading guest-proxy-agent.zip" curl -L -o $zipFilePath "$decodedUrl" echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - start unzipping guest-proxy-agent.zip" unzip -o $zipFilePath -d $currentDir ls -l $currentDir echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - start install & start guest-proxy-agent" $currentDir/ProxyAgent/proxy_agent_setup install GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/LinuxScripts/InstallGuestProxyAgentExtension.sh000066400000000000000000000123331500521614600335670ustar00rootroot00000000000000#!/bin/bash # Copyright (c) Microsoft Corporation # SPDX-License-Identifier: MIT zipFile=$devExtensionSas echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - Starting install guest proxy agent extension script" timeout=300 interval=5 elapsed=0 while :; do directories=$(find /var/lib/waagent -type d -name '*Microsoft.CPlat.ProxyAgent.ProxyAgentLinux*') found=0 if [ $(echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - $directories" | wc -l) -eq 1 ]; then for dir in $directories; do PIRExtensionFolderPath=$dir echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - PIR extension folder path=" $PIRExtensionFolderPath PIRExtensionFolderZip="${PIRExtensionFolderPath//-/__}.zip" echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - PIRExtensionFolderZip=$PIRExtensionFolderZip" found=1 done if [ $found -eq 1 ]; then break fi fi ((elapsed += interval)) if [[ $elapsed -ge $timeout ]]; then echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - Timeout reached. Exiting the loop." 
break fi echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - Waiting for the extension folder to be created: $elapsed seconds elapsed" sleep $interval done PIRExtensionVersion=$(echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - $PIRExtensionFolderPath" | grep -oP '(\d+\.\d+\.\d+)$') echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - PIRExtensionVersion=$PIRExtensionVersion" proxyAgentVersion="$(eval "$PIRExtensionFolderPath/ProxyAgent/ProxyAgent/azure-proxy-agent --version")" echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - proxy agent version: $proxyAgentVersion" statusFolder=$(find "$PIRExtensionFolderPath" -type d -name 'status') echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - Status Directory: $statusFolder" echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - Delete status file of PIR version" statusFile=$(ls $statusFolder/*.status) echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - statusFile=$statusFile" rm $statusFile echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - detecting os and installing jq" os=$(hostnamectl | grep "Operating System") echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - os=$os" if [[ $os == *"Ubuntu"* ]]; then for i in {1..3}; do echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - start installing jq via apt-get $i" sudo apt-get update sudo apt-get install -y jq sleep 10 install=$(apt list --installed jq) echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - install=$install" if [[ $install == *"jq"* ]]; then echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - jq installed successfully" break fi done else for i in {1..3}; do echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - start installing jq via dnf $i" sudo dnf -y install jq sleep 10 install=$(dnf list --installed jq) echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - install=$install" if [[ $install == *"jq"* ]]; then echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - jq installed successfully" break fi done fi echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - Check that status file is regenerated" timeout=900 elapsed=0 while :; do statusFolder=$(find "$PIRExtensionFolderPath" -type d -name 'status') statusFile=$(ls $statusFolder/*.status) if [ -f "$statusFile" ]; then echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - statusFile=$statusFile" echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - Contents of status file:" cat "$statusFile" statusExists=true break fi ((elapsed += 5)) if [[ $elapsed -ge $timeout ]]; then echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - Timeout reached. Exiting the loop, status file is not regenerated." statusExists=false break fi sleep 5 done echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - Check that status file is success with 5 minute timeout" timeout=300 elapsed=0 if [[ "$statusExists" == "true" ]]; then while :; do extensionStatus=$(cat "$statusFile" | jq -r '.[0].status.status') if [[ "$extensionStatus" == "success" ]]; then echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - The status is success." break fi ((elapsed += 5)) if [[ $elapsed -ge $timeout ]]; then echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - Timeout reached. Exiting the loop." 
            break
        fi
        sleep 5
    done
fi
echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - Check that process ProxyAgentExt is running"
processId=$(pgrep ProxyAgentExt)
echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - processId=$processId"
if [ -z "$processId" ]; then
    echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - Process ProxyAgentExt is not running"
else
    echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - Process ProxyAgentExt is running"
fi
echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - Delete PIR extension zip"
rm -rf $PIRExtensionFolderZip
echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - Delete PIR extension folder"
rm -rf $PIRExtensionFolderPath
echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - Downloading proxy agent extension binaries to PIR extension zip location"
decodedUrl=$(echo $zipFile | base64 -d)
curl -L -o $PIRExtensionFolderZip "$decodedUrl"
echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - Get PID of ProxyAgentExt and kill it"
pidof ProxyAgentExt | xargs kill -9
echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - Delete status file inside status folder"
rm -rf $statusFolder/*
GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/LinuxScripts/InstallGuestProxyAgentPackage.sh000077500000000000000000000056611500521614600331550ustar00rootroot00000000000000
#!/bin/bash
# Copyright (c) Microsoft Corporation
# SPDX-License-Identifier: MIT
zipFile=$zipsas # zipsas is a variable set by RunCommand extension by os.Setenv(name, value)
currentDir=$(pwd)
echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - currentDir=$currentDir"
echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - detecting os and installing unzip"
#TODO: needs to be revisited if we support other distros
os=$(hostnamectl | grep "Operating System")
echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - os=$os"
if [[ $os == *"Ubuntu"* ]]; then
    for i in {1..3}; do
        echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - start installing unzip via apt-get $i"
        sudo apt-get update
        echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - apt-get install unzip"
        sudo apt-get install -y unzip
        echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - apt-get install unzip with exit code=$?"
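        # NOTE (illustrative only): "apt list --installed unzip" below is how this script verifies
        # the install; an equivalent, locale-independent check could be either of:
        #   command -v unzip >/dev/null 2>&1 && echo "unzip installed"
        #   dpkg -s unzip >/dev/null 2>&1 && echo "unzip installed"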
sleep 10 install=$(apt list --installed unzip) echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - install=$install" if [[ $install == *"unzip"* ]]; then echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - unzip installed successfully" break fi done else for i in {1..3}; do echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - start installing unzip via dnf $i" sudo dnf -y install unzip sleep 10 install=$(dnf list --installed unzip) echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - install=$install" if [[ $install == *"unzip"* ]]; then echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - unzip installed successfully" break fi done fi zipFilePath=$currentDir/guest-proxy-agent.zip decodedUrl=$(echo $zipFile | base64 -d) echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - start downloading guest-proxy-agent.zip" curl -L -o $zipFilePath "$decodedUrl" echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - start unzipping guest-proxy-agent.zip" unzip -o $zipFilePath -d $currentDir ls -l $currentDir pkgversion=$($currentDir/ProxyAgent/ProxyAgent/azure-proxy-agent --version) for i in {1..3}; do echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - start install & start guest-proxy-agent package $i" if [[ $os == *"Ubuntu"* ]]; then sudo apt-get -f install sudo dpkg -i $currentDir/ProxyAgent/packages/*.deb sleep 10 install=$(apt list --installed guest-proxy-agent) echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - install=$install" if [[ $install == *"guest-proxy-agent"* ]]; then echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - guest-proxy-agent installed successfully" break fi else sudo rpm -i $currentDir/ProxyAgent/packages/*.rpm sleep 10 install=$(dnf list --installed guest-proxy-agent) echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - install=$install" if [[ $install == *"guest-proxy-agent"* ]]; then echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - guest-proxy-agent installed successfully" break fi fi doneGuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/LinuxScripts/SetupCGroup2.sh000077500000000000000000000010521500521614600275340ustar00rootroot00000000000000#!/bin/bash # Copyright (c) Microsoft Corporation # SPDX-License-Identifier: MIT echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - checking cgroup2 ... " mount_cgroup2=$(mount | grep cgroup2) echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - mount_cgroup2=$mount_cgroup2" if [[ $mount_cgroup2 == *"cgroup2"* ]]; then echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - cgroup2 is already mounted" else echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") - mount cgroup2 by default during system boot" sudo grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=1" fi GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/Models/000077500000000000000000000000001500521614600234515ustar00rootroot00000000000000GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/Models/RunCommandOutputDetails.cs000066400000000000000000000017601500521614600305760ustar00rootroot00000000000000īģŋ// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT using System; using System.Collections.Generic; using System.Linq; using System.Text; using System.Threading.Tasks; namespace GuestProxyAgentTest.Models { /// /// RunCommand Output details /// public class RunCommandOutputDetails { /// /// Std output sas url for the run command execution. /// public string StdOut { get; set; } = null!; /// /// Std error output sas url for the run command execution. /// public string StdErr { get; set; } = null!; /// /// Customized output url for the run command execution. /// public string CustomOut { get; set; } = null!; /// /// Indicate if the run command execution status is succeed or not. 
/// public bool Succeed { get; set; } } } GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/Models/TestCaseResultDetails.cs000066400000000000000000000025211500521614600302200ustar00rootroot00000000000000īģŋ// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT namespace GuestProxyAgentTest.Models { /// /// Test Result Details class /// public class TestCaseResultDetails { /// /// The std output, it would be a content string or a blob SAS url that contains the std output, indicated by bool value /// public string StdOut { get; set; } = null!; /// /// The std error output, it would be a content string or a blob SAS url that contains the std error output, indicated by bool value /// public string StdErr { get; set; } = null!; /// /// The customized output, it would be a content string or a blob SAS url that contains the customized output, indicated by bool value /// public string CustomOut { get; set; } = null!; /// /// indicate the test result is success or failed. /// public bool Succeed { get; set; } /// /// The bool flag indicate the content of StdOut, StdErr and CustomOut should read from blob or directly /// public bool FromBlob { get; set; } } } GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/Models/TestConfig.cs000066400000000000000000000021571500521614600260520ustar00rootroot00000000000000īģŋ// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT using System; using System.Collections.Generic; using System.Linq; using System.Text; using System.Threading.Tasks; namespace GuestProxyAgentTest.Models { public class TestConfig { public string TenantId { get; set; } = null!; public string AppClientId { get; set; } = null!; public string CertThumbprint { get; set; } = null!; public string CertNameInKV { get; set; } = "GuestProxyAgentE2ETestCert"; public string SubscriptionId { get; set; } = null!; public string Location { get; set; } = null!; public string VmSize { get; set; } = null!; public string SharedStorageAccountUrl { get; set; } = null!; public string WindowsInVmWireServerAccessControlProfileReferenceId { get; set; } = null!; public string WindowsInVmIMDSAccessControlProfileReferenceId { get; set; } = null!; public string LinuxInVmWireServerAccessControlProfileReferenceId { get; set; } = null!; public string LinuxInVmIMDSAccessControlProfileReferenceId { get; set; } = null!; } } GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/Models/TestMap.cs000066400000000000000000000015721500521614600253620ustar00rootroot00000000000000īģŋ// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT namespace GuestProxyAgentTest.Models { public class TestMap { public List TestGroupList { get; set; } = null!; } public class TestGroupConfig { public string Include { get; set; } = null!; } public class TestGroupDetails { public string GroupName { get; set; } = null!; public string VmImagePublisher { get; set; } = null!; public string VmImageOffer { get; set; } = null!; public string VmImageSku { get; set; } = null!; public string VmImageVersion { get; set; } = null!; public List Scenarios { get; set; } = null!; } public class TestScenarioConfig { public string Name { get; set; } = null!; public string ClassName { get; set; } = null!; } } GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/Models/TestScenarioStatusDetails.cs000066400000000000000000000031601500521614600311150ustar00rootroot00000000000000īģŋ// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT namespace GuestProxyAgentTest.Models { /// /// Test case run status details /// 
public class TestScenarioStatusDetails { /// /// Indicate if the test case is not-started, running, or completed /// public ScenarioTestStatus Status { get; set; } = ScenarioTestStatus.NotStarted; /// /// test case group name /// public string GroupName { get; set; } = null!; /// /// test case name /// public string ScenarioName { get; set; } = null!; /// /// Error Message in Scenario Level Execution /// public string ErrorMessage { get; set; } = null!; /// /// test case result /// public ScenarioTestResult Result { get; set; } public List FailedCases { get; } = new List(); /// /// Failed test cases summary error message /// public string TestCasesErrorMessage { get { return FailedCases.Count() == 0? "": $"Test Scenario:{ScenarioName} failed by test cases: {string.Join(',', FailedCases)}, Check the test case log for error details."; } } } public enum ScenarioTestStatus { NotStarted, Running, Completed, } public enum ScenarioTestResult { Succeed, Failed } } GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/Program.cs000066400000000000000000000025541500521614600241720ustar00rootroot00000000000000īģŋ// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT using GuestProxyAgentTest.Utilities; namespace GuestProxyAgentTest { /// /// Entry point program class /// public class Program { /// /// Entry point main method /// /// /// args[0]: test config file yml file path /// args[1]: test result folder path, the test pipeline will publish the test result under this folder /// args[2]: guest proxy agent msi file path /// static async Task Main(string[] args) { var testConfigFilePath = args[0]; var testResultFolder = args[1]; var guestProxyAgentZipFilePath = args[2]; var test_arm64 = false; if (args.Length > 3 && args[3].Equals("arm64", StringComparison.InvariantCultureIgnoreCase)) { test_arm64 = true; } TestCommonUtilities.TestSetup(guestProxyAgentZipFilePath, testConfigFilePath, testResultFolder); VMHelper.Instance.CleanupOldTestResourcesAndForget(); await new GuestProxyAgentScenarioTests().StartAsync(TestMapReader.ReadFlattenTestScenarioSettingFromTestMap(test_arm64)); Console.WriteLine("E2E Test run completed."); } } }GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/Resources/000077500000000000000000000000001500521614600242005ustar00rootroot00000000000000GuestProxyAgentLoadedModulesBaseline.txt000066400000000000000000000007261500521614600341040ustar00rootroot00000000000000GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/Resourcesguestproxyagent.exe ntdll.dll kernel32.dll kernelbase.dll apphelp.dll bcrypt.dll advapi32.dll msvcrt.dll sechost.dll rpcrt4.dll ws2_32.dll ucrtbase.dll secur32.dll cryptbase.dll bcryptprimitives.dll sspicli.dll mswsock.dll ntmarta.dll ebpfapi.dll dbghelp.dll vcruntime140_1.dll vcruntime140.dll msvcp140.dll CRYPTSP.dll DNSAPI.dll IPHLPAPI.DLL logoncli.dll netapi32.dll netutils.dll NSI.dll rsaenh.dll SAMCLI.DLL SAMLIB.dll WLDAP32.dll crypt32.dll msasn1.dll version.dllGuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/Scripts/000077500000000000000000000000001500521614600236555ustar00rootroot00000000000000GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/Scripts/CollectInVMGALog.ps1000066400000000000000000000023261500521614600272760ustar00rootroot00000000000000# Copyright (c) Microsoft Corporation # SPDX-License-Identifier: MIT param ( [Parameter(Mandatory=$true, Position=0)] [string]$logZipSas ) $decodedUrlBytes = [System.Convert]::FromBase64String($logZipSas) $decodedUrlString = [System.Text.Encoding]::UTF8.GetString($decodedUrlBytes) ## get guest agent 
installation path $serviceName="WindowsAzureGuestAgent" $serviceKeyPath = "HKLM:\SYSTEM\CurrentControlSet\Services\$serviceName" $gaInstallPath = Get-ItemPropertyValue -Path $serviceKeyPath -Name "ImagePath" $gaFolder = Split-Path -Path $gaInstallPath -Parent ## get CollectGuestLogs.exe path $collectGuestLogExePath = $gaFolder + "\CollectGuestLogs.exe" ## run CollectGuestLogs.exe to collect log zip ## upload the zip to blob sas url $currentFolder = $PWD.Path $logZipPath = $currentFolder + "\VMAgentLogs.zip"; Write-Output "$((Get-Date).ToUniversalTime()) - CollectGuestLogExe path: $collectGuestLogExePath" Start-Process -FilePath $collectGuestLogExePath -WorkingDirectory $currentFolder -ArgumentList "-Mode:full -FileName:$logZipPath" -Wait -NoNewWindow $headers = @{ 'x-ms-blob-type' = 'BlockBlob' } #Upload File... Invoke-RestMethod -Uri $decodedUrlString -Method Put -Headers $headers -InFile $logZipPathGuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/Scripts/ConfigTCPPortScalability.ps1000066400000000000000000000017601500521614600311100ustar00rootroot00000000000000# Copyright (c) Microsoft Corporation # SPDX-License-Identifier: MIT Set-NetTCPSetting -SettingName "InternetCustom" -DynamicPortRangeStartPort 5000 -DynamicPortRangeNumberOfPorts 19475 -AutoReusePortRangeStartPort 22000 -AutoReusePortRangeNumberOfPorts 43536 Set-NetTCPSetting -SettingName "DatacenterCustom" -DynamicPortRangeStartPort 5000 -DynamicPortRangeNumberOfPorts 19475 -AutoReusePortRangeStartPort 22000 -AutoReusePortRangeNumberOfPorts 43536 Set-NetTCPSetting -SettingName "Compat" -DynamicPortRangeStartPort 5000 -DynamicPortRangeNumberOfPorts 19475 -AutoReusePortRangeStartPort 22000 -AutoReusePortRangeNumberOfPorts 43536 Set-NetTCPSetting -SettingName "Datacenter" -DynamicPortRangeStartPort 5000 -DynamicPortRangeNumberOfPorts 19475 -AutoReusePortRangeStartPort 22000 -AutoReusePortRangeNumberOfPorts 43536 Set-NetTCPSetting -SettingName "Internet" -DynamicPortRangeStartPort 5000 -DynamicPortRangeNumberOfPorts 19475 -AutoReusePortRangeStartPort 22000 -AutoReusePortRangeNumberOfPorts 43536GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/Scripts/GuestProxyAgentExtensionValidation.ps1000066400000000000000000000203311500521614600333210ustar00rootroot00000000000000# Copyright (c) Microsoft Corporation # SPDX-License-Identifier: MIT param ( [Parameter(Mandatory=$true, Position=0)] [string]$customOutputJsonSAS, [string]$expectedProxyAgentVersion ) Write-Output "$((Get-Date).ToUniversalTime()) - expectedProxyAgentVersion=$expectedProxyAgentVersion" $decodedUrlBytes = [System.Convert]::FromBase64String($customOutputJsonSAS) $decodedUrlString = [System.Text.Encoding]::UTF8.GetString($decodedUrlBytes) Write-Output "$((Get-Date).ToUniversalTime()) - Start Guest Proxy Agent Extension Validation" $currentFolder = $PWD.Path $customOutputJsonPath = $currentFolder + "\proxyagentextensionvalidation.json"; New-Item -ItemType File -Path $customOutputJsonPath -Force $timeoutInSeconds = 300 $stopwatch = [System.Diagnostics.Stopwatch]::StartNew() do { $nonRootRegKeyPath = $null $proxy = Get-ChildItem "HKLM:\SOFTWARE\Microsoft\Windows Azure\HandlerState" foreach ($obj in $proxy) { if($obj.Name -like "*Microsoft.CPlat.ProxyAgent.ProxyAgentWindows*") { $nonRootRegKeyPath = $obj.Name Write-Output "$((Get-Date).ToUniversalTime()) - Got proxy agent extension registry key path: " $nonRootRegKeyPath break } } if ($nonRootRegKeyPath -ne $null) { $registrykeyPath = $nonRootRegKeyPath -replace '^HKEY_LOCAL_MACHINE', 'HKLM:' $PIRversion = ($registrykeyPath 
-split "_")[1] Write-Output "$((Get-Date).ToUniversalTime()) - PIR Version: $PIRversion" if (((Get-Item -Path $registrykeyPath).GetValue("SequenceNumber") -ne $null) -and ((Get-Item -Path $registrykeyPath).GetValue("StatusFolder") -ne $null)) { $seqNo = (Get-ItemProperty -Path $registrykeyPath).SequenceNumber Write-Output "$((Get-Date).ToUniversalTime()) - Seq No: $seqNo" $statusFolderPath = (Get-ItemProperty -Path $registrykeyPath).StatusFolder Write-Output "$((Get-Date).ToUniversalTime()) - Status Folder: $statusFolderPath" $statusFilePath = [IO.Path]::Combine($statusFolderPath, $seqNo + ".status") Write-Output "$((Get-Date).ToUniversalTime()) - Status file path: $statusFilePath" break } } if ($stopwatch.Elapsed.TotalSeconds -ge $timeoutInSeconds) { Write-Output "$((Get-Date).ToUniversalTime()) - Timeout reached. Error, The registry key does not have proxy agent extension." exit 1 } start-sleep -Seconds 3 } until ($false) $extensionFolder = Split-Path -Path $statusFolderPath -Parent Write-Output "$((Get-Date).ToUniversalTime()) - Extension Folder: $extensionFolder" $PIRExePath = [IO.Path]::Combine($extensionFolder, "ProxyAgentExt.exe") Write-Output "$((Get-Date).ToUniversalTime()) - TEST: ProxyAgentVMExtension Status is succesful, Check that status file is success with 5 minute timeout" $guestProxyAgentExtensionStatusObjGenerated = $false $stopwatch = [System.Diagnostics.Stopwatch]::StartNew() do { $boolStatus = Test-Path -Path $statusFilePath if ($boolStatus) { $json = Get-Content $statusFilePath | Out-String | ConvertFrom-Json $extensionStatus = $json.status.status if ($extensionStatus -eq "Success") { Write-Output "$((Get-Date).ToUniversalTime()) - The extension status is success: $extensionStatus." $guestProxyAgentExtensionStatusObjGenerated = $true break } if ($extensionStatus -eq "Error") { Write-Output "$((Get-Date).ToUniversalTime()) - The extension status is error: $extensionStatus." break } if ($stopwatch.Elapsed.TotalSeconds -ge $timeoutInSeconds) { Write-Output "$((Get-Date).ToUniversalTime()) - Timeout reached. Error, The extension status is $extensionStatus." break } } start-sleep -Seconds 3 } until ($false) Write-Output "$((Get-Date).ToUniversalTime()) - TEST: ProxyAgentVMExtension Service is started and success" $serviceName = "GuestProxyAgentVMExtension" $guestProxyAgentExtensionServiceExist = $false $guestProxyAgentExtensionServiceStatus = $false $service = Get-Service -Name $serviceName -ErrorAction SilentlyContinue if ($service -ne $null) { $serviceStatus = $service.Status Write-Output "$((Get-Date).ToUniversalTime()) - The service $serviceName exists with status $serviceStatus." $guestProxyAgentExtensionServiceExist = $true $guestProxyAgentExtensionServiceStatus = $true } Write-Output "$((Get-Date).ToUniversalTime()) - TEST: ProxyAgentVMExtension process is running " $processName = "ProxyAgentExt" $process = Get-Process -Name $processName -ErrorAction SilentlyContinue $guestProxyAgentExtensionProcessExist = $true if ($process -ne $null) { Write-Output "$((Get-Date).ToUniversalTime()) - The process $processName exists." } else { $guestProxyAgentExtensionProcessExist = $false Write-Output "$((Get-Date).ToUniversalTime()) - The process $processName does not exist." 
}
Write-Output "$((Get-Date).ToUniversalTime()) - TEST: ProxyAgent version running in VM is the same as expected version"
$proxyAgentExeCmd = $extensionFolder + "\ProxyAgent\ProxyAgent\GuestProxyAgent.exe --version"
$proxyAgentVersion = Invoke-Expression $proxyAgentExeCmd
Write-Output "$((Get-Date).ToUniversalTime()) - proxy agent version from extension folder: $proxyAgentVersion"
$guestProxyAgentExtensionVersion = $false
$json = Get-Content $statusFilePath | Out-String | ConvertFrom-Json
if ($json.status.substatus -is [System.Collections.IEnumerable] -and $json.status.substatus.Count -gt 0) {
    Write-Output "$((Get-Date).ToUniversalTime()) - The 'substatus' array exists and has length greater than 0."
    $guestProxyAgentExtensionVersion = $true
}
if ($guestProxyAgentExtensionVersion) {
    $proxyAgentStatus = $json.status.substatus[1].formattedMessage.message
    $jsonObject = $proxyAgentStatus | ConvertFrom-Json
    $extractedVersion = $jsonObject.version
    if ($extractedVersion -ne $proxyAgentVersion) {
        Write-Output "$((Get-Date).ToUniversalTime()) - Error, the proxy agent version [ $extractedVersion ] does not match the version [ $proxyAgentVersion ]"
        $guestProxyAgentExtensionVersion = $false
    }
    if ($expectedProxyAgentVersion -ne "0") {
        $cleanExpectedProxyAgentVersion = $expectedProxyAgentVersion.Trim()
        if ($extractedVersion -eq $cleanExpectedProxyAgentVersion) {
            Write-Output "$((Get-Date).ToUniversalTime()) - After Update Version check: The proxy agent version matches the expected and extracted version"
        } else {
            Write-Output "$((Get-Date).ToUniversalTime()) - After Update Version check: Error, the proxy agent version [ $extractedVersion ] does not match expected version [ $cleanExpectedProxyAgentVersion ]"
            $guestProxyAgentExtensionVersion = $false
        }
    }
}
Write-Output "$((Get-Date).ToUniversalTime()) - TEST: Check detailed status of the extension if InstanceView is successful"
$guestProxyAgentExtensionInstanceView = $false
if ($proxyAgentStatus -like "*SUCCESS*") {
    Write-Output "$((Get-Date).ToUniversalTime()) - The InstanceView status is $proxyAgentStatus."
    $guestProxyAgentExtensionInstanceView = $true
} else {
    Write-Output "$((Get-Date).ToUniversalTime()) - Error, the InstanceView status is not ready: $proxyAgentStatus."
}
$jsonString = '{ "guestProxyAgentExtensionServiceExist": ' + $guestProxyAgentExtensionServiceExist.ToString().ToLower() `
    + ', "guestProxyAgentExtensionProcessExist": ' + $guestProxyAgentExtensionProcessExist.ToString().ToLower() `
    + ', "guestProxyAgentExtensionServiceStatus": ' + $guestProxyAgentExtensionServiceStatus.ToString().ToLower() `
    + ', "guestProxyAgentExtensionStatusObjGenerated": ' + $guestProxyAgentExtensionStatusObjGenerated.ToString().ToLower() `
    + ', "guestProxyAgentExtensionVersion": ' + $guestProxyAgentExtensionVersion.ToString().ToLower() `
    + ', "guestProxyAgentExtensionInstanceView": ' + $guestProxyAgentExtensionInstanceView.ToString().ToLower() `
    + '}'
Write-Output "$((Get-Date).ToUniversalTime()) - $jsonString"
Set-Content -Path $customOutputJsonPath -Value $jsonString
$headers = @{
    'x-ms-blob-type' = 'BlockBlob'
}
Write-Output "$((Get-Date).ToUniversalTime()) - Upload File..."
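# NOTE (illustrative only): the hand-built $jsonString above could equivalently be produced with a
# hashtable and ConvertTo-Json; the sketch is kept as a comment so the test output is unchanged and
# it assumes the same variable names defined earlier in this script:
#   $resultObject = [ordered]@{
#       guestProxyAgentExtensionServiceExist       = $guestProxyAgentExtensionServiceExist
#       guestProxyAgentExtensionProcessExist       = $guestProxyAgentExtensionProcessExist
#       guestProxyAgentExtensionServiceStatus      = $guestProxyAgentExtensionServiceStatus
#       guestProxyAgentExtensionStatusObjGenerated = $guestProxyAgentExtensionStatusObjGenerated
#       guestProxyAgentExtensionVersion            = $guestProxyAgentExtensionVersion
#       guestProxyAgentExtensionInstanceView       = $guestProxyAgentExtensionInstanceView
#   }
#   $jsonString = $resultObject | ConvertTo-Json -Compress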
Invoke-RestMethod -Uri $decodedUrlString -Method Put -Headers $headers -InFile $customOutputJsonPathGuestProxyAgentLoadedModulesValidation.ps1000066400000000000000000000063741500521614600340220ustar00rootroot00000000000000GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/Scripts# Copyright (c) Microsoft Corporation # SPDX-License-Identifier: MIT param ( [Parameter(Mandatory=$true, Position=0)] [string]$customOutputJsonSAS, [Parameter(Mandatory=$true, Position=1)] [string]$loadedModulesBaseLineSAS ) Write-Output "$((Get-Date).ToUniversalTime()) - Start Guest Proxy Agent Loaded Module Validation" $decodedUrlBytes = [System.Convert]::FromBase64String($customOutputJsonSAS) $decodedUrlString = [System.Text.Encoding]::UTF8.GetString($decodedUrlBytes) $moduleBaselineUrlBytes = [System.Convert]::FromBase64String($loadedModulesBaseLineSAS) $moduleBaselineUrlString = [System.Text.Encoding]::UTF8.GetString($moduleBaselineUrlBytes) $currentFolder = $PWD.Path $customOutputJsonPath = $currentFolder + "\validateLoadedModule.json"; $moduleBaselineFilePath = $currentFolder + "\moduleListBaseline.txt"; New-Item -ItemType File -Path $customOutputJsonPath New-Item -ItemType File -Path $moduleBaselineFilePath Invoke-WebRequest -Uri $moduleBaselineUrlString -OutFile $moduleBaselineFilePath Write-Output "$((Get-Date).ToUniversalTime()) - Downloaded baseline file" $baseArray = @() foreach ($line in Get-Content $moduleBaselineFilePath| Where-Object { $_.Trim() -ne '' }) { $baseArray += $line.Trim().ToLower() } Write-Output "$((Get-Date).ToUniversalTime()) - Read baseline list: $baseArray" $processName = "GuestProxyAgent" $process = Get-Process -Name $processName -ErrorAction SilentlyContinue $currentModulesArray = @() if ($process -eq $null) { Write-Output "$((Get-Date).ToUniversalTime()) - Process '$processName' not found." } else { Write-Output "$((Get-Date).ToUniversalTime()) - Loaded modules for process '$processName':" $modules = $process.Modules foreach ($module in $modules) { $moduleName = $module.ModuleName $currentModulesArray += $moduleName.Trim().ToLower() Write-Output "$((Get-Date).ToUniversalTime()) - Module Name: $moduleName" } } Write-Output "$((Get-Date).ToUniversalTime()) - Current loaded list: $currentModulesArray" $comparisonResult = Compare-Object -ReferenceObject $baseArray -DifferenceObject $currentModulesArray $missedInBaselineModules = @() $newAddedModules = @() $isMatch = $false if ($comparisonResult -eq $null -or $comparisonResult.Count -eq 0) { Write-Output "$((Get-Date).ToUniversalTime()) - No differences found." $isMatch = $true } else { Write-Output "$((Get-Date).ToUniversalTime()) - Differences found:" # Display the differences foreach ($result in $comparisonResult) { $inputObject = '"' + $result.InputObject + '"' $sideIndicator = $result.SideIndicator if ($sideIndicator -eq "<=") { $missedInBaselineModules += $inputObject } else { $newAddedModules += $inputObject } } } $jsonString = '{ "isMatch": ' + $isMatch.ToString().ToLower() + ', "newAddedModules": [' + ($newAddedModules -join ",") + '], "missedInBaselineModules": [' + ($missedInBaselineModules -join ",") + ']}' Set-Content -Path $customOutputJsonPath -Value $jsonString $headers = @{ 'x-ms-blob-type' = 'BlockBlob' } Invoke-RestMethod -Uri $decodedUrlString -Method Put -Headers $headers -InFile $customOutputJsonPath Write-Output "$((Get-Date).ToUniversalTime()) - Uploaded json output result." 
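# NOTE (illustrative only): the validateLoadedModule.json uploaded above has this shape
# (hypothetical values shown); the e2e test's LoadedModulesValidationDetails class deserializes it:
#   { "isMatch": false, "newAddedModules": ["somenew.dll"], "missedInBaselineModules": ["wldap32.dll"] }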
GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/Scripts/GuestProxyAgentValidation.ps1000066400000000000000000000054461500521614600314360ustar00rootroot00000000000000# Copyright (c) Microsoft Corporation # SPDX-License-Identifier: MIT param ( [Parameter(Mandatory=$true, Position=0)] [string]$customOutputJsonSAS ) $decodedUrlBytes = [System.Convert]::FromBase64String($customOutputJsonSAS) $decodedUrlString = [System.Text.Encoding]::UTF8.GetString($decodedUrlBytes) Write-Output "$((Get-Date).ToUniversalTime()) - Start Guest Proxy Agent Validation" $currentFolder = $PWD.Path $customOutputJsonPath = $currentFolder + "\proxyagentvalidation.json"; New-Item -ItemType File -Path $customOutputJsonPath $serviceName = "GuestProxyAgent" $service = Get-Service -Name $serviceName -ErrorAction SilentlyContinue $guestProxyAgentServiceExist = $true $guestProxyAgentServiceStatus = "" $guestProxyAgentProcessExist = $true if ($service -ne $null) { Write-Output "$((Get-Date).ToUniversalTime()) - The service $serviceName exists." $guestProxyAgentServiceStatus = $service.Status } else { Write-Output "$((Get-Date).ToUniversalTime()) - The service $serviceName does not exist." $guestProxyAgentServiceExist = $false $guestProxyAgentServiceStatus = "service not exists" } $processName = "GuestProxyAgent" $process = Get-Process -Name $processName -ErrorAction SilentlyContinue if ($process -ne $null) { Write-Output "$((Get-Date).ToUniversalTime()) - The process $processName exists." } else { $guestProxyAgentProcessExist = $false Write-Output "$((Get-Date).ToUniversalTime()) - The process $processName does not exist." } $folderPath = "C:\WindowsAzure\ProxyAgent\Logs" $guestProxyAgentLogGenerated = $false if (Test-Path -Path $folderPath -PathType Container) { Write-Output "$((Get-Date).ToUniversalTime()) - The folder $folderPath exists." $files = Get-ChildItem -Path $folderPath -File if ($files.Count -gt 0) { Write-Output "$((Get-Date).ToUniversalTime()) - The folder $folderPath contains files." $guestProxyAgentLogGenerated = $true } else { Write-Output "$((Get-Date).ToUniversalTime()) - The folder $folderPath is empty." } } else { Write-Output "$((Get-Date).ToUniversalTime()) - The folder $folderPath does not exist." } $jsonString = '{"guestProxyAgentServiceInstalled": ' + $guestProxyAgentServiceExist.ToString().ToLower() ` + ', "guestProxyProcessStarted": ' + $guestProxyAgentProcessExist.ToString().ToLower() ` + ', "guestProxyAgentServiceStatus": "' + $guestProxyAgentServiceStatus ` + '", "guestProxyAgentLogGenerated": ' + $guestProxyAgentLogGenerated.ToString().ToLower() + '}' Write-Output "$((Get-Date).ToUniversalTime()) - $jsonString" Set-Content -Path $customOutputJsonPath -Value $jsonString $headers = @{ 'x-ms-blob-type' = 'BlockBlob' } #Upload File... 
Invoke-RestMethod -Uri $decodedUrlString -Method Put -Headers $headers -InFile $customOutputJsonPath GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/Scripts/IMDSPingTest.ps1000066400000000000000000000036161500521614600265220ustar00rootroot00000000000000# Copyright (c) Microsoft Corporation # SPDX-License-Identifier: MIT param ( [Parameter(Mandatory = $true, Position = 0)] [string]$imdsSecureChannelEnabled ) Write-Output "$((Get-Date).ToUniversalTime()) - imdsSecureChannelEnabled=$imdsSecureChannelEnabled" $i = 0 # make 10 requests if any failed, will failed the test for tcp port scalability config while ($i -lt 10) { try { $url = "http://169.254.169.254/metadata/instance?api-version=2020-06-01" $webRequest = [System.Net.HttpWebRequest]::Create($url) $webRequest.Headers.Add("Metadata", "True") $response = $webRequest.GetResponse() if ($response.StatusCode -eq [System.Net.HttpStatusCode]::OK) { Write-Output "$((Get-Date).ToUniversalTime()) - Response status code is OK (200)" } else { Write-Error "$((Get-Date).ToUniversalTime()) - Ping test failed. Response status code is $($response.StatusCode)" exit -1 } if ("$imdsSecureChannelEnabled" -ieq "true") { # case insensitive comparison $responseHeaders = $response.Headers if ($null -eq $responseHeaders["x-ms-azure-host-authorization"]) { Write-Error "$((Get-Date).ToUniversalTime()) - Ping test failed. Response does not contain x-ms-azure-host-authorization header" exit -1 } else { Write-Output "$((Get-Date).ToUniversalTime()) - Ping test passed. Response contains x-ms-azure-host-authorization header" } } else { Write-Output "$((Get-Date).ToUniversalTime()) - IMDS secure channel is not enabled. Skipping x-ms-azure-host-authorization header validation" } $webRequest.Abort() } catch { Write-Error "$((Get-Date).ToUniversalTime()) - An error occurred: $_" exit -1 } start-sleep -Seconds 1 $i++ } exit 0GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/Scripts/InstallGuestProxyAgent.ps1000066400000000000000000000020141500521614600307360ustar00rootroot00000000000000# Copyright (c) Microsoft Corporation # SPDX-License-Identifier: MIT param ( [Parameter(Mandatory=$true, Position=0)] [string]$zipsas ) $currentFolder = $PWD.Path $zipFilePath = Join-Path -Path $currentFolder -ChildPath "GuestProxyAgent.zip" $decodedUrlBytes = [System.Convert]::FromBase64String($zipsas) $decodedUrlString = [System.Text.Encoding]::UTF8.GetString($decodedUrlBytes) Write-Output "$((Get-Date).ToUniversalTime()) - start downloading zip file path from blob: decodedUrlString" wget $decodedUrlString -OutFile $zipFilePath $unzipFolder = Join-Path -Path $currentFolder -ChildPath "GuestProxyAgent" Write-Output "$((Get-Date).ToUniversalTime()) - unzip to folder: $unzipFolder" Expand-Archive $zipFilePath -DestinationPath $unzipFolder $msiFilePath = Get-ChildItem -Path $unzipFolder -Filter "*.msi" $msiFileFullPath = $msiFilePath.FullName Write-Output "$((Get-Date).ToUniversalTime()) - installing/updating guest proxy agent, msi file path: $msiFileFullPath" Start-Process -FilePath $msiFileFullPath -WaitGuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/Scripts/InstallGuestProxyAgentExtension.ps1000066400000000000000000000111671500521614600326440ustar00rootroot00000000000000# Copyright (c) Microsoft Corporation # SPDX-License-Identifier: MIT param ( [Parameter(Mandatory=$true, Position=0)] [string]$devExtensionSas ) $decodedUrlBytes = [System.Convert]::FromBase64String($devExtensionSas) $decodedUrlString = [System.Text.Encoding]::UTF8.GetString($decodedUrlBytes) Write-Output 
"$((Get-Date).ToUniversalTime()) - Starting install guest proxy agent extension script" $proxy = Get-ChildItem "HKLM:\SOFTWARE\Microsoft\Windows Azure\HandlerState" foreach ($obj in $proxy) { if($obj.Name -like "*Microsoft.CPlat.ProxyAgent.ProxyAgentWindows*") { $nonRootRegKeyPath = $obj.Name Write-Output "$((Get-Date).ToUniversalTime()) - Got proxy agent extension registry key path: " $nonRootRegKeyPath } } $registrykeyPath = $nonRootRegKeyPath -replace '^HKEY_LOCAL_MACHINE', 'HKLM:' $PIRversion = ($registrykeyPath -split "_")[1] Write-Output "$((Get-Date).ToUniversalTime()) - PIR Version: $PIRversion" $seqNo = (Get-ItemProperty -Path $registrykeyPath).SequenceNumber Write-Output "$((Get-Date).ToUniversalTime()) - Seq No: $seqNo" $statusFolderPath = (Get-ItemProperty -Path $registrykeyPath).StatusFolder Write-Output "$((Get-Date).ToUniversalTime()) - Status Folder: $statusFolderPath" $statusFilePath = [IO.Path]::Combine($statusFolderPath, $seqNo + ".status") Write-Output "$((Get-Date).ToUniversalTime()) - Status file path: $statusFilePath" $extensionFolder = Split-Path -Path $statusFolderPath -Parent Write-Output "$((Get-Date).ToUniversalTime()) - Extension Folder: $extensionFolder" $PIRExePath = [IO.Path]::Combine($extensionFolder, "ProxyAgentExt.exe") $PIRExtensionFolderZIPLocation = [IO.Path]::Combine($extensionFolder, "Microsoft.CPlat.ProxyAgent.ProxyAgentWindows_" + $PIRversion + ".zip") Write-Output "$((Get-Date).ToUniversalTime()) - Delete status file of PIR version" $boolStatus = Test-Path -Path $statusFilePath if ($boolStatus) { Remove-Item -Path $statusFilePath -Force } Write-Output "$((Get-Date).ToUniversalTime()) - Check that status file is success with 5 minute timeout" $timeoutInSeconds = 300 $stopwatch = [System.Diagnostics.Stopwatch]::StartNew() do { $boolStatus = Test-Path -Path $statusFilePath if ($boolStatus) { $json = Get-Content $statusFilePath | Out-String | ConvertFrom-Json $extensionStatus = $json.status.status if ($extensionStatus -eq "success") { Write-Output "$((Get-Date).ToUniversalTime()) - The extension status is success: $extensionStatus." break } if ($extensionStatus -eq "error") { Write-Output "$((Get-Date).ToUniversalTime()) - The extension status is error: $extensionStatus." break } if ($stopwatch.Elapsed.TotalSeconds -ge $timeoutInSeconds) { Write-Output "$((Get-Date).ToUniversalTime()) - Timeout reached. Error, The extension status is $extensionStatus." break } } start-sleep -Seconds 3 } until ($false) Write-Output "$((Get-Date).ToUniversalTime()) - Check that Extension service exists " $serviceName = "GuestProxyAgentVMExtension" $service = Get-Service -Name $serviceName -ErrorAction SilentlyContinue if ($service -ne $null) { $serviceStatus = $service.Status Write-Output "$((Get-Date).ToUniversalTime()) - The service $serviceName exists with status: $serviceStatus." } else { Write-Output "$((Get-Date).ToUniversalTime()) - The service $serviceName does not exist." } Write-Output "$((Get-Date).ToUniversalTime()) - Check Extension process exists" $processName = "ProxyAgentExt" $process = Get-Process -Name $processName -ErrorAction SilentlyContinue if ($process -ne $null) { Write-Output "$((Get-Date).ToUniversalTime()) - The process $processName is running." } else { Write-Output "$((Get-Date).ToUniversalTime()) - The process $processName is not running." 
} Write-Output "$((Get-Date).ToUniversalTime()) - Delete extension zip file $PIRExtensionFolderZIPLocation" Remove-Item -Path $PIRExtensionFolderZIPLocation -Force wget $decodedUrlString -OutFile $PIRExtensionFolderZIPLocation Write-Output "$((Get-Date).ToUniversalTime()) - downloaded the proxyagent extension file to path: " $PIRExtensionFolderZIPLocation Write-Output "$((Get-Date).ToUniversalTime()) - net stop $serviceName" net stop $serviceName Write-Output "$((Get-Date).ToUniversalTime()) - TASKKILL /F /IM ProxyAgentExt.exe" TASKKILL /F /IM ProxyAgentExt.exe Write-Output "$((Get-Date).ToUniversalTime()) - Delete registry key at $registrykeyPath" Remove-Item -Path $registrykeyPath -Recurse Write-Output "$((Get-Date).ToUniversalTime()) - Delete status file $statusFilePath" Remove-Item -Path $statusFilePath -Force GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/Scripts/PingTestOnBindingLocalIP.ps1000066400000000000000000000035411500521614600310360ustar00rootroot00000000000000# Copyright (c) Microsoft Corporation # SPDX-License-Identifier: MIT param ( [Parameter(Mandatory = $true, Position = 0)] [string]$imdsSecureChannelEnabled ) Write-Output "$((Get-Date).ToUniversalTime()) - imdsSecureChannelEnabled=$imdsSecureChannelEnabled" try { $localIP = (Get-NetIPAddress -AddressFamily IPv4 -InterfaceAlias Ethernet)[0].IPAddress.ToString() $url = "http://169.254.169.254/metadata/instance?api-version=2020-06-01" $webRequest = [System.Net.HttpWebRequest]::Create($url) $webRequest.Headers.Add("Metadata", "True") $webRequest.ServicePoint.BindIPEndPointDelegate = { return New-Object System.Net.IPEndPoint([System.Net.IPAddress]::Parse($localIP), 0) } $response = $webRequest.GetResponse() if ($response.StatusCode -eq [System.Net.HttpStatusCode]::OK) { Write-Output "$((Get-Date).ToUniversalTime()) - Response status code is OK (200)" } else { Write-Error "$((Get-Date).ToUniversalTime()) - Ping test failed. Response status code is $($response.StatusCode)" exit -1 } if ("$imdsSecureChannelEnabled" -ieq "true") { # case insensitive comparison $responseHeaders = $response.Headers if ($null -eq $responseHeaders["x-ms-azure-host-authorization"]) { Write-Error "$((Get-Date).ToUniversalTime()) - Ping test failed. Response does not contain x-ms-azure-host-authorization header" exit -1 } else { Write-Output "$((Get-Date).ToUniversalTime()) - Ping test passed. Response contains x-ms-azure-host-authorization header" } } else { Write-Output "$((Get-Date).ToUniversalTime()) - IMDS secure channel is not enabled. 
Skipping x-ms-azure-host-authorization header validation" } } catch { Write-Error "$((Get-Date).ToUniversalTime()) - An error occurred: $_" exit -1 } exit 0GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/Settings/000077500000000000000000000000001500521614600240265ustar00rootroot00000000000000GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/Settings/RunCommandSetting.cs000066400000000000000000000025541500521614600277640ustar00rootroot00000000000000īģŋ// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT using Azure.Core; using System; using System.Collections.Generic; using System.Linq; using System.Text; using System.Threading.Tasks; namespace GuestProxyAgentTest.Settings { /// /// Setting class for RunCommand /// public class RunCommandSetting { internal string runCommandName = null!; internal string runCommandScriptSAS = null!; internal Dictionary runCommandParameters = new Dictionary(); /// /// run command will write std output to this blob /// internal string outputBlobSAS = null!; /// /// run command will write std error output to this blob /// internal string errorBlobSAS = null!; /// /// if this one is not null or empty, it will /// be covert to base64 and passed to run command as an input parameter with name /// the run command script can write customized information to this blob, .i.e. agent instance view /// internal string customOutputSAS = null!; internal TestScenarioSetting testCaseSetting = null!; } } GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/Settings/TestScenarioSetting.cs000066400000000000000000000033671500521614600303270ustar00rootroot00000000000000īģŋ// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT namespace GuestProxyAgentTest.Settings { /// /// Setting for a test case, including test group name, case name, vmImage Type, etc /// public class TestScenarioSetting { internal string testGroupName = ""; internal string testScenarioName = "BVTScenario"; internal string vmImagePublisher = ""; internal string vmImageOffer = ""; internal string vmImageSku = ""; internal string vmImageVersion = ""; internal string suffixName = new Random().Next(1000).ToString(); internal string testScenarioClassName = "GuestProxyAgentTest.TestScenarios.BVTScenario"; internal int testScenarioTimeoutMilliseconds = 1000 * 60 * 120; internal VMImageDetails VMImageDetails { get { return new VMImageDetails { Publisher = vmImagePublisher, Offer = vmImageOffer, Sku = vmImageSku, Version = vmImageVersion }; } } public string ResourceGroupName { get { return this.testGroupName + "_" + this.testScenarioName + suffixName; } } public string TestScenarioStorageFolderPrefix { get { return ResourceGroupName; } } } public class VMImageDetails { public string Publisher { get; set; } = null!; public string Offer { get; set; } = null!; public string Sku { get; set; } = null!; public string Version { get; set; } = null!; } } GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/Settings/TestSetting.cs000066400000000000000000000113461500521614600266370ustar00rootroot00000000000000īģŋ// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT using Azure.Core; using GuestProxyAgentTest.Models; using GuestProxyAgentTest.Utilities; using Newtonsoft.Json; using System.Reflection; using System.Runtime.Serialization; using System.Text.Json.Serialization; namespace GuestProxyAgentTest.Settings { /// /// E2ETestSetting related azure resource /// public class TestSetting { private static TestSetting _instance = null!; public static TestSetting Instance { get { return 
_instance; } } internal string tenantId = ""; internal string appClientId = ""; internal string certThumbprint = ""; internal string subscriptionId = ""; internal AzureLocation location = AzureLocation.WestUS; internal string vmSize = "Standard_B4as_v2"; internal string scriptsFolder = null!; internal string resourcesFolder = null!; internal string zipFilePath = null!; internal string sharedStorageAccountUrl = null!; internal string testResultFolder = null!; internal int testMapTimeoutMilliseconds = 1000 * 60 * 180; internal string windowsInVmWireServerAccessControlProfileReferenceId = null!; internal string windowsInVmIMDSAccessControlProfileReferenceId = null!; internal string linuxInVmWireServerAccessControlProfileReferenceId = null!; internal string linuxInVmIMDSAccessControlProfileReferenceId = null!; internal string InVmWireServerAccessControlProfileReferenceId { get => Constants.IS_WINDOWS() ? windowsInVmWireServerAccessControlProfileReferenceId : linuxInVmWireServerAccessControlProfileReferenceId; } internal string InVmIMDSAccessControlProfileReferenceId { get => Constants.IS_WINDOWS() ? windowsInVmIMDSAccessControlProfileReferenceId : linuxInVmIMDSAccessControlProfileReferenceId; } private TestSetting() { } public static void Init(TestConfig testConfig, string zipFilePath, string testResultFolder) { var scriptsFolder = Constants.IS_WINDOWS() ? "Scripts" : "LinuxScripts"; if (_instance != null) { return; } _instance = new TestSetting() { tenantId = testConfig.TenantId, appClientId = testConfig.AppClientId, location = new AzureLocation(testConfig.Location), subscriptionId = testConfig.SubscriptionId, vmSize = testConfig.VmSize, scriptsFolder = Path.Combine(Path.GetDirectoryName(Assembly.GetExecutingAssembly().Location)!, scriptsFolder), resourcesFolder = Path.Combine(Path.GetDirectoryName(Assembly.GetExecutingAssembly().Location)!, "Resources"), sharedStorageAccountUrl = testConfig.SharedStorageAccountUrl, windowsInVmWireServerAccessControlProfileReferenceId = testConfig.WindowsInVmWireServerAccessControlProfileReferenceId, windowsInVmIMDSAccessControlProfileReferenceId = testConfig.WindowsInVmIMDSAccessControlProfileReferenceId, linuxInVmWireServerAccessControlProfileReferenceId = testConfig.LinuxInVmWireServerAccessControlProfileReferenceId, linuxInVmIMDSAccessControlProfileReferenceId = testConfig.LinuxInVmIMDSAccessControlProfileReferenceId, zipFilePath = zipFilePath, testResultFolder = testResultFolder, }; } } public class GuestProxyAgentE2ETokenCredential : TokenCredential { public override AccessToken GetToken(TokenRequestContext requestContext, CancellationToken cancellationToken) { return TestCommonUtilities.GetAccessTokenFromEnv(Constants.GUEST_PROXY_AGENT_E2E_ACCESS_TOKEN_ENV); } public override ValueTask GetTokenAsync(TokenRequestContext requestContext, CancellationToken cancellationToken) { return ValueTask.FromResult(GetToken(requestContext, cancellationToken)); } } public class GuestProxyAgentE2EStorageAccountTokenCredential : TokenCredential { public override AccessToken GetToken(TokenRequestContext requestContext, CancellationToken cancellationToken) { return TestCommonUtilities.GetAccessTokenFromEnv(Constants.GUEST_PROXY_AGENT_E2E_ACCESS_TOKEN_STORAGE_ACCOUNT_ENV); } public override ValueTask GetTokenAsync(TokenRequestContext requestContext, CancellationToken cancellationToken) { return ValueTask.FromResult(GetToken(requestContext, cancellationToken)); } } } 
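// NOTE (illustrative only): a sketch of how these TokenCredential implementations are typically
// consumed by the test project (ArmClient is from Azure.ResourceManager, BlobServiceClient from
// Azure.Storage.Blobs; the exact call sites are assumptions and are not shown in this file):
//
//   var armClient = new ArmClient(new GuestProxyAgentE2ETokenCredential(), TestSetting.Instance.subscriptionId);
//   var blobServiceClient = new BlobServiceClient(
//       new Uri(TestSetting.Instance.sharedStorageAccountUrl),
//       new GuestProxyAgentE2EStorageAccountTokenCredential());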
GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestCases/000077500000000000000000000000001500521614600241245ustar00rootroot00000000000000
GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestCases/AddLinuxVMExtensionCase.cs000066400000000000000000000076401500521614600311260ustar00rootroot00000000000000
// Copyright (c) Microsoft Corporation
// SPDX-License-Identifier: MIT
using GuestProxyAgentTest.TestScenarios;
using Azure.ResourceManager.Compute;

namespace GuestProxyAgentTest.TestCases
{
    /// <summary>
    /// Add Linux VM extension test case
    /// </summary>
    public class AddLinuxVMExtensionCase : TestCaseBase
    {
        public AddLinuxVMExtensionCase() : base("AddLinuxVMExtensionCase")
        { }
        public AddLinuxVMExtensionCase(string testCaseName) : base(testCaseName)
        { }

        private const string EXTENSION_NAME = "ProxyAgentLinuxTest";

        public override async Task StartAsync(TestCaseExecutionContext context)
        {
            var vmr = context.VirtualMachineResource;
            var vmExtData = new VirtualMachineExtensionData(GuestProxyAgentTest.Settings.TestSetting.Instance.location)
            {
                Location = GuestProxyAgentTest.Settings.TestSetting.Instance.location,
                Publisher = "Microsoft.CPlat.ProxyAgent",
                ExtensionType = "ProxyAgentLinuxTest",
                TypeHandlerVersion = "1.0",
                AutoUpgradeMinorVersion = false,
                EnableAutomaticUpgrade = false,
                Settings = { }
            };
            try
            {
                context.TestResultDetails = new GuestProxyAgentTest.Models.TestCaseResultDetails
                {
                    StdOut = "",
                    StdErr = "",
                    Succeed = false,
                    FromBlob = false,
                };
                var result = await vmr.GetVirtualMachineExtensions().CreateOrUpdateAsync(Azure.WaitUntil.Completed, EXTENSION_NAME, vmExtData, cancellationToken: context.CancellationToken);
                var provisioningState = result.Value.Data.ProvisioningState;
                if (result.HasValue && result.Value.Data != null && result.Value.Data.ProvisioningState == "Succeeded")
                {
                    // add vm extension operation succeeded
                    context.TestResultDetails.Succeed = true;
                    context.TestResultDetails.CustomOut = FormatVMExtensionData(result.Value.Data);
                    return;
                }
                else
                {
                    // capture the provisioning data into TestResultDetails and continue polling the extension instance view
                    context.TestResultDetails.StdErr = string.Format("VMExtension provisioning data: {0}", FormatVMExtensionData(result?.Value?.Data));
                }
            }
            catch (Exception ex)
            {
                // capture the exception into TestResultDetails and continue polling the extension instance view
                context.TestResultDetails.StdErr = ex.ToString();
            }

            // poll the extension instance view for up to 5 more minutes
            var startTime = DateTime.UtcNow;
            while (true)
            {
                var vmExtension = await vmr.GetVirtualMachineExtensionAsync(EXTENSION_NAME, expand: "instanceView", cancellationToken: context.CancellationToken);
                var instanceView = vmExtension?.Value?.Data?.InstanceView;
                if (instanceView?.Statuses?.Count > 0 && instanceView.Statuses[0].DisplayStatus == "Provisioning succeeded")
                {
                    context.TestResultDetails.Succeed = true;
                    context.TestResultDetails.CustomOut = FormatVMExtensionData(vmExtension.Value.Data);
                    return;
                }
                if (DateTime.UtcNow - startTime > TimeSpan.FromMinutes(5))
                {
                    // poll timed out, report failure with the extension data
                    context.TestResultDetails.CustomOut = FormatVMExtensionData(vmExtension?.Value?.Data);
                    return;
                }
                // wait for 10 seconds before polling again
                await Task.Delay(10000);
            }
        }
    }
}
GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestCases/EnableProxyAgentCase.cs000066400000000000000000000042621500521614600304620ustar00rootroot00000000000000
using Azure.ResourceManager.Compute.Models;
using GuestProxyAgentTest.Models;
using GuestProxyAgentTest.Settings;
using GuestProxyAgentTest.TestScenarios;
using
Newtonsoft.Json; namespace GuestProxyAgentTest.TestCases { internal class EnableProxyAgentCase : TestCaseBase { public EnableProxyAgentCase() : this("EnableProxyAgentCase", true) { } public EnableProxyAgentCase(string testCaseName) : this(testCaseName, true) { } public EnableProxyAgentCase(string testCaseName, bool enableProxyAgent) : base(testCaseName) { EnableProxyAgent = enableProxyAgent; } internal bool EnableProxyAgent { get; set; } public override async Task StartAsync(TestCaseExecutionContext context) { var vmr = context.VirtualMachineResource; var patch = new VirtualMachinePatch() { SecurityProfile = new SecurityProfile { ProxyAgentSettings = new ProxyAgentSettings { Enabled = EnableProxyAgent } } }; if (EnableProxyAgent) { patch.SecurityProfile.ProxyAgentSettings.WireServer = new HostEndpointSettings { InVmAccessControlProfileReferenceId = TestSetting.Instance.InVmWireServerAccessControlProfileReferenceId }; patch.SecurityProfile.ProxyAgentSettings.Imds = new HostEndpointSettings { InVmAccessControlProfileReferenceId = TestSetting.Instance.InVmIMDSAccessControlProfileReferenceId }; } await vmr.UpdateAsync(Azure.WaitUntil.Completed, patch, cancellationToken: context.CancellationToken); var iv = await vmr.InstanceViewAsync(); context.TestResultDetails = new TestCaseResultDetails { CustomOut = JsonConvert.SerializeObject(iv), StdOut = "Enable ProxyAgent succeed.", StdErr = "", Succeed = true, FromBlob = false, }; } } } GuestProxyAgentExtensionValidationCase.cs000066400000000000000000000053041500521614600342120ustar00rootroot00000000000000GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestCases// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT // create validation test to check the guest proxy agent service status and log file using GuestProxyAgentTest.Extensions; using GuestProxyAgentTest.TestScenarios; using GuestProxyAgentTest.Utilities; namespace GuestProxyAgentTest.TestCases { public class GuestProxyAgentExtensionValidationCase : TestCaseBase { private string expectedProxyAgentVersion = ""; public GuestProxyAgentExtensionValidationCase() : base("GuestProxyAgentExtensionValidationCase") { } public GuestProxyAgentExtensionValidationCase(string testCaseName, string expectedProxyAgentVersion) : base(testCaseName) { this.expectedProxyAgentVersion = expectedProxyAgentVersion; } public override async Task StartAsync(TestCaseExecutionContext context) { List<(string, string)> parameterList = new List<(string, string)>(); parameterList.Add(("expectedProxyAgentVersion", expectedProxyAgentVersion)); context.TestResultDetails = (await RunScriptViaRunCommandV2Async(context, Constants.GUEST_PROXY_AGENT_EXTENSION_VALIDATION_SCRIPT_NAME, parameterList)).ToTestResultDetails(ConsoleLog); if (context.TestResultDetails.Succeed && context.TestResultDetails.CustomOut != null) { var validationDetails = context.TestResultDetails.SafeDeserializedCustomOutAs(); if (validationDetails != null && validationDetails.guestProxyAgentExtensionServiceExist && validationDetails.guestProxyAgentExtensionProcessExist && validationDetails.guestProxyAgentExtensionServiceStatus && validationDetails.guestProxyAgentExtensionStatusObjGenerated && validationDetails.guestProxyAgentExtensionVersion && validationDetails.guestProxyAgentExtensionInstanceView) { context.TestResultDetails.Succeed = true; } else { context.TestResultDetails.Succeed = false; } } } } class GuestProxyAgentExtensionValidationDetails { public bool guestProxyAgentExtensionServiceExist { get; set; } public bool 
guestProxyAgentExtensionProcessExist { get; set; } public bool guestProxyAgentExtensionServiceStatus { get; set; } public bool guestProxyAgentExtensionStatusObjGenerated { get; set; } public bool guestProxyAgentExtensionVersion { get; set; } public bool guestProxyAgentExtensionInstanceView { get; set; } } }GuestProxyAgentLoadedModulesValidationCase.cs000066400000000000000000000045201500521614600347560ustar00rootroot00000000000000GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestCasesīģŋ// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT using GuestProxyAgentTest.Extensions; using GuestProxyAgentTest.Settings; using GuestProxyAgentTest.TestScenarios; using GuestProxyAgentTest.Utilities; using Newtonsoft.Json; namespace GuestProxyAgentTest.TestCases { public class GuestProxyAgentLoadedModulesValidationCase : TestCaseBase { public GuestProxyAgentLoadedModulesValidationCase() : base("GuestProxyAgentLoadedModulesValidationCase") { } public override async Task StartAsync(TestCaseExecutionContext context) { var baseLineModulesFilePath = Path.Combine(TestSetting.Instance.resourcesFolder, "GuestProxyAgentLoadedModulesBaseline.txt"); var baseLineModulesSas = StorageHelper.Instance.Upload2SharedBlob(Constants.SHARED_E2E_TEST_OUTPUT_CONTAINER_NAME, baseLineModulesFilePath, context.ScenarioSetting.TestScenarioStorageFolderPrefix); context.TestResultDetails = (await RunScriptViaRunCommandV2Async(context, "GuestProxyAgentLoadedModulesValidation.ps1", new List<(string, string)> { ("loadedModulesBaseLineSAS", System.Convert.ToBase64String(System.Text.Encoding.UTF8.GetBytes(baseLineModulesSas))) })).ToTestResultDetails(ConsoleLog); if (context.TestResultDetails.Succeed && context.TestResultDetails.CustomOut != null) { var validationDetails = context.TestResultDetails.SafeDeserializedCustomOutAs(); // if the validation result is match or no new added modules, then consider the case as succeed. 
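                // For example (illustrative only): a baseline module that the process did not load
                // (reported in MissedInBaselineModules) is tolerated, while any module reported in
                // NewAddedModules, i.e. loaded in the VM but absent from the baseline, fails the case.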
                if (validationDetails != null && (validationDetails.IsMatch || validationDetails.NewAddedModules == null || validationDetails.NewAddedModules.Count == 0))
                {
                    context.TestResultDetails.Succeed = true;
                }
                else
                {
                    context.TestResultDetails.Succeed = false;
                }
            }
        }
    }

    class LoadedModulesValidationDetails
    {
        public List<string> MissedInBaselineModules { get; set; } = new List<string>();
        public List<string> NewAddedModules { get; set; } = new List<string>();
        public bool IsMatch { get; set; }
    }
}
GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestCases/GuestProxyAgentValidationCase.cs000066400000000000000000000051241500521614600323740ustar00rootroot00000000000000
// Copyright (c) Microsoft Corporation
// SPDX-License-Identifier: MIT
using GuestProxyAgentTest.Extensions;
using GuestProxyAgentTest.TestScenarios;
using GuestProxyAgentTest.Utilities;
using Newtonsoft.Json;

namespace GuestProxyAgentTest.TestCases
{
    /// <summary>
    /// Guest Proxy Agent validation test case
    /// </summary>
    public class GuestProxyAgentValidationCase : TestCaseBase
    {
        private static readonly string EXPECTED_GUEST_PROXY_AGENT_SERVICE_STATUS;
        static GuestProxyAgentValidationCase()
        {
            if (Constants.IS_WINDOWS())
            {
                EXPECTED_GUEST_PROXY_AGENT_SERVICE_STATUS = "Running";
            }
            else
            {
                EXPECTED_GUEST_PROXY_AGENT_SERVICE_STATUS = "enabled";
            }
        }

        public GuestProxyAgentValidationCase() : base("GuestProxyAgentValidationCase")
        { }

        public override async Task StartAsync(TestCaseExecutionContext context)
        {
            context.TestResultDetails = (await RunScriptViaRunCommandV2Async(context, Constants.GUEST_PROXY_AGENT_VALIDATION_SCRIPT_NAME, null!)).ToTestResultDetails(ConsoleLog);
            if (context.TestResultDetails.Succeed && context.TestResultDetails.CustomOut != null)
            {
                var validationDetails = context.TestResultDetails.SafeDeserializedCustomOutAs<GuestProxyAgentValidationDetails>();
                // check the validation json output: if the guest proxy agent service was installed and running, the guest proxy agent process exists, and the log was generated,
                // then consider it a success, otherwise fail the case.
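                // NOTE (illustrative only): the CustomOut payload checked below is the
                // proxyagentvalidation.json uploaded by GuestProxyAgentValidation.ps1 (or the Linux
                // equivalent), shaped roughly like:
                //   { "guestProxyAgentServiceInstalled": true, "guestProxyProcessStarted": true,
                //     "guestProxyAgentServiceStatus": "Running", "guestProxyAgentLogGenerated": true }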
if (validationDetails != null && validationDetails.GuestProxyAgentServiceInstalled && validationDetails.GuestProxyAgentServiceStatus.Equals(EXPECTED_GUEST_PROXY_AGENT_SERVICE_STATUS, StringComparison.OrdinalIgnoreCase) && validationDetails.GuestProxyProcessStarted && validationDetails.GuestProxyAgentLogGenerated) { context.TestResultDetails.Succeed = true; } else { context.TestResultDetails.Succeed = false; } } } } class GuestProxyAgentValidationDetails { public bool GuestProxyAgentServiceInstalled { get; set; } public bool GuestProxyProcessStarted { get; set; } public bool GuestProxyAgentLogGenerated { get; set; } public string GuestProxyAgentServiceStatus { get; set; } = null!; } } GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestCases/IMDSPingTestCase.cs000066400000000000000000000017571500521614600274730ustar00rootroot00000000000000īģŋ// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT using GuestProxyAgentTest.Extensions; using GuestProxyAgentTest.TestScenarios; using GuestProxyAgentTest.Utilities; namespace GuestProxyAgentTest.TestCases { public class IMDSPingTestCase : TestCaseBase { public IMDSPingTestCase(string testCaseName, bool imdsSecureChannelEnabled) : base(testCaseName) { ImdsSecureChannelEnabled = imdsSecureChannelEnabled; } private bool ImdsSecureChannelEnabled { get; set; } public override async Task StartAsync(TestCaseExecutionContext context) { List<(string, string)> parameterList = new List<(string, string)>(); parameterList.Add(("imdsSecureChannelEnabled", ImdsSecureChannelEnabled.ToString())); context.TestResultDetails = (await RunScriptViaRunCommandV2Async(context, Constants.IMDS_PING_TEST_SCRIPT_NAME, parameterList, false)).ToTestResultDetails(ConsoleLog); } } } GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestCases/InstallOrUpdateGuestProxyAgentCase.cs000066400000000000000000000034371500521614600333610ustar00rootroot00000000000000īģŋ// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT using GuestProxyAgentTest.TestCases; using GuestProxyAgentTest.Settings; using GuestProxyAgentTest.TestScenarios; using GuestProxyAgentTest.Utilities; namespace GuestProxyAgentTest.TestCases { /// /// Install or Update Guest Proxy Agent through Msi test case /// public class InstallOrUpdateGuestProxyAgentCase : TestCaseBase { public InstallOrUpdateGuestProxyAgentCase() : base("InstallOrUpdateGuestProxyAgentCase") { } public override async Task StartAsync(TestCaseExecutionContext context) { var runCommandRes = await RunCommandRunner.ExecuteRunCommandOnVM(context.VirtualMachineResource, new RunCommandSettingBuilder() .TestScenarioSetting(context.ScenarioSetting) .RunCommandName("InstallOrUpdateProxyAgentMsi") .ScriptFullPath(Path.Combine(TestSetting.Instance.scriptsFolder, Constants.INSTALL_GUEST_PROXY_AGENT_SCRIPT_NAME)) , context.CancellationToken , (builder) => { var zipsas = StorageHelper.Instance.Upload2SharedBlob(Constants.SHARED_MSI_CONTAINER_NAME, TestSetting.Instance.zipFilePath, context.ScenarioSetting.TestScenarioStorageFolderPrefix); return builder.AddParameter("zipsas", Convert.ToBase64String(System.Text.Encoding.UTF8.GetBytes(zipsas))); }); context.TestResultDetails = new Models.TestCaseResultDetails { Succeed = runCommandRes.Succeed, StdErr = runCommandRes.StdErr, StdOut = runCommandRes.StdOut, FromBlob = true }; } } } InstallOrUpdateGuestProxyAgentExtensionCase.cs000066400000000000000000000035401500521614600351720ustar00rootroot00000000000000GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestCases// Copyright (c) 
Microsoft Corporation // SPDX-License-Identifier: MIT using GuestProxyAgentTest.TestCases; using GuestProxyAgentTest.Settings; using GuestProxyAgentTest.TestScenarios; using GuestProxyAgentTest.Utilities; namespace GuestProxyAgentTest.TestCases { /// /// Install or Update Guest Proxy Agent through Msi test case /// public class InstallOrUpdateGuestProxyAgentExtensionCase : TestCaseBase { public InstallOrUpdateGuestProxyAgentExtensionCase() : base("InstallOrUpdateGuestProxyAgentExtensionCase") { } public override async Task StartAsync(TestCaseExecutionContext context) { var runCommandRes = await RunCommandRunner.ExecuteRunCommandOnVM(context.VirtualMachineResource, new RunCommandSettingBuilder() .TestScenarioSetting(context.ScenarioSetting) .RunCommandName("InstallGuestProxyAgentExtension") .ScriptFullPath(Path.Combine(TestSetting.Instance.scriptsFolder, Constants.INSTALL_GUEST_PROXY_AGENT_EXTENSION_SCRIPT_NAME)) , context.CancellationToken , (builder) => { var devExtensionSas = StorageHelper.Instance.Upload2SharedBlob(Constants.SHARED_MSI_CONTAINER_NAME, TestSetting.Instance.zipFilePath, context.ScenarioSetting.TestScenarioStorageFolderPrefix); return builder.AddParameter("devExtensionSas", Convert.ToBase64String(System.Text.Encoding.UTF8.GetBytes(devExtensionSas))); }); context.TestResultDetails = new Models.TestCaseResultDetails { Succeed = runCommandRes.Succeed, StdErr = runCommandRes.StdErr, StdOut = runCommandRes.StdOut, FromBlob = true }; } } } InstallOrUpdateGuestProxyAgentPackageCase.cs000066400000000000000000000034411500521614600345510ustar00rootroot00000000000000GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestCasesīģŋ// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT using GuestProxyAgentTest.Settings; using GuestProxyAgentTest.TestScenarios; using GuestProxyAgentTest.Utilities; namespace GuestProxyAgentTest.TestCases { /// /// Install or Update Guest Proxy Agent through Msi test case /// public class InstallOrUpdateGuestProxyAgentPackageCase : TestCaseBase { public InstallOrUpdateGuestProxyAgentPackageCase() : base("InstallOrUpdateGuestProxyAgentPackageCase") { } public override async Task StartAsync(TestCaseExecutionContext context) { var runCommandRes = await RunCommandRunner.ExecuteRunCommandOnVM(context.VirtualMachineResource, new RunCommandSettingBuilder() .TestScenarioSetting(context.ScenarioSetting) .RunCommandName("InstallOrUpdateProxyAgentPackage") .ScriptFullPath(Path.Combine(TestSetting.Instance.scriptsFolder, Constants.INSTALL_LINUX_GUEST_PROXY_AGENT_PACKAGE_SCRIPT_NAME)) , context.CancellationToken , (builder) => { var zipsas = StorageHelper.Instance.Upload2SharedBlob(Constants.SHARED_MSI_CONTAINER_NAME, TestSetting.Instance.zipFilePath, context.ScenarioSetting.TestScenarioStorageFolderPrefix); return builder.AddParameter("zipsas", Convert.ToBase64String(System.Text.Encoding.UTF8.GetBytes(zipsas))); }); context.TestResultDetails = new Models.TestCaseResultDetails { Succeed = runCommandRes.Succeed, StdErr = runCommandRes.StdErr, StdOut = runCommandRes.StdOut, FromBlob = true }; } } } GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestCases/LocalIPBindingCase.cs000066400000000000000000000016731500521614600300340ustar00rootroot00000000000000īģŋ// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT using GuestProxyAgentTest.Extensions; using GuestProxyAgentTest.TestScenarios; namespace GuestProxyAgentTest.TestCases { public class LocalIPBindingCase : TestCaseBase { public LocalIPBindingCase(bool imdsSecureChannelEnabled) : 
base("LocalIPBindingCase") { ImdsSecureChannelEnabled = imdsSecureChannelEnabled; } private bool ImdsSecureChannelEnabled { get; set; } public override async Task StartAsync(TestCaseExecutionContext context) { List<(string, string)> parameterList = new List<(string, string)>(); parameterList.Add(("imdsSecureChannelEnabled", ImdsSecureChannelEnabled.ToString())); context.TestResultDetails = (await RunScriptViaRunCommandV2Async(context, "PingTestOnBindingLocalIP.ps1", parameterList, false)).ToTestResultDetails(ConsoleLog); } } } GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestCases/RebootVMCase.cs000066400000000000000000000055011500521614600267450ustar00rootroot00000000000000īģŋ// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT using GuestProxyAgentTest.Models; using GuestProxyAgentTest.TestScenarios; using Newtonsoft.Json; namespace GuestProxyAgentTest.TestCases { /// /// Reboot VM test case /// public class RebootVMCase : TestCaseBase { public RebootVMCase() : base("RebootVMCase") { } public RebootVMCase(string testCaseName) : base(testCaseName) { } public override async Task StartAsync(TestCaseExecutionContext context) { context.TestResultDetails = new TestCaseResultDetails { StdOut = "", StdErr = "", Succeed = false, FromBlob = false, }; var vmr = context.VirtualMachineResource; try { await vmr.RestartAsync(Azure.WaitUntil.Completed, cancellationToken: context.CancellationToken); var iv = await vmr.InstanceViewAsync(); context.TestResultDetails = new TestCaseResultDetails { CustomOut = JsonConvert.SerializeObject(iv), StdOut = "Reboot VM case succeed.", StdErr = "", Succeed = true, }; return; } catch (Exception ex) { // capture the exception into TestResultDetails and continue poll the vm instance view context.TestResultDetails.StdErr = ex.ToString(); } // if the reboot operation failed, try check the VM instance view for 5 minutes var startTime = DateTime.UtcNow; while (true) { var instanceView = await vmr.InstanceViewAsync(cancellationToken: context.CancellationToken); if (instanceView?.Value?.Statuses?.Count > 0 && (instanceView.Value.Statuses[0].DisplayStatus == "Provisioning succeeded" || instanceView.Value.Statuses[0].DisplayStatus == "VM running")) { context.TestResultDetails.Succeed = true; context.TestResultDetails.StdOut = "Reboot VM case succeed."; context.TestResultDetails.CustomOut = JsonConvert.SerializeObject(instanceView); return; } if (DateTime.UtcNow - startTime > TimeSpan.FromMinutes(5)) { // poll timed out, report failure with the extension data context.TestResultDetails.CustomOut = JsonConvert.SerializeObject(instanceView); return; } // wait for 10 seconds before polling again await Task.Delay(10000); } } } } GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestCases/SetupCGroup2TestCase.cs000066400000000000000000000012251500521614600304110ustar00rootroot00000000000000īģŋ// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT using GuestProxyAgentTest.Extensions; using GuestProxyAgentTest.TestScenarios; using GuestProxyAgentTest.Utilities; namespace GuestProxyAgentTest.TestCases { public class SetupCGroup2TestCase : TestCaseBase { public SetupCGroup2TestCase(string testCaseName) : base(testCaseName) { } public override async Task StartAsync(TestCaseExecutionContext context) { context.TestResultDetails = (await RunScriptViaRunCommandV2Async(context, Constants.SETUP_CGROUP2_SCRIPT_NAME, null!, false)).ToTestResultDetails(); } } } 
GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestCases/TCPPortScalabilityCase.cs000066400000000000000000000025241500521614600307260ustar00rootroot00000000000000īģŋ// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT using GuestProxyAgentTest.Extensions; using GuestProxyAgentTest.TestScenarios; namespace GuestProxyAgentTest.TestCases { public class TCPPortScalabilityCase : TestCaseBase { public TCPPortScalabilityCase(bool imdsSecureChannelEnabled) : base("TCPPortScalabilityCase") { ImdsSecureChannelEnabled = imdsSecureChannelEnabled; } private bool ImdsSecureChannelEnabled { get; set; } public override async Task StartAsync(TestCaseExecutionContext context) { context.TestResultDetails = (await RunScriptViaRunCommandV2Async(context, "ConfigTCPPortScalability.ps1", null!, false)).ToTestResultDetails(ConsoleLog); if(!context.TestResultDetails.Succeed) { return; } // reboot var vmr = context.VirtualMachineResource; await vmr.RestartAsync(Azure.WaitUntil.Completed); List<(string, string)> parameterList = new List<(string, string)>(); parameterList.Add(("imdsSecureChannelEnabled", ImdsSecureChannelEnabled.ToString())); context.TestResultDetails = (await RunScriptViaRunCommandV2Async(context, "IMDSPingTest.ps1", parameterList, false)).ToTestResultDetails(ConsoleLog); } } } GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestCases/TestCaseBase.cs000066400000000000000000000125041500521614600267630ustar00rootroot00000000000000īģŋ// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT using Azure.ResourceManager.Compute; using Azure.ResourceManager.Compute.Models; using GuestProxyAgentTest.Models; using GuestProxyAgentTest.Settings; using GuestProxyAgentTest.TestScenarios; using GuestProxyAgentTest.Utilities; using System.Text; namespace GuestProxyAgentTest.TestCases { public enum TestCaseResult { NotStarted, Running, Succeed, Failed, Aborted, } /// /// Base case for each TestCase /// public abstract class TestCaseBase { /// /// Test Case Name /// public string TestCaseName { get; private set; } = null!; public TestCaseResult Result { get; set; } = TestCaseResult.NotStarted; public TestCaseBase(string testCaseName) { TestCaseName = testCaseName; } /// /// Abstract function to start the test case /// /// /// public abstract Task StartAsync(TestCaseExecutionContext context); /// /// Function to run a script through RunCommandV2 on the VM, that can be used in the inherit class /// /// /// /// /// /// if set to true, it will automatically add a parameter named /// the parameter value is base64 encoded blob SAS url, the test script can use it to write customized output info. /// if set to false, it will not add the parameter. 
/// /// protected async Task RunScriptViaRunCommandV2Async(TestCaseExecutionContext context, string scriptFileName, List<(string, string)> parameterList, bool includeCustomJsonOutputSasParam = true) { var testScenarioSetting = context.ScenarioSetting; string custJsonSas = null!; if (includeCustomJsonOutputSasParam) { var custJsonPath = Path.Combine(Path.GetTempPath(), $"{testScenarioSetting.testGroupName}_{testScenarioSetting.testScenarioName}_{TestCaseName}.json"); using (File.CreateText(custJsonPath)) ConsoleLog("Created empty test file for customized json output file."); custJsonSas = StorageHelper.Instance.Upload2SharedBlob(Constants.SHARED_E2E_TEST_OUTPUT_CONTAINER_NAME, custJsonPath, "customOutputJson.json", testScenarioSetting.TestScenarioStorageFolderPrefix); } return await RunCommandRunner.ExecuteRunCommandOnVM(context.VirtualMachineResource, new RunCommandSettingBuilder() .TestScenarioSetting(testScenarioSetting) .RunCommandName(TestCaseName) .ScriptFullPath(Path.Combine(TestSetting.Instance.scriptsFolder, scriptFileName)) , context.CancellationToken , (builder) => builder .CustomOutputSas(custJsonSas) .AddParameters(parameterList)); } protected void ConsoleLog(string message) { Console.WriteLine($"[{TestCaseName}]: " + message); } protected string FormatVMExtensionData(VirtualMachineExtensionData data) { if (data == null) { return "null"; } return string.Format("ProvisioningState: {0}, Publisher: {1}, ExtensionType: {2}, TypeHandlerVersion: {3}, AutoUpgradeMinorVersion: {4}, EnableAutomaticUpgrade: {5}, InstanceView: {6}", data.ProvisioningState, data.Publisher, data.ExtensionType, data.TypeHandlerVersion, data.AutoUpgradeMinorVersion, data.EnableAutomaticUpgrade, FormatVMExtensionInstanceView(data.InstanceView)); } protected string FormatVMExtensionInstanceView(VirtualMachineExtensionInstanceView instanceView) { if (instanceView == null) { return "null"; } return string.Format("Name: {0}, ExtensionType:{1}, ExtensionVersion:{2} Statuses: {3}, Substatuses: {4}", instanceView.Name, instanceView.VirtualMachineExtensionInstanceViewType, instanceView.TypeHandlerVersion , FormatVMInstanceViewStatus(instanceView.Statuses), FormatVMInstanceViewStatus(instanceView.Substatuses)); } protected string FormatVMInstanceViewStatus(IList instanceView) { StringBuilder stringBuilder = new StringBuilder(); foreach (var status in instanceView) { stringBuilder.AppendFormat("Code: {0}, Level: {1}, DisplayStatus: {2}, Message: {3}", status.Code, status.Level, status.DisplayStatus, status.Message); } return stringBuilder.ToString(); } } } GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestMap/000077500000000000000000000000001500521614600236035ustar00rootroot00000000000000GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestMap/AzureLinux3-Arm64-TestGroup.yml000066400000000000000000000005311500521614600313370ustar00rootroot00000000000000groupName: AzureLinux3-Arm64 vmImagePublisher: MicrosoftCBLMariner vmImageOffer: azure-linux-3 vmImageSku: azure-linux-3-arm64 vmImageVersion: latest scenarios: - className: GuestProxyAgentTest.TestScenarios.BVTScenario name: BVTScenario - name: LinuxPackageScenario className: GuestProxyAgentTest.TestScenarios.LinuxPackageScenario GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestMap/AzureLinux3-Fips-TestGroup.yml000066400000000000000000000006761500521614600313610ustar00rootroot00000000000000groupName: AzureLinux3-Fips vmImagePublisher: MicrosoftCBLMariner vmImageOffer: azure-linux-3 vmImageSku: azure-linux-3-gen2-fips vmImageVersion: latest scenarios: - 
className: GuestProxyAgentTest.TestScenarios.BVTScenario name: BVTScenario - name: LinuxPackageScenario className: GuestProxyAgentTest.TestScenarios.LinuxPackageScenario - name: ProxyAgentExtension className: GuestProxyAgentTest.TestScenarios.ProxyAgentExtensionGuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestMap/Mariner2-Fips-TestGroup.yml000066400000000000000000000006721500521614600306430ustar00rootroot00000000000000groupName: Mariner2-Fips vmImagePublisher: microsoftcblmariner vmImageOffer: cbl-mariner vmImageSku: cbl-mariner-2-gen2-fips vmImageVersion: latest scenarios: - className: GuestProxyAgentTest.TestScenarios.BVTScenario name: BVTScenario - name: LinuxPackageScenario className: GuestProxyAgentTest.TestScenarios.LinuxPackageScenario - name: ProxyAgentExtension className: GuestProxyAgentTest.TestScenarios.ProxyAgentExtensionGuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestMap/Redhat90-Arm64-TestGroup.yml000066400000000000000000000004731500521614600305330ustar00rootroot00000000000000groupName: Redhat90-Arm64 vmImagePublisher: RedHat vmImageOffer: rhel-arm64 vmImageSku: 9_0-arm64 vmImageVersion: latest scenarios: - className: GuestProxyAgentTest.TestScenarios.BVTScenario name: BVTScenario - name: LinuxPackageScenario className: GuestProxyAgentTest.TestScenarios.LinuxPackageScenarioGuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestMap/Redhat90-TestGroup.yml000066400000000000000000000004511500521614600276400ustar00rootroot00000000000000groupName: Redhat90 vmImagePublisher: RedHat vmImageOffer: RHEL vmImageSku: 9_0 vmImageVersion: latest scenarios: - className: GuestProxyAgentTest.TestScenarios.BVTScenario name: BVTScenario - name: LinuxPackageScenario className: GuestProxyAgentTest.TestScenarios.LinuxPackageScenarioGuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestMap/Rocky9-TestGroup.yml000066400000000000000000000004651500521614600274450ustar00rootroot00000000000000groupName: Rocky9 vmImagePublisher: resf vmImageOffer: rockylinux-x86_64 vmImageSku: 9-base vmImageVersion: latest scenarios: - className: GuestProxyAgentTest.TestScenarios.BVTScenario name: BVTScenario - name: LinuxPackageScenario className: GuestProxyAgentTest.TestScenarios.LinuxPackageScenarioGuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestMap/Suse15SP4-Arm64-TestGroup.yml000066400000000000000000000005011500521614600305570ustar00rootroot00000000000000groupName: Suse15SP4-Arm64 vmImagePublisher: SUSE vmImageOffer: sles-15-sp4-byos-arm64 vmImageSku: gen2 vmImageVersion: latest scenarios: - className: GuestProxyAgentTest.TestScenarios.BVTScenario name: BVTScenario - name: LinuxPackageScenario className: GuestProxyAgentTest.TestScenarios.LinuxPackageScenarioGuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestMap/Suse15SP4-TestGroup.yml000066400000000000000000000004651500521614600277010ustar00rootroot00000000000000groupName: Suse15SP4 vmImagePublisher: SUSE vmImageOffer: sles-15-sp4-byos vmImageSku: gen2 vmImageVersion: latest scenarios: - className: GuestProxyAgentTest.TestScenarios.BVTScenario name: BVTScenario - name: LinuxPackageScenario className: GuestProxyAgentTest.TestScenarios.LinuxPackageScenarioGuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestMap/Test-Map-Arm64.yml000066400000000000000000000000731500521614600266470ustar00rootroot00000000000000testGroupList: - include: WinClient11-Arm64-TestGroup.ymlGuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestMap/Test-Map-Linux-Arm64.yml000066400000000000000000000003441500521614600277450ustar00rootroot00000000000000testGroupList: - 
include: AzureLinux3-Arm64-TestGroup.yml - include: Ubuntu24-Arm64-TestGroup.yml - include: Ubuntu22-Arm64-TestGroup.yml - include: Redhat90-Arm64-TestGroup.yml - include: Suse15SP4-Arm64-TestGroup.ymlGuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestMap/Test-Map-Linux.yml000066400000000000000000000004721500521614600270600ustar00rootroot00000000000000testGroupList: - include: AzureLinux3-Fips-TestGroup.yml - include: Mariner2-Fips-TestGroup.yml - include: Ubuntu24-TestGroup.yml - include: Ubuntu22-TestGroup.yml - include: Ubuntu20-TestGroup.yml - include: Redhat90-TestGroup.yml - include: Suse15SP4-TestGroup.yml - include: Rocky9-TestGroup.ymlGuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestMap/Test-Map.yml000066400000000000000000000002601500521614600257560ustar00rootroot00000000000000testGroupList: - include: WinServer2019-TestGroup.yml - include: WinServer2022-TestGroup.yml - include: WinServer2025-TestGroup.yml - include: WinClient11-TestGroup.ymlGuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestMap/Ubuntu20-TestGroup.yml000066400000000000000000000005121500521614600277020ustar00rootroot00000000000000groupName: Ubuntu20 vmImagePublisher: canonical vmImageOffer: 0001-com-ubuntu-server-focal vmImageSku: 20_04-lts vmImageVersion: latest scenarios: - className: GuestProxyAgentTest.TestScenarios.BVTScenario name: BVTScenario - name: LinuxPackageScenario className: GuestProxyAgentTest.TestScenarios.LinuxPackageScenarioGuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestMap/Ubuntu22-Arm64-TestGroup.yml000066400000000000000000000005271500521614600306010ustar00rootroot00000000000000groupName: Ubuntu22-Arm64 vmImagePublisher: canonical vmImageOffer: 0001-com-ubuntu-server-jammy vmImageSku: 22_04-lts-arm64 vmImageVersion: latest scenarios: - className: GuestProxyAgentTest.TestScenarios.BVTScenario name: BVTScenario - name: LinuxPackageScenario className: GuestProxyAgentTest.TestScenarios.LinuxPackageScenario GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestMap/Ubuntu22-TestGroup.yml000066400000000000000000000006641500521614600277140ustar00rootroot00000000000000groupName: Ubuntu22 vmImagePublisher: canonical vmImageOffer: 0001-com-ubuntu-server-jammy vmImageSku: 22_04-lts-gen2 vmImageVersion: latest scenarios: - className: GuestProxyAgentTest.TestScenarios.BVTScenario name: BVTScenario - name: LinuxPackageScenario className: GuestProxyAgentTest.TestScenarios.LinuxPackageScenario - name: ProxyAgentExtension className: GuestProxyAgentTest.TestScenarios.ProxyAgentExtensionGuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestMap/Ubuntu24-Arm64-TestGroup.yml000066400000000000000000000005071500521614600306010ustar00rootroot00000000000000groupName: Ubuntu24-Arm64 vmImagePublisher: canonical vmImageOffer: ubuntu-24_04-lts vmImageSku: server-arm64 vmImageVersion: latest scenarios: - className: GuestProxyAgentTest.TestScenarios.BVTScenario name: BVTScenario - name: LinuxPackageScenario className: GuestProxyAgentTest.TestScenarios.LinuxPackageScenarioGuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestMap/Ubuntu24-TestGroup.yml000066400000000000000000000006361500521614600277150ustar00rootroot00000000000000groupName: Ubuntu24 vmImagePublisher: canonical vmImageOffer: ubuntu-24_04-lts vmImageSku: server vmImageVersion: latest scenarios: - className: GuestProxyAgentTest.TestScenarios.BVTScenario name: BVTScenario - name: LinuxPackageScenario className: GuestProxyAgentTest.TestScenarios.LinuxPackageScenario - name: ProxyAgentExtension className: 
GuestProxyAgentTest.TestScenarios.ProxyAgentExtensionGuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestMap/WinClient11-Arm64-TestGroup.yml000066400000000000000000000005321500521614600312050ustar00rootroot00000000000000groupName: WinClient11-Arm64 vmImagePublisher: microsoftwindowsdesktop vmImageOffer: windows11preview-arm64 vmImageSku: win11-24h2-ent vmImageVersion: latest scenarios: - name: BVTScenario className: GuestProxyAgentTest.TestScenarios.BVTScenario - name: BugFixesScenario className: GuestProxyAgentTest.TestScenarios.BugFixesScenario GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestMap/WinClient11-TestGroup.yml000066400000000000000000000006531500521614600303220ustar00rootroot00000000000000groupName: WinClient11 vmImagePublisher: microsoftwindowsdesktop vmImageOffer: windows-11 vmImageSku: win11-24h2-ent vmImageVersion: latest scenarios: - name: BVTScenario className: GuestProxyAgentTest.TestScenarios.BVTScenario - name: BugFixesScenario className: GuestProxyAgentTest.TestScenarios.BugFixesScenario - name: ProxyAgentExtension className: GuestProxyAgentTest.TestScenarios.ProxyAgentExtension GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestMap/WinServer2019-TestGroup.yml000066400000000000000000000006601500521614600305220ustar00rootroot00000000000000groupName: WinServer2019 vmImagePublisher: MicrosoftWindowsServer vmImageOffer: WindowsServer vmImageSku: 2019-Datacenter vmImageVersion: latest scenarios: - name: BVTScenario className: GuestProxyAgentTest.TestScenarios.BVTScenario - name: BugFixesScenario className: GuestProxyAgentTest.TestScenarios.BugFixesScenario - name: ProxyAgentExtension className: GuestProxyAgentTest.TestScenarios.ProxyAgentExtension GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestMap/WinServer2022-TestGroup.yml000066400000000000000000000006571500521614600305220ustar00rootroot00000000000000groupName: WinServer2022 vmImagePublisher: MicrosoftWindowsServer vmImageOffer: WindowsServer vmImageSku: 2022-Datacenter vmImageVersion: latest scenarios: - className: GuestProxyAgentTest.TestScenarios.BVTScenario name: BVTScenario - name: BugFixesScenario className: GuestProxyAgentTest.TestScenarios.BugFixesScenario - name: ProxyAgentExtension className: GuestProxyAgentTest.TestScenarios.ProxyAgentExtensionGuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestMap/WinServer2025-TestGroup.yml000066400000000000000000000006621500521614600305210ustar00rootroot00000000000000groupName: WinServer2025 vmImagePublisher: MicrosoftWindowsServer vmImageOffer: WindowsServer vmImageSku: 2025-datacenter-g2 vmImageVersion: latest scenarios: - className: GuestProxyAgentTest.TestScenarios.BVTScenario name: BVTScenario - name: BugFixesScenario className: GuestProxyAgentTest.TestScenarios.BugFixesScenario - name: ProxyAgentExtension className: GuestProxyAgentTest.TestScenarios.ProxyAgentExtensionGuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestScenarios/000077500000000000000000000000001500521614600250145ustar00rootroot00000000000000GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestScenarios/BVTScenario.cs000066400000000000000000000026521500521614600274670ustar00rootroot00000000000000īģŋ// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT using GuestProxyAgentTest.TestCases; using GuestProxyAgentTest.Utilities; namespace GuestProxyAgentTest.TestScenarios { public class BVTScenario : TestScenarioBase { public override void TestScenarioSetup() { var secureChannelEnabled = false; if (!Constants.IS_WINDOWS()) { AddTestCase(new 
SetupCGroup2TestCase("SetupCGroup2")); AddTestCase(new RebootVMCase("RebootVMCaseAfterSetupCGroup2")); } AddTestCase(new InstallOrUpdateGuestProxyAgentCase()); AddTestCase(new GuestProxyAgentValidationCase()); if (Constants.IS_WINDOWS()) { AddTestCase(new GuestProxyAgentLoadedModulesValidationCase()); } else { // do not enable proxy agent for Windows VM, // it will add GPA VM Extension and overwrite the private GPA package AddTestCase(new EnableProxyAgentCase()); secureChannelEnabled = true; } AddTestCase(new IMDSPingTestCase("IMDSPingTestBeforeReboot", secureChannelEnabled)); AddTestCase(new RebootVMCase("RebootVMCaseAfterInstallOrUpdateGuestProxyAgent")); AddTestCase(new IMDSPingTestCase("IMDSPingTestAfterReboot", secureChannelEnabled)); } } } GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestScenarios/BugFixesScenario.cs000066400000000000000000000007421500521614600305460ustar00rootroot00000000000000īģŋ// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT using GuestProxyAgentTest.TestCases; namespace GuestProxyAgentTest.TestScenarios { public class BugFixesScenario : TestScenarioBase { public override void TestScenarioSetup() { AddTestCase(new InstallOrUpdateGuestProxyAgentCase()); AddTestCase(new GuestProxyAgentValidationCase()); AddTestCase(new TCPPortScalabilityCase(false)); } } }GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestScenarios/LinuxPackageScenario.cs000066400000000000000000000015651500521614600314110ustar00rootroot00000000000000īģŋ// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT using GuestProxyAgentTest.TestCases; namespace GuestProxyAgentTest.TestScenarios { public class LinuxPackageScenario : TestScenarioBase { public override void TestScenarioSetup() { AddTestCase(new SetupCGroup2TestCase("SetupCGroup2")); AddTestCase(new RebootVMCase("RebootVMCaseAfterSetupCGroup2")); AddTestCase(new InstallOrUpdateGuestProxyAgentPackageCase()); AddTestCase(new GuestProxyAgentValidationCase()); AddTestCase(new EnableProxyAgentCase()); AddTestCase(new IMDSPingTestCase("IMDSPingTestBeforeReboot", true)); AddTestCase(new RebootVMCase("RebootVMCaseAfterInstallOrUpdateGuestProxyAgent")); AddTestCase(new IMDSPingTestCase("IMDSPingTestAfterReboot", true)); } } } GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestScenarios/ProxyAgentExtension.cs000066400000000000000000000056061500521614600313470ustar00rootroot00000000000000īģŋ// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT using GuestProxyAgentTest.TestCases; using GuestProxyAgentTest.Utilities; using System.Diagnostics; using System.IO.Compression; namespace GuestProxyAgentTest.TestScenarios { public class ProxyAgentExtension : TestScenarioBase { public override void TestScenarioSetup() { string zipFile = Settings.TestSetting.Instance.zipFilePath; string withoutExt = Path.GetFileNameWithoutExtension(zipFile); string extractPath = Path.Combine(Path.GetDirectoryName(zipFile), withoutExt); string proxyAgentVersion = ""; string exePath = ""; try { ZipFile.ExtractToDirectory(zipFile, extractPath); Console.WriteLine("Extraction successful!"); } catch (Exception ex) { Console.WriteLine($"An error occurred: {ex.Message}"); } if (!Constants.IS_WINDOWS()) { AddTestCase(new SetupCGroup2TestCase("SetupCGroup2")); AddTestCase(new RebootVMCase("RebootVMCaseAfterSetupCGroup2")); AddTestCase(new AddLinuxVMExtensionCase("AddLinuxVMExtensionCase")); AddTestCase(new EnableProxyAgentCase()); exePath = extractPath + "/ProxyAgent/ProxyAgent/azure-proxy-agent"; } else { 
EnableProxyAgentForNewVM = true; exePath = extractPath + "\\ProxyAgent\\ProxyAgent\\GuestProxyAgent.exe"; } var process = new Process() { StartInfo = new ProcessStartInfo { FileName = exePath, Arguments = "--version", RedirectStandardOutput = true, RedirectStandardError = true, UseShellExecute = false, CreateNoWindow = true, } }; process.Start(); proxyAgentVersion = process.StandardOutput.ReadToEnd(); process.WaitForExit(); // Passing in 0 version number for the first validation case string proxyAgentVersionBeforeUpdate = "0"; AddTestCase(new GuestProxyAgentExtensionValidationCase("GuestProxyAgentExtensionValidationCaseBeforeUpdate", proxyAgentVersionBeforeUpdate)); AddTestCase(new InstallOrUpdateGuestProxyAgentExtensionCase()); AddTestCase(new GuestProxyAgentExtensionValidationCase("GuestProxyAgentExtensionValidationCaseAfterUpdate", proxyAgentVersion)); AddTestCase(new IMDSPingTestCase("IMDSPingTestBeforeReboot", true)); AddTestCase(new RebootVMCase("RebootVMCaseAfterUpdateGuestProxyAgentExtension")); AddTestCase(new IMDSPingTestCase("IMDSPingTestAfterReboot", true)); } } } GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/TestScenarios/TestScenarioBase.cs000066400000000000000000000362171500521614600305520ustar00rootroot00000000000000īģŋ// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT using Azure.ResourceManager.Compute; using GuestProxyAgentTest.Extensions; using GuestProxyAgentTest.Models; using GuestProxyAgentTest.Settings; using GuestProxyAgentTest.TestCases; using GuestProxyAgentTest.Utilities; using System.Diagnostics; namespace GuestProxyAgentTest.TestScenarios { /// /// Class for trigger each Test Scenario /// public abstract class TestScenarioBase { private TestScenarioSetting _testScenarioSetting = null!; private VMBuilder _vmBuilder = null!; private JunitTestResultBuilder _junitTestResultBuilder = null!; private List _testCases = new List(); protected bool EnableProxyAgentForNewVM { get; set; } public TestScenarioBase() { TestScenarioSetup(); } public TestScenarioBase TestScenarioSetting(TestScenarioSetting testScenarioSetting) { this._testScenarioSetting = testScenarioSetting; this._vmBuilder = new VMBuilder().LoadTestCaseSetting(testScenarioSetting); return this; } public TestScenarioBase JUnitTestResultBuilder(JunitTestResultBuilder junitTestResultBuilder) { this._junitTestResultBuilder = junitTestResultBuilder; return this; } /// /// Abstract method for sub class(TestScenario) to set up its scenario, including add test case or others settings /// public abstract void TestScenarioSetup(); /// /// Add test case for the scenario /// /// protected void AddTestCase(TestCaseBase testCase) { this._testCases.Add(testCase); } private string LogPrefix { get { return "Test Group: " + _testScenarioSetting.testGroupName + ", Test Scenario: " + _testScenarioSetting.testScenarioName + ": "; } } protected void ConsoleLog(string msg) { Console.WriteLine(LogPrefix + msg); } protected void PreCheck() { if (_testCases.Count == 0) { throw new Exception("Test cases list is empty."); } if (_testScenarioSetting == null) { throw new Exception("Test scenario setting is not set."); } if (_junitTestResultBuilder == null) { throw new Exception("JUnit test result builder is not set"); } if (_vmBuilder == null) { throw new Exception("VM builder is not set"); } } /// /// The template workflow for start a test scenario: /// 1. build VM /// 2. run the test cases one by one /// 3. collect GALogs zip /// 4. write the test result to Junit format. /// 5. 
save Logs including each test case run and collect GALogs zip /// /// /// /// public async Task StartAsync(TestScenarioStatusDetails testScenarioStatusDetails) { PreCheck(); try { // Create a cancellation token source that will be used to cancel the running test scenario/cases CancellationTokenSource cancellationTokenSource = new CancellationTokenSource(); await DoStartAsync(testScenarioStatusDetails, cancellationTokenSource.Token).TimeoutAfter(_testScenarioSetting.testScenarioTimeoutMilliseconds, cancellationTokenSource); } catch (Exception ex) { ConsoleLog($"Test Scenario {_testScenarioSetting.testScenarioName} Exception: {ex.Message}."); // set running test cases to failed // set not started test cases to aborted ex.UpdateTestCaseResults(_testCases, _junitTestResultBuilder, _testScenarioSetting.testScenarioName); } finally { try { await CollectGALogsOnVMAsync(); } catch (Exception ex) { Console.WriteLine("Collect GA Logs error: " + ex.Message); } try { ConsoleLog("Cleanup generated Azure Resources."); VMHelper.Instance.CleanupTestResources(_testScenarioSetting); } catch (Exception ex) { Console.WriteLine("Cleanup azure resources exception: " + ex.Message); } } } private async Task DoStartAsync(TestScenarioStatusDetails testScenarioStatusDetails, CancellationToken cancellationToken) { try { ConsoleLog("Running test."); testScenarioStatusDetails.Status = ScenarioTestStatus.Running; VirtualMachineResource vmr; Stopwatch sw = Stopwatch.StartNew(); var vmCreateTestName = "CreateVM"; try { vmr = await _vmBuilder.Build(this.EnableProxyAgentForNewVM, cancellationToken); ConsoleLog("VM Create succeed"); sw.Stop(); _junitTestResultBuilder.AddSuccessTestResult(_testScenarioSetting.testScenarioName, vmCreateTestName, "VM Create succeed", "", sw.ElapsedMilliseconds); } catch (Exception ex) { // if the VM Creation operation failed, try check the VM instance view for 5 minutes var startTime = DateTime.UtcNow; while (true) { vmr = await _vmBuilder.GetVirtualMachineResource(); var instanceView = await vmr.InstanceViewAsync(cancellationToken: cancellationToken); if (instanceView?.Value?.Statuses?.Count > 0 && (instanceView.Value.Statuses[0].DisplayStatus == "Provisioning succeeded" || instanceView.Value.Statuses[0].DisplayStatus == "VM running")) { ConsoleLog("VM Create succeed"); sw.Stop(); _junitTestResultBuilder.AddSuccessTestResult(_testScenarioSetting.testScenarioName, vmCreateTestName, "VM Create succeed", "", sw.ElapsedMilliseconds); break; } if (DateTime.UtcNow - startTime > TimeSpan.FromMinutes(5)) { // poll timed out, rethrow the exception sw.Stop(); _junitTestResultBuilder.AddFailureTestResult(testScenarioStatusDetails.ScenarioName, vmCreateTestName, "", ex.Message + ex.StackTrace ?? 
"", "", sw.ElapsedMilliseconds); throw; } // wait for 10 seconds before polling again await Task.Delay(10000); } } ConsoleLog("Running scenario test: " + _testScenarioSetting.testScenarioName); await ScenarioTestAsync(vmr, testScenarioStatusDetails, cancellationToken); } catch (Exception ex) { testScenarioStatusDetails.ErrorMessage = ex.Message; testScenarioStatusDetails.Result = ScenarioTestResult.Failed; ConsoleLog("Exception occurs: " + ex.Message); } testScenarioStatusDetails.Status = ScenarioTestStatus.Completed; ConsoleLog("Test scenario run finished."); } private async Task ScenarioTestAsync(VirtualMachineResource vmr, TestScenarioStatusDetails testScenarioStatusDetails, CancellationToken cancellationToken) { testScenarioStatusDetails.Result = ScenarioTestResult.Succeed; // always running all the cases inside scenario foreach (var testCase in _testCases) { cancellationToken.ThrowIfCancellationRequested(); if (cancellationToken.IsCancellationRequested) { ConsoleLog($"Test case {testCase.TestCaseName} is cancelled."); break; } TestCaseExecutionContext context = new TestCaseExecutionContext(vmr, _testScenarioSetting, cancellationToken); Stopwatch sw = Stopwatch.StartNew(); try { testCase.Result = TestCaseResult.Running; await testCase.StartAsync(context); sw.Stop(); context.TestResultDetails .DownloadContentIfFromBlob() .WriteJUnitTestResult(_junitTestResultBuilder, _testScenarioSetting.testScenarioName, testCase.TestCaseName, sw.ElapsedMilliseconds); if (!context.TestResultDetails.Succeed) { testScenarioStatusDetails.FailedCases.Add(testCase.TestCaseName); testScenarioStatusDetails.Result = ScenarioTestResult.Failed; } } catch (Exception ex) { var errorMessage = $"test case: {testCase.TestCaseName} failed with exception: message: {ex.Message}, stack trace: {ex.StackTrace}"; testScenarioStatusDetails.FailedCases.Add(testCase.TestCaseName); testScenarioStatusDetails.Result = ScenarioTestResult.Failed; context.TestResultDetails.Succeed = false; context.TestResultDetails.StdErr = errorMessage; context.TestResultDetails.FromBlob = false; sw.Stop(); ConsoleLog($"Scenario case {testCase.TestCaseName} exception: {ex.Message}, stack trace: {ex.StackTrace}"); _junitTestResultBuilder.AddFailureTestResult(_testScenarioSetting.testScenarioName, testCase.TestCaseName, "", errorMessage, "", sw.ElapsedMilliseconds); } finally { testCase.Result = context.TestResultDetails.Succeed ? TestCaseResult.Succeed : TestCaseResult.Failed; ConsoleLog($"Scenario case {testCase.TestCaseName} finished with result: {(context.TestResultDetails.Succeed ? 
"Succeed" : "Failed")} and duration: " + sw.ElapsedMilliseconds + "ms"); SaveResultFile(context.TestResultDetails.CustomOut, $"TestCases/{testCase.TestCaseName}", "customOut.txt", context.TestResultDetails.FromBlob); SaveResultFile(context.TestResultDetails.StdErr, $"TestCases/{testCase.TestCaseName}", "stdErr.txt", context.TestResultDetails.FromBlob); SaveResultFile(context.TestResultDetails.StdOut, $"TestCases/{testCase.TestCaseName}", "stdOut.txt", context.TestResultDetails.FromBlob); } } } private async Task CollectGALogsOnVMAsync() { ConsoleLog("Collecting GA logs on VM."); var vmr = await _vmBuilder.GetVirtualMachineResource(); var logZipPath = Path.Combine(Path.GetTempPath(), _testScenarioSetting.testGroupName + "_" + _testScenarioSetting.testScenarioName + "_VMAgentLogs.zip"); using (File.CreateText(logZipPath)) { ConsoleLog("Created empty VMAgentLogs.zip file."); } var logZipSas = StorageHelper.Instance.Upload2SharedBlob(Constants.SHARED_E2E_TEST_OUTPUT_CONTAINER_NAME, logZipPath, _testScenarioSetting.TestScenarioStorageFolderPrefix); var collectGALogOutput = await RunCommandRunner.ExecuteRunCommandOnVM(vmr, new RunCommandSettingBuilder() .TestScenarioSetting(_testScenarioSetting) .RunCommandName("CollectInVMGALog") .ScriptFullPath(Path.Combine(TestSetting.Instance.scriptsFolder, Constants.COLLECT_INVM_GA_LOG_SCRIPT_NAME)) , CancellationToken.None , (builder) => { return builder.AddParameter("logZipSas", Convert.ToBase64String(System.Text.Encoding.UTF8.GetBytes(logZipSas))); }); collectGALogOutput.CustomOut = logZipSas; ConsoleLog("GA log zip collected."); SaveResultFile(collectGALogOutput.StdOut, "collectLogZip", "stdOut.txt"); SaveResultFile(collectGALogOutput.StdErr, "collectLogZip", "stdErr.txt"); SaveResultFile(collectGALogOutput.CustomOut, "collectLogZip", "GALogs.zip"); ConsoleLog("GA log zip saved."); } private void SaveResultFile(string fileContentOrSas, string parentFolderName, string fileName, bool isFromSas = true) { var fileFolder = Path.Combine(TestSetting.Instance.testResultFolder, _testScenarioSetting.testGroupName, _testScenarioSetting.testScenarioName, parentFolderName); Directory.CreateDirectory(fileFolder); var filePath = Path.Combine(fileFolder, fileName); if (isFromSas) { TestCommonUtilities.DownloadFile(fileContentOrSas, filePath, ConsoleLog); } else { File.WriteAllText(filePath, fileContentOrSas); } } } /// /// Test case execution context class /// container VirtualMachineResource and TestScenarioSetting /// VirtualMachineResource is the created the Azure VM resource for the test scenario /// public class TestCaseExecutionContext { private VirtualMachineResource _vmr = null!; private TestScenarioSetting _testScenarioSetting = null!; private CancellationToken _cancellationToken; /// /// TestResultDetails for a particular test case /// public TestCaseResultDetails TestResultDetails { get; set; } = new TestCaseResultDetails(); public TestScenarioSetting ScenarioSetting { get { return _testScenarioSetting; } } /// /// the Azure Virtual Machine Resource created for running E2E test /// public VirtualMachineResource VirtualMachineResource { get { return _vmr; } } public CancellationToken CancellationToken { get { return _cancellationToken; } } public TestCaseExecutionContext(VirtualMachineResource vmr, TestScenarioSetting testScenarioSetting, CancellationToken cancellationToken) { _vmr = vmr; _testScenarioSetting = testScenarioSetting; _cancellationToken = cancellationToken; } } } 
GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/Utilities/000077500000000000000000000000001500521614600242015ustar00rootroot00000000000000GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/Utilities/CertificateUtility.cs000066400000000000000000000132401500521614600303360ustar00rootroot00000000000000īģŋ// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT using System; using System.Security.Cryptography.X509Certificates; using System.Security.Cryptography; namespace GuestProxyAgentTest.Utilities { /// /// Utility for certificate operations /// public static class CertificateUtility { /// /// check if the certificate was installed /// /// store name of the cert /// store location of the cert /// thumb print of the cert /// public static bool IsCertInstalled(StoreName storeName, StoreLocation storeLocation, string thumbPrint) { string normalizedThumbPrint = thumbPrint.Replace(" ", "").ToUpper(); var store = new X509Store(storeName, storeLocation); store.Open(OpenFlags.ReadOnly); var findResult = store.Certificates.Find(X509FindType.FindByThumbprint, normalizedThumbPrint, false); store.Close(); var found = findResult.Count >= 1; return found; } /// /// Get certificate by the thumbprint /// /// /// /// /// public static X509Certificate2 GetCertificate(string thumbPrint, StoreName storeName, StoreLocation storeLocation) { X509Store store = new X509Store(storeName, storeLocation); try { store.Open(OpenFlags.ReadOnly); X509Certificate2Collection certs = store.Certificates.Find(X509FindType.FindByThumbprint, thumbPrint, false); if (certs.Count <= 0) { return null; } else { return certs[0]; } } catch (Exception e) { Console.WriteLine("GetCertificate error: " + e.Message); } finally { if (store != null) { store.Close(); } } return null; } /// /// Get certificate with check private key access /// /// /// /// /// public static X509Certificate2? GetCertificate(string thumbPrint, StoreName storeName, bool requirePrivateKeyAccess = false) { Console.WriteLine("getting cert with thumbprint: " + thumbPrint); var cert = GetCertificate(thumbPrint, storeName, StoreLocation.CurrentUser); if (cert != null) { Console.WriteLine("Found cert on current user, " + cert.Thumbprint); } if (cert != null && (!requirePrivateKeyAccess || CanAccessPrivateKey(cert))) { return cert; } cert = GetCertificate(thumbPrint, storeName, StoreLocation.LocalMachine); if (cert != null) { Console.WriteLine("Found cert on local machine, " + cert.Thumbprint); } return (cert != null && !(requirePrivateKeyAccess && !CanAccessPrivateKey(cert))) ? cert : null; } /// /// Get certificate with check private key access /// /// /// /// public static X509Certificate2? GetCertificate(string certNameInKV, bool requirePrivateKeyAccess = false) { Console.WriteLine("Getting cert with name in KeyVault: " + certNameInKV); var based64EncodedCert = Environment.GetEnvironmentVariable(certNameInKV); if (string.IsNullOrEmpty(based64EncodedCert)) { Console.WriteLine("No cert found in environment variable: " + certNameInKV); return null; } var cert = new X509Certificate2(Convert.FromBase64String(based64EncodedCert)); if (cert != null) { Console.WriteLine("Found cert on from environment variable, " + cert.Thumbprint); } return (cert != null && !(requirePrivateKeyAccess && !CanAccessPrivateKey(cert))) ? cert : null; } private static bool CanAccessPrivateKey(X509Certificate2 cert) { if (null == cert) { return false; } try { Console.WriteLine("check cert private key, has private key: " + cert.HasPrivateKey); //a. 
Has private key doesn't mean we can access the private key (check by null != cert.PrivateKey) //b. PrivateKey can be get doesn't mean the information inside didn't corrupt already (check by cert.PrivateKey.KeySize > 0) return cert.HasPrivateKey && null != cert.PrivateKey && cert.PrivateKey.KeySize > 0; } catch (CryptographicException ex) { //no permission to access the certificate or privacy key Console.WriteLine("check cert private key error: " + ex.Message); return false; } } } } GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/Utilities/Constants.cs000066400000000000000000000065241500521614600265130ustar00rootroot00000000000000īģŋ// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT using Azure.Core; using Azure.ResourceManager.Network.Models; using System; using System.Collections.Generic; using System.Linq; using System.Runtime.InteropServices; using System.Text; using System.Threading.Tasks; namespace GuestProxyAgentTest.Utilities { /// /// Constants used in the project /// public static class Constants { public static readonly string SHARED_SCRIPTS_CONTAINER_NAME = "scripts"; public static readonly string SHARED_MSI_CONTAINER_NAME = "guestproxyagentmsis"; public static readonly string SHARED_E2E_TEST_OUTPUT_CONTAINER_NAME = "e2etestoutputs"; public static readonly string RUNCOMMAND_OUTPUT_FILE_NAME = "runCommandOutput.txt"; public static readonly string RUNCOMMAND_ERROR_OUTPUT_FILE_NAME = "runCommandErr.txt"; public static readonly string RUNCOMMAND_CUSTOM_OUTPUT_SAS_PARAMETER_NAME = "customOutputJsonSAS"; public static readonly string COULD_CLEANUP_TAG_NAME = "CouldCleanup"; public const string INSTALL_LINUX_GUEST_PROXY_AGENT_PACKAGE_SCRIPT_NAME = "InstallGuestProxyAgentPackage.sh"; public static readonly string GUEST_PROXY_AGENT_E2E_ACCESS_TOKEN_ENV = "GuestProxyAgentE2EAccessToken"; public static readonly string GUEST_PROXY_AGENT_E2E_ACCESS_TOKEN_STORAGE_ACCOUNT_ENV = "GuestProxyAgentE2EAccessTokenForStorageAccount"; public static readonly string INSTALL_GUEST_PROXY_AGENT_SCRIPT_NAME; public static readonly string COLLECT_INVM_GA_LOG_SCRIPT_NAME; public static readonly string GUEST_PROXY_AGENT_VALIDATION_SCRIPT_NAME; public static readonly string IMDS_PING_TEST_SCRIPT_NAME; public static readonly string SETUP_CGROUP2_SCRIPT_NAME; public static readonly string GUEST_PROXY_AGENT_EXTENSION_VALIDATION_SCRIPT_NAME; public static readonly string INSTALL_GUEST_PROXY_AGENT_EXTENSION_SCRIPT_NAME; static Constants() { if (IS_WINDOWS()) { INSTALL_GUEST_PROXY_AGENT_SCRIPT_NAME = "InstallGuestProxyAgent.ps1"; COLLECT_INVM_GA_LOG_SCRIPT_NAME = "CollectInVMGALog.ps1"; GUEST_PROXY_AGENT_VALIDATION_SCRIPT_NAME = "GuestProxyAgentValidation.ps1"; IMDS_PING_TEST_SCRIPT_NAME = "IMDSPingTest.ps1"; GUEST_PROXY_AGENT_EXTENSION_VALIDATION_SCRIPT_NAME = "GuestProxyAgentExtensionValidation.ps1"; INSTALL_GUEST_PROXY_AGENT_EXTENSION_SCRIPT_NAME = "InstallGuestProxyAgentExtension.ps1"; } else { INSTALL_GUEST_PROXY_AGENT_SCRIPT_NAME = "InstallGuestProxyAgent.sh"; COLLECT_INVM_GA_LOG_SCRIPT_NAME = "CollectInVMGALog.sh"; GUEST_PROXY_AGENT_VALIDATION_SCRIPT_NAME = "GuestProxyAgentValidation.sh"; IMDS_PING_TEST_SCRIPT_NAME = "IMDSPingTest.sh"; SETUP_CGROUP2_SCRIPT_NAME = "SetupCGroup2.sh"; GUEST_PROXY_AGENT_EXTENSION_VALIDATION_SCRIPT_NAME = "GuestProxyAgentExtensionValidation.sh"; INSTALL_GUEST_PROXY_AGENT_EXTENSION_SCRIPT_NAME = "InstallGuestProxyAgentExtension.sh"; } } public static bool IS_WINDOWS() { return RuntimeInformation.IsOSPlatform(OSPlatform.Windows); } } } 
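// Note (descriptive, not repository code): the static constructor above resolves each script-name constant to its
// platform-specific file once, so callers reference a single constant on both OSes. For example,
// Constants.GUEST_PROXY_AGENT_VALIDATION_SCRIPT_NAME is "GuestProxyAgentValidation.ps1" on Windows and
// "GuestProxyAgentValidation.sh" on Linux, as consumed by GuestProxyAgentValidationCase above.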
GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/Utilities/JUnitTestResultBuilder.cs000066400000000000000000000251741500521614600311400ustar00rootroot00000000000000īģŋ// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT using System.Net; using System.Xml; namespace GuestProxyAgentTest.Utilities { /// /// JUnit test result builder /// public class JunitTestResultBuilder { // key is test suit name, value is junit-doc, junit-suite // azure public publish test result task doesn't display result correctly, if result has multiple test suites // work around is create one result file per test suite (test scenario in E2E) private readonly Dictionary testSuiteMap = new Dictionary(); private readonly string testGroupName = null!; private readonly string testResultGroupFolder = null!; internal string testResultFolder = null!; public JunitTestResultBuilder(string testResultFolder, string testGroupName) { this.testGroupName = testGroupName; this.testResultGroupFolder = Path.Combine(testResultFolder, testGroupName); Directory.CreateDirectory(this.testResultGroupFolder); this.testResultFolder = testResultFolder; } /// /// Add success test result, it will merge stdoutput with customoutput. /// /// /// /// /// /// public JunitTestResultBuilder AddSuccessTestResult(string testScenarioName, string testName, string stdOut, string customOut, long durationInMilliseconds = 0) { var stdOutMessage = "Std output:" + Environment.NewLine + stdOut + Environment.NewLine + "Custom output:" + Environment.NewLine + customOut; return AddSuccessTestResult(testScenarioName, testName, stdOutMessage, durationInMilliseconds); } /// /// Add succeed test result /// /// /// /// /// public JunitTestResultBuilder AddSuccessTestResult(string testScenarioName, string testName, string stdOutMessage, long durationInMilliseconds = 0) { lock (this) { XmlDocument doc = null!; if (!testSuiteMap.ContainsKey(testScenarioName)) { doc = new XmlDocument(); var testsuites = doc.CreateElement("testsuites"); doc.AppendChild(testsuites); XmlElement testSuiteElement = doc.CreateElement("testsuite"); testSuiteElement.SetAttribute("name", testGroupName + "." + testScenarioName); testSuiteElement.SetAttribute("tests", "0"); testSuiteElement.SetAttribute("errors", "0"); testSuiteElement.SetAttribute("failures", "0"); testSuiteElement.SetAttribute("skipped", "0"); testSuiteElement.SetAttribute("timestamp", DateTime.UtcNow.ToString("yyyy-MM-ddTHH:mm:ss")); testsuites.AppendChild(testSuiteElement); testSuiteMap[testScenarioName] = (doc, testSuiteElement); } doc = testSuiteMap[testScenarioName].Item1; var testSuite = testSuiteMap[testScenarioName].Item2; testSuite.SetAttribute("tests", StringAdd(testSuite.GetAttribute("tests"), 1)); XmlElement successTestCaseElement = doc.CreateElement("testcase"); successTestCaseElement.SetAttribute("name", testName); successTestCaseElement.SetAttribute("classname", testGroupName + "." 
+ testScenarioName); successTestCaseElement.SetAttribute("time", ((double)durationInMilliseconds / 1000).ToString()); testSuite.AppendChild(successTestCaseElement); XmlElement systemOutElement = doc.CreateElement("system-out"); systemOutElement.InnerText = stdOutMessage; successTestCaseElement.AppendChild(systemOutElement); } return this; } /// /// Add failure test result /// /// /// /// /// /// /// public JunitTestResultBuilder AddFailureTestResult(string testScenarioName, string testName, string stdOutMessage, string stdErrMessage, string customOutput, long durationInMilliseconds = 0) { lock (this) { XmlDocument doc = null!; if (!testSuiteMap.ContainsKey(testScenarioName)) { doc = new XmlDocument(); var testsuites = doc.CreateElement("testsuites"); doc.AppendChild(testsuites); XmlElement testSuiteElement = doc.CreateElement("testsuite"); testSuiteElement.SetAttribute("name", testGroupName + "." + testScenarioName); testSuiteElement.SetAttribute("tests", "0"); testSuiteElement.SetAttribute("errors", "0"); testSuiteElement.SetAttribute("failures", "0"); testSuiteElement.SetAttribute("skipped", "0"); testSuiteElement.SetAttribute("timestamp", DateTime.UtcNow.ToString("yyyy-MM-ddTHH:mm:ss")); testsuites.AppendChild(testSuiteElement); testSuiteMap[testScenarioName] = (doc, testSuiteElement); } doc = testSuiteMap[testScenarioName].Item1; var testSuite = testSuiteMap[testScenarioName].Item2; testSuite.SetAttribute("tests", StringAdd(testSuite.GetAttribute("tests"), 1)); testSuite.SetAttribute("failures", StringAdd(testSuite.GetAttribute("failures"), 1)); XmlElement failedTestCaseElement = doc.CreateElement("testcase"); failedTestCaseElement.SetAttribute("name", testName); failedTestCaseElement.SetAttribute("classname", testGroupName + "." + testScenarioName); failedTestCaseElement.SetAttribute("time", ((double)durationInMilliseconds / 1000).ToString()); testSuite.AppendChild(failedTestCaseElement); XmlElement systemOutElement = doc.CreateElement("system-out"); systemOutElement.InnerText = stdOutMessage; failedTestCaseElement.AppendChild(systemOutElement); XmlElement systemErrElement = doc.CreateElement("system-err"); systemErrElement.InnerText = stdErrMessage; failedTestCaseElement.AppendChild(systemErrElement); XmlElement failureElement = doc.CreateElement("failure"); failureElement.SetAttribute("message", "Std Error Output: " + Environment.NewLine + stdErrMessage + Environment.NewLine + Environment.NewLine + "Custom output: " + Environment.NewLine + customOutput); failureElement.SetAttribute("type", "AssertionException"); failedTestCaseElement.AppendChild(failureElement); } return this; } public JunitTestResultBuilder AddAbortedTestResult(string testScenarioName, string testName, string message) { lock (this) { XmlDocument doc = null!; if (!testSuiteMap.ContainsKey(testScenarioName)) { doc = new XmlDocument(); var testsuites = doc.CreateElement("testsuites"); doc.AppendChild(testsuites); XmlElement testSuiteElement = doc.CreateElement("testsuite"); testSuiteElement.SetAttribute("name", testGroupName + "." 
+ testScenarioName); testSuiteElement.SetAttribute("tests", "0"); testSuiteElement.SetAttribute("errors", "0"); testSuiteElement.SetAttribute("failures", "0"); testSuiteElement.SetAttribute("skipped", "0"); testSuiteElement.SetAttribute("timestamp", DateTime.UtcNow.ToString("yyyy-MM-ddTHH:mm:ss")); testsuites.AppendChild(testSuiteElement); testSuiteMap[testScenarioName] = (doc, testSuiteElement); } doc = testSuiteMap[testScenarioName].Item1; var testSuite = testSuiteMap[testScenarioName].Item2; testSuite.SetAttribute("tests", StringAdd(testSuite.GetAttribute("tests"), 1)); testSuite.SetAttribute("skipped", StringAdd(testSuite.GetAttribute("skipped"), 1)); XmlElement abortedTestCaseElement = doc.CreateElement("testcase"); abortedTestCaseElement.SetAttribute("name", testName); abortedTestCaseElement.SetAttribute("classname", testGroupName + "." + testScenarioName); abortedTestCaseElement.SetAttribute("time", "0"); testSuite.AppendChild(abortedTestCaseElement); XmlElement abortedElement = doc.CreateElement("skipped"); abortedElement.SetAttribute("message", "Test case aborted."); abortedElement.InnerText = message; abortedTestCaseElement.AppendChild(abortedElement); } return this; } /// /// build and save the test result to file /// /// public List Build() { List result = new List(); foreach (KeyValuePair kv in testSuiteMap) { var doc = kv.Value.Item1; var resultPath = Path.Combine(this.testResultGroupFolder, kv.Key + "-TestResults.xml"); result.Add(resultPath); doc.Save(resultPath); } return result; } private string StringAdd(string str, int added) { var val = int.Parse(str) + added; return val + ""; } } } GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/Utilities/RunCommandRunner.cs000066400000000000000000000061761500521614600277770ustar00rootroot00000000000000īģŋ// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT using Azure; using Azure.ResourceManager.Compute; using Azure.ResourceManager.Compute.Models; using GuestProxyAgentTest.Models; using GuestProxyAgentTest.Settings; namespace GuestProxyAgentTest.Utilities { /// /// Helper class for Running 'RunCommand' on a particular virtual machine /// public class RunCommandRunner { /// /// Execute/Run a 'RunCommand' on a particular virtual machine /// /// virtual machine resource, used to specify the azure virtual machine instance /// builder for run command setting /// cancellation token /// parameter setter for the run command script /// public static async Task ExecuteRunCommandOnVM(VirtualMachineResource vmr , RunCommandSettingBuilder runCommandSettingBuilder , CancellationToken cancellationToken , Func runCommandParameterSetter = null!) 
{ var vmrcs = vmr.GetVirtualMachineRunCommands(); Console.WriteLine("Creating runcommand on vm."); if (null != runCommandParameterSetter) { runCommandSettingBuilder = runCommandParameterSetter(runCommandSettingBuilder); } var runCommandSetting = runCommandSettingBuilder.Build(); await vmrcs.CreateOrUpdateAsync(WaitUntil.Completed, runCommandSetting.runCommandName, toVMRunCommandData(runCommandSetting), cancellationToken: cancellationToken); var iv = vmrcs.Get(runCommandSetting.runCommandName, "InstanceView").Value.Data.InstanceView; return new RunCommandOutputDetails { StdOut = runCommandSetting.outputBlobSAS, StdErr = runCommandSetting.errorBlobSAS, CustomOut = runCommandSetting.customOutputSAS, Succeed = iv.ExecutionState == ExecutionState.Succeeded && iv.ExitCode == 0, }; } private static VirtualMachineRunCommandData toVMRunCommandData(RunCommandSetting runCommandSetting) { var res = new VirtualMachineRunCommandData(TestSetting.Instance.location) { Source = new VirtualMachineRunCommandScriptSource() { ScriptUri = new Uri(runCommandSetting.runCommandScriptSAS), }, AsyncExecution = false, TimeoutInSeconds = 3600, OutputBlobUri = new Uri(runCommandSetting.outputBlobSAS), ErrorBlobUri = new Uri(runCommandSetting.errorBlobSAS), }; foreach (var x in runCommandSetting.runCommandParameters.Select(kv => new RunCommandInputParameter(kv.Key, kv.Value))) { res.Parameters.Add(x); } return res; } } }; GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/Utilities/RunCommandSettingBuilder.cs000066400000000000000000000130551500521614600314440ustar00rootroot00000000000000īģŋ// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT using Azure.Core; using GuestProxyAgentTest.Settings; using System; namespace GuestProxyAgentTest.Utilities { /// /// Builder class for build RunCommandSetting /// public class RunCommandSettingBuilder { internal string scriptFullPath = null!; private TestScenarioSetting _testCaseSetting = null!; private RunCommandSetting runCommandSetting; public RunCommandSettingBuilder() { this.runCommandSetting = new RunCommandSetting(); } /// /// Specify test case setting for the run command setting /// /// /// public RunCommandSettingBuilder TestScenarioSetting(TestScenarioSetting testCaseSetting) { this._testCaseSetting = testCaseSetting; return this; } /// /// Set run command name /// /// /// public RunCommandSettingBuilder RunCommandName(string runCommandName) { this.runCommandSetting.runCommandName = runCommandName; return this; } /// /// Set run command script by local file /// The 'scriptFullPath' is the full path of a local script file that will be uploaded to blob and used as the run command script rul /// /// /// public RunCommandSettingBuilder ScriptFullPath(string scriptFullPath) { this.scriptFullPath = scriptFullPath; return this; } /// /// Set custom output SAS url /// /// /// public RunCommandSettingBuilder CustomOutputSas(string customOutputSas) { this.runCommandSetting.customOutputSAS = customOutputSas; return this; } /// /// Add parameter for the run command script. 
/// /// /// /// public RunCommandSettingBuilder AddParameter(string paramName, string paramValue) { this.runCommandSetting.runCommandParameters.Add(paramName, paramValue); return this; } public RunCommandSettingBuilder AddParameters(List<(string, string)> list) { if(list == null || list.Count == 0) { return this; } foreach(var kv in list) { this.runCommandSetting.runCommandParameters.Add(kv.Item1, kv.Item2); } return this; } /// /// Set run command script by blob SAS url /// /// /// public RunCommandSettingBuilder RunCommandScriptSAS(string scriptSAS) { this.runCommandSetting.runCommandScriptSAS = scriptSAS; return this; } /// /// Build the run command setting /// Setup run command script, if non of scriptFullPath or runCommandScriptSAS was specified, will throw the parameter error exception /// if both was set, the runCommandScriptSAS has more priority. /// Setup testcasesetting/output/error output for the run command /// /// /// public RunCommandSetting Build() { if(this.runCommandSetting.runCommandScriptSAS == null && this.scriptFullPath == null) { throw new Exception("neither specifying the run command script from runCommandScriptSAS nor scriptFullPath"); } if(this.runCommandSetting.runCommandName == null) { throw new Exception("runCommandName was not specified."); } if(this.runCommandSetting.runCommandScriptSAS == null) { this.runCommandSetting.runCommandScriptSAS = StorageHelper.Instance.Upload2SharedBlob(Constants.SHARED_SCRIPTS_CONTAINER_NAME, this.scriptFullPath); } runCommandSetting.testCaseSetting = _testCaseSetting; this.runCommandSetting.outputBlobSAS = StorageHelper.Instance.CreateAppendBlob(Constants.SHARED_E2E_TEST_OUTPUT_CONTAINER_NAME, Constants.RUNCOMMAND_OUTPUT_FILE_NAME, this._testCaseSetting.TestScenarioStorageFolderPrefix + "/" + this.runCommandSetting.runCommandName); this.runCommandSetting.errorBlobSAS= StorageHelper.Instance.CreateAppendBlob(Constants.SHARED_E2E_TEST_OUTPUT_CONTAINER_NAME, Constants.RUNCOMMAND_ERROR_OUTPUT_FILE_NAME, this._testCaseSetting.TestScenarioStorageFolderPrefix + "/" + this.runCommandSetting.runCommandName); if(this.runCommandSetting.customOutputSAS != null && this.runCommandSetting.customOutputSAS.Count() > 0) { this.runCommandSetting.runCommandParameters.Add(Constants.RUNCOMMAND_CUSTOM_OUTPUT_SAS_PARAMETER_NAME, System.Convert.ToBase64String(System.Text.Encoding.UTF8.GetBytes(this.runCommandSetting.customOutputSAS))); } return this.runCommandSetting; } } } GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/Utilities/StorageHelper.cs000066400000000000000000000152641500521614600273040ustar00rootroot00000000000000īģŋ// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT using Azure.Core; using Azure.Identity; using Azure.Storage.Blobs; using Azure.Storage.Blobs.Specialized; using Azure.Storage.Sas; using GuestProxyAgentTest.Settings; using System.Security.Cryptography.X509Certificates; namespace GuestProxyAgentTest.Utilities { /// /// Helper class for operation shared storage account 'e2etestsharedstorage' /// internal class StorageHelper { private static StorageHelper _instance = null!; private TokenCredential tokenCredential = null!; private StorageHelper() { } /// /// Init of the StorageHelper Instance /// /// azure tenant id /// application id of the app principal /// certificate that will be used to retrieved the app principal public static void Init(string tenantId, string appClientId) { if(_instance != null) return; _instance = new StorageHelper(); _instance.tokenCredential = new 
GuestProxyAgentE2EStorageAccountTokenCredential(); } /// /// Instance of StorageHelper, need to call Init before using /// The storageHelper will operate the pre-created storage account 'e2etestsharedstorage' /// public static StorageHelper Instance { get { if(null == _instance) { throw new Exception(""); } return _instance; } } /// /// Create an append blob in the shared blob /// /// blob container name /// file name /// parent folder path in the blob /// public string CreateAppendBlob(string containerName, string fileName, string parentPathInBlob = null!) { var containerClient = new BlobContainerClient(new Uri($"{TestSetting.Instance.sharedStorageAccountUrl}/{containerName}"), this.tokenCredential); if (null != parentPathInBlob) { fileName = $"{parentPathInBlob}/{fileName}"; } containerClient.GetAppendBlobClient(fileName).CreateIfNotExists(); return GenerateSasUriFromSharedBlob(containerName, fileName); } /// /// Upload to local file to the shared blob, will use the local file name on the blob file name /// /// container name /// local file path(full path) /// parent folder name in the blob /// public string Upload2SharedBlob(string containerName, string filePath, string parentPathInBlob = null!) { return Upload2SharedBlob(containerName, filePath, Path.GetFileName(filePath), parentPathInBlob); } /// /// Upload to local file to shared blob with specified file name on the blob file name /// /// container name /// local file path (full path) /// file name that will be used as the blob file name /// parent folder path in the blob /// public string Upload2SharedBlob(string containerName, string filePath, string fileName, string parentPathInBlob = null!) { var containerClient = new BlobContainerClient(new Uri($"{TestSetting.Instance.sharedStorageAccountUrl}/{containerName}"), this.tokenCredential); if (null != parentPathInBlob) { fileName = $"{parentPathInBlob}/{fileName}"; } containerClient.GetBlobClient(fileName).Upload(filePath, true); return GenerateSasUriFromSharedBlob(containerName, fileName); } /// /// Generate the SaS URI for a blob on shared repo /// /// container name /// file name including the parent path of the blob /// public string GenerateSasUriFromSharedBlob(string containerName, string fileName) { return DoGenerateSasUri(TestSetting.Instance.sharedStorageAccountUrl, containerName, fileName); } /// /// Clean/delete all the folder under a folder of the shared blob /// /// container name /// folder path in the blob public void CleanSharedBlobFolder(string containerName, string folderPath) { var serviceClient = new BlobServiceClient(new Uri(TestSetting.Instance.sharedStorageAccountUrl), this.tokenCredential); var containerClient = serviceClient.GetBlobContainerClient(containerName); foreach(var blob in containerClient.GetBlobs(prefix: folderPath)) { containerClient.GetBlobClient(blob.Name).DeleteIfExists(); } } private string DoGenerateSasUri(string storageAccountUrl, string containerName, string fileName) { var bsClient = new BlobServiceClient(new Uri(storageAccountUrl), this.tokenCredential); Azure.Storage.Blobs.Models.UserDelegationKey userDelegationKey = bsClient.GetUserDelegationKey(DateTimeOffset.UtcNow.AddHours(-6), DateTimeOffset.UtcNow.AddDays(1)); // Create a SAS token that's valid for one hour. 
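            // NOTE: despite the comment above, the builder below sets ExpiresOn to seven days,
            // while the user delegation key requested above is only valid from six hours in the
            // past to one day in the future; the shorter of the two windows is what effectively
            // limits how long the signed URI can be used.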
BlobSasBuilder sasBuilder = new BlobSasBuilder() { BlobContainerName = containerName, Resource = "c" }; sasBuilder.ExpiresOn = DateTimeOffset.UtcNow.AddDays(7); sasBuilder.SetPermissions(BlobContainerSasPermissions.All); BlobUriBuilder blobUriBuilder = new BlobUriBuilder(new Uri($"{storageAccountUrl}/{containerName}/{fileName}")) { Sas = sasBuilder.ToSasQueryParameters(userDelegationKey, bsClient.AccountName) }; return blobUriBuilder.ToUri().ToString(); } } } GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/Utilities/TestAssertUtils.cs000066400000000000000000000007701500521614600276560ustar00rootroot00000000000000īģŋ// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT using System; using System.Collections.Generic; using System.Linq; using System.Text; using System.Threading.Tasks; namespace GuestProxyAgentTest.Utilities { public static class TestAssertUtils { public static void AssertIsTrue(Func func, string message) { if (!func()) { throw new Exception("Test Assert failed: " + message); } } } } GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/Utilities/TestCommonUtilities.cs000066400000000000000000000113651500521614600305220ustar00rootroot00000000000000īģŋ// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT using Azure.Core; using GuestProxyAgentTest.Models; using GuestProxyAgentTest.Settings; using Newtonsoft.Json; using System.Runtime.Serialization; namespace GuestProxyAgentTest.Utilities { public static class TestCommonUtilities { /// /// Test Setup, set up all the test related setting /// /// /// /// public static void TestSetup(string guestProxyAgentZipFilePath, string testConfigFilePath, string testResultFolder) { TestSetting.Init(YamlUtils.DeserializeYaml(testConfigFilePath), guestProxyAgentZipFilePath, testResultFolder); StorageHelper.Init(TestSetting.Instance.tenantId, TestSetting.Instance.appClientId); VMHelper.Init(TestSetting.Instance.tenantId, TestSetting.Instance.appClientId, TestSetting.Instance.subscriptionId); } /// /// download the content as string with retry per 1 second /// in case of download succeed will return (true, content string) /// if case of download failed will return(false, error message) /// /// download url /// retry count, default value is 5 /// public static (bool, string) DownloadContentAsString(string url, Action logger = null!, int retryCnt = 5) { if (url == null || url.Length == 0) { return (false, "The url provided is null or empty."); } int cnt = 0; var errMessage = ""; while (cnt < retryCnt) { cnt++; try { string contents = ""; using (var client = new HttpClient()) { var res = client.GetAsync(url).Result; res.EnsureSuccessStatusCode(); contents = res.Content.ReadAsStringAsync().Result; } return (true, contents); } catch (Exception ex) { errMessage = string.Format("Download content failed, attempted: {0} times, exception: {1}", cnt, ex.ToString()); logger?.Invoke(errMessage); } Thread.Sleep(1000); } return (false, errMessage); } public static bool DownloadFile(string url, string filePath, Action logger = null!, int retryCnt = 5) { if (null == url || url.Length == 0) { return false; } int cnt = 0; while (cnt < retryCnt) { cnt++; try { if (File.Exists(filePath)) { File.Delete(filePath); } using var client = new HttpClient(); var res = client.GetAsync(url).Result; res.EnsureSuccessStatusCode(); using var fileStream = File.Create(filePath); res.Content.CopyToAsync(fileStream).Wait(); return true; } catch (Exception ex) { var errMessage = string.Format("Download file failed, attempted: {0} times, exception: 
{1}", cnt, ex.ToString()); logger?.Invoke(errMessage); } } return false; } public static AccessToken GetAccessTokenFromEnv(string envName) { var tokenString = Environment.GetEnvironmentVariable(envName); if (string.IsNullOrEmpty(tokenString)) { throw new Exception("Failed to get the access token from environment variable: " + envName); } var model = JsonConvert.DeserializeObject(tokenString); if (model == null) { throw new Exception("Failed to deserialize access token json object: " + tokenString); } return new AccessToken(model.AccessToken, DateTimeOffset.Parse(model.ExpiresOn)); } [DataContract] public class TokenEnvModel { [DataMember(Name = "accessToken")] public string AccessToken { get; set; } [DataMember(Name = "expiresOn")] public string ExpiresOn { get; set; } } } } GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/Utilities/TestMapReader.cs000066400000000000000000000040131500521614600272260ustar00rootroot00000000000000īģŋ// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT using GuestProxyAgentTest.Settings; using System.Reflection; using GuestProxyAgentTest.Models; namespace GuestProxyAgentTest.Utilities { public static class TestMapReader { static string TestMapFile(bool test_arm64 = false) { if (test_arm64) { if (Constants.IS_WINDOWS()) { return "Test-Map-Arm64.yml"; } else { return "Test-Map-Linux-Arm64.yml"; } } else { if (Constants.IS_WINDOWS()) { return "Test-Map.yml"; } else { return "Test-Map-Linux.yml"; } } } /// /// Read 'Test-Map.yml' and covert to a TestScenarioSetting list /// /// public static List ReadFlattenTestScenarioSettingFromTestMap(bool test_arm64 = false) { var curFolder = Path.GetDirectoryName(Assembly.GetExecutingAssembly().Location)!; return YamlUtils.DeserializeYaml(Path.Combine(curFolder, "TestMap", TestMapFile(test_arm64))) .TestGroupList.Select((x) => YamlUtils.DeserializeYaml(Path.Combine(curFolder, "TestMap", x.Include))) .SelectMany(x => x.Scenarios, (group, ele) => new TestScenarioSetting { vmImageOffer = group.VmImageOffer, vmImagePublisher = group.VmImagePublisher, vmImageSku = group.VmImageSku, vmImageVersion = group.VmImageVersion, testGroupName = group.GroupName, testScenarioClassName = ele.ClassName, testScenarioName = ele.Name, }).ToList(); } } } GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/Utilities/VMBuilder.cs000066400000000000000000000265101500521614600263650ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT using Azure; using Azure.Core; using Azure.ResourceManager; using Azure.ResourceManager.Compute; using Azure.ResourceManager.Compute.Models; using Azure.ResourceManager.Network; using Azure.ResourceManager.Resources; using GuestProxyAgentTest.Settings; using Microsoft.Azure.Management.ResourceManager.Fluent; namespace GuestProxyAgentTest.Utilities { /// /// Class for Build an Azure VM based on the test case setting /// class VMBuilder { private TestScenarioSetting testScenarioSetting = null!; private string vmName = ""; private string vNetName = ""; private string netInfName = ""; private string pubIpName = ""; private string rgName = ""; private string adminUsername = "testuser"; private string adminPassword = SdkContext.RandomResourceName("pP@1", 15); // In order to use the plan, we need to accept the terms first. 
// https://learn.microsoft.com/en-us/cli/azure/vm/image/terms?view=azure-cli-latest#az-vm-image-terms-accept // az vm image terms accept --urn almalinux:almalinux:9:latest --subscription // az vm image terms accept --urn kinvolk:flatcar:stable:latest --subscription // az vm image terms accept --urn resf:rockylinux-x86_64:9-base:latest --subscription private readonly string[] PLAN_REQUIRED_IMAGES = new string[] { "almalinux", "kinvolk", "resf" }; public VMBuilder() { } /// /// Load the test case setting, that set up the virtual machine related resource names, including virtual machine name, virtual network name, network interface name, public ip address name /// /// /// public VMBuilder LoadTestCaseSetting(TestScenarioSetting testScenarioSetting) { this.testScenarioSetting = testScenarioSetting; this.rgName = this.testScenarioSetting.ResourceGroupName; var prefix = "e2e" + new Random().Next(1000); this.vmName = prefix + "vm"; this.vNetName = prefix + "vNet"; this.netInfName = prefix + "nInf"; this.pubIpName = prefix + "pubIp"; return this; } /// /// Build Build and return the VirtualMachine based on the setting /// /// public async Task Build(bool enableProxyAgent, CancellationToken cancellationToken) { PreCheck(); ArmClient client = new(new GuestProxyAgentE2ETokenCredential(), defaultSubscriptionId: TestSetting.Instance.subscriptionId); var sub = await client.GetDefaultSubscriptionAsync(); var rgs = sub.GetResourceGroups(); if (await rgs.ExistsAsync(rgName)) { Console.WriteLine($"Resource group: {rgName} already exists, cleaning it up."); await (await rgs.GetAsync(rgName)).Value.DeleteAsync(WaitUntil.Completed); } Console.WriteLine("Creating resource group: " + rgName); var rgData = new ResourceGroupData(TestSetting.Instance.location); rgData.Tags.Add(Constants.COULD_CLEANUP_TAG_NAME, "true"); var rgr = rgs.CreateOrUpdate(WaitUntil.Completed, rgName, rgData).Value; VirtualMachineCollection vmCollection = rgr.GetVirtualMachines(); Console.WriteLine("Creating virtual machine..."); var vmr = (await vmCollection.CreateOrUpdateAsync(WaitUntil.Completed, this.vmName, await DoCreateVMData(rgr, enableProxyAgent), cancellationToken: cancellationToken)).Value; Console.WriteLine("Virtual machine created, with id: " + vmr.Id); return vmr; } public async Task GetVirtualMachineResource() { PreCheck(); ArmClient client = new(new GuestProxyAgentE2ETokenCredential(), defaultSubscriptionId: TestSetting.Instance.subscriptionId); var sub = await client.GetDefaultSubscriptionAsync(); return sub.GetResourceGroups().Get(this.rgName).Value.GetVirtualMachine(this.vmName); } private async Task DoCreateVMData(ResourceGroupResource rgr, bool enableProxyAgent) { var vmData = new VirtualMachineData(TestSetting.Instance.location) { HardwareProfile = new VirtualMachineHardwareProfile() { VmSize = new VirtualMachineSizeType(TestSetting.Instance.vmSize), }, StorageProfile = new VirtualMachineStorageProfile() { ImageReference = new ImageReference() { Publisher = this.testScenarioSetting.VMImageDetails.Publisher, Offer = this.testScenarioSetting.VMImageDetails.Offer, Sku = this.testScenarioSetting.VMImageDetails.Sku, Version = this.testScenarioSetting.VMImageDetails.Version, }, OSDisk = new VirtualMachineOSDisk(DiskCreateOptionType.FromImage) { Name = "e2eVmOsDisk", Caching = CachingType.ReadWrite, ManagedDisk = new VirtualMachineManagedDisk() { StorageAccountType = StorageAccountType.StandardLrs, }, }, }, OSProfile = new VirtualMachineOSProfile() { ComputerName = vmName, AdminUsername = this.adminUsername, AdminPassword = 
this.adminPassword, }, NetworkProfile = await DoCreateVMNetWorkProfile(rgr), }; if (enableProxyAgent) { vmData.SecurityProfile = new SecurityProfile() { ProxyAgentSettings = new ProxyAgentSettings() { Enabled = true, WireServer = new HostEndpointSettings() { InVmAccessControlProfileReferenceId = TestSetting.Instance.InVmWireServerAccessControlProfileReferenceId, }, Imds = new HostEndpointSettings() { InVmAccessControlProfileReferenceId = TestSetting.Instance.InVmIMDSAccessControlProfileReferenceId, }, } }; } if (Constants.IS_WINDOWS()) { vmData.OSProfile.WindowsConfiguration = new WindowsConfiguration() { ProvisionVmAgent = true, IsAutomaticUpdatesEnabled = true, PatchSettings = new PatchSettings() { AssessmentMode = WindowsPatchAssessmentMode.ImageDefault, }, }; } else { vmData.OSProfile.LinuxConfiguration = new LinuxConfiguration() { //ProvisionVMAgent = true, //IsPasswordAuthenticationDisabled = false, }; } if (PLAN_REQUIRED_IMAGES.Contains(this.testScenarioSetting.VMImageDetails.Publisher)) { vmData.Plan = new ComputePlan() { Name = this.testScenarioSetting.VMImageDetails.Sku, Publisher = this.testScenarioSetting.VMImageDetails.Publisher, Product = this.testScenarioSetting.VMImageDetails.Offer, }; } return vmData; } private async Task DoCreateVMNetWorkProfile(ResourceGroupResource rgr) { Console.WriteLine("Creating network profile"); var vns = rgr.GetVirtualNetworks(); await vns.CreateOrUpdateAsync(WaitUntil.Completed, this.vNetName, new VirtualNetworkData { AddressPrefixes = { "10.0.0.0/16" }, FlowTimeoutInMinutes = 10, Location = TestSetting.Instance.location, Subnets = { new SubnetData { Name = "default", AddressPrefix = "10.0.0.0/24", DefaultOutboundAccess = false } } }); var pips = rgr.GetPublicIPAddresses(); Console.WriteLine("Creating public ip address."); await pips.CreateOrUpdateAsync(WaitUntil.Completed, this.pubIpName, new PublicIPAddressData { Location = TestSetting.Instance.location }); var nifs = rgr.GetNetworkInterfaces(); Console.WriteLine("Creating network interface."); await nifs.CreateOrUpdateAsync(WaitUntil.Completed, this.netInfName, new NetworkInterfaceData() { IPConfigurations = { new NetworkInterfaceIPConfigurationData() { Subnet = new SubnetData() { Id = new ResourceIdentifier($"/subscriptions/{TestSetting.Instance.subscriptionId}/resourceGroups/{this.rgName}/providers/Microsoft.Network/virtualNetworks/{this.vNetName}/subnets/default"), }, PublicIPAddress = new PublicIPAddressData() { Id = new ResourceIdentifier($"/subscriptions/{TestSetting.Instance.subscriptionId}/resourceGroups/{this.rgName}/providers/Microsoft.Network/publicIPAddresses/{this.pubIpName}"), }, Name = "ipconfig1", } }, Location = TestSetting.Instance.location, }); return new VirtualMachineNetworkProfile() { NetworkInterfaces = { new VirtualMachineNetworkInterfaceReference() { Primary = true, Id = new ResourceIdentifier($"/subscriptions/{TestSetting.Instance.subscriptionId}/resourceGroups/{this.rgName}/providers/Microsoft.Network/networkInterfaces/{this.netInfName}"), } }, }; } private void PreCheck() { if (this.testScenarioSetting == null) { throw new Exception("missing test case settings"); } if (TestSetting.Instance == null) { throw new Exception("TestSetting not init."); } if (StorageHelper.Instance == null) { throw new Exception("StorageHelper not init."); } } } } GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/Utilities/VMHelper.cs000066400000000000000000000103161500521614600262130ustar00rootroot00000000000000īģŋ// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT 
using Azure.Identity; using Azure.ResourceManager; using Azure.ResourceManager.Compute; using Azure.ResourceManager.Resources; using GuestProxyAgentTest.Settings; using System.Security.Cryptography.X509Certificates; namespace GuestProxyAgentTest.Utilities { /// /// VMHelper class that will be used to VM operations /// internal class VMHelper { private static VMHelper _instance = null!; private ArmClient client = null!; private VMHelper() { } /// /// Single instance of VMHelper, needs to call Init before using /// public static VMHelper Instance { get { if(null == _instance) { throw new Exception("not init."); } return _instance; } } /// /// Init the VMHelper, need to be called before using VMHelper.Instance /// /// azure tenant id /// application principal id /// default subscription id, the resources will be created on the default subscription /// certificate that will be used to retrieve the application principal public static void Init(string tenantId, string appClientId, string defaultSubId) { if(null != _instance) { return; } _instance = new VMHelper(); _instance.client = new ArmClient(new GuestProxyAgentE2ETokenCredential(), defaultSubId); } /// /// Get VirtualMachineResource of the VM, specified by resource group name and virtual machine name /// /// resource group name /// virtual machine name /// public VirtualMachineResource GetVMResource(string rgName, string vmName) { var sub = client.GetDefaultSubscription(); return sub.GetResourceGroups().Get(rgName).Value.GetVirtualMachine(vmName); } /// /// Clean up the test related Azure resources, including resource group and saved azure blob storage,during the test. /// /// test case setting, that contains the information of the resources that needs to be cleaned up public void CleanupTestResources(TestScenarioSetting testCaseSetting) { var sub = client.GetDefaultSubscription(); var rgs = sub.GetResourceGroups(); if(rgs.Exists(testCaseSetting.ResourceGroupName)) { rgs.Get(testCaseSetting.ResourceGroupName).Value.Delete(Azure.WaitUntil.Completed); } StorageHelper.Instance.CleanSharedBlobFolder(Constants.SHARED_E2E_TEST_OUTPUT_CONTAINER_NAME, testCaseSetting.TestScenarioStorageFolderPrefix); StorageHelper.Instance.CleanSharedBlobFolder(Constants.SHARED_MSI_CONTAINER_NAME, testCaseSetting.TestScenarioStorageFolderPrefix); } public async Task CleanupOldTestResourcesAndForget() { var sub = await client.GetDefaultSubscriptionAsync(); var rgs = sub.GetResourceGroups().Where(rg => rg.Data.Tags.ContainsKey(Constants.COULD_CLEANUP_TAG_NAME) && rg.Data.Tags[Constants.COULD_CLEANUP_TAG_NAME].Equals("true", StringComparison.OrdinalIgnoreCase) ); foreach (var rg in rgs) { var firstDeployment = rg.GetArmDeployments().Where(x => x?.Data?.Properties?.Timestamp != null).OrderBy(x => x.Data.Properties.Timestamp).FirstOrDefault(); if(firstDeployment != null && firstDeployment?.Data?.Properties?.Timestamp?.DateTime.AddDays(2) <= DateTime.UtcNow) { await rg.DeleteAsync(Azure.WaitUntil.Started); } } } } } GuestProxyAgent-1.0.30/e2etest/GuestProxyAgentTest/Utilities/YamlUtils.cs000066400000000000000000000013701500521614600264540ustar00rootroot00000000000000īģŋ// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT using System; using System.Collections.Generic; using System.Linq; using System.Text; using System.Threading.Tasks; using YamlDotNet.Serialization.NamingConventions; using YamlDotNet.Serialization; namespace GuestProxyAgentTest.Utilities { public static class YamlUtils { public static T DeserializeYaml(string filePath) { var 
deserializer = new DeserializerBuilder() .WithNamingConvention(CamelCaseNamingConvention.Instance) .Build(); using (var reader = new StreamReader(filePath)) { return deserializer.Deserialize(reader.ReadToEnd()); } } } } GuestProxyAgent-1.0.30/ebpf/000077500000000000000000000000001500521614600156175ustar00rootroot00000000000000GuestProxyAgent-1.0.30/ebpf/ReadMe.md000066400000000000000000000007541500521614600173040ustar00rootroot00000000000000# Important The drive 'redirect.bpf.sys' must be signed manually currently, so if we have to make any code changes within this folder, we need manual sign the 'redirect.bpf.sys' from the build output folder. ## Get the new 'redirect.bpf.sys' 1. To get the release version of new redirect.bpf.sys from your local dev box, run ```build release clean```. 2. copy the './out/release/redirect.bpf.sys' to sign it manually. ## Manual Sign the new 'redirect.bpf.sys' << To be defined later >> GuestProxyAgent-1.0.30/ebpf/redirect.bpf.c000066400000000000000000000111161500521614600203320ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT #include "bpf_helpers.h" #include "socket.h" // SEC("maps") #pragma clang section data = "maps" struct bpf_map_def policy_map = { .type = BPF_MAP_TYPE_HASH, .key_size = sizeof(destination_entry_t), .value_size = sizeof(destination_entry_t), .max_entries = 10}; #pragma clang section data = "maps" struct bpf_map_def skip_process_map = { .type = BPF_MAP_TYPE_HASH, .key_size = sizeof(sock_addr_skip_process_entry), .value_size = sizeof(sock_addr_skip_process_entry), .max_entries = 10}; #pragma clang section data = "maps" struct bpf_map_def audit_map = { .type = BPF_MAP_TYPE_LRU_HASH, // retain the latest records automatically .key_size = sizeof(sock_addr_audit_key_t), // source port and protocol .value_size = sizeof(sock_addr_audit_entry_t), .max_entries = 1000}; /* check the current pid in the skip_process map. return 1 if found, otherwise return 0. */ inline __attribute__((always_inline)) int check_skip_process_map_entry(uint32_t pid) { sock_addr_skip_process_entry key = {0}; key.pid = pid; // Find the entry in the skip_process map. sock_addr_skip_process_entry *skip_entry = bpf_map_lookup_elem(&skip_process_map, &key); return (skip_entry != NULL) ? 1 : 0; } /* update audit map entry if not skip redirecting. return 0 if the entry is updated, otherwise return 1 if pid found in the skip_process_map. */ inline __attribute__((always_inline)) int update_audit_map_entry(bpf_sock_addr_t *ctx) { uint64_t pid_tip = bpf_get_current_pid_tgid(); uint32_t pid = (uint32_t)(pid_tip >> 32); if (check_skip_process_map_entry(pid) == 1) { return 1; } sock_addr_audit_entry_t entry = {0}; entry.process_id = pid; entry.logon_id = bpf_get_current_logon_id(ctx); if (entry.logon_id == 0) { bpf_printk("Failed to get logon id."); } entry.is_admin = bpf_is_current_admin(ctx); if (entry.is_admin < 0) { bpf_printk("Failed to get admin status %u.", entry.is_admin); } entry.destination_ipv4 = ctx->user_ip4; // we only support ipv4 so far. 
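    // The audit record built here reaches user mode in one of two ways (see the branch below):
    // if the source port is not yet known (msg_src_port == 0) the entry is attached to the
    // socket's redirect context, otherwise it is stored in audit_map keyed by
    // (protocol, source_port).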
entry.destination_port = ctx->user_port; uint16_t source_port = ctx->msg_src_port; if (source_port == 0) { int32_t result = bpf_sock_addr_set_redirect_context(ctx, &entry, sizeof(sock_addr_audit_entry_t)); if (result != 0) { bpf_printk("Failed to add audit entry to redirect context with result %u.", result); } else { bpf_printk("Added audit entry to redirect context."); } } else { sock_addr_audit_key_t key = {0}; key.protocol = ctx->protocol; key.source_port = source_port; uint64_t ret = bpf_map_update_elem(&audit_map, &key, &entry, 0); if (ret != 0) { bpf_printk("Failed to update audit map with results: %u.", ret); } else { bpf_printk("Added audit entry with source port: %u", source_port); } } return 0; } inline __attribute__((always_inline)) int authorize_v4(bpf_sock_addr_t *ctx) { destination_entry_t entry = {0}; entry.destination_ip.ipv4 = ctx->user_ip4; entry.destination_port = ctx->user_port; entry.protocol = ctx->protocol; // Find the entry in the policy map. destination_entry_t *policy = bpf_map_lookup_elem(&policy_map, &entry); if (policy != NULL) { bpf_printk("Found v4 proxy entry value: %u, %u", policy->destination_ip.ipv4, policy->destination_port); // update to the audit map before changing the destination ip and port. if (update_audit_map_entry(ctx) == 1) { bpf_printk("Found skip process entry, skip the redirection."); return BPF_SOCK_ADDR_VERDICT_PROCEED; } // if (ctx->msg_src_ip4 == 0) // { // bpf_printk("Local/source ip is not set, redirect to loopback ip."); // ctx->user_ip4 = policy->destination_ip.ipv4; // } // else // { // ctx->user_ip4 = ctx->msg_src_ip4; // bpf_printk("Local/source ip is set, redirect to source ip:%u.", ctx->user_ip4); // } bpf_printk("redirecting to destination loopback ip."); ctx->user_ip4 = policy->destination_ip.ipv4; ctx->user_port = policy->destination_port; } return BPF_SOCK_ADDR_VERDICT_PROCEED; } // SEC("cgroup/connect4") #pragma clang section text = "cgroup/connect4" int authorize_connect4(bpf_sock_addr_t *ctx) { return authorize_v4(ctx); } GuestProxyAgent-1.0.30/ebpf/socket.h000066400000000000000000000015631500521614600172650ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT #pragma once #include #include #define IPPROTO_TCP 6 #define IPPROTO_UDP 17 #define AF_INET 2 #define AF_INET6 0x17 typedef struct _ip_address { union { uint32_t ipv4; uint32_t ipv6[4]; }; } ip_address_t; typedef struct _destination_entry { ip_address_t destination_ip; uint16_t destination_port; uint32_t protocol; } destination_entry_t; typedef struct _sock_addr_audit_key{ uint32_t protocol; uint16_t source_port; }sock_addr_audit_key_t; typedef struct _sock_addr_audit_entry{ uint64_t logon_id; uint32_t process_id; int32_t is_admin; uint32_t destination_ipv4; uint16_t destination_port; }sock_addr_audit_entry_t; typedef struct _sock_addr_skip_process_entry{ uint32_t pid; }sock_addr_skip_process_entry;GuestProxyAgent-1.0.30/linux-ebpf/000077500000000000000000000000001500521614600167545ustar00rootroot00000000000000GuestProxyAgent-1.0.30/linux-ebpf/ebpf_cgroup.c000066400000000000000000000172661500521614600214270ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT #include #include #include #include #include "socket.h" struct { __uint(type, BPF_MAP_TYPE_HASH); __type(key, sock_addr_skip_process_entry); __type(value, sock_addr_skip_process_entry); __uint(max_entries, 10); } skip_process_map SEC(".maps"); struct { __uint(type, BPF_MAP_TYPE_HASH); __type(key, destination_entry); 
__type(value, destination_entry); __uint(max_entries, 10); } policy_map SEC(".maps"); struct { __uint(type, BPF_MAP_TYPE_LRU_HASH); __type(key, sock_addr_audit_key); // source port and protocol __type(value, sock_addr_audit_entry); // audit entry __uint(max_entries, 200); // some older kernel version cannot support over 200 entries. } audit_map SEC(".maps"); struct { __uint(type, BPF_MAP_TYPE_LRU_HASH); __type(key, __u64); // socket cookie or pid-tgid __type(value, sock_addr_local_entry); // audit local entry __uint(max_entries, 200); // some older kernel version cannot support over 200 entries. } local_map SEC(".maps"); /* check the current pid in the skip_process map. return 1 if found, otherwise return 0. */ static __always_inline int check_skip_process_map_entry(__u32 pid) { sock_addr_skip_process_entry key = {0}; key.pid = pid; // Find the entry in the skip_process map. sock_addr_skip_process_entry *skip_entry = bpf_map_lookup_elem(&skip_process_map, &key); return (skip_entry != NULL) ? 1 : 0; } /* update audit map entry if not skip redirecting. return 0 if the entry is updated, otherwise return 1 if pid found in the skip_process_map. */ static __always_inline int update_local_map_entry(struct bpf_sock_addr *ctx) { __u64 pid_tip = bpf_get_current_pid_tgid(); __u32 pid = (__u32)(pid_tip >> 32); if (check_skip_process_map_entry(pid) == 1) { return 1; } sock_addr_local_entry entry = {0}; entry.process_id = pid; __u32 uid = (__u32)(bpf_get_current_uid_gid() >> 32); entry.logon_id = uid; entry.is_root = (uid == 0) ? 1 : 0; // root uid is 0. entry.destination_ipv4 = ctx->user_ip4; // we only support ipv4 so far. entry.destination_port = ctx->user_port; entry.protocol = ctx->protocol; __u64 ret = bpf_map_update_elem(&local_map, &pid_tip, &entry, 0); if (ret != 0) { bpf_printk("update_local_map_entry: Failed to update local map entry with results:%u.", ret); } else { bpf_printk("update_local_map_entry: Updated local map entry with key:%u.", pid_tip); } return 0; } static __always_inline int authorize_v4(struct bpf_sock_addr *ctx) { destination_entry entry = {0}; entry.destination_ip.ipv4 = ctx->user_ip4; entry.destination_port = ctx->user_port; entry.protocol = ctx->protocol; // Find the entry in the policy map. destination_entry *policy = bpf_map_lookup_elem(&policy_map, &entry); if (policy != NULL) { bpf_printk("authorize_v4: Found v4 proxy entry value: %u, %u", policy->destination_ip.ipv4, policy->destination_port); // update to the audit map before changing the destination ip and port. if (update_local_map_entry(ctx) == 1) { bpf_printk("authorize_v4: Found skip process entry, skip the redirection."); return BPF_SOCK_ADDR_VERDICT_PROCEED; } // TODO: check if the local ip is set. // __u32 local_ip; // __u64 read = bpf_probe_read_kernel(&local_ip, sizeof(__u32), &ctx->msg_src_ip4); // if (read == 0 && local_ip != 0) // { // // read the local ip from the msg_src_ip4 successfully and ip is set. 
// ctx->user_ip4 = local_ip; // bpf_printk("authorize_v4: Local/source ip is set, redirect to source ip:%u.", local_ip); // } // else { ctx->user_ip4 = policy->destination_ip.ipv4; bpf_printk("authorize_v4: Local/source ip is not set, redirect to loopback ip."); } ctx->user_port = policy->destination_port; } return BPF_SOCK_ADDR_VERDICT_PROCEED; } SEC("cgroup/connect4") int connect4(struct bpf_sock_addr *ctx) { __u64 cookie = bpf_get_socket_cookie(ctx); return authorize_v4(ctx); } static __always_inline int update_audit_map_entry_sk(__u32 local_port, sock_addr_local_entry *local_entry) { sock_addr_audit_key key = {0}; key.protocol = local_entry->protocol; key.source_port = local_port; sock_addr_audit_entry entry = {0}; entry.process_id = local_entry->process_id; entry.logon_id = local_entry->logon_id; entry.is_root = local_entry->is_root; entry.destination_ipv4 = local_entry->destination_ipv4; entry.destination_port = local_entry->destination_port; __u64 ret = bpf_map_update_elem(&audit_map, &key, &entry, 0); if (ret != 0) { bpf_printk("update_audit_map_entry_sk: Failed to update audit map entry with results:%u.", ret); } else { bpf_printk("update_audit_map_entry_sk: Updated audit map entry with local port:%u.", key.source_port); } return 0; } static __always_inline int trace_v4(struct pt_regs *ctx, struct probe_sock *sk) { struct sock_common skc; // bpf_probe_read_kernel helper function requires kernel version 5.5+ // hence have to use bpf_probe_read helper function instead. long re = bpf_probe_read(&skc, sizeof(struct sock_common), &sk->__sk_common); if (re != 0) { // 0 is success. return 0; } if (skc.skc_family != AF_INET) { // Only support IPv4. return 0; } __u64 pid_tgid = bpf_get_current_pid_tgid(); __u32 pid = (__u32)(pid_tgid >> 32); if (check_skip_process_map_entry(pid) == 1) { bpf_printk("trace_v4: Found skip process entry %u, skip the trace.", pid); return 0; } // Find the entry in the local map. sock_addr_local_entry *local_entry = bpf_map_lookup_elem(&local_map, &pid_tgid); if (local_entry != NULL) { update_audit_map_entry_sk(skc.skc_num, local_entry); __u64 ret = bpf_map_delete_elem(&local_map, &pid_tgid); if (ret != 0) { bpf_printk("trace_v4: Failed to delete local map entry with results:%u.", ret); } else { bpf_printk("trace_v4: Deleted local map entry with key:%u.", pid_tgid); } return 0; } destination_entry entry = {0}; entry.destination_ip.ipv4 = skc.skc_daddr; entry.destination_port = skc.skc_dport; entry.protocol = IPPROTO_TCP; // Find the entry in the policy map. destination_entry *policy = bpf_map_lookup_elem(&policy_map, &entry); if (policy != NULL) { __u32 uid = (__u32)(bpf_get_current_uid_gid() >> 32); sock_addr_audit_key key = {0}; key.protocol = IPPROTO_TCP; key.source_port = skc.skc_num; sock_addr_audit_entry entry = {0}; entry.process_id = pid; entry.logon_id = uid; entry.is_root = (uid == 0) ? 1 : 0; // root uid is 0. 
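        // Fallback path: connect4 left no local_map entry for this pid/tgid, but the
        // destination matches the policy map, so the kprobe records the audit entry
        // itself, keyed by the socket's source port and using uid == 0 as the root check.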
entry.destination_ipv4 = skc.skc_daddr; entry.destination_port = skc.skc_dport; __u64 ret = bpf_map_update_elem(&audit_map, &key, &entry, 0); if (ret != 0) { bpf_printk("trace_v4: Failed to update audit map entry with results:%u.", ret); } else { bpf_printk("trace_v4: Updated audit map entry with local port:%u.", key.source_port); } } return 0; } SEC("kprobe/tcp_v4_connect") int BPF_KPROBE(tcp_v4_connect, struct probe_sock *sk) { return trace_v4(ctx, sk); } char _license[] SEC("license") = "GPL";GuestProxyAgent-1.0.30/linux-ebpf/socket.h000066400000000000000000000041251500521614600204170ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT #define BPF_SOCK_ADDR_VERDICT_PROCEED 1 #define IPPROTO_TCP 6 #define AF_INET 2 typedef struct _sock_addr_skip_process_entry { __u32 pid; } sock_addr_skip_process_entry; typedef struct _ip_address { union { __u32 ipv4; __u32 ipv6[4]; }; } ip_address; typedef struct _destination_entry { ip_address destination_ip; __u32 destination_port; __u32 protocol; } destination_entry; typedef struct _sock_addr_audit_key { __u32 protocol; __u32 source_port; } sock_addr_audit_key; typedef struct _sock_addr_audit_entry { __u32 logon_id; __u32 process_id; __u32 is_root; __u32 destination_ipv4; __u32 destination_port; } sock_addr_audit_entry; typedef struct _bpf_sock_tuple_ipv4 { __be32 saddr; __be32 daddr; __be16 sport; __be16 dport; } bpf_sock_tuple_ipv4; typedef struct _sock_addr_local_entry { __u32 logon_id; __u32 process_id; __u32 is_root; __u32 destination_ipv4; __u32 destination_port; __u32 protocol; } sock_addr_local_entry; typedef __u32 __bitwise __portpair; typedef __u64 __bitwise __addrpair; struct hlist_node { struct hlist_node *next, **pprev; }; struct sock_common { union { __addrpair skc_addrpair; struct { __be32 skc_daddr; __be32 skc_rcv_saddr; }; }; union { unsigned int skc_hash; __u16 skc_u16hashes[2]; }; /* skc_dport && skc_num must be grouped as well */ union { __portpair skc_portpair; struct { __be16 skc_dport; __u16 skc_num; }; }; unsigned short skc_family; volatile unsigned char skc_state; unsigned char skc_reuse : 4; unsigned char skc_reuseport : 1; unsigned char skc_ipv6only : 1; unsigned char skc_net_refcnt : 1; int skc_bound_dev_if; union { struct hlist_node skc_bind_node; struct hlist_node skc_portaddr_node; }; }; struct probe_sock { struct sock_common __sk_common; };GuestProxyAgent-1.0.30/packages.config000066400000000000000000000004521500521614600176510ustar00rootroot00000000000000 GuestProxyAgent-1.0.30/pkg_debian/000077500000000000000000000000001500521614600167665ustar00rootroot00000000000000GuestProxyAgent-1.0.30/pkg_debian/compat000066400000000000000000000000021500521614600201640ustar00rootroot0000000000000010GuestProxyAgent-1.0.30/pkg_debian/control000066400000000000000000000010201500521614600203620ustar00rootroot00000000000000 Package: azure-proxy-agent Architecture: amd64 Version: pkgversion Maintainer: AzureRT ProxyAgent V Team Homepage: https://github.com/Azure/GuestProxyAgent Suggests: btrfs-progs, xfsprogs, grub2, udev, e2fsprogs, systemd Description: Azure Guest Proxy Agent The Azure Guest Proxy Agent is a daemon that runs on the Azure guest operating system and provides a proxy for the Azure Fabric Controller to communicate with the guest operating system. This package contains the proxy agent. 
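Returning to the linux-ebpf/socket.h definitions earlier in this archive: those structs are the contract between the eBPF program and the user-mode agent, which ships and loads ebpf_cgroup.o (see the aya dependency and the ebpfProgramName setting in proxy_agent later in this archive). The sketch below is a minimal, illustrative set of #[repr(C)] Rust mirrors of those layouts; the type and field names are assumptions chosen for illustration and are not the structs actually used by proxy_agent, which do not appear in this section. Note that the eBPF code compares policy_map keys against ctx->user_ip4 and ctx->user_port, which are network-byte-order values, so user space has to populate entries in the same byte order.

```rust
// Illustrative #[repr(C)] mirrors of the structs in linux-ebpf/socket.h.
// These are assumptions for this sketch, not the agent's real definitions.

/// C side: union { __u32 ipv4; __u32 ipv6[4]; } -- 16 bytes; the first word holds the IPv4 address.
#[repr(C)]
#[derive(Clone, Copy, Default)]
pub struct IpAddress {
    pub ipv6: [u32; 4],
}

/// Key/value type of policy_map: destination ip, port and protocol as the eBPF program sees them.
#[repr(C)]
#[derive(Clone, Copy, Default)]
pub struct DestinationEntry {
    pub destination_ip: IpAddress,
    pub destination_port: u32,
    pub protocol: u32,
}

/// Key of audit_map: protocol plus the local (source) port of the redirected connection.
#[repr(C)]
#[derive(Clone, Copy, Default)]
pub struct SockAddrAuditKey {
    pub protocol: u32,
    pub source_port: u32,
}

/// Value of audit_map: who made the connection and where it was originally headed.
#[repr(C)]
#[derive(Clone, Copy, Default)]
pub struct SockAddrAuditEntry {
    pub logon_id: u32,
    pub process_id: u32,
    pub is_root: u32,
    pub destination_ipv4: u32,
    pub destination_port: u32,
}
```

Keeping the user-mode layouts byte-for-byte identical to the C structs (hence #[repr(C)] and fixed-width fields) is what lets plain map lookups and updates work without any serialization layer in between.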
GuestProxyAgent-1.0.30/pkg_debian/postinst000077500000000000000000000002461500521614600206010ustar00rootroot00000000000000 systemctl unmark azure-proxy-agent.service systemctl daemon-reload systemctl start azure-proxy-agent.service systemctl enable azure-proxy-agent.service #DEBHELPER# GuestProxyAgent-1.0.30/pkg_debian/prerm000077500000000000000000000003021500521614600200340ustar00rootroot00000000000000systemctl stop GuestProxyAgent.service systemctl disable GuestProxyAgent.service systemctl daemon-reload rm -r /usr/sbin/azure-proxy-agent rm -r /usr/lib/azure-proxy-agent/package #DEBHELPER# GuestProxyAgent-1.0.30/pkg_debian/rules000066400000000000000000000000001500521614600200310ustar00rootroot00000000000000GuestProxyAgent-1.0.30/proxy_agent/000077500000000000000000000000001500521614600172425ustar00rootroot00000000000000GuestProxyAgent-1.0.30/proxy_agent/Cargo.toml000066400000000000000000000072421500521614600211770ustar00rootroot00000000000000[package] name = "azure-proxy-agent" version = "1.0.30" # always 3-number version edition = "2021" build = "build.rs" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] proxy_agent_shared = { path ="../proxy_agent_shared"} itertools = "0.10.5" # use to sort iterator elements into a new iterator in ascending order once_cell = "1.17.0" # use Lazy serde = "1.0.152" serde_derive = "1.0.152" serde_json = "1.0.91" # json Deserializer serde-xml-rs = "0.6.0" # xml Deserializer bitflags = "2.6.0" # support bitflag enum hmac-sha256 = "1.1.6" # use HMAC using the SHA-256 hash function hex = "0.4.3" # hex encode regex = "1.11" # match process name in cmdline tokio = { version = "1", features = ["rt", "rt-multi-thread", "time", "net", "macros", "sync"] } tokio-util = "0.7.11" http = "1.1.0" http-body-util = "0.1" hyper = { version = "1", features = ["server", "http1", "client"] } hyper-util = { version = "0.1", features = ["tokio"] } tower = { version = "0.5.2", features = ["full"] } tower-http = { version = "0.6.2", features = ["limit"] } clap = { version = "4.5.17", features =["derive"] } # Command Line Argument Parser thiserror = "1.0.64" ctor = "0.3.6" # used for test setup and clean up [dependencies.uuid] version = "1.3.0" features = [ "v4", # Lets you generate random UUIDs "fast-rng", # Use a faster (but still sufficiently random) RNG "macro-diagnostics", # Enable better diagnostics for compile-time UUIDs ] [target.'cfg(not(windows))'.dependencies] sysinfo = "0.30.13" # read process information for Linux aya = "0.13.1" # linux ebpf program loader uzers = "0.12.1" # get user name libc = "0.2.147" # linux call [target.'cfg(not(windows))'.dependencies.nix] version = "0.29.0" features = [ "net", "fs", "user" ] [target.'cfg(windows)'.dependencies] windows-service = "0.7.0" # windows NT service windows-acl = "0.3.0" # ACL the latch key folder winapi = "0.3.9" # used by windows-acl PSID libloading = "0.8.0" # for dynamic load libraries [target.'cfg(windows)'.build-dependencies] winres = "0.1.12" # Rust Windows resource helper to add file version static_vcruntime = "2.0.0" # Statically link the VCRuntime when using the MSVC toolchain [target.'cfg(windows)'.dependencies.windows-sys] version = "0.42.0" features = [ "Win32_Foundation", "Win32_Networking_WinSock", "Win32_System_IO", "Win32_Security", "Win32_System_WindowsProgramming", "Win32_Security_Authentication_Identity", "Win32_System_Diagnostics_Debug", "Win32_System_SystemInformation", "Win32_System_Threading", "Win32_System_ProcessStatus", 
"Win32_System_Kernel", "Win32_Security_Cryptography", "Win32_System_Memory", ] [features] test-with-root = [] [package.metadata.deb] name = "azure-proxy-agent" revision = "0" maintainer = "AzureRT ProxyAgent V Team " copyright = "2024, AzureRT ProxyAgent V Team " license-file = ["../LICENSE", "4"] extended-description = """\ The Azure Guest Proxy Agent is a daemon that runs on the Azure guest \ operating system and provides a proxy for the Azure Fabric Controller \ to communicate with the guest operating system.""" maintainer-scripts = "DEBIAN" systemd-units = { enable = true } assets = [ ["azure-proxy-agent", "usr/sbin/azure-proxy-agent", "755"], # Binary ["proxy-agent.json", "etc/azure/proxy-agent.json", "644"], ["ebpf_cgroup.o", "usr/lib/azure-proxy-agent/ebpf_cgroup.o", "644"], ]GuestProxyAgent-1.0.30/proxy_agent/build.rs000066400000000000000000000003561500521614600207130ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT fn main() { #[cfg(windows)] { static_vcruntime::metabuild(); let res = winres::WindowsResource::new(); res.compile().unwrap(); } } GuestProxyAgent-1.0.30/proxy_agent/config/000077500000000000000000000000001500521614600205075ustar00rootroot00000000000000GuestProxyAgent-1.0.30/proxy_agent/config/GuestProxyAgent.linux.json000066400000000000000000000005341500521614600256520ustar00rootroot00000000000000{ "logFolder": "", "eventFolder": "/var/log/azure-proxy-agent/events", "latchKeyFolder": "/var/lib/azure-proxy-agent/keys", "monitorIntervalInSeconds": 60, "pollKeyStatusIntervalInSeconds": 15, "hostGAPluginSupport": 1, "ebpfProgramName": "ebpf_cgroup.o", "cgroupRoot": "/sys/fs/cgroup", "fileLogLevel": "Info" }GuestProxyAgent-1.0.30/proxy_agent/config/GuestProxyAgent.windows.json000066400000000000000000000006041500521614600262030ustar00rootroot00000000000000{ "logFolder": "%SYSTEMDRIVE%\\WindowsAzure\\ProxyAgent\\Logs", "eventFolder": "%SYSTEMDRIVE%\\WindowsAzure\\ProxyAgent\\Events", "latchKeyFolder": "%SYSTEMDRIVE%\\WindowsAzure\\ProxyAgent\\Keys", "monitorIntervalInSeconds": 60, "pollKeyStatusIntervalInSeconds": 15, "hostGAPluginSupport": 1, "ebpfProgramName": "redirect.bpf.sys", "fileLogLevel": "Info" }GuestProxyAgent-1.0.30/proxy_agent/src/000077500000000000000000000000001500521614600200315ustar00rootroot00000000000000GuestProxyAgent-1.0.30/proxy_agent/src/acl.rs000066400000000000000000000016161500521614600211420ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT //! This module contains the logic to set the ACL on the directory. //! The ACL is set on the directory to allow the elevated accounts only to access the directory. //! Example //! ```rust //! use proxy_agent::acl; //! use std::path::PathBuf; //! //! // Set the ACL on the directory //! let dir_to_acl = PathBuf::from("path_to_directory"); //! acl::acl_directory(dir_to_acl); //! 
``` #[cfg(windows)] mod windows_acl; #[cfg(not(windows))] mod linux_acl; use crate::common::result::Result; use std::path::PathBuf; pub fn acl_directory(dir_to_acl: PathBuf) -> Result<()> { if !dir_to_acl.exists() || !dir_to_acl.is_dir() { return Ok(()); } #[cfg(windows)] { windows_acl::acl_directory(dir_to_acl)?; } #[cfg(not(windows))] { linux_acl::acl_directory(dir_to_acl)?; } Ok(()) } GuestProxyAgent-1.0.30/proxy_agent/src/acl/000077500000000000000000000000001500521614600205705ustar00rootroot00000000000000GuestProxyAgent-1.0.30/proxy_agent/src/acl/linux_acl.rs000066400000000000000000000050411500521614600231140ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT use crate::common::{logger, result::Result}; use nix::unistd::{chown, Gid, Uid}; use proxy_agent_shared::misc_helpers; use std::fs; use std::os::unix::fs::PermissionsExt; use std::path::PathBuf; pub fn acl_directory(dir_to_acl: PathBuf) -> Result<()> { let dir_str = misc_helpers::path_to_string(&dir_to_acl); logger::write(format!( "acl_directory: start to set root-only permission to folder {}.", dir_str )); match chown(&dir_to_acl, Some(Uid::from_raw(0)), Some(Gid::from_raw(0))) { Ok(_) => logger::write(format!( "acl_directory: successfully set root-only permission to folder {}.", dir_str )), Err(e) => { logger::write(format!( "acl_directory: failed to set root-only permission to folder {}. Error: {:?}", dir_str, e )); } } // Set permissions to 700 let permissions = fs::Permissions::from_mode(0o700); match fs::set_permissions(dir_to_acl, permissions) { Ok(_) => logger::write(format!( "acl_directory: successfully set root-only permission to folder {}.", dir_str )), Err(e) => { logger::write(format!( "acl_directory: failed to set root-only permission to folder {}. 
Error: {:?}", dir_str, e )); } } Ok(()) } #[cfg(feature = "test-with-root")] #[cfg(test)] mod tests { use proxy_agent_shared::misc_helpers; use std::env; use std::fs; use std::os::unix::fs::PermissionsExt; #[tokio::test] async fn acl_directory_test() { let mut temp_test_path = env::temp_dir(); let logger_key = "acl_directory_test"; temp_test_path.push(logger_key); // clean up and ignore the clean up errors _ = fs::remove_dir_all(&temp_test_path); _ = misc_helpers::try_create_folder(&temp_test_path); let output = super::acl_directory(temp_test_path.to_path_buf()); assert!( output.is_ok(), "failed to set root-only permission to folder" ); match fs::metadata(temp_test_path.to_path_buf()) { Ok(metadata) => { let permissions = metadata.permissions().mode(); assert_eq!(permissions & 0o700, 0o700, "Permissions are not set to 700"); } Err(e) => panic!("Failed to get metadata: {:?}", e), } _ = fs::remove_dir_all(&temp_test_path); } } GuestProxyAgent-1.0.30/proxy_agent/src/acl/windows_acl.rs000066400000000000000000000146401500521614600234540ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT use crate::common::{ error::{AclErrorType, Error}, logger, result::Result, }; use proxy_agent_shared::misc_helpers; use std::path::PathBuf; use winapi::um::winnt::PSID; use windows_acl::acl::{AceType, ACL}; use windows_acl::helper; use windows_sys::Win32::Security::{CONTAINER_INHERIT_ACE, OBJECT_INHERIT_ACE}; // https://learn.microsoft.com/en-us/windows/win32/secauthz/well-known-sids const LOCAL_SYSTEM_SID: &str = "S-1-5-18"; const BUILDIN_ADMIN_SID: &str = "S-1-5-32-544"; const FULL_CONTROL: u32 = 2032127; pub fn acl_directory(dir_to_acl: PathBuf) -> Result<()> { let dir_str = misc_helpers::path_to_string(&dir_to_acl); let mut acl = ACL::from_file_path(&dir_str, true) .map_err(|e| Error::Acl(AclErrorType::AclObject(dir_str.to_string()), e))?; let system_sid = helper::string_to_sid(LOCAL_SYSTEM_SID) .map_err(|e| Error::Acl(AclErrorType::Sid(LOCAL_SYSTEM_SID.to_string()), e))?; let admin_sid = helper::string_to_sid(BUILDIN_ADMIN_SID) .map_err(|e| Error::Acl(AclErrorType::Sid(BUILDIN_ADMIN_SID.to_string()), e))?; logger::write(format!( "acl_directory: removing all the remaining access rules for folder {}.", dir_str )); match acl.all() { Ok(entries) => { logger::write(format!( "acl_directory: get '{}' access rules for folder {}.", entries.len(), dir_str )); for entry in entries { match entry.sid { Some(ref sid) => { logger::write(format!( "acl_directory: removing ACL entry '{}-{}-{}-{}' .", entry.string_sid, entry.entry_type, entry.flags, entry.mask )); match acl.remove_entry( sid.as_ptr() as PSID, Some(entry.entry_type), None, // remove all, including inherited permissions ) { Ok(r) => { logger::write(format!("acl_directory: removed '{}' entry.", r)); } Err(e) => { logger::write_warning(format!( "acl_directory: remove_entry failed with error '{}' entry.", e )); } } } None => { logger::write_warning("acl_directory: entry.sid is NONE.".to_string()); } } } } Err(e) => { return Err(Error::Acl(AclErrorType::AclEntries(dir_str), e)); } } logger::write(format!( "acl_directory: Adding new access rules for the target directory {}.", dir_str )); let flags = (CONTAINER_INHERIT_ACE | OBJECT_INHERIT_ACE) as u8; let mask = FULL_CONTROL; match acl.add_entry( system_sid.as_ptr() as PSID, AceType::AccessAllow, flags, mask, ) { Ok(r) => { logger::write(format!( "acl_directory: Adding new access rules for sid {} with result {}.", LOCAL_SYSTEM_SID, r )); } Err(e) => { return 
Err(Error::Acl( AclErrorType::AddEntry(LOCAL_SYSTEM_SID.to_string()), e, )); } } match acl.add_entry( admin_sid.as_ptr() as PSID, AceType::AccessAllow, flags, mask, ) { Ok(r) => { logger::write(format!( "acl_directory: Adding new access rules for sid {} with result {}.", BUILDIN_ADMIN_SID, r )); } Err(e) => { return Err(Error::Acl( AclErrorType::AddEntry(LOCAL_SYSTEM_SID.to_string()), e, )); } } Ok(()) } #[cfg(test)] mod tests { use proxy_agent_shared::misc_helpers; use std::env; use std::fs; use std::path::PathBuf; use winapi::um::winnt::PSID; use windows_acl::acl::{AceType, ACL}; use windows_acl::helper; const EVERY_ONE_SID: &str = "S-1-1-0"; #[tokio::test] async fn acl_directory_test() { let mut temp_test_path = env::temp_dir(); let logger_key = "acl_directory_test"; temp_test_path.push(logger_key); // clean up and ignore the clean up errors _ = fs::remove_dir_all(&temp_test_path); _ = misc_helpers::try_create_folder(&temp_test_path); // test when dir_to_acl does not exist let invalid_path = PathBuf::from("invalid_path"); _ = super::acl_directory(invalid_path); // add everyone to log directory let every_one_sid = helper::string_to_sid(EVERY_ONE_SID).unwrap(); let flags = (super::CONTAINER_INHERIT_ACE | super::OBJECT_INHERIT_ACE) as u8; let mask = super::FULL_CONTROL; let mut acl = ACL::from_file_path(temp_test_path.to_str().unwrap(), true).unwrap(); acl.add_entry( every_one_sid.as_ptr() as PSID, AceType::AccessAllow, flags, mask, ) .unwrap(); // acl the log directory _ = super::acl_directory(temp_test_path.to_path_buf()); let acl = ACL::from_file_path(temp_test_path.to_str().unwrap(), false).unwrap(); let entries = acl .get(every_one_sid.as_ptr() as PSID, Some(AceType::AccessAllow)) .unwrap(); assert_eq!(0, entries.len(), "ACL rule entry should be 0 for everyone"); let admin_sid = helper::string_to_sid(super::BUILDIN_ADMIN_SID).unwrap(); let entries = acl .get(admin_sid.as_ptr() as PSID, Some(AceType::AccessAllow)) .unwrap(); assert_eq!(1, entries.len(), "ACL rule entry should be 1 for admins"); let system_sid = helper::string_to_sid(super::LOCAL_SYSTEM_SID).unwrap(); let entries = acl .get(system_sid.as_ptr() as PSID, Some(AceType::AccessAllow)) .unwrap(); assert_eq!( 1, entries.len(), "ACL rule entry should be 1 for system_sid" ); _ = fs::remove_dir_all(&temp_test_path); } } GuestProxyAgent-1.0.30/proxy_agent/src/common.rs000066400000000000000000000005231500521614600216670ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT pub mod cli; pub mod config; pub mod constants; pub mod error; pub mod helpers; pub mod hyper_client; pub mod logger; pub mod result; #[cfg(windows)] pub mod windows; #[cfg(windows)] pub use windows::store_key_data; #[cfg(windows)] pub use windows::fetch_key_data; GuestProxyAgent-1.0.30/proxy_agent/src/common/000077500000000000000000000000001500521614600213215ustar00rootroot00000000000000GuestProxyAgent-1.0.30/proxy_agent/src/common/cli.rs000066400000000000000000000024741500521614600224450ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT use clap::{Parser, Subcommand}; use once_cell::sync::Lazy; /// azure-proxy-agent console - launch a long run process of GPA in console mode. /// azure-proxy-agent --version - print the version of the GPA. /// azure-proxy-agent --status [--wait ] - get the provision status of the GPA service. /// azure-proxy-agent - start the GPA as an OS service. /// The GPA service will be started as an OS service in the background. 
#[derive(Parser)] #[command()] pub struct Cli { /// get the provision status of the GPA service #[arg(short, long)] pub status: bool, /// wait for the provision status to finish #[arg(short, long, requires = "status")] pub wait: Option, /// print the version of the GPA #[arg(short, long)] pub version: bool, #[cfg(test)] #[arg(short, long)] test_threads: Option, #[cfg(test)] #[arg(short, long)] nocapture: bool, #[command(subcommand)] pub command: Option, } #[derive(Subcommand)] pub enum Commands { /// launch a long run process of GPA in console mode Console, } impl Cli { pub fn is_console_mode(&self) -> bool { self.command.is_some() } } pub static CLI: Lazy = Lazy::new(Cli::parse); GuestProxyAgent-1.0.30/proxy_agent/src/common/config.rs000066400000000000000000000215261500521614600231420ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT //! This module contains the logic to read the configuration from the config file. //! The configuration file is a json file that contains the configuration for the GPA service. //! //! Example //! ```rust //! use proxy_agent::config; //! //! // Get the logs directory //! let logs_dir = config::get_logs_dir(); //! //! // Get the keys directory //! let keys_dir = config::get_keys_dir(); //! //! ``` use crate::common::constants; use once_cell::sync::Lazy; use proxy_agent_shared::{logger::LoggerLevel, misc_helpers}; use serde_derive::{Deserialize, Serialize}; use std::str::FromStr; use std::{path::PathBuf, time::Duration}; #[cfg(not(windows))] const CONFIG_FILE_NAME: &str = "proxy-agent.json"; #[cfg(windows)] const CONFIG_FILE_NAME: &str = "GuestProxyAgent.json"; static SYSTEM_CONFIG: Lazy = Lazy::new(Config::default); #[cfg(not(windows))] pub fn get_cgroup_root() -> PathBuf { SYSTEM_CONFIG.get_cgroup_root() } pub fn get_logs_dir() -> PathBuf { PathBuf::from(SYSTEM_CONFIG.get_log_folder()) } pub fn get_keys_dir() -> PathBuf { PathBuf::from(SYSTEM_CONFIG.get_latch_key_folder()) } pub fn get_events_dir() -> PathBuf { PathBuf::from(SYSTEM_CONFIG.get_event_folder()) } pub fn get_monitor_duration() -> Duration { Duration::from_secs(SYSTEM_CONFIG.get_monitor_interval()) } pub fn get_poll_key_status_duration() -> Duration { Duration::from_secs(SYSTEM_CONFIG.get_poll_key_status_interval()) } //TODO: remove this config/function once the contract is defined for HostGAPlugin pub fn get_host_gaplugin_support() -> u8 { SYSTEM_CONFIG.hostGAPluginSupport } pub fn get_max_event_file_count() -> usize { SYSTEM_CONFIG.get_max_event_file_count() } pub fn get_ebpf_file_full_path() -> Option { SYSTEM_CONFIG.get_ebpf_file_full_path() } pub fn get_ebpf_program_name() -> String { SYSTEM_CONFIG.get_ebpf_program_name().to_string() } pub fn get_file_log_level() -> LoggerLevel { SYSTEM_CONFIG.get_file_log_level() } #[derive(Serialize, Deserialize)] #[allow(non_snake_case)] pub struct Config { logFolder: String, eventFolder: String, latchKeyFolder: String, monitorIntervalInSeconds: u64, pollKeyStatusIntervalInSeconds: u64, hostGAPluginSupport: u8, // 0 not support; 1 proxy only; 2 proxy + authentication check #[serde(skip_serializing_if = "Option::is_none")] maxEventFileCount: Option, #[serde(skip_serializing_if = "Option::is_none")] ebpfFileFullPath: Option, ebpfProgramName: String, #[serde(skip_serializing_if = "Option::is_none")] fileLogLevel: Option, #[serde(skip_serializing_if = "Option::is_none")] #[cfg(not(windows))] cgroupRoot: Option, } impl Default for Config { fn default() -> Self { let mut config_file_full_path = PathBuf::new(); 
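        // Config file lookup order: on non-Windows builds, prefer /etc/azure/proxy-agent.json;
        // if that file does not exist (and always on Windows), fall back to the config file
        // sitting next to the current executable.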
#[cfg(not(windows))] { if !config_file_full_path.exists() { // linux config file default to /etc/azure folder config_file_full_path = PathBuf::from(format!("/etc/azure/{CONFIG_FILE_NAME}")); } } if !config_file_full_path.exists() { // default to current exe folder config_file_full_path = misc_helpers::get_current_exe_dir(); config_file_full_path.push(CONFIG_FILE_NAME); } Config::from_json_file(config_file_full_path) } } impl Config { pub fn from_json_file(file_path: PathBuf) -> Self { misc_helpers::json_read_from_file::(&file_path).unwrap_or_else(|_| { panic!( "Error in reading Config from Json file: {}", misc_helpers::path_to_string(&file_path) ) }) } pub fn get_log_folder(&self) -> String { match misc_helpers::resolve_env_variables(&self.logFolder) { Ok(val) => val, Err(_) => self.logFolder.clone(), } } pub fn get_event_folder(&self) -> String { match misc_helpers::resolve_env_variables(&self.eventFolder) { Ok(val) => val, Err(_) => self.eventFolder.clone(), } } pub fn get_latch_key_folder(&self) -> String { match misc_helpers::resolve_env_variables(&self.latchKeyFolder) { Ok(val) => val, Err(_) => self.latchKeyFolder.clone(), } } pub fn get_monitor_interval(&self) -> u64 { self.monitorIntervalInSeconds } pub fn get_poll_key_status_interval(&self) -> u64 { self.pollKeyStatusIntervalInSeconds } pub fn get_host_gaplugin_support(&self) -> u8 { self.hostGAPluginSupport } pub fn get_max_event_file_count(&self) -> usize { self.maxEventFileCount .unwrap_or(constants::DEFAULT_MAX_EVENT_FILE_COUNT) } pub fn get_ebpf_program_name(&self) -> &str { &self.ebpfProgramName } pub fn get_ebpf_file_full_path(&self) -> Option { self.ebpfFileFullPath.as_ref().map(PathBuf::from) } pub fn get_file_log_level(&self) -> LoggerLevel { let file_log_level = self.fileLogLevel.clone().unwrap_or("Info".to_string()); LoggerLevel::from_str(&file_log_level).unwrap_or(LoggerLevel::Info) } #[cfg(not(windows))] pub fn get_cgroup_root(&self) -> PathBuf { match &self.cgroupRoot { Some(cgroup) => PathBuf::from(cgroup), None => PathBuf::from(constants::CGROUP_ROOT), } } } #[cfg(test)] mod tests { use crate::common::config::Config; use crate::common::constants; use proxy_agent_shared::misc_helpers; use std::fs::File; use std::io::Write; use std::path::PathBuf; use std::{env, fs}; #[test] fn config_struct_test() { let mut temp_test_path: PathBuf = env::temp_dir(); temp_test_path.push("config_struct_test"); _ = fs::remove_dir_all(&temp_test_path); match misc_helpers::try_create_folder(&temp_test_path) { Ok(_) => {} Err(err) => panic!("Failed to create folder: {}", err), } let config_file_path = temp_test_path.join("test_config.json"); let config = create_config_file(config_file_path); assert_eq!( r#"C:\logFolderName"#.to_string(), config.get_log_folder(), "Log Folder mismatch" ); assert_eq!( r#"C:\eventFolderName"#.to_string(), config.get_event_folder(), "Event Folder mismatch" ); assert_eq!( r#"C:\latchKeyFolderName"#.to_string(), config.get_latch_key_folder(), "Latch Key Folder mismatch" ); assert_eq!( 60u64, config.get_monitor_interval(), "get_monitor_interval mismatch" ); assert_eq!( 15u64, config.get_poll_key_status_interval(), "get_poll_key_status_interval mismatch" ); assert_eq!( 1u8, config.get_host_gaplugin_support(), "get_host_gaplugin_support mismatch" ); assert_eq!( constants::DEFAULT_MAX_EVENT_FILE_COUNT, config.get_max_event_file_count(), "get_max_event_file_count mismatch" ); assert_eq!( "ebpfProgramName".to_string(), config.get_ebpf_program_name(), "get_ebpf_program_name mismatch" ); #[cfg(not(windows))] { 
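            // cgroupRoot is not set in the test config JSON written by create_config_file,
            // so get_cgroup_root() is expected to fall back to constants::CGROUP_ROOT.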
assert_eq!( PathBuf::from(constants::CGROUP_ROOT), config.get_cgroup_root(), "get_cgroup_root mismatch" ); } // clean up _ = fs::remove_dir_all(&temp_test_path); } fn create_config_file(file_path: PathBuf) -> Config { let data = if cfg!(not(windows)) { r#"{ "logFolder": "C:\\logFolderName", "eventFolder": "C:\\eventFolderName", "latchKeyFolder": "C:\\latchKeyFolderName", "monitorIntervalInSeconds": 60, "pollKeyStatusIntervalInSeconds": 15, "wireServerSupport": 2, "hostGAPluginSupport": 1, "imdsSupport": 1, "ebpfProgramName": "ebpfProgramName" }"# } else { r#"{ "logFolder": "%SYSTEMDRIVE%\\logFolderName", "eventFolder": "%SYSTEMDRIVE%\\eventFolderName", "latchKeyFolder": "%SYSTEMDRIVE%\\latchKeyFolderName", "monitorIntervalInSeconds": 60, "pollKeyStatusIntervalInSeconds": 15, "wireServerSupport": 2, "hostGAPluginSupport": 1, "imdsSupport": 1, "ebpfProgramName": "ebpfProgramName" }"# }; File::create(&file_path) .unwrap() .write_all(data.as_bytes()) .unwrap(); Config::from_json_file(file_path) } } GuestProxyAgent-1.0.30/proxy_agent/src/common/constants.rs000066400000000000000000000032361500521614600237070ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT pub const WIRE_SERVER_IP: &str = "168.63.129.16"; pub const WIRE_SERVER_PORT: u16 = 80u16; pub const GA_PLUGIN_IP: &str = "168.63.129.16"; pub const GA_PLUGIN_PORT: u16 = 32526u16; pub const IMDS_IP: &str = "169.254.169.254"; pub const IMDS_PORT: u16 = 80u16; pub const PROXY_AGENT_SERVICE_NAME: &str = "GuestProxyAgent"; pub const PROXY_AGENT_IP: &str = "127.0.0.1"; pub const PROXY_AGENT_PORT: u16 = 3080; pub const WIRE_SERVER_IP_NETWORK_BYTE_ORDER: u32 = 0x10813FA8; // 168.63.129.16 pub const GA_PLUGIN_IP_NETWORK_BYTE_ORDER: u32 = 0x10813FA8; // 168.63.129.16 pub const IMDS_IP_NETWORK_BYTE_ORDER: u32 = 0xFEA9FEA9; //"169.254.169.254"; pub const PROXY_AGENT_IP_NETWORK_BYTE_ORDER: u32 = 0x100007F; //"127.0.0.1"; pub const EMPTY_GUID: &str = "00000000-0000-0000-0000-000000000000"; pub const AUTHORIZATION_SCHEME: &str = "Azure-HMAC-SHA256"; pub const KEY_DELIVERY_METHOD_HTTP: &str = "http"; pub const KEY_DELIVERY_METHOD_VTPM: &str = "vtpm"; pub const CLAIMS_IS_ROOT: &str = "isRoot"; pub const CLAIMS_HEADER: &str = "x-ms-azure-host-claims"; pub const AUTHORIZATION_HEADER: &str = "x-ms-azure-host-authorization"; pub const DATE_HEADER: &str = "x-ms-azure-host-date"; pub const METADATA_HEADER: &str = "Metadata"; pub const CONNECTION_HEADER: &str = "connection"; pub const TIME_TICK_HEADER: &str = "x-ms-azure-time_tick"; pub const NOTIFY_HEADER: &str = "x-ms-azure-notify"; // Default Config Settings pub const DEFAULT_MAX_EVENT_FILE_COUNT: usize = 30; pub const CGROUP_ROOT: &str = "/sys/fs/cgroup"; pub const MAX_LOG_FILE_COUNT: usize = 5; pub const MAX_LOG_FILE_SIZE: u64 = 10 * 1024 * 1024; // 10MB GuestProxyAgent-1.0.30/proxy_agent/src/common/error.rs000066400000000000000000000154751500521614600230340ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT use http::{uri::InvalidUri, StatusCode}; #[derive(Debug, thiserror::Error)] pub enum Error { #[error("IO error: {0}: {1}")] Io(String, std::io::Error), #[error("{0}")] Hyper(HyperErrorType), #[error("Hex encoded key '{0}' is invalid: {1}")] Hex(String, hex::FromHexError), #[error("Key error: {0}")] Key(KeyErrorType), #[error("{0} with the error: {1}")] WireServer(WireServerErrorType, String), #[error("Failed to parse URL {0} with error: {1}")] ParseUrl(String, String), #[error("acl_directory: {0} with error: 
{1}")] Acl(AclErrorType, u32), #[error("{0}")] Bpf(BpfErrorType), #[cfg(windows)] #[error("{0}")] WindowsApi(WindowsApiErrorType), #[error("{0} is invalid")] Invalid(String), #[cfg(windows)] #[error(transparent)] WindowsService(#[from] windows_service::Error), #[error("Failed to send '{0}' action response with error {1}")] SendError(String, String), #[error("Failed to receive '{0}' action response with error {1}")] RecvError(String, tokio::sync::oneshot::error::RecvError), #[error("{0}")] FindAuditEntryError(String), } #[derive(Debug, thiserror::Error)] pub enum HyperErrorType { #[error("{0}: {1}")] Custom(String, hyper::Error), #[error("Host connection error: {0}")] HostConnection(String), #[error("Failed to build request with error: {0}")] RequestBuilder(String), #[error("Failed to receive the request body with error: {0}")] RequestBody(String), #[error("Failed to get response from {0}, status code: {1}")] ServerError(String, StatusCode), #[error("Deserialization failed: {0}")] Deserialize(String), } #[derive(Debug, thiserror::Error)] pub enum WireServerErrorType { #[error("Telemetry call to wire server failed")] Telemetry, #[error("Goal state call to wire server failed")] GoalState, #[error("Shared config call to wire server failed")] SharedConfig, } #[derive(Debug, thiserror::Error)] pub enum KeyErrorType { #[error("Key status validation failed with the error: {0}")] KeyStatusValidation(String), #[error("Failed to send {0} key with error: {1}")] SendKeyRequest(String, String), #[error("Failed to {0} key with status code: {1}")] KeyResponse(String, StatusCode), #[error("Failed to join {0} and {1} with error: {2}")] ParseKeyUrl(String, String, InvalidUri), #[error("Failed to check local key with error: {0}")] CheckLocalKey(String), #[error("Failed to get local key with error: {0}")] FetchLocalKey(String), #[error("Failed to store key locally with error: {0}")] StoreLocalKey(String), } #[derive(Debug, thiserror::Error)] pub enum AclErrorType { #[error("Failed to get ACL object for folder '{0}'")] AclObject(String), #[error("Failed to get SID for '{0}'")] Sid(String), #[error("Failed to get ACL entries for folder '{0}'")] AclEntries(String), #[error("Failed to add entry for SID '{0}'")] AddEntry(String), } #[derive(Debug, thiserror::Error)] pub enum BpfErrorType { #[error("Failed to lookup element '{0}' in BPF map 'audit_map'. {1}")] MapLookupElem(String, String), #[error("Failed to delete element '{0}' in BPF map 'audit_map'. 
{1}")] MapDeleteElem(String, String), #[error("Failed to retrieve file descriptor of the BPF map 'audit_map' with error: {0}")] MapFileDescriptor(String), #[error("Failed to get valid map '{0}' in BPF object with error: {1}")] GetBpfMap(String, String), #[error("Failed to get eBPF API: EbpfApi.dll is not loaded")] GetBpfApi, #[error("Failed to get BPF object: Object is not initialized")] NullBpfObject, #[error("Loading eBPF API from file path '{0}' failed with error: {1}")] LoadBpfApi(String, String), #[error("Opening BPF object from file path '{0}' failed with error: {1}")] OpenBpfObject(String, String), #[error("Loading BPF object from file path '{0}' failed with error: {1}")] LoadBpfObject(String, String), #[error("Loading BPF API function '{0}' failed with error: {1}")] LoadBpfApiFunction(String, String), #[error("Failed to load HashMap '{0}' with error: {1}")] LoadBpfMapHashMap(String, String), #[error("Failed to update HashMap '{0}' for '{1}' with error: {2}")] UpdateBpfMapHashMap(String, String, String), #[error("Failed to get program '{0}' with error: {1}")] GetBpfProgram(String, String), #[error("Failed to load program '{0}' with error: {1}")] LoadBpfProgram(String, String), #[error("Failed to attach program '{0}' with error: {1}")] AttachBpfProgram(String, String), #[error("Failed to convert program to '{0}' with error: {1}")] ConvertBpfProgram(String, String), #[error("Failed to open cgroup '{0}' with error: {1}")] OpenCgroup(String, String), #[error("CString initialization failed with error: {0}")] CString(std::ffi::NulError), #[error("Failed to start eBPF/redirector with multiple retries")] FailedToStartRedirector, } #[derive(Debug, thiserror::Error)] #[cfg(windows)] pub enum WindowsApiErrorType { #[error("Loading NetUserGetLocalGroups failed with error: {0}")] LoadNetUserGetLocalGroups(libloading::Error), #[error("LsaGetLogonSessionData {0}")] LsaGetLogonSessionData(String), #[error("WinSock::WSAIoctl - {0}")] WSAIoctl(String), #[error("GlobalMemoryStatusEx failed: {0}")] GlobalMemoryStatusEx(std::io::Error), #[error("{0}")] WindowsOsError(std::io::Error), #[error("CryptProtectData failed: {0}")] CryptProtectData(std::io::Error), #[error("CryptUnprotectData failed: {0}")] CryptUnprotectData(std::io::Error), } #[cfg(test)] mod test { use super::{Error, KeyErrorType, WireServerErrorType}; use http::StatusCode; #[test] fn error_formatting_test() { let mut error = Error::Hyper(super::HyperErrorType::ServerError( "testurl.com".to_string(), StatusCode::from_u16(500).unwrap(), )); assert_eq!( error.to_string(), "Failed to get response from testurl.com, status code: 500 Internal Server Error" ); error = Error::WireServer( WireServerErrorType::Telemetry, "Invalid response".to_string(), ); assert_eq!( error.to_string(), "Telemetry call to wire server failed with the error: Invalid response" ); error = Error::Key(KeyErrorType::SendKeyRequest( "acquire".to_string(), error.to_string(), )); assert_eq!( error.to_string(), "Key error: Failed to send acquire key with error: Telemetry call to wire server failed with the error: Invalid response" ); } } GuestProxyAgent-1.0.30/proxy_agent/src/common/helpers.rs000066400000000000000000000077121500521614600233400ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT use super::result::Result; use super::{error::Error, logger}; use once_cell::sync::Lazy; use proxy_agent_shared::misc_helpers; use proxy_agent_shared::telemetry::span::SimpleSpan; #[cfg(not(windows))] use sysinfo::{CpuRefreshKind, 
MemoryRefreshKind, RefreshKind, System}; #[cfg(windows)] use super::windows; static CURRENT_SYS_INFO: Lazy<(u64, usize)> = Lazy::new(|| { #[cfg(windows)] { let ram_in_mb = match windows::get_memory_in_mb() { Ok(ram) => ram, Err(e) => { logger::write_error(format!("get_memory_in_mb failed: {}", e)); 0 } }; let cpu_count = windows::get_processor_count(); (ram_in_mb, cpu_count) } #[cfg(not(windows))] { let sys = System::new_with_specifics( RefreshKind::new() .with_memory(MemoryRefreshKind::everything()) .with_cpu(CpuRefreshKind::everything()), ); let ram = sys.total_memory(); let ram_in_mb = ram / 1024 / 1024; let cpu_count = sys.cpus().len(); (ram_in_mb, cpu_count) } }); static CURRENT_OS_INFO: Lazy<(String, String)> = Lazy::new(|| { //arch let arch = misc_helpers::get_processor_arch(); // os let os = misc_helpers::get_long_os_version(); (arch, os) }); pub fn get_ram_in_mb() -> u64 { CURRENT_SYS_INFO.0 } pub fn get_cpu_count() -> usize { CURRENT_SYS_INFO.1 } pub fn get_cpu_arch() -> String { CURRENT_OS_INFO.0.to_string() } pub fn get_long_os_version() -> String { CURRENT_OS_INFO.1.to_string() } pub fn compute_signature(hex_encoded_key: &str, input_to_sign: &[u8]) -> Result { match hex::decode(hex_encoded_key) { Ok(key) => { let mut mac = hmac_sha256::HMAC::new(key); mac.update(input_to_sign); let result = mac.finalize(); Ok(hex::encode(result)) } Err(e) => Err(Error::Hex(hex_encoded_key.to_string(), e)), } } // replace xml escape characters pub fn xml_escape(s: String) -> String { s.replace('&', "&") .replace('\'', "'") .replace('"', """) .replace('<', "<") .replace('>', ">") } static START: Lazy = Lazy::new(SimpleSpan::new); pub fn get_elapsed_time_in_millisec() -> u128 { START.get_elapsed_time_in_millisec() } pub fn write_startup_event( task: &str, method_name: &str, module_name: &str, logger_key: &str, ) -> String { let message = START.write_event(task, method_name, module_name, logger_key); #[cfg(not(windows))] logger::write_serial_console_log(message.clone()); message } #[cfg(test)] mod tests { #[test] fn get_system_info_tests() { let ram = super::get_ram_in_mb(); assert!(ram > 100, "total ram must great than 100MB"); let cpu_count = super::get_cpu_count(); assert!( cpu_count >= 1, "total cpu count must great than or equal to 1" ); let cpu_arch = super::get_cpu_arch(); assert_ne!("unknown", cpu_arch, "cpu arch cannot be 'unknown'"); } #[test] fn compute_signature_test() { let hex_encoded_key = "4A404E635266556A586E3272357538782F413F4428472B4B6250645367566B59"; let message = "Hello world"; let result = super::compute_signature(hex_encoded_key, message.as_bytes()).unwrap(); println!("compute_signature: {result}"); let invalid_hex_encoded_key = "YA404E635266556A586E3272357538782F413F4428472B4B6250645367566B59"; let result = super::compute_signature(invalid_hex_encoded_key, message.as_bytes()); assert!(result.is_err(), "invalid key should fail."); let e = result.unwrap_err(); let error = e.to_string(); assert!( error.contains(invalid_hex_encoded_key), "Error does not contains the invalid key" ) } } GuestProxyAgent-1.0.30/proxy_agent/src/common/hyper_client.rs000066400000000000000000000417401500521614600243620ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT //! This module contains the logic to send http requests and read the response body via hyper crate. //! //! Example //! ```rust //! use proxy_agent::hyper_client; //! use host_clients::goal_state::GoalState; //! use std::collections::HashMap; //! use hyper::Uri; //! 
use std::str::FromStr; //! //! let mut headers = HashMap::new(); //! headers.insert("x-ms-version".to_string(), "2012-11-30".to_string()); //! let full_url = Uri::from_str("http://168.63.129.16/machine/machine?comp=goalstate").unwrap(); //! //! // use get method to get response, and deserialize it //! let response: GoalState = hyper_client::get(full_url, &headers, None, None, |log| { //! println!("{}", log); //! }).await.unwrap(); //! //! // build request //! let request = hyper_client::build_request(Method::GET, full_url.clone(), &headers, None, None, None).unwrap(); //! //! // send request //! let (host, port) = hyper_client::host_port_from_uri(full_url.clone()).unwrap(); //! let response = hyper_client::send_request(&host, port, request, |log| { //! println!("{}", log); //! }).await.unwrap(); //! //! // read response body and deserialize it //! let response_body: GoalState = hyper_client::read_response_body(response).await.unwrap(); //! //! ``` use super::error::{Error, HyperErrorType}; use super::result::Result; use super::{constants, helpers}; use http::request::Builder; use http::request::Parts; use http::Method; use http_body_util::combinators::BoxBody; use http_body_util::BodyExt; use http_body_util::Empty; use http_body_util::Full; use hyper::body::Bytes; use hyper::Request; use hyper::Uri; use hyper_util::rt::TokioIo; use itertools::Itertools; use proxy_agent_shared::misc_helpers; use serde::de::DeserializeOwned; use std::collections::HashMap; use tokio::net::TcpStream; const LF: &str = "\n"; pub async fn get( full_url: &Uri, headers: &HashMap, key_guid: Option, key: Option, log_fun: F, ) -> Result where T: DeserializeOwned, F: Fn(String) + Send + 'static, { let request = build_request(Method::GET, full_url, headers, None, key_guid, key)?; let (host, port) = host_port_from_uri(full_url)?; let response = send_request(&host, port, request, log_fun).await?; let status = response.status(); if !status.is_success() { return Err(Error::Hyper(HyperErrorType::ServerError( full_url.to_string(), status, ))); } read_response_body(response).await } pub async fn read_response_body( mut response: hyper::Response, ) -> Result where T: DeserializeOwned, { // LATER:: need find a well_known way to get content_type and charset_type let (content_type, charset_type) = if let Some(content_type) = response.headers().get(hyper::header::CONTENT_TYPE) { if let Ok(content_type_str) = content_type.to_str() { let content_type_str = content_type_str.to_lowercase(); let content_type; if content_type_str.contains("xml") { content_type = "xml"; } else if content_type_str.contains("json") { content_type = "json"; } else if content_type_str.contains("text") { content_type = "text"; } else { content_type = "unknown"; } let charset_type; if content_type_str.contains("utf-8") { charset_type = "utf-8"; } else if content_type_str.contains("utf-16") { charset_type = "utf-16"; } else if content_type_str.contains("utf-32") { charset_type = "utf-32"; } else { charset_type = "unknown"; } (content_type, charset_type) } else { ("unknown", "unknown") } } else { ("unknown", "unknown") }; let mut body_string = String::new(); while let Some(next) = response.frame().await { let frame = match next { Ok(f) => f, Err(e) => { return Err(Error::Hyper(HyperErrorType::Custom( "Failed to get next frame from response".to_string(), e, ))) } }; if let Some(chunk) = frame.data_ref() { match charset_type { "utf-16" => { // Convert Bytes to Vec let byte_vec: Vec = chunk.to_vec(); // Convert Vec to Vec let u16_vec: Vec = byte_vec .chunks(2) 
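                        // assumes UTF-16LE: each 2-byte chunk is decoded as one little-endian u16 code unit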
.map(|chunk| u16::from_le_bytes([chunk[0], chunk[1]])) .collect(); body_string.push_str(&String::from_utf16_lossy(&u16_vec)); } "utf-32" => { return Err(Error::Hyper(HyperErrorType::Deserialize( "utf-32 charset is not supported".to_string(), ))) } _ => { // default to utf-8 body_string.push_str(&String::from_utf8_lossy(chunk)); } }; } } match content_type { "xml" => match serde_xml_rs::from_str(&body_string) { Ok(t) => Ok(t), Err(e) => Err(Error::Hyper( HyperErrorType::Deserialize( format!( "Failed to xml deserialize response body with content_type {} from: {} with error {}", content_type, body_string, e ) ), )), }, // default to json _ => match serde_json::from_str(&body_string) { Ok(t) => Ok(t), Err(e) => Err(Error::Hyper( HyperErrorType::Deserialize( format!( "Failed to json deserialize response body with {} from: {} with error {}", content_type, body_string, e ) ), )), }, } } pub fn build_request( method: http::Method, full_url: &Uri, headers: &HashMap, body: Option<&[u8]>, key_guid: Option, key: Option, ) -> Result>> { let (host, _) = host_port_from_uri(full_url)?; let mut request_builder = Request::builder() .method(method) .uri(match full_url.path_and_query() { Some(pq) => pq.as_str(), None => full_url.path(), }) .header( constants::DATE_HEADER, misc_helpers::get_date_time_rfc1123_string(), ) .header(hyper::header::HOST, host) .header( constants::CLAIMS_HEADER, format!("{{ \"{}\": \"{}\"}}", constants::CLAIMS_IS_ROOT, true,), ) .header( hyper::header::CONTENT_LENGTH, match body { Some(b) => b.len().to_string(), None => "0".to_string(), }, ); for (key, value) in headers { request_builder = request_builder.header(key, value); } if let (Some(key), Some(key_guid)) = (key, key_guid) { let body_vec = body.map(|b| b.to_vec()); let input_to_sign = request_to_sign_input(&request_builder, body_vec)?; let authorization_value = format!( "{} {} {}", constants::AUTHORIZATION_SCHEME, key_guid, helpers::compute_signature(&key, input_to_sign.as_slice())? 
); request_builder = request_builder.header( constants::AUTHORIZATION_HEADER.to_string(), authorization_value.to_string(), ); } let boxed_body = match body { Some(body) => full_body(body.to_vec()), None => empty_body(), }; match request_builder.body(boxed_body) { Ok(r) => Ok(r), Err(e) => Err(Error::Hyper(HyperErrorType::RequestBuilder(format!( "Failed to build request body: {}", e )))), } } pub async fn send_request( host: &str, port: u16, request: Request, log_fun: F, ) -> Result> where B: hyper::body::Body + Send + 'static, B::Data: Send, B::Error: Into>, F: FnMut(String) + Send + 'static, { let full_url = request.uri().clone(); let mut sender = build_http_sender(host, port, log_fun).await?; sender.send_request(request).await.map_err(|e| { Error::Hyper(HyperErrorType::Custom( format!("Failed to send request to {}", full_url), e, )) }) } pub async fn build_http_sender( host: &str, port: u16, mut log_fun: F, ) -> Result> where B: hyper::body::Body + Send + 'static, B::Data: Send, B::Error: Into>, F: FnMut(String) + Send + 'static, { let addr = format!("{}:{}", host, port); let stream = match TcpStream::connect(addr.to_string()).await { Ok(tcp_stream) => tcp_stream, Err(e) => { return Err(Error::Io( format!("Failed to open TCP connection to {}", addr), e, )) } }; let io = TokioIo::new(stream); let (sender, conn) = hyper::client::conn::http1::handshake(io) .await .map_err(|e| { Error::Hyper(HyperErrorType::Custom( format!("Failed to establish connection to {}", addr), e, )) })?; tokio::task::spawn(async move { if let Err(err) = conn.await { log_fun(format!("Connection failed: {:?}", err)); } }); Ok(sender) } pub fn host_port_from_uri(full_url: &Uri) -> Result<(String, u16)> { let host = match full_url.host() { Some(h) => h.to_string(), None => { return Err(Error::ParseUrl( full_url.to_string(), "Failed to get host from uri".to_string(), )) } }; let port = full_url.port_u16().unwrap_or(80); Ok((host, port)) } /* StringToSign = Method + "\n" + HexEncoded(Body) + "\n" + CanonicalizedHeaders + "\n" UrlEncodedPath + "\n" CanonicalizedParameters; */ pub fn as_sig_input(head: Parts, body: Bytes) -> Vec { let mut data: Vec = head.method.to_string().as_bytes().to_vec(); data.extend(LF.as_bytes()); data.extend(body); data.extend(LF.as_bytes()); data.extend(headers_to_canonicalized_string(&head.headers).as_bytes()); let path_para = get_path_and_canonicalized_parameters(&head.uri); data.extend(path_para.0.as_bytes()); data.extend(LF.as_bytes()); data.extend(path_para.1.as_bytes()); data } fn request_to_sign_input(request_builder: &Builder, body: Option>) -> Result> { let mut data: Vec = match request_builder.method_ref() { Some(m) => m.as_str().as_bytes().to_vec(), None => { return Err(Error::Hyper(HyperErrorType::RequestBuilder( "Failed to get method from request builder".to_string(), ))) } }; data.extend(LF.as_bytes()); if let Some(body) = body { data.extend(body); } data.extend(LF.as_bytes()); match request_builder.headers_ref() { Some(h) => { data.extend(headers_to_canonicalized_string(h).as_bytes()); } None => { // no headers data.extend(LF.as_bytes()); } } match request_builder.uri_ref() { Some(u) => { let path_para = get_path_and_canonicalized_parameters(u); data.extend(path_para.0.as_bytes()); data.extend(LF.as_bytes()); data.extend(path_para.1.as_bytes()); } None => { return Err(Error::Hyper(HyperErrorType::RequestBuilder( "Failed to get method from request builder".to_string(), ))) } } Ok(data) } fn headers_to_canonicalized_string(headers: &hyper::HeaderMap) -> String { let mut 
canonicalized_headers = String::new(); let separator = String::from(LF); let mut map: HashMap = HashMap::new(); for (key, value) in headers.iter() { let key = key.to_string(); let value = value.to_str().unwrap().to_string(); let key_lower_case = key.to_lowercase(); map.insert(key_lower_case, (key, value)); } for key in map.keys().sorted() { // skip the expect header if key.eq_ignore_ascii_case(constants::AUTHORIZATION_HEADER) { continue; } let h = format!("{}:{}{}", key, map[key].1.trim(), separator); canonicalized_headers.push_str(&h); } canonicalized_headers } fn get_path_and_canonicalized_parameters(url: &Uri) -> (String, String) { let path = url.path().to_string(); let query_pairs = query_pairs(url); let mut canonicalized_parameters = String::new(); let mut pairs: HashMap = HashMap::new(); if !query_pairs.is_empty() { for (key, value) in query_pairs { let key = key.to_lowercase(); pairs.insert( // add the query parameter value for sorting, // just in case of duplicate keys by value lexicographically in ascending order. format!("{}{}", key, value), (key.to_lowercase(), value.to_string()), ); } // Sort the parameters lexicographically by parameter name and value, in ascending order. let mut first = true; for key in pairs.keys().sorted() { if !first { canonicalized_parameters.push('&'); } first = false; let query_pair = pairs[key].clone(); // Join each parameter key value pair with '=' let p = if query_pair.1.is_empty() { key.to_string() } else { format!("{}={}", query_pair.0, query_pair.1) }; canonicalized_parameters.push_str(&p); } } (path, canonicalized_parameters) } /// get query parameters from uri /// uri - the uri to get query parameters from /// return - a vec of query parameters /// first one is the query parameter key /// second one is parameter value pub fn query_pairs(uri: &Uri) -> Vec<(String, String)> { let query = uri.query().unwrap_or(""); let mut pairs: Vec<(String, String)> = Vec::new(); for pair in query.split('&') { let mut split = pair.splitn(2, '='); let key = split.next().unwrap_or(""); if key.is_empty() { // parameter key is must have while value is optional continue; } let value = split.next().unwrap_or(""); pairs.push((key.to_string(), value.to_string())); } pairs } pub fn empty_body() -> BoxBody { Empty::::new() .map_err(|never| match never {}) .boxed() } pub fn full_body>(chunk: T) -> BoxBody { Full::new(chunk.into()) .map_err(|never| match never {}) .boxed() } /// Certain endpoints are exempt from enforcement regardless of the VM's configuration. /// Restricting access to these non-security impacting endpoints would introduce unreasonable /// overhead and/or harm live-site investigations. Since the service won't require a signature, /// there is no reason to generate one. 
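// Illustrative examples (assumption about how callers use this check): a telemetry post
//     POST /machine/?comp=telemetrydata
// or a log upload
//     PUT /vmAgentLog
// is forwarded without generating the x-ms-azure-host-authorization signature,
// while every other request to the wire server is still signed.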
pub fn should_skip_sig(method: &hyper::Method, relative_uri: &Uri) -> bool { let url = relative_uri.to_string().to_lowercase(); // currently, we agreed to skip the sig for those requests: // o PUT /vmAgentLog // o POST /machine/?comp=telemetrydata (method == hyper::Method::PUT && url == "/vmagentlog") || (method == hyper::Method::POST && url == "/machine/?comp=telemetrydata") } #[cfg(test)] mod tests { #[test] fn get_path_and_canonicalized_parameters_test() { let url_str = "/machine/a8016240-7286-49ef-8981-63520cb8f6d0/49c242ba%2Dc18a%2D4f6c%2D8cf8%2D85ff790b6431.%5Fzpeng%2Debpf%2Dvm2?comp=config&keyOnly&comp=again&type=hostingEnvironmentConfig&incarnation=1&resource=https%3a%2f%2fstorage.azure.com%2f"; let url = url_str.parse::().unwrap(); let path_para = super::get_path_and_canonicalized_parameters(&url); assert_eq!("/machine/a8016240-7286-49ef-8981-63520cb8f6d0/49c242ba%2Dc18a%2D4f6c%2D8cf8%2D85ff790b6431.%5Fzpeng%2Debpf%2Dvm2", path_para.0, "path mismatch"); assert_eq!( "comp=again&comp=config&incarnation=1&keyonly&resource=https%3a%2f%2fstorage.azure.com%2f&type=hostingEnvironmentConfig", path_para.1, "query parameters mismatch" ); } } GuestProxyAgent-1.0.30/proxy_agent/src/common/logger.rs000066400000000000000000000035601500521614600231520ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT use crate::common::cli; use proxy_agent_shared::{ logger::{logger_manager, LoggerLevel}, misc_helpers, }; pub const AGENT_LOGGER_KEY: &str = "Agent_Logger"; pub fn write(message: String) { log(LoggerLevel::Trace, message); } pub fn write_information(message: String) { log(LoggerLevel::Info, message); } pub fn write_warning(message: String) { log(LoggerLevel::Warn, message); } pub fn write_error(message: String) { log(LoggerLevel::Error, message); } fn log(log_level: LoggerLevel, message: String) { if log_level != LoggerLevel::Trace { write_console_log(message.to_string()); }; logger_manager::log(AGENT_LOGGER_KEY.to_string(), log_level, message); } pub fn write_console_log(message: String) { if cli::CLI.is_console_mode() { println!( "{} {}", misc_helpers::get_date_time_string_with_milliseconds(), message ); } else { println!("{}", message); } } #[cfg(not(windows))] pub fn write_serial_console_log(message: String) { use proxy_agent_shared::misc_helpers; use std::io::Write; let message = format!( "{} {}_{}({}) - {}\n", misc_helpers::get_date_time_string_with_milliseconds(), env!("CARGO_PKG_NAME"), misc_helpers::get_current_version(), std::process::id(), message ); const SERIAL_CONSOLE_PATH: &str = "/dev/console"; match std::fs::OpenOptions::new() .write(true) .open(SERIAL_CONSOLE_PATH) { Ok(mut serial_console) => { if serial_console.write_all(message.as_bytes()).is_err() { eprintln!("Failed to write to serial console: {}", message); } } Err(e) => { eprintln!("Failed to open serial console: {}", e); } } } GuestProxyAgent-1.0.30/proxy_agent/src/common/result.rs000066400000000000000000000002271500521614600232060ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT use super::error::Error; pub type Result = core::result::Result; GuestProxyAgent-1.0.30/proxy_agent/src/common/windows.rs000066400000000000000000000123421500521614600233630ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT use crate::common::{ error::{Error, WindowsApiErrorType}, result::Result, }; use std::mem::MaybeUninit; use std::path::Path; use windows_sys::Win32::Security::Cryptography::{ // 
crypt32.dll // msasn1.dll (ASN.1 library) is also used by crypt32.dll CryptProtectData, CryptUnprotectData, CRYPTOAPI_BLOB, }; use windows_sys::Win32::System::SystemInformation::{ GetSystemInfo, // kernel32.dll GlobalMemoryStatusEx, // kernel32.dll MEMORYSTATUSEX, SYSTEM_INFO, }; pub fn get_processor_count() -> usize { let mut data = MaybeUninit::::uninit(); unsafe { GetSystemInfo(data.as_mut_ptr()) }; let data = unsafe { data.assume_init() }; data.dwNumberOfProcessors as usize } pub fn get_memory_in_mb() -> Result { let mut data = MaybeUninit::::uninit(); let data = data.as_mut_ptr(); unsafe { (*data).dwLength = std::mem::size_of::() as u32; if GlobalMemoryStatusEx(data) == 0 { return Err(Error::WindowsApi( WindowsApiErrorType::GlobalMemoryStatusEx(std::io::Error::last_os_error()), )); } let memory_in_mb = (*data).ullTotalPhys / 1024 / 1024; Ok(memory_in_mb) } } pub fn store_key_data(encrypted_file_path: &Path, key_data: String) -> Result<()> { let data = key_data.as_bytes(); let data_in = CRYPTOAPI_BLOB { cbData: data.len() as u32, pbData: data.as_ptr() as *mut u8, }; let mut data_out = CRYPTOAPI_BLOB { cbData: 0, pbData: std::ptr::null_mut(), }; let result = unsafe { CryptProtectData( &data_in, std::ptr::null_mut(), std::ptr::null_mut(), std::ptr::null_mut(), std::ptr::null_mut(), 0, &mut data_out, ) }; if result == 0 { return Err(Error::WindowsApi(WindowsApiErrorType::CryptProtectData( std::io::Error::last_os_error(), ))); } let encrypted_data = unsafe { std::slice::from_raw_parts(data_out.pbData, data_out.cbData as usize).to_vec() }; unsafe { windows_sys::Win32::System::Memory::LocalFree(data_out.pbData as isize) }; std::fs::write(encrypted_file_path, encrypted_data).map_err(|e| { Error::Io( format!( "store_encrypt_key write file '{}' failed", encrypted_file_path.display() ), e, ) })?; Ok(()) } pub fn fetch_key_data(encrypted_file_path: &Path) -> Result { let encrypted_data = std::fs::read(encrypted_file_path).map_err(|e| { Error::Io( format!( "fetch_encrypted_key read file '{}' failed", encrypted_file_path.display() ), e, ) })?; let data_in = CRYPTOAPI_BLOB { cbData: encrypted_data.len() as u32, pbData: encrypted_data.as_ptr() as *mut u8, }; let mut data_out = CRYPTOAPI_BLOB { cbData: 0, pbData: std::ptr::null_mut(), }; let result = unsafe { CryptUnprotectData( &data_in, std::ptr::null_mut(), std::ptr::null_mut(), std::ptr::null_mut(), std::ptr::null_mut(), 0, &mut data_out, ) }; if result == 0 { return Err(Error::WindowsApi(WindowsApiErrorType::CryptUnprotectData( std::io::Error::last_os_error(), ))); } let decrypted_data = unsafe { std::slice::from_raw_parts(data_out.pbData as *const u8, data_out.cbData as usize).to_vec() }; unsafe { windows_sys::Win32::System::Memory::LocalFree(data_out.pbData as isize) }; let key_data = String::from_utf8_lossy(&decrypted_data).to_string(); Ok(key_data) } #[cfg(test)] mod tests { use std::{env, fs}; use proxy_agent_shared::misc_helpers; #[test] fn get_processor_count_test() { let count = super::get_processor_count(); println!("Processor count: {}", count); assert_ne!(0, count, "Processor count cannot be 0."); } #[test] fn get_memory_in_mb_test() { let memory = super::get_memory_in_mb(); match memory { Ok(memory) => { assert_ne!(0, memory, "Memory cannot be 0."); } Err(e) => assert!(false, "{}", format!("Failed to get memory: {}", e)), } } #[test] fn store_fetch_data_test() { let mut temp_test_path = env::temp_dir(); temp_test_path.push("store_fetch_data_test"); // clean up and ignore the clean up errors _ = fs::remove_dir_all(&temp_test_path); 
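        // round-trip check: data protected via CryptProtectData (DPAPI) in store_key_data
        // must decrypt back to the original string through fetch_key_data below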
misc_helpers::try_create_folder(&temp_test_path).unwrap(); let key_data = "test data".to_string(); let encrypted_file_path = temp_test_path.join("test_data.encrypted"); super::store_key_data(&encrypted_file_path, key_data.clone()).unwrap(); let fetched_key_data = super::fetch_key_data(&encrypted_file_path).unwrap(); assert_eq!(key_data, fetched_key_data); // clean up and ignore the clean up errors _ = fs::remove_dir_all(&temp_test_path); } } GuestProxyAgent-1.0.30/proxy_agent/src/host_clients.rs000066400000000000000000000002431500521614600230740ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT pub mod goal_state; pub mod imds_client; pub mod instance_info; pub mod wire_server_client; GuestProxyAgent-1.0.30/proxy_agent/src/host_clients/000077500000000000000000000000001500521614600225275ustar00rootroot00000000000000GuestProxyAgent-1.0.30/proxy_agent/src/host_clients/goal_state.rs000066400000000000000000000215071500521614600252240ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT //! This module contains the structs for the goal state and shared config from wire server. use serde_derive::{Deserialize, Serialize}; #[derive(Deserialize, Serialize)] #[allow(non_snake_case)] pub struct GoalState { Version: String, Incarnation: u32, Machine: MachineField, Container: ContainerField, } #[derive(Deserialize, Serialize)] #[allow(non_snake_case)] struct MachineField { ExpectedState: String, StopRolesDeadlineHint: u64, #[serde(skip_serializing_if = "Option::is_none")] LBProbePorts: Option, ExpectHealthReport: String, } #[derive(Deserialize, Serialize)] #[allow(non_snake_case)] struct LbProbePortsField { #[serde(rename = "Port")] ports: Vec, } #[derive(Deserialize, Serialize)] #[allow(non_snake_case)] struct ContainerField { ContainerId: String, #[serde(rename = "RoleInstanceList")] RoleInstanceList: RoleInstanceListField, } #[derive(Deserialize, Serialize)] #[allow(non_snake_case)] struct RoleInstanceListField { #[serde(rename = "RoleInstance")] RoleInstance: Vec, } #[derive(Deserialize, Serialize)] #[allow(non_snake_case)] struct RoleInstanceField { InstanceId: String, State: String, Configuration: RoleConfigField, } #[derive(Deserialize, Serialize)] #[allow(non_snake_case)] struct RoleConfigField { HostingEnvironmentConfig: String, SharedConfig: String, ExtensionsConfig: String, FullConfig: String, Certificates: String, ConfigName: String, } impl GoalState { pub fn get_container_id(&self) -> String { self.Container.ContainerId.to_string() } pub fn get_shared_config_uri(&self) -> String { self.Container.RoleInstanceList.RoleInstance[0] .Configuration .SharedConfig .to_string() } } #[derive(Deserialize, Serialize, PartialEq)] #[allow(non_snake_case)] pub struct SharedConfig { Deployment: DeploymentField, Role: RoleField, Instances: InstancesField, } #[derive(Deserialize, Serialize, PartialEq)] #[allow(non_snake_case)] struct DeploymentField { name: String, guid: String, incarnation: String, } #[derive(Deserialize, Serialize, PartialEq)] #[allow(non_snake_case)] struct RoleField { guid: String, name: String, } #[derive(Deserialize, Serialize, PartialEq)] struct InstancesField { #[serde(rename = "Instance")] instances: Vec, } #[derive(Deserialize, Serialize, PartialEq)] #[allow(non_snake_case)] struct SharedConfigInstance { id: String, address: String, } impl SharedConfig { pub fn get_deployment_name(&self) -> String { self.Deployment.name.to_string() } pub fn get_role_name(&self) -> String { 
self.Role.name.to_string() } pub fn get_role_instance_name(&self) -> String { match self.Instances.instances.first() { Some(instance) => instance.id.to_string(), None => String::new(), } } } #[cfg(test)] mod tests { use crate::host_clients::goal_state::SharedConfig; use super::GoalState; #[test] fn goal_state_test() { let goal_state_str = r#" 2015-04-05 16 Started 300000 16001 TRUE http://168.63.129.16:80/machine/?comp=package&incarnation=Win8-Win8_2.7.32211.3_221108-1339_GuestAgentPackage_NoWER.zip Win8-Win8_2.7.32211.3_221108-1339_GuestAgentPackage_NoWER.zip 374188df-b0a2-456a-a7b2-83f28b18d36f 7d2798bb72a0413d9a60b355277df726.TenantAdminApi.Worker_IN_0 Started http://168.63.129.16:80/machine/374188df-b0a2-456a-a7b2-83f28b18d36f/7d2798bb72a0413d9a60b355277df726.TenantAdminApi.Worker%5FIN%5F0?comp=config&type=hostingEnvironmentConfig&incarnation=16 http://168.63.129.16:80/machine/374188df-b0a2-456a-a7b2-83f28b18d36f/7d2798bb72a0413d9a60b355277df726.TenantAdminApi.Worker%5FIN%5F0?comp=config&type=sharedConfig&incarnation=16 http://168.63.129.16:80/machine/374188df-b0a2-456a-a7b2-83f28b18d36f/7d2798bb72a0413d9a60b355277df726.TenantAdminApi.Worker%5FIN%5F0?comp=config&type=extensionsConfig&incarnation=16 http://168.63.129.16:80/machine/374188df-b0a2-456a-a7b2-83f28b18d36f/7d2798bb72a0413d9a60b355277df726.TenantAdminApi.Worker%5FIN%5F0?comp=config&type=fullConfig&incarnation=16 http://168.63.129.16:80/machine/374188df-b0a2-456a-a7b2-83f28b18d36f/7d2798bb72a0413d9a60b355277df726.TenantAdminApi.Worker%5FIN%5F0?comp=certificates&incarnation=16 7d2798bb72a0413d9a60b355277df726.132.7d2798bb72a0413d9a60b355277df726.78.TenantAdminApi.Worker_IN_0.1.xml "#; let goal_state = serde_xml_rs::from_str::(goal_state_str).unwrap(); assert_eq!( "374188df-b0a2-456a-a7b2-83f28b18d36f", goal_state.get_container_id(), "ContainerId mismatch" ); assert_eq!("http://168.63.129.16:80/machine/374188df-b0a2-456a-a7b2-83f28b18d36f/7d2798bb72a0413d9a60b355277df726.TenantAdminApi.Worker%5FIN%5F0?comp=config&type=sharedConfig&incarnation=16", goal_state.get_shared_config_uri(), "SharedConfig mismatch"); } #[test] fn shared_config_test() { let shared_config_str = r#" "#; let shared_config = serde_xml_rs::from_str::(shared_config_str).unwrap(); assert_eq!( "7d2798bb72a0413d9a60b355277df726", shared_config.get_deployment_name(), "deployment_name mismatch" ); assert_eq!( "TenantAdminApi.Worker", shared_config.get_role_name(), "role_name mismatch" ); assert_eq!( "TenantAdminApi.Worker_IN_0", shared_config.get_role_instance_name(), "role_instance_name mismatch" ); } } GuestProxyAgent-1.0.30/proxy_agent/src/host_clients/imds_client.rs000066400000000000000000000042771500521614600254010ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT //! This module contains the logic to interact with the IMDS service. //! The IMDS service is used to get the instance information of the VM. //! The GPA service uses the IMDS service to get the instance information of the VM. //! //! Example //! ```rust //! use proxy_agent::common::constants; //! use proxy_agent::host_clients::imds_client; //! use proxy_agent::shared_state::key_keeper_wrapper::KeyKeeperSharedState; //! let key_keeper_shared_state = KeyKeeperSharedState::new(); //! let imds_client = imds_client::ImdsClient::new( //! constants::IMDS_IP, //! constants::IMDS_PORT, //! key_keeper_shared_state, //! ); //! let instance_info = imds_client.get_imds_instance_info().await.unwrap(); //! //! 
``` use super::instance_info::InstanceInfo; use crate::common::{error::Error, hyper_client, logger, result::Result}; use crate::shared_state::key_keeper_wrapper::KeyKeeperSharedState; use hyper::Uri; use std::collections::HashMap; pub struct ImdsClient { ip: String, port: u16, key_keeper_shared_state: KeyKeeperSharedState, } const IMDS_URI: &str = "metadata/instance?api-version=2018-02-01"; impl ImdsClient { pub fn new(ip: &str, port: u16, key_keeper_shared_state: KeyKeeperSharedState) -> Self { ImdsClient { ip: ip.to_string(), port, key_keeper_shared_state, } } pub async fn get_imds_instance_info(&self) -> Result { let url: String = format!("http://{}:{}/{}", self.ip, self.port, IMDS_URI); let url: Uri = url .parse::() .map_err(|e| Error::ParseUrl(url, e.to_string()))?; let mut headers = HashMap::new(); headers.insert("Metadata".to_string(), "true".to_string()); hyper_client::get( &url, &headers, self.key_keeper_shared_state .get_current_key_guid() .await .unwrap_or(None), self.key_keeper_shared_state .get_current_key_value() .await .unwrap_or(None), logger::write_warning, ) .await } } GuestProxyAgent-1.0.30/proxy_agent/src/host_clients/instance_info.rs000066400000000000000000000223351500521614600257210ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT //! This module contains the logic to get the instance information from the IMDS service. use serde_derive::{Deserialize, Serialize}; #[derive(Deserialize, Serialize)] #[allow(non_snake_case)] pub struct InstanceInfo { compute: ComputeInfo, } #[derive(Deserialize, Serialize)] #[allow(non_snake_case)] struct ComputeInfo { location: String, name: String, resourceGroupName: String, subscriptionId: String, vmId: String, vmSize: String, #[serde(skip_serializing_if = "Option::is_none")] offer: Option, } impl InstanceInfo { pub fn get_subscription_id(&self) -> String { self.compute.subscriptionId.to_string() } pub fn get_vm_id(&self) -> String { self.compute.vmId.to_string() } pub fn get_resource_group_name(&self) -> String { self.compute.resourceGroupName.to_string() } pub fn get_image_origin(&self) -> u64 { let image_origin: u64; match &self.compute.offer { Some(offer) => { if offer.is_empty() { image_origin = 0; // custom } else { image_origin = 1; // platform } } None => { image_origin = 0; // custom } } image_origin } } #[cfg(test)] mod tests { use super::InstanceInfo; #[test] fn compute_instance_test() { let instance_string = r#"{ "compute": { "azEnvironment": "AZUREPUBLICCLOUD", "additionalCapabilities": { "hibernationEnabled": "true" }, "hostGroup": { "id": "testHostGroupId" }, "extendedLocation": { "type": "edgeZone", "name": "microsoftlosangeles" }, "evictionPolicy": "", "isHostCompatibilityLayerVm": "true", "licenseType": "Windows_Client", "location": "westus", "name": "examplevmname", "offer": "WindowsServer", "osProfile": { "adminUsername": "admin", "computerName": "examplevmname", "disablePasswordAuthentication": "true" }, "osType": "Windows", "placementGroupId": "f67c14ab-e92c-408c-ae2d-da15866ec79a", "plan": { "name": "planName", "product": "planProduct", "publisher": "planPublisher" }, "platformFaultDomain": "36", "platformSubFaultDomain": "", "platformUpdateDomain": "42", "priority": "Regular", "publicKeys": [{ "keyData": "ssh-rsa 0", "path": "/home/user/.ssh/authorized_keys0" }, { "keyData": "ssh-rsa 1", "path": "/home/user/.ssh/authorized_keys1" } ], "publisher": "RDFE-Test-Microsoft-Windows-Server-Group", "resourceGroupName": "macikgo-test-may-23", "resourceId": 
"/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx/resourceGroups/macikgo-test-may-23/providers/Microsoft.Compute/virtualMachines/examplevmname", "securityProfile": { "secureBootEnabled": "true", "virtualTpmEnabled": "false", "encryptionAtHost": "true", "securityType": "TrustedLaunch" }, "sku": "2019-Datacenter", "storageProfile": { "dataDisks": [{ "bytesPerSecondThrottle": "979202048", "caching": "None", "createOption": "Empty", "diskCapacityBytes": "274877906944", "diskSizeGB": "1024", "image": { "uri": "" }, "isSharedDisk": "false", "isUltraDisk": "true", "lun": "0", "managedDisk": { "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx/resourceGroups/macikgo-test-may-23/providers/Microsoft.Compute/disks/exampledatadiskname", "storageAccountType": "StandardSSD_LRS" }, "name": "exampledatadiskname", "opsPerSecondThrottle": "65280", "vhd": { "uri": "" }, "writeAcceleratorEnabled": "false" }], "imageReference": { "id": "", "offer": "WindowsServer", "publisher": "MicrosoftWindowsServer", "sku": "2019-Datacenter", "version": "latest" }, "osDisk": { "caching": "ReadWrite", "createOption": "FromImage", "diskSizeGB": "30", "diffDiskSettings": { "option": "Local" }, "encryptionSettings": { "enabled": "false", "diskEncryptionKey": { "sourceVault": { "id": "/subscriptions/test-source-guid/resourceGroups/testrg/providers/Microsoft.KeyVault/vaults/test-kv" }, "secretUrl": "https://test-disk.vault.azure.net/secrets/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx" }, "keyEncryptionKey": { "sourceVault": { "id": "/subscriptions/test-key-guid/resourceGroups/testrg/providers/Microsoft.KeyVault/vaults/test-kv" }, "keyUrl": "https://test-key.vault.azure.net/secrets/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx" } }, "image": { "uri": "" }, "managedDisk": { "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx/resourceGroups/macikgo-test-may-23/providers/Microsoft.Compute/disks/exampleosdiskname", "storageAccountType": "StandardSSD_LRS" }, "name": "exampleosdiskname", "osType": "Windows", "vhd": { "uri": "" }, "writeAcceleratorEnabled": "false" }, "resourceDisk": { "size": "4096" } }, "subscriptionId": "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx", "tags": "baz:bash;foo:bar", "userData": "Zm9vYmFy", "version": "15.05.22", "virtualMachineScaleSet": { "id": "/subscriptions/xxxxxxxx-xxxx-xxx-xxx-xxxxxxxxxxx/resourceGroups/resource-group-name/providers/Microsoft.Compute/virtualMachineScaleSets/virtual-machine-scale-set-name" }, "vmId": "02aab8a4-74ef-476e-8182-f6d2ba4166a6", "vmScaleSetName": "crpteste9vflji9", "vmSize": "Standard_A3", "zone": "" }, "network": { "interface": [{ "ipv4": { "ipAddress": [{ "privateIpAddress": "10.144.133.132", "publicIpAddress": "" }], "subnet": [{ "address": "10.144.133.128", "prefix": "26" }] }, "ipv6": { "ipAddress": [ ] }, "macAddress": "0011AAFFBB22" }] } }"#; let instance_info = serde_json::from_str::(instance_string).unwrap(); assert_eq!( "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx", instance_info.get_subscription_id(), "subscription_id mismatch" ); assert_eq!( "02aab8a4-74ef-476e-8182-f6d2ba4166a6", instance_info.get_vm_id(), "vm id mismatch" ); assert_eq!( "macikgo-test-may-23", instance_info.get_resource_group_name(), "resource_group_name mismatch" ); assert_eq!(1, instance_info.get_image_origin(), "image_origin mismatch"); } } GuestProxyAgent-1.0.30/proxy_agent/src/host_clients/wire_server_client.rs000066400000000000000000000117361500521614600267770ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // 
SPDX-License-Identifier: MIT //! This module contains the logic to interact with the wire server for sending telemetry data and getting goal state. //! Example //! ```rust //! use proxy_agent::common::constants; //! use proxy_agent::host_clients::wire_server_client; //! use proxy_agent::shared_state::key_keeper_wrapper::KeyKeeperSharedState; //! //! let key_keeper_shared_state = KeyKeeperSharedState::new(); //! //! let wire_server_client = wire_server_client::WireServerClient::new(constants::WIRE_SERVER_IP.to_string(), 80, key_keeper_shared_state); //! let goal_state = wire_server_client.get_goalstate().await; //! let shared_config = wire_server_client.get_shared_config(goal_state.get_shared_config_uri()).await; //! //! let telemetry_data = "[xml telemetry data]".to_string(); //! wire_server_client.send_telemetry_data(telemetry_data).await; //! //! ``` use crate::host_clients::goal_state::{GoalState, SharedConfig}; use crate::{ common::{ error::{Error, WireServerErrorType}, hyper_client, logger, result::Result, }, shared_state::key_keeper_wrapper::KeyKeeperSharedState, }; use http::Method; use hyper::Uri; use std::collections::HashMap; pub struct WireServerClient { ip: String, port: u16, key_keeper_shared_state: KeyKeeperSharedState, } const TELEMETRY_DATA_URI: &str = "machine/?comp=telemetrydata"; const GOALSTATE_URI: &str = "machine?comp=goalstate"; impl WireServerClient { pub fn new(ip: &str, port: u16, key_keeper_shared_state: KeyKeeperSharedState) -> Self { WireServerClient { ip: ip.to_string(), port, key_keeper_shared_state, } } pub async fn send_telemetry_data(&self, xml_data: String) -> Result<()> { if xml_data.is_empty() { return Ok(()); } let url = format!("http://{}:{}/{}", self.ip, self.port, TELEMETRY_DATA_URI); let url: Uri = url .parse::() .map_err(|e| Error::ParseUrl(url, e.to_string()))?; let mut headers = HashMap::new(); headers.insert("x-ms-version".to_string(), "2012-11-30".to_string()); headers.insert( "Content-Type".to_string(), "text/xml; charset=utf-8".to_string(), ); let request = hyper_client::build_request( Method::POST, &url, &headers, Some(xml_data.as_bytes()), None, // post telemetry data does not require signing None, )?; let response = match hyper_client::send_request(&self.ip, self.port, request, logger::write_warning) .await { Ok(r) => r, Err(e) => { return Err(Error::WireServer( WireServerErrorType::Telemetry, format!("Failed to send request {}", e), )) } }; let status = response.status(); if !status.is_success() { return Err(Error::WireServer( WireServerErrorType::Telemetry, format!( "Failed to get response from {}, status code: {}", url, status ), )); } Ok(()) } pub async fn get_goalstate(&self) -> Result { let url = format!("http://{}:{}/{}", self.ip, self.port, GOALSTATE_URI); let url = url .parse::() .map_err(|e| Error::ParseUrl(url, e.to_string()))?; let mut headers = HashMap::new(); headers.insert("x-ms-version".to_string(), "2012-11-30".to_string()); hyper_client::get( &url, &headers, self.key_keeper_shared_state .get_current_key_guid() .await .unwrap_or(None), self.key_keeper_shared_state .get_current_key_value() .await .unwrap_or(None), logger::write_warning, ) .await .map_err(|e| Error::WireServer(WireServerErrorType::GoalState, e.to_string())) } pub async fn get_shared_config(&self, url: String) -> Result { let mut headers = HashMap::new(); let url = url .parse::() .map_err(|e| Error::ParseUrl(url, e.to_string()))?; headers.insert("x-ms-version".to_string(), "2012-11-30".to_string()); hyper_client::get( &url, &headers, 
self.key_keeper_shared_state .get_current_key_guid() .await .unwrap_or(None), self.key_keeper_shared_state .get_current_key_value() .await .unwrap_or(None), logger::write_warning, ) .await .map_err(|e| Error::WireServer(WireServerErrorType::SharedConfig, e.to_string())) } } GuestProxyAgent-1.0.30/proxy_agent/src/key_keeper.rs000066400000000000000000001122151500521614600225240ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT //! The key keeper module is responsible for polling the secure channel status from the WireServer endpoint. //! It polls the secure channel status at a specified interval and update the secure channel state, key details, access control rule details. //! This module will be launched when the GPA service is started. //! It start the redirector/eBPF module when the key keeper task is running. //! Example: //! ```rust //! use proxy_agent::key_keeper; //! use proxy_agent::shared_state::SharedState; //! use std::sync::{Arc, Mutex}; //! use hyper::Uri; //! use std::path::PathBuf; //! use std::time::Duration; //! //! let shared_state = SharedState::start_all(); //! let base_url = "http://127:0.0.1:8081/"; //! let key_dir = PathBuf::from("path"); //! let interval = Duration::from_secs(10); //! let config_start_redirector = false; //! let key_keeper = key_keeper::KeyKeeper::new(base_url.parse().unwrap(), key_dir, interval, config_start_redirector, &shared_state); //! tokio::spawn(key_keeper.poll_secure_channel_status()); //! ``` pub mod key; use self::key::Key; use crate::common::error::{Error, KeyErrorType}; use crate::common::result::Result; use crate::common::{constants, helpers, logger}; use crate::provision; use crate::proxy::authorization_rules::{AuthorizationRulesForLogging, ComputedAuthorizationRules}; use crate::shared_state::agent_status_wrapper::{AgentStatusModule, AgentStatusSharedState}; use crate::shared_state::key_keeper_wrapper::KeyKeeperSharedState; use crate::shared_state::provision_wrapper::ProvisionSharedState; use crate::shared_state::redirector_wrapper::RedirectorSharedState; use crate::shared_state::telemetry_wrapper::TelemetrySharedState; use crate::shared_state::SharedState; use crate::{acl, redirector}; use hyper::Uri; use proxy_agent_shared::logger::LoggerLevel; use proxy_agent_shared::misc_helpers; use proxy_agent_shared::proxy_agent_aggregate_status::ModuleState; use proxy_agent_shared::telemetry::event_logger; use std::fs; use std::path::Path; use std::time::Instant; use std::{path::PathBuf, time::Duration}; use tokio_util::sync::CancellationToken; //pub const RUNNING_STATE: &str = "running"; pub const DISABLE_STATE: &str = "disabled"; pub const MUST_SIG_WIRESERVER: &str = "wireserver"; pub const MUST_SIG_WIRESERVER_IMDS: &str = "wireserverandimds"; pub const UNKNOWN_STATE: &str = "Unknown"; static FREQUENT_PULL_INTERVAL: Duration = Duration::from_secs(1); // 1 second const FREQUENT_PULL_TIMEOUT_IN_MILLISECONDS: u128 = 300000; // 5 minutes const PROVISION_TIMEUP_IN_MILLISECONDS: u128 = 120000; // 2 minute const DELAY_START_EVENT_THREADS_IN_MILLISECONDS: u128 = 60000; // 1 minute #[derive(Clone)] pub struct KeyKeeper { /// base_url: the WireServer endpoint to poll the secure channel status base_url: Uri, /// key_dir: the folder to save the key details key_dir: PathBuf, /// log_dir: the folder to log the access control rule details log_dir: PathBuf, /// interval: the interval to poll the secure channel status interval: Duration, /// cancellation_token: the cancellation token to cancel the key keeper 
task cancellation_token: CancellationToken, /// key_keeper_shared_state: the sender for the key details, secure channel state, access control rule key_keeper_shared_state: KeyKeeperSharedState, /// telemetry_shared_state: the sender for the telemetry events telemetry_shared_state: TelemetrySharedState, /// redirector_shared_state: the sender for the redirector/eBPF module redirector_shared_state: RedirectorSharedState, /// provision_shared_state: the sender for the provision state provision_shared_state: ProvisionSharedState, /// agent_status_shared_state: the sender for the agent status agent_status_shared_state: AgentStatusSharedState, } impl KeyKeeper { pub fn new( base_url: Uri, key_dir: PathBuf, log_dir: PathBuf, interval: Duration, shared_state: &SharedState, ) -> Self { KeyKeeper { base_url, key_dir, log_dir, interval, cancellation_token: shared_state.get_cancellation_token(), key_keeper_shared_state: shared_state.get_key_keeper_shared_state(), telemetry_shared_state: shared_state.get_telemetry_shared_state(), redirector_shared_state: shared_state.get_redirector_shared_state(), provision_shared_state: shared_state.get_provision_shared_state(), agent_status_shared_state: shared_state.get_agent_status_shared_state(), } } /// poll secure channel status at interval from the WireServer endpoint pub async fn poll_secure_channel_status(&self) { self.update_status_message("poll secure channel status task started.".to_string(), true) .await; if let Err(e) = misc_helpers::try_create_folder(&self.key_dir) { logger::write_warning(format!( "key folder {} created failed with error {}.", misc_helpers::path_to_string(&self.key_dir), e )); } else { logger::write(format!( "key folder {} created if not exists before.", misc_helpers::path_to_string(&self.key_dir) )); } match acl::acl_directory(self.key_dir.clone()) { Ok(()) => { logger::write(format!( "Folder {} ACLed if has not before.", misc_helpers::path_to_string(&self.key_dir) )); } Err(e) => { logger::write_warning(format!( "Folder {} ACLed failed with error {}.", misc_helpers::path_to_string(&self.key_dir), e )); } } // acl current executable dir #[cfg(windows)] { if let Ok(current_exe) = std::env::current_exe() { if let Some(current_dir) = current_exe.parent() { if let Err(e) = acl::acl_directory(current_dir.to_path_buf()) { logger::write_warning(format!( "Current executable directory {} ACLed failed with error {}.", misc_helpers::path_to_string(current_dir), e )); } } } } tokio::select! 
{ _ = self.loop_poll() => { self.update_status_message("poll_secure_channel_status task exited.".to_string(), true).await; }, _ = self.cancellation_token.cancelled() => { self.update_status_message("poll_secure_channel_status task cancelled.".to_string(), true).await; self.stop().await; } } } /// Loop to poll the secure channel status from the WireServer endpoint async fn loop_poll(&self) { let mut first_iteration: bool = true; let mut started_event_threads: bool = false; let mut provision_timeup: bool = false; let notify = match self.key_keeper_shared_state.get_notify().await { Ok(notify) => notify, Err(e) => { logger::write_error(format!("Failed to get notify: {}", e)); return; } }; // set the key keeper task state to running if let Err(e) = self .agent_status_shared_state .set_module_state(ModuleState::RUNNING, AgentStatusModule::KeyKeeper) .await { logger::write_error(format!( "Failed to set key_keeper module state to 'Running' with error: {} ", e )); } let mut start = Instant::now(); loop { if !first_iteration { // skip the sleep for the first loop let current_state = match self .key_keeper_shared_state .get_current_secure_channel_state() .await { Ok(state) => state, Err(e) => { logger::write_warning(format!( "Failed to get current secure channel state: {}", e )); UNKNOWN_STATE.to_string() } }; let sleep = if current_state == UNKNOWN_STATE && helpers::get_elapsed_time_in_millisec() < FREQUENT_PULL_TIMEOUT_IN_MILLISECONDS { // frequent poll the secure channel status every second for the first 5 minutes // until the secure channel state is known FREQUENT_PULL_INTERVAL } else { self.interval }; let time = Instant::now(); tokio::select! { // notify to query the secure channel status immediately when the secure channel state is unknown or disabled // this is to handle quicker response to the secure channel state change during VM provisioning. 
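// Branch behavior (see the handler below): when notified while the state is still 'Unknown' or
// 'disabled', the key-latch ready state is reset and the status is polled again immediately;
// when the state is already latched, key_latched is reported and the remainder of the normal
// poll interval is slept instead.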
_ = notify.notified() => { if current_state == DISABLE_STATE || current_state == UNKNOWN_STATE { logger::write_warning(format!("poll_secure_channel_status task notified and secure channel state is '{}', reset states and start poll status now.", current_state)); provision::key_latch_ready_state_reset(self.provision_shared_state.clone()).await; if let Err(e) = self.key_keeper_shared_state.update_current_secure_channel_state(UNKNOWN_STATE.to_string()).await{ logger::write_warning(format!("Failed to update secure channel state to 'Unknown': {}", e)); } if start.elapsed().as_millis() > PROVISION_TIMEUP_IN_MILLISECONDS { // already timeup, reset the start timer start = Instant::now(); provision_timeup = false; } } else { // report key latched ready to try update the provision finished time_tick provision::key_latched( self.cancellation_token.clone(), self.key_keeper_shared_state.clone(), self.telemetry_shared_state.clone(), self.provision_shared_state.clone(), self.agent_status_shared_state.clone(), ).await; let slept_time_in_millisec = time.elapsed().as_millis(); let continue_sleep = sleep.as_millis() - slept_time_in_millisec; if continue_sleep > 0 { let continue_sleep = Duration::from_millis(continue_sleep as u64); let message = format!("poll_secure_channel_status task notified but secure channel state is '{}', continue with sleep wait for {:?}.", current_state, continue_sleep); logger::write_warning(message); tokio::time::sleep(continue_sleep).await; } } }, _ = tokio::time::sleep(sleep) => {} } } first_iteration = false; if !provision_timeup && start.elapsed().as_millis() > PROVISION_TIMEUP_IN_MILLISECONDS { provision::provision_timeup( None, self.provision_shared_state.clone(), self.agent_status_shared_state.clone(), ) .await; provision_timeup = true; } if !started_event_threads && helpers::get_elapsed_time_in_millisec() > DELAY_START_EVENT_THREADS_IN_MILLISECONDS { provision::start_event_threads( self.cancellation_token.clone(), self.key_keeper_shared_state.clone(), self.telemetry_shared_state.clone(), self.provision_shared_state.clone(), self.agent_status_shared_state.clone(), ) .await; started_event_threads = true; } let status = match key::get_status(&self.base_url).await { Ok(s) => s, Err(e) => { self.update_status_message(format!("Failed to get key status - {}", e), true) .await; continue; } }; self.update_status_message(format!("Got key status successfully: {}.", status), true) .await; let mut access_control_rules_changed = false; let wireserver_rule_id = status.get_wireserver_rule_id(); let imds_rule_id: String = status.get_imds_rule_id(); let hostga_rule_id: String = status.get_hostga_rule_id(); match self .key_keeper_shared_state .update_wireserver_rule_id(wireserver_rule_id.to_string()) .await { Ok((updated, old_wire_server_rule_id)) => { if updated { logger::write_warning(format!( "Wireserver rule id changed from '{}' to '{}'.", old_wire_server_rule_id, wireserver_rule_id )); if let Err(e) = self .key_keeper_shared_state .set_wireserver_rules(status.get_wireserver_rules()) .await { logger::write_error(format!("Failed to set wireserver rules: {}", e)); } access_control_rules_changed = true; } } Err(e) => { logger::write_warning(format!("Failed to update wireserver rule id: {}", e)); } } match self .key_keeper_shared_state .update_imds_rule_id(imds_rule_id.to_string()) .await { Ok((updated, old_imds_rule_id)) => { if updated { logger::write_warning(format!( "IMDS rule id changed from '{}' to '{}'.", old_imds_rule_id, imds_rule_id )); if let Err(e) = self .key_keeper_shared_state 
.set_imds_rules(status.get_imds_rules()) .await { logger::write_error(format!("Failed to set imds rules: {}", e)); } access_control_rules_changed = true; } } Err(e) => { logger::write_warning(format!("Failed to update imds rule id: {}", e)); } } match self .key_keeper_shared_state .update_hostga_rule_id(hostga_rule_id.to_string()) .await { Ok((updated, old_hostga_rule_id)) => { if updated { logger::write_warning(format!( "HostGA rule id changed from '{}' to '{}'.", old_hostga_rule_id, hostga_rule_id )); if let Err(e) = self .key_keeper_shared_state .set_hostga_rules(status.get_hostga_rules()) .await { logger::write_error(format!("Failed to set HostGA rules: {}", e)); } access_control_rules_changed = true; } } Err(e) => { logger::write_warning(format!("Failed to update HostGA rule id: {}", e)); } } if access_control_rules_changed { if let (Ok(wireserver_rules), Ok(imds_rules), Ok(hostga_rules)) = ( self.key_keeper_shared_state.get_wireserver_rules().await, self.key_keeper_shared_state.get_imds_rules().await, self.key_keeper_shared_state.get_hostga_rules().await, ) { let rules = AuthorizationRulesForLogging::new( status.authorizationRules.clone(), ComputedAuthorizationRules { wireserver: wireserver_rules, imds: imds_rules, hostga: hostga_rules, }, ); rules.write_all(&self.log_dir, constants::MAX_LOG_FILE_COUNT); } } let state = status.get_secure_channel_state(); // check if need fetch the key if state != DISABLE_STATE && (status.keyGuid.is_none() // key has not latched yet || status.keyGuid != self.key_keeper_shared_state.get_current_key_guid().await.unwrap_or(None)) // key changed { let mut key_found = false; if let Some(guid) = &status.keyGuid { // key latched before and search the key locally first match Self::fetch_key(&self.key_dir, guid) { Ok(key) => { if let Err(e) = self.key_keeper_shared_state.update_key(key.clone()).await { logger::write_warning(format!("Failed to update key: {}", e)); } let message = helpers::write_startup_event( "Found key details from local and ready to use.", "poll_secure_channel_status", "key_keeper", logger::AGENT_LOGGER_KEY, ); self.update_status_message(message, false).await; key_found = true; provision::key_latched( self.cancellation_token.clone(), self.key_keeper_shared_state.clone(), self.telemetry_shared_state.clone(), self.provision_shared_state.clone(), self.agent_status_shared_state.clone(), ) .await; } Err(e) => { event_logger::write_event( LoggerLevel::Info, format!("Failed to fetch local key details with error: {:?}. 
Will try acquire the key details from Server.", e), "poll_secure_channel_status", "key_keeper", logger::AGENT_LOGGER_KEY, ); } }; } // if key has not latched before, // or not found // or could not read locally, // try fetch from server if !key_found { let key = match key::acquire_key(&self.base_url).await { Ok(k) => k, Err(e) => { self.update_status_message( format!("Failed to acquire key details: {:?}", e), true, ) .await; continue; } }; // persist the new key to local disk let guid = key.guid.to_string(); match Self::store_key(&self.key_dir, &key) { Ok(()) => { logger::write_information(format!( "Successfully acquired the key '{}' details from server and saved locally.", guid)); } Err(e) => { self.update_status_message( format!("Failed to save key details to file: {:?}", e), true, ) .await; continue; } } // double check the key details saved correctly to local disk if let Err(e) = Self::check_key(&self.key_dir, &key) { self.update_status_message( format!( "Failed to check the key '{}' details saved locally: {:?}.", guid, e ), true, ) .await; continue; } else { match key::attest_key(&self.base_url, &key).await { Ok(()) => { // update in memory if let Err(e) = self.key_keeper_shared_state.update_key(key.clone()).await { logger::write_warning(format!("Failed to update key: {}", e)); } let message = helpers::write_startup_event( "Successfully attest the key and ready to use.", "poll_secure_channel_status", "key_keeper", logger::AGENT_LOGGER_KEY, ); self.update_status_message(message, false).await; provision::key_latched( self.cancellation_token.clone(), self.key_keeper_shared_state.clone(), self.telemetry_shared_state.clone(), self.provision_shared_state.clone(), self.agent_status_shared_state.clone(), ) .await; } Err(e) => { logger::write_warning(format!("Failed to attest the key: {:?}", e)); continue; } } } } } // update the current secure channel state if different match self .key_keeper_shared_state .update_current_secure_channel_state(state.to_string()) .await { Ok(updated) => { if updated { // update the redirector policy map redirector::update_wire_server_redirect_policy( status.get_wire_server_mode() != DISABLE_STATE, self.redirector_shared_state.clone(), ) .await; redirector::update_imds_redirect_policy( status.get_imds_mode() != DISABLE_STATE, self.redirector_shared_state.clone(), ) .await; redirector::update_hostga_redirect_policy( status.get_hostga_mode() != DISABLE_STATE, self.redirector_shared_state.clone(), ) .await; // customer has not enforce the secure channel state if state == DISABLE_STATE { let message = helpers::write_startup_event( "Customer has not enforce the secure channel state.", "poll_secure_channel_status", "key_keeper", logger::AGENT_LOGGER_KEY, ); // Update the status message and let the provision to continue self.update_status_message(message, false).await; // clear key in memory for disabled state if let Err(e) = self.key_keeper_shared_state.clear_key().await { logger::write_warning(format!("Failed to clear key: {}", e)); } provision::key_latched( self.cancellation_token.clone(), self.key_keeper_shared_state.clone(), self.telemetry_shared_state.clone(), self.provision_shared_state.clone(), self.agent_status_shared_state.clone(), ) .await; } } } Err(e) => { logger::write_warning(format!("Failed to update secure channel state: {}", e)); } } } } async fn update_status_message(&self, message: String, log_to_file: bool) { match self .agent_status_shared_state .set_module_status_message(message.clone(), AgentStatusModule::KeyKeeper) .await { Ok(updated) => { if 
log_to_file && !updated { // not updated, log at verbose level logger::write(message); } } Err(e) => { logger::write_warning(format!("Failed to set module status message: {}", e)); } } } fn store_local_key(key_dir: &Path, key: &Key, encrypted: bool) -> Result<()> { let guid = key.guid.to_string(); let mut key_file = key_dir.to_path_buf().join(guid); if encrypted { key_file.set_extension("encrypted"); #[cfg(windows)] { crate::common::store_key_data( &key_file, serde_json::to_string(&key).map_err(|e| { Error::Key(KeyErrorType::StoreLocalKey(format!( "serialize key error: {:?} ", e ))) })?, ) } #[cfg(not(windows))] { // return NotSupported error for non-windows platform Err(Error::Key(KeyErrorType::StoreLocalKey( "Not supported to store encrypted key on non-windows platform.".to_string(), ))) } } else { key_file.set_extension("key"); misc_helpers::json_write_to_file(&key, &key_file).map_err(|e| { Error::Key(KeyErrorType::StoreLocalKey(format!( "json_write_to_file '{}' failed {}", key_file.display(), e ))) }) } } fn store_key(key_dir: &Path, key: &Key) -> Result<()> { #[cfg(windows)] { // save the key to encrypted file Self::store_local_key(key_dir, key, true) } #[cfg(not(windows))] { Self::store_local_key(key_dir, key, false) } } fn fetch_local_key(key_dir: &Path, key_guid: &str, encrypted: bool) -> Result { let mut key_file = key_dir.join(key_guid); if encrypted { key_file.set_extension("encrypted"); } else { key_file.set_extension("key"); } if !key_file.exists() { // guid.key file does not exist locally return Err(Error::Key( crate::common::error::KeyErrorType::FetchLocalKey(format!( "Key file '{}' does not exist locally.", key_file.display() )), )); } let key_data = if encrypted { #[cfg(not(windows))] { // return NotSupported error for non-windows platform return Err(Error::Key(KeyErrorType::FetchLocalKey( "Not supported to fetch encrypted key on non-windows platform.".to_string(), ))); } #[cfg(windows)] { crate::common::fetch_key_data(&key_file)? } } else { fs::read_to_string(&key_file).map_err(|e| { Error::Io(format!("read key file '{}' failed", key_file.display()), e) })? }; serde_json::from_str::(&key_data).map_err(|e| { Error::Key(crate::common::error::KeyErrorType::FetchLocalKey(format!( "Parse key data with error: {}", e ))) }) } fn fetch_key(key_dir: &Path, key_guid: &str) -> Result { // fetch encrypted key file first match Self::fetch_local_key(key_dir, key_guid, true) { Ok(key) => Ok(key), Err(_e) => { #[cfg(windows)] { logger::write_information(format!( "Failed to fetch .encrypted file with error: {}. 
Fallback to fetch .key file for windows platform.", _e )); } // fallback to fetch key file let local_key = Self::fetch_local_key(key_dir, key_guid, false)?; #[cfg(windows)] { // re-save the key to encrypted file for windows platform Self::store_local_key(key_dir, &local_key, true)?; } Ok(local_key) } } } // key was saved locally correctly before // check the key file found and its guid and key value are corrected fn check_local_key(key_dir: &Path, key: &Key, encrypted: bool) -> Result<()> { let guid = key.guid.to_string(); let local_key = Self::fetch_local_key(key_dir, &guid, encrypted)?; if local_key.guid == key.guid && local_key.key == key.key { Ok(()) } else { // guid.key file found but guid or key value is not matched Err(Error::Key( crate::common::error::KeyErrorType::CheckLocalKey( "Local key guid or key value is not matched.".to_string(), ), )) } } fn check_key(key_dir: &Path, key: &Key) -> Result<()> { #[cfg(windows)] { Self::check_local_key(key_dir, key, true) } #[cfg(not(windows))] { Self::check_local_key(key_dir, key, false) } } /// Stop the key keeper task async fn stop(&self) { if let Err(e) = self .agent_status_shared_state .set_module_state(ModuleState::STOPPED, AgentStatusModule::KeyKeeper) .await { logger::write_warning(format!( "Failed to set key_keeper module state to 'Stopped' with error: {} ", e )); } } } #[cfg(test)] mod tests { use super::key::Key; use crate::key_keeper; use crate::key_keeper::KeyKeeper; use crate::test_mock::server_mock; use proxy_agent_shared::misc_helpers; use std::env; use std::fs; use std::time::Duration; use tokio_util::sync::CancellationToken; #[tokio::test] async fn check_local_key_test() { let mut temp_test_path = env::temp_dir(); let logger_key = "check_local_key_test"; temp_test_path.push(logger_key); // clean up and ignore the clean up errors _ = fs::remove_dir_all(&temp_test_path); _ = misc_helpers::try_create_folder(&temp_test_path); let key_str = r#"{ "authorizationScheme": "Azure-HMAC-SHA256", "guid": "9cf81e97-0316-4ad3-94a7-8ccbdee8ccbf", "issued": "2021-05-05T 12:00:00Z", "key": "4A404E635266556A586E3272357538782F413F4428472B4B6250645367566B59" }"#; let key: Key = serde_json::from_str(key_str).unwrap(); KeyKeeper::store_local_key(&temp_test_path, &key, false).unwrap(); assert!(KeyKeeper::check_local_key(&temp_test_path, &key, false).is_ok()); let local_key = KeyKeeper::fetch_key(&temp_test_path, &key.guid).unwrap(); assert_eq!( key.key, local_key.key, "Key value should be matched without encrypted." ); #[cfg(windows)] { // test encrypted key for windows platform assert!(KeyKeeper::check_local_key(&temp_test_path, &key, true).is_ok()); let local_key = KeyKeeper::fetch_key(&temp_test_path, &key.guid).unwrap(); assert_eq!( key.key, local_key.key, "Key value should be matched with encrypted." 
); } _ = fs::remove_dir_all(&temp_test_path); } #[tokio::test] async fn poll_secure_channel_status_tests() { let mut temp_test_path = env::temp_dir(); temp_test_path.push("poll_secure_channel_status_tests"); let mut log_dir = temp_test_path.to_path_buf(); log_dir.push("Logs"); let mut keys_dir = temp_test_path.to_path_buf(); keys_dir.push("Keys"); // clean up and ignore the clean up errors match fs::remove_dir_all(&temp_test_path) { Ok(_) => {} Err(e) => { print!("Failed to remove_dir_all with error {}.", e); } } let cancellation_token = CancellationToken::new(); // start wire_server listener let ip = "127.0.0.1"; let port = 8081u16; tokio::spawn(server_mock::start( ip.to_string(), port, cancellation_token.clone(), )); tokio::time::sleep(Duration::from_millis(100)).await; // start with disabled secure channel state server_mock::set_secure_channel_state(false); // start poll_secure_channel_status let cloned_keys_dir = keys_dir.to_path_buf(); let key_keeper = KeyKeeper { base_url: (format!("http://{}:{}/", ip, port)).parse().unwrap(), key_dir: cloned_keys_dir.clone(), log_dir: cloned_keys_dir.clone(), interval: Duration::from_millis(10), cancellation_token: cancellation_token.clone(), key_keeper_shared_state: key_keeper::KeyKeeperSharedState::start_new(), telemetry_shared_state: key_keeper::TelemetrySharedState::start_new(), redirector_shared_state: key_keeper::RedirectorSharedState::start_new(), provision_shared_state: key_keeper::ProvisionSharedState::start_new(), agent_status_shared_state: key_keeper::AgentStatusSharedState::start_new(), }; tokio::spawn({ let key_keeper = key_keeper.clone(); async move { key_keeper.poll_secure_channel_status().await; } }); for _ in [0; 5] { // wait poll_secure_channel_status run at least one loop tokio::time::sleep(Duration::from_millis(100)).await; if keys_dir.exists() { break; } } let key_files: Vec = misc_helpers::get_files(&keys_dir).unwrap(); assert!( key_files.is_empty(), "Should not write key file at disable secure channel state" ); // set secure channel state to running server_mock::set_secure_channel_state(true); // wait poll_secure_channel_status run at least one loop tokio::time::sleep(Duration::from_millis(100)).await; let key_files = misc_helpers::get_files(&keys_dir).unwrap(); assert_eq!( 1, key_files.len(), "Should write key file at running secure channel state" ); // stop poll cancellation_token.cancel(); // clean up and ignore the clean up errors _ = fs::remove_dir_all(&temp_test_path); } } GuestProxyAgent-1.0.30/proxy_agent/src/key_keeper/000077500000000000000000000000001500521614600221545ustar00rootroot00000000000000GuestProxyAgent-1.0.30/proxy_agent/src/key_keeper/key.rs000066400000000000000000001547201500521614600233230ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT //! This module contains the KeyStatus and Key structs and the logic to latch the key from the wire server. //! The KeyStatus struct contains the status of the key and the access control rule details from the wire server. //! The Key struct contains the key details that are latched from the wire server. //! //! Example //! ```rust //! use proxy_agent::common::constants; //! use proxy_agent::key_keeper::key::{Key, KeyStatus}; //! use hyper::Uri; //! //! let base_url: Uri = format!("http://{}:{}", constants::WIRE_SERVER_IP, constants::WIRE_SERVER_PORT).parse().unwrap(); //! let status = KeyStatus::get_status(base_url.clone()).await.unwrap(); //! //! // acquire the key if the has not attest yet //! 
let key = Key::acquire_key(base_url.clone()).await.unwrap(); //! //! // attest the key //! Key::attest_key(base_url.clone(), &key).await.unwrap(); //! //! ``` use crate::{ common::{ constants, error::{Error, KeyErrorType}, hyper_client, logger, result::Result, }, proxy::{proxy_connection::ConnectionLogger, Claims}, }; use http::{Method, StatusCode}; use hyper::Uri; use proxy_agent_shared::logger::LoggerLevel; use serde_derive::{Deserialize, Serialize}; use std::ffi::OsString; use std::fmt::{Display, Formatter}; use std::{collections::HashMap, path::PathBuf}; const AUDIT_MODE: &str = "audit"; const ENFORCE_MODE: &str = "enforce"; //const ALLOW_DEFAULT_ACCESS: &str = "allow"; //const DENY_DEFAULT_ACCESS: &str = "deny"; #[derive(Serialize, Deserialize)] #[allow(non_snake_case)] pub struct KeyStatus { // The authorization scheme; // defines what the scheme is along with what algorithms will be used. // Only Azure-HMAC-SHA256 exists in V1. authorizationScheme: String, // How the guest fetches the key. Either http or vtpm. keyDeliveryMethod: String, // An integer representing the incarnation of the key. #[serde(skip_serializing_if = "Option::is_none")] keyIncarnationId: Option, // Unique ID of the key pub keyGuid: Option, // In AuthZ paradigms, specifies what keys are expected for validation. In AuthN paradigms, // specifies what keys are expected for telemetry purposes. // Exact values are TBD, but could include things like user id. requiredClaimsHeaderPairs: Option>, // One of Disabled, Wireserver, WireserverAndImds. valid at version 1.0 #[serde(skip_serializing_if = "Option::is_none")] pub secureChannelState: Option, // Indicates if the secure channel is enabled. valid at version 2.0 pub secureChannelEnabled: Option, pub version: String, // Authorization rules for guest to evaluate. 
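// Populated in version 2.0 responses; carries the per-endpoint (wireserver/imds/hostga)
// authorization items that get_secure_channel_state() and the get_*_rules()/get_*_mode()
// helpers below read from.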
#[serde(skip_serializing_if = "Option::is_none")] pub authorizationRules: Option, } #[derive(Serialize, Deserialize, Clone)] #[allow(non_snake_case)] pub struct AuthorizationRules { #[serde(skip_serializing_if = "Option::is_none")] pub imds: Option, #[serde(skip_serializing_if = "Option::is_none")] pub wireserver: Option, #[serde(skip_serializing_if = "Option::is_none")] pub hostga: Option, } #[derive(Serialize, Deserialize)] #[allow(non_snake_case)] pub struct AuthorizationItem { // The default access: allow, deny pub defaultAccess: String, // disabled, audit, enforce pub mode: String, // reference: SIG artifact resource id / inline: hashOfRules pub id: String, // This is the RBAC settings of how user can specify which process/user can access to which privilege #[serde(skip_serializing_if = "Option::is_none")] pub rules: Option, } #[derive(Serialize, Deserialize)] #[allow(non_snake_case)] pub struct AccessControlRules { #[serde(skip_serializing_if = "Option::is_none")] pub privileges: Option>, #[serde(skip_serializing_if = "Option::is_none")] pub roles: Option>, #[serde(skip_serializing_if = "Option::is_none")] pub identities: Option>, #[serde(skip_serializing_if = "Option::is_none")] pub roleAssignments: Option>, } impl Clone for AuthorizationItem { fn clone(&self) -> Self { let rules = self.rules.as_ref().map(|r| AccessControlRules { privileges: match r.privileges { Some(ref p) => { let mut privileges: Vec = Vec::new(); for privilege in p { privileges.push(privilege.clone()); } Some(privileges) } None => None, }, roles: match r.roles { Some(ref r) => { let mut roles: Vec = Vec::new(); for role in r { roles.push(role.clone()); } Some(roles) } None => None, }, identities: match r.identities { Some(ref i) => { let mut identities: Vec = Vec::new(); for identity in i { identities.push(identity.clone()); } Some(identities) } None => None, }, roleAssignments: match r.roleAssignments { Some(ref r) => { let mut role_assignments: Vec = Vec::new(); for role_assignment in r { role_assignments.push(role_assignment.clone()); } Some(role_assignments) } None => None, }, }); AuthorizationItem { defaultAccess: self.defaultAccess.to_string(), mode: self.mode.to_string(), rules, id: self.id.to_string(), } } } #[derive(Serialize, Deserialize)] #[allow(non_snake_case)] pub struct Privilege { pub name: String, pub path: String, #[serde(skip_serializing_if = "Option::is_none")] pub queryParameters: Option>, } #[derive(Serialize, Deserialize)] #[allow(non_snake_case)] pub struct Role { pub name: String, pub privileges: Vec, } #[derive(Serialize, Deserialize)] #[allow(non_snake_case)] pub struct Identity { pub name: String, #[serde(skip_serializing_if = "Option::is_none")] pub userName: Option, #[serde(skip_serializing_if = "Option::is_none")] pub groupName: Option, #[serde(skip_serializing_if = "Option::is_none")] pub exePath: Option, #[serde(skip_serializing_if = "Option::is_none")] pub processName: Option, } #[derive(Serialize, Deserialize)] #[allow(non_snake_case)] pub struct RoleAssignment { pub role: String, pub identities: Vec, } impl Clone for Privilege { fn clone(&self) -> Self { Privilege { name: self.name.to_string(), path: self.path.to_string(), queryParameters: self.queryParameters.clone(), } } } impl Privilege { pub fn is_match(&self, logger: &mut ConnectionLogger, request_url: &Uri) -> bool { logger.write( LoggerLevel::Trace, format!("Start to match privilege '{}'", self.name), ); if request_url.path().to_lowercase().starts_with(&self.path) { logger.write( LoggerLevel::Trace, format!("Matched 
privilege path '{}'", self.path), ); if let Some(query_parameters) = &self.queryParameters { logger.write( LoggerLevel::Trace, format!( "Start to match query_parameters from privilege '{}'", self.name ), ); for (key, value) in query_parameters { match hyper_client::query_pairs(request_url) .into_iter() .find(|(k, _)| k.to_lowercase() == key.to_lowercase()) { Some((_, v)) => { if v.to_lowercase() == value.to_lowercase() { logger.write( LoggerLevel::Trace, format!( "Matched query_parameters '{}:{}' from privilege '{}'", key, v, self.name ), ); } else { logger.write( LoggerLevel::Trace, format!("Not matched query_parameters value '{}' from privilege '{}'", key, self.name), ); return false; } } None => { logger.write( LoggerLevel::Trace, format!( "Not matched query_parameters key '{}' from privilege '{}'", key, self.name ), ); return false; } } } } return true; } false } } impl Clone for Role { fn clone(&self) -> Self { Role { name: self.name.to_string(), privileges: self.privileges.clone(), } } } impl Clone for Identity { fn clone(&self) -> Self { Identity { name: self.name.to_string(), userName: self.userName.clone(), groupName: self.groupName.clone(), exePath: self.exePath.clone(), processName: self.processName.clone(), } } } impl Identity { pub fn is_match(&self, logger: &mut ConnectionLogger, claims: &Claims) -> bool { logger.write( LoggerLevel::Trace, format!("Start to match identity '{}'", self.name), ); if let Some(ref user_name) = self.userName { if *user_name == claims.userName { logger.write( LoggerLevel::Trace, format!( "Matched user name '{}' from identity '{}'", user_name, self.name ), ); } else { logger.write( LoggerLevel::Trace, format!( "Not matched user name '{}' from identity '{}'", user_name, self.name ), ); return false; } } if let Some(ref process_name) = self.processName { let process_name_os: OsString = process_name.into(); if process_name_os == claims.processName { logger.write( LoggerLevel::Trace, format!( "Matched process name '{}' from identity '{}'", process_name, self.name ), ); } else { logger.write( LoggerLevel::Trace, format!( "Not matched process name '{}' from identity '{}'", process_name, self.name ), ); return false; } } if let Some(ref exe_path) = self.exePath { let process_path_buf: PathBuf = exe_path.into(); if process_path_buf == claims.processFullPath { logger.write( LoggerLevel::Trace, format!( "Matched process full path '{}' from identity '{}'", exe_path, self.name ), ); } else { logger.write( LoggerLevel::Trace, format!( "Not matched process full path '{}' from identity '{}'", exe_path, self.name ), ); return false; } } if let Some(ref group_name) = self.groupName { let mut matched = false; for claims_user_group_name in &claims.userGroups { if claims_user_group_name == group_name { logger.write( LoggerLevel::Trace, format!( "Matched user group name '{}' from identity '{}'", group_name, self.name ), ); matched = true; break; } } if !matched { logger.write( LoggerLevel::Trace, format!( "Not matched user group name '{}' from identity '{}'", group_name, self.name ), ); return false; } } true } } impl Clone for RoleAssignment { fn clone(&self) -> Self { RoleAssignment { role: self.role.to_string(), identities: self.identities.clone(), } } } impl KeyStatus { fn validate(&self) -> Result { let mut validate_message = String::new(); let mut validate_result = true; // validate authorizationScheme let authorization_scheme = self.authorizationScheme.to_string(); if authorization_scheme != constants::AUTHORIZATION_SCHEME { 
validate_message.push_str("authorizationScheme must be 'Azure-HMAC-SHA256'; "); } // validate let key_delivery_method = self.keyDeliveryMethod.to_string(); if key_delivery_method != constants::KEY_DELIVERY_METHOD_HTTP && key_delivery_method != constants::KEY_DELIVERY_METHOD_VTPM { validate_message.push_str(&format!( "keyDeliveryMethod '{}' is invalid; ", key_delivery_method )); } if self.secureChannelEnabled.is_none() && self.secureChannelState.is_none() { validate_message.push_str( format!( "Both secureChannelEnabled and secureChannelState are missing in version: {}", self.version.as_str() ) .as_str(), ); validate_result = false; } // validate secureChannelState, it has to be Disabled, Wireserver or wireserverandImds match &self.secureChannelState { Some(s) => { let state = s.to_lowercase(); if state != super::DISABLE_STATE && state != super::MUST_SIG_WIRESERVER && state != super::MUST_SIG_WIRESERVER_IMDS { validate_message .push_str(&format!("secureChannelState '{}' is invalid; ", state)); validate_result = false; } } None => { if self.version == "1.0" { validate_message.push_str("secureChannelState is missing in version: 1.0"); validate_result = false; } } } if self.secureChannelEnabled.is_none() && self.version == "2.0" { validate_message.push_str("secureChannelEnabled is missing in version: 2.0"); validate_result = false; } if !validate_result { return Err(Error::Key(KeyErrorType::KeyStatusValidation( validate_message, ))); } Ok(validate_result) } pub fn get_secure_channel_state(&self) -> String { if self.version == "2.0" { match &self.secureChannelEnabled { Some(s) => { if *s { // need read details from authorizationRules let wireserver; let imds; let hostga; match &self.authorizationRules { Some(rules) => { match &rules.wireserver { Some(item) => { let mode = item.mode.to_lowercase(); if mode == ENFORCE_MODE { wireserver = "WireServer Enforce"; } else if mode == AUDIT_MODE { wireserver = "WireServer Audit"; } else { wireserver = "WireServer Disabled"; } } None => wireserver = "WireServer Disabled", }; match &rules.imds { Some(item) => { let mode = item.mode.to_lowercase(); if mode == ENFORCE_MODE { imds = " IMDS Enforce"; } else if mode == AUDIT_MODE { imds = " IMDS Audit"; } else { imds = " IMDS Disabled"; } } None => imds = " IMDS Disabled", }; // short-term: HostGA uses wireserver mode // long-term: TBD match &rules.wireserver { Some(item) => { let mode = item.mode.to_lowercase(); if mode == ENFORCE_MODE { hostga = "HostGA Enforce"; } else if mode == AUDIT_MODE { hostga = "HostGA Audit"; } else { hostga = "HostGA Disabled"; } } None => hostga = "HostGA Disabled", }; } None => return super::DISABLE_STATE.to_string(), } format!("{} - {} - {}", wireserver, imds, hostga) } else { super::DISABLE_STATE.to_string() } } None => super::DISABLE_STATE.to_string(), } } else { // version 1.0 match &self.secureChannelState { Some(s) => s.to_lowercase(), None => super::DISABLE_STATE.to_string(), } } } pub fn get_wireserver_rule_id(&self) -> String { match self.get_wireserver_rules() { Some(item) => item.id.to_string(), None => String::new(), } } pub fn get_imds_rule_id(&self) -> String { match self.get_imds_rules() { Some(item) => item.id.to_string(), None => String::new(), } } pub fn get_hostga_rule_id(&self) -> String { match self.get_hostga_rules() { Some(item) => item.id.to_string(), None => String::new(), } } pub fn get_wireserver_rules(&self) -> Option { match &self.authorizationRules { Some(rules) => rules.wireserver.clone(), None => None, } } pub fn get_imds_rules(&self) -> Option { 
match &self.authorizationRules { Some(rules) => rules.imds.clone(), None => None, } } pub fn get_hostga_rules(&self) -> Option { // short-term: HostGA has no rules // long-term: TBD match &self.authorizationRules { Some(rules) => rules.hostga.clone(), None => None, } } pub fn get_wire_server_mode(&self) -> String { if self.version == "2.0" { match &self.authorizationRules { Some(rules) => match &rules.wireserver { Some(item) => item.mode.to_lowercase(), None => "disabled".to_string(), }, None => "disabled".to_string(), } } else { let state = match &self.secureChannelState { Some(s) => s.to_lowercase(), None => "disabled".to_string(), }; if state == "wireserver" || state == "wireserverandimds" { ENFORCE_MODE.to_string() } else { AUDIT_MODE.to_string() } } } pub fn get_imds_mode(&self) -> String { if self.version == "2.0" { match &self.authorizationRules { Some(rules) => match &rules.imds { Some(item) => item.mode.to_lowercase(), None => "disabled".to_string(), }, None => "disabled".to_string(), } } else { let state = match &self.secureChannelState { Some(s) => s.to_lowercase(), None => "disabled".to_string(), }; if state == "wireserverandimds" { ENFORCE_MODE.to_string() } else { AUDIT_MODE.to_string() } } } pub fn get_hostga_mode(&self) -> String { // match self.get_hostga_rules() { // Some(item) => item.mode.to_lowercase(), // None => "disabled".to_string(), // } // short-term: HostGA uses wireserver mode self.get_wire_server_mode() } } impl Display for KeyStatus { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { write!(f, "authorizationScheme: {}, keyDeliveryMethod: {}, keyGuid: {}, secureChannelState: {}, version: {}", self.authorizationScheme, self.keyDeliveryMethod, match &self.keyGuid { Some(s) => s.to_string(), None => "None".to_string(), }, self.get_secure_channel_state(), self.version) } } #[derive(Serialize, Deserialize)] #[allow(non_snake_case)] pub struct Key { // The authorization scheme; // defines what the scheme is along with what algorithms will be used. // Only Azure-HMAC-SHA256 exists in V1. authorizationScheme: String, // An integer representing the incarnation of the key. #[serde(skip_serializing_if = "Option::is_none")] pub incarnationId: Option, // Unique ID of the key pub guid: String, // An ISO 8601 UTC timestamp of when the key was provisioned by wire server issued: String, // Hex encoded 256-bit key. This key is used for generating HMAC signatures. 
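// The value is returned by the /secure-channel/key endpoint (see acquire_key below) and is
// attested via attest_key before the key keeper starts using it.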
pub key: String, } impl Key { // create a default empty Key pub fn empty() -> Self { Key { authorizationScheme: constants::AUTHORIZATION_SCHEME.to_string(), incarnationId: None, guid: "00000000-0000-0000-0000-000000000000".to_string(), issued: String::new(), key: String::new(), } } } impl Clone for Key { fn clone(&self) -> Self { Key { authorizationScheme: self.authorizationScheme.to_string(), guid: self.guid.to_string(), incarnationId: self.incarnationId, issued: self.issued.to_string(), key: self.key.to_string(), } } } enum KeyAction { Acquire, Attest, } impl Display for KeyAction { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { match *self { KeyAction::Acquire => write!(f, "acquire"), KeyAction::Attest => write!(f, "attest"), } } } const STATUS_URL: &str = "/secure-channel/status"; const KEY_URL: &str = "/secure-channel/key"; pub async fn get_status(base_url: &Uri) -> Result { let (host, port) = hyper_client::host_port_from_uri(base_url)?; let url = format!("http://{}:{}{}", host, port, STATUS_URL); let url: Uri = url.parse().map_err(|e| { Error::Key(KeyErrorType::ParseKeyUrl( base_url.to_string(), STATUS_URL.to_string(), e, )) })?; let mut headers = HashMap::new(); headers.insert(constants::METADATA_HEADER.to_string(), "True ".to_string()); let status: KeyStatus = hyper_client::get(&url, &headers, None, None, logger::write_warning).await?; status.validate()?; Ok(status) } pub async fn acquire_key(base_url: &Uri) -> Result { let (host, port) = hyper_client::host_port_from_uri(base_url)?; let url = format!("http://{}:{}{}", host, port, KEY_URL); let url: Uri = url.parse().map_err(|e| { Error::Key(KeyErrorType::ParseKeyUrl( base_url.to_string(), KEY_URL.to_string(), e, )) })?; let (host, port) = hyper_client::host_port_from_uri(&url)?; let mut headers = HashMap::new(); headers.insert(constants::METADATA_HEADER.to_string(), "True ".to_string()); headers.insert("Content-Type".to_string(), "application/json".to_string()); let body = r#"{"authorizationScheme": "Azure-HMAC-SHA256"}"#.to_string(); let request = hyper_client::build_request( hyper::Method::POST, &url, &headers, Some(body.as_bytes()), None, None, )?; let response = hyper_client::send_request(&host, port, request, logger::write_warning) .await .map_err(|e| { Error::Key(KeyErrorType::SendKeyRequest( format!("{}", KeyAction::Acquire), e.to_string(), )) })?; if response.status() != StatusCode::OK { return Err(Error::Key(KeyErrorType::KeyResponse( format!("{}", KeyAction::Acquire), response.status(), ))); } hyper_client::read_response_body(response).await } pub async fn attest_key(base_url: &Uri, key: &Key) -> Result<()> { // secure-channel/key/{key_guid}/key-attestation let (host, port) = hyper_client::host_port_from_uri(base_url)?; let url = format!( "http://{}:{}{}/{}/key-attestation", host, port, KEY_URL, key.guid ); let url: Uri = url .parse() .map_err(|e| Error::Key(KeyErrorType::ParseKeyUrl(base_url.to_string(), url, e)))?; let mut headers = HashMap::new(); headers.insert(constants::METADATA_HEADER.to_string(), "True ".to_string()); let request = hyper_client::build_request( Method::POST, &url, &headers, None, Some(key.guid.to_string()), Some(key.key.to_string()), )?; let response = hyper_client::send_request(&host, port, request, logger::write_warning) .await .map_err(|e| { Error::Key(KeyErrorType::SendKeyRequest( format!("{}", KeyAction::Attest), e.to_string(), )) })?; if response.status() != StatusCode::OK { return Err(Error::Key(KeyErrorType::KeyResponse( format!("{}", KeyAction::Attest), response.status(), ))); 
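// A non-200 response fails attestation; the key keeper logs the failure and moves on to its
// next poll iteration without marking the key as latched.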
} Ok(()) } #[cfg(test)] mod tests { use std::ffi::OsString; #[cfg(not(windows))] use std::os::unix::ffi::OsStringExt; #[cfg(windows)] use std::os::windows::ffi::OsStringExt; use std::path::PathBuf; use super::Key; use super::KeyStatus; use crate::common::constants; use crate::key_keeper::key::Identity; use crate::key_keeper::key::Privilege; use crate::proxy::proxy_connection::ConnectionLogger; use hyper::Uri; use serde_json::json; #[test] fn key_status_v1_test() { let status_response_v1 = r#"{ "authorizationScheme": "Azure-HMAC-SHA256", "keyDeliveryMethod": "http", "keyGuid": null, "requiredClaimsHeaderPairs": null, "secureChannelState": "Wireserver", "version": "1.0" }"#; let status_v1: KeyStatus = serde_json::from_str(status_response_v1).unwrap(); assert_eq!( constants::AUTHORIZATION_SCHEME, status_v1.authorizationScheme, "authorizationScheme mismatch" ); assert_eq!( "http", status_v1.keyDeliveryMethod, "keyDeliveryMethod mismatch" ); assert_eq!(None, status_v1.keyGuid, "keyGuid must be None"); assert_eq!( None, status_v1.requiredClaimsHeaderPairs, "requiredClaimsHeaderPairs must be None" ); assert_eq!( Some("Wireserver".to_string()), status_v1.secureChannelState, "secureChannelState mismatch" ); assert!( status_v1.keyIncarnationId.is_none(), "keyIncarnationId must be None" ); assert_eq!("1.0".to_string(), status_v1.version, "version 1.0 mismatch"); assert!( status_v1.validate().unwrap(), "Key status validation must be true" ); assert!( status_v1.secureChannelEnabled.is_none(), "secureChannelEnabled must be None in version 1.0" ); assert_eq!( "", status_v1.get_imds_rule_id(), "IMDS rule id must be empty" ); assert_eq!( "", status_v1.get_wireserver_rule_id(), "WireServer rule id must be empty" ); assert_eq!( status_v1.get_wire_server_mode(), "enforce", "WireServer mode mismatch" ); assert_eq!(status_v1.get_imds_mode(), "audit", "IMDS mode mismatch"); } #[test] fn key_status_v2_test() { let status_response = r#"{ "authorizationScheme": "Azure-HMAC-SHA256", "keyDeliveryMethod": "http", "keyGuid": null, "requiredClaimsHeaderPairs": null, "secureChannelEnabled": true, "version": "2.0", "authorizationRules": { "imds": { "defaultAccess": "allow", "mode": "enforce", "id": "sigid", "rules": { "privileges": [ { "name": "test", "path": "/test" }, { "name": "test1", "path": "/test1" } ], "roles": [ { "name": "test", "privileges": [ "test", "test1" ] } ], "identities": [ { "name": "test", "userName": "test", "groupName": "test", "exePath": "test", "processName": "test" } ], "roleAssignments": [ { "role": "test", "identities": [ "test", "test1" ] } ] } }, "wireserver": { "defaultAccess": "deny", "mode": "enforce", "id": "sigid", "rules": { "privileges": [ { "name": "test", "path": "/test", "queryParameters": { "key1": "value1", "key2": "value2" } }, { "name": "test1", "path": "/test1", "queryParameters": { "key1": "value1", "key2": "value2" } } ], "roles": [ { "name": "test", "privileges": [ "test", "test1" ] }, { "name": "test1", "privileges": [ "test", "test1" ] } ], "identities": [ { "name": "test", "userName": "test", "groupName": "test", "exePath": "test", "processName": "test" }, { "name": "test1", "userName": "test1", "groupName": "test1", "exePath": "test1", "processName": "test1" } ], "roleAssignments": [ { "role": "test", "identities": [ "test", "test1" ] }, { "role": "test1", "identities": [ "test", "test1" ] } ] } }, "hostga": { "defaultAccess": "allow", "mode": "enforce", "id": "sigid", "rules": { "privileges": [ { "name": "test", "path": "/test", "queryParameters": { "key1": "value1", 
"key2": "value2" } }, { "name": "test2", "path": "/test2", "queryParameters": { "key1": "value3", "key2": "value4" } } ], "roles": [ { "name": "test3", "privileges": [ "test1", "test2" ] }, { "name": "test6", "privileges": [ "test4", "test5" ] } ], "identities": [ { "name": "test", "userName": "test", "groupName": "test", "exePath": "test", "processName": "test" }, { "name": "test1", "userName": "test1", "groupName": "test1", "exePath": "test1", "processName": "test1" } ], "roleAssignments": [ { "role": "test4", "identities": [ "test", "test1" ] }, { "role": "test5", "identities": [ "test", "test1" ] } ] } } } }"#; let status: KeyStatus = serde_json::from_str(status_response).unwrap(); assert_eq!( constants::AUTHORIZATION_SCHEME, status.authorizationScheme, "authorizationScheme mismatch" ); assert_eq!( "http", status.keyDeliveryMethod, "keyDeliveryMethod mismatch" ); assert_eq!("2.0".to_string(), status.version, "version 2.0 mismatch"); assert!( status.validate().unwrap(), "Key status validation must be true" ); assert!( status.secureChannelEnabled.is_some(), "secureChannelEnabled must have value in version 2.0" ); assert!( status.secureChannelState.is_none(), "secureChannelState must be None in version 2.0" ); // validate IMDS rules let imds_rules = status.get_imds_rules().unwrap(); assert_eq!("allow", imds_rules.defaultAccess, "defaultAccess mismatch"); assert_eq!("sigid", status.get_imds_rule_id(), "IMDS rule id mismatch"); assert_eq!("enforce", status.get_imds_mode(), "IMDS mode mismatch"); // validate WireServer rules let wireserver_rules = status.get_wireserver_rules().unwrap(); assert_eq!( "deny", wireserver_rules.defaultAccess, "defaultAccess mismatch" ); assert_eq!( "sigid", status.get_wireserver_rule_id(), "WireServer rule id mismatch" ); assert_eq!( "enforce", status.get_wire_server_mode(), "WireServer mode mismatch" ); // validate WireServer rule details let first_privilege = &wireserver_rules .rules .as_ref() .unwrap() .privileges .as_ref() .unwrap()[0]; assert_eq!("test", first_privilege.name, "privilege name mismatch"); assert_eq!("/test", first_privilege.path, "privilege path mismatch"); assert_eq!( "value1", first_privilege.queryParameters.as_ref().unwrap()["key1"], "privilege queryParameters mismatch" ); assert_eq!( "value2", first_privilege.queryParameters.as_ref().unwrap()["key2"], "privilege queryParameters mismatch" ); let second_privilege = &wireserver_rules .rules .as_ref() .unwrap() .privileges .as_ref() .unwrap()[1]; assert_eq!( "test1", second_privilege.name, "second privilege name mismatch" ); assert_eq!( "/test1", second_privilege.path, "second privilege path mismatch" ); assert_eq!( "value1", second_privilege.queryParameters.as_ref().unwrap()["key1"], "second privilege queryParameters mismatch" ); assert_eq!( "value2", second_privilege.queryParameters.as_ref().unwrap()["key2"], "second privilege queryParameters mismatch" ); let first_role = &wireserver_rules .rules .as_ref() .unwrap() .roles .as_ref() .unwrap()[0]; assert_eq!("test", first_role.name, "role name mismatch"); assert_eq!("test", first_role.privileges[0], "role privilege mismatch"); assert_eq!("test1", first_role.privileges[1], "role privilege mismatch"); let first_identity = &wireserver_rules .rules .as_ref() .unwrap() .identities .as_ref() .unwrap()[0]; assert_eq!("test", first_identity.name, "identity name mismatch"); assert_eq!( "test", first_identity.userName.as_ref().unwrap(), "identity userName mismatch" ); assert_eq!( "test", first_identity.groupName.as_ref().unwrap(), "identity groupName 
mismatch" ); assert_eq!( "test", first_identity.exePath.as_ref().unwrap(), "identity exePath mismatch" ); assert_eq!( "test", first_identity.processName.as_ref().unwrap(), "identity processName mismatch" ); let first_role_assignment = &wireserver_rules .rules .as_ref() .unwrap() .roleAssignments .as_ref() .unwrap()[0]; assert_eq!( "test", first_role_assignment.role, "roleAssignment role mismatch" ); assert_eq!( "test", first_role_assignment.identities[0], "roleAssignment identities mismatch" ); // Validate HostGA rules let hostga_rules = status.get_hostga_rules().unwrap(); assert_eq!( "allow", hostga_rules.defaultAccess, "defaultAccess mismatch" ); assert_eq!( "sigid", status.get_hostga_rule_id(), "HostGA rule id mismatch" ); assert_eq!("enforce", status.get_hostga_mode(), "HostGA mode mismatch"); // Validate HostGA rule details // Retrieve and validate second privilege for HostGA let privilege = &hostga_rules .rules .as_ref() .unwrap() .privileges .as_ref() .unwrap()[1]; assert_eq!("test2", privilege.name, "privilege name mismatch"); assert_eq!("/test2", privilege.path, "privilege path mismatch"); assert_eq!( "value3", privilege.queryParameters.as_ref().unwrap()["key1"], "privilege queryParameters mismatch" ); assert_eq!( "value4", privilege.queryParameters.as_ref().unwrap()["key2"], "privilege queryParameters mismatch" ); // Retrieve and validate second role for HostGA let role = &hostga_rules.rules.as_ref().unwrap().roles.as_ref().unwrap()[1]; assert_eq!("test6", role.name, "role name mismatch"); assert_eq!("test4", role.privileges[0], "role privilege mismatch"); assert_eq!("test5", role.privileges[1], "role privilege mismatch"); // Retrieve and validate first identity for HostGA let identity = &hostga_rules .rules .as_ref() .unwrap() .identities .as_ref() .unwrap()[0]; assert_eq!("test", identity.name, "identity name mismatch"); assert_eq!( "test", identity.userName.as_ref().unwrap(), "identity userName mismatch" ); assert_eq!( "test", identity.groupName.as_ref().unwrap(), "identity groupName mismatch" ); assert_eq!( "test", identity.exePath.as_ref().unwrap(), "identity exePath mismatch" ); assert_eq!( "test", identity.processName.as_ref().unwrap(), "identity processName mismatch" ); // Retrieve and validate first role assignment for HostGA let role_assignment = &hostga_rules .rules .as_ref() .unwrap() .roleAssignments .as_ref() .unwrap()[0]; assert_eq!( "test4", role_assignment.role, "roleAssignment role mismatch" ); assert_eq!( "test", role_assignment.identities[0], "roleAssignment identities mismatch" ); } #[test] fn key_test() { let key_response = r#"{ "authorizationScheme": "Azure-HMAC-SHA256", "guid": "9cf81e97-0316-4ad3-94a7-8ccbdee8ccbf", "incarnationId": 1, "issued": "2021-05-05T 12:00:00Z", "key": "4A404E635266556A586E3272357538782F413F4428472B4B6250645367566B59" }"#; let key: Key = serde_json::from_str(key_response).unwrap(); assert_eq!( constants::AUTHORIZATION_SCHEME.to_string(), key.authorizationScheme, "authorizationScheme mismatch" ); assert_eq!( "9cf81e97-0316-4ad3-94a7-8ccbdee8ccbf".to_string(), key.guid, "guid mismatch" ); assert_eq!(Some(1), key.incarnationId, "incarnationId mismatch"); assert_eq!( "2021-05-05T 12:00:00Z".to_string(), key.issued, "issued mismatch" ); assert_eq!( "4A404E635266556A586E3272357538782F413F4428472B4B6250645367566B59".to_string(), key.key, "key mismatch" ); } #[tokio::test] async fn test_privilege_is_match() { let mut logger = ConnectionLogger::new(1, 1); let privilege = r#"{ "name": "test", "path": "/test", "queryParameters": { "key1": 
"value1", "key2": "value2" } }"#; let privilege: Privilege = serde_json::from_str(privilege).unwrap(); let url: Uri = "http://localhost/test?key1=value1&key2=value2" .parse() .unwrap(); assert!( privilege.is_match(&mut logger, &url), "privilege should be matched" ); let url = "http://localhost/test?key1=value1&key2=value3" .parse() .unwrap(); assert!( !privilege.is_match(&mut logger, &url), "privilege should not be matched" ); let url = "http://localhost/test?key1=value1".parse().unwrap(); assert!( !privilege.is_match(&mut logger, &url), "privilege should not be matched" ); let privilege1 = r#"{ "name": "test", "path": "/test" }"#; let privilege1: Privilege = serde_json::from_str(privilege1).unwrap(); let url = "http://localhost/test?key1=value1&key2=value2" .parse() .unwrap(); assert!( privilege1.is_match(&mut logger, &url), "privilege should be matched" ); let privilege2 = r#"{ "name": "test", "path": "/test", "queryParameters": { "key1": "", "key2": "" } }"#; let privilege2: Privilege = serde_json::from_str(privilege2).unwrap(); let url = "http://localhost/test?key1=value1&key2=value2" .parse() .unwrap(); assert!( !privilege2.is_match(&mut logger, &url), "privilege should not be matched" ); } #[tokio::test] async fn test_identity_is_match() { let mut logger = ConnectionLogger::new(1, 1); let mut claims = super::Claims { userName: "test".to_string(), userGroups: vec!["test".to_string()], processName: OsString::from("test"), processCmdLine: "test".to_string(), userId: 0, processId: 0, clientIp: "00.000.000".to_string(), clientPort: 0, // doesn't matter for this test runAsElevated: true, processFullPath: PathBuf::from("test"), }; let identity = r#"{ "name": "test", "userName": "test", "groupName": "test", "exePath": "test", "processName": "test" }"#; let identity: Identity = serde_json::from_str(identity).unwrap(); assert!( identity.is_match(&mut logger, &claims), "identity should be matched" ); let identity1 = r#"{ "name": "test", "userName": "test1", "groupName": "test", "exePath": "test", "processName": "test" }"#; let identity1: Identity = serde_json::from_str(identity1).unwrap(); assert!( !identity1.is_match(&mut logger, &claims), "identity should not be matched" ); // test userName let identity2 = r#"{ "name": "test", "userName": "test1" }"#; let identity2: Identity = serde_json::from_str(identity2).unwrap(); assert!( !identity2.is_match(&mut logger, &claims), "identity should not be matched" ); let identity2 = r#"{ "name": "test", "userName": "test" }"#; let identity2: Identity = serde_json::from_str(identity2).unwrap(); assert!( identity2.is_match(&mut logger, &claims), "identity should be matched" ); // test processName let identity3 = r#"{ "name": "test", "processName": "test1" }"#; let identity3: Identity = serde_json::from_str(identity3).unwrap(); assert!( !identity3.is_match(&mut logger, &claims), "identity should not be matched" ); let identity3 = r#"{ "name": "test", "processName": "Test" }"#; let identity3: Identity = serde_json::from_str(identity3).unwrap(); assert!( !identity3.is_match(&mut logger, &claims), "identity should not be matched" ); let identity3 = r#"{ "name": "test", "processName": "test" }"#; let identity3: Identity = serde_json::from_str(identity3).unwrap(); assert!( identity3.is_match(&mut logger, &claims), "identity should be matched" ); // test exePath let identity4 = r#"{ "name": "test", "exePath": "test1" }"#; let identity4: Identity = serde_json::from_str(identity4).unwrap(); assert!( !identity4.is_match(&mut logger, &claims), "identity should not be 
matched" ); let identity4 = r#"{ "name": "test", "exePath": "TEST" }"#; let identity4: Identity = serde_json::from_str(identity4).unwrap(); assert!( !identity4.is_match(&mut logger, &claims), "identity should not be matched" ); let identity4 = r#"{ "name": "test", "exePath": "test" }"#; let identity4: Identity = serde_json::from_str(identity4).unwrap(); assert!( identity4.is_match(&mut logger, &claims), "identity should be matched" ); // test groupName let identity5 = r#"{ "name": "test", "groupName": "test1" }"#; let identity5: Identity = serde_json::from_str(identity5).unwrap(); assert!( !identity5.is_match(&mut logger, &claims), "identity should not be matched" ); let identity5 = r#"{ "name": "test", "groupName": "test" }"#; let identity5: Identity = serde_json::from_str(identity5).unwrap(); assert!( identity5.is_match(&mut logger, &claims), "identity should be matched" ); // Test with non-UTF8 valid process name #[cfg(windows)] { let invalid_utf16_bytes: Vec = vec![0xD800]; // Lone surrogate (0xD800) claims.processName = OsString::from_wide(invalid_utf16_bytes.as_slice()); } #[cfg(not(windows))] { let invalid_utf8_bytes: Vec = vec![0x80]; // Invalid UTF-8 claims.processName = OsString::from_vec(invalid_utf8_bytes); } let process_name_lossy = claims.processName.to_string_lossy().to_string(); let replacement_char = "īŋŊ"; let identity6 = json!({ "name": "test", "processName": replacement_char }); let identity6: Identity = serde_json::from_value(identity6).unwrap(); assert!( !identity6.is_match(&mut logger, &claims), "identity should not be matched" ); assert_eq!( replacement_char, process_name_lossy, "process name after lossy conversion should be equal to replacement char" ); } } GuestProxyAgent-1.0.30/proxy_agent/src/main.rs000066400000000000000000000101451500521614600213240ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT pub mod acl; pub mod common; pub mod host_clients; pub mod key_keeper; pub mod provision; pub mod proxy; pub mod proxy_agent_status; pub mod redirector; pub mod service; pub mod shared_state; pub mod telemetry; #[cfg(test)] pub mod test_mock; use common::cli::{Commands, CLI}; use common::constants; use common::helpers; use provision::provision_query::ProvisionQuery; use proxy_agent_shared::misc_helpers; use shared_state::SharedState; use std::{process, time::Duration}; #[cfg(windows)] use common::logger; #[cfg(windows)] use service::windows; #[cfg(windows)] use std::ffi::OsString; #[cfg(windows)] use windows_service::{define_windows_service, service_dispatcher}; #[cfg(windows)] define_windows_service!(ffi_service_main, proxy_agent_windows_service_main); // define_windows_service does not accept async function in fffi_service_main, // also it does not allow to pass tokio runtime or handle as arguments to the function. // we have to use the global variable to set the tokio runtime handle. 
#[cfg(windows)] static ASYNC_RUNTIME_HANDLE: tokio::sync::OnceCell = tokio::sync::OnceCell::const_new(); #[tokio::main(flavor = "multi_thread")] async fn main() { // set the tokio runtime handle #[cfg(windows)] ASYNC_RUNTIME_HANDLE .set(tokio::runtime::Handle::current()) .unwrap(); // start the Instant to calculate the elapsed time let _time = helpers::get_elapsed_time_in_millisec(); if CLI.version { println!("{}", misc_helpers::get_current_version()); return; } if CLI.status { // --wait parameter to wait for the provision status until the given time in seconds // it is an optional, if not provided then it will query the provision state once by waiting for 0 seconds. let wait_time = CLI.wait.unwrap_or(0); let state = ProvisionQuery::new( constants::PROXY_AGENT_PORT, Some(Duration::from_secs(wait_time)), ) .get_provision_status_wait() .await; if !state.finished { // exit code 1 means provision not finished yet. process::exit(1); } else { // provision finished if !state.errorMessage.is_empty() { // if there is any error message then print it and exit with exit code 2. println!("{}", state.errorMessage); process::exit(2); } // no error message then exit with 0. return; } } if let Some(Commands::Console) = CLI.command { // console mode - start GPA as long running process let shared_state = SharedState::start_all(); service::start_service(shared_state.clone()).await; println!("Press Enter to end it."); let mut temp = String::new(); let _read = std::io::stdin().read_line(&mut temp); service::stop_service(shared_state.clone()); } else { // no argument provided, start the GPA as an OS service #[cfg(windows)] { match service_dispatcher::start(constants::PROXY_AGENT_SERVICE_NAME, ffi_service_main) { Ok(_) => {} Err(e) => { logger::write_error(format!("Error in starting the service dispatcher: {}", e)); } } } #[cfg(not(windows))] { service::start_service_wait().await; } } } /// This function is the entry point of the GPA windows service. #[cfg(windows)] fn proxy_agent_windows_service_main(_args: Vec) { // start the Instant to calculate the elapsed time let _time = helpers::get_elapsed_time_in_millisec(); // Pass the tokio runtime handle here to launch the windows service. let handle = ASYNC_RUNTIME_HANDLE .get() .expect("You must provide the Tokio runtime handle before this function is called"); handle.block_on(async { if let Err(e) = windows::run_service().await { logger::write_error(format!("Error in running the service: {}", e)); } }); } GuestProxyAgent-1.0.30/proxy_agent/src/provision.rs000066400000000000000000001076201500521614600224350ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT //! This module provides the provision functions for the GPA service and GPA --status command line. //! It is used to track the provision state for each module and write the provision state to provisioned.tag and status.tag files. //! It also provides the http handler to query the provision status for GPA service. //! It is used to query the provision status from GPA service http listener. //! Example for GPA service: //! ```rust //! use proxy_agent::provision; //! use proxy_agent::shared_state::agent_status_wrapper::AgentStatusModule; //! use proxy_agent::shared_state::agent_status_wrapper::AgentStatusSharedState; //! use proxy_agent::shared_state::key_keeper_wrapper::KeyKeeperSharedState; //! use proxy_agent::shared_state::provision_wrapper::ProvisionSharedState; //! use proxy_agent::shared_state::SharedState; //! 
use proxy_agent::shared_state::telemetry_wrapper::TelemetrySharedState; //! //! use std::time::Duration; //! //! let shared_state = SharedState::start_all(); //! let cancellation_token = shared_state.get_cancellation_token(); //! let key_keeper_shared_state = shared_state.get_key_keeper_shared_state(); //! let telemetry_shared_state = shared_state.get_telemetry_shared_state(); //! let provision_shared_state = shared_state.get_provision_shared_state(); //! let agent_status_shared_state = shared_state.get_agent_status_shared_state(); //! //! let provision_state = provision::get_provision_state( //! provision_shared_state.clone(), //! agent_status_shared_state.clone(), //! ).await; //! assert_eq!(false, provision_state.finished); //! assert_eq!(0, provision_state.errorMessage.len()); //! //! // update provision state when each provision finished //! provision::redirector_ready( //! cancellation_token.clone(), //! key_keeper_shared_state.clone(), //! telemetry_shared_state.clone(), //! provision_shared_state.clone(), //! agent_status_shared_state.clone(), //! ).await; //! provision::key_latched( //! cancellation_token.clone(), //! key_keeper_shared_state.clone(), //! telemetry_shared_state.clone(), //! provision_shared_state.clone(), //! agent_status_shared_state.clone(), //! ).await; //! provision::listener_started( //! cancellation_token.clone(), //! key_keeper_shared_state.clone(), //! telemetry_shared_state.clone(), //! provision_shared_state.clone(), //! agent_status_shared_state.clone(), //! ).await; //! //! let provision_state = provision::get_provision_state( //! provision_shared_state.clone(), //! agent_status_shared_state.clone(), //! ).await; //! assert_eq!(true, provision_state.finished); //! assert_eq!(0, provision_state.errorMessage.len()); //! ``` //! //! Example for GPA command line option --status [--wait seconds]: //! ```rust //! use proxy_agent::provision::ProvisionQuery; //! use std::time::Duration; //! //! let proxy_server_port = 8092; //! let provision_query = ProvisionQuery::new(proxy_server_port, None); //! let provision_not_finished_state = provision_query.get_provision_status_wait().await; //! assert_eq!(false, provision_not_finished_state.finished); //! assert_eq!(0, provision_not_finished_state.errorMessage.len()); //! //! let provision_query = ProvisionQuery::new(proxy_server_port, Some(Duration::from_millis(5))); //! let provision_finished_state = provision_query.get_provision_status_wait().await; //! assert_eq!(true, provision_finished_state.finished); //! assert_eq!(0, provision_finished_state.errorMessage.len()); //! ``` use crate::common::{config, helpers, logger}; use crate::key_keeper::{DISABLE_STATE, UNKNOWN_STATE}; use crate::proxy_agent_status; use crate::shared_state::agent_status_wrapper::{AgentStatusModule, AgentStatusSharedState}; use crate::shared_state::key_keeper_wrapper::KeyKeeperSharedState; use crate::shared_state::provision_wrapper::ProvisionSharedState; use crate::shared_state::telemetry_wrapper::TelemetrySharedState; use crate::telemetry::event_reader::EventReader; use proxy_agent_shared::logger::LoggerLevel; use proxy_agent_shared::telemetry::event_logger; use proxy_agent_shared::{misc_helpers, proxy_agent_aggregate_status}; use std::path::PathBuf; use std::time::Duration; use tokio_util::sync::CancellationToken; const PROVISION_TAG_FILE_NAME: &str = "provisioned.tag"; const STATUS_TAG_TMP_FILE_NAME: &str = "status.tag.tmp"; const STATUS_TAG_FILE_NAME: &str = "status.tag"; bitflags::bitflags!
{ /// Provision flags /// NONE - no provision finished /// REDIRECTOR_READY - redirector provision finished /// KEY_LATCH_READY - key latch provision finished /// LISTENER_READY - listener provision finished /// ALL_READY - all provision finished /// It is used to track each module provision state /// Example: /// ```rust /// use proxy_agent::provision::ProvisionFlags; /// /// let flags = ProvisionFlags::REDIRECTOR_READY | ProvisionFlags::KEY_LATCH_READY; /// assert_eq!(3, flags.bits()); /// assert_eq!(true, flags.contains(ProvisionFlags::REDIRECTOR_READY)); /// assert_eq!(true, flags.contains(ProvisionFlags::KEY_LATCH_READY)); /// assert_eq!(false, flags.contains(ProvisionFlags::LISTENER_READY)); /// /// let flags = ProvisionFlags::REDIRECTOR_READY | ProvisionFlags::KEY_LATCH_READY | ProvisionFlags::LISTENER_READY; /// assert_eq!(7, flags.bits()); /// assert_eq!(true, flags.contains(ProvisionFlags::REDIRECTOR_READY)); /// assert_eq!(true, flags.contains(ProvisionFlags::KEY_LATCH_READY)); /// assert_eq!(true, flags.contains(ProvisionFlags::LISTENER_READY)); /// ``` #[derive(Clone, Debug)] pub struct ProvisionFlags: u8 { const NONE = 0; const REDIRECTOR_READY = 1; const KEY_LATCH_READY = 2; const LISTENER_READY = 4; const ALL_READY = 7; } } /// Provision internal state /// It is used to represent the provision state within GPA service /// finished_time_tick - provision finished or timedout time tick, 0 means provision still in progress /// error_message - provision error message /// key_keeper_secure_channel_state - key keeper secure_channel state #[derive(Clone, Debug)] pub struct ProvisionStateInternal { pub finished_time_tick: i128, pub error_message: String, pub key_keeper_secure_channel_state: String, } impl ProvisionStateInternal { pub fn is_secure_channel_latched(&self) -> bool { self.key_keeper_secure_channel_state != DISABLE_STATE && self.key_keeper_secure_channel_state != UNKNOWN_STATE } } /// Update provision state when redirector provision finished /// It could be called by redirector module pub async fn redirector_ready( cancellation_token: CancellationToken, key_keeper_shared_state: KeyKeeperSharedState, telemetry_shared_state: TelemetrySharedState, provision_shared_state: ProvisionSharedState, agent_status_shared_state: AgentStatusSharedState, ) { update_provision_state( ProvisionFlags::REDIRECTOR_READY, None, cancellation_token, key_keeper_shared_state, telemetry_shared_state, provision_shared_state, agent_status_shared_state, ) .await; } /// Update provision state when key latch provision finished /// It could be called by key latch module pub async fn key_latched( cancellation_token: CancellationToken, key_keeper_shared_state: KeyKeeperSharedState, telemetry_shared_state: TelemetrySharedState, provision_shared_state: ProvisionSharedState, agent_status_shared_state: AgentStatusSharedState, ) { update_provision_state( ProvisionFlags::KEY_LATCH_READY, None, cancellation_token, key_keeper_shared_state, telemetry_shared_state, provision_shared_state, agent_status_shared_state, ) .await; } /// Update provision state when listener provision finished /// It could be called by listener module pub async fn listener_started( cancellation_token: CancellationToken, key_keeper_shared_state: KeyKeeperSharedState, telemetry_shared_state: TelemetrySharedState, provision_shared_state: ProvisionSharedState, agent_status_shared_state: AgentStatusSharedState, ) { update_provision_state( ProvisionFlags::LISTENER_READY, None, cancellation_token, key_keeper_shared_state, telemetry_shared_state, 
provision_shared_state, agent_status_shared_state, ) .await; } /// Update provision state for each module to shared_state async fn update_provision_state( state: ProvisionFlags, provision_dir: Option, cancellation_token: CancellationToken, key_keeper_shared_state: KeyKeeperSharedState, telemetry_shared_state: TelemetrySharedState, provision_shared_state: ProvisionSharedState, agent_status_shared_state: AgentStatusSharedState, ) { if let Ok(provision_state) = provision_shared_state.update_one_state(state).await { if provision_state.contains(ProvisionFlags::ALL_READY) { if let Err(e) = provision_shared_state.set_provision_finished(true).await { // log the error and continue logger::write_error(format!( "update_provision_state::Failed to set provision finished with error: {e}" )); } // write provision success state here write_provision_state( provision_dir, provision_shared_state.clone(), agent_status_shared_state.clone(), ) .await; // start event threads right after provision successfully start_event_threads( cancellation_token, key_keeper_shared_state, telemetry_shared_state, provision_shared_state, agent_status_shared_state, ) .await; } } } pub async fn key_latch_ready_state_reset(provision_shared_state: ProvisionSharedState) { reset_provision_state(ProvisionFlags::KEY_LATCH_READY, provision_shared_state).await; } async fn reset_provision_state( state_to_reset: ProvisionFlags, provision_shared_state: ProvisionSharedState, ) { let provision_state = match provision_shared_state.reset_one_state(state_to_reset).await { Ok(state) => state, Err(e) => { logger::write_error(format!("Failed to reset provision state with error: {e}")); return; } }; if let Err(e) = provision_shared_state .set_provision_finished(provision_state.contains(ProvisionFlags::ALL_READY)) .await { logger::write_error(format!( "reset_provision_state::Failed to set provision finished with error: {e}" )); } } /// Update provision state when provision timedout /// It will be called if key latch provision timedout /// Example: /// ```rust /// use proxy_agent::provision; /// use std::sync::{Arc, Mutex}; /// /// let shared_state = Arc::new(Mutex::new(SharedState::new())); /// provision::provision_timeup(None, shared_state.clone()); /// ``` pub async fn provision_timeup( provision_dir: Option, provision_shared_state: ProvisionSharedState, agent_status_shared_state: AgentStatusSharedState, ) { let provision_state = provision_shared_state .get_state() .await .unwrap_or(ProvisionFlags::NONE); if !provision_state.contains(ProvisionFlags::ALL_READY) { if let Err(e) = provision_shared_state.set_provision_finished(true).await { logger::write_error(format!("Failed to set provision finished with error: {e}")); } // write provision state write_provision_state( provision_dir, provision_shared_state, agent_status_shared_state, ) .await; } } /// Start event logger & reader tasks and status reporting task /// It will be called when provision finished or timedout, /// it is designed to delay start those tasks to give more cpu time to provision tasks pub async fn start_event_threads( cancellation_token: CancellationToken, key_keeper_shared_state: KeyKeeperSharedState, telemetry_shared_state: TelemetrySharedState, provision_shared_state: ProvisionSharedState, agent_status_shared_state: AgentStatusSharedState, ) { if let Ok(logger_threads_initialized) = provision_shared_state .get_event_log_threads_initialized() .await { if logger_threads_initialized { return; } } let cloned_agent_status_shared_state = agent_status_shared_state.clone(); tokio::spawn({ 
async { event_logger::start( config::get_events_dir(), Duration::default(), config::get_max_event_file_count(), move |status: String| { let cloned_agent_status_shared_state = cloned_agent_status_shared_state.clone(); async move { let _ = cloned_agent_status_shared_state .set_module_status_message(status, AgentStatusModule::TelemetryLogger) .await; } }, ) .await; } }); tokio::spawn({ let event_reader = EventReader::new( config::get_events_dir(), true, cancellation_token.clone(), key_keeper_shared_state.clone(), telemetry_shared_state.clone(), agent_status_shared_state.clone(), ); async move { event_reader .start(Some(Duration::from_secs(300)), None, None) .await; } }); if let Err(e) = provision_shared_state .set_event_log_threads_initialized() .await { logger::write_warning(format!( "Failed to set event log threads initialized with error: {e}" )); } tokio::spawn({ let agent_status_task = proxy_agent_status::ProxyAgentStatusTask::new( Duration::from_secs(60), proxy_agent_aggregate_status::get_proxy_agent_aggregate_status_folder(), cancellation_token.clone(), key_keeper_shared_state.clone(), agent_status_shared_state.clone(), ); async move { agent_status_task.start().await; } }); } /// Write provision state to provisioned.tag file and status.tag file under provision_dir /// provisioned.tag is backcompat file, it is used to indicate the provision finished for pilot WinPA /// status.tag is used to store the provision error message for current WinPA service to query the provision status /// if status.tag file exists, it means provision finished /// if status.tag file does not exist, it means provision still in progress /// the content of the status.tag file is the provision error message, /// empty means provision success, otherwise provision failed with error message async fn write_provision_state( provision_dir: Option, provision_shared_state: ProvisionSharedState, agent_status_shared_state: AgentStatusSharedState, ) { let provision_dir = provision_dir.unwrap_or_else(config::get_keys_dir); let provisioned_file: PathBuf = provision_dir.join(PROVISION_TAG_FILE_NAME); if let Err(e) = misc_helpers::try_create_folder(&provision_dir) { logger::write_error(format!("Failed to create provision folder with error: {e}")); return; } if let Err(e) = std::fs::write( provisioned_file, misc_helpers::get_date_time_string_with_milliseconds(), ) { logger::write_error(format!("Failed to write provisioned file with error: {e}")); } let mut failed_state_message = get_provision_failed_state_message(provision_shared_state, agent_status_shared_state).await; #[cfg(not(windows))] { if failed_state_message.is_empty() { logger::write_serial_console_log("Provision finished successfully".to_string()); } else { logger::write_serial_console_log(failed_state_message.clone()); } } if !failed_state_message.is_empty() { // escape xml characters to allow the message to able be composed into xml payload failed_state_message = helpers::xml_escape(failed_state_message); // write provision failed error message to event event_logger::write_event( LoggerLevel::Error, failed_state_message.to_string(), "write_provision_state", "provision", logger::AGENT_LOGGER_KEY, ); } let status_file: PathBuf = provision_dir.join(STATUS_TAG_TMP_FILE_NAME); match std::fs::write(status_file, failed_state_message.as_bytes()) { Ok(_) => { match std::fs::rename( provision_dir.join(STATUS_TAG_TMP_FILE_NAME), provision_dir.join(STATUS_TAG_FILE_NAME), ) { Ok(_) => {} Err(e) => { logger::write_error(format!("Failed to rename status file with error: {e}")); } } } 
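// Note: the status message is written to status.tag.tmp first and then renamed to status.tag
// (see the Ok arm above), so a consumer polling status.tag should not observe a partially
// written file.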
Err(e) => { logger::write_error(format!("Failed to write temp status file with error: {e}")); } } } /// Get provision failed state message async fn get_provision_failed_state_message( provision_shared_state: ProvisionSharedState, agent_status_shared_state: AgentStatusSharedState, ) -> String { let provision_state = match provision_shared_state.get_state().await { Ok(state) => state, Err(e) => { logger::write_error(format!("Failed to get provision state with error: {e}")); ProvisionFlags::NONE } }; let mut state = String::new(); //provision success, write 0 byte to file if !provision_state.contains(ProvisionFlags::REDIRECTOR_READY) { state.push_str(&format!( "ebpfProgramStatus - {}\r\n", agent_status_shared_state .get_module_status(AgentStatusModule::Redirector) .await .message )); } if !provision_state.contains(ProvisionFlags::KEY_LATCH_READY) { state.push_str(&format!( "keyLatchStatus - {}\r\n", agent_status_shared_state .get_module_status(AgentStatusModule::KeyKeeper) .await .message )); } if !provision_state.contains(ProvisionFlags::LISTENER_READY) { state.push_str(&format!( "proxyListenerStatus - {}\r\n", agent_status_shared_state .get_module_status(AgentStatusModule::ProxyServer) .await .message )); } state } /// Get provision state /// It returns the current GPA serice provision state (from shared_state) for GPA service /// This function is designed and invoked in GPA service pub async fn get_provision_state_internal( provision_shared_state: ProvisionSharedState, agent_status_shared_state: AgentStatusSharedState, key_keeper_shared_state: KeyKeeperSharedState, ) -> ProvisionStateInternal { ProvisionStateInternal { finished_time_tick: provision_shared_state .get_provision_finished() .await .unwrap_or(0), error_message: get_provision_failed_state_message( provision_shared_state, agent_status_shared_state, ) .await, key_keeper_secure_channel_state: key_keeper_shared_state .get_current_secure_channel_state() .await .unwrap_or(UNKNOWN_STATE.to_string()), } } /// provision query module designed for GPA command line, serves for --status [--wait seconds] option /// It is used to query the provision status from GPA service via http request pub mod provision_query { use crate::common::{constants, error::Error, helpers, hyper_client, logger, result::Result}; use proxy_agent_shared::misc_helpers; use serde_derive::{Deserialize, Serialize}; use std::{collections::HashMap, net::Ipv4Addr, time::Duration}; /// Provision URL path, it is used to query the provision status from GPA service http listener pub const PROVISION_URL_PATH: &str = "/provision"; /// Provision status /// finished - provision finished or timedout /// true means provision finished or timedout, false means provision still in progress /// errorMessage - provision error message #[derive(Serialize, Deserialize)] #[allow(non_snake_case)] pub struct ProvisionState { pub finished: bool, pub errorMessage: String, } impl ProvisionState { pub fn new(finished: bool, error_message: String) -> ProvisionState { ProvisionState { finished, errorMessage: error_message, } } } /// Provision query /// It is used to query the provision status from GPA service via http request /// This struct is designed for GPA command line, serves for --status [--wait seconds] option pub struct ProvisionQuery { port: u16, wait_duration: Option, query_time_tick: i128, } impl ProvisionQuery { pub fn new(port: u16, wait_duration: Option) -> ProvisionQuery { ProvisionQuery { port, wait_duration, query_time_tick: misc_helpers::get_date_time_unix_nano(), } } #[cfg(test)] 
pub fn get_query_time_tick(&self) -> i128 { self.query_time_tick } /// Get current GPA service provision status and wait until the GPA service provision finished or timeout /// This function is designed for GPA command line, serves for --status [--wait seconds] option pub async fn get_provision_status_wait(&self) -> ProvisionState { let mut first_loop = true; loop { // query the current provision state from GPA service via http request // ask GPA service listener to notify the its key_keeper in the first loop only let state = match self.get_current_provision_status(first_loop).await { Ok(state) => state, Err(e) => { println!( "Failed to query the current provision state with error: {}.", e ); ProvisionState::new(false, String::new()) } }; first_loop = false; if state.finished { return state; } if let Some(d) = self.wait_duration { if d.as_millis() >= helpers::get_elapsed_time_in_millisec() { tokio::time::sleep(Duration::from_millis(100)).await; continue; } } // wait timedout return as 'not finished' with empty message return ProvisionState::new(false, String::new()); } } // Get current provision status from GPA service via http request // return value // bool - true provision finished; false provision not finished // String - provision error message, empty means provision success or provision failed. async fn get_current_provision_status(&self, notify: bool) -> Result { let provision_url: String = format!( "http://{}:{}{}", Ipv4Addr::LOCALHOST, self.port, PROVISION_URL_PATH ); let provision_url: hyper::Uri = provision_url .parse::() .map_err(|e| Error::ParseUrl(provision_url, e.to_string()))?; let mut headers = HashMap::new(); headers.insert(constants::METADATA_HEADER.to_string(), "true".to_string()); headers.insert( constants::TIME_TICK_HEADER.to_string(), self.query_time_tick.to_string(), ); if notify { headers.insert(constants::NOTIFY_HEADER.to_string(), "true".to_string()); } hyper_client::get(&provision_url, &headers, None, None, logger::write_warning).await } } } #[cfg(test)] mod tests { use crate::provision::provision_query::ProvisionQuery; use crate::provision::ProvisionFlags; use crate::proxy::proxy_server; use crate::shared_state::SharedState; use std::env; use std::fs; use std::time::Duration; #[tokio::test] async fn provision_state_test() { let mut temp_test_path = env::temp_dir(); let logger_key = "provision_state_test"; temp_test_path.push(logger_key); // clean up and ignore the clean up errors _ = fs::remove_dir_all(&temp_test_path); // start listener, the port must different from the one used in production code let shared_state = SharedState::start_all(); let cancellation_token = shared_state.get_cancellation_token(); let provision_shared_state = shared_state.get_provision_shared_state(); let key_keeper_shared_state = shared_state.get_key_keeper_shared_state(); let telemetry_shared_state = shared_state.get_telemetry_shared_state(); let agent_status_shared_state = shared_state.get_agent_status_shared_state(); let port: u16 = 8092; let proxy_server = proxy_server::ProxyServer::new(port, &shared_state); tokio::spawn({ let proxy_server = proxy_server.clone(); async move { proxy_server.start().await; } }); // give some time to let the listener started let sleep_duration = Duration::from_millis(100); tokio::time::sleep(sleep_duration).await; let provision_query = ProvisionQuery::new(port, None); let provision_status = provision_query.get_provision_status_wait().await; assert!( !provision_status.finished, "provision_status.0 must be false" ); assert_eq!( 0, 
provision_status.errorMessage.len(), "provision_status.1 must be empty" ); let dir1 = temp_test_path.to_path_buf(); let dir2 = temp_test_path.to_path_buf(); let dir3 = temp_test_path.to_path_buf(); let handles = vec![ super::update_provision_state( ProvisionFlags::REDIRECTOR_READY, Some(dir1), cancellation_token.clone(), key_keeper_shared_state.clone(), telemetry_shared_state.clone(), provision_shared_state.clone(), agent_status_shared_state.clone(), ), super::update_provision_state( ProvisionFlags::KEY_LATCH_READY, Some(dir2), cancellation_token.clone(), key_keeper_shared_state.clone(), telemetry_shared_state.clone(), provision_shared_state.clone(), agent_status_shared_state.clone(), ), super::update_provision_state( ProvisionFlags::LISTENER_READY, Some(dir3), cancellation_token.clone(), key_keeper_shared_state.clone(), telemetry_shared_state.clone(), provision_shared_state.clone(), agent_status_shared_state.clone(), ), ]; for handle in handles { handle.await; } _ = key_keeper_shared_state .update_current_secure_channel_state(super::DISABLE_STATE.to_string()) .await; let provisioned_file = temp_test_path.join(super::PROVISION_TAG_FILE_NAME); assert!(provisioned_file.exists()); let status_file = temp_test_path.join(super::STATUS_TAG_FILE_NAME); assert!(status_file.exists()); assert_eq!( 0, status_file.metadata().unwrap().len(), "success status.tag file must be empty" ); let provision_query = ProvisionQuery::new(port, Some(Duration::from_millis(5))); let provision_state_internal = super::get_provision_state_internal( provision_shared_state.clone(), agent_status_shared_state.clone(), key_keeper_shared_state.clone(), ) .await; assert!( provision_state_internal.finished_time_tick > 0, "finished_time_tick must great than 0" ); assert!(!provision_state_internal.is_secure_channel_latched()); assert!( provision_state_internal.finished_time_tick < provision_query.get_query_time_tick(), "finished_time_tick must older than the query time_tick" ); let provision_status = provision_query.get_provision_status_wait().await; assert!(!provision_status.finished, "provision_status.0 must be false as secured channel is disabled and provision finished "); assert_eq!( 0, provision_status.errorMessage.len(), "provision_status.1 must be empty" ); let event_threads_initialized = provision_shared_state .get_event_log_threads_initialized() .await .unwrap(); assert!(event_threads_initialized); // update provision finish time_tick super::key_latched( cancellation_token.clone(), key_keeper_shared_state.clone(), telemetry_shared_state.clone(), provision_shared_state.clone(), agent_status_shared_state.clone(), ) .await; let provision_state_internal = super::get_provision_state_internal( provision_shared_state.clone(), agent_status_shared_state.clone(), key_keeper_shared_state.clone(), ) .await; assert!( provision_state_internal.finished_time_tick > 0, "finished_time_tick must great than 0" ); assert!(!provision_state_internal.is_secure_channel_latched()); assert!( provision_state_internal.finished_time_tick > provision_query.get_query_time_tick(), "finished_time_tick must later than the query time_tick" ); let provision_status = provision_query.get_provision_status_wait().await; assert!( provision_status.finished, "provision_status.finished must be true as provision finished_time_tick refreshed" ); assert_eq!( 0, provision_status.errorMessage.len(), "provision_status.1 must be empty" ); // test reset key latch provision state super::key_latch_ready_state_reset(provision_shared_state.clone()).await; let provision_state = 
provision_shared_state.get_state().await.unwrap(); assert!(!provision_state.contains(ProvisionFlags::KEY_LATCH_READY)); let provision_state_internal = super::get_provision_state_internal( provision_shared_state.clone(), agent_status_shared_state.clone(), key_keeper_shared_state.clone(), ) .await; assert!( provision_state_internal.finished_time_tick == 0, "finished_time_tick must be 0 as key latch provision state reset" ); let provision_status = provision_query.get_provision_status_wait().await; assert!( !provision_status.finished, "provision_status.0 must be false" ); assert_eq!( 0, provision_status.errorMessage.len(), "provision_status.1 must be empty" ); // test key_latched ready again super::key_latched( cancellation_token.clone(), key_keeper_shared_state.clone(), telemetry_shared_state.clone(), provision_shared_state.clone(), agent_status_shared_state.clone(), ) .await; let provision_state = provision_shared_state.get_state().await.unwrap(); assert!( provision_state.contains(ProvisionFlags::ALL_READY), "ALL_READY must be true after key_latched again" ); let provision_status = provision_query.get_provision_status_wait().await; assert!(provision_status.finished, "provision_status.0 must be true"); assert_eq!( 0, provision_status.errorMessage.len(), "provision_status.1 must be empty" ); // stop listener cancellation_token.cancel(); // clean up and ignore the clean up errors _ = fs::remove_dir_all(&temp_test_path); } #[tokio::test] async fn provision_status_tag_file_test() { let mut temp_test_path = env::temp_dir(); let logger_key = "provision_status_tag_file_test"; temp_test_path.push(logger_key); // clean up and ignore the clean up errors _ = fs::remove_dir_all(&temp_test_path); let shared_state = SharedState::start_all(); let cancellation_token = shared_state.get_cancellation_token(); let provision_shared_state = shared_state.get_provision_shared_state(); let key_keeper_shared_state = shared_state.get_key_keeper_shared_state(); let telemetry_shared_state = shared_state.get_telemetry_shared_state(); let agent_status_shared_state = shared_state.get_agent_status_shared_state(); // test all 3 provision states as ready super::update_provision_state( ProvisionFlags::LISTENER_READY, Some(temp_test_path.clone()), cancellation_token.clone(), key_keeper_shared_state.clone(), telemetry_shared_state.clone(), provision_shared_state.clone(), agent_status_shared_state.clone(), ) .await; super::update_provision_state( ProvisionFlags::KEY_LATCH_READY, Some(temp_test_path.clone()), cancellation_token.clone(), key_keeper_shared_state.clone(), telemetry_shared_state.clone(), provision_shared_state.clone(), agent_status_shared_state.clone(), ) .await; super::update_provision_state( ProvisionFlags::REDIRECTOR_READY, Some(temp_test_path.clone()), cancellation_token.clone(), key_keeper_shared_state.clone(), telemetry_shared_state.clone(), provision_shared_state.clone(), agent_status_shared_state.clone(), ) .await; let provisioned_file = temp_test_path.join(super::PROVISION_TAG_FILE_NAME); assert!(provisioned_file.exists()); let status_file = temp_test_path.join(super::STATUS_TAG_FILE_NAME); assert!(status_file.exists()); assert_eq!( 0, status_file.metadata().unwrap().len(), "success status.tag file must be empty" ); // test key_latch not ready, and error status message contains xml escape characters super::key_latch_ready_state_reset(provision_shared_state.clone()).await; _ = agent_status_shared_state .set_module_status_message( "keyLatchStatus - Failed to acquire key details: Key(KeyResponse(\"acquire\", 
403))".to_string(), super::AgentStatusModule::KeyKeeper, ) .await; super::provision_timeup( Some(temp_test_path.clone()), provision_shared_state.clone(), agent_status_shared_state.clone(), ) .await; //let status_file = temp_test_path.join(super::STATUS_TAG_FILE_NAME); assert!(status_file.exists()); let status_file_content = fs::read_to_string(&status_file).unwrap(); assert_eq!( "keyLatchStatus - keyLatchStatus - Failed to acquire key details: Key(KeyResponse("acquire", 403))\r\n", status_file_content ); cancellation_token.cancel(); // clean up and ignore the clean up errors _ = fs::remove_dir_all(&temp_test_path); } } GuestProxyAgent-1.0.30/proxy_agent/src/proxy.rs000066400000000000000000000251401500521614600215620ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT //! This module contains the logic to get user and process details. //! When eBPF redirects the http traffic, it writes the uid and pid information to the eBPF map. //! The GPA service reads the audit/claims information via uid & pid. //! The GPA service uses the audit/claims information to authorize the requests before forwarding to the remote endpoints. //! //! Example //! ```rust //! use proxy_agent::proxy; //! use proxy_agent::shared_state::proxy_server_wrapper::ProxyServerSharedState; //! //! // Get the user details //! let logon_id = 999u64; //! let proxy_server_shared_state = ProxyServerSharedState::start_new(); //! let user = proxy::get_user(logon_id, proxy_server_shared_state.clone()).unwrap(); //! //! // Get the process details //! let pid = std::process::id(); //! let process = proxy::Process::from_pid(pid); //! //! // Get the claims from the audit entry //! let mut entry = proxy::AuditEntry::empty(); //! entry.logon_id = 999; // LocalSystem logon_id //! entry.process_id = std::process::id(); //! entry.destination_ipv4 = 0x10813FA8; //! entry.destination_port = 80; //! entry.is_admin = 1; //! let claims = proxy::Claims::from_audit_entry(&entry, "127.0.0.1".parse().unwrap(), proxy_server_shared_state.clone()).unwrap(); //! println!("{}", serde_json::to_string(&claims).unwrap()); //! 
``` pub mod authorization_rules; pub mod proxy_authorizer; pub mod proxy_connection; pub mod proxy_server; pub mod proxy_summary; #[cfg(windows)] mod windows; use crate::common::result::Result; use crate::redirector::AuditEntry; use crate::shared_state::proxy_server_wrapper::ProxyServerSharedState; use serde_derive::{Deserialize, Serialize}; use std::{ffi::OsString, net::IpAddr, path::PathBuf}; #[cfg(not(windows))] use sysinfo::{Pid, ProcessRefreshKind, RefreshKind, System, UpdateKind}; #[derive(Serialize, Deserialize, Clone)] #[allow(non_snake_case)] pub struct Claims { pub userId: u64, pub userName: String, pub userGroups: Vec, pub processId: u32, pub processName: OsString, pub processFullPath: PathBuf, pub processCmdLine: String, pub runAsElevated: bool, pub clientIp: String, pub clientPort: u16, } struct Process { pub command_line: String, pub name: OsString, pub exe_full_name: PathBuf, pub pid: u32, } #[derive(Clone)] pub struct User { pub logon_id: u64, pub user_name: String, pub user_groups: Vec, } const UNDEFINED: &str = "undefined"; const EMPTY: &str = "empty"; async fn get_user( logon_id: u64, proxy_server_shared_state: ProxyServerSharedState, ) -> Result { // cache the logon_id -> user_name if let Ok(Some(user)) = proxy_server_shared_state.get_user(logon_id).await { Ok(user) } else { let user = User::from_logon_id(logon_id)?; if let Err(e) = proxy_server_shared_state.add_user(user.clone()).await { println!("Failed to add user: {} to cache", e); } Ok(user) } } #[cfg(not(windows))] fn get_process_info(process_id: u32) -> (PathBuf, String) { let mut process_name = PathBuf::default(); let mut process_cmd_line = UNDEFINED.to_string(); let pid = Pid::from_u32(process_id); let sys = System::new_with_specifics( RefreshKind::new().with_processes( ProcessRefreshKind::new() .with_cmd(UpdateKind::Always) .with_exe(UpdateKind::Always), ), ); if let Some(p) = sys.process(pid) { process_name = match p.exe() { Some(path) => path.to_path_buf(), None => PathBuf::default(), }; process_cmd_line = p.cmd().join(" "); } (process_name, process_cmd_line) } impl Claims { pub fn empty() -> Self { Claims { userId: 0, userName: EMPTY.to_string(), userGroups: Vec::new(), processId: 0, processName: OsString::from(EMPTY), processFullPath: PathBuf::from(EMPTY), processCmdLine: EMPTY.to_string(), runAsElevated: false, clientIp: EMPTY.to_string(), clientPort: 0, } } pub async fn from_audit_entry( entry: &AuditEntry, client_ip: IpAddr, client_port: u16, proxy_server_shared_state: ProxyServerSharedState, ) -> Result { let p = Process::from_pid(entry.process_id); let u = get_user(entry.logon_id, proxy_server_shared_state).await?; Ok(Claims { userId: entry.logon_id, userName: u.user_name.to_string(), userGroups: u.user_groups.clone(), processId: p.pid, processName: p.name, processFullPath: p.exe_full_name, processCmdLine: p.command_line.to_string(), runAsElevated: entry.is_admin == 1, clientIp: client_ip.to_string(), clientPort: client_port, }) } } impl Process { pub fn from_pid(pid: u32) -> Self { let (process_full_path, cmd); #[cfg(windows)] { let handler = windows::get_process_handler(pid).unwrap_or_else(|e| { println!("Failed to get process handler: {}", e); 0 }); let base_info = windows::query_basic_process_info(handler); match base_info { Ok(_) => { process_full_path = windows::get_process_full_name(handler).unwrap_or_default(); cmd = windows::get_process_cmd(handler).unwrap_or(UNDEFINED.to_string()); } Err(e) => { process_full_path = PathBuf::default(); cmd = UNDEFINED.to_string(); println!("Failed to query 
basic process info: {}", e); } } } #[cfg(not(windows))] { let process_info = get_process_info(pid); process_full_path = process_info.0; cmd = process_info.1; } let process_name = process_full_path .file_name() .unwrap_or_default() .to_os_string(); Process { command_line: cmd, name: process_name, exe_full_name: process_full_path, pid, } } } impl User { pub fn from_logon_id(logon_id: u64) -> Result { let user_name; let mut user_groups: Vec = Vec::new(); #[cfg(windows)] { let user = windows::get_user(logon_id)?; user_name = user.0; for g in user.1 { user_groups.push(g.to_string()); } } #[cfg(not(windows))] { match uzers::get_user_by_uid(logon_id as u32) { Some(u) => { user_name = u.name().to_string_lossy().to_string(); let g: Option> = uzers::get_user_groups(&user_name, u.primary_group_id()); if let Some(groups) = g { for group in groups { user_groups.push(group.name().to_string_lossy().to_string()); } } } None => user_name = UNDEFINED.to_string(), } } Ok(User { logon_id, user_name: user_name.to_string(), user_groups: user_groups.clone(), }) } } #[cfg(test)] mod tests { use super::Claims; use crate::{ redirector::AuditEntry, shared_state::proxy_server_wrapper::ProxyServerSharedState, }; use std::net::IpAddr; #[tokio::test] async fn user_test() { let logon_id; let expected_user_name; #[cfg(windows)] { logon_id = 999u64; expected_user_name = "SYSTEM"; } #[cfg(not(windows))] { logon_id = 0u64; expected_user_name = "root"; } let proxy_server_shared_state = ProxyServerSharedState::start_new(); let user = super::get_user(logon_id, proxy_server_shared_state.clone()) .await .unwrap(); println!("UserName: {}", user.user_name); println!("UserGroups: {}", user.user_groups.join(", ")); assert_eq!(expected_user_name, user.user_name, "user name mismatch."); #[cfg(windows)] { assert_eq!(0, user.user_groups.len(), "SYSTEM has no group."); } #[cfg(not(windows))] { assert!( !user.user_groups.is_empty(), "user_groups should not be empty." ); } // test the USERS.len will not change let len = proxy_server_shared_state.get_users_count().await.unwrap(); _ = super::get_user(logon_id, proxy_server_shared_state.clone()); _ = super::get_user(logon_id, proxy_server_shared_state.clone()); _ = super::get_user(logon_id, proxy_server_shared_state.clone()); _ = super::get_user(logon_id, proxy_server_shared_state.clone()); assert_eq!( len, proxy_server_shared_state.get_users_count().await.unwrap(), "users count should not change" ) } #[tokio::test] async fn entry_to_claims() { let mut entry = AuditEntry::empty(); entry.logon_id = 999; // LocalSystem logon_id entry.process_id = std::process::id(); entry.destination_ipv4 = 0x10813FA8; entry.destination_port = 80; entry.is_admin = 1; let proxy_server_shared_state = ProxyServerSharedState::start_new(); let claims = Claims::from_audit_entry( &entry, IpAddr::from([127, 0, 0, 1]), 0, // doesn't matter for this test proxy_server_shared_state.clone(), ) .await .unwrap(); println!("{}", serde_json::to_string(&claims).unwrap()); assert!(claims.runAsElevated, "runAsElevated must be true"); assert_ne!(String::new(), claims.userName, "userName cannot be empty."); assert!( !claims.processName.is_empty(), "processName cannot be empty." ); assert!( !claims.processFullPath.as_os_str().is_empty(), "processFullPath cannot be empty." ); assert_ne!( claims.processName, claims.processFullPath.as_os_str(), "processName and processFullPath should not be the same." ); assert_ne!( String::new(), claims.processCmdLine, "processCmdLine cannot be empty." 
); } } GuestProxyAgent-1.0.30/proxy_agent/src/proxy/000077500000000000000000000000001500521614600212125ustar00rootroot00000000000000GuestProxyAgent-1.0.30/proxy_agent/src/proxy/authorization_rules.rs000066400000000000000000000606501500521614600257010ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT //! This module contains the logic to authorize the request based on the authorization rules. //! The authorization rules is from user inputted access control rules. //! //! Example //! ```rust //! use proxy_agent::authorization_rules; //! use proxy_agent::proxy_connection::ConnectionLogger; //! //! // convert the authorization item to access control rules //! let access_control_rules = AccessControlRules::from_authorization_item(authorization_item); //! //! // check if the request is allowed based on the access control rules //! let is_allowed = access_control_rules.is_allowed(connection_id, request_url, claims); //! //! ``` use super::{proxy_connection::ConnectionLogger, Claims}; use crate::common::logger; use crate::key_keeper::key::{AuthorizationItem, AuthorizationRules, Identity, Privilege, Role}; use proxy_agent_shared::logger::LoggerLevel; use proxy_agent_shared::misc_helpers; use serde_derive::{Deserialize, Serialize}; use std::collections::{HashMap, HashSet}; use std::path::Path; use std::str::FromStr; #[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] pub enum AuthorizationMode { Disabled, Audit, Enforce, } impl std::fmt::Display for AuthorizationMode { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self { AuthorizationMode::Disabled => write!(f, "disabled"), AuthorizationMode::Audit => write!(f, "audit"), AuthorizationMode::Enforce => write!(f, "enforce"), } } } impl std::str::FromStr for AuthorizationMode { type Err = String; fn from_str(s: &str) -> Result { match s.to_lowercase().as_str() { "disabled" => Ok(AuthorizationMode::Disabled), "audit" => Ok(AuthorizationMode::Audit), "enforce" => Ok(AuthorizationMode::Enforce), _ => Err(format!("Invalid AuthorizationMode: {}", s)), } } } #[derive(Serialize, Deserialize, Clone)] #[allow(non_snake_case)] pub struct ComputedAuthorizationItem { pub id: String, // The default access: allow -> true, deny-> false pub defaultAllowed: bool, // disabled, audit, enforce pub mode: AuthorizationMode, // all the defined unique privileges, distinct by name pub privileges: HashMap, // The identities assigned to this privilege // key - privilege name, value - the assigned identity names pub privilegeAssignments: HashMap>, // all the defined unique identities, distinct by name // key - identity name, value - identity object pub identities: HashMap, } #[allow(dead_code)] impl ComputedAuthorizationItem { pub fn from_authorization_item( authorization_item: AuthorizationItem, ) -> ComputedAuthorizationItem { let authorization_mode = match AuthorizationMode::from_str(&authorization_item.mode) { Ok(mode) => mode, Err(err) => { // This should not happen, log the error and set the mode to disabled logger::write_error(format!("Failed to parse authorization mode: {}", err)); AuthorizationMode::Disabled } }; // Initialize with empty dictionaries let mut privilege_dict: HashMap = HashMap::new(); let mut identity_dict: HashMap = HashMap::new(); let mut privilege_assignments: HashMap> = HashMap::new(); if let Some(input_rules) = authorization_item.rules { if let (Some(privileges), Some(identities), Some(roles), Some(role_assignments)) = ( input_rules.privileges, input_rules.identities, 
input_rules.roles, input_rules.roleAssignments, ) { let role_dict = roles .into_iter() .map(|role| (role.name.clone(), role)) .collect::>(); identity_dict = identities .into_iter() .map(|identity| (identity.name.clone(), identity)) .collect::>(); privilege_dict = privileges .into_iter() .map(|privilege| (privilege.name.clone(), privilege)) .collect::>(); for role_assignment in role_assignments { match role_dict.get(&role_assignment.role) { Some(role) => { for privilege_name in &role.privileges { if privilege_dict.contains_key(privilege_name) { let assignments = if privilege_assignments.contains_key(privilege_name) { privilege_assignments.get_mut(privilege_name).unwrap() } else { let assignments = HashSet::new(); privilege_assignments .insert(privilege_name.clone(), assignments); privilege_assignments.get_mut(privilege_name).unwrap() }; for identity_name in &role_assignment.identities { if !identity_dict.contains_key(identity_name) { // skip the identity if the identity is not defined continue; } assignments.insert(identity_name.clone()); } } } } None => { // skip the assignment if the role is not defined logger::write_error(format!( "Role '{}' is not defined, skip the role assignment.", role_assignment.role )); continue; } } } } } ComputedAuthorizationItem { id: authorization_item.id, defaultAllowed: authorization_item.defaultAccess.to_lowercase() == "allow", mode: authorization_mode, identities: identity_dict, privileges: privilege_dict, privilegeAssignments: privilege_assignments, } } pub fn is_allowed( &self, logger: &mut ConnectionLogger, request_url: hyper::Uri, claims: Claims, ) -> bool { if self.mode == AuthorizationMode::Disabled { logger.write( LoggerLevel::Trace, "Access control is in disabled state, skip....".to_string(), ); return true; } let mut any_privilege_matched = false; for privilege in self.privileges.values() { let privilege_name = &privilege.name; if privilege.is_match(logger, &request_url) { any_privilege_matched = true; logger.write( LoggerLevel::Trace, format!("Request matched privilege '{}'.", privilege_name), ); if let Some(assignments) = self.privilegeAssignments.get(privilege_name) { for assignment in assignments { let identity_name = assignment.clone(); if let Some(identity) = self.identities.get(&identity_name) { if identity.is_match(logger, &claims) { logger.write( LoggerLevel::Trace, format!( "Request matched privilege '{}' and identity '{}'.", privilege_name, identity_name ), ); return true; } } } logger.write( LoggerLevel::Trace, format!( "Request matched privilege '{}' but no identity matched.", privilege_name ), ); } else { logger.write( LoggerLevel::Trace, format!( "Request matched privilege '{}' but no identity assigned.", privilege_name ), ); } } else { logger.write( LoggerLevel::Trace, format!("Request does not match privilege '{}'.", privilege_name), ); } } if any_privilege_matched { logger.write( LoggerLevel::Info, "Privilege matched at least once, but no identity matches, deny the access." 
.to_string(), ); return false; } logger.write( LoggerLevel::Trace, format!( "No privilege matched, fall back to use the default access: {}.", self.defaultAllowed ), ); self.defaultAllowed } } #[derive(Serialize, Deserialize, Clone)] #[allow(non_snake_case)] pub struct ComputedAuthorizationRules { #[serde(skip_serializing_if = "Option::is_none")] pub imds: Option, #[serde(skip_serializing_if = "Option::is_none")] pub wireserver: Option, #[serde(skip_serializing_if = "Option::is_none")] pub hostga: Option, } #[derive(Serialize, Deserialize, Clone)] #[allow(non_snake_case)] pub struct AuthorizationRulesForLogging { #[serde(skip_serializing_if = "Option::is_none")] pub inputRules: Option, pub computedRules: ComputedAuthorizationRules, } impl AuthorizationRulesForLogging { pub fn new( input_rules: Option, computed_rules: ComputedAuthorizationRules, ) -> AuthorizationRulesForLogging { AuthorizationRulesForLogging { inputRules: input_rules, computedRules: computed_rules, } } /// Write the authorization rules to a file for support purpose /// The file name is in the format of "AuthorizationRules_{timestamp}.json" /// The content is the json string of the AuthorizationRulesForLogging object /// The file is written to the path_dir specified by the input parameter pub fn write_all(&self, path_dir: &Path, max_file_count: usize) { // remove the old files let files = match misc_helpers::search_files(path_dir, r"^AuthorizationRules_.*\.json$") { Ok(files) => files, Err(e) => { // This should not happen, log the error and skip write the file logger::write_error(format!( "Failed to search the old authorization rules files under dir {} with error: {}", path_dir.display(), e )); return; } }; if files.len() >= max_file_count { let mut count = max_file_count; for file in &files { std::fs::remove_file(file).unwrap_or_else(|e| { logger::write_error(format!( "Failed to remove the old authorization rules file {} with error: {}", file.display(), e )); }); count += 1; if count > files.len() { break; } } } // compute the file name let new_file_name = format!( "AuthorizationRules_{}-{}.json", misc_helpers::get_date_time_string_with_milliseconds(), misc_helpers::get_date_time_unix_nano() ) .replace(':', "."); let full_file_path = path_dir.join(new_file_name); match misc_helpers::json_write_to_file(&self, &full_file_path) { Ok(_) => { logger::write_information(format!( "Authorization rules are written to file: {}", full_file_path.display() )); } Err(e) => { logger::write_error(format!( "Failed to write the authorization rules to file {} with error: {}", full_file_path.display(), e )); } }; } } #[cfg(test)] mod tests { use super::{AuthorizationRulesForLogging, ComputedAuthorizationRules}; use crate::key_keeper::key::{ AccessControlRules, AuthorizationItem, AuthorizationRules, Identity, Privilege, Role, RoleAssignment, }; use crate::proxy::authorization_rules::{AuthorizationMode, ComputedAuthorizationItem}; use crate::proxy::{proxy_connection::ConnectionLogger, Claims}; use proxy_agent_shared::misc_helpers; use std::ffi::OsString; use std::path::PathBuf; use std::str::FromStr; #[tokio::test] async fn test_authorization_rules() { let logger_key = "test_authorization_rules"; let mut temp_test_path = std::env::temp_dir(); temp_test_path.push(logger_key); let mut test_logger = ConnectionLogger::new(0, 0); // Test Enforce Mode let access_control_rules = AccessControlRules { roles: Some(vec![Role { name: "test".to_string(), privileges: vec!["test".to_string(), "test1".to_string()], }]), privileges: Some(vec![Privilege { name: 
"test".to_string(), path: "/test".to_string(), queryParameters: None, }]), identities: Some(vec![Identity { name: "test".to_string(), exePath: Some("test".to_string()), groupName: Some("test".to_string()), processName: Some("test".to_string()), userName: Some("test".to_string()), }]), roleAssignments: Some(vec![RoleAssignment { role: "test".to_string(), identities: vec!["test".to_string()], }]), }; let authorization_item: AuthorizationItem = AuthorizationItem { defaultAccess: "deny".to_string(), mode: "enforce".to_string(), rules: Some(access_control_rules), id: "0".to_string(), }; let rules = ComputedAuthorizationItem::from_authorization_item(authorization_item); let _clone_rules = rules.clone(); assert!(!rules.defaultAllowed); assert_eq!(rules.mode, AuthorizationMode::Enforce); assert!(!rules.privilegeAssignments.is_empty()); assert!(!rules.identities.is_empty()); assert!(!rules.privileges.is_empty()); let mut claims = Claims { userId: 0, userName: "test".to_string(), userGroups: vec!["test".to_string()], processId: 0, processFullPath: PathBuf::from("test"), clientIp: "0".to_string(), clientPort: 0, // doesn't matter for this test processName: OsString::from("test"), processCmdLine: "test".to_string(), runAsElevated: true, }; // assert the claim is allowed given the rules above let url = hyper::Uri::from_str("http://localhost/test/test").unwrap(); assert!(rules.is_allowed(&mut test_logger, url, claims.clone())); let relative_url = hyper::Uri::from_str("/test/test").unwrap(); assert!(rules.is_allowed(&mut test_logger, relative_url.clone(), claims.clone())); claims.userName = "test1".to_string(); assert!(!rules.is_allowed(&mut test_logger, relative_url, claims.clone())); // Test Audit Mode let access_control_rules = AccessControlRules { roles: Some(vec![Role { name: "test".to_string(), privileges: vec!["test".to_string(), "test1".to_string()], }]), privileges: Some(vec![Privilege { name: "test".to_string(), path: "/test".to_string(), queryParameters: None, }]), identities: Some(vec![Identity { name: "test".to_string(), exePath: Some("test".to_string()), groupName: Some("test".to_string()), processName: Some("test".to_string()), userName: Some("test".to_string()), }]), roleAssignments: Some(vec![RoleAssignment { role: "test".to_string(), identities: vec!["test".to_string()], }]), }; let authorization_item: AuthorizationItem = AuthorizationItem { defaultAccess: "deny".to_string(), mode: "audit".to_string(), rules: Some(access_control_rules), id: "0".to_string(), }; let rules = ComputedAuthorizationItem::from_authorization_item(authorization_item); assert!(!rules.defaultAllowed); assert_eq!(rules.mode, AuthorizationMode::Audit); assert!(!rules.privilegeAssignments.is_empty()); assert!(!rules.identities.is_empty()); assert!(!rules.privileges.is_empty()); // Test Disabled Mode let access_control_rules = AccessControlRules { roles: Some(vec![Role { name: "test".to_string(), privileges: vec!["test".to_string(), "test1".to_string()], }]), privileges: Some(vec![Privilege { name: "test".to_string(), path: "/test".to_string(), queryParameters: None, }]), identities: Some(vec![Identity { name: "test".to_string(), exePath: Some("test".to_string()), groupName: Some("test".to_string()), processName: Some("test".to_string()), userName: Some("test".to_string()), }]), roleAssignments: Some(vec![RoleAssignment { role: "test".to_string(), identities: vec!["test".to_string()], }]), }; let authorization_item: AuthorizationItem = AuthorizationItem { defaultAccess: "deny".to_string(), mode: 
"disabled".to_string(), rules: Some(access_control_rules), id: "0".to_string(), }; let rules = ComputedAuthorizationItem::from_authorization_item(authorization_item); assert!(!rules.defaultAllowed); assert_eq!(rules.mode, AuthorizationMode::Disabled); assert!(!rules.privilegeAssignments.is_empty()); assert!(!rules.identities.is_empty()); assert!(!rules.privileges.is_empty()); let url = hyper::Uri::from_str("http://localhost/test/test1").unwrap(); assert!(rules.is_allowed(&mut test_logger, url, claims.clone())); let relative_url = hyper::Uri::from_str("/test/test1").unwrap(); assert!(rules.is_allowed(&mut test_logger, relative_url, claims.clone())); // Test enforce mode, identity not match let access_control_rules = AccessControlRules { roles: Some(vec![Role { name: "test".to_string(), privileges: vec!["test".to_string(), "test1".to_string()], }]), privileges: Some(vec![Privilege { name: "test".to_string(), path: "/test".to_string(), queryParameters: None, }]), identities: Some(vec![Identity { name: "test1".to_string(), exePath: Some("test".to_string()), groupName: Some("test".to_string()), processName: Some("test".to_string()), userName: Some("test".to_string()), }]), roleAssignments: Some(vec![RoleAssignment { role: "test".to_string(), identities: vec!["test1".to_string()], }]), }; let authorization_item: AuthorizationItem = AuthorizationItem { defaultAccess: "deny".to_string(), mode: "enforce".to_string(), rules: Some(access_control_rules), id: "0".to_string(), }; let rules = ComputedAuthorizationItem::from_authorization_item(authorization_item); assert!(!rules.defaultAllowed); assert_eq!(rules.mode, AuthorizationMode::Enforce); assert!(!rules.privilegeAssignments.is_empty()); assert!(!rules.identities.is_empty()); assert!(!rules.privileges.is_empty()); let url = hyper::Uri::from_str("http://localhost/test?").unwrap(); assert!(!rules.is_allowed(&mut test_logger, url, claims.clone())); let relativeurl = hyper::Uri::from_str("/test?").unwrap(); assert!(!rules.is_allowed(&mut test_logger, relativeurl, claims.clone())); } #[tokio::test] async fn test_authorization_rules_for_logging() { let mut temp_test_path = std::env::temp_dir(); temp_test_path.push("test_authorization_rules_for_logging"); let mut log_dir = temp_test_path.to_path_buf(); log_dir.push("Logs"); // clean up and ignore the clean up errors match std::fs::remove_dir_all(&temp_test_path) { Ok(_) => {} Err(e) => { print!("Failed to remove_dir_all with error {}.", e); } } misc_helpers::try_create_folder(&temp_test_path).unwrap(); let access_control_rules = AccessControlRules { roles: Some(vec![Role { name: "test".to_string(), privileges: vec!["test".to_string(), "test1".to_string()], }]), privileges: Some(vec![Privilege { name: "test".to_string(), path: "/test".to_string(), queryParameters: None, }]), identities: Some(vec![Identity { name: "test".to_string(), exePath: Some("test".to_string()), groupName: Some("test".to_string()), processName: Some("test".to_string()), userName: Some("test".to_string()), }]), roleAssignments: Some(vec![RoleAssignment { role: "test".to_string(), identities: vec!["test".to_string()], }]), }; let authorization_item: AuthorizationItem = AuthorizationItem { defaultAccess: "deny".to_string(), mode: "enforce".to_string(), rules: Some(access_control_rules), id: "0".to_string(), }; let computed_authorization_item = ComputedAuthorizationItem::from_authorization_item(authorization_item.clone()); let authorization_rules_for_logging = AuthorizationRulesForLogging::new( Some(AuthorizationRules { imds: 
Some(authorization_item.clone()), wireserver: Some(authorization_item.clone()), hostga: Some(authorization_item.clone()), }), ComputedAuthorizationRules { imds: Some(computed_authorization_item.clone()), wireserver: Some(computed_authorization_item.clone()), hostga: Some(computed_authorization_item.clone()), }, ); let max_file_count = 5; for _ in 0..10 { authorization_rules_for_logging.write_all(&temp_test_path, max_file_count); tokio::time::sleep(tokio::time::Duration::from_millis(10)).await; } let files = misc_helpers::search_files(&temp_test_path, r"^AuthorizationRules_.*\.json$").unwrap(); assert_eq!(files.len(), max_file_count); // clean up and ignore the clean up errors _ = std::fs::remove_dir_all(&temp_test_path); } } GuestProxyAgent-1.0.30/proxy_agent/src/proxy/proxy_authorizer.rs000066400000000000000000000573051500521614600252270ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT //! This module contains the logic to authorize the connection based on the claims. //! The claims are used to determine if the process is allowed to connect to the remote server. //! //! Example //! ```rust //! use proxy_agent::proxy_authorizer; //! use proxy_agent::proxy::Claims; //! use proxy_agent::shared_state::key_keeper_wrapper::KeyKeeperSharedState; //! use proxy_agent::common::constants; //! use std::str::FromStr; //! //! let key_keeper_shared_state = KeyKeeperSharedState::start_new(); //! let vm_metadata = proxy_authorizer::get_access_control_rules(constants::WIRE_SERVER_IP.to_string(), constants::WIRE_SERVER_PORT, key_keeper_shared_state.clone()).await.unwrap(); //! let authorizer = proxy_authorizer::get_authorizer(constants::WIRE_SERVER_IP, constants::WIRE_SERVER_PORT, claims); //! let url = hyper::Uri::from_str("http://localhost/test?").unwrap(); //! authorizer.authorize(logger, url, vm_metadata); //! 
use super::authorization_rules::{AuthorizationMode, ComputedAuthorizationItem}; use super::proxy_connection::ConnectionLogger; use crate::shared_state::key_keeper_wrapper::KeyKeeperSharedState; use crate::{common::constants, common::result::Result, proxy::Claims}; use proxy_agent_shared::logger::LoggerLevel; #[derive(PartialEq)] pub enum AuthorizeResult { Ok, OkWithAudit, Forbidden, } pub trait Authorizer { // authorize the connection fn authorize( &self, logger: &mut ConnectionLogger, request_url: hyper::Uri, access_control_rules: Option, ) -> AuthorizeResult; fn to_string(&self) -> String; fn type_name(&self) -> String { std::any::type_name::().to_string() } } struct WireServer { claims: Claims, } impl Authorizer for WireServer { fn authorize( &self, logger: &mut ConnectionLogger, request_url: hyper::Uri, access_control_rules: Option, ) -> AuthorizeResult { if !self.claims.runAsElevated { return AuthorizeResult::Forbidden; } if let Some(rules) = access_control_rules { if rules.is_allowed(logger, request_url.clone(), self.claims.clone()) { return AuthorizeResult::Ok; } else { if rules.mode == AuthorizationMode::Audit { logger.write( LoggerLevel::Info, format!("WireServer request {} denied in audit mode, continue forward the request", request_url)); return AuthorizeResult::OkWithAudit; } return AuthorizeResult::Forbidden; } } AuthorizeResult::Ok } fn to_string(&self) -> String { format!( "WireServer {{ runAsElevated: {}, processName: {} }}", self.claims.runAsElevated, self.claims.processName.to_string_lossy() ) } } struct Imds { #[allow(dead_code)] claims: Claims, } impl Authorizer for Imds { fn authorize( &self, logger: &mut ConnectionLogger, request_url: hyper::Uri, access_control_rules: Option, ) -> AuthorizeResult { if let Some(rules) = access_control_rules { if rules.is_allowed(logger, request_url.clone(), self.claims.clone()) { return AuthorizeResult::Ok; } else { if rules.mode == AuthorizationMode::Audit { logger.write( LoggerLevel::Info, format!( "IMDS request {} denied in audit mode, continue forward the request", request_url ), ); return AuthorizeResult::OkWithAudit; } return AuthorizeResult::Forbidden; } } AuthorizeResult::Ok } fn to_string(&self) -> String { "IMDS".to_string() } } struct GAPlugin { claims: Claims, } impl Authorizer for GAPlugin { fn authorize( &self, logger: &mut ConnectionLogger, request_url: hyper::Uri, access_control_rules: Option, ) -> AuthorizeResult { if !self.claims.runAsElevated { return AuthorizeResult::Forbidden; } if let Some(rules) = access_control_rules { if rules.is_allowed(logger, request_url.clone(), self.claims.clone()) { return AuthorizeResult::Ok; } else { if rules.mode == AuthorizationMode::Audit { logger.write( LoggerLevel::Info, format!("HostGAPlugin request {} denied in audit mode, continue forward the request", request_url)); return AuthorizeResult::OkWithAudit; } return AuthorizeResult::Forbidden; } } AuthorizeResult::Ok } fn to_string(&self) -> String { format!( "GAPlugin {{ runAsElevated: {}, processName: {} }}", self.claims.runAsElevated, self.claims.processName.to_string_lossy() ) } } struct ProxyAgent {} impl Authorizer for ProxyAgent { fn authorize( &self, _logger: &mut ConnectionLogger, _request_url: hyper::Uri, _access_control_rules: Option, ) -> AuthorizeResult { // Forbid the request send to this listener directly AuthorizeResult::Forbidden } fn to_string(&self) -> String { "ProxyAgent".to_string() } } struct Default {} impl Authorizer for Default { fn authorize( &self, _logger: &mut ConnectionLogger, _request_url: hyper::Uri, 
_access_control_rules: Option, ) -> AuthorizeResult { AuthorizeResult::Ok } fn to_string(&self) -> String { "Default".to_string() } } pub fn get_authorizer(ip: String, port: u16, claims: Claims) -> Box { if ip == constants::WIRE_SERVER_IP && port == constants::WIRE_SERVER_PORT { Box::new(WireServer { claims }) } else if ip == constants::GA_PLUGIN_IP && port == constants::GA_PLUGIN_PORT { return Box::new(GAPlugin { claims }); } else if ip == constants::IMDS_IP && port == constants::IMDS_PORT { return Box::new(Imds { claims }); } else if ip == constants::PROXY_AGENT_IP && port == constants::PROXY_AGENT_PORT { return Box::new(ProxyAgent {}); } else { Box::new(Default {}) } } pub async fn get_access_control_rules( ip: String, port: u16, key_keeper_shared_state: KeyKeeperSharedState, ) -> Result> { match (ip.as_str(), port) { (constants::WIRE_SERVER_IP, constants::WIRE_SERVER_PORT) => { key_keeper_shared_state.get_wireserver_rules().await } (constants::GA_PLUGIN_IP, constants::GA_PLUGIN_PORT) => { key_keeper_shared_state.get_hostga_rules().await } (constants::IMDS_IP, constants::IMDS_PORT) => { key_keeper_shared_state.get_imds_rules().await } _ => Ok(None), } } pub fn authorize( ip: String, port: u16, logger: &mut ConnectionLogger, request_uri: hyper::Uri, claims: Claims, access_control_rules: Option, ) -> AuthorizeResult { let auth = get_authorizer(ip, port, claims); logger.write( LoggerLevel::Trace, format!("Got auth: {}", auth.to_string()), ); auth.authorize(logger, request_uri, access_control_rules) } #[cfg(test)] mod tests { use crate::{ key_keeper::key::AuthorizationItem, proxy::{proxy_authorizer::AuthorizeResult, proxy_connection::ConnectionLogger}, shared_state::key_keeper_wrapper::KeyKeeperSharedState, }; use std::{ffi::OsString, path::PathBuf, str::FromStr}; #[test] fn get_authenticate_test() { let claims = crate::proxy::Claims { userId: 0, userName: "test".to_string(), userGroups: vec!["test".to_string()], processId: std::process::id(), processName: OsString::from("test"), processFullPath: PathBuf::from("test"), processCmdLine: "test".to_string(), runAsElevated: true, clientIp: "127.0.0.1".to_string(), clientPort: 0, // doesn't matter for this test }; let mut test_logger = ConnectionLogger::new(0, 0); let auth: Box = super::get_authorizer( crate::common::constants::WIRE_SERVER_IP.to_string(), crate::common::constants::WIRE_SERVER_PORT, claims.clone(), ); let test_uri = hyper::Uri::from_str("test").unwrap(); assert_eq!( auth.to_string(), "WireServer { runAsElevated: true, processName: test }" ); assert!( AuthorizeResult::Ok == auth.authorize(&mut test_logger, test_uri.clone(), None), "WireServer authentication must be Ok" ); let auth = super::get_authorizer( crate::common::constants::GA_PLUGIN_IP.to_string(), crate::common::constants::GA_PLUGIN_PORT, claims.clone(), ); assert_eq!( auth.to_string(), "GAPlugin { runAsElevated: true, processName: test }" ); assert!( AuthorizeResult::Ok == auth.authorize(&mut test_logger, test_uri.clone(), None), "GAPlugin authentication must be Ok" ); let auth = super::get_authorizer( crate::common::constants::IMDS_IP.to_string(), crate::common::constants::IMDS_PORT, claims.clone(), ); assert_eq!(auth.to_string(), "IMDS"); assert!( AuthorizeResult::Ok == auth.authorize(&mut test_logger, test_uri.clone(), None), "IMDS authentication must be Ok" ); let auth = super::get_authorizer( crate::common::constants::PROXY_AGENT_IP.to_string(), crate::common::constants::PROXY_AGENT_PORT, claims.clone(), ); assert_eq!(auth.to_string(), "ProxyAgent"); assert!( 
AuthorizeResult::Forbidden == auth.authorize(&mut test_logger, test_uri.clone(), None), "ProxyAgent authentication must be Forbidden" ); let auth = super::get_authorizer( crate::common::constants::PROXY_AGENT_IP.to_string(), crate::common::constants::PROXY_AGENT_PORT + 1, claims.clone(), ); assert_eq!(auth.to_string(), "Default"); } #[tokio::test] async fn wireserver_authenticate_test() { let claims = crate::proxy::Claims { userId: 0, userName: "test".to_string(), userGroups: vec!["test".to_string()], processId: std::process::id(), processName: OsString::from("test"), processFullPath: PathBuf::from("test"), processCmdLine: "test".to_string(), runAsElevated: true, clientIp: "127.0.0.1".to_string(), clientPort: 0, // doesn't matter for this test }; let mut test_logger = ConnectionLogger::new(1, 1); let auth = super::get_authorizer( crate::common::constants::WIRE_SERVER_IP.to_string(), crate::common::constants::WIRE_SERVER_PORT, claims.clone(), ); let url = hyper::Uri::from_str("http://localhost/test?").unwrap(); let key_keeper_shared_state = KeyKeeperSharedState::start_new(); // validate disabled rules let disabled_rules = AuthorizationItem { defaultAccess: "deny".to_string(), mode: "disabled".to_string(), id: "id".to_string(), rules: None, }; key_keeper_shared_state .set_wireserver_rules(Some(disabled_rules)) .await .unwrap(); let access_control_rules = key_keeper_shared_state .get_wireserver_rules() .await .unwrap(); assert!( auth.authorize(&mut test_logger, url.clone(), access_control_rules) == AuthorizeResult::Ok, "WireServer authentication must be Ok with disabled rules" ); // validate audit rules let audit_deny_rules = AuthorizationItem { defaultAccess: "deny".to_string(), mode: "audit".to_string(), id: "id".to_string(), rules: None, }; let audit_allow_rules = AuthorizationItem { defaultAccess: "allow".to_string(), mode: "audit".to_string(), id: "id".to_string(), rules: None, }; key_keeper_shared_state .set_wireserver_rules(Some(audit_allow_rules)) .await .unwrap(); let access_control_rules = key_keeper_shared_state .get_wireserver_rules() .await .unwrap(); assert!( auth.authorize(&mut test_logger, url.clone(), access_control_rules) == AuthorizeResult::Ok, "WireServer authentication must be Ok with audit allow rules" ); key_keeper_shared_state .set_wireserver_rules(Some(audit_deny_rules)) .await .unwrap(); let access_control_rules = key_keeper_shared_state .get_wireserver_rules() .await .unwrap(); assert!( auth.authorize(&mut test_logger, url.clone(), access_control_rules) == AuthorizeResult::OkWithAudit, "WireServer authentication must be OkWithAudit with audit deny rules" ); // validate enforce rules let enforce_allow_rules = AuthorizationItem { defaultAccess: "allow".to_string(), mode: "enforce".to_string(), id: "id".to_string(), rules: None, }; let enforce_deny_rules = AuthorizationItem { defaultAccess: "deny".to_string(), mode: "enforce".to_string(), id: "id".to_string(), rules: None, }; key_keeper_shared_state .set_wireserver_rules(Some(enforce_allow_rules)) .await .unwrap(); let access_control_rules = key_keeper_shared_state .get_wireserver_rules() .await .unwrap(); assert!( auth.authorize(&mut test_logger, url.clone(), access_control_rules) == AuthorizeResult::Ok, "WireServer authentication must be Ok with enforce allow rules" ); key_keeper_shared_state .set_wireserver_rules(Some(enforce_deny_rules)) .await .unwrap(); let access_control_rules = key_keeper_shared_state .get_wireserver_rules() .await .unwrap(); assert!( auth.authorize(&mut test_logger, url.clone(), 
access_control_rules) == AuthorizeResult::Forbidden, "WireServer authentication must be Forbidden with enforce deny rules" ); } #[tokio::test] async fn imds_authenticate_test() { let mut test_logger = ConnectionLogger::new(1, 1); let claims = crate::proxy::Claims { userId: 0, userName: "test".to_string(), userGroups: vec!["test".to_string()], processId: std::process::id(), processName: OsString::from("test"), processFullPath: PathBuf::from("test"), processCmdLine: "test".to_string(), runAsElevated: true, clientIp: "127.0.0.1".to_string(), clientPort: 0, // doesn't matter for this test }; let auth = super::get_authorizer( crate::common::constants::IMDS_IP.to_string(), crate::common::constants::IMDS_PORT, claims.clone(), ); let url = hyper::Uri::from_str("http://localhost/test?").unwrap(); let key_keeper_shared_state = KeyKeeperSharedState::start_new(); // validate disabled rules let disabled_rules = AuthorizationItem { defaultAccess: "deny".to_string(), mode: "disabled".to_string(), id: "id".to_string(), rules: None, }; key_keeper_shared_state .set_imds_rules(Some(disabled_rules)) .await .unwrap(); let access_control_rules = key_keeper_shared_state.get_imds_rules().await.unwrap(); assert!( auth.authorize(&mut test_logger, url.clone(), access_control_rules,) == AuthorizeResult::Ok, "IMDS authentication must be Ok with disabled rules" ); // validate audit rules let audit_deny_rules = AuthorizationItem { defaultAccess: "deny".to_string(), mode: "audit".to_string(), id: "id".to_string(), rules: None, }; let audit_allow_rules = AuthorizationItem { defaultAccess: "allow".to_string(), mode: "audit".to_string(), id: "id".to_string(), rules: None, }; key_keeper_shared_state .set_imds_rules(Some(audit_allow_rules)) .await .unwrap(); let access_control_rules = key_keeper_shared_state.get_imds_rules().await.unwrap(); assert!( auth.authorize(&mut test_logger, url.clone(), access_control_rules,) == AuthorizeResult::Ok, "IMDS authentication must be Ok with audit allow rules" ); key_keeper_shared_state .set_imds_rules(Some(audit_deny_rules)) .await .unwrap(); let access_control_rules = key_keeper_shared_state.get_imds_rules().await.unwrap(); assert!( auth.authorize(&mut test_logger, url.clone(), access_control_rules,) == AuthorizeResult::OkWithAudit, "IMDS authentication must be OkWithAudit with audit deny rules" ); // validate enforce rules let enforce_allow_rules = AuthorizationItem { defaultAccess: "allow".to_string(), mode: "enforce".to_string(), id: "id".to_string(), rules: None, }; let enforce_deny_rules = AuthorizationItem { defaultAccess: "deny".to_string(), mode: "enforce".to_string(), id: "id".to_string(), rules: None, }; key_keeper_shared_state .set_imds_rules(Some(enforce_allow_rules)) .await .unwrap(); let access_control_rules = key_keeper_shared_state.get_imds_rules().await.unwrap(); assert!( auth.authorize(&mut test_logger, url.clone(), access_control_rules,) == AuthorizeResult::Ok, "IMDS authentication must be Ok with enforce allow rules" ); key_keeper_shared_state .set_imds_rules(Some(enforce_deny_rules)) .await .unwrap(); let access_control_rules = key_keeper_shared_state.get_imds_rules().await.unwrap(); assert!( auth.authorize(&mut test_logger, url.clone(), access_control_rules,) == AuthorizeResult::Forbidden, "IMDS authentication must be Forbidden with enforce deny rules" ); } #[tokio::test] async fn hostga_authenticate_test() { let claims = crate::proxy::Claims { userId: 0, userName: "test".to_string(), userGroups: vec!["test".to_string()], processId: std::process::id(), processName: 
OsString::from("test"), processFullPath: PathBuf::from("test"), processCmdLine: "test".to_string(), runAsElevated: true, clientIp: "127.0.0.1".to_string(), clientPort: 0, // doesn't matter for this test }; let mut test_logger = ConnectionLogger::new(1, 1); let auth = super::get_authorizer( crate::common::constants::GA_PLUGIN_IP.to_string(), crate::common::constants::GA_PLUGIN_PORT, claims.clone(), ); let url = hyper::Uri::from_str("http://localhost/test?").unwrap(); let key_keeper_shared_state = KeyKeeperSharedState::start_new(); // validate disabled rules let disabled_rules = AuthorizationItem { defaultAccess: "deny".to_string(), mode: "disabled".to_string(), id: "id".to_string(), rules: None, }; key_keeper_shared_state .set_hostga_rules(Some(disabled_rules)) .await .unwrap(); let access_control_rules = key_keeper_shared_state.get_hostga_rules().await.unwrap(); assert!( auth.authorize(&mut test_logger, url.clone(), access_control_rules) == AuthorizeResult::Ok, "HostGA authentication must be Ok with disabled rules" ); // validate audit rules let audit_deny_rules = AuthorizationItem { defaultAccess: "deny".to_string(), mode: "audit".to_string(), id: "id".to_string(), rules: None, }; let audit_allow_rules = AuthorizationItem { defaultAccess: "allow".to_string(), mode: "audit".to_string(), id: "id".to_string(), rules: None, }; key_keeper_shared_state .set_hostga_rules(Some(audit_allow_rules)) .await .unwrap(); let access_control_rules = key_keeper_shared_state.get_hostga_rules().await.unwrap(); assert!( auth.authorize(&mut test_logger, url.clone(), access_control_rules) == AuthorizeResult::Ok, "HostGA authentication must be Ok with audit allow rules" ); key_keeper_shared_state .set_hostga_rules(Some(audit_deny_rules)) .await .unwrap(); let access_control_rules = key_keeper_shared_state.get_hostga_rules().await.unwrap(); assert!( auth.authorize(&mut test_logger, url.clone(), access_control_rules) == AuthorizeResult::OkWithAudit, "HostGA authentication must be OkWithAudit with audit deny rules" ); // validate enforce rules let enforce_allow_rules = AuthorizationItem { defaultAccess: "allow".to_string(), mode: "enforce".to_string(), id: "id".to_string(), rules: None, }; let enforce_deny_rules = AuthorizationItem { defaultAccess: "deny".to_string(), mode: "enforce".to_string(), id: "id".to_string(), rules: None, }; key_keeper_shared_state .set_hostga_rules(Some(enforce_allow_rules)) .await .unwrap(); let access_control_rules = key_keeper_shared_state.get_hostga_rules().await.unwrap(); assert!( auth.authorize(&mut test_logger, url.clone(), access_control_rules) == AuthorizeResult::Ok, "HostGA authentication must be Ok with enforce allow rules" ); key_keeper_shared_state .set_hostga_rules(Some(enforce_deny_rules)) .await .unwrap(); let access_control_rules = key_keeper_shared_state.get_hostga_rules().await.unwrap(); assert!( auth.authorize(&mut test_logger, url.clone(), access_control_rules) == AuthorizeResult::Forbidden, "HostGA authentication must be Forbidden with enforce deny rules" ); } } GuestProxyAgent-1.0.30/proxy_agent/src/proxy/proxy_connection.rs000066400000000000000000000255451500521614600251730ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT //! This module contains the connection context struct for the proxy listener, and write proxy processing logs to local file. 
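//!
//! Example (a minimal usage sketch in the style of the other module docs in this crate; the
//! connection ids below are arbitrary illustration values):
//!
//! ```rust
//! use crate::proxy::proxy_connection::ConnectionLogger;
//! use proxy_agent_shared::logger::LoggerLevel;
//!
//! // tcp_connection_id = 1, http_connection_id = 1
//! let mut logger = ConnectionLogger::new(1, 1);
//! logger.write(LoggerLevel::Info, "start processing the proxied request".to_string());
//! // queued messages are flushed to the connection log file when the logger is dropped
//! ```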
use crate::common::error::{Error, HyperErrorType}; use crate::common::hyper_client; use crate::common::result::Result; use crate::proxy::Claims; use crate::redirector::{self, AuditEntry}; use crate::shared_state::proxy_server_wrapper::ProxyServerSharedState; use crate::shared_state::redirector_wrapper::RedirectorSharedState; use http_body_util::Full; use hyper::body::Bytes; use hyper::client::conn::http1; use hyper::Request; use proxy_agent_shared::logger::{self, logger_manager, LoggerLevel}; use std::net::{Ipv4Addr, SocketAddr}; use std::sync::Arc; use std::time::Instant; use tokio::sync::Mutex; pub type RequestBody = Full; struct Client { sender: http1::SendRequest, } impl Client { async fn send_request( &mut self, req: Request, ) -> Result> { if self.sender.is_closed() { return Err(Error::Hyper(HyperErrorType::HostConnection( "the connection has been closed".to_string(), ))); } let full_url = req.uri().to_string(); self.sender.send_request(req).await.map_err(|e| { Error::Hyper(HyperErrorType::Custom( format!("Failed to send request to {}", full_url), e, )) }) } } #[derive(Clone)] pub struct TcpConnectionContext { pub id: u128, pub client_addr: SocketAddr, pub claims: Option, pub destination_ip: Option, // currently, we only support IPv4 pub destination_port: u16, sender: std::result::Result>, String>, logger: ConnectionLogger, } impl TcpConnectionContext { pub async fn new( id: u128, client_addr: SocketAddr, redirector_shared_state: RedirectorSharedState, proxy_server_shared_state: ProxyServerSharedState, #[cfg(windows)] raw_socket_id: usize, // windows only, it is the raw socket id, used to get audit entry from socket stream ) -> Self { let client_source_ip = client_addr.ip(); let client_source_port = client_addr.port(); let mut logger = ConnectionLogger::new(id, 0); let (claims, destination_ip, destination_port, sender) = match Self::get_audit_entry( &client_addr, &redirector_shared_state, &mut logger, #[cfg(windows)] raw_socket_id, ) .await { Ok(audit_entry) => { let claims = match Claims::from_audit_entry( &audit_entry, client_source_ip, client_source_port, proxy_server_shared_state, ) .await { Ok(claims) => Some(claims), Err(e) => { logger.write( LoggerLevel::Error, format!("Failed to get claims from audit entry: {}", e), ); // return None for claims None } }; let host_ip = audit_entry.destination_ipv4_addr().to_string(); let host_port = audit_entry.destination_port_in_host_byte_order(); let mut cloned_logger = logger.clone(); let fun = move |message: String| { cloned_logger.write(LoggerLevel::Warn, message); }; let sender = match hyper_client::build_http_sender(&host_ip, host_port, fun).await { Ok(sender) => { logger.write( LoggerLevel::Trace, "Successfully created http sender".to_string(), ); Ok(Arc::new(Mutex::new(Client { sender }))) } Err(e) => Err(e.to_string()), }; ( claims, Some(audit_entry.destination_ipv4_addr()), host_port, sender, ) } Err(e) => { logger.write( LoggerLevel::Warn, "This tcp connection may send to proxy agent tcp listener directly".to_string(), ); (None, None, 0, Err(e.to_string())) } }; Self { id, client_addr, claims, destination_ip, destination_port, sender, logger, } } async fn get_audit_entry( client_addr: &SocketAddr, redirector_shared_state: &RedirectorSharedState, logger: &mut ConnectionLogger, #[cfg(windows)] raw_socket_id: usize, ) -> Result { let client_source_port = client_addr.port(); match redirector::lookup_audit(client_source_port, redirector_shared_state).await { Ok(data) => { logger.write( LoggerLevel::Trace, format!( "Found audit entry 
with client_source_port '{}' successfully", client_source_port ), ); match redirector::remove_audit(client_source_port, redirector_shared_state).await { Ok(_) => logger.write( LoggerLevel::Trace, format!( "Removed audit entry with client_source_port '{}' successfully", client_source_port ), ), Err(e) => { logger.write( LoggerLevel::Warn, format!("Failed to remove audit entry: {}", e), ); } } Ok(data) } Err(e) => { let message = format!( "Failed to find audit entry with client_source_port '{}' with error: {}", client_source_port, e ); logger.write(LoggerLevel::Warn, message.clone()); #[cfg(not(windows))] { Err(Error::FindAuditEntryError(message)) } #[cfg(windows)] { logger.write( LoggerLevel::Info, "Try to get audit entry from socket stream".to_string(), ); match redirector::get_audit_from_stream_socket(raw_socket_id) { Ok(data) => { logger.write( LoggerLevel::Info, "Found audit entry from socket stream successfully".to_string(), ); Ok(data) } Err(e) => { logger.write( LoggerLevel::Warn, format!("Failed to get lookup_audit_from_stream with error: {}", e), ); Err(Error::FindAuditEntryError(message)) } } } } } } /// Get the target server ip address in string for logging purpose. pub fn get_ip_string(&self) -> String { if let Some(ip) = &self.destination_ip { return ip.to_string(); } "None".to_string() } pub fn log(&mut self, logger_level: LoggerLevel, message: String) { self.logger.write(logger_level, message) } async fn send_request( &self, request: hyper::Request, ) -> Result> { match &self.sender { Ok(sender) => sender.lock().await.send_request(request).await, Err(e) => Err(Error::Hyper(HyperErrorType::HostConnection(e.clone()))), } } } pub struct HttpConnectionContext { pub id: u128, pub now: Instant, pub method: hyper::Method, pub url: hyper::Uri, pub tcp_connection_context: TcpConnectionContext, pub logger: ConnectionLogger, } impl HttpConnectionContext { pub fn should_skip_sig(&self) -> bool { hyper_client::should_skip_sig(&self.method, &self.url) } pub fn contains_traversal_characters(&self) -> bool { self.url.path().contains("..") } pub fn log(&mut self, logger_level: LoggerLevel, message: String) { self.logger.write(logger_level, message) } pub fn get_logger_mut_ref(&mut self) -> &mut ConnectionLogger { &mut self.logger } pub async fn send_request( &self, request: hyper::Request, ) -> Result> { self.tcp_connection_context.send_request(request).await } } pub struct ConnectionLogger { pub tcp_connection_id: u128, pub http_connection_id: u128, queue: Vec, } impl ConnectionLogger { pub const CONNECTION_LOGGER_KEY: &'static str = "Connection_Logger"; pub fn new(tcp_connection_id: u128, http_connection_id: u128) -> Self { Self { tcp_connection_id, http_connection_id, queue: Vec::new(), } } pub fn write(&mut self, logger_level: LoggerLevel, message: String) { if logger_level > logger_manager::get_logger_level() { return; } self.queue.push(format!( "{}{}[{}] - {}", logger::get_log_header(logger_level), self.http_connection_id, self.tcp_connection_id, message )); } } impl Drop for ConnectionLogger { fn drop(&mut self) { if !self.queue.is_empty() { self.queue.push(format!( "{}{}[{}] - {}", logger::get_log_header(LoggerLevel::Info), self.http_connection_id, self.tcp_connection_id, "------------------------ ConnectionLogger is dropped ------------------------" )); logger_manager::write_many( Some(Self::CONNECTION_LOGGER_KEY.to_string()), self.queue.clone(), ); } } } impl Clone for ConnectionLogger { fn clone(&self) -> Self { Self { tcp_connection_id: self.tcp_connection_id, http_connection_id: 
self.http_connection_id, queue: Vec::new(), // Do not clone the queue, as it is used for logging } } } GuestProxyAgent-1.0.30/proxy_agent/src/proxy/proxy_server.rs000066400000000000000000001171151500521614600243350ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT //! This module is responsible for starting the proxy server and handling incoming requests. //! It listens on a specified port and forwards the requests to the target server, //! then forward the response from the target server and sends it back to the client. //! It also handles the provision state check request. //! It uses the `hyper` crate to handle the HTTP requests and responses, //! uses the `tower` crate to limit the incoming request body size. //! //! Example: //! ```rust //! use crate::common::config; //! use crate::proxy::proxy_server; //! use crate::shared_state::SharedState; //! //! let shared_state = SharedState::start_all(); //! let port = config::get_proxy_port(); //! let proxy_server = proxy_server::ProxyServer::new(port, &shared_state); //! tokio::spawn(proxy_server.start()); //! ``` use super::proxy_authorizer::AuthorizeResult; use super::proxy_connection::{ConnectionLogger, HttpConnectionContext, TcpConnectionContext}; use crate::common::{ constants, error::{Error, HyperErrorType}, helpers, hyper_client, logger, result::Result, }; use crate::provision; use crate::proxy::{proxy_authorizer, proxy_summary::ProxySummary, Claims}; use crate::shared_state::agent_status_wrapper::{AgentStatusModule, AgentStatusSharedState}; use crate::shared_state::key_keeper_wrapper::KeyKeeperSharedState; use crate::shared_state::provision_wrapper::ProvisionSharedState; use crate::shared_state::proxy_server_wrapper::ProxyServerSharedState; use crate::shared_state::redirector_wrapper::RedirectorSharedState; use crate::shared_state::telemetry_wrapper::TelemetrySharedState; use crate::shared_state::SharedState; use http_body_util::Full; use http_body_util::{combinators::BoxBody, BodyExt}; use hyper::body::{Bytes, Frame, Incoming}; use hyper::header::{HeaderName, HeaderValue}; use hyper::service::service_fn; use hyper::StatusCode; use hyper::{Request, Response}; use hyper_util::rt::TokioIo; use proxy_agent_shared::logger::LoggerLevel; use proxy_agent_shared::misc_helpers; use proxy_agent_shared::proxy_agent_aggregate_status::ModuleState; use proxy_agent_shared::telemetry::event_logger; use std::time::Duration; use tokio::net::TcpListener; use tokio::net::TcpStream; use tokio_util::sync::CancellationToken; use tower::Service; use tower_http::{body::Limited, limit::RequestBodyLimitLayer}; const REQUEST_BODY_LOW_LIMIT_SIZE: usize = 1024 * 100; // 100KB const REQUEST_BODY_LARGE_LIMIT_SIZE: usize = 1024 * REQUEST_BODY_LOW_LIMIT_SIZE; // 100MB const START_LISTENER_RETRY_COUNT: u16 = 5; const START_LISTENER_RETRY_SLEEP_DURATION: Duration = Duration::from_secs(1); #[derive(Clone)] pub struct ProxyServer { port: u16, cancellation_token: CancellationToken, key_keeper_shared_state: KeyKeeperSharedState, telemetry_shared_state: TelemetrySharedState, provision_shared_state: ProvisionSharedState, agent_status_shared_state: AgentStatusSharedState, redirector_shared_state: RedirectorSharedState, proxy_server_shared_state: ProxyServerSharedState, } impl ProxyServer { pub fn new(port: u16, shared_state: &SharedState) -> Self { ProxyServer { port, cancellation_token: shared_state.get_cancellation_token(), key_keeper_shared_state: shared_state.get_key_keeper_shared_state(), telemetry_shared_state: 
shared_state.get_telemetry_shared_state(), provision_shared_state: shared_state.get_provision_shared_state(), agent_status_shared_state: shared_state.get_agent_status_shared_state(), redirector_shared_state: shared_state.get_redirector_shared_state(), proxy_server_shared_state: shared_state.get_proxy_server_shared_state(), } } /// start listener at the given address with retry logic if the address is in use async fn start_listener_with_retry( addr: &str, retry_count: u16, sleep_duration: Duration, ) -> Result { for i in 0..retry_count { let listener = TcpListener::bind(addr).await; match listener { Ok(l) => { return Ok(l); } Err(e) => match e.kind() { std::io::ErrorKind::AddrInUse => { let message = format!( "Failed bind to '{}' with error 'AddrInUse', wait '{:#?}' and retrying {}.", addr, sleep_duration, (i+1) ); logger::write_warning(message); tokio::time::sleep(sleep_duration).await; continue; } _ => { // other error, return it return Err(Error::Io( format!("Failed to bind TcpListener '{}'", addr), e, )); } }, } } // one more effort try bind to the addr TcpListener::bind(addr) .await .map_err(|e| Error::Io(format!("Failed to bind TcpListener '{}'", addr), e)) } pub async fn start(&self) { let addr = format!("{}:{}", std::net::Ipv4Addr::LOCALHOST, self.port); logger::write_information(format!("Start proxy listener at '{}'.", &addr)); let listener = match Self::start_listener_with_retry( &addr, START_LISTENER_RETRY_COUNT, START_LISTENER_RETRY_SLEEP_DURATION, ) .await { Ok(listener) => listener, Err(e) => { let message = e.to_string(); if let Err(e) = self .agent_status_shared_state .set_module_status_message(message.to_string(), AgentStatusModule::ProxyServer) .await { logger::write_warning(format!("Failed to set module status message: {}", e)); } if let Err(e) = self .agent_status_shared_state .set_module_state(ModuleState::STOPPED, AgentStatusModule::ProxyServer) .await { logger::write_warning(format!("Failed to set module state: {}", e)); } // send this critical error to event logger event_logger::write_event( LoggerLevel::Warn, message, "start", "proxy_server", logger::AGENT_LOGGER_KEY, ); return; } }; let message = helpers::write_startup_event( "Started proxy listener, ready to accept request", "start", "proxy_server", logger::AGENT_LOGGER_KEY, ); if let Err(e) = self .agent_status_shared_state .set_module_status_message(message.to_string(), AgentStatusModule::ProxyServer) .await { logger::write_warning(format!("Failed to set module status message: {}", e)); } if let Err(e) = self .agent_status_shared_state .set_module_state(ModuleState::RUNNING, AgentStatusModule::ProxyServer) .await { logger::write_warning(format!("Failed to set module state: {}", e)); } provision::listener_started( self.cancellation_token.clone(), self.key_keeper_shared_state.clone(), self.telemetry_shared_state.clone(), self.provision_shared_state.clone(), self.agent_status_shared_state.clone(), ) .await; // We start a loop to continuously accept incoming connections loop { tokio::select! 
{ _ = self.cancellation_token.cancelled() => { logger::write_warning("cancellation token signal received, stop the listener.".to_string()); let _= self.agent_status_shared_state .set_module_state(ModuleState::STOPPED, AgentStatusModule::ProxyServer) .await; return; } result = listener.accept() => { match result { Ok((stream, client_addr)) =>{ self.handle_new_tcp_connection(stream, client_addr).await; }, Err(e) => { logger::write_error(format!("Failed to accept connection: {}", e)); } } } } } } async fn handle_new_tcp_connection( &self, stream: TcpStream, client_addr: std::net::SocketAddr, ) { let tcp_connection_id = match self .agent_status_shared_state .increase_tcp_connection_count() .await { Ok(id) => id, Err(e) => { ConnectionLogger::new(0, 0).write( LoggerLevel::Error, format!("Failed to increase tcp connection count: {}", e), ); return; } }; let mut tcp_connection_logger = ConnectionLogger::new(tcp_connection_id, 0); tcp_connection_logger.write( LoggerLevel::Trace, format!("Accepted new tcp connection [{}].", tcp_connection_id), ); tokio::spawn({ let cloned_proxy_server = self.clone(); async move { let (stream, _cloned_std_stream) = match Self::set_stream_read_time_out(stream, &mut tcp_connection_logger) { Ok((stream, cloned_std_stream)) => (stream, cloned_std_stream), Err(e) => { tcp_connection_logger.write( LoggerLevel::Error, format!("Failed to set stream read timeout: {}", e), ); return; } }; let tcp_connection_context = TcpConnectionContext::new( tcp_connection_id, client_addr, cloned_proxy_server.redirector_shared_state.clone(), cloned_proxy_server.proxy_server_shared_state.clone(), #[cfg(windows)] ProxyServer::get_stream_rocket_id(&_cloned_std_stream), ) .await; let cloned_tcp_connection_context = tcp_connection_context.clone(); // move client addr, cloned std stream and shared_state to the service_fn let service = service_fn(move |req| { // use tower service as middleware to limit the request body size let low_limit_layer = RequestBodyLimitLayer::new(REQUEST_BODY_LOW_LIMIT_SIZE); let large_limit_layer = RequestBodyLimitLayer::new(REQUEST_BODY_LARGE_LIMIT_SIZE); let low_limited_tower_service = tower::ServiceBuilder::new().layer(low_limit_layer); let large_limited_tower_service = tower::ServiceBuilder::new().layer(large_limit_layer); let tower_service_layer = if crate::common::hyper_client::should_skip_sig(req.method(), req.uri()) { // skip signature check for large request large_limited_tower_service.clone() } else { // use low limit for normal request low_limited_tower_service.clone() }; let cloned_proxy_server = cloned_proxy_server.clone(); let cloned_tcp_connection_context = cloned_tcp_connection_context.clone(); let mut tower_service = tower_service_layer.service_fn(move |req: Request<_>| { let cloned_proxy_server = cloned_proxy_server.clone(); cloned_proxy_server .handle_new_http_request(req, cloned_tcp_connection_context.clone()) }); tower_service.call(req) }); // Use an adapter to access something implementing `tokio::io` traits as if they implement let io = TokioIo::new(stream); // We use the `hyper::server::conn::Http` to serve the connection let mut http = hyper::server::conn::http1::Builder::new(); if let Err(e) = http .keep_alive(true) // set keep_alive to true explicitly .serve_connection(io, service) .await { tcp_connection_logger.write( LoggerLevel::Warn, format!("ProxyListener serve_connection error: {}", e), ); } } }); } #[cfg(windows)] fn get_stream_rocket_id(stream: &std::net::TcpStream) -> usize { use std::os::windows::io::AsRawSocket; 
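        // On Windows the raw socket id is handed to TcpConnectionContext so that the audit entry
        // can be recovered directly from the socket stream if the source-port lookup fails.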
stream.as_raw_socket() as usize } // Set the read timeout for the stream fn set_stream_read_time_out( stream: TcpStream, connection_logger: &mut ConnectionLogger, ) -> Result<(TcpStream, std::net::TcpStream)> { // Convert the stream to a std stream let std_stream = stream.into_std().map_err(|e| { Error::Io( "Failed to convert Tokio stream into std equivalent".to_string(), e, ) })?; // Set the read timeout if let Err(e) = std_stream.set_read_timeout(Some(std::time::Duration::from_secs(10))) { connection_logger.write( LoggerLevel::Warn, format!("Failed to set read timeout: {}", e), ); } // Clone the stream for the service_fn let cloned_std_stream = std_stream .try_clone() .map_err(|e| Error::Io("Failed to clone TCP stream".to_string(), e))?; // Convert the std stream back let tokio_tcp_stream = TcpStream::from_std(std_stream).map_err(|e| { Error::Io( "Failed to convert std stream into Tokio equivalent".to_string(), e, ) })?; Ok((tokio_tcp_stream, cloned_std_stream)) } async fn handle_new_http_request( self, request: Request>, mut tcp_connection_context: TcpConnectionContext, ) -> Result>> { let connection_id = match self .agent_status_shared_state .increase_connection_count() .await { Ok(id) => id, Err(e) => { tcp_connection_context.log( LoggerLevel::Error, format!("Failed to increase connection count: {}", e), ); return Ok(Self::empty_response(StatusCode::INTERNAL_SERVER_ERROR)); } }; let mut http_connection_context = HttpConnectionContext { id: connection_id, now: std::time::Instant::now(), url: request.uri().clone(), method: request.method().clone(), tcp_connection_context: tcp_connection_context.clone(), logger: ConnectionLogger::new(tcp_connection_context.id, connection_id), }; http_connection_context.log( LoggerLevel::Info, format!( "Got request from {} for {} {}", tcp_connection_context.client_addr, http_connection_context.method, http_connection_context.url ), ); if http_connection_context.contains_traversal_characters() { self.log_connection_summary( &mut http_connection_context, StatusCode::NOT_FOUND, false, "Traversal characters found in the request, return NOT_FOUND!".to_string(), ) .await; return Ok(Self::empty_response(StatusCode::NOT_FOUND)); } if http_connection_context.url == provision::provision_query::PROVISION_URL_PATH { return self .handle_provision_state_check_request( http_connection_context.get_logger_mut_ref(), request, ) .await; } let ip = match tcp_connection_context.destination_ip { Some(ip) => ip, None => { self.log_connection_summary( &mut http_connection_context, StatusCode::MISDIRECTED_REQUEST, false, "No remote destination_ip found in the request, return!".to_string(), ) .await; return Ok(Self::empty_response(StatusCode::MISDIRECTED_REQUEST)); } }; let port = tcp_connection_context.destination_port; let claims = match tcp_connection_context.claims { Some(c) => c.clone(), None => { self.log_connection_summary( &mut http_connection_context, StatusCode::MISDIRECTED_REQUEST, true, "No claims found in the request, return!".to_string(), ) .await; return Ok(Self::empty_response(StatusCode::MISDIRECTED_REQUEST)); } }; http_connection_context.log(LoggerLevel::Trace, format!("Use lookup value:{ip}:{port}.")); let claim_details: String = match serde_json::to_string(&claims) { Ok(json) => json, Err(e) => { self.log_connection_summary( &mut http_connection_context, StatusCode::MISDIRECTED_REQUEST, false, format!("Failed to get claims json string: {}", e), ) .await; return Ok(Self::empty_response(StatusCode::MISDIRECTED_REQUEST)); } }; 
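        // Authorization: the destination ip:port selects the authorizer (WireServer, GAPlugin, IMDS,
        // ProxyAgent or Default), and the matching access control rules are read from the key keeper
        // shared state before the request is checked below.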
http_connection_context.log(LoggerLevel::Trace, claim_details.to_string()); // authenticate the connection let access_control_rules = match proxy_authorizer::get_access_control_rules( ip.to_string(), port, self.key_keeper_shared_state.clone(), ) .await { Ok(rules) => rules, Err(e) => { self.log_connection_summary( &mut http_connection_context, StatusCode::INTERNAL_SERVER_ERROR, false, format!("Failed to get access control rules: {}", e), ) .await; return Ok(Self::empty_response(StatusCode::INTERNAL_SERVER_ERROR)); } }; let result = proxy_authorizer::authorize( ip.to_string(), port, http_connection_context.get_logger_mut_ref(), request.uri().clone(), claims.clone(), access_control_rules, ); if result != AuthorizeResult::Ok { // log to authorize failed connection summary self.log_connection_summary( &mut http_connection_context, StatusCode::FORBIDDEN, true, "Authorize failed".to_string(), ) .await; if result == AuthorizeResult::Forbidden { self.log_connection_summary( &mut http_connection_context, StatusCode::FORBIDDEN, false, format!("Block unauthorized request: {}", claim_details), ) .await; return Ok(Self::empty_response(StatusCode::FORBIDDEN)); } } // forward the request to the target server let mut proxy_request = request; // Add required headers let host_claims = format!( "{{ \"{}\": \"{}\"}}", constants::CLAIMS_IS_ROOT, claims.runAsElevated ); proxy_request.headers_mut().insert( HeaderName::from_static(constants::CLAIMS_HEADER), match HeaderValue::from_str(&host_claims) { Ok(value) => value, Err(e) => { http_connection_context.log( LoggerLevel::Error, format!( "Failed to add claims header: {} with error: {}", host_claims, e ), ); return Ok(Self::empty_response(StatusCode::BAD_GATEWAY)); } }, ); proxy_request.headers_mut().insert( HeaderName::from_static(constants::DATE_HEADER), match HeaderValue::from_str(&misc_helpers::get_date_time_rfc1123_string()) { Ok(value) => value, Err(e) => { http_connection_context.log( LoggerLevel::Error, format!("Failed to add date header with error: {}", e), ); return Ok(Self::empty_response(StatusCode::BAD_GATEWAY)); } }, ); if http_connection_context.should_skip_sig() { http_connection_context.log( LoggerLevel::Info, format!( "Skip compute signature for the request for {} {}", http_connection_context.method, http_connection_context.url ), ); } else { return self .handle_request_with_signature(http_connection_context, proxy_request) .await; } // start new request to the Host endpoint let request = match Self::convert_request(proxy_request).await { Ok(r) => r, Err(e) => { http_connection_context.log( LoggerLevel::Error, format!("Failed to convert request: {}", e), ); return Ok(Self::empty_response(StatusCode::BAD_REQUEST)); } }; let proxy_response = http_connection_context.send_request(request).await; self.forward_response(proxy_response, http_connection_context) .await } async fn convert_request( request: Request>, ) -> Result>> { let (head, body) = request.into_parts(); let whole_body = match body.collect().await { Ok(data) => data.to_bytes(), Err(e) => { return Err(Error::Hyper(HyperErrorType::RequestBody(e.to_string()))); } }; Ok(Request::from_parts(head, Full::new(whole_body))) } async fn handle_provision_state_check_request( &self, logger: &mut ConnectionLogger, request: Request>, ) -> Result>> { // check MetaData header exists or not if request.headers().get(constants::METADATA_HEADER).is_none() { logger.write( LoggerLevel::Warn, "No MetaData header found in the request.".to_string(), ); return Ok(Self::empty_response(StatusCode::BAD_REQUEST)); } 
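        // The 'x-ms-azure-time_tick' header carries the caller's last known provision time tick;
        // provisioning is reported as finished only when the recorded finished_time_tick reaches that
        // value or the secure channel state is already latched.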
// Get the query time_tick let query_time_tick = match request.headers().get(constants::TIME_TICK_HEADER) { Some(time_tick) => time_tick.to_str().unwrap_or("0"), None => { logger.write( LoggerLevel::Warn, "No 'x-ms-azure-time_tick' header found in the request, use '0'.".to_string(), ); "0" } }; let query_time_tick = match query_time_tick.parse::() { Ok(time_tick) => time_tick, Err(e) => { logger.write( LoggerLevel::Warn, format!("Failed to parse time_tick header: {}", e), ); 0 } }; let provision_state = provision::get_provision_state_internal( self.provision_shared_state.clone(), self.agent_status_shared_state.clone(), self.key_keeper_shared_state.clone(), ) .await; // report as provision finished state // true only if the finished_time_tick is greater than or equal to the query_time_tick // or the secure channel is latched already let report_provision_finished = provision_state.finished_time_tick >= query_time_tick || provision_state.is_secure_channel_latched(); let find_notify_header = request.headers().get(constants::NOTIFY_HEADER).is_some(); if find_notify_header && !report_provision_finished { logger.write( LoggerLevel::Warn, "Provision is not finished yet, notify key_keeper to pull the status.".to_string(), ); if let Err(e) = self.key_keeper_shared_state.notify().await { logger.write( LoggerLevel::Warn, format!("Failed to notify key_keeper: {}", e), ); } } let provision_state = provision::provision_query::ProvisionState::new( report_provision_finished, provision_state.error_message, ); match serde_json::to_string(&provision_state) { Ok(json) => { logger.write(LoggerLevel::Info, format!("Provision state: {}", json)); let mut response = Response::new(hyper_client::full_body(json.as_bytes().to_vec())); response.headers_mut().insert( hyper::header::CONTENT_TYPE, HeaderValue::from_static("application/json; charset=utf-8"), ); Ok(response) } Err(e) => { let error = format!("Failed to get provision state: {}", e); logger.write(LoggerLevel::Warn, error.to_string()); let mut response = Response::new(hyper_client::full_body(error.as_bytes().to_vec())); *response.status_mut() = StatusCode::INTERNAL_SERVER_ERROR; Ok(response) } } } async fn forward_response( &self, proxy_response: Result>, mut http_connection_context: HttpConnectionContext, ) -> Result>> { let proxy_response = match proxy_response { Ok(response) => response, Err(e) => { let http_status_code = match e { Error::Hyper(HyperErrorType::HostConnection(_)) => StatusCode::BAD_GATEWAY, _ => StatusCode::SERVICE_UNAVAILABLE, }; self.log_connection_summary( &mut http_connection_context, http_status_code, false, format!("Failed to send request to host: {}", e), ) .await; return Ok(Self::empty_response(http_status_code)); } }; let mut logger = http_connection_context.logger.clone(); let (head, body) = proxy_response.into_parts(); let frame_stream = body.map_frame(move |frame| { let frame = match frame.into_data() { Ok(data) => data.iter().map(|byte| byte.to_be()).collect::(), Err(e) => { logger.write( LoggerLevel::Error, format!("Failed to get frame data: {:?}", e), ); Bytes::new() } }; Frame::data(frame) }); let mut response = Response::from_parts(head, frame_stream.boxed()); // insert default x-ms-azure-host-authorization header to let the client know it is through proxy agent response.headers_mut().insert( HeaderName::from_static(constants::AUTHORIZATION_HEADER), HeaderValue::from_static("value"), ); self.log_connection_summary( &mut http_connection_context, response.status(), false, "".to_string(), ) .await; Ok(response) } async fn 
log_connection_summary( &self, http_connection_context: &mut HttpConnectionContext, response_status: StatusCode, log_authorize_failed: bool, mut error_details: String, ) { let elapsed_time = http_connection_context.now.elapsed(); let claims = match &http_connection_context.tcp_connection_context.claims { Some(c) => c.clone(), None => { let mut claim = Claims::empty(); // set the client ip and port from connection.client_addr claim.clientIp = http_connection_context .tcp_connection_context .client_addr .ip() .to_string(); claim.clientPort = http_connection_context .tcp_connection_context .client_addr .port(); claim } }; const MAX_ERROR_DETAILS_LEN: usize = 4096; // 4KB if error_details.len() > MAX_ERROR_DETAILS_LEN { error_details.truncate(MAX_ERROR_DETAILS_LEN); } let summary = ProxySummary { id: http_connection_context.id, userId: claims.userId, userName: claims.userName.to_string(), userGroups: claims.userGroups.clone(), clientIp: claims.clientIp.to_string(), clientPort: claims.clientPort, processFullPath: claims.processFullPath, processCmdLine: claims.processCmdLine.to_string(), runAsElevated: claims.runAsElevated, method: http_connection_context.method.to_string(), url: http_connection_context.url.to_string(), ip: http_connection_context .tcp_connection_context .get_ip_string(), port: http_connection_context .tcp_connection_context .destination_port, responseStatus: response_status.to_string(), elapsedTime: elapsed_time.as_millis(), errorDetails: error_details, }; if let Ok(json) = serde_json::to_string(&summary) { logger::write_console_log(json.to_string()); event_logger::write_event( LoggerLevel::Info, json, "log_connection_summary", "proxy_server", ConnectionLogger::CONNECTION_LOGGER_KEY, ); }; if log_authorize_failed { if let Err(e) = self .agent_status_shared_state .add_one_failed_connection_summary(summary) .await { http_connection_context.log( LoggerLevel::Warn, format!("Failed to add failed connection summary: {}", e), ); } } else if let Err(e) = self .agent_status_shared_state .add_one_connection_summary(summary) .await { http_connection_context.log( LoggerLevel::Warn, format!("Failed to add connection summary: {}", e), ); } } // We create some utility functions to make Empty and Full bodies // fit our broadened Response body type. 
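    // `empty_response` is the common early-return path: it builds a body-less response that carries
    // only the given status code.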
fn empty_response(status_code: StatusCode) -> Response> { let mut response = Response::new(hyper_client::empty_body()); *response.status_mut() = status_code; response } async fn handle_request_with_signature( &self, mut http_connection_context: HttpConnectionContext, request: Request>, ) -> Result>> { let (head, body) = request.into_parts(); let whole_body = match body.collect().await { Ok(data) => data.to_bytes(), Err(e) => { http_connection_context.log( LoggerLevel::Error, format!("Failed to receive the request body: {}", e), ); return Ok(Self::empty_response(StatusCode::BAD_REQUEST)); } }; http_connection_context.log( LoggerLevel::Trace, format!( "Received the client request body (len={}) for {} {}", whole_body.len(), http_connection_context.method, http_connection_context.url, ), ); // create a new request to the Host endpoint let mut proxy_request: Request> = Request::from_parts(head.clone(), Full::new(whole_body.clone())); // sign the request // Add header x-ms-azure-host-authorization if let (Some(key), Some(key_guid)) = ( self.key_keeper_shared_state .get_current_key_value() .await .unwrap_or(None), self.key_keeper_shared_state .get_current_key_guid() .await .unwrap_or(None), ) { let input_to_sign = hyper_client::as_sig_input(head, whole_body); match helpers::compute_signature(&key, input_to_sign.as_slice()) { Ok(sig) => { let authorization_value = format!("{} {} {}", constants::AUTHORIZATION_SCHEME, key_guid, sig); proxy_request.headers_mut().insert( HeaderName::from_static(constants::AUTHORIZATION_HEADER), match HeaderValue::from_str(&authorization_value) { Ok(value) => value, Err(e) => { http_connection_context.log( LoggerLevel::Error, format!( "Failed to add authorization header: {} with error: {}", authorization_value, e ), ); return Ok(Self::empty_response(StatusCode::BAD_GATEWAY)); } }, ); http_connection_context.log( LoggerLevel::Trace, format!("Added authorization header {}", authorization_value), ) } Err(e) => { http_connection_context.log( LoggerLevel::Error, format!("compute_signature failed with error: {}", e), ); } } } else { http_connection_context.log( LoggerLevel::Trace, "current key is empty, skip computing the signature.".to_string(), ); } // start new request to the Host endpoint let proxy_response = http_connection_context.send_request(proxy_request).await; self.forward_response(proxy_response, http_connection_context) .await } } #[cfg(test)] mod tests { use crate::common::hyper_client; use crate::common::logger; use crate::proxy::proxy_server; use crate::shared_state; use http::Method; use std::collections::HashMap; use std::time::Duration; #[tokio::test] async fn direct_request_test() { // start listener, the port must different from the one used in production code let host = "127.0.0.1"; let port: u16 = 8091; let shared_state = shared_state::SharedState::start_all(); let key_keeper_shared_state = shared_state.get_key_keeper_shared_state(); let cancellation_token = shared_state.get_cancellation_token(); let proxy_server = proxy_server::ProxyServer::new(port, &shared_state); tokio::spawn({ let proxy_server = proxy_server.clone(); async move { proxy_server.start().await; } }); // give some time to let the listener started let sleep_duration = Duration::from_millis(100); tokio::time::sleep(sleep_duration).await; let url: hyper::Uri = format!("http://{}:{}/", host, port).parse().unwrap(); let request = hyper_client::build_request( Method::GET, &url, &HashMap::new(), None, key_keeper_shared_state .get_current_key_guid() .await .unwrap_or(None), 
key_keeper_shared_state .get_current_key_value() .await .unwrap_or(None), ) .unwrap(); let response = hyper_client::send_request(host, port, request, logger::write_warning) .await .unwrap(); assert_eq!( http::StatusCode::MISDIRECTED_REQUEST, response.status(), "response.status must be MISDIRECTED_REQUEST." ); // test with traversal characters let url: hyper::Uri = format!("http://{}:{}/test/../", host, port) .parse() .unwrap(); let request = hyper_client::build_request( Method::GET, &url, &HashMap::new(), None, key_keeper_shared_state .get_current_key_guid() .await .unwrap_or(None), key_keeper_shared_state .get_current_key_value() .await .unwrap_or(None), ) .unwrap(); let response = hyper_client::send_request(host, port, request, logger::write_warning) .await .unwrap(); assert_eq!( http::StatusCode::NOT_FOUND, response.status(), "response.status must be NOT_FOUND." ); // test large request body let body = vec![88u8; super::REQUEST_BODY_LOW_LIMIT_SIZE + 1]; let request = hyper_client::build_request( Method::POST, &url, &HashMap::new(), Some(body.as_slice()), key_keeper_shared_state .get_current_key_guid() .await .unwrap_or(None), key_keeper_shared_state .get_current_key_value() .await .unwrap_or(None), ) .unwrap(); let response = hyper_client::send_request(host, port, request, logger::write_warning) .await .unwrap(); assert_eq!( http::StatusCode::PAYLOAD_TOO_LARGE, response.status(), "response.status must be PAYLOAD_TOO_LARGE." ); // stop the listener cancellation_token.cancel(); } } GuestProxyAgent-1.0.30/proxy_agent/src/proxy/proxy_summary.rs000066400000000000000000000034731500521614600245250ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT //! This module contains the proxy summary struct. //! The proxy summary struct is used to store the summary of the proxied connections. 
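//!
//! Example (a minimal construction sketch; the field values below are hypothetical illustration
//! values only):
//!
//! ```rust
//! use proxy_agent_shared::proxy_agent_aggregate_status::ProxyConnectionSummary;
//!
//! let summary = crate::proxy::proxy_summary::ProxySummary {
//!     id: 1,
//!     method: "GET".to_string(),
//!     url: "http://169.254.169.254/metadata/instance".to_string(),
//!     clientIp: "127.0.0.1".to_string(),
//!     clientPort: 49152,
//!     ip: "169.254.169.254".to_string(),
//!     port: 80,
//!     userId: 0,
//!     userName: "test".to_string(),
//!     userGroups: vec!["test".to_string()],
//!     processFullPath: std::path::PathBuf::from("test"),
//!     processCmdLine: "test".to_string(),
//!     runAsElevated: true,
//!     responseStatus: "200 OK".to_string(),
//!     elapsedTime: 5,
//!     errorDetails: String::new(),
//! };
//! // fold the per-request summary into the aggregate status representation
//! let connection_summary: ProxyConnectionSummary = summary.into();
//! ```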
use std::path::PathBuf; use proxy_agent_shared::proxy_agent_aggregate_status::ProxyConnectionSummary; use serde_derive::{Deserialize, Serialize}; #[derive(Serialize, Deserialize)] #[allow(non_snake_case)] pub struct ProxySummary { pub id: u128, pub method: String, pub url: String, pub clientIp: String, pub clientPort: u16, pub ip: String, pub port: u16, pub userId: u64, pub userName: String, pub userGroups: Vec, pub processFullPath: PathBuf, pub processCmdLine: String, pub runAsElevated: bool, pub responseStatus: String, pub elapsedTime: u128, pub errorDetails: String, } impl ProxySummary { pub fn to_key_string(&self) -> String { format!( "{} {} {} {} {} {} {}", self.userName, self.clientIp, self.ip, self.port, self.processFullPath.to_string_lossy(), self.processCmdLine, self.responseStatus ) } } impl From for ProxyConnectionSummary { fn from(proxy_summary: ProxySummary) -> ProxyConnectionSummary { ProxyConnectionSummary { userName: proxy_summary.userName.to_string(), userGroups: Some(proxy_summary.userGroups.clone()), ip: proxy_summary.ip.to_string(), port: proxy_summary.port, processFullPath: Some(proxy_summary.processFullPath.to_string_lossy().to_string()), processCmdLine: proxy_summary.processCmdLine.to_string(), responseStatus: proxy_summary.responseStatus.to_string(), count: 1, } } } GuestProxyAgent-1.0.30/proxy_agent/src/proxy/windows.rs000066400000000000000000000326251500521614600232620ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT use crate::common::{ error::{Error, WindowsApiErrorType}, logger, result::Result, }; use libloading::{Library, Symbol}; use once_cell::sync::Lazy; use std::mem::MaybeUninit; use std::ptr::null_mut; use std::{collections::HashMap, ffi::OsString, os::windows::ffi::OsStringExt, path::PathBuf}; use windows_sys::Win32::Foundation::{BOOL, HANDLE, LUID, NTSTATUS, UNICODE_STRING}; use windows_sys::Win32::Security::Authentication::Identity; use windows_sys::Win32::Security::Authentication::Identity::SECURITY_LOGON_SESSION_DATA; use windows_sys::Win32::System::ProcessStatus::{ K32GetModuleBaseNameW, // kernel32.dll K32GetModuleFileNameExW, // kernel32.dll }; use windows_sys::Win32::System::Threading::{ NtQueryInformationProcess, // ntdll.dll OpenProcess, //kernel32.dll }; use windows_sys::Win32::System::Threading::{PROCESSINFOCLASS, PROCESS_BASIC_INFORMATION}; const LG_INCLUDE_INDIRECT: u32 = 1u32; const MAX_PREFERRED_LENGTH: u32 = 4294967295u32; #[repr(C)] struct LocalgroupUsersInfo0 { pub lgrui0_name: windows_sys::core::PWSTR, } static NETAPI32_DLL: Lazy = Lazy::new(load_netapi32_dll); fn load_netapi32_dll() -> Library { let dll_name = "netapi32.dll\0"; match unsafe { Library::new(dll_name) } { Ok(lib) => lib, Err(e) => { logger::write_error(format!("Loading {} failed with error: {}.", dll_name, e)); panic!("Loading {} failed with error: {}", dll_name, e); } } } type NetUserGetLocalGroups = unsafe extern "system" fn( servername: windows_sys::core::PWSTR, username: windows_sys::core::PWSTR, level: u32, flags: u32, bufptr: *mut *mut u8, prefmaxlen: u32, entriesread: *mut u32, totalentries: *mut u32, ) -> u32; #[allow(clippy::too_many_arguments)] fn net_user_get_local_groups( servername: windows_sys::core::PWSTR, username: windows_sys::core::PWSTR, level: u32, flags: u32, bufptr: *mut *mut LocalgroupUsersInfo0, prefmaxlen: u32, entriesread: *mut u32, totalentries: *mut u32, ) -> Result { unsafe { let fun_name = "NetUserGetLocalGroups\0"; let net_user_get_local_groups: Symbol = NETAPI32_DLL 
.get(fun_name.as_bytes()) .map_err(|e| Error::WindowsApi(WindowsApiErrorType::LoadNetUserGetLocalGroups(e)))?; let status = net_user_get_local_groups( servername, username, level, flags, bufptr as *mut *mut u8, prefmaxlen, entriesread, totalentries, ); Ok(status) } } static BUILTIN_USERS: Lazy> = Lazy::new(load_users); fn load_users() -> HashMap { let mut users = HashMap::new(); users.insert(0x3e4, "NETWORK SERVICE"); users.insert(0x3e5, "LOCAL SERVICE"); users.insert(0x3e6, "SYSTEM"); users.insert(0x3e7, "SYSTEM"); users.insert(0x3e8, "IIS_IUSRS"); users.insert(0x3e9, "IUSR"); users } /* Get user name and user group names */ pub fn get_user(logon_id: u64) -> Result<(String, Vec)> { let mut user_name; let luid = LUID { LowPart: (logon_id & 0xFFFFFFFF) as u32, // get lower part of 32 bits HighPart: (logon_id >> 32) as i32, }; let mut data = MaybeUninit::<*mut SECURITY_LOGON_SESSION_DATA>::uninit(); let status = unsafe { Identity::LsaGetLogonSessionData(&luid, data.as_mut_ptr()) }; if status != 0 { let e = std::io::Error::from_raw_os_error(status as i32); return Err(Error::WindowsApi( WindowsApiErrorType::LsaGetLogonSessionData(format!("failed with os error: {}", e)), )); } let session_data = unsafe { *data.assume_init() }; if session_data.UserName.Length != 0 { user_name = from_unicode_string(&session_data.UserName); } else { // When calling LsaGetLogonSessionData and receiving a successful return code, // but finding that SECURITY_LOGON_SESSION_DATA->UserName.Length is 0, // it typically means that the logon session exists but does not have an associated username. logger::write_warning(format!( "LsaGetLogonSessionData with login'{}' success, but user name is empty.", logon_id )); // return OK with UNDEFINED user name and empty groups return Ok((super::UNDEFINED.to_string(), Vec::new())); } let mut domain_user_name = user_name.clone(); if session_data.LogonDomain.Length != 0 { domain_user_name = format!( "{}\\{}", from_unicode_string(&session_data.LogonDomain), domain_user_name ); } // call NetUserGetLocalGroups to get local user group names let mut user_groups = Vec::new(); let mut group_count = 0; let mut total_group_count = 0; let mut group_info = null_mut(); let status = net_user_get_local_groups( null_mut(), to_pwstr(domain_user_name.as_str()).as_mut_ptr(), 0, LG_INCLUDE_INDIRECT, &mut group_info, MAX_PREFERRED_LENGTH, &mut group_count, &mut total_group_count, )?; if status == 0 { let group_info = unsafe { std::slice::from_raw_parts( group_info as *const u8 as *const LocalgroupUsersInfo0, group_count as usize, ) }; for group in group_info { let group_name = from_pwstr(group.lgrui0_name); user_groups.push(group_name); } } else { let e = std::io::Error::from_raw_os_error(status as i32); logger::write_warning(format!( "NetUserGetLocalGroups '{}' failed ({}) with os error: {}", domain_user_name, status, e )); } // update user name if it's a built-in user if BUILTIN_USERS.contains_key(&logon_id) { user_name = BUILTIN_USERS[&logon_id].to_string(); } Ok((user_name, user_groups)) } fn from_unicode_string(unicode_string: &UNICODE_STRING) -> String { let mut v = vec![0u16; unicode_string.Length as usize]; unsafe { std::ptr::copy_nonoverlapping( unicode_string.Buffer, v.as_mut_ptr(), unicode_string.Length as usize, ); } let mut rstr = String::new(); for val in v.iter() { let c: u8 = (*val & 0xFF) as u8; if c == 0 { break; } rstr.push(c as char); } rstr } fn from_pwstr(wide_string: *mut u16) -> String { let mut rstr = String::new(); let mut i = 0; loop { let c: u8 = unsafe { 
(*wide_string.offset(i) & 0xFF) as u8 }; if c == 0 { break; } rstr.push(c as char); i += 1; } rstr } fn to_pwstr(s: &str) -> Vec { let mut v: Vec = s.encode_utf16().collect(); v.push(0); v } /* Get process information */ const PROCESS_QUERY_INFORMATION: u32 = 0x0400; const PROCESS_VM_READ: u32 = 0x0010; const FALSE: BOOL = 0; const MAX_PATH: usize = 260; const STATUS_BUFFER_OVERFLOW: NTSTATUS = -2147483643; const STATUS_BUFFER_TOO_SMALL: NTSTATUS = -1073741789; const STATUS_INFO_LENGTH_MISMATCH: NTSTATUS = -1073741820; const PROCESS_BASIC_INFORMATION_CLASS: PROCESSINFOCLASS = 0; const PROCESS_COMMAND_LINE_INFORMATION_CLASS: PROCESSINFOCLASS = 60; pub fn query_basic_process_info(handler: isize) -> Result { unsafe { let mut process_basic_information = std::mem::zeroed::(); let mut return_length = 0; let status: NTSTATUS = NtQueryInformationProcess( handler, PROCESS_BASIC_INFORMATION_CLASS, &mut process_basic_information as *mut _ as *mut _, std::mem::size_of::() as u32, &mut return_length, ); if status != 0 { return Err(Error::WindowsApi(WindowsApiErrorType::WindowsOsError( std::io::Error::from_raw_os_error(status), ))); } Ok(process_basic_information) } } pub fn get_process_handler(pid: u32) -> Result { if pid == 0 { return Err(Error::Invalid("pid 0".to_string())); } let options = PROCESS_QUERY_INFORMATION | PROCESS_VM_READ; unsafe { let handler = OpenProcess(options, FALSE, pid); if handler == 0 { return Err(Error::WindowsApi(WindowsApiErrorType::WindowsOsError( std::io::Error::last_os_error(), ))); } Ok(handler) } } pub fn get_process_cmd(handler: isize) -> Result { unsafe { let mut return_length = 0; let status: NTSTATUS = NtQueryInformationProcess( handler, PROCESS_COMMAND_LINE_INFORMATION_CLASS, null_mut(), 0, &mut return_length as *mut _, ); if status != STATUS_BUFFER_OVERFLOW && status != STATUS_BUFFER_TOO_SMALL && status != STATUS_INFO_LENGTH_MISMATCH { return Err(Error::WindowsApi(WindowsApiErrorType::WindowsOsError( std::io::Error::from_raw_os_error(status), ))); } println!("return_length: {}", return_length); let buf_len = (return_length as usize) / 2; let mut buffer: Vec = vec![0; buf_len + 1]; buffer.resize(buf_len + 1, 0); // set everything to 0 let status: NTSTATUS = NtQueryInformationProcess( handler, PROCESS_COMMAND_LINE_INFORMATION_CLASS, buffer.as_mut_ptr() as *mut _, return_length, &mut return_length as *mut _, ); if status < 0 { eprintln!("NtQueryInformationProcess failed with status: {}", status); return Err(Error::WindowsApi(WindowsApiErrorType::WindowsOsError( std::io::Error::from_raw_os_error(status), ))); } buffer.set_len(buf_len); buffer.push(0); let cmd_buffer = *(buffer.as_ptr() as *const UNICODE_STRING); let cmd = String::from_utf16_lossy(std::slice::from_raw_parts( cmd_buffer.Buffer, (cmd_buffer.Length / 2) as usize, )); Ok(cmd) } } #[allow(dead_code)] pub fn get_process_name(handler: isize) -> Result { unsafe { let mut buffer = [0u16; MAX_PATH + 1]; let size = K32GetModuleBaseNameW(handler, 0, buffer.as_mut_ptr(), buffer.len() as u32); if size == 0 { return Err(Error::WindowsApi(WindowsApiErrorType::WindowsOsError( std::io::Error::last_os_error(), ))); } Ok(PathBuf::from(OsString::from_wide(&buffer[..size as usize]))) } } pub fn get_process_full_name(handler: isize) -> Result { unsafe { let mut buffer = [0u16; MAX_PATH + 1]; let size = K32GetModuleFileNameExW(handler, 0, buffer.as_mut_ptr(), buffer.len() as u32); if size == 0 { return Err(Error::WindowsApi(WindowsApiErrorType::WindowsOsError( std::io::Error::last_os_error(), ))); } 
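        // convert the wide-character module path returned by the OS into a PathBuf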
Ok(PathBuf::from(OsString::from_wide(&buffer[..size as usize]))) } } #[cfg(test)] mod tests { use std::mem::MaybeUninit; use windows_sys::Win32::Foundation::LUID; use windows_sys::Win32::Security::Authentication::Identity; #[tokio::test] async fn get_user_test() { unsafe { let mut data = MaybeUninit::<*mut LUID>::uninit(); let mut count: u32 = 10; let status = Identity::LsaEnumerateLogonSessions(&mut count, data.as_mut_ptr()); println!( "Identity::LsaEnumerateLogonSessions return value: {}", status ); for i in 0..count { let uid: LUID = *data.assume_init().offset(i as isize); println!("LUID: {:?} - {:?}", uid.HighPart, uid.LowPart); let logon_id: u64 = (uid.HighPart as u64) << 32 | uid.LowPart as u64; println!("LogonId: {}", logon_id); let user = super::get_user(logon_id).unwrap(); let user_name = user.0; let user_groups = user.1; println!("UserName: {}", user_name); println!("UserGroups: {}", user_groups.join(", ")); assert_ne!(String::new(), user_name, "user_name cannot be empty."); } // Couldn't find any user with group in our internal test environment // assert!( // false, // "test failed after enumerated all logon session accounts." // ); } } #[test] fn get_process_test() { let pid = std::process::id(); let handler = super::get_process_handler(pid).unwrap(); let name = super::get_process_name(handler).unwrap(); let full_name = super::get_process_full_name(handler).unwrap(); let cmd = super::get_process_cmd(handler).unwrap(); let base_info = super::query_basic_process_info(handler); assert!(base_info.is_ok(), "base_info must be ok"); assert!( !name.as_os_str().is_empty(), "process name should not be empty" ); assert!( !full_name.as_os_str().is_empty(), "process full name should not be empty" ); assert!(!cmd.is_empty(), "process cmd should not be empty"); } } GuestProxyAgent-1.0.30/proxy_agent/src/proxy_agent_status.rs000066400000000000000000000326731500521614600243540ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT //! //! This module contains the logic to get the status of the proxy agent. //! The status includes the status of the key keeper, ebpf program, proxy listener, telemetry logger and proxy connection summaries. //! The status is written to status.json file in the logs directory. //! //! Example //! ```rust //! use proxy_agent::proxy_agent_status; //! use proxy_agent::shared_state::key_keeper_wrapper::KeyKeeperSharedState; //! use proxy_agent::shared_state::agent_status_wrapper::AgentStatusSharedState; //! use std::time::Duration; //! use tokio_util::sync::CancellationToken; //! //! let key_keeper_shared_state = KeyKeeperSharedState::start_new(); //! let agent_status_shared_state = AgentStatusSharedState::start_new(); //! let cancellation_token = CancellationToken::new(); //! let interval = Duration::from_secs(60); //! let proxy_agent_status_task = proxy_agent_status::ProxyAgentStatusTask::new( //! interval, //! std::path::PathBuf::from("logs_dir"), //! cancellation_token, //! key_keeper_shared_state, //! agent_status_shared_state, //! ); //! tokio::spawn(proxy_agent_status_task.start()); //! 
``` use crate::common::logger; use crate::key_keeper::UNKNOWN_STATE; use crate::shared_state::agent_status_wrapper::{AgentStatusModule, AgentStatusSharedState}; use crate::shared_state::key_keeper_wrapper::KeyKeeperSharedState; use proxy_agent_shared::logger::LoggerLevel; use proxy_agent_shared::misc_helpers; use proxy_agent_shared::proxy_agent_aggregate_status::{ GuestProxyAgentAggregateStatus, ModuleState, OverallState, ProxyAgentDetailStatus, ProxyAgentStatus, }; use proxy_agent_shared::telemetry::event_logger; use std::collections::HashMap; use std::path::PathBuf; use std::time::{Duration, Instant}; use tokio_util::sync::CancellationToken; pub struct ProxyAgentStatusTask { interval: Duration, status_dir: PathBuf, cancellation_token: CancellationToken, key_keeper_shared_state: KeyKeeperSharedState, agent_status_shared_state: AgentStatusSharedState, } impl ProxyAgentStatusTask { pub fn new( interval: Duration, status_dir: PathBuf, cancellation_token: CancellationToken, key_keeper_shared_state: KeyKeeperSharedState, agent_status_shared_state: AgentStatusSharedState, ) -> ProxyAgentStatusTask { ProxyAgentStatusTask { interval, status_dir, cancellation_token, key_keeper_shared_state, agent_status_shared_state, } } pub async fn start(&self) { // Initialize the agent status state to running and ignore the error if it fails _ = self .agent_status_shared_state .set_module_state(ModuleState::RUNNING, AgentStatusModule::ProxyAgentStatus) .await; // Update the agent status message to running self.update_agent_status_message("Proxy agent status is running.".to_string()) .await; tokio::select! { _ = self.loop_status() => {} _ = self.cancellation_token.cancelled() => { logger::write_warning("cancellation token signal received, stop the guest_proxy_agent_status task.".to_string()); } } } async fn update_agent_status_message(&self, status_message: String) { if let Err(e) = self .agent_status_shared_state .set_module_status_message(status_message, AgentStatusModule::ProxyAgentStatus) .await { logger::write_error(format!("Error updating agent status message: {}", e)); } } async fn get_agent_status_message(&self) -> String { match self .agent_status_shared_state .get_module_status_message(AgentStatusModule::ProxyAgentStatus) .await { Ok(message) => message, Err(e) => { let message = format!("Error getting agent status message: {}", e); logger::write_error(message.clone()); message } } } async fn loop_status(&self) { let map_clear_duration = Duration::from_secs(60 * 60 * 24); let mut start_time = Instant::now(); let status_report_duration = Duration::from_secs(60 * 15); let mut status_report_time = Instant::now(); if let Err(e) = misc_helpers::try_create_folder(&self.status_dir) { logger::write_error(format!( "Error creating status directory: {} with error {}", self.status_dir.display(), e )); } loop { let aggregate_status = self.guest_proxy_agent_aggregate_status_new().await; // write proxyAgentStatus event if status_report_time.elapsed() >= status_report_duration { let status = match serde_json::to_string(&aggregate_status.proxyAgentStatus) { Ok(status) => status, Err(e) => format!("Error serializing proxy agent status: {}", e), }; event_logger::write_event( LoggerLevel::Info, status, "loop_status", "proxy_agent_status", logger::AGENT_LOGGER_KEY, ); status_report_time = Instant::now(); } // write the aggregate status to status.json file self.write_aggregate_status_to_file(aggregate_status).await; //Clear the connection map and reset start_time after 24 hours if start_time.elapsed() >= map_clear_duration 
{ logger::write_information( "Clearing the connection summary map and failed authenticate summary map." .to_string(), ); if let Err(e) = self.agent_status_shared_state.clear_all_summary().await { logger::write_error(format!("Error clearing the connection summary map and failed authenticate summary map: {}", e)); } start_time = Instant::now(); } tokio::time::sleep(self.interval).await; } } async fn get_key_keeper_status(&self) -> ProxyAgentDetailStatus { let mut key_latch_status = self .agent_status_shared_state .get_module_status(AgentStatusModule::KeyKeeper) .await; let mut states = HashMap::new(); states.insert( "secureChannelState".to_string(), self.key_keeper_shared_state .get_current_secure_channel_state() .await .unwrap_or(UNKNOWN_STATE.to_string()), ); if let Ok(Some(key_guid)) = self.key_keeper_shared_state.get_current_key_guid().await { states.insert("keyGuid".to_string(), key_guid); } states.insert( "wireServerRuleId".to_string(), self.key_keeper_shared_state .get_wireserver_rule_id() .await .unwrap_or(UNKNOWN_STATE.to_string()), ); states.insert( "imdsRuleId".to_string(), self.key_keeper_shared_state .get_imds_rule_id() .await .unwrap_or(UNKNOWN_STATE.to_string()), ); states.insert( "hostGARuleId".to_string(), self.key_keeper_shared_state .get_hostga_rule_id() .await .unwrap_or(UNKNOWN_STATE.to_string()), ); if let Ok(Some(incarnation)) = self .key_keeper_shared_state .get_current_key_incarnation() .await { states.insert("keyIncarnationId".to_string(), incarnation.to_string()); } key_latch_status.states = Some(states); key_latch_status } async fn proxy_agent_status_new(&self) -> ProxyAgentStatus { let key_latch_status = self.get_key_keeper_status().await; let ebpf_status = self .agent_status_shared_state .get_module_status(AgentStatusModule::Redirector) .await; let proxy_status = self .agent_status_shared_state .get_module_status(AgentStatusModule::ProxyServer) .await; let status = if key_latch_status.status != ModuleState::RUNNING || ebpf_status.status != ModuleState::RUNNING || proxy_status.status != ModuleState::RUNNING { OverallState::ERROR } else { OverallState::SUCCESS }; ProxyAgentStatus { version: misc_helpers::get_current_version(), status, // monitorStatus is proxy_agent_status itself status monitorStatus: ProxyAgentDetailStatus { status: ModuleState::RUNNING, message: self.get_agent_status_message().await, states: None, }, keyLatchStatus: key_latch_status, ebpfProgramStatus: ebpf_status, proxyListenerStatus: proxy_status, telemetryLoggerStatus: self .agent_status_shared_state .get_module_status(AgentStatusModule::TelemetryLogger) .await, proxyConnectionsCount: match self.agent_status_shared_state.get_connection_count().await { Ok(count) => count, Err(e) => { logger::write_error(format!("Error getting connection count: {}", e)); 0 } }, } } async fn guest_proxy_agent_aggregate_status_new(&self) -> GuestProxyAgentAggregateStatus { GuestProxyAgentAggregateStatus { timestamp: misc_helpers::get_date_time_string_with_milliseconds(), proxyAgentStatus: self.proxy_agent_status_new().await, proxyConnectionSummary: match self .agent_status_shared_state .get_all_connection_summary() .await { Ok(summary) => summary, Err(e) => { logger::write_error(format!("Error getting connection summary: {}", e)); vec![] } }, failedAuthenticateSummary: match self .agent_status_shared_state .get_all_failed_connection_summary() .await { Ok(summary) => summary, Err(e) => { logger::write_error(format!("Error getting failed connection summary: {}", e)); vec![] } }, } } async fn 
write_aggregate_status_to_file(&self, status: GuestProxyAgentAggregateStatus) { let full_file_path = self.status_dir.join("status.json"); if let Err(e) = misc_helpers::json_write_to_file(&status, &full_file_path) { self.update_agent_status_message(format!( "Error writing aggregate status to status file: {}", e )) .await; } else { // need overwrite the status message to indicate the status file is written successfully self.update_agent_status_message(format!( "Aggregate status written to status file: {}", full_file_path.display() )) .await; } } } #[cfg(test)] mod tests { use crate::{ proxy_agent_status::ProxyAgentStatusTask, shared_state::{ agent_status_wrapper::AgentStatusSharedState, key_keeper_wrapper::KeyKeeperSharedState, }, }; use proxy_agent_shared::{ misc_helpers, proxy_agent_aggregate_status::GuestProxyAgentAggregateStatus, }; use std::time::Duration; use std::{env, fs}; use tokio_util::sync::CancellationToken; #[tokio::test] async fn write_aggregate_status_test() { let mut temp_test_path = env::temp_dir(); temp_test_path.push("write_aggregate_status_test"); _ = fs::remove_dir_all(&temp_test_path); misc_helpers::try_create_folder(&temp_test_path).unwrap(); let task = ProxyAgentStatusTask::new( Duration::from_secs(1), temp_test_path.clone(), CancellationToken::new(), KeyKeeperSharedState::start_new(), AgentStatusSharedState::start_new(), ); let aggregate_status = task.guest_proxy_agent_aggregate_status_new().await; task.write_aggregate_status_to_file(aggregate_status).await; let file_path = temp_test_path.join("status.json"); assert!(file_path.exists(), "File does not exist in the directory"); let file_content = misc_helpers::json_read_from_file::(&file_path); assert!(file_content.is_ok(), "Failed to read file content"); //Check if field were written let gpa_aggregate_status = file_content.unwrap(); assert!( !gpa_aggregate_status.timestamp.is_empty(), "Failed to get Timestamp field" ); assert!( !gpa_aggregate_status.proxyAgentStatus.version.is_empty(), "Failed to get proxy_agent_status field" ); assert!( gpa_aggregate_status.proxyConnectionSummary.is_empty() || !gpa_aggregate_status.proxyConnectionSummary.is_empty(), "proxyConnectionSummary does not exist" ); } } GuestProxyAgent-1.0.30/proxy_agent/src/redirector.rs000066400000000000000000000364351500521614600225540ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT //! This module contains the logic to redirect the http traffic to the GPA service proxy listener via eBPF. //! The eBPF program is loaded by the GPA service and the eBPF program is used to redirect the traffic to the GPA service proxy listener. //! GPA service update the eBPF map to allow particular http traffics to be redirected to the GPA service proxy listener. //! When eBPF redirects the http traffic, it writes the audit information to the eBPF map. //! The GPA service reads the audit information from the eBPF map and authorizes the requests before forwarding to the remote endpoints. //! //! Example //! ```rust //! use proxy_agent::redirector; //! use proxy_agent::shared_state::redirector_wrapper::RedirectorSharedState; //! use proxy_agent::shared_state::key_keeper_wrapper::KeyKeeperSharedState; //! use proxy_agent::shared_state::agent_status_wrapper::AgentStatusSharedState; //! //! //! // start the redirector with the shared state //! let redirector_shared_state = RedirectorSharedState::new(); //! let key_keeper_shared_state = KeyKeeperSharedState::new(); //! 
let agent_status_shared_state = AgentStatusSharedState::new(); //! let local_port = 8080; //! let redirector = redirector::Redirector::new( //! local_port, //! redirector_shared_state.clone(), //! key_keeper_shared_state.clone(), //! agent_status_shared_state.clone(), //! ); //! tokio::spawn(redirector.start()); //! //! // Update the redirect policy for the traffics //! redirector::update_wire_server_redirect_policy(true, redirector_shared_state.clone()); //! redirector::update_imds_redirect_policy(false, redirector_shared_state.clone()); //! //! // Get the status of the redirector //! let status = agent_status_shared_state.get_status(AgentStatusModule::Redirector).await; //! //! // Close the redirector to offload the eBPF program //! redirector::close(redirector_shared_state.clone(), agent_status_shared_state.clone()).await; //! ``` #[cfg(windows)] mod windows; #[cfg(not(windows))] mod linux; use crate::common::constants; use crate::common::error::BpfErrorType; use crate::common::error::Error; use crate::common::helpers; use crate::common::result::Result; use crate::common::{config, logger}; use crate::provision; use crate::proxy::authorization_rules::AuthorizationMode; use crate::shared_state::agent_status_wrapper::{AgentStatusModule, AgentStatusSharedState}; use crate::shared_state::key_keeper_wrapper::KeyKeeperSharedState; use crate::shared_state::provision_wrapper::ProvisionSharedState; use crate::shared_state::redirector_wrapper::RedirectorSharedState; use crate::shared_state::telemetry_wrapper::TelemetrySharedState; use crate::shared_state::SharedState; use proxy_agent_shared::logger::LoggerLevel; use proxy_agent_shared::misc_helpers; use proxy_agent_shared::proxy_agent_aggregate_status::ModuleState; use proxy_agent_shared::telemetry::event_logger; use serde_derive::{Deserialize, Serialize}; use std::net::Ipv4Addr; use std::path::PathBuf; use std::sync::{Arc, Mutex}; use tokio_util::sync::CancellationToken; #[cfg(not(windows))] pub use linux::BpfObject; #[cfg(windows)] pub use windows::BpfObject; #[derive(Serialize, Deserialize)] #[repr(C)] pub struct AuditEntry { pub logon_id: u64, pub process_id: u32, pub is_admin: i32, pub destination_ipv4: u32, // in network byte order pub destination_port: u16, // in network byte order } impl AuditEntry { pub fn empty() -> Self { AuditEntry { logon_id: 0, process_id: 0, is_admin: 0, destination_ipv4: 0, destination_port: 0, } } pub fn destination_port_in_host_byte_order(&self) -> u16 { u16::from_be(self.destination_port) } pub fn destination_ipv4_addr(&self) -> Ipv4Addr { Ipv4Addr::from_bits(self.destination_ipv4.to_be()) } } pub struct Redirector { local_port: u16, redirector_shared_state: RedirectorSharedState, key_keeper_shared_state: KeyKeeperSharedState, agent_status_shared_state: AgentStatusSharedState, cancellation_token: CancellationToken, telemetry_shared_state: TelemetrySharedState, provision_shared_state: ProvisionSharedState, } impl Redirector { pub fn new(local_port: u16, shared_state: &SharedState) -> Self { Redirector { local_port, cancellation_token: shared_state.get_cancellation_token(), key_keeper_shared_state: shared_state.get_key_keeper_shared_state(), telemetry_shared_state: shared_state.get_telemetry_shared_state(), provision_shared_state: shared_state.get_provision_shared_state(), agent_status_shared_state: shared_state.get_agent_status_shared_state(), redirector_shared_state: shared_state.get_redirector_shared_state(), } } const MAX_RETRIES: usize = 5; const RETRY_INTERVAL_MS: u64 = 10; pub async fn start(&self) { let 
message = "eBPF redirector is starting"; if let Err(e) = self .agent_status_shared_state .set_module_status_message(message.to_string(), AgentStatusModule::Redirector) .await { logger::write_error(format!( "Failed to set error status '{}' for redirector: {}", message, e )); } let level = match self.start_impl().await { Ok(_) => LoggerLevel::Info, Err(_) => LoggerLevel::Error, }; event_logger::write_event( level, self.get_status_message().await, "start", "redirector", logger::AGENT_LOGGER_KEY, ); } async fn start_impl(&self) -> Result<()> { for _ in 0..Self::MAX_RETRIES { match self.start_internal().await { Ok(_) => return Ok(()), Err(e) => { self.set_error_status(format!("Failed to start redirector: {e}")) .await; } } tokio::time::sleep(std::time::Duration::from_millis(Self::RETRY_INTERVAL_MS)).await; } Err(Error::Bpf(BpfErrorType::FailedToStartRedirector)) } async fn start_internal(&self) -> Result<()> { #[cfg(windows)] { self.initialized()?; } let mut bpf_object = self.load_bpf_object()?; logger::write_information("Success loaded bpf object.".to_string()); // maps let pid = std::process::id(); bpf_object.update_skip_process_map(pid)?; logger::write_information(format!( "Success updated bpf skip_process map with pid={pid}." )); let wireserver_mode = if let Ok(Some(rules)) = self.key_keeper_shared_state.get_wireserver_rules().await { rules.mode } else { AuthorizationMode::Audit }; if wireserver_mode != AuthorizationMode::Disabled { bpf_object.update_policy_elem_bpf_map( "WireServer endpoints", self.local_port, constants::WIRE_SERVER_IP_NETWORK_BYTE_ORDER, //0x10813FA8 - 168.63.129.16 constants::WIRE_SERVER_PORT, )?; logger::write_information( "Success updated bpf map for WireServer support.".to_string(), ); } let imds_mode = if let Ok(Some(rules)) = self.key_keeper_shared_state.get_imds_rules().await { rules.mode } else { AuthorizationMode::Audit }; if imds_mode != AuthorizationMode::Disabled { bpf_object.update_policy_elem_bpf_map( "IMDS endpoints", self.local_port, constants::IMDS_IP_NETWORK_BYTE_ORDER, //0xFEA9FEA9, // 169.254.169.254 constants::IMDS_PORT, )?; logger::write_information("Success updated bpf map for IMDS support.".to_string()); } if config::get_host_gaplugin_support() > 0 { bpf_object.update_policy_elem_bpf_map( "Host GAPlugin endpoints", self.local_port, constants::GA_PLUGIN_IP_NETWORK_BYTE_ORDER, //0x10813FA8, // 168.63.129.16 constants::GA_PLUGIN_PORT, )?; logger::write_information( "Success updated bpf map for Host GAPlugin support.".to_string(), ); } // programs self.attach_bpf_prog(&mut bpf_object)?; logger::write_information("Success attached bpf prog.".to_string()); if let Err(e) = self .redirector_shared_state .update_bpf_object(Arc::new(Mutex::new(bpf_object))) .await { logger::write_error(format!("Failed to update bpf object in shared state: {e}")); } if let Err(e) = self .redirector_shared_state .set_local_port(self.local_port) .await { logger::write_error(format!("Failed to set local port in shared state: {e}")); } let message = helpers::write_startup_event( "Started Redirector with eBPF maps", "start", "redirector", logger::AGENT_LOGGER_KEY, ); if let Err(e) = self .agent_status_shared_state .set_module_status_message(message.to_string(), AgentStatusModule::Redirector) .await { logger::write_error(format!( "Failed to set module status message in shared state: {e}" )); } if let Err(e) = self .agent_status_shared_state .set_module_state(ModuleState::RUNNING, AgentStatusModule::Redirector) .await { logger::write_error(format!("Failed to set module state in 
shared state: {e}")); } // report redirector ready for provision provision::redirector_ready( self.cancellation_token.clone(), self.key_keeper_shared_state.clone(), self.telemetry_shared_state.clone(), self.provision_shared_state.clone(), self.agent_status_shared_state.clone(), ) .await; Ok(()) } async fn get_status_message(&self) -> String { self.agent_status_shared_state .get_module_status(AgentStatusModule::Redirector) .await .message } async fn set_error_status(&self, message: String) { if let Err(e) = self .agent_status_shared_state .set_module_status_message(message.to_string(), AgentStatusModule::Redirector) .await { logger::write_error(format!( "Failed to set error status '{}' for redirector: {}", message, e )); } } } #[cfg(windows)] pub fn get_audit_from_stream_socket(raw_socket_id: usize) -> Result { windows::get_audit_from_redirect_context(raw_socket_id) } pub fn ip_to_string(ip: u32) -> String { let mut ip_str = String::new(); let seg_number = 16 * 16; let seg = ip % seg_number; ip_str.push_str(seg.to_string().as_str()); ip_str.push('.'); let ip = ip / seg_number; let seg = ip % seg_number; ip_str.push_str(seg.to_string().as_str()); ip_str.push('.'); let ip = ip / seg_number; let seg = ip % seg_number; ip_str.push_str(seg.to_string().as_str()); ip_str.push('.'); let ip = ip / seg_number; let seg = ip % seg_number; ip_str.push_str(seg.to_string().as_str()); ip_str } pub fn string_to_ip(ip_str: &str) -> u32 { let ip_str_seg: Vec<&str> = ip_str.split('.').collect(); if ip_str_seg.len() != 4 { logger::write_warning(format!("string_to_ip:: ip_str {} is invalid", ip_str)); return 0; } let mut ip: u32 = 0; let mut seg: u32 = 1; let seg_number = 16 * 16; for str in ip_str_seg { match str.parse::() { Ok(n) => { ip += (n as u32) * seg; } Err(e) => { logger::write_warning(format!( "string_to_ip:: error parsing ip segment {} with error: {}", ip_str, e )); return 0; } } if seg < 16777216 { seg *= seg_number; } } ip } pub fn get_ebpf_file_path() -> PathBuf { // get ebpf file full path from environment variable let mut bpf_file_path = config::get_ebpf_file_full_path().unwrap_or_default(); let ebpf_file_name = config::get_ebpf_program_name(); #[cfg(not(windows))] { if !bpf_file_path.exists() { // linux ebpf file default to /usr/lib/azure-proxy-agent folder bpf_file_path = PathBuf::from(format!("/usr/lib/azure-proxy-agent/{ebpf_file_name}")); } } if !bpf_file_path.exists() { // default to current exe folder bpf_file_path = misc_helpers::get_current_exe_dir(); bpf_file_path.push(ebpf_file_name); } bpf_file_path } pub async fn lookup_audit( source_port: u16, redirector_shared_state: &RedirectorSharedState, ) -> Result { if let Ok(Some(bpf_object)) = redirector_shared_state.get_bpf_object().await { bpf_object.lock().unwrap().lookup_audit(source_port) } else { Err(Error::Bpf(BpfErrorType::NullBpfObject)) } } pub async fn remove_audit( source_port: u16, redirector_shared_state: &RedirectorSharedState, ) -> Result<()> { if let Ok(Some(bpf_object)) = redirector_shared_state.get_bpf_object().await { bpf_object .lock() .unwrap() .remove_audit_map_entry(source_port) } else { Err(Error::Bpf(BpfErrorType::NullBpfObject)) } } pub async fn close( redirector_shared_state: RedirectorSharedState, agent_status_shared_state: AgentStatusSharedState, ) { let _ = agent_status_shared_state .set_module_state(ModuleState::STOPPED, AgentStatusModule::Redirector) .await; // reset ebpf object #[cfg(windows)] { windows::close_bpf_object(redirector_shared_state.clone()).await; } let _ = 
redirector_shared_state.clear_bpf_object().await; } #[cfg(not(windows))] pub use linux::update_imds_redirect_policy; #[cfg(windows)] pub use windows::update_imds_redirect_policy; #[cfg(not(windows))] pub use linux::update_wire_server_redirect_policy; #[cfg(windows)] pub use windows::update_wire_server_redirect_policy; #[cfg(not(windows))] pub use linux::update_hostga_redirect_policy; #[cfg(windows)] pub use windows::update_hostga_redirect_policy; #[cfg(test)] mod tests { #[tokio::test] async fn ip_to_string_test() { let ip = 0x10813FA8u32; let ip_str = super::ip_to_string(ip); assert_eq!("168.63.129.16", ip_str, "ip_str mismatch."); let new_ip = super::string_to_ip(&ip_str); assert_eq!(ip, new_ip, "ip mismatch."); let ip = 0x100007Fu32; let ip_str = super::ip_to_string(ip); assert_eq!("127.0.0.1", ip_str, "ip_str mismatch."); let new_ip = super::string_to_ip("1270.0.0.1"); assert_eq!(0, new_ip, "ip must be 0 since the 1270.0.0.1 is invalid."); let new_ip = super::string_to_ip("1270.0.1"); assert_eq!(0, new_ip, "ip must be 0 since the 1270.0.1 is invalid."); } } GuestProxyAgent-1.0.30/proxy_agent/src/redirector/000077500000000000000000000000001500521614600221735ustar00rootroot00000000000000GuestProxyAgent-1.0.30/proxy_agent/src/redirector/linux.rs000066400000000000000000000573311500521614600237110ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT mod ebpf_obj; use crate::common::{ config, constants, error::{BpfErrorType, Error}, logger, result::Result, }; use crate::redirector::{ip_to_string, AuditEntry}; use crate::shared_state::redirector_wrapper::RedirectorSharedState; use aya::programs::{CgroupSockAddr, KProbe}; use aya::{ maps::{HashMap, MapData}, programs::CgroupAttachMode, }; use aya::{Btf, Ebpf, EbpfLoader}; use ebpf_obj::{ destination_entry, sock_addr_audit_entry, sock_addr_audit_key, sock_addr_skip_process_entry, }; use proxy_agent_shared::telemetry::event_logger; use proxy_agent_shared::{logger::LoggerLevel, misc_helpers}; use std::convert::TryFrom; use std::path::PathBuf; pub struct BpfObject(Ebpf); // BpfObject is a wrapper around Bpf object to interact with Linux eBPF programs and maps impl BpfObject { pub fn new(ebpf: Ebpf) -> Self { BpfObject(ebpf) } pub fn get_bpf(&self) -> &Ebpf { &self.0 } pub fn from_ebpf_file(bpf_file_path: &PathBuf) -> Result { if !bpf_file_path.exists() || !bpf_file_path.is_file() { return Err(Error::Bpf(BpfErrorType::LoadBpfApi( misc_helpers::path_to_string(bpf_file_path), "File does not exist".to_string(), ))); } match EbpfLoader::new() // load the BTF data from /sys/kernel/btf/vmlinux .btf(Btf::from_sys_fs().ok().as_ref()) // finally load the code .load_file(bpf_file_path) { Ok(bpf) => Ok(BpfObject::new(bpf)), Err(err) => Err(Error::Bpf(BpfErrorType::LoadBpfApi( misc_helpers::path_to_string(bpf_file_path), err.to_string(), ))), } } pub fn update_skip_process_map(&mut self, pid: u32) -> Result<()> { let skip_process_map_name = "skip_process_map"; match self.0.map_mut(skip_process_map_name) { Some(map) => match HashMap::<&mut MapData, [u32; 1], [u32; 1]>::try_from(map) { Ok(mut skip_process_map) => { let key = sock_addr_skip_process_entry::from_pid(pid); let value = sock_addr_skip_process_entry::from_pid(pid); match skip_process_map.insert(key.to_array(), value.to_array(), 0) { Ok(_) => logger::write(format!("skip_process_map updated with {}", pid)), Err(err) => { return Err(Error::Bpf(BpfErrorType::UpdateBpfMapHashMap( skip_process_map_name.to_string(), format!("insert pid: {}", pid), err.to_string(), 
))); } } } Err(err) => { return Err(Error::Bpf(BpfErrorType::LoadBpfMapHashMap( skip_process_map_name.to_string(), err.to_string(), ))); } }, None => { return Err(Error::Bpf(BpfErrorType::GetBpfMap( skip_process_map_name.to_string(), "Map does not exist".to_string(), ))); } } Ok(()) } pub fn update_policy_elem_bpf_map( &mut self, endpoint_name: &str, local_port: u16, dest_ipv4: u32, dest_port: u16, ) -> Result<()> { let policy_map_name = "policy_map"; match self.0.map_mut(policy_map_name) { Some(map) => match HashMap::<&mut MapData, [u32; 6], [u32; 6]>::try_from(map) { Ok(mut policy_map) => { let local_ip = super::string_to_ip(constants::PROXY_AGENT_IP); let key = destination_entry::from_ipv4(dest_ipv4, dest_port); let value = destination_entry::from_ipv4(local_ip, local_port); match policy_map.insert(key.to_array(), value.to_array(), 0) { Ok(_) => { logger::write(format!("policy_map updated for {endpoint_name}")); } Err(err) => { return Err(Error::Bpf(BpfErrorType::UpdateBpfMapHashMap( policy_map_name.to_string(), endpoint_name.to_string(), err.to_string(), ))); } } } Err(err) => { return Err(Error::Bpf(BpfErrorType::LoadBpfMapHashMap( policy_map_name.to_string(), err.to_string(), ))); } }, None => { return Err(Error::Bpf(BpfErrorType::GetBpfMap( policy_map_name.to_string(), "Map does not exist".to_string(), ))); } } Ok(()) } pub fn attach_cgroup_program(&mut self, cgroup2_root_path: PathBuf) -> Result<()> { let program_name = "connect4"; match std::fs::File::open(cgroup2_root_path.clone()) { Ok(cgroup) => match self.0.program_mut(program_name) { Some(program) => match program.try_into() { Ok(p) => { let program: &mut CgroupSockAddr = p; match program.load() { Ok(_) => logger::write("connect4 program loaded.".to_string()), Err(err) => { return Err(Error::Bpf(BpfErrorType::LoadBpfProgram( program_name.to_string(), err.to_string(), ))); } } match program.attach(cgroup, CgroupAttachMode::Single) { Ok(link_id) => { logger::write(format!( "connect4 program attached with id {:?}.", link_id )); } Err(err) => { return Err(Error::Bpf(BpfErrorType::AttachBpfProgram( program_name.to_string(), err.to_string(), ))); } } } Err(err) => { return Err(Error::Bpf(BpfErrorType::ConvertBpfProgram( "CgroupSockAddr".to_string(), err.to_string(), ))); } }, None => { return Err(Error::Bpf(BpfErrorType::GetBpfProgram( program_name.to_string(), "Program does not exist".to_string(), ))); } }, Err(err) => { return Err(Error::Bpf(BpfErrorType::OpenCgroup( cgroup2_root_path.display().to_string(), err.to_string(), ))); } } Ok(()) } pub fn attach_kprobe_program(&mut self) -> Result<()> { let program_name = "tcp_v4_connect"; match self.0.program_mut(program_name) { Some(program) => match program.try_into() { Ok(p) => { let program: &mut KProbe = p; match program.load() { Ok(_) => logger::write("tcp_v4_connect program loaded.".to_string()), Err(err) => { return Err(Error::Bpf(BpfErrorType::LoadBpfProgram( program_name.to_string(), err.to_string(), ))); } } match program.attach("tcp_connect", 0) { Ok(link_id) => { logger::write(format!( "tcp_v4_connect program attached with id {:?}.", link_id )); } Err(err) => { return Err(Error::Bpf(BpfErrorType::AttachBpfProgram( program_name.to_string(), err.to_string(), ))); } } } Err(err) => { return Err(Error::Bpf(BpfErrorType::ConvertBpfProgram( "KProbe".to_string(), err.to_string(), ))); } }, None => { return Err(Error::Bpf(BpfErrorType::GetBpfProgram( program_name.to_string(), "Program does not exist".to_string(), ))); } } Ok(()) } pub fn lookup_audit(&self, source_port: u16) -> 
Result { let audit_map_name = "audit_map"; match self.0.map(audit_map_name) { Some(map) => match HashMap::try_from(map) { Ok(audit_map) => { let key = sock_addr_audit_key::from_source_port(source_port); match audit_map.get(&key.to_array(), 0) { Ok(value) => { let audit_value = sock_addr_audit_entry::from_array(value); Ok(AuditEntry { logon_id: audit_value.logon_id as u64, process_id: audit_value.process_id, is_admin: audit_value.is_root as i32, destination_ipv4: audit_value.destination_ipv4, destination_port: audit_value.destination_port as u16, }) } Err(err) => Err(Error::Bpf(BpfErrorType::MapLookupElem( source_port.to_string(), err.to_string(), ))), } } Err(err) => Err(Error::Bpf(BpfErrorType::LoadBpfMapHashMap( audit_map_name.to_string(), err.to_string(), ))), }, None => Err(Error::Bpf(BpfErrorType::GetBpfMap( audit_map_name.to_string(), "Map does not exist".to_string(), ))), } } pub fn update_redirect_policy( &mut self, dest_ipv4: u32, dest_port: u16, local_port: u16, redirect: bool, ) { let policy_map_name = "policy_map"; match self.0.map_mut(policy_map_name) { Some(map) => match HashMap::<&mut MapData, [u32; 6], [u32; 6]>::try_from(map) { Ok(mut policy_map) => { let key = destination_entry::from_ipv4(dest_ipv4, dest_port); if !redirect { match policy_map.remove(&key.to_array()) { Ok(_) => { event_logger::write_event( LoggerLevel::Info, format!( "policy_map removed for destination: {}:{}", ip_to_string(dest_ipv4), dest_port ), "update_redirect_policy_internal", "redirector/linux", logger::AGENT_LOGGER_KEY, ); } Err(err) => { logger::write(format!("Failed to remove destination: {}:{} from policy_map with error: {}", ip_to_string(dest_ipv4), dest_port, err)); } }; } else { let local_ip = constants::PROXY_AGENT_IP.to_string(); event_logger::write_event( LoggerLevel::Info, format!( "update_redirect_policy_internal with local ip address: {}, dest_ipv4: {}, dest_port: {}, local_port: {}", local_ip, ip_to_string(dest_ipv4), dest_port, local_port ), "update_redirect_policy_internal", "redirector/linux", logger::AGENT_LOGGER_KEY, ); let local_ip: u32 = super::string_to_ip(&local_ip); let value = destination_entry::from_ipv4(local_ip, local_port); match policy_map.insert(key.to_array(), value.to_array(), 0) { Ok(_) => event_logger::write_event( LoggerLevel::Info, format!( "policy_map updated for destination: {}:{}", ip_to_string(dest_ipv4), dest_port ), "update_redirect_policy_internal", "redirector/linux", logger::AGENT_LOGGER_KEY, ), Err(err) => { logger::write(format!("Failed to insert destination: {}:{} to policy_map with error: {}", ip_to_string(dest_ipv4), dest_port, err)); } } } } Err(err) => { logger::write(format!( "Failed to load HashMap 'policy_map' with error: {}", err )); } }, None => { logger::write("Failed to get map 'policy_map'.".to_string()); } } } pub fn remove_audit_map_entry(&mut self, source_port: u16) -> Result<()> { let audit_map_name = "audit_map"; match self.0.map_mut(audit_map_name) { Some(map) => match HashMap::<&mut MapData, [u32; 2], [u32; 5]>::try_from(map) { Ok(mut audit_map) => { let key = sock_addr_audit_key::from_source_port(source_port); audit_map.remove(&key.to_array()).map_err(|err| { Error::Bpf(BpfErrorType::MapDeleteElem( source_port.to_string(), format!("Error: {}", err), )) })?; } Err(err) => { return Err(Error::Bpf(BpfErrorType::LoadBpfMapHashMap( audit_map_name.to_string(), err.to_string(), ))); } }, None => { return Err(Error::Bpf(BpfErrorType::GetBpfMap( audit_map_name.to_string(), "Map does not exist".to_string(), ))); } } Ok(()) } } // Redirector 
implementation for Linux platform impl super::Redirector { pub fn load_bpf_object(&self) -> Result { BpfObject::from_ebpf_file(&super::get_ebpf_file_path()) } pub fn attach_bpf_prog(&self, bpf_object: &mut BpfObject) -> Result<()> { bpf_object.attach_kprobe_program()?; let cgroup2_path = match proxy_agent_shared::linux::get_cgroup2_mount_path() { Ok(path) => { logger::write(format!( "Got cgroup2 mount path: '{}'", misc_helpers::path_to_string(&path) )); path } Err(e) => { event_logger::write_event( LoggerLevel::Warn, format!("Failed to get the cgroup2 mount path {}, fallback to use the cgroup2 path from config file.", e), "start", "redirector/linux", logger::AGENT_LOGGER_KEY, ); config::get_cgroup_root() } }; if let Err(e) = bpf_object.attach_cgroup_program(cgroup2_path) { let message = format!("Failed to attach cgroup program for redirection. {}", e); event_logger::write_event( LoggerLevel::Warn, message.to_string(), "start", "redirector", logger::AGENT_LOGGER_KEY, ); return Err(e); } Ok(()) } } pub async fn update_wire_server_redirect_policy( redirect: bool, redirector_shared_state: RedirectorSharedState, ) { if let (Ok(Some(bpf_object)), Ok(local_port)) = ( redirector_shared_state.get_bpf_object().await, redirector_shared_state.get_local_port().await, ) { bpf_object.lock().unwrap().update_redirect_policy( constants::WIRE_SERVER_IP_NETWORK_BYTE_ORDER, constants::WIRE_SERVER_PORT, local_port, redirect, ); } } pub async fn update_imds_redirect_policy( redirect: bool, redirector_shared_state: RedirectorSharedState, ) { if let (Ok(Some(bpf_object)), Ok(local_port)) = ( redirector_shared_state.get_bpf_object().await, redirector_shared_state.get_local_port().await, ) { bpf_object.lock().unwrap().update_redirect_policy( constants::IMDS_IP_NETWORK_BYTE_ORDER, constants::IMDS_PORT, local_port, redirect, ); } } pub async fn update_hostga_redirect_policy( redirect: bool, redirector_shared_state: RedirectorSharedState, ) { if let (Ok(Some(bpf_object)), Ok(local_port)) = ( redirector_shared_state.get_bpf_object().await, redirector_shared_state.get_local_port().await, ) { bpf_object.lock().unwrap().update_redirect_policy( constants::GA_PLUGIN_IP_NETWORK_BYTE_ORDER, constants::GA_PLUGIN_PORT, local_port, redirect, ); } } #[cfg(test)] #[cfg(feature = "test-with-root")] mod tests { use crate::common::config; use crate::common::constants; use crate::redirector::linux::ebpf_obj::sock_addr_audit_entry; use crate::redirector::linux::ebpf_obj::sock_addr_audit_key; use aya::maps::HashMap; use proxy_agent_shared::misc_helpers; use std::env; /// Test the Linux BpfObject struct /// This test requires root permission and BPF capability to run /// This test will fail if the current user does not have root permission /// So far, we know some container build environments do not have BPF capability /// This test will skip if the current environment does not have the capability to load BPF programs #[tokio::test] async fn linux_ebpf_test() { let logger_key = "linux_ebpf_test"; let mut temp_test_path = env::temp_dir(); temp_test_path.push(logger_key); let mut bpf_file_path = misc_helpers::get_current_exe_dir(); bpf_file_path.push("config::get_ebpf_program_name()"); let bpf = super::BpfObject::from_ebpf_file(&bpf_file_path); assert!( bpf.is_err(), "BpfObject::from_ebpf_file should return error from invalid file path" ); let mut bpf_file_path = misc_helpers::get_current_exe_dir(); bpf_file_path.push(config::get_ebpf_program_name()); let bpf = super::BpfObject::from_ebpf_file(&bpf_file_path); if bpf.is_err() { println!( 
"BpfObject::from_ebpf_file '{}' error: {}", bpf_file_path.display(), bpf.err().unwrap() ); let environment = env::var("Environment") .unwrap_or("normal".to_string()) .to_lowercase(); if environment == "onebranch/cbl-mariner" { println!("This is known: onebranch/cbl-mariner container image does not have the BPF capability, skip this test."); return; } assert!(false, "BpfObject::from_ebpf_file should not return Err"); return; } let mut bpf = bpf.unwrap(); let result = bpf.update_skip_process_map(std::process::id()); assert!( result.is_ok(), "update_skip_process_map should return success" ); let result = bpf.update_policy_elem_bpf_map( "test endpoints", 80, constants::GA_PLUGIN_IP_NETWORK_BYTE_ORDER, constants::GA_PLUGIN_PORT, ); assert!(result.is_ok(), "update_policy_map should return success"); // Do not attach the program to real cgroup2 path // it should fail for both attach let result = bpf.attach_kprobe_program(); assert!( result.is_ok(), "attach_kprobe_program should return success" ); let result = bpf.attach_cgroup_program(temp_test_path.clone()); assert!( result.is_err(), "attach_connect4_program should return error for invalid cgroup2 path" ); let source_port = 1; let audit = bpf.lookup_audit(source_port); assert!( audit.is_err(), "lookup_audit should return error for invalid source port" ); // insert to map an then look up let key = sock_addr_audit_key::from_source_port(source_port); let value = sock_addr_audit_entry { logon_id: 999, process_id: 888, is_root: 1, destination_ipv4: 0x10813FA8, destination_port: 80, }; { // drop map_mut("audit_map") within this scope let mut audit_map: HashMap<&mut aya::maps::MapData, [u32; 2], [u32; 5]> = HashMap::<&mut aya::maps::MapData, [u32; 2], [u32; 5]>::try_from( bpf.0.map_mut("audit_map").unwrap(), ) .unwrap(); audit_map .insert(key.to_array(), value.to_array(), 0) .unwrap(); } let audit = bpf.lookup_audit(source_port); match audit { Ok(entry) => { assert_eq!( entry.logon_id as u32, value.logon_id, "logon_id is not equal" ); assert_eq!( entry.process_id, value.process_id, "process_id is not equal" ); assert_eq!(entry.is_admin as u32, value.is_root, "is_root is not equal"); assert_eq!( entry.destination_ipv4, value.destination_ipv4, "destination_ipv4 is not equal" ); assert_eq!( entry.destination_port as u32, value.destination_port, "destination_port is not equal" ); } Err(err) => { println!("lookup_audit_internal error: {}", err); assert!(false, "lookup_audit_internal should not return Err"); } } } } GuestProxyAgent-1.0.30/proxy_agent/src/redirector/linux/000077500000000000000000000000001500521614600233325ustar00rootroot00000000000000GuestProxyAgent-1.0.30/proxy_agent/src/redirector/linux/ebpf_obj.rs000066400000000000000000000133261500521614600254530ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT #![allow(non_camel_case_types)] #[repr(C)] pub struct _ip_address { //pub ipv4: u32, pub ip: [u32; 4], // ipv4 uses the first element; ipv6 uses all 4 elements } impl _ip_address { fn empty() -> Self { _ip_address { ip: [0, 0, 0, 0] } } pub fn from_ipv4(ipv4: u32) -> Self { let mut ip = Self::empty(); ip.ip[0] = ipv4; ip } #[allow(dead_code)] pub fn from_ipv6(ipv6: [u32; 4]) -> Self { let mut ip = Self::empty(); ip.ip.copy_from_slice(&ipv6); ip } } pub type ip_address = _ip_address; #[repr(C)] pub struct _destination_entry { pub destination_ip: ip_address, pub destination_port: u32, // first element is the port number, second element is empty pub protocol: u32, } impl _destination_entry { pub fn 
empty() -> Self { _destination_entry { destination_ip: ip_address::empty(), destination_port: 0, protocol: IPPROTO_TCP, } } pub fn from_ipv4(ipv4: u32, port: u16) -> Self { let mut entry = Self::empty(); entry.destination_ip = ip_address::from_ipv4(ipv4); entry.destination_port = port.to_be() as u32; entry } pub fn to_array(&self) -> [u32; 6] { let mut array: [u32; 6] = [0; 6]; array[..4].copy_from_slice(&self.destination_ip.ip); array[4] = self.destination_port; array[5] = self.protocol; array } } pub type destination_entry = _destination_entry; pub const IPPROTO_TCP: u32 = 6; #[repr(C)] pub struct sock_addr_skip_process_entry { pub pid: u32, } impl sock_addr_skip_process_entry { fn empty() -> Self { sock_addr_skip_process_entry { pid: 0 } } pub fn from_pid(pid: u32) -> Self { let mut entry = Self::empty(); entry.pid = pid; entry } pub fn to_array(&self) -> [u32; 1] { let mut array: [u32; 1] = [0; 1]; array[0] = self.pid; array } } #[repr(C)] #[derive(Debug)] pub struct sock_addr_audit_key { pub protocol: u32, pub source_port: u32, } #[allow(dead_code)] impl sock_addr_audit_key { pub fn from_source_port(port: u16) -> Self { sock_addr_audit_key { protocol: IPPROTO_TCP, source_port: port as u32, } } pub fn to_array(&self) -> [u32; 2] { let mut array: [u32; 2] = [0; 2]; array[0] = self.protocol; array[1] = self.source_port; array } pub fn from_array(array: [u32; 2]) -> Self { sock_addr_audit_key { protocol: array[0], source_port: array[1], } } } #[repr(C)] #[derive(Debug)] pub struct sock_addr_audit_entry { pub logon_id: u32, pub process_id: u32, pub is_root: u32, pub destination_ipv4: u32, pub destination_port: u32, } #[allow(dead_code)] impl sock_addr_audit_entry { pub fn from_array(array: [u32; 5]) -> Self { sock_addr_audit_entry { logon_id: array[0], process_id: array[1], is_root: array[2], destination_ipv4: array[3], destination_port: array[4], } } #[allow(dead_code)] pub fn to_array(&self) -> [u32; 5] { let mut array: [u32; 5] = [0; 5]; array[0] = self.logon_id; array[1] = self.process_id; array[2] = self.is_root; array[3] = self.destination_ipv4; array[4] = self.destination_port; array } } #[cfg(test)] mod tests { use crate::common::constants; #[test] fn destination_entry_test() { let key = super::destination_entry::from_ipv4( constants::WIRE_SERVER_IP_NETWORK_BYTE_ORDER, constants::WIRE_SERVER_PORT, ); let array = key.to_array(); assert_eq!( array[0], constants::WIRE_SERVER_IP_NETWORK_BYTE_ORDER, "ip is not equal" ); assert_eq!( array[4], constants::WIRE_SERVER_PORT.to_be() as u32, "port is not equal" ); } #[test] fn sock_addr_skip_process_entry_test() { let pid = std::process::id(); let key = super::sock_addr_skip_process_entry::from_pid(pid); let array = key.to_array(); assert_eq!(array[0], pid, "pid is not equal"); } #[test] fn sock_addr_audit_key_test() { let source_port = 1234; let key = super::sock_addr_audit_key::from_source_port(source_port); let array = key.to_array(); assert_eq!(array[1], source_port as u32, "port is not equal"); let key2 = super::sock_addr_audit_key::from_array(array); assert_eq!( key2.source_port, source_port as u32, "port is not equal from_array" ); } #[test] fn sock_addr_audit_entry_test() { let audit = super::sock_addr_audit_entry { logon_id: 1, process_id: 2, is_root: 1, destination_ipv4: 4, destination_port: 5, }; let audit_value = super::sock_addr_audit_entry::from_array(audit.to_array()); assert_eq!( audit_value.logon_id, audit.logon_id, "logon_id is not equal" ); assert_eq!( audit_value.process_id, audit.process_id, "process_id is not equal" ); 
assert_eq!(audit_value.is_root, audit.is_root, "is_root is not equal"); assert_eq!( audit_value.destination_ipv4, audit.destination_ipv4, "destination_ipv4 is not equal" ); assert_eq!( audit_value.destination_port, audit.destination_port, "destination_port is not equal" ); } } GuestProxyAgent-1.0.30/proxy_agent/src/redirector/windows.rs000066400000000000000000000235371500521614600242450ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT mod bpf_api; mod bpf_obj; mod bpf_prog; use crate::common::error::{BpfErrorType, Error, WindowsApiErrorType}; use crate::common::{constants, logger, result::Result}; use crate::redirector::AuditEntry; use crate::shared_state::redirector_wrapper::RedirectorSharedState; use core::ffi::c_void; use std::mem; use std::ptr; use windows_sys::Win32::Networking::WinSock; pub struct BpfObject(pub *mut bpf_obj::bpf_object); // Safety: bpf_object, which is a reference to an eBPF object, has no dependencies on thread-local storage and can // safely be sent to another thread. This is not explicitly documented in the Windows eBPF library, but the library does // document it aims to be source-compatible with libbpf[0]. Note that synchronization is required to share this object // between threads, and care must be taken when using it as libbpf APIs make use of errno[1], which is thread-local. // // [0] https://github.com/microsoft/ebpf-for-windows/tree/Release-v0.17.1#2-does-this-provide-app-compatibility-with-ebpf-programs-written-for-linux // [1] https://libbpf.readthedocs.io/en/v1.4.5/api.html#error-handling unsafe impl Send for BpfObject {} impl Default for BpfObject { fn default() -> Self { Self::new() } } // Redirector implementation for Windows platform impl super::Redirector { pub fn initialized(&self) -> Result<()> { // Add retry logic to load the eBPF API // This is a workaround for the issue where the eBPF API is not loaded properly for _ in 0..Self::MAX_RETRIES { if bpf_api::try_load_ebpf_api() { return Ok(()); } std::thread::sleep(std::time::Duration::from_millis(Self::RETRY_INTERVAL_MS)); } // If the eBPF API is still not loaded, last retry and return error if it fails if !bpf_api::try_load_ebpf_api() { return Err(Error::Bpf(BpfErrorType::GetBpfApi)); } Ok(()) } pub fn load_bpf_object(&self) -> Result { let mut bpf_file_path = super::get_ebpf_file_path(); if let Some(ebpf_api_version) = bpf_api::get_ebpf_api_version() { // eBPF program has to work with the same version of eBPF API if windows eBPF had break changes // our latest eBPF program may not work with the older version of windows eBPF API // in some cases, the windows eBPF may not able, or be allowed to update, // so we need to load the eBPF program with the same version of eBPF API // the versioned eBPF program is named as ... 
let file_ext = bpf_file_path.extension().unwrap_or_default(); let file_name = bpf_file_path.file_stem().unwrap_or_default(); let file_name = format!( "{}.{}.{}.{}", file_name.to_string_lossy(), ebpf_api_version.major, ebpf_api_version.minor, file_ext.to_string_lossy() ); let file_path = bpf_file_path.with_file_name(file_name); let file_found: bool; if file_path.exists() && file_path.is_file() { bpf_file_path = file_path.to_path_buf(); file_found = true; } else { file_found = false; } logger::write(format!( "eBPF API version: '{}' found, eBPF program file with api version: '{}'{}found.", ebpf_api_version, file_path.display(), if file_found { " " } else { " not " }, )); } let mut bpf_object = BpfObject::new(); bpf_object.load_bpf_object(&bpf_file_path)?; Ok(bpf_object) } pub fn attach_bpf_prog(&self, bpf_object: &mut BpfObject) -> Result<()> { bpf_object.attach_bpf_prog() } } pub async fn close_bpf_object(redirector_shared_state: RedirectorSharedState) { if let Ok(Some(bpf_object)) = redirector_shared_state.get_bpf_object().await { bpf_object.lock().unwrap().close_bpf_object(); logger::write("Success closed bpf object.".to_string()); } } pub fn get_audit_from_redirect_context(raw_socket_id: usize) -> Result { // WSAIoctl - SIO_QUERY_WFP_CONNECTION_REDIRECT_CONTEXT let value = AuditEntry::empty(); let redirect_context_size = mem::size_of::() as u32; let mut redirect_context_returned: u32 = 0; let result = unsafe { WinSock::WSAIoctl( raw_socket_id, WinSock::SIO_QUERY_WFP_CONNECTION_REDIRECT_CONTEXT, ptr::null(), 0, &value as *const AuditEntry as *mut c_void, redirect_context_size, &mut redirect_context_returned, ptr::null_mut(), None, ) }; if result != 0 { let error = unsafe { WinSock::WSAGetLastError() }; return Err(Error::WindowsApi(WindowsApiErrorType::WSAIoctl(format!( "SIO_QUERY_WFP_CONNECTION_REDIRECT_CONTEXT result: {}, WSAGetLastError: {}", result, error, )))); } // Need to check the returned size to ensure it matches the expected size, // since the result is 0 even if there is no redirect context in this socket stream. 
if redirect_context_returned != redirect_context_size { return Err(Error::WindowsApi(WindowsApiErrorType::WSAIoctl(format!( "SIO_QUERY_WFP_CONNECTION_REDIRECT_CONTEXT returned size: {}, expected size: {}", redirect_context_returned, redirect_context_size, )))); } Ok(value) } pub async fn update_wire_server_redirect_policy( redirect: bool, redirector_shared_state: RedirectorSharedState, ) { if let Ok(Some(bpf_object)) = redirector_shared_state.get_bpf_object().await { if redirect { if let Ok(local_port) = redirector_shared_state.get_local_port().await { if let Err(e) = bpf_object.lock().unwrap().update_policy_elem_bpf_map( "WireServer endpoints", local_port, constants::WIRE_SERVER_IP_NETWORK_BYTE_ORDER, constants::WIRE_SERVER_PORT, ) { logger::write_error(format!( "Failed to update bpf map for wireserver redirect policy with result: {}", e )); } else { logger::write( "Success updated bpf map for wireserver redirect policy.".to_string(), ); } } } else if let Err(e) = bpf_object.lock().unwrap().remove_policy_elem_bpf_map( constants::WIRE_SERVER_IP_NETWORK_BYTE_ORDER, constants::WIRE_SERVER_PORT, ) { logger::write_error(format!( "Failed to delete bpf map for wireserver redirect policy with result: {}", e )); } else { logger::write("Success deleted bpf map for wireserver redirect policy.".to_string()); } } } pub async fn update_imds_redirect_policy( redirect: bool, redirector_shared_state: RedirectorSharedState, ) { if let Ok(Some(bpf_object)) = redirector_shared_state.get_bpf_object().await { if redirect { if let Ok(local_port) = redirector_shared_state.get_local_port().await { if let Err(e) = bpf_object.lock().unwrap().update_policy_elem_bpf_map( "IMDS endpoints", local_port, constants::IMDS_IP_NETWORK_BYTE_ORDER, constants::IMDS_PORT, ) { logger::write_error(format!( "Failed to update bpf map for IMDS redirect policy with result: {e}" )); } else { logger::write("Success updated bpf map for IMDS redirect policy.".to_string()); } } } else if let Err(e) = bpf_object .lock() .unwrap() .remove_policy_elem_bpf_map(constants::IMDS_IP_NETWORK_BYTE_ORDER, constants::IMDS_PORT) { logger::write_error(format!( "Failed to delete bpf map for IMDS redirect policy with result: {e}" )); } else { logger::write("Success deleted bpf map for IMDS redirect policy.".to_string()); } } } pub async fn update_hostga_redirect_policy( redirect: bool, redirector_shared_state: RedirectorSharedState, ) { if let Ok(Some(bpf_object)) = redirector_shared_state.get_bpf_object().await { if redirect { if let Ok(local_port) = redirector_shared_state.get_local_port().await { if let Err(e) = bpf_object.lock().unwrap().update_policy_elem_bpf_map( "Host GAPlugin endpoints", local_port, constants::GA_PLUGIN_IP_NETWORK_BYTE_ORDER, constants::GA_PLUGIN_PORT, ) { logger::write_error(format!( "Failed to update bpf map for HostGAPlugin redirect policy with result: {e}" )); } else { logger::write( "Success updated bpf map for HostGAPlugin redirect policy.".to_string(), ); } } } else if let Err(e) = bpf_object.lock().unwrap().remove_policy_elem_bpf_map( constants::GA_PLUGIN_IP_NETWORK_BYTE_ORDER, constants::GA_PLUGIN_PORT, ) { logger::write_error(format!( "Failed to delete bpf map for HostGAPlugin redirect policy with result: {e}" )); } else { logger::write("Success deleted bpf map for HostGAPlugin redirect policy.".to_string()); } } } 
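// --- Illustration only (not part of the original crate) ---
// The redirect-policy helpers above pass destination IPv4 addresses as u32 values laid out in
// network byte order, e.g. constants::WIRE_SERVER_IP_NETWORK_BYTE_ORDER (0x10813FA8) for
// 168.63.129.16. Below is a minimal, hypothetical sketch of that encoding using only std; the
// crate itself uses its own string_to_ip/ip_to_string helpers (string_to_ip logs a warning and
// returns 0 on malformed input rather than None).
#[allow(dead_code)]
fn ipv4_to_network_byte_order_u32(dotted: &str) -> Option<u32> {
    // Parse the dotted-quad string into four octets, rejecting anything that is not a valid u8.
    let octets = dotted
        .split('.')
        .map(|s| s.parse::<u8>().ok())
        .collect::<Option<Vec<u8>>>()?;
    if octets.len() != 4 {
        return None;
    }
    // Treating the four octets as a little-endian u32 keeps the in-memory byte sequence equal to
    // the on-wire (big-endian) representation, which is the layout the eBPF policy maps expect.
    Some(u32::from_le_bytes([octets[0], octets[1], octets[2], octets[3]]))
}
// Expected values under this assumption:
//   ipv4_to_network_byte_order_u32("168.63.129.16")   == Some(0x1081_3FA8) // WireServer / Host GAPlugin
//   ipv4_to_network_byte_order_u32("169.254.169.254") == Some(0xFEA9_FEA9) // IMDS
//   ipv4_to_network_byte_order_u32("1270.0.0.1")      == None              // malformed input
// --- End illustration ---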
GuestProxyAgent-1.0.30/proxy_agent/src/redirector/windows/000077500000000000000000000000001500521614600236655ustar00rootroot00000000000000GuestProxyAgent-1.0.30/proxy_agent/src/redirector/windows/bpf_api.rs000066400000000000000000000262661500521614600256470ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT #![cfg(windows)] #![allow(non_snake_case)] use super::bpf_obj::*; use crate::common::{ error::{BpfErrorType, Error}, logger, result::Result, }; use libloading::{Library, Symbol}; use proxy_agent_shared::{ logger::LoggerLevel, misc_helpers, telemetry::event_logger, version::Version, }; use std::ffi::{c_char, c_int, c_long, c_uint, c_void, CString}; use std::path::PathBuf; use std::{env, path::Path}; static EBPF_API: tokio::sync::OnceCell = tokio::sync::OnceCell::const_new(); static EBPF_API_VERSION: tokio::sync::OnceCell = tokio::sync::OnceCell::const_new(); const EBPF_API_FILE_NAME: &str = "EbpfApi.dll"; pub fn try_load_ebpf_api() -> bool { if !EBPF_API.initialized() { if let Some(ebpf_api) = init_ebpf_lib() { if let Err(e) = EBPF_API.set(ebpf_api) { event_logger::write_event( LoggerLevel::Error, format!("{}", e), "try_load_ebpf_api", "redirector", logger::AGENT_LOGGER_KEY, ); } } } EBPF_API.initialized() } pub fn get_ebpf_api_version() -> Option { EBPF_API_VERSION.get().cloned() } fn init_ebpf_lib() -> Option { let program_files_dir = env::var("ProgramFiles").unwrap_or("C:\\Program Files".to_string()); let program_files_dir = PathBuf::from(program_files_dir); let ebpf_for_windows_dir = program_files_dir.join("ebpf-for-windows"); let bpf_api_file_path = ebpf_for_windows_dir.join(EBPF_API_FILE_NAME); logger::write_information(format!( "Try to load ebpf api file from: {}", misc_helpers::path_to_string(&bpf_api_file_path) )); match load_ebpf_api(&bpf_api_file_path) { Ok(ebpf_lib) => { if !EBPF_API_VERSION.initialized() { // fetch the product version of this dll match proxy_agent_shared::windows::get_file_product_version(&bpf_api_file_path) { Ok(v) => { if let Err(e) = EBPF_API_VERSION.set(v) { event_logger::write_event( LoggerLevel::Error, format!("{}", e), "EBPF_API_VERSION.set", "redirector", logger::AGENT_LOGGER_KEY, ); } } Err(e) => { event_logger::write_event( LoggerLevel::Warn, format!("{}", e), "get_file_product_version", "redirector", logger::AGENT_LOGGER_KEY, ); } } } return Some(ebpf_lib); } Err(e) => { event_logger::write_event( LoggerLevel::Warn, format!("{}", e), "load_ebpf_api", "redirector", logger::AGENT_LOGGER_KEY, ); } } logger::write_warning("Try to load ebpf api file from default system path".to_string()); match load_ebpf_api(&PathBuf::from(EBPF_API_FILE_NAME)) { Ok(ebpf_lib) => { return Some(ebpf_lib); } Err(e) => { event_logger::write_event( LoggerLevel::Warn, format!("{}", e), "load_ebpf_api", "redirector", logger::AGENT_LOGGER_KEY, ); } } None } fn load_ebpf_api(bpf_api_file_path: &Path) -> Result { // Safety: Loading a library on Windows has the following safety requirements: // 1. The safety requirements of the library initialization and/or termination routines must be met. // The eBPF for Windows library does not have any safety requirements for its initialization and de-initialization function[0]. // [0] https://github.com/microsoft/ebpf-for-windows/blob/Release-v0.17.1/ebpfapi/dllmain.cpp // // 2. Calling this function is not safe in multi-thread scenarios where a library file name is provided and the library // search path is modified. 
// We satisfy the this requirement by providing the absolute path to the library. unsafe { Library::new(bpf_api_file_path) }.map_err(|e| { Error::Bpf(BpfErrorType::LoadBpfApi( misc_helpers::path_to_string(bpf_api_file_path), format!("{}, last OS error: {}", e, std::io::Error::last_os_error()), )) }) } fn get_ebpf_api() -> Result<&'static Library> { match EBPF_API.get() { Some(api) => Ok(api), None => Err(Error::Bpf(BpfErrorType::GetBpfApi)), } } // function name must null terminated with '\0'. fn get_ebpf_api_fun<'a, T>(ebpf_api: &'a Library, name: &str) -> Result> { unsafe { ebpf_api.get(name.as_bytes()) }.map_err(|e| { Error::Bpf(BpfErrorType::LoadBpfApiFunction( name.to_string(), format!("{}, last OS error: {}", e, std::io::Error::last_os_error()), )) }) } // Object type BpfObjectOpen = unsafe extern "C" fn(path: *const c_char) -> *mut bpf_object; type BpfObjectLoad = unsafe extern "C" fn(obj: *mut bpf_object) -> c_int; type BpfObjectClose = unsafe extern "C" fn(obj: *mut bpf_object) -> c_void; // Program type BpfObjectFindProgramByName = unsafe extern "C" fn(obj: *const bpf_object, name: *const c_char) -> *mut ebpf_program_t; // type BpfProgramFd = unsafe extern "C" fn(prog: *const ebpf_program_t) -> c_int; // type BpfProgAttach = unsafe extern "C" fn( // prog_fd: c_int, // attachable_fd: c_int, // attach_type: bpf_attach_type, // flags: c_uint, // ) -> c_int; type EBpfProgAttach = unsafe extern "C" fn( prog: *const ebpf_program_t, attach_type: *const ebpf_attach_type_t, attach_parameters: *const c_void, attach_params_size: usize, link: *mut *mut ebpf_link_t, ) -> c_int; type BpfLinkDisconnect = unsafe extern "C" fn(link: *mut ebpf_link_t) -> c_void; type BpfLinkDestroy = unsafe extern "C" fn(link: *mut ebpf_link_t) -> c_int; // Map type BpfObjectFindMapByName = unsafe extern "C" fn(obj: *const bpf_object, name: *const c_char) -> *mut bpf_map; type BpfMapFd = unsafe extern "C" fn(map: *const bpf_map) -> c_int; type BpfMapUpdateElem = unsafe extern "C" fn( map_fd: c_int, key: *const c_void, value: *const c_void, flags: c_uint, ) -> c_int; type BpfMapLookupElem = unsafe extern "C" fn(map_fd: c_int, key: *const c_void, value: *mut c_void) -> c_int; type BpfMapDeleteElem = unsafe extern "C" fn(map_fd: c_int, key: *const c_void) -> c_int; type LibBpfGetError = unsafe extern "C" fn(no_use_ptr: *const c_void) -> c_long; fn get_cstring(s: &str) -> Result { CString::new(s).map_err(|e| Error::Bpf(BpfErrorType::CString(e))) } pub fn libbpf_get_error() -> Result { let ebpf_api = get_ebpf_api()?; let libbpf_get_error: Symbol = get_ebpf_api_fun(ebpf_api, "libbpf_get_error\0")?; Ok(unsafe { libbpf_get_error(std::ptr::null()) }) } pub fn bpf_object__open(path: &str) -> Result<*mut bpf_object> { let ebpf_api = get_ebpf_api()?; let open_ebpf_object: Symbol = get_ebpf_api_fun(ebpf_api, "bpf_object__open\0")?; // lifetime of the value must be longer than the lifetime of the pointer returned by as_ptr let c_string = get_cstring(path)?; Ok(unsafe { open_ebpf_object(c_string.as_ptr()) }) } pub fn bpf_object__load(obj: *mut bpf_object) -> Result { let ebpf_api = get_ebpf_api()?; let load_ebpf_object: Symbol = get_ebpf_api_fun(ebpf_api, "bpf_object__load\0")?; Ok(unsafe { load_ebpf_object(obj) }) } pub fn bpf_object__close(object: *mut bpf_object) -> Result { let ebpf_api = get_ebpf_api()?; let object__close: Symbol = get_ebpf_api_fun(ebpf_api, "bpf_object__close\0")?; Ok(unsafe { object__close(object) }) } pub fn bpf_object__find_program_by_name( obj: *mut bpf_object, name: &str, ) -> Result<*mut 
ebpf_program_t> { let ebpf_api = get_ebpf_api()?; let find_program_by_name: Symbol = get_ebpf_api_fun(ebpf_api, "bpf_object__find_program_by_name\0")?; // lifetime of the value must be longer than the lifetime of the pointer returned by as_ptr let c_string = get_cstring(name)?; Ok(unsafe { find_program_by_name(obj, c_string.as_ptr()) }) } pub fn ebpf_prog_attach( prog: *mut ebpf_program_t, attach_type: *const ebpf_attach_type_t, attach_parameters: *const c_void, attach_params_size: usize, link: *mut *mut ebpf_link_t, ) -> Result { let ebpf_api = get_ebpf_api()?; let program_attach: Symbol = get_ebpf_api_fun(ebpf_api, "ebpf_program_attach\0")?; Ok(unsafe { program_attach( prog, attach_type, attach_parameters, attach_params_size, link, ) }) } pub fn bpf_link_disconnect(link: *mut ebpf_link_t) -> Result { let ebpf_api = get_ebpf_api()?; let link_disconnect: Symbol = get_ebpf_api_fun(ebpf_api, "bpf_link__disconnect\0")?; Ok(unsafe { link_disconnect(link) }) } pub fn bpf_link_destroy(link: *mut ebpf_link_t) -> Result { let ebpf_api = get_ebpf_api()?; let link_destroy: Symbol = get_ebpf_api_fun(ebpf_api, "bpf_link__destroy\0")?; Ok(unsafe { link_destroy(link) }) } pub fn bpf_object__find_map_by_name(obj: *mut bpf_object, name: &str) -> Result<*mut bpf_map> { let ebpf_api = get_ebpf_api()?; let find_map_by_name: Symbol = get_ebpf_api_fun(ebpf_api, "bpf_object__find_map_by_name\0")?; // lifetime of the value must be longer than the lifetime of the pointer returned by as_ptr let c_string = get_cstring(name)?; Ok(unsafe { find_map_by_name(obj, c_string.as_ptr()) }) } pub fn bpf_map__fd(map: *mut bpf_map) -> Result { let ebpf_api = get_ebpf_api()?; let map__fd: Symbol = get_ebpf_api_fun(ebpf_api, "bpf_map__fd\0")?; Ok(unsafe { map__fd(map) }) } pub fn bpf_map_update_elem( map_fd: c_int, key: *const c_void, value: *const c_void, flags: c_uint, ) -> Result { let ebpf_api = get_ebpf_api()?; let map_update_elem: Symbol = get_ebpf_api_fun(ebpf_api, "bpf_map_update_elem\0")?; Ok(unsafe { map_update_elem(map_fd, key, value, flags) }) } pub fn bpf_map_lookup_elem(map_fd: c_int, key: *const c_void, value: *mut c_void) -> Result { let ebpf_api = get_ebpf_api()?; let map_lookup_elem: Symbol = get_ebpf_api_fun(ebpf_api, "bpf_map_lookup_elem\0")?; Ok(unsafe { map_lookup_elem(map_fd, key, value) }) } pub fn bpf_map_delete_elem(map_fd: c_int, key: *const c_void) -> Result { let ebpf_api = get_ebpf_api()?; let map_delete_elem: Symbol = get_ebpf_api_fun(ebpf_api, "bpf_map_delete_elem\0")?; Ok(unsafe { map_delete_elem(map_fd, key) }) } GuestProxyAgent-1.0.30/proxy_agent/src/redirector/windows/bpf_obj.rs000066400000000000000000000202431500521614600256350ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT #![allow(non_camel_case_types)] use std::ffi::c_char; pub type ebpf_id_t = u32; pub type fd_t = i32; pub type ebpf_handle_t = i64; pub type ebpf_program_type_t = uuid::Uuid; pub type ebpf_attach_type_t = uuid::Uuid; // Type aliases used by libbpf headers. pub type __s32 = i32; pub type __s64 = i64; pub type __be16 = u64; pub type __u16 = u16; pub type __be32 = u32; pub type __u32 = u32; pub type __wsum = u32; pub type __u64 = u64; #[allow(dead_code)] #[repr(C)] pub enum ebpf_map_type_t { BPF_MAP_TYPE_UNSPEC = 0, ///< Unspecified map type. BPF_MAP_TYPE_HASH = 1, ///< Hash table. BPF_MAP_TYPE_ARRAY = 2, ///< Array, where the map key is the array index. BPF_MAP_TYPE_PROG_ARRAY = 3, ///< Array of program fds usable with bpf_tail_call, where the map key is the array index. 
BPF_MAP_TYPE_PERCPU_HASH = 4, //< Per-CPU hash table. BPF_MAP_TYPE_PERCPU_ARRAY = 5, //< Per-CPU array. BPF_MAP_TYPE_HASH_OF_MAPS = 6, //< Hash table, where the map value is another map. BPF_MAP_TYPE_ARRAY_OF_MAPS = 7, //< Array, where the map value is another map. BPF_MAP_TYPE_LRU_HASH = 8, //< Least-recently-used hash table. BPF_MAP_TYPE_LPM_TRIE = 9, //< Longest prefix match trie. BPF_MAP_TYPE_QUEUE = 10, //< Queue. BPF_MAP_TYPE_LRU_PERCPU_HASH = 11, //< Per-CPU least-recently-used hash table. BPF_MAP_TYPE_STACK = 12, //< Stack. BPF_MAP_TYPE_RINGBUF = 13, //< Ring buffer map type. } #[allow(dead_code)] #[repr(C)] pub enum ebpf_pin_type_t { PIN_NONE, //< Object is not pinned. PIN_OBJECT_NS, //< Pinning that is local to an object. PIN_GLOBAL_NS, //< Pinning with a global namespace. PIN_CUSTOM_NS, //< Pinning with a custom path given as section parameter. } #[allow(dead_code)] #[repr(C)] pub enum ebpf_execution_type_t { EBPF_EXECUTION_ANY, //< Execute in JIT-compiled or interpreted mode, per system policy. EBPF_EXECUTION_JIT, //< Execute in JIT-compiled mode. EBPF_EXECUTION_INTERPRET, //< Execute in interpreted mode. EBPF_EXECUTION_NATIVE, //< Execute from native driver. } #[allow(dead_code)] #[repr(C)] pub enum bpf_attach_type { BPF_ATTACH_TYPE_UNSPEC, //< Unspecified attach type. /** @brief Attach type for handling incoming packets as early as possible. * * **Program type:** \ref BPF_PROG_TYPE_XDP */ BPF_XDP, /** @brief Attach type for handling socket bind() requests. * * **Program type:** \ref BPF_PROG_TYPE_BIND */ BPF_ATTACH_TYPE_BIND, /** @brief Attach type for handling IPv4 TCP connect() or UDP send * to a unique remote address/port tuple. * * **Program type:** \ref BPF_PROG_TYPE_CGROUP_SOCK_ADDR */ BPF_CGROUP_INET4_CONNECT, /** @brief Attach type for handling IPv6 TCP connect() or UDP send * to a unique remote address/port tuple. * * **Program type:** \ref BPF_PROG_TYPE_CGROUP_SOCK_ADDR */ BPF_CGROUP_INET6_CONNECT, /** @brief Attach type for handling IPv4 TCP accept() or on receiving * the first unicast UDP packet from a unique remote address/port tuple. * * **Program type:** \ref BPF_PROG_TYPE_CGROUP_SOCK_ADDR */ BPF_CGROUP_INET4_RECV_ACCEPT, /** @brief Attach type for handling IPv6 TCP accept() or on receiving * the first unicast UDP packet from a unique remote address/port tuple. * * **Program type:** \ref BPF_PROG_TYPE_CGROUP_SOCK_ADDR */ BPF_CGROUP_INET6_RECV_ACCEPT, /** @brief Attach type for handling various socket event notifications. * * **Program type:** \ref BPF_PROG_TYPE_SOCK_OPS */ BPF_CGROUP_SOCK_OPS, /** @brief Attach type implemented by eBPF Sample Extension driver, used for testing. 
* * **Program type:** \ref BPF_PROG_TYPE_SAMPLE */ BPF_ATTACH_TYPE_SAMPLE, __MAX_BPF_ATTACH_TYPE, } // ebpf instruction schema #[repr(C)] pub struct ebpf_inst { opcode: u8, dst: u8, //< Destination register src: u8, //< Source register offset: i16, imm: i32, //< Immediate constant } #[repr(C)] pub struct ebpf_program_t { object: *mut bpf_object, section_name: *mut c_char, program_name: *mut c_char, instructions: *mut ebpf_inst, instruction_count: usize, program_type: ebpf_program_type_t, attach_type: ebpf_attach_type_t, handle: ebpf_handle_t, fd: fd_t, pinned: bool, log_buffer: *mut c_char, log_buffer_size: u32, } #[repr(C)] pub struct ebpf_link_t { pin_path: *mut c_char, handle: ebpf_handle_t, fd: fd_t, disconnected: bool, } impl ebpf_link_t { pub fn empty() -> Self { ebpf_link_t { pin_path: std::ptr::null_mut(), handle: 0, fd: 0, disconnected: false, } } } /** * @brief eBPF Map Definition as it is stored in memory. */ #[repr(C)] pub struct _ebpf_map_definition_in_memory { map_type: ebpf_map_type_t, //< Type of map. key_size: u32, //< Size in bytes of a map key. value_size: u32, //< Size in bytes of a map value. max_entries: u32, //< Maximum number of entries allowed in the map. inner_map_id: ebpf_id_t, pinning: ebpf_pin_type_t, } pub type ebpf_map_definition_in_memory_t = _ebpf_map_definition_in_memory; #[repr(C)] pub struct bpf_map { object: *mut bpf_object, //< Pointer to the object containing this map. name: *mut c_char, //< Name of the map. ; // Map handle generated by the execution context. map_handle: ebpf_handle_t, // Map ID generated by the execution context. map_id: ebpf_id_t, // File descriptor specific to the caller's process. map_fd: fd_t, // Original fd as it appears in the eBPF byte code // before relocation. original_fd: fd_t, // Original fd of the inner_map. inner_map_original_fd: fd_t, inner_map: *mut bpf_map, map_definition: ebpf_map_definition_in_memory_t, pin_path: *mut c_char, pinned: bool, // Whether this map is newly created or reused // from an existing map. 
reused: bool, } pub type ebpf_map_t = bpf_map; #[repr(C)] pub struct bpf_object { object_name: *mut c_char, file_name: *mut c_char, programs: Vec<*mut ebpf_program_t>, maps: Vec<*mut ebpf_map_t>, loaded: bool, execution_type: ebpf_execution_type_t, } #[repr(C)] pub struct _sock_addr_audit_key { pub protocol: u32, pub source_port: [u16; 2], } pub type sock_addr_audit_key_t = _sock_addr_audit_key; impl sock_addr_audit_key_t { pub fn from_source_port(port: u16) -> Self { sock_addr_audit_key_t { protocol: IPPROTO_TCP, source_port: [port.to_be(), 0], } } } #[repr(C)] pub struct _ip_address { //pub ipv4: u32, pub ip: [u32; 4], // ipv4 uses the first element; ipv6 uses all 4 elements } impl _ip_address { fn empty() -> Self { _ip_address { ip: [0, 0, 0, 0] } } pub fn from_ipv4(ipv4: u32) -> Self { let mut ip = Self::empty(); ip.ip[0] = ipv4; ip } #[allow(dead_code)] pub fn from_ipv6(ipv6: [u32; 4]) -> Self { let mut ip = Self::empty(); ip.ip.copy_from_slice(&ipv6); ip } } pub type ip_address_t = _ip_address; #[repr(C)] pub struct _destination_entry { pub destination_ip: ip_address_t, pub destination_port: [u16; 2], // first element is the port number, second element is empty pub protocol: u32, } impl _destination_entry { pub fn empty() -> Self { _destination_entry { destination_ip: ip_address_t::empty(), destination_port: [0, 0], protocol: IPPROTO_TCP, } } pub fn from_ipv4(ipv4: u32, port: u16) -> Self { let mut entry = Self::empty(); entry.destination_ip = ip_address_t::from_ipv4(ipv4); entry.destination_port[0] = port.to_be(); entry } } pub type destination_entry_t = _destination_entry; pub const IPPROTO_TCP: u32 = 6; #[allow(dead_code)] pub const IPPROTO_UDP: u32 = 17; #[repr(C)] pub struct _sock_addr_skip_process_entry { pub pid: u32, } pub type sock_addr_skip_process_entry = _sock_addr_skip_process_entry; GuestProxyAgent-1.0.30/proxy_agent/src/redirector/windows/bpf_prog.rs000066400000000000000000000327551500521614600260450ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT use super::bpf_api::*; use super::bpf_obj::*; use super::BpfObject; use crate::common::constants; use crate::common::logger; use crate::common::{ error::{BpfErrorType, Error}, result::Result, }; use crate::redirector::AuditEntry; use proxy_agent_shared::misc_helpers; use std::ffi::c_void; use std::mem::size_of_val; use std::path::Path; // This module contains the logic to interact with the windows eBPF program & maps. impl BpfObject { fn is_null(&self) -> bool { self.0.is_null() } pub fn new() -> Self { Self(std::ptr::null::().cast_mut()) } /** Routine Description: This routine load bpf object. Arguments: bpf_file_path - Path to the bpf object file. Return Value: On failure appropriate BpfErrorType is returned. 
*/ pub fn load_bpf_object(&mut self, bpf_file_path: &Path) -> Result<()> { logger::write_information(format!( "Starting redirector with ebpf file {}", misc_helpers::path_to_string(bpf_file_path) )); self.close_bpf_object(); let obj = match bpf_object__open(&misc_helpers::path_to_string(bpf_file_path)) { Ok(obj) => obj, Err(e) => { //logger::write_error(format!("{}", e)); // return EBPF_OPEN_ERROR; return Err(Error::Bpf(BpfErrorType::OpenBpfObject( bpf_file_path.display().to_string(), e.to_string(), ))); } }; if obj.is_null() { let error_code = libbpf_get_error()?; return Err(Error::Bpf(BpfErrorType::OpenBpfObject( bpf_file_path.display().to_string(), format!( "bpf_object__open return null pointer with error code '{}'", error_code ), ))); } let result = match bpf_object__load(obj) { Ok(r) => r, Err(e) => { // logger::write_error(format!("{}", e)); // return EBPF_LOAD_ERROR; return Err(Error::Bpf(BpfErrorType::LoadBpfObject( bpf_file_path.display().to_string(), e.to_string(), ))); } }; if result == 0 { self.0 = obj; } else { return Err(Error::Bpf(BpfErrorType::LoadBpfObject( bpf_file_path.display().to_string(), format!("bpf_object__load return with error code '{}'", result), ))); } Ok(()) } /** Routine Description: This routine attach authorize_connect4 to bpf. Arguments: Return Value: On failure appropriate RESULT is returned. */ pub fn attach_bpf_prog(&self) -> Result<()> { if self.is_null() { return Err(Error::Bpf(BpfErrorType::NullBpfObject)); } let program_name = "authorize_connect4"; let connect4_program = match bpf_object__find_program_by_name(self.0, program_name) { Ok(p) => { logger::write_information(format!("Found {} program.", program_name)); p } Err(e) => { return Err(Error::Bpf(BpfErrorType::AttachBpfProgram( program_name.to_string(), e.to_string(), ))); } }; if connect4_program.is_null() { return Err(Error::Bpf(BpfErrorType::AttachBpfProgram( program_name.to_string(), "bpf_object__find_program_by_name return null".to_string(), ))); } let compartment_id = 1; let mut link: ebpf_link_t = ebpf_link_t::empty(); let mut link: *mut ebpf_link_t = &mut link as *mut ebpf_link_t; let link: *mut *mut ebpf_link_t = &mut link as *mut *mut ebpf_link_t; match ebpf_prog_attach( connect4_program, std::ptr::null(), &compartment_id as *const i32 as *const c_void, size_of_val(&compartment_id), link, ) { Ok(r) => { if r != 0 { return Err(Error::Bpf(BpfErrorType::AttachBpfProgram( program_name.to_string(), format!("ebpf_prog_attach return with error code '{}'", r), ))); } logger::write_information( "Success attached authorize_connect4 program.".to_string(), ); match bpf_link_disconnect(unsafe { *link }) { Ok(_r) => { logger::write_information("Success disconnected link.".to_string()); match bpf_link_destroy(unsafe { *link }) { Ok(r) => { if r != 0 { return Err(Error::Bpf(BpfErrorType::AttachBpfProgram( program_name.to_string(), format!("bpf_link_destroy return with error code '{}'", r), ))); } logger::write_information("Success destroyed link.".to_string()); } Err(e) => { return Err(Error::Bpf(BpfErrorType::AttachBpfProgram( program_name.to_string(), format!("bpf_link_destroy return with error '{}'", e), ))); } } } Err(e) => { return Err(Error::Bpf(BpfErrorType::AttachBpfProgram( program_name.to_string(), format!("bpf_link_disconnect return with error '{}'", e), ))); } } } Err(e) => { return Err(Error::Bpf(BpfErrorType::AttachBpfProgram( program_name.to_string(), format!("ebpf_prog_attach return with error '{}'", e), ))); } } Ok(()) } /** Routine Description: This routine add element to policy_map. 
Arguments: local_port - proxy local port. dest_ipv4 - destination ipv4 address. dest_port - destination port. shared_state - shared state. Return Value: On failure appropriate RESULT is returned. */ pub fn update_policy_elem_bpf_map( &self, endpoint_name: &str, local_port: u16, dest_ipv4: u32, dest_port: u16, ) -> Result<()> { let map_name = "policy_map"; let map_fd = self.get_bpf_map_fd(map_name)?; let key = destination_entry_t::from_ipv4(dest_ipv4, dest_port); let value = destination_entry_t::from_ipv4( constants::PROXY_AGENT_IP_NETWORK_BYTE_ORDER, //0x100007F - 127.0.0.1 local_port, ); let result = bpf_map_update_elem( map_fd, &key as *const destination_entry_t as *const c_void, &value as *const destination_entry_t as *const c_void, 0, ) .map_err(|e| { Error::Bpf(BpfErrorType::UpdateBpfMapHashMap( map_name.to_string(), endpoint_name.to_string(), format!("bpf_map_update_elem returned error {}", e), )) })?; if result != 0 { return Err(Error::Bpf(BpfErrorType::UpdateBpfMapHashMap( map_name.to_string(), endpoint_name.to_string(), format!("bpf_map_update_elem returned error code {}", result), ))); } Ok(()) } /** Routine Description: This routine close bpf object. Arguments: Return Value: */ pub fn close_bpf_object(&mut self) { if self.0.is_null() { return; } if let Err(e) = bpf_object__close(self.0) { logger::write_error(format!("bpf_object__close with error: {}", e)); } self.0 = std::ptr::null::().cast_mut(); } /** Routine Description: This routine lookup element from audit_map. Arguments: source_port - source local port. entry - element from audit_map. Return Value: 0 on success. On failure appropriate RESULT is returned. */ pub fn lookup_audit(&self, source_port: u16) -> Result { let map_name = "audit_map"; let map_fd = self.get_bpf_map_fd(map_name)?; // query by source port. let key = sock_addr_audit_key_t::from_source_port(source_port); let value = AuditEntry::empty(); let result = bpf_map_lookup_elem( map_fd, &key as *const sock_addr_audit_key_t as *const c_void, &value as *const AuditEntry as *mut c_void, ) .map_err(|e| { Error::Bpf(BpfErrorType::MapLookupElem( source_port.to_string(), format!("Error: {}", e), )) })?; if result != 0 { return Err(Error::Bpf(BpfErrorType::MapLookupElem( source_port.to_string(), format!("Result: {}", result), ))); } Ok(value) } /** Routine Description: This routine add element to skip_process_map. Arguments: pid - process pid to skip redirect. Return Value: On failure appropriate RESULT is returned. */ pub fn update_skip_process_map(&self, pid: u32) -> Result<()> { let map_name = "skip_process_map"; let map_fd = self.get_bpf_map_fd(map_name)?; // insert process id entry. let key = sock_addr_skip_process_entry { pid }; let value = sock_addr_skip_process_entry { pid }; let result = bpf_map_update_elem( map_fd, &key as *const sock_addr_skip_process_entry as *const c_void, &value as *const sock_addr_skip_process_entry as *const c_void, 0, ) .map_err(|e| { Error::Bpf(BpfErrorType::UpdateBpfMapHashMap( map_name.to_string(), format!("insert pid: {}", pid), format!("bpf_map_update_elem returned error {}", e), )) })?; if result != 0 { return Err(Error::Bpf(BpfErrorType::UpdateBpfMapHashMap( map_name.to_string(), format!("insert pid: {}", pid), format!("bpf_map_update_elem returned error code {}", result), ))); } Ok(()) } /** Routine Description: This routine delete element from policy_map. Arguments: dest_ipv4 - destination ipv4 address. dest_port - destination port. Return Value: On failure appropriate RESULT is returned. 
*/ pub fn remove_policy_elem_bpf_map(&self, dest_ipv4: u32, dest_port: u16) -> Result<()> { let map_name = "policy_map"; let map_fd = self.get_bpf_map_fd(map_name)?; let key = destination_entry_t::from_ipv4(dest_ipv4, dest_port); let result = bpf_map_delete_elem(map_fd, &key as *const destination_entry_t as *const c_void) .map_err(|e| { Error::Bpf(BpfErrorType::MapDeleteElem( format!("dest_ipv4: {}, dest_port: {}", dest_ipv4, dest_port), format!("Error: {}", e), )) })?; if result != 0 { return Err(Error::Bpf(BpfErrorType::MapDeleteElem( format!("dest_ipv4: {}, dest_port: {}", dest_ipv4, dest_port), format!("Result: {}", result), ))); } Ok(()) } pub fn remove_audit_map_entry(&self, source_port: u16) -> Result<()> { let audit_map_name = "audit_map"; let map_fd = self.get_bpf_map_fd(audit_map_name)?; let key = sock_addr_audit_key_t::from_source_port(source_port); let result = bpf_map_delete_elem( map_fd, &key as *const sock_addr_audit_key_t as *const c_void, ) .map_err(|e| { Error::Bpf(BpfErrorType::MapDeleteElem( source_port.to_string(), format!("Error: {}", e), )) })?; if result != 0 { return Err(Error::Bpf(BpfErrorType::MapDeleteElem( source_port.to_string(), format!("Result: {}", result), ))); } Ok(()) } fn get_bpf_map_fd(&self, map_name: &str) -> Result { if self.is_null() { return Err(Error::Bpf(BpfErrorType::NullBpfObject)); } let bpf_map = bpf_object__find_map_by_name(self.0, map_name).map_err(|e| { Error::Bpf(BpfErrorType::GetBpfMap(map_name.to_string(), e.to_string())) })?; if bpf_map.is_null() { return Err(Error::Bpf(BpfErrorType::GetBpfMap( map_name.to_string(), "bpf_object__find_map_by_name returns null pointer".to_string(), ))); } bpf_map__fd(bpf_map).map_err(|e| Error::Bpf(BpfErrorType::MapFileDescriptor(e.to_string()))) } } GuestProxyAgent-1.0.30/proxy_agent/src/service.rs000066400000000000000000000123541500521614600220440ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT #[cfg(windows)] pub mod windows; use crate::common::{config, constants, helpers, logger}; use crate::key_keeper::KeyKeeper; use crate::proxy::proxy_connection::ConnectionLogger; use crate::proxy::proxy_server::ProxyServer; use crate::redirector::{self, Redirector}; use crate::shared_state::SharedState; use proxy_agent_shared::logger::rolling_logger::RollingLogger; use proxy_agent_shared::logger::{logger_manager, LoggerLevel}; use proxy_agent_shared::telemetry::event_logger; use std::path::PathBuf; #[cfg(not(windows))] use std::time::Duration; /// Start the service with the shared state. 
/// Example: /// ```rust /// use proxy_agent::service; /// use proxy_agent::shared_state::SharedState; /// /// let shared_state = SharedState::start_all(); /// service::start_service(shared_state).await; /// ``` pub async fn start_service(shared_state: SharedState) { let log_folder = config::get_logs_dir(); if log_folder == PathBuf::from("") { logger::write_console_log( "The log folder is not set, skip write to GPA managed file log.".to_string(), ); } else { setup_loggers(log_folder, config::get_file_log_level()); } let start_message = format!( "============== GuestProxyAgent ({}) is starting on {}({}), elapsed: {}", proxy_agent_shared::misc_helpers::get_current_version(), helpers::get_long_os_version(), helpers::get_cpu_arch(), helpers::get_elapsed_time_in_millisec() ); logger::write_information(start_message.clone()); #[cfg(not(windows))] logger::write_serial_console_log(start_message); tokio::spawn({ let key_keeper = KeyKeeper::new( (format!("http://{}/", constants::WIRE_SERVER_IP)) .parse() .unwrap(), config::get_keys_dir(), config::get_logs_dir(), config::get_poll_key_status_duration(), &shared_state, ); async move { key_keeper.poll_secure_channel_status().await; } }); tokio::spawn({ let redirector: Redirector = Redirector::new(constants::PROXY_AGENT_PORT, &shared_state); async move { redirector.start().await; } }); tokio::spawn({ let proxy_server = ProxyServer::new(constants::PROXY_AGENT_PORT, &shared_state); async move { proxy_server.start().await; } }); } fn setup_loggers(log_folder: PathBuf, max_logger_level: LoggerLevel) { logger_manager::set_logger_level(max_logger_level); let agent_logger = RollingLogger::create_new( log_folder.clone(), "ProxyAgent.log".to_string(), constants::MAX_LOG_FILE_SIZE, constants::MAX_LOG_FILE_COUNT as u16, ); let connection_logger = RollingLogger::create_new( log_folder.clone(), "ProxyAgent.Connection.log".to_string(), constants::MAX_LOG_FILE_SIZE, constants::MAX_LOG_FILE_COUNT as u16, ); let mut loggers = std::collections::HashMap::new(); loggers.insert(logger::AGENT_LOGGER_KEY.to_string(), agent_logger); loggers.insert( ConnectionLogger::CONNECTION_LOGGER_KEY.to_string(), connection_logger, ); logger_manager::set_loggers(loggers, logger::AGENT_LOGGER_KEY.to_string()); } /// Start the service and wait until the service is stopped. /// Example: /// ```rust /// use proxy_agent::service; /// service::start_service_wait(); /// ``` #[cfg(not(windows))] pub async fn start_service_wait() { let shared_state = SharedState::start_all(); start_service(shared_state).await; loop { // continue to sleep until the service is stopped tokio::time::sleep(Duration::from_secs(1)).await; } } /// Stop the service with the shared state. 
/// Example: /// ```rust /// use proxy_agent::service; /// use proxy_agent::shared_state::SharedState; /// use std::sync::{Arc, Mutex}; /// /// let shared_state = SharedState::new(); /// service::stop_service(shared_state); /// ``` pub fn stop_service(shared_state: SharedState) { logger::write_information(format!( "============== GuestProxyAgent is stopping, elapsed: {}", helpers::get_elapsed_time_in_millisec() )); shared_state.cancel_cancellation_token(); tokio::spawn({ let shared_state = shared_state.clone(); async move { redirector::close( shared_state.get_redirector_shared_state(), shared_state.get_agent_status_shared_state(), ) .await; } }); event_logger::stop(); } #[cfg(test)] mod tests { use ctor::{ctor, dtor}; use proxy_agent_shared::logger::LoggerLevel; use std::env; use std::fs; const TEST_LOGGER_KEY: &str = "proxy_agent_test"; fn get_temp_test_dir() -> std::path::PathBuf { let mut temp_test_path = env::temp_dir(); temp_test_path.push(TEST_LOGGER_KEY); temp_test_path } #[ctor] fn setup() { // Setup logger_manager for unit tests super::setup_loggers(get_temp_test_dir(), LoggerLevel::Trace); } #[dtor] fn cleanup() { // clean up and ignore the clean up errors _ = fs::remove_dir_all(&get_temp_test_dir()); } } GuestProxyAgent-1.0.30/proxy_agent/src/service/000077500000000000000000000000001500521614600214715ustar00rootroot00000000000000GuestProxyAgent-1.0.30/proxy_agent/src/service/windows.rs000066400000000000000000000075521500521614600235420ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT //! This module contains the logic to start the GPA service as a Windows service and hook up stop service control handler. //! The GPA service is implemented as a Windows service using the windows_service crate. //! It is started, stopped, and controlled by the Windows service manager. use crate::common::{constants, logger, result::Result}; use crate::{service, shared_state::SharedState}; use std::time::Duration; use windows_service::service::{ ServiceControl, ServiceControlAccept, ServiceExitCode, ServiceState, ServiceStatus, ServiceType, }; use windows_service::service_control_handler::{ self, ServiceControlHandlerResult, ServiceStatusHandle, }; // The private global variable to store the windows service status handle. // It is used to set the windows service status to Running and Stopped. // Its event handler does not support async + await, which it is not allow to get it via async mpsc. 
static SERVICE_STATUS_HANDLE: tokio::sync::OnceCell = tokio::sync::OnceCell::const_new(); pub async fn run_service() -> Result<()> { let shared_state = SharedState::start_all(); let cloned_shared_state = shared_state.clone(); let event_handler = move |control_event| -> ServiceControlHandlerResult { match control_event { ServiceControl::Stop => { service::stop_service(cloned_shared_state.clone()); if let Some(status_handle) = SERVICE_STATUS_HANDLE.get() { let stop_state = ServiceStatus { service_type: ServiceType::OWN_PROCESS, current_state: ServiceState::Stopped, controls_accepted: ServiceControlAccept::STOP, exit_code: ServiceExitCode::Win32(0), checkpoint: 0, wait_hint: Duration::default(), process_id: None, }; if let Err(e) = status_handle.set_service_status(stop_state) { logger::write_error(format!( "Failed to set service status to Stopped: {}", e )); } } else { // workaround to stop the service by exiting the process logger::write_warning( "Force exit the process to stop the service.".to_string(), ); std::process::exit(0); } ServiceControlHandlerResult::NoError } ServiceControl::Interrogate => ServiceControlHandlerResult::NoError, _ => ServiceControlHandlerResult::NotImplemented, } }; // start service service::start_service(shared_state.clone()).await; // set the service state to Running let status_handle = service_control_handler::register(constants::PROXY_AGENT_SERVICE_NAME, event_handler)?; let running_state = ServiceStatus { service_type: ServiceType::OWN_PROCESS, current_state: ServiceState::Running, controls_accepted: ServiceControlAccept::STOP, exit_code: ServiceExitCode::Win32(0), checkpoint: 0, wait_hint: Duration::default(), process_id: None, }; status_handle.set_service_status(running_state)?; // set the service failure actions if let Err(e) = proxy_agent_shared::service::set_default_failure_actions( constants::PROXY_AGENT_SERVICE_NAME, ) { logger::write_error(format!( "Failed to set service '{}' default failure actions with error: {}", constants::PROXY_AGENT_SERVICE_NAME, e )); } // set the windows service status handle SERVICE_STATUS_HANDLE.set(status_handle).unwrap(); Ok(()) } GuestProxyAgent-1.0.30/proxy_agent/src/shared_state.rs000066400000000000000000000102031500521614600230410ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT pub mod agent_status_wrapper; pub mod key_keeper_wrapper; pub mod provision_wrapper; pub mod proxy_server_wrapper; pub mod redirector_wrapper; pub mod telemetry_wrapper; use tokio_util::sync::CancellationToken; const UNKNOWN_STATUS_MESSAGE: &str = "Status unknown."; /// The shared state is used to share the state between different modules. /// It contains the cancellation token, which is used to cancel the agent when the agent is stopped. /// It also contains the senders for the key keeper, telemetry event, provision, agent status, redirector, and proxy server modules. /// The shared state is used to start the modules and get the senders for the modules. /// The shared state is used to get the cancellation token and cancel the cancellation token. 
/// Example: /// ```rust /// use proxy_agent::shared_state::SharedState; /// let shared_state = SharedState::start_all(); /// let key_keeper_shared_state = shared_state.get_key_keeper_shared_state(); /// let telemetry_shared_state = shared_state.get_telemetry_shared_state(); /// let provision_shared_state = shared_state.get_provision_shared_state(); /// let agent_status_shared_state = shared_state.get_agent_status_shared_state(); /// let redirector_shared_state = shared_state.get_redirector_shared_state(); /// let proxy_server_shared_state = shared_state.get_proxy_server_shared_state(); /// let cancellation_token = shared_state.get_cancellation_token(); /// shared_state.cancel_cancellation_token(); /// ``` #[derive(Clone)] pub struct SharedState { /// The cancellation token is used to cancel the agent when the agent is stopped cancellation_token: CancellationToken, /// The sender for the key keeper module key_keeper_shared_state: key_keeper_wrapper::KeyKeeperSharedState, /// The sender for the telemetry event modules telemetry_shared_state: telemetry_wrapper::TelemetrySharedState, /// The sender for the provision module provision_shared_state: provision_wrapper::ProvisionSharedState, /// The sender for the agent status module agent_status_shared_state: agent_status_wrapper::AgentStatusSharedState, /// The sender for the redirector module redirector_shared_state: redirector_wrapper::RedirectorSharedState, /// The sender for the proxy server module proxy_server_shared_state: proxy_server_wrapper::ProxyServerSharedState, } impl SharedState { pub fn start_all() -> Self { SharedState { cancellation_token: CancellationToken::new(), key_keeper_shared_state: key_keeper_wrapper::KeyKeeperSharedState::start_new(), telemetry_shared_state: telemetry_wrapper::TelemetrySharedState::start_new(), provision_shared_state: provision_wrapper::ProvisionSharedState::start_new(), agent_status_shared_state: agent_status_wrapper::AgentStatusSharedState::start_new(), redirector_shared_state: redirector_wrapper::RedirectorSharedState::start_new(), proxy_server_shared_state: proxy_server_wrapper::ProxyServerSharedState::start_new(), } } pub fn get_key_keeper_shared_state(&self) -> key_keeper_wrapper::KeyKeeperSharedState { self.key_keeper_shared_state.clone() } pub fn get_telemetry_shared_state(&self) -> telemetry_wrapper::TelemetrySharedState { self.telemetry_shared_state.clone() } pub fn get_provision_shared_state(&self) -> provision_wrapper::ProvisionSharedState { self.provision_shared_state.clone() } pub fn get_agent_status_shared_state(&self) -> agent_status_wrapper::AgentStatusSharedState { self.agent_status_shared_state.clone() } pub fn get_redirector_shared_state(&self) -> redirector_wrapper::RedirectorSharedState { self.redirector_shared_state.clone() } pub fn get_proxy_server_shared_state(&self) -> proxy_server_wrapper::ProxyServerSharedState { self.proxy_server_shared_state.clone() } pub fn get_cancellation_token(&self) -> CancellationToken { self.cancellation_token.clone() } pub fn cancel_cancellation_token(&self) { self.cancellation_token.cancel(); } } GuestProxyAgent-1.0.30/proxy_agent/src/shared_state/000077500000000000000000000000001500521614600224775ustar00rootroot00000000000000GuestProxyAgent-1.0.30/proxy_agent/src/shared_state/agent_status_wrapper.rs000066400000000000000000000720011500521614600273060ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT //! This module contains the logic to interact with the proxy agent status. //! 
The proxy agent status contains the 'state' and 'status message' of the key keeper, telemetry reader, telemetry logger, redirector, and proxy server modules. //! The proxy agent status contains the 'connection summary' of the proxy server. //! The proxy agent status contains the 'failed connection summary' of the proxy server. //! The proxy agent status contains the 'connection count' of the proxy server. //! Example //! ```rust //! use proxy_agent::shared_state::agent_status_wrapper::{AgentStatusModule, AgentStatusSharedState}; //! use proxy_agent_shared::proxy_agent_aggregate_status::ModuleState; //! use proxy_agent_shared::telemetry::event_logger; //! use std::collections::HashMap; //! use std::time::Duration; //! use tokio::time; //! //! #[tokio::main] //! async fn main() { //! let agent_status_shared_state = AgentStatusSharedState::start_new(); //! //! let module = AgentStatusModule::KeyKeeper; //! let state = ModuleState::RUNNING; //! let status_message = "KeyKeeper is running".to_string(); //! agent_status_shared_state.set_module_state(state.clone(), module.clone()).await.unwrap(); //! agent_status_shared_state.set_module_status_message(status_message.clone(), module.clone()).await.unwrap(); //! let get_state = agent_status_shared_state.get_module_state(module.clone()).await.unwrap(); //! let get_status_message = agent_status_shared_state.get_module_status_message(module.clone()).await.unwrap(); //! assert_eq!(state, get_state); //! assert_eq!(status_message, get_status_message); //! let connection_summary = ProxyConnectionSummary { //! count: 1, //! key: "key".to_string(), //! }; //! agent_status_shared_state.add_one_connection_summary(connection_summary.clone()).await.unwrap(); //! let get_all_connection_summary = agent_status_shared_state.get_all_connection_summary().await.unwrap(); //! assert_eq!(1, get_all_connection_summary.len()); //! assert_eq!(connection_summary, get_all_connection_summary[0]); //! //! let failed_connection_summary = ProxyConnectionSummary { //! count: 1, //! key: "key".to_string(), //! }; //! agent_status_shared_state.add_one_failed_connection_summary(failed_connection_summary.clone()).await.unwrap(); //! let get_all_failed_connection_summary = agent_status_shared_state.get_all_failed_connection_summary().await.unwrap(); //! assert_eq!(1, get_all_failed_connection_summary.len()); //! assert_eq!(failed_connection_summary, get_all_failed_connection_summary[0]); //! agent_status_shared_state.clear_all_summary().await.unwrap(); //! //! let get_connection_count = agent_status_shared_state.get_connection_count().await.unwrap(); //! assert_eq!(0, get_connection_count); //! agent_status_shared_state.increase_connection_count().await.unwrap(); //! let get_connection_count = agent_status_shared_state.get_connection_count().await.unwrap(); //! assert_eq!(1, get_connection_count); //! } //! 
``` use crate::common::logger; use crate::common::result::Result; use crate::{common::error::Error, proxy::proxy_summary::ProxySummary}; use proxy_agent_shared::logger::LoggerLevel; use proxy_agent_shared::proxy_agent_aggregate_status::{ ModuleState, ProxyAgentDetailStatus, ProxyConnectionSummary, }; use proxy_agent_shared::telemetry::event_logger; use std::collections::{hash_map, HashMap}; use tokio::sync::{mpsc, oneshot}; const MAX_STATUS_MESSAGE_LENGTH: usize = 1024; enum AgentStatusAction { SetStatusMessage { message: String, module: AgentStatusModule, response: oneshot::Sender, }, GetStatusMessage { module: AgentStatusModule, response: oneshot::Sender, }, SetState { state: ModuleState, module: AgentStatusModule, response: oneshot::Sender, }, GetState { module: AgentStatusModule, response: oneshot::Sender, }, AddOneConnectionSummary { summary: ProxySummary, response: oneshot::Sender<()>, }, AddOneFailedConnectionSummary { summary: ProxySummary, response: oneshot::Sender<()>, }, GetAllConnectionSummary { response: oneshot::Sender>, }, GetAllFailedConnectionSummary { response: oneshot::Sender>, }, ClearAllSummary { response: oneshot::Sender<()>, }, GetConnectionCount { response: oneshot::Sender, }, IncreaseConnectionCount { response: oneshot::Sender, }, IncreaseTcpConnectionCount { response: oneshot::Sender, }, } #[derive(Clone, Debug)] pub enum AgentStatusModule { KeyKeeper, TelemetryReader, TelemetryLogger, Redirector, ProxyServer, ProxyAgentStatus, } #[derive(Clone, Debug)] pub struct AgentStatusSharedState(mpsc::Sender); impl AgentStatusSharedState { pub fn start_new() -> Self { let (tx, mut rx) = mpsc::channel(100); tokio::spawn(async move { let mut key_keeper_state: ModuleState = ModuleState::UNKNOWN; let mut key_keeper_status_message: String = super::UNKNOWN_STATUS_MESSAGE.to_string(); let mut telemetry_reader_state = ModuleState::UNKNOWN; let mut telemetry_logger_state = ModuleState::UNKNOWN; let mut telemetry_reader_status_message = super::UNKNOWN_STATUS_MESSAGE.to_string(); let mut telemetry_logger_status_message = super::UNKNOWN_STATUS_MESSAGE.to_string(); let mut redirector_state = ModuleState::UNKNOWN; let mut redirector_status_message = super::UNKNOWN_STATUS_MESSAGE.to_string(); let mut proxy_server_state = ModuleState::UNKNOWN; let mut proxy_server_status_message = super::UNKNOWN_STATUS_MESSAGE.to_string(); let mut proxy_agent_status_state = ModuleState::UNKNOWN; let mut proxy_agent_status_message = super::UNKNOWN_STATUS_MESSAGE.to_string(); // The proxy connection summary from the proxy let mut proxy_summary: HashMap = HashMap::new(); // The failed authenticate summary from the proxy let mut failed_authenticate_summary: HashMap = HashMap::new(); // The proxied connection count for the listener let mut tcp_connection_count: u128 = 0; let mut http_connection_count: u128 = 0; while let Some(action) = rx.recv().await { match action { AgentStatusAction::SetStatusMessage { message, module, response, } => { let mut updated = true; match module { AgentStatusModule::KeyKeeper => { if key_keeper_status_message == message { updated = false; } else { key_keeper_status_message = message; } } AgentStatusModule::TelemetryReader => { if telemetry_reader_status_message == message { updated = false; } else { telemetry_reader_status_message = message; } } AgentStatusModule::TelemetryLogger => { if telemetry_logger_status_message == message { updated = false; } else { telemetry_logger_status_message = message; } } AgentStatusModule::Redirector => { if redirector_status_message == message { 
updated = false; } else { redirector_status_message = message; } } AgentStatusModule::ProxyServer => { if proxy_server_status_message == message { updated = false; } else { proxy_server_status_message = message; } } AgentStatusModule::ProxyAgentStatus => { if proxy_agent_status_message == message { updated = false; } else { proxy_agent_status_message = message; } } } if response.send(updated).is_err() { logger::write_warning(format!("Failed to send response to AgentStatusAction::SetStatusMessage for module {:?}", module)); } } AgentStatusAction::GetStatusMessage { module, response } => { let message = match module { AgentStatusModule::KeyKeeper => key_keeper_status_message.clone(), AgentStatusModule::TelemetryReader => { telemetry_reader_status_message.clone() } AgentStatusModule::TelemetryLogger => { telemetry_logger_status_message.clone() } AgentStatusModule::Redirector => redirector_status_message.clone(), AgentStatusModule::ProxyServer => proxy_server_status_message.clone(), AgentStatusModule::ProxyAgentStatus => { proxy_agent_status_message.clone() } }; if let Err(message) = response.send(message) { logger::write_warning(format!( "Failed to send response to AgentStatusAction::GetStatusMessage for module '{:?}' with message '{:?}'", module,message )); } } AgentStatusAction::SetState { state, module, response, } => { match module { AgentStatusModule::KeyKeeper => { key_keeper_state = state.clone(); } AgentStatusModule::TelemetryReader => { telemetry_reader_state = state.clone() } AgentStatusModule::TelemetryLogger => { telemetry_logger_state = state.clone(); } AgentStatusModule::Redirector => { redirector_state = state.clone(); } AgentStatusModule::ProxyServer => { proxy_server_state = state.clone(); } AgentStatusModule::ProxyAgentStatus => { proxy_agent_status_state = state.clone(); } } if let Err(state) = response.send(state) { logger::write_warning(format!("Failed to send response to AgentStatusAction::SetState '{:?}' for module '{:?}'", state, module)); } } AgentStatusAction::GetState { module, response } => { let state = match module { AgentStatusModule::KeyKeeper => key_keeper_state.clone(), AgentStatusModule::TelemetryReader => telemetry_reader_state.clone(), AgentStatusModule::TelemetryLogger => telemetry_logger_state.clone(), AgentStatusModule::Redirector => redirector_state.clone(), AgentStatusModule::ProxyServer => proxy_server_state.clone(), AgentStatusModule::ProxyAgentStatus => proxy_agent_status_state.clone(), }; if let Err(state) = response.send(state) { logger::write_warning(format!( "Failed to send response to AgentStatusAction::GetState for module '{:?}' with state '{:?}'", module,state )); } } AgentStatusAction::AddOneConnectionSummary { summary, response } => { let key = summary.to_key_string(); if let hash_map::Entry::Vacant(e) = proxy_summary.entry(key.clone()) { e.insert(summary.into()); } else if let Some(connection_summary) = proxy_summary.get_mut(&key) { //increase_count(connection_summary); connection_summary.count += 1; } if response.send(()).is_err() { logger::write_warning("Failed to send response to AgentStatusAction::AddOneConnectionSummary".to_string()); } } AgentStatusAction::AddOneFailedConnectionSummary { summary, response } => { let key = summary.to_key_string(); if let hash_map::Entry::Vacant(e) = failed_authenticate_summary.entry(key.clone()) { e.insert(summary.into()); } else if let Some(connection_summary) = failed_authenticate_summary.get_mut(&key) { //increase_count(connection_summary); connection_summary.count += 1; } if 
response.send(()).is_err() { logger::write_warning("Failed to send response to AgentStatusAction::AddOneFailedConnectionSummary".to_string()); } } AgentStatusAction::GetAllConnectionSummary { response } => { let mut copy_summary: Vec = Vec::new(); for (_, connection_summary) in proxy_summary.iter() { copy_summary.push(connection_summary.clone()); } if let Err(summary) = response.send(copy_summary) { logger::write_warning(format!( "Failed to send response to AgentStatusAction::GetAllConnectionSummary with summary count '{:?}'", summary.len() )); } } AgentStatusAction::GetAllFailedConnectionSummary { response } => { let mut copy_summary: Vec = Vec::new(); for (_, connection_summary) in failed_authenticate_summary.iter() { copy_summary.push(connection_summary.clone()); } if let Err(summary) = response.send(copy_summary) { logger::write_warning(format!( "Failed to send response to AgentStatusAction::GetAllFailedConnectionSummary with summary count '{:?}'", summary.len() )); } } AgentStatusAction::ClearAllSummary { response } => { proxy_summary.clear(); failed_authenticate_summary.clear(); if response.send(()).is_err() { logger::write_warning( "Failed to send response to AgentStatusAction::ClearAllSummary" .to_string(), ); } } AgentStatusAction::GetConnectionCount { response } => { if let Err(count) = response.send(http_connection_count) { logger::write_warning(format!( "Failed to send response to AgentStatusAction::GetConnectionCount with count '{:?}'", count )); } } AgentStatusAction::IncreaseConnectionCount { response } => { // if overflow, reset to 0 and continue increase the count http_connection_count = http_connection_count.overflowing_add(1).0; if let Err(count) = response.send(http_connection_count) { logger::write_warning(format!( "Failed to send response to AgentStatusAction::IncreaseConnectionCount with count '{:?}'", count )); } } AgentStatusAction::IncreaseTcpConnectionCount { response } => { // if overflow, reset to 0 and continue increase the count tcp_connection_count = tcp_connection_count.overflowing_add(1).0; if let Err(count) = response.send(tcp_connection_count) { logger::write_warning(format!( "Failed to send response to AgentStatusAction::IncreaseTcpConnectionCount with count '{:?}'", count )); } } } } }); AgentStatusSharedState(tx) } pub async fn add_one_connection_summary(&self, summary: ProxySummary) -> Result<()> { let (response_tx, response_rx) = oneshot::channel(); self.0 .send(AgentStatusAction::AddOneConnectionSummary { summary, response: response_tx, }) .await .map_err(|e| { Error::SendError( "AgentStatusAction::AddOneConnectionSummary".to_string(), e.to_string(), ) })?; response_rx.await.map_err(|e| { Error::RecvError("AgentStatusAction::AddOneConnectionSummary".to_string(), e) }) } pub async fn add_one_failed_connection_summary(&self, summary: ProxySummary) -> Result<()> { let (response_tx, response_rx) = oneshot::channel(); self.0 .send(AgentStatusAction::AddOneFailedConnectionSummary { summary, response: response_tx, }) .await .map_err(|e| { Error::SendError( "AgentStatusAction::AddOneFailedConnectionSummary".to_string(), e.to_string(), ) })?; response_rx.await.map_err(|e| { Error::RecvError( "AgentStatusAction::AddOneFailedConnectionSummary".to_string(), e, ) }) } pub async fn clear_all_summary(&self) -> Result<()> { let (response_tx, response_rx) = oneshot::channel(); self.0 .send(AgentStatusAction::ClearAllSummary { response: response_tx, }) .await .map_err(|e| { Error::SendError( "AgentStatusAction::ClearAllSummary".to_string(), e.to_string(), ) })?; 
response_rx .await .map_err(|e| Error::RecvError("AgentStatusAction::ClearAllSummary".to_string(), e))?; Ok(()) } pub async fn get_all_connection_summary(&self) -> Result> { let (response_tx, response_rx) = oneshot::channel(); self.0 .send(AgentStatusAction::GetAllConnectionSummary { response: response_tx, }) .await .map_err(|e| { Error::SendError( "AgentStatusAction::GetAllConnectionSummary".to_string(), e.to_string(), ) })?; response_rx.await.map_err(|e| { Error::RecvError("AgentStatusAction::GetAllConnectionSummary".to_string(), e) }) } pub async fn get_all_failed_connection_summary(&self) -> Result> { let (response_tx, response_rx) = oneshot::channel(); self.0 .send(AgentStatusAction::GetAllFailedConnectionSummary { response: response_tx, }) .await .map_err(|e| { Error::SendError( "AgentStatusAction::GetAllFailedConnectionSummary".to_string(), e.to_string(), ) })?; response_rx.await.map_err(|e| { Error::RecvError( "AgentStatusAction::GetAllFailedConnectionSummary".to_string(), e, ) }) } async fn get_module_state(&self, module: AgentStatusModule) -> Result { let (response_tx, response_rx) = oneshot::channel(); self.0 .send(AgentStatusAction::GetState { module: module.clone(), response: response_tx, }) .await .map_err(|e| { Error::SendError( format!("AgentStatusAction::GetState ({:?})", module), e.to_string(), ) })?; response_rx .await .map_err(|e| Error::RecvError(format!("AgentStatusAction::GetState ({:?})", module), e)) } pub async fn set_module_state( &self, state: ModuleState, module: AgentStatusModule, ) -> Result { let (response_tx, response_rx) = oneshot::channel(); self.0 .send(AgentStatusAction::SetState { state, module: module.clone(), response: response_tx, }) .await .map_err(|e| { Error::SendError( format!("AgentStatusAction::SetState ({:?})", module), e.to_string(), ) })?; response_rx .await .map_err(|e| Error::RecvError(format!("AgentStatusAction::SetState ({:?})", module), e)) } pub async fn get_module_status_message(&self, module: AgentStatusModule) -> Result { let (response_tx, response_rx) = oneshot::channel(); self.0 .send(AgentStatusAction::GetStatusMessage { module: module.clone(), response: response_tx, }) .await .map_err(|e| { Error::SendError( format!("AgentStatusAction::GetStatusMessage ({:?})", module), e.to_string(), ) })?; response_rx.await.map_err(|e| { Error::RecvError( format!("AgentStatusAction::GetStatusMessage ({:?})", module), e, ) }) } /// Set the status message for the module /// # Arguments /// * `message` - The status message /// * `module` - The module name /// # Returns /// * `bool` - True if the status message is updated, false if the status message is not updated /// * 'error' if the message is not sent successfully pub async fn set_module_status_message( &self, message: String, module: AgentStatusModule, ) -> Result { let (response_tx, response_rx) = oneshot::channel(); self.0 .send(AgentStatusAction::SetStatusMessage { message: message.to_string(), module: module.clone(), response: response_tx, }) .await .map_err(|e| { Error::SendError( format!("AgentStatusAction::SetStatusMessage ({:?})", module), e.to_string(), ) })?; let update = response_rx.await.map_err(|e| { Error::RecvError( format!("AgentStatusAction::SetStatusMessage ({:?})", module), e, ) })?; // Log the event if the status message is updated if update { event_logger::write_event( LoggerLevel::Warn, message, "set_module_status_message", &format!("{:?}", module), logger::AGENT_LOGGER_KEY, ); } Ok(update) } pub async fn get_module_status(&self, module: AgentStatusModule) -> 
ProxyAgentDetailStatus { let state = match self.get_module_state(module.clone()).await { Ok(state) => state, Err(e) => { logger::write_warning(format!("Error getting module '{:?}' status: {}", module, e)); ModuleState::UNKNOWN } }; let mut message = match self.get_module_status_message(module.clone()).await { Ok(message) => message, Err(e) => { logger::write_warning(format!( "Error getting module '{:?}' status message: {}", module, e )); super::UNKNOWN_STATUS_MESSAGE.to_string() } }; if message.len() > MAX_STATUS_MESSAGE_LENGTH { event_logger::write_event( LoggerLevel::Warn, format!( "Status message is too long, truncating to {} characters. Message: {}", MAX_STATUS_MESSAGE_LENGTH, message ), "get_status", &format!("{:?}", module), logger::AGENT_LOGGER_KEY, ); message = format!("{}...", &message[0..MAX_STATUS_MESSAGE_LENGTH]); } ProxyAgentDetailStatus { status: state, message, states: None, } } pub async fn get_connection_count(&self) -> Result { let (response_tx, response_rx) = oneshot::channel(); self.0 .send(AgentStatusAction::GetConnectionCount { response: response_tx, }) .await .map_err(|e| { Error::SendError( "AgentStatusAction::GetConnectionCount".to_string(), e.to_string(), ) })?; response_rx .await .map_err(|e| Error::RecvError("AgentStatusAction::GetConnectionCount".to_string(), e)) } pub async fn increase_connection_count(&self) -> Result { let (response_tx, response_rx) = oneshot::channel(); self.0 .send(AgentStatusAction::IncreaseConnectionCount { response: response_tx, }) .await .map_err(|e| { Error::SendError( "AgentStatusAction::IncreaseConnectionCount".to_string(), e.to_string(), ) })?; response_rx.await.map_err(|e| { Error::RecvError("AgentStatusAction::IncreaseConnectionCount".to_string(), e) }) } pub async fn increase_tcp_connection_count(&self) -> Result { let (response_tx, response_rx) = oneshot::channel(); self.0 .send(AgentStatusAction::IncreaseTcpConnectionCount { response: response_tx, }) .await .map_err(|e| { Error::SendError( "AgentStatusAction::IncreaseTcpConnectionCount".to_string(), e.to_string(), ) })?; response_rx.await.map_err(|e| { Error::RecvError( "AgentStatusAction::IncreaseTcpConnectionCount".to_string(), e, ) }) } } GuestProxyAgent-1.0.30/proxy_agent/src/shared_state/key_keeper_wrapper.rs000066400000000000000000000624651500521614600267450ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT /// The KeyKeeperState struct is used to send actions to the KeyKeeper module related shared state fields /// Example: /// ``` /// use crate::shared_state::key_keeper_wrapper::KeyKeeperState; /// use crate::key_keeper::key::Key; /// use crate::common::result::Result; /// use std::sync::Arc; /// use tokio::sync::Notify; /// /// #[tokio::main] /// async fn main() -> Result<()> { /// let key_keeper_state = KeyKeeperState::start_new(); /// let key = Key { /// key: "key".to_string(), /// guid: "guid".to_string(), /// incarnationId: 1, /// }; /// // set the set when the feature is enabled /// key_keeper_state.update_key(key).await?; /// let key = key_keeper_state.get_current_key_value().await?; /// let guid = key_keeper_state.get_current_key_guid().await?; /// let incarnation = key_keeper_state.get_current_key_incarnation().await?; /// let state = key_keeper_state.get_current_secure_channel_state().await?; /// let rule_id = key_keeper_state.get_wireserver_rule_id().await?; /// let rule_id = key_keeper_state.get_imds_rule_id().await?; /// let status_message = key_keeper_state.get_status_message().await?; /// /// // clear the 
key once the feature is disabled /// key_keeper_state.clear_key().await?; /// /// let notify = key_keeper_state.get_notify().await?; /// key_keeper_state.notify().await?; /// Ok(()) /// } /// ``` use crate::common::error::Error; use crate::common::result::Result; use crate::key_keeper::key::AuthorizationItem; use crate::proxy::authorization_rules::ComputedAuthorizationItem; use crate::{common::logger, key_keeper::key::Key}; use std::sync::Arc; use tokio::sync::{mpsc, oneshot, Notify}; /// The KeyKeeperAction enum represents the actions that can be performed on the KeyKeeper module enum KeyKeeperAction { SetKey { key: Option, response: oneshot::Sender<()>, }, GetKey { response: oneshot::Sender>, }, SetSecureChannelState { state: String, response: oneshot::Sender<()>, }, GetSecureChannelState { response: oneshot::Sender, }, SetWireServerRuleId { rule_id: String, response: oneshot::Sender<()>, }, GetWireServerRuleId { response: oneshot::Sender, }, SetImdsRuleId { rule_id: String, response: oneshot::Sender<()>, }, GetImdsRuleId { response: oneshot::Sender, }, GetHostGARuleId { response: oneshot::Sender, }, SetHostGARuleId { rule_id: String, response: oneshot::Sender<()>, }, SetWireServerRules { rules: Option, response: oneshot::Sender<()>, }, GetWireServerRules { response: oneshot::Sender>, }, SetImdsRules { rules: Option, response: oneshot::Sender<()>, }, GetImdsRules { response: oneshot::Sender>, }, SetHostGARules { rules: Option, response: oneshot::Sender<()>, }, GetHostGARules { response: oneshot::Sender>, }, GetNotify { response: oneshot::Sender>, }, } #[derive(Clone, Debug)] pub struct KeyKeeperSharedState(mpsc::Sender); impl KeyKeeperSharedState { pub fn start_new() -> Self { let (sender, mut receiver) = mpsc::channel(100); tokio::spawn(async move { // The key is used to compute signature for the data between the agent and the host endpoints let mut key = None; // The current secure channel state let mut current_secure_channel_state: String = crate::key_keeper::UNKNOWN_STATE.to_string(); // The rule ID for the WireServer endpoints let mut wireserver_rule_id: String = String::new(); // The rule ID for the IMDS endpoints let mut imds_rule_id: String = String::new(); // The rule ID for the HostGA endpoints let mut hostga_rule_id: String = String::new(); // The authorization rules for the WireServer endpoints let mut wireserver_rules: Option = None; // The authorization rules for the IMDS endpoints let mut imds_rules: Option = None; // The authorization rules for the HostGAPlugin endpoints let mut hostga_rules: Option = None; let notify = Arc::new(Notify::new()); loop { match receiver.recv().await { Some(KeyKeeperAction::SetKey { key: new_key, response, }) => { key = new_key.clone(); if response.send(()).is_err() { logger::write_warning(format!( "Failed to send response to KeyKeeperAction::SetKey with guid '{:?}'", new_key.map(|k| k.guid), )); } } Some(KeyKeeperAction::GetKey { response }) => { if let Err(key) = response.send(key.clone()) { logger::write_warning(format!( "Failed to send response to KeyKeeperAction::GetKey with guid '{:?}'", key.map(|e| e.guid) )); } } Some(KeyKeeperAction::SetSecureChannelState { state, response }) => { current_secure_channel_state = state.to_string(); if response.send(()).is_err() { logger::write_warning(format!( "Failed to send response to KeyKeeperAction::SetSecureChannelState '{}' ", state )); } } Some(KeyKeeperAction::GetSecureChannelState { response }) => { if let Err(state) = response.send(current_secure_channel_state.clone()) { 
logger::write_warning(format!( "Failed to send response to KeyKeeperAction::GetSecureChannelState '{}'", state )); } } Some(KeyKeeperAction::SetWireServerRuleId { rule_id, response }) => { wireserver_rule_id = rule_id.to_string(); if response.send(()).is_err() { logger::write_warning(format!( "Failed to send response to KeyKeeperAction::SetWireServerRuleId '{}'", rule_id )); } } Some(KeyKeeperAction::GetWireServerRuleId { response }) => { if let Err(rule_id) = response.send(wireserver_rule_id.clone()) { logger::write_warning(format!( "Failed to send response to KeyKeeperAction::GetWireServerRuleId '{}'", rule_id )); } } Some(KeyKeeperAction::SetImdsRuleId { rule_id, response }) => { imds_rule_id = rule_id.to_string(); if response.send(()).is_err() { logger::write_warning(format!( "Failed to send response to KeyKeeperAction::SetImdsRuleId '{}'", rule_id )); } } Some(KeyKeeperAction::GetImdsRuleId { response }) => { if let Err(rule_id) = response.send(imds_rule_id.clone()) { logger::write_warning(format!( "Failed to send response to KeyKeeperAction::GetImdsRuleId '{}'", rule_id )); } } Some(KeyKeeperAction::GetHostGARuleId { response }) => { if let Err(rule_id) = response.send(hostga_rule_id.clone()) { logger::write_warning(format!( "Failed to send response to KeyKeeperAction::GetHostGARuleId '{}'", rule_id )); } } Some(KeyKeeperAction::SetHostGARuleId { rule_id, response }) => { hostga_rule_id = rule_id.to_string(); if response.send(()).is_err() { logger::write_warning(format!( "Failed to send response to KeyKeeperAction::SetHostGARuleId '{}'", rule_id )); } } Some(KeyKeeperAction::SetWireServerRules { rules, response }) => { wireserver_rules = rules; if response.send(()).is_err() { logger::write_warning( "Failed to send response to KeyKeeperAction::SetWireServerRules" .to_string(), ); } } Some(KeyKeeperAction::GetWireServerRules { response }) => { if response.send(wireserver_rules.clone()).is_err() { logger::write_warning( "Failed to send response to KeyKeeperAction::GetWireServerRules" .to_string(), ); } } Some(KeyKeeperAction::SetImdsRules { rules, response }) => { imds_rules = rules; if response.send(()).is_err() { logger::write_warning( "Failed to send response to KeyKeeperAction::SetImdsRules" .to_string(), ); } } Some(KeyKeeperAction::GetImdsRules { response }) => { if response.send(imds_rules.clone()).is_err() { logger::write_warning( "Failed to send response to KeyKeeperAction::GetImdsRules" .to_string(), ); } } Some(KeyKeeperAction::SetHostGARules { rules, response }) => { hostga_rules = rules; if response.send(()).is_err() { logger::write_warning( "Failed to send response to KeyKeeperAction::SetHostGARules" .to_string(), ); } } Some(KeyKeeperAction::GetHostGARules { response }) => { if response.send(hostga_rules.clone()).is_err() { logger::write_warning( "Failed to send response to KeyKeeperAction::GetHostGARules" .to_string(), ); } } Some(KeyKeeperAction::GetNotify { response }) => { if response.send(notify.clone()).is_err() { logger::write_warning( "Failed to send response to KeyKeeperAction::GetNotify".to_string(), ); } } None => break, } } }); Self(sender) } async fn set_key(&self, key: Option) -> Result<()> { let (response, receiver) = oneshot::channel(); self.0 .send(KeyKeeperAction::SetKey { key, response }) .await .map_err(|e| Error::SendError("KeyKeeperAction::SetKey".to_string(), e.to_string()))?; receiver .await .map_err(|e| Error::RecvError("KeyKeeperAction::SetKey".to_string(), e)) } async fn get_key(&self) -> Result> { let (response, receiver) = oneshot::channel(); 
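// Forward the request to the key keeper actor task; the reply arrives on the oneshot receiver.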
self.0 .send(KeyKeeperAction::GetKey { response }) .await .map_err(|e| Error::SendError("KeyKeeperAction::GetKey".to_string(), e.to_string()))?; receiver .await .map_err(|e| Error::RecvError("KeyKeeperAction::GetKey".to_string(), e)) } pub async fn update_key(&self, key: Key) -> Result<()> { self.set_key(Some(key)).await } pub async fn clear_key(&self) -> Result<()> { self.set_key(None).await } pub async fn get_current_key_value(&self) -> Result> { match self.get_key().await { Ok(Some(k)) => Ok(Some(k.key)), Ok(None) => Ok(None), Err(e) => Err(e), } } pub async fn get_current_key_guid(&self) -> Result> { match self.get_key().await { Ok(Some(k)) => Ok(Some(k.guid)), Ok(None) => Ok(None), Err(e) => Err(e), } } pub async fn get_current_key_incarnation(&self) -> Result> { match self.get_key().await { Ok(Some(k)) => Ok(k.incarnationId), Ok(None) => Ok(None), Err(e) => Err(e), } } /// Update the current secure channel state /// # Arguments /// * `state` - String /// # Returns /// * `bool` - true if the state is update successfully /// * - false if state is the same as the current state /// * `Error` - Error if the state is not read or updated successfully pub async fn update_current_secure_channel_state(&self, state: String) -> Result { let current_state = self.get_current_secure_channel_state().await?; if current_state == state { Ok(false) } else { self.set_secure_channel_state(state).await?; Ok(true) } } async fn set_secure_channel_state(&self, state: String) -> Result<()> { let (response, receiver) = oneshot::channel(); self.0 .send(KeyKeeperAction::SetSecureChannelState { state, response }) .await .map_err(|e| { Error::SendError( "KeyKeeperAction::SetSecureChannelState".to_string(), e.to_string(), ) })?; receiver .await .map_err(|e| Error::RecvError("KeyKeeperAction::SetSecureChannelState".to_string(), e)) } pub async fn get_current_secure_channel_state(&self) -> Result { let (response, receiver) = oneshot::channel(); self.0 .send(KeyKeeperAction::GetSecureChannelState { response }) .await .map_err(|e| { Error::SendError( "KeyKeeperAction::GetSecureChannelState".to_string(), e.to_string(), ) })?; receiver .await .map_err(|e| Error::RecvError("KeyKeeperAction::GetSecureChannelState".to_string(), e)) } /// Update the WireServer rule ID /// # Arguments /// * `rule_id` - String /// # Returns /// * `bool` - true if the rule ID is update successfully /// * - false if rule ID is the same as the current state /// * `String` - the rule Id before the update operation /// * `Error` - Error if the rule ID is not read or updated successfully pub async fn update_wireserver_rule_id(&self, rule_id: String) -> Result<(bool, String)> { let old_rule_id = self.get_wireserver_rule_id().await?; if old_rule_id == rule_id { Ok((false, old_rule_id)) } else { self.set_wireserver_rule_id(rule_id).await?; Ok((true, old_rule_id)) } } async fn set_wireserver_rule_id(&self, rule_id: String) -> Result<()> { let (response, receiver) = oneshot::channel(); self.0 .send(KeyKeeperAction::SetWireServerRuleId { rule_id, response }) .await .map_err(|e| { Error::SendError( "KeyKeeperAction::SetWireServerRuleId".to_string(), e.to_string(), ) })?; receiver .await .map_err(|e| Error::RecvError("KeyKeeperAction::SetWireServerRuleId".to_string(), e)) } pub async fn get_wireserver_rule_id(&self) -> Result { let (response, receiver) = oneshot::channel(); self.0 .send(KeyKeeperAction::GetWireServerRuleId { response }) .await .map_err(|e| { Error::SendError( "KeyKeeperAction::GetWireServerRuleId".to_string(), e.to_string(), ) })?; receiver 
.await .map_err(|e| Error::RecvError("KeyKeeperAction::GetWireServerRuleId".to_string(), e)) } pub async fn get_imds_rule_id(&self) -> Result { let (response, receiver) = oneshot::channel(); self.0 .send(KeyKeeperAction::GetImdsRuleId { response }) .await .map_err(|e| { Error::SendError("KeyKeeperAction::GetImdsRuleId".to_string(), e.to_string()) })?; receiver .await .map_err(|e| Error::RecvError("KeyKeeperAction::GetImdsRuleId".to_string(), e)) } async fn set_imds_rule_id(&self, rule_id: String) -> Result<()> { let (response, receiver) = oneshot::channel(); self.0 .send(KeyKeeperAction::SetImdsRuleId { rule_id, response }) .await .map_err(|e| { Error::SendError("KeyKeeperAction::SetImdsRuleId".to_string(), e.to_string()) })?; receiver .await .map_err(|e| Error::RecvError("KeyKeeperAction::SetImdsRuleId".to_string(), e)) } /// Update the IMDS rule ID /// # Arguments /// * `rule_id` - String /// # Returns /// * `bool` - true if the rule ID is update successfully /// * `String` - the rule Id before the update operation /// * `Error` - Error if the rule ID is not read or updated successfully pub async fn update_imds_rule_id(&self, rule_id: String) -> Result<(bool, String)> { let old_rule_id = self.get_imds_rule_id().await?; if old_rule_id == rule_id { Ok((false, old_rule_id)) } else { self.set_imds_rule_id(rule_id).await?; Ok((true, old_rule_id)) } } pub async fn get_hostga_rule_id(&self) -> Result { let (response, receiver) = oneshot::channel(); self.0 .send(KeyKeeperAction::GetHostGARuleId { response }) .await .map_err(|e| { Error::SendError( "KeyKeeperAction::GetHostGARuleId".to_string(), e.to_string(), ) })?; receiver .await .map_err(|e| Error::RecvError("KeyKeeperAction::GetHostGARuleId".to_string(), e)) } async fn set_hostga_rule_id(&self, rule_id: String) -> Result<()> { let (response, receiver) = oneshot::channel(); self.0 .send(KeyKeeperAction::SetHostGARuleId { rule_id, response }) .await .map_err(|e| { Error::SendError( "KeyKeeperAction::SetHostGARuleId".to_string(), e.to_string(), ) })?; receiver .await .map_err(|e| Error::RecvError("KeyKeeperAction::SetHostGARuleId".to_string(), e)) } /// Update the HostGA rule ID /// # Arguments /// * `rule_id` - String /// # Returns /// * `bool` - true if the rule ID is update successfully /// * - false if rule ID is the same as the current state /// * `String` - the rule Id before the update operation /// * `Error` - Error if the rule ID is not read or updated successfully pub async fn update_hostga_rule_id(&self, rule_id: String) -> Result<(bool, String)> { let old_rule_id = self.get_hostga_rule_id().await?; if old_rule_id == rule_id { Ok((false, old_rule_id)) } else { self.set_hostga_rule_id(rule_id).await?; Ok((true, old_rule_id)) } } pub async fn set_wireserver_rules(&self, rules: Option) -> Result<()> { let (response, receiver) = oneshot::channel(); self.0 .send(KeyKeeperAction::SetWireServerRules { rules: rules.map(ComputedAuthorizationItem::from_authorization_item), response, }) .await .map_err(|e| { Error::SendError( "KeyKeeperAction::SetWireServerRules".to_string(), e.to_string(), ) })?; receiver .await .map_err(|e| Error::RecvError("KeyKeeperAction::SetWireServerRules".to_string(), e)) } pub async fn get_wireserver_rules(&self) -> Result> { let (response, receiver) = oneshot::channel(); self.0 .send(KeyKeeperAction::GetWireServerRules { response }) .await .map_err(|e| { Error::SendError( "KeyKeeperAction::GetWireServerRules".to_string(), e.to_string(), ) })?; receiver .await .map_err(|e| 
Error::RecvError("KeyKeeperAction::GetWireServerRules".to_string(), e)) } pub async fn set_imds_rules(&self, rules: Option) -> Result<()> { let (response, receiver) = oneshot::channel(); self.0 .send(KeyKeeperAction::SetImdsRules { rules: rules.map(ComputedAuthorizationItem::from_authorization_item), response, }) .await .map_err(|e| { Error::SendError("KeyKeeperAction::SetImdsRules".to_string(), e.to_string()) })?; receiver .await .map_err(|e| Error::RecvError("KeyKeeperAction::SetImdsRules".to_string(), e)) } pub async fn get_imds_rules(&self) -> Result> { let (response, receiver) = oneshot::channel(); self.0 .send(KeyKeeperAction::GetImdsRules { response }) .await .map_err(|e| { Error::SendError("KeyKeeperAction::GetImdsRules".to_string(), e.to_string()) })?; receiver .await .map_err(|e| Error::RecvError("KeyKeeperAction::GetImdsRules".to_string(), e)) } pub async fn set_hostga_rules(&self, rules: Option) -> Result<()> { let (response, receiver) = oneshot::channel(); self.0 .send(KeyKeeperAction::SetHostGARules { rules: rules.map(ComputedAuthorizationItem::from_authorization_item), response, }) .await .map_err(|e| { Error::SendError("KeyKeeperAction::SetHostGARules".to_string(), e.to_string()) })?; receiver .await .map_err(|e| Error::RecvError("KeyKeeperAction::SetHostGARules".to_string(), e)) } pub async fn get_hostga_rules(&self) -> Result> { let (response, receiver) = oneshot::channel(); self.0 .send(KeyKeeperAction::GetHostGARules { response }) .await .map_err(|e| { Error::SendError("KeyKeeperAction::GetHostGARules".to_string(), e.to_string()) })?; receiver .await .map_err(|e| Error::RecvError("KeyKeeperAction::GetHostGARules".to_string(), e)) } pub async fn get_notify(&self) -> Result> { let (response, receiver) = oneshot::channel(); self.0 .send(KeyKeeperAction::GetNotify { response }) .await .map_err(|e| { Error::SendError("KeyKeeperAction::GetNotify".to_string(), e.to_string()) })?; receiver .await .map_err(|e| Error::RecvError("KeyKeeperAction::GetNotify".to_string(), e)) } pub async fn notify(&self) -> Result<()> { let notify = self.get_notify().await?; notify.notify_one(); Ok(()) } } GuestProxyAgent-1.0.30/proxy_agent/src/shared_state/provision_wrapper.rs000066400000000000000000000276561500521614600266550ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT //! This module contains the logic to interact with the provision state of the GPA service. //! The provision state is a bit field, which is used to store the provision state of the GPA service. //! It contains the provision state, event log threads initialized, and provision finished. //! //! Example //! ```rust //! use proxy_agent::shared_state::provision_wrapper::ProvisionSharedState; //! use proxy_agent::provision::ProvisionFlags; //! //! let provision_shared_state = ProvisionSharedState::start_new(); //! let state = ProvisionFlags::REDIRECTOR_READY|ProvisionFlags::PROXY_SERVER_READY; //! let updated_state = provision_shared_state.update_one_state(state).await.unwrap(); //! assert_eq!(updated_state, state); //! let reset_state = provision_shared_state.reset_one_state(ProvisionFlags::REDIRECTOR_READY).await.unwrap(); //! assert_eq!(reset_state, ProvisionFlags::PROXY_SERVER_READY); //! let get_state = provision_shared_state.get_state().await.unwrap(); //! let set_event_log_threads_initialized = provision_shared_state.set_event_log_threads_initialized().await.unwrap(); //! 
let get_event_log_threads_initialized = provision_shared_state.get_event_log_threads_initialized().await.unwrap(); //! assert_eq!(get_event_log_threads_initialized, true); //! let set_provision_finished = provision_shared_state.set_provision_finished(true).await.unwrap(); //! let get_provision_finished = provision_shared_state.get_provision_finished().await.unwrap(); //! assert_eq!(get_provision_finished, true); //! let _= provision_shared_state.set_provision_finished(false).await.unwrap(); //! let get_provision_finished = provision_shared_state.get_provision_finished().await.unwrap(); //! assert_eq!(get_provision_finished, false); //! ``` use crate::common::error::Error; use crate::common::logger; use crate::common::result::Result; use crate::provision::ProvisionFlags; use proxy_agent_shared::misc_helpers; use tokio::sync::{mpsc, oneshot}; enum ProvisionAction { UpdateState { state: ProvisionFlags, response: oneshot::Sender, }, ResetState { state: ProvisionFlags, response: oneshot::Sender, }, GetState { response: oneshot::Sender, }, SetEventLogThreadsInitialized { response: oneshot::Sender<()>, }, GetEventLogsThreadsInitialized { response: oneshot::Sender, }, SetProvisionFinished { finished: bool, response: oneshot::Sender, }, GetProvisionFinished { response: oneshot::Sender, }, } #[derive(Clone, Debug)] pub struct ProvisionSharedState(mpsc::Sender); impl ProvisionSharedState { pub fn start_new() -> Self { let (tx, mut rx) = mpsc::channel(100); tokio::spawn(async move { // The provision state, it is a bitflag field let mut provision_state: ProvisionFlags = ProvisionFlags::NONE; // The flag to indicate if the event log threads are initialized let mut provision_event_log_threads_initialized: bool = false; // It indicate the time_tick when GPA service provision is finished, 0 means not finished let mut provision_finished_time_tick: i128 = 0; while let Some(action) = rx.recv().await { match action { ProvisionAction::UpdateState { state, response } => { provision_state |= state; if let Err(new_state) = response.send(provision_state.clone()) { logger::write_warning(format!( "Failed to send response to ProvisionAction::UpdateState with new state '{:?}'", new_state )); } } ProvisionAction::ResetState { state, response } => { provision_state &= !state; if let Err(new_state) = response.send(provision_state.clone()) { logger::write_warning(format!( "Failed to send response to ProvisionAction::ResetState with new state '{:?}'", new_state )); } } ProvisionAction::GetState { response } => { if let Err(state) = response.send(provision_state.clone()) { logger::write_warning(format!( "Failed to send response to ProvisionAction::GetState with state '{:?}'", state )); } } ProvisionAction::SetEventLogThreadsInitialized { response } => { provision_event_log_threads_initialized = true; if response.send(()).is_err() { logger::write_warning("Failed to send response to ProvisionAction::SetEventLogThreadsInitialized".to_string()); } } ProvisionAction::GetEventLogsThreadsInitialized { response } => { if let Err(initialized) = response.send(provision_event_log_threads_initialized) { logger::write_warning(format!( "Failed to send response to ProvisionAction::GetEventLogsThreadsInitialized with initialized '{:?}'", initialized )); } } ProvisionAction::SetProvisionFinished { finished, response } => { if finished { provision_finished_time_tick = misc_helpers::get_date_time_unix_nano(); } else { provision_finished_time_tick = 0; } if response.send(provision_finished_time_tick).is_err() { logger::write_warning( "Failed to 
send response to ProvisionAction::SetProvisionFinished" .to_string(), ); } } ProvisionAction::GetProvisionFinished { response } => { if let Err(finished) = response.send(provision_finished_time_tick) { logger::write_warning(format!( "Failed to send response to ProvisionAction::GetProvisionFinished with finished '{:?}'", finished )); } } } } }); ProvisionSharedState(tx) } /// Update the one of the provision state /// # Arguments /// * `state` - ProvisionFlags /// # Returns /// * `ProvisionFlags` - the updated provision state /// # Errors - SendError, RecvError /// # Remarks /// * The provision state is a bit field, the state is updated by OR operation pub async fn update_one_state(&self, state: ProvisionFlags) -> Result { let (tx, rx) = oneshot::channel(); self.0 .send(ProvisionAction::UpdateState { state, response: tx, }) .await .map_err(|e| { Error::SendError("ProvisionAction::UpdateState".to_string(), e.to_string()) })?; rx.await .map_err(|e| Error::RecvError("ProvisionAction::UpdateState".to_string(), e)) } /// Reset the provision state /// # Arguments /// * `state` - ProvisionFlags to reset/remove from the provision state /// # Returns /// * `ProvisionFlags` - the updated provision state /// # Errors - SendError, RecvError /// # Remarks /// * The provision state is a bit field, the state is updated by AND & NOT operation pub async fn reset_one_state(&self, state: ProvisionFlags) -> Result { let (tx, rx) = oneshot::channel(); self.0 .send(ProvisionAction::ResetState { state, response: tx, }) .await .map_err(|e| { Error::SendError("ProvisionAction::ResetState".to_string(), e.to_string()) })?; rx.await .map_err(|e| Error::RecvError("ProvisionAction::ResetState".to_string(), e)) } pub async fn get_state(&self) -> Result { let (tx, rx) = oneshot::channel(); self.0 .send(ProvisionAction::GetState { response: tx }) .await .map_err(|e| { Error::SendError("ProvisionAction::GetState".to_string(), e.to_string()) })?; rx.await .map_err(|e| Error::RecvError("ProvisionAction::GetState".to_string(), e)) } pub async fn set_event_log_threads_initialized(&self) -> Result<()> { let (tx, rx) = oneshot::channel(); self.0 .send(ProvisionAction::SetEventLogThreadsInitialized { response: tx }) .await .map_err(|e| { Error::SendError( "ProvisionAction::SetEventLogThreadsInitialized".to_string(), e.to_string(), ) })?; rx.await.map_err(|e| { Error::RecvError( "ProvisionAction::SetEventLogThreadsInitialized".to_string(), e, ) }) } pub async fn get_event_log_threads_initialized(&self) -> Result { let (tx, rx) = oneshot::channel(); self.0 .send(ProvisionAction::GetEventLogsThreadsInitialized { response: tx }) .await .map_err(|e| { Error::SendError( "ProvisionAction::GetEventLogsThreadsInitialized".to_string(), e.to_string(), ) })?; rx.await.map_err(|e| { Error::RecvError( "ProvisionAction::GetEventLogsThreadsInitialized".to_string(), e, ) }) } /// Set the provision finished state /// # Arguments /// * `finished` - bool, true means provision finished, false means provision not finished /// # Returns /// * `i128` - the time_tick when the provision finished, 0 means not finished /// # Errors - SendError, RecvError pub async fn set_provision_finished(&self, finished: bool) -> Result { let (tx, rx) = oneshot::channel(); self.0 .send(ProvisionAction::SetProvisionFinished { finished, response: tx, }) .await .map_err(|e| { Error::SendError( "ProvisionAction::SetProvisionFinished".to_string(), e.to_string(), ) })?; rx.await .map_err(|e| Error::RecvError("ProvisionAction::SetProvisionFinished".to_string(), e)) } /// Get the 
provision finished state /// # Returns /// * `i128` - the time_tick when the provision finished, 0 means not finished /// # Errors - SendError, RecvError pub async fn get_provision_finished(&self) -> Result { let (tx, rx) = oneshot::channel(); self.0 .send(ProvisionAction::GetProvisionFinished { response: tx }) .await .map_err(|e| { Error::SendError( "ProvisionAction::GetProvisionFinished".to_string(), e.to_string(), ) })?; rx.await .map_err(|e| Error::RecvError("ProvisionAction::GetProvisionFinished".to_string(), e)) } } GuestProxyAgent-1.0.30/proxy_agent/src/shared_state/proxy_server_wrapper.rs000066400000000000000000000123461500521614600273620ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT //! This module contains the logic to interact with the proxy server. //! The proxy server is used to store the users information. //! //! Example //! ```rust //! use proxy_agent::shared_state::proxy_server_wrapper::ProxyServerSharedState; //! use proxy_agent::proxy::User; //! //! let proxy_server_shared_state = ProxyServerSharedState::start_new(); //! let user = User::new(1, "user1".to_string()); //! proxy_server_shared_state.add_user(user).await.unwrap(); //! let user = proxy_server_shared_state.get_user(1).await.unwrap().unwrap(); //! assert_eq!(user.user_name, "user1"); //! ``` use crate::common::error::Error; use crate::common::logger; use crate::common::result::Result; use crate::proxy::User; use std::collections::HashMap; use tokio::sync::{mpsc, oneshot}; enum ProxyServerAction { AddUser { user: User, response: oneshot::Sender<()>, }, GetUser { user_id: u64, response: oneshot::Sender>, }, #[cfg(test)] GetUsersCount { response: oneshot::Sender, }, ClearUsers { response: oneshot::Sender<()>, }, } #[derive(Clone, Debug)] pub struct ProxyServerSharedState(mpsc::Sender); impl ProxyServerSharedState { pub fn start_new() -> Self { let (tx, mut rx) = mpsc::channel(100); tokio::spawn(async move { let mut users: HashMap = HashMap::new(); while let Some(action) = rx.recv().await { match action { ProxyServerAction::AddUser { user, response } => { let id = user.logon_id; users.insert(id, user); if response.send(()).is_err() { logger::write_warning(format!("Failed to send response to ProxyServerAction::AddUser with id '{}'", id)); } } ProxyServerAction::GetUser { user_id, response } => { let user = users.get(&user_id).cloned(); if response.send(user).is_err() { logger::write_warning(format!("Failed to send response to ProxyServerAction::GetUser with id '{}'", user_id)); } } #[cfg(test)] ProxyServerAction::GetUsersCount { response } => { if response.send(users.len()).is_err() { logger::write_warning( "Failed to send response to ProxyServerAction::GetUsersCount" .to_string(), ); } } ProxyServerAction::ClearUsers { response } => { users.clear(); if response.send(()).is_err() { logger::write_warning( "Failed to send response to ProxyServerAction::ClearUsers" .to_string(), ); } } } } }); ProxyServerSharedState(tx) } pub async fn add_user(&self, user: User) -> Result<()> { let (tx, rx) = oneshot::channel(); self.0 .send(ProxyServerAction::AddUser { user, response: tx }) .await .map_err(|e| { Error::SendError("ProxyServerAction::AddUser".to_string(), e.to_string()) })?; rx.await .map_err(|e| Error::RecvError("ProxyServerAction::AddUser".to_string(), e)) } pub async fn get_user(&self, user_id: u64) -> Result> { let (tx, rx) = oneshot::channel(); self.0 .send(ProxyServerAction::GetUser { user_id, response: tx, }) .await .map_err(|e| { 
Error::SendError("ProxyServerAction::GetUser".to_string(), e.to_string()) })?; rx.await .map_err(|e| Error::RecvError("ProxyServerAction::GetUser".to_string(), e)) } #[cfg(test)] pub async fn get_users_count(&self) -> Result { let (tx, rx) = oneshot::channel(); self.0 .send(ProxyServerAction::GetUsersCount { response: tx }) .await .map_err(|e| { Error::SendError( "ProxyServerAction::GetUsersCount".to_string(), e.to_string(), ) })?; rx.await .map_err(|e| Error::RecvError("ProxyServerAction::GetUsersCount".to_string(), e)) } // TODO:: need caller to refresh the users info regularly pub async fn clear_users(&self) -> Result<()> { let (tx, rx) = oneshot::channel(); self.0 .send(ProxyServerAction::ClearUsers { response: tx }) .await .map_err(|e| { Error::SendError("ProxyServerAction::ClearUsers".to_string(), e.to_string()) })?; rx.await .map_err(|e| Error::RecvError("ProxyServerAction::ClearUsers".to_string(), e)) } } GuestProxyAgent-1.0.30/proxy_agent/src/shared_state/redirector_wrapper.rs000066400000000000000000000150701500521614600267520ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT //! This module contains the logic to interact with the eBPF/redirector. //! The redirector is used to redirect the traffic to the proxy server. //! The eBPF is used to filter the traffic and redirect the traffic to the proxy server. //! The redirector is used to set the local port, get the local port, set the eBPF object, get the eBPF object. //! Example //! ```rust //! use proxy_agent::shared_state::redirector_wrapper::RedirectorSharedState; //! use proxy_agent::redirector::BpfObject; //! use std::sync::{Arc, Mutex}; //! //! let redirector_shared_state = RedirectorSharedState::start_new(); //! let local_port = redirector_shared_state.get_local_port().await.unwrap(); //! redirector_shared_state.set_local_port(80).await.unwrap(); //! let bpf_object = Arc::new(Mutex::new(BpfObject::new())); //! redirector_shared_state.update_bpf_object(bpf_object.clone()).await.unwrap(); //! let bpf_object = redirector_shared_state.get_bpf_object().await.unwrap().unwrap(); //! 
``` use crate::common::error::Error; use crate::common::logger; use crate::common::result::Result; use crate::redirector; use std::sync::{Arc, Mutex}; use tokio::sync::{mpsc, oneshot}; enum RedirectorAction { SetLocalPort { local_port: u16, response: oneshot::Sender<()>, }, GetLocalPort { response: oneshot::Sender, }, SetBpfObject { bpf_object: Option>>, response: oneshot::Sender<()>, }, GetBpfObject { response: oneshot::Sender>>>, }, } #[derive(Clone, Debug)] pub struct RedirectorSharedState(mpsc::Sender); impl RedirectorSharedState { pub fn start_new() -> Self { let (tx, mut rx) = mpsc::channel(100); tokio::spawn(async move { let mut local_port: u16 = 0; let mut bpf_object: Option>> = None; while let Some(action) = rx.recv().await { match action { RedirectorAction::SetLocalPort { local_port: new_local_port, response, } => { local_port = new_local_port; if response.send(()).is_err() { logger::write_warning(format!( "Failed to send response to RedirectorAction::SetLocalPort '{}'", new_local_port )); } } RedirectorAction::GetLocalPort { response } => { if let Err(port) = response.send(local_port) { logger::write_warning(format!( "Failed to send response to RedirectorAction::GetLocalPort '{}'", port )); } } RedirectorAction::SetBpfObject { bpf_object: new_bpf_object, response, } => { bpf_object = new_bpf_object; if response.send(()).is_err() { logger::write_warning( "Failed to send response to RedirectorAction::SetBpfObject" .to_string(), ); } } RedirectorAction::GetBpfObject { response } => { if response.send(bpf_object.clone()).is_err() { logger::write_warning( "Failed to send response to RedirectorAction::GetBpfObject" .to_string(), ); } } } } }); RedirectorSharedState(tx) } pub async fn set_local_port(&self, local_port: u16) -> Result<()> { let (response_tx, response_rx) = oneshot::channel(); self.0 .send(RedirectorAction::SetLocalPort { local_port, response: response_tx, }) .await .map_err(|e| { Error::SendError("RedirectorAction::SetLocalPort".to_string(), e.to_string()) })?; response_rx .await .map_err(|e| Error::RecvError("RedirectorAction::SetLocalPort".to_string(), e)) } pub async fn get_local_port(&self) -> Result { let (response_tx, response_rx) = oneshot::channel(); self.0 .send(RedirectorAction::GetLocalPort { response: response_tx, }) .await .map_err(|e| { Error::SendError("RedirectorAction::GetLocalPort".to_string(), e.to_string()) })?; response_rx .await .map_err(|e| Error::RecvError("RedirectorAction::GetLocalPort".to_string(), e)) } async fn set_bpf_object( &self, bpf_object: Option>>, ) -> Result<()> { let (response_tx, response_rx) = oneshot::channel(); self.0 .send(RedirectorAction::SetBpfObject { bpf_object, response: response_tx, }) .await .map_err(|e| { Error::SendError("RedirectorAction::SetBpfObject".to_string(), e.to_string()) })?; response_rx .await .map_err(|e| Error::RecvError("RedirectorAction::SetBpfObject".to_string(), e)) } pub async fn update_bpf_object( &self, bpf_object: Arc>, ) -> Result<()> { self.set_bpf_object(Some(bpf_object)).await } pub async fn clear_bpf_object(&self) -> Result<()> { self.set_bpf_object(None).await } pub async fn get_bpf_object(&self) -> Result>>> { let (response_tx, response_rx) = oneshot::channel(); self.0 .send(RedirectorAction::GetBpfObject { response: response_tx, }) .await .map_err(|e| { Error::SendError("RedirectorAction::GetBpfObject".to_string(), e.to_string()) })?; response_rx .await .map_err(|e| Error::RecvError("RedirectorAction::GetBpfObject".to_string(), e)) } } 
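All of the *SharedState wrappers above (key keeper, provision, proxy server, redirector, telemetry) share the same actor shape: the state is owned by a single spawned tokio task, and callers hold a cheaply cloneable mpsc sender through which they pass an action carrying a oneshot response channel. Below is a minimal, self-contained sketch of that pattern under hypothetical names (CounterAction / CounterSharedState); it assumes only the tokio runtime already used by this crate and is an illustration of the pattern, not code from the repository.

```rust
// Hypothetical, standalone illustration of the request/response actor pattern.
use tokio::sync::{mpsc, oneshot};

enum CounterAction {
    Increment { response: oneshot::Sender<u64> },
    Get { response: oneshot::Sender<u64> },
}

#[derive(Clone)]
struct CounterSharedState(mpsc::Sender<CounterAction>);

impl CounterSharedState {
    fn start_new() -> Self {
        let (tx, mut rx) = mpsc::channel(100);
        tokio::spawn(async move {
            // The only copy of the state lives inside this task; no Mutex is needed.
            let mut count: u64 = 0;
            while let Some(action) = rx.recv().await {
                match action {
                    CounterAction::Increment { response } => {
                        count += 1;
                        let _ = response.send(count);
                    }
                    CounterAction::Get { response } => {
                        let _ = response.send(count);
                    }
                }
            }
        });
        Self(tx)
    }

    async fn increment(&self) -> Option<u64> {
        let (tx, rx) = oneshot::channel();
        self.0.send(CounterAction::Increment { response: tx }).await.ok()?;
        rx.await.ok()
    }

    async fn get(&self) -> Option<u64> {
        let (tx, rx) = oneshot::channel();
        self.0.send(CounterAction::Get { response: tx }).await.ok()?;
        rx.await.ok()
    }
}

#[tokio::main]
async fn main() {
    let counter = CounterSharedState::start_new();
    let _ = counter.increment().await;
    assert_eq!(counter.get().await, Some(1));
}
```

Dropping every sender handle closes the channel, `recv()` then returns `None`, and the actor task exits on its own, which is why the wrappers above need no explicit shutdown call.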
GuestProxyAgent-1.0.30/proxy_agent/src/shared_state/telemetry_wrapper.rs000066400000000000000000000071231500521614600266220ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT //! This module contains the logic to interact with the telemetry module. //! Example //! ```rust //! use proxy_agent::shared_state::telemetry_wrapper::TelemetrySharedState; //! use proxy_agent::telemetry::event_reader::VmMetaData; //! //! let telemetry_shared_state = TelemetrySharedState::start_new(); //! let vm_meta_data = VmMetaData::new("vm_id".to_string(), "vm_name".to_string()); //! telemetry_shared_state.set_vm_meta_data(Some(vm_meta_data.clone())).await.unwrap(); //! let meta_data = telemetry_shared_state.get_vm_meta_data().await.unwrap().unwrap(); //! assert_eq!(meta_data, vm_meta_data); //! ``` use crate::common::result::Result; use crate::common::{error::Error, logger}; use crate::telemetry::event_reader::VmMetaData; use tokio::sync::{mpsc, oneshot}; enum TelemetryAction { SetVmMetaData { vm_meta_data: Option, response: oneshot::Sender<()>, }, GetVmMetaData { response: oneshot::Sender>, }, } #[derive(Clone, Debug)] pub struct TelemetrySharedState(mpsc::Sender); impl TelemetrySharedState { pub fn start_new() -> Self { let (sender, mut receiver) = mpsc::channel(100); tokio::spawn(async move { let mut vm_meta_data: Option = None; loop { match receiver.recv().await { Some(TelemetryAction::SetVmMetaData { vm_meta_data: meta_data, response, }) => { vm_meta_data = meta_data.clone(); if response.send(()).is_err() { logger::write_warning(format!( "Failed to send response to TelemetryAction::SetVmMetaData '{:?}'", meta_data, )); } } Some(TelemetryAction::GetVmMetaData { response }) => { if let Err(meta_data) = response.send(vm_meta_data.clone()) { logger::write_warning(format!( "Failed to send response to TelemetryAction::GetVmMetaData '{:?}'", meta_data, )); } } None => { break; } } } }); Self(sender) } pub async fn set_vm_meta_data(&self, vm_meta_data: Option) -> Result<()> { let (response, receiver) = oneshot::channel(); self.0 .send(TelemetryAction::SetVmMetaData { vm_meta_data, response, }) .await .map_err(|e| { Error::SendError("TelemetryAction::SetVmMetaData".to_string(), e.to_string()) })?; receiver .await .map_err(|e| Error::RecvError("TelemetryAction::SetVmMetaData".to_string(), e)) } pub async fn get_vm_meta_data(&self) -> Result> { let (response, receiver) = oneshot::channel(); self.0 .send(TelemetryAction::GetVmMetaData { response }) .await .map_err(|e| { Error::SendError("TelemetryAction::GetVmMetaData".to_string(), e.to_string()) })?; receiver .await .map_err(|e| Error::RecvError("TelemetryAction::GetVmMetaData".to_string(), e)) } } GuestProxyAgent-1.0.30/proxy_agent/src/telemetry.rs000066400000000000000000000001661500521614600224140ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT pub mod event_reader; pub mod telemetry_event; GuestProxyAgent-1.0.30/proxy_agent/src/telemetry/000077500000000000000000000000001500521614600220435ustar00rootroot00000000000000GuestProxyAgent-1.0.30/proxy_agent/src/telemetry/event_reader.rs000066400000000000000000000433671500521614600250710ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT //! This module contains the logic to read the telemetry event files and send them to the wire server. //! The telemetry event files are written by the event_logger module. //! Example //! ```rust //! 
use proxy_agent::telemetry::event_reader; //! use proxy_agent::shared_state::agent_status::wrapper::AgentStatusSharedState; //! use proxy_agent::shared_state::key_keeper::wrapper::KeyKeeperSharedState; //! use proxy_agent::shared_state::telemetry::wrapper::TelemetrySharedState; //! use std::path::PathBuf; //! use std::time::Duration; //! use tokio_util::sync::CancellationToken; //! //! // start the telemetry event reader with the shared state //! let agent_status_shared_state = AgentStatusSharedState::start_new(); //! let key_keeper_shared_state = KeyKeeperSharedState::start_new(); //! let telemetry_shared_state = TelemetrySharedState::start_new(); //! let cancellation_token = CancellationToken::new(); //! //! let dir_path = PathBuf::from("/tmp"); //! let interval = Some(Duration::from_secs(300)); //! let delay_start = false; //! let server_ip = None; //! let server_port = None; //! let event_reader = event_reader::EventReader::new( //! dir_path, //! delay_start, //! cancellation_token, //! key_keeper_shared_state, //! telemetry_shared_state, //! agent_status_shared_state, //! ); //! //! tokio::spawn(event_reader.start(interval, server_ip, server_port)); //! //! // stop the telemetry event reader //! cancellation_token.cancel(); //! ``` use super::telemetry_event::TelemetryData; use super::telemetry_event::TelemetryEvent; use crate::common::{constants, logger, result::Result}; use crate::host_clients::imds_client::ImdsClient; use crate::host_clients::wire_server_client::WireServerClient; use crate::shared_state::agent_status_wrapper::AgentStatusModule; use crate::shared_state::agent_status_wrapper::AgentStatusSharedState; use crate::shared_state::key_keeper_wrapper::KeyKeeperSharedState; use crate::shared_state::telemetry_wrapper::TelemetrySharedState; use proxy_agent_shared::misc_helpers; use proxy_agent_shared::proxy_agent_aggregate_status::ModuleState; use proxy_agent_shared::telemetry::Event; use std::fs::remove_file; use std::path::PathBuf; use std::time::Duration; use tokio_util::sync::CancellationToken; /// VmMetaData contains the metadata of the VM. /// The metadata is used to identify the VM and the image origin. /// It will be part of the telemetry data send to the wire server. /// The metadata is updated by the wire server and the IMDS client. 
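// In tests, VmMetaData::empty() fills these fields with EMPTY_GUID placeholders and image_origin 3 ("unknown").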
#[derive(Clone, Debug)] pub struct VmMetaData { pub container_id: String, pub tenant_name: String, pub role_name: String, pub role_instance_name: String, pub subscription_id: String, pub resource_group_name: String, pub vm_id: String, pub image_origin: u64, } impl VmMetaData { #[cfg(test)] pub fn empty() -> Self { VmMetaData { container_id: constants::EMPTY_GUID.to_string(), tenant_name: constants::EMPTY_GUID.to_string(), role_name: constants::EMPTY_GUID.to_string(), role_instance_name: constants::EMPTY_GUID.to_string(), subscription_id: constants::EMPTY_GUID.to_string(), resource_group_name: constants::EMPTY_GUID.to_string(), vm_id: constants::EMPTY_GUID.to_string(), image_origin: 3, // unknown } } } pub struct EventReader { dir_path: PathBuf, delay_start: bool, cancellation_token: CancellationToken, key_keeper_shared_state: KeyKeeperSharedState, telemetry_shared_state: TelemetrySharedState, agent_status_shared_state: AgentStatusSharedState, } impl EventReader { pub fn new( dir_path: PathBuf, delay_start: bool, cancellation_token: CancellationToken, key_keeper_shared_state: KeyKeeperSharedState, telemetry_shared_state: TelemetrySharedState, agent_status_shared_state: AgentStatusSharedState, ) -> EventReader { EventReader { dir_path, delay_start, cancellation_token, key_keeper_shared_state, telemetry_shared_state, agent_status_shared_state, } } pub async fn start( &self, interval: Option, server_ip: Option<&str>, server_port: Option, ) { logger::write_information("telemetry event reader task started.".to_string()); let wire_server_client = WireServerClient::new( server_ip.unwrap_or(constants::WIRE_SERVER_IP), server_port.unwrap_or(constants::WIRE_SERVER_PORT), self.key_keeper_shared_state.clone(), ); let imds_client = ImdsClient::new( server_ip.unwrap_or(constants::IMDS_IP), server_port.unwrap_or(constants::IMDS_PORT), self.key_keeper_shared_state.clone(), ); let interval = interval.unwrap_or(Duration::from_secs(300)); tokio::select! 
{ _ = self.loop_reader(interval, wire_server_client, imds_client ) => {} _ = self.cancellation_token.cancelled() => { logger::write_warning("cancellation token signal received, stop the telemetry event reader task.".to_string()); self.stop().await; } } } async fn loop_reader( &self, interval: Duration, wire_server_client: WireServerClient, imds_client: ImdsClient, ) { let mut first = true; loop { if first { if self.delay_start { // delay start the event_reader task to give additional CPU cycles to more important threads tokio::time::sleep(Duration::from_secs(60)).await; } first = false; } // refresh vm metadata match self .update_vm_meta_data(&wire_server_client, &imds_client) .await { Ok(()) => { logger::write("success updated the vm metadata.".to_string()); } Err(e) => { logger::write_warning(format!("Failed to read vm metadata with error {}.", e)); } } if let Ok(Some(vm_meta_data)) = self.telemetry_shared_state.get_vm_meta_data().await { let _processed = self .process_events(&wire_server_client, &vm_meta_data) .await; } tokio::time::sleep(interval).await; } } async fn process_events( &self, wire_server_client: &WireServerClient, vm_meta_data: &VmMetaData, ) -> usize { let event_count: usize; // get all .json event files in the directory match misc_helpers::search_files(&self.dir_path, r"^(.*\.json)$") { Ok(files) => { let file_count = files.len(); event_count = self .process_events_and_clean(files, wire_server_client, vm_meta_data) .await; let message = format!( "Telemetry event reader sent {} events from {} files", event_count, file_count ); logger::write(message); } Err(e) => { logger::write_warning(format!( "Event Files not found in directory {}: {}", self.dir_path.display(), e )); event_count = 0; } } event_count } async fn stop(&self) { let _ = self .agent_status_shared_state .set_module_state(ModuleState::STOPPED, AgentStatusModule::TelemetryReader) .await; } async fn update_vm_meta_data( &self, wire_server_client: &WireServerClient, imds_client: &ImdsClient, ) -> Result<()> { let goal_state = wire_server_client.get_goalstate().await?; let shared_config = wire_server_client .get_shared_config(goal_state.get_shared_config_uri()) .await?; let instance_info = imds_client.get_imds_instance_info().await?; let vm_meta_data = VmMetaData { container_id: goal_state.get_container_id(), role_name: shared_config.get_role_name(), role_instance_name: shared_config.get_role_instance_name(), tenant_name: shared_config.get_deployment_name(), subscription_id: instance_info.get_subscription_id(), resource_group_name: instance_info.get_resource_group_name(), vm_id: instance_info.get_vm_id(), image_origin: instance_info.get_image_origin(), }; self.telemetry_shared_state .set_vm_meta_data(Some(vm_meta_data.clone())) .await?; logger::write(format!("Updated VM Metadata: {:?}", vm_meta_data)); Ok(()) } async fn process_events_and_clean( &self, files: Vec, wire_server_client: &WireServerClient, vm_meta_data: &VmMetaData, ) -> usize { let mut num_events_logged = 0; for file in files { match misc_helpers::json_read_from_file::>(&file) { Ok(events) => { num_events_logged += events.len(); Self::send_events(events, wire_server_client, vm_meta_data).await; } Err(e) => { logger::write_warning(format!( "Failed to read events from file {}: {}", file.display(), e )); } } Self::clean_files(file); } num_events_logged } const MAX_MESSAGE_SIZE: usize = 1024 * 64; async fn send_events( mut events: Vec, wire_server_client: &WireServerClient, vm_meta_data: &VmMetaData, ) { while !events.is_empty() { let mut telemetry_data 
= TelemetryData::new(); let mut add_more_events = true; while !events.is_empty() && add_more_events { match events.pop() { Some(event) => { telemetry_data.add_event(TelemetryEvent::from_event_log( &event, vm_meta_data.clone(), )); if telemetry_data.get_size() >= Self::MAX_MESSAGE_SIZE { telemetry_data.remove_last_event(); if telemetry_data.event_count() == 0 { match serde_json::to_string(&event) { Ok(json) => { logger::write_warning(format!( "Event data too large. Not sending to wire-server. Event: {}.", json )); } Err(_) => { logger::write_warning( "Event data too large. Not sending to wire-server. Event cannot be displayed.".to_string() ); } } } else { events.push(event); } add_more_events = false; } } None => { break; } } } Self::send_data_to_wire_server(telemetry_data, wire_server_client).await; } } async fn send_data_to_wire_server( telemetry_data: TelemetryData, wire_server_client: &WireServerClient, ) { if telemetry_data.event_count() == 0 { return; } for _ in [0; 5] { match wire_server_client .send_telemetry_data(telemetry_data.to_xml()) .await { Ok(()) => { break; } Err(e) => { logger::write_warning(format!( "Failed to send telemetry data to host with error: {}", e )); // wait 15 seconds and retry tokio::time::sleep(Duration::from_secs(15)).await; } } } } fn clean_files(file: PathBuf) { match remove_file(&file) { Ok(_) => { logger::write(format!("Removed File: {}", file.display())); } Err(e) => { logger::write_warning(format!("Failed to remove file {}: {}", file.display(), e)); } } } #[cfg(test)] async fn get_vm_meta_data(&self) -> VmMetaData { if let Ok(Some(vm_meta_data)) = self.telemetry_shared_state.get_vm_meta_data().await { vm_meta_data } else { VmMetaData::empty() } } } #[cfg(test)] mod tests { use super::*; use crate::common::logger; use crate::key_keeper::key::Key; use crate::test_mock::server_mock; use proxy_agent_shared::misc_helpers; use std::{env, fs}; #[tokio::test] async fn test_event_reader_thread() { let mut temp_dir = env::temp_dir(); temp_dir.push("test_event_reader_thread"); _ = fs::remove_dir_all(&temp_dir); let mut events_dir = temp_dir.to_path_buf(); events_dir.push("Events"); // start wire_server listener let ip = "127.0.0.1"; let port = 7071u16; let cancellation_token = CancellationToken::new(); let key_keeper_shared_state = KeyKeeperSharedState::start_new(); let event_reader = EventReader { dir_path: events_dir.clone(), delay_start: false, key_keeper_shared_state: key_keeper_shared_state.clone(), telemetry_shared_state: TelemetrySharedState::start_new(), cancellation_token: cancellation_token.clone(), agent_status_shared_state: AgentStatusSharedState::start_new(), }; let wire_server_client = WireServerClient::new(ip, port, key_keeper_shared_state.clone()); let imds_client = ImdsClient::new(ip, port, key_keeper_shared_state.clone()); key_keeper_shared_state .update_key(Key::empty()) .await .unwrap(); tokio::spawn(server_mock::start( ip.to_string(), port, cancellation_token.clone(), )); tokio::time::sleep(Duration::from_millis(100)).await; logger::write("server_mock started.".to_string()); match event_reader .update_vm_meta_data(&wire_server_client, &imds_client) .await { Ok(()) => { logger::write("success updated the vm metadata.".to_string()); } Err(e) => { logger::write_warning(format!("Failed to read vm metadata with error {}.", e)); } } // Write 10 events to events dir let message = 
r#"{\"method\":\"GET\",\"url\":\"/machine/37569ad2-69a3-44fd-b653-813e62a177cf/68938c06%2D5233%2D4ff9%2Da173%2D0ac0a2754f8a.%5FWS2022?comp=config&type=hostingEnvironmentConfig&incarnation=2\",\"ip\":\"168.63.129.16\",\"port\":80,\"userId\":999,\"userName\":\"WS2022$\",\"processName\":\"C:\\\\WindowsAzure\\\\GuestAgent_2.7.41491.1071_2023-03-02_185502\\\\WindowsAzureGuestAgent.exe\",\"runAsElevated\":true,\"responseStatus\":\"200 OK\",\"elapsedTime\":8}"#; let mut events: Vec = Vec::new(); for _ in [0; 10] { events.push(Event::new( "Informational".to_string(), message.to_string(), "test_deserialize_events_from_file".to_string(), "test_deserialize_events_from_file".to_string(), )); } logger::write("10 events created.".to_string()); misc_helpers::try_create_folder(&events_dir).unwrap(); let mut file_path = events_dir.to_path_buf(); file_path.push(format!("{}.json", misc_helpers::get_date_time_unix_nano())); misc_helpers::json_write_to_file(&events, &file_path).unwrap(); // Check the events processed let vm_meta_data = event_reader.get_vm_meta_data().await; let events_processed = event_reader .process_events(&wire_server_client, &vm_meta_data) .await; logger::write(format!("Send {} events from event files", events_processed)); //Should be 10 events written and read into events Vector assert_eq!(events_processed, 10, "Events processed should be 10"); let files = misc_helpers::get_files(&events_dir).unwrap(); assert!(files.is_empty(), "Events files not cleaned up."); // Test not processing the non-json files let mut file_path = events_dir.to_path_buf(); file_path.push(format!( "{}.notjson", misc_helpers::get_date_time_unix_nano() )); misc_helpers::json_write_to_file(&events, &file_path).unwrap(); let events_processed = event_reader .process_events(&wire_server_client, &vm_meta_data) .await; assert_eq!(0, events_processed, "events_processed must be 0."); let files = misc_helpers::get_files(&events_dir).unwrap(); assert!( !files.is_empty(), ".notjson files should not been cleaned up." ); cancellation_token.cancel(); _ = fs::remove_dir_all(&temp_dir); } } GuestProxyAgent-1.0.30/proxy_agent/src/telemetry/telemetry_event.rs000066400000000000000000000227111500521614600256270ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT //! This module contains the logic to generate the telemetry data to be send to wire server. //! Example //! ```rust //! use proxy_agent::telemetry::TelemetryData; //! //! // Create the telemetry data //! let mut telemetry_data = TelemetryData::new(); //! //! // Add the event to the telemetry data //! let event_log = Event { //! EventPid: "123".to_string(), //! EventTid: "456".to_string(), //! Version: "1.0".to_string(), //! TaskName: "TaskName".to_string(), //! TimeStamp: "2024-09-04T02:00:00.222z".to_string(), //! EventLevel: "Info".to_string(), //! Message: "Message".to_string(), //! OperationId: "OperationId".to_string(), //! }; //! let vm_meta_data = VmMetaData { //! container_id: "container_id".to_string(), //! tenant_name: "tenant_name".to_string(), //! role_name: "role_name".to_string(), //! role_instance_name: "role_instance_name".to_string(), //! subscription_id: "subscription_id".to_string(), //! resource_group_name: "resource_group_name".to_string(), //! vm_id: "vm_id".to_string(), //! image_origin: 1, //! }; //! let event = TelemetryEvent::from_event_log(&event_log, vm_meta_data); //! telemetry_data.add_event(event); //! //! // Get the size of the telemetry data //! let size = telemetry_data.get_size(); //! //! 
// Get the xml of the telemetry data //! let xml = telemetry_data.to_xml(); //! //! // Remove the last event from the telemetry data //! let event = telemetry_data.remove_last_event(); //! //! // Get the event count of the telemetry data //! let count = telemetry_data.event_count(); //! ``` use super::event_reader::VmMetaData; use crate::common::helpers; use once_cell::sync::Lazy; use proxy_agent_shared::telemetry::Event; use serde_derive::{Deserialize, Serialize}; /// TelemetryData struct to hold the telemetry events send to wire server. pub struct TelemetryData { events: Vec, } impl Default for TelemetryData { fn default() -> Self { Self::new() } } impl TelemetryData { pub fn new() -> Self { TelemetryData { events: Vec::new() } } /// Convert the telemetry data to xml format. /// The xml format is defined by the wire server. pub fn to_xml(&self) -> String { let mut xml: String = String::new(); xml.push_str(""); for e in &self.events { xml.push_str(&e.to_xml_event()); } xml.push_str(""); xml } /// Get the size of the telemetry data in bytes. pub fn get_size(&self) -> usize { self.to_xml().len() } pub fn add_event(&mut self, event: TelemetryEvent) { self.events.push(event); } pub fn remove_last_event(&mut self) -> Option { self.events.pop() } pub fn event_count(&self) -> usize { self.events.len() } } pub struct TelemetryEvent { event_pid: u64, event_tid: u64, ga_version: String, container_id: String, task_name: String, opcode_name: String, keyword_name: String, os_version: String, execution_mode: String, ram: u64, processors: u64, tenant_name: String, role_name: String, role_instance_name: String, subscription_id: String, resource_group_name: String, vm_id: String, image_origin: u64, event_name: String, capability_used: String, context1: String, context2: String, context3: String, } impl TelemetryEvent { pub fn from_event_log(event_log: &Event, vm_meta_data: VmMetaData) -> Self { TelemetryEvent { event_pid: event_log.EventPid.parse::().unwrap_or(0), event_tid: event_log.EventTid.parse::().unwrap_or(0), ga_version: event_log.Version.to_string(), task_name: event_log.TaskName.to_string(), opcode_name: event_log.TimeStamp.to_string(), capability_used: event_log.EventLevel.to_string(), context1: event_log.Message.to_string(), context2: event_log.TimeStamp.to_string(), context3: event_log.OperationId.to_string(), execution_mode: "ProxyAgent".to_string(), event_name: "MicrosoftAzureGuestProxyAgent".to_string(), os_version: helpers::get_long_os_version(), keyword_name: CURRENT_KEYWORD_NAME.to_string(), ram: helpers::get_ram_in_mb(), processors: helpers::get_cpu_count() as u64, container_id: vm_meta_data.container_id, tenant_name: vm_meta_data.tenant_name, role_name: vm_meta_data.role_name, role_instance_name: vm_meta_data.role_instance_name, subscription_id: vm_meta_data.subscription_id, resource_group_name: vm_meta_data.resource_group_name, vm_id: vm_meta_data.vm_id, image_origin: vm_meta_data.image_origin, } } fn to_xml_event(&self) -> String { let mut xml: String = String::new(); xml.push_str("", helpers::xml_escape(self.opcode_name.to_string()) )); xml.push_str(&format!( "", helpers::xml_escape(self.keyword_name.to_string()) )); xml.push_str(&format!( "", helpers::xml_escape(self.task_name.to_string()) )); xml.push_str(&format!( "", helpers::xml_escape(self.tenant_name.to_string()) )); xml.push_str(&format!( "", helpers::xml_escape(self.role_name.to_string()) )); xml.push_str(&format!( "", helpers::xml_escape(self.role_instance_name.to_string()) )); xml.push_str(&format!( "", 
helpers::xml_escape(self.container_id.to_string()) )); xml.push_str(&format!( "", helpers::xml_escape(self.resource_group_name.to_string()) )); xml.push_str(&format!( "", helpers::xml_escape(self.subscription_id.to_string()) )); xml.push_str(&format!( "", helpers::xml_escape(self.vm_id.to_string()) )); xml.push_str(&format!( "", self.event_pid )); xml.push_str(&format!( "", self.event_tid )); xml.push_str(&format!( "", self.image_origin )); xml.push_str(&format!( "", helpers::xml_escape(self.execution_mode.to_string()) )); xml.push_str(&format!( "", helpers::xml_escape(self.os_version.to_string()) )); xml.push_str(&format!( "", helpers::xml_escape(self.ga_version.to_string()) )); xml.push_str(&format!( "", self.ram )); xml.push_str(&format!( "", self.processors )); xml.push_str(&format!( "", helpers::xml_escape(self.event_name.to_string()) )); xml.push_str(&format!( "", helpers::xml_escape(self.capability_used.to_string()) )); xml.push_str(&format!( "", helpers::xml_escape(self.context1.to_string()) )); xml.push_str(&format!( "", helpers::xml_escape(self.context2.to_string()) )); xml.push_str(&format!( "", helpers::xml_escape(self.context3.to_string()) )); xml.push_str("]]>"); xml } } static CURRENT_KEYWORD_NAME: Lazy = Lazy::new(|| KeywordName::new(helpers::get_cpu_arch()).to_json()); #[derive(Serialize, Deserialize)] #[allow(non_snake_case)] pub struct KeywordName { CpuArchitecture: String, } impl KeywordName { pub fn new(arch: String) -> Self { KeywordName { CpuArchitecture: arch, } } pub fn to_json(&self) -> String { serde_json::to_string(self).unwrap_or_else(|_| "".to_owned()) } } GuestProxyAgent-1.0.30/proxy_agent/src/test_mock.rs000066400000000000000000000001341500521614600223650ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT pub mod server_mock; GuestProxyAgent-1.0.30/proxy_agent/src/test_mock/000077500000000000000000000000001500521614600220215ustar00rootroot00000000000000GuestProxyAgent-1.0.30/proxy_agent/src/test_mock/server_mock.rs000066400000000000000000000457361500521614600247250ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT use crate::common::{hyper_client, logger, result::Result}; use crate::key_keeper; use crate::key_keeper::key::{Key, KeyStatus}; use http_body_util::combinators::BoxBody; use hyper::body::Bytes; use hyper::server::conn::http1; use hyper::service::service_fn; use hyper::Request; use hyper::Response; use hyper::StatusCode; use hyper_util::rt::TokioIo; use once_cell::sync::Lazy; use tokio::net::TcpListener; use tokio_util::sync::CancellationToken; use uuid::Uuid; static EMPTY_GUID: Lazy = Lazy::new(|| "00000000-0000-0000-0000-000000000000".to_string()); static GUID: Lazy = Lazy::new(|| Uuid::new_v4().to_string()); static mut CURRENT_STATE: Lazy = Lazy::new(|| String::from(key_keeper::MUST_SIG_WIRESERVER)); pub async fn start(ip: String, port: u16, cancellation_token: CancellationToken) { logger::write_information("Mock Server starting...".to_string()); let addr = format!("{}:{}", ip, port); let listener = TcpListener::bind(&addr).await.unwrap(); println!("Listening on http://{}", addr); loop { tokio::select! 
{ _ = cancellation_token.cancelled() => { logger::write_warning("cancellation token signal received, stop the listener.".to_string()); return; } result = listener.accept() => { match result { Ok((stream, _)) =>{ let ip = ip.to_string(); tokio::spawn(async move { let io = TokioIo::new(stream); let ip = ip.to_string(); let service = service_fn(move |req| handle_request(ip.to_string(), port, req)); if let Err(err) = http1::Builder::new().serve_connection(io, service).await { println!("Error serving connection: {:?}", err); } }); }, Err(e) => { logger::write_error(format!("Failed to accept connection: {}", e)); } } } } } } async fn handle_request( ip: String, port: u16, request: Request, ) -> Result>> { logger::write_information("WireServer processing request.".to_string()); let path: String = request.uri().path_and_query().unwrap().to_string(); let path = path.trim_start_matches('/'); let segments: Vec<&str> = path.split('/').collect(); println!("handle_request: {}, {:?}", request.method(), path); println!("segments: {:?}", segments); let mut content_type = String::from("application/json; charset=utf-8"); let mut body_string = String::new(); if request.method() == "GET" { if !segments.is_empty() && segments[0] == "secure-channel" { if segments.len() > 1 && segments[1] == "status" { // get key status let status_response = r#"{ "authorizationScheme": "Azure-HMAC-SHA256", "keyDeliveryMethod": "http", "keyGuid": "", "requiredClaimsHeaderPairs": [ "isRoot" ], "secureChannelState": "Wireserver", "version": "1.0" }"#; let mut status: KeyStatus = serde_json::from_str(status_response).unwrap(); unsafe { if *CURRENT_STATE == key_keeper::DISABLE_STATE { status.secureChannelState = Some(key_keeper::DISABLE_STATE.to_string()); } else { status.secureChannelState = Some(key_keeper::MUST_SIG_WIRESERVER.to_string()); } } body_string = serde_json::to_string(&status).unwrap(); } } else if !segments.is_empty() && segments[0] == "machine?comp=goalstate" { let goal_state_str = r#" 2015-04-05 16 Started 300000 16001 TRUE http://##ip##:##port##/machine/?comp=package&incarnation=Win8-Win8_2.7.32211.3_221108-1339_GuestAgentPackage_NoWER.zip Win8-Win8_2.7.32211.3_221108-1339_GuestAgentPackage_NoWER.zip 374188df-b0a2-456a-a7b2-83f28b18d36f 7d2798bb72a0413d9a60b355277df726.TenantAdminApi.Worker_IN_0 Started http://##ip##:##port##/machine/374188df-b0a2-456a-a7b2-83f28b18d36f/7d2798bb72a0413d9a60b355277df726.TenantAdminApi.Worker%5FIN%5F0?comp=config&type=hostingEnvironmentConfig&incarnation=16 http://##ip##:##port##/machine/374188df-b0a2-456a-a7b2-83f28b18d36f/7d2798bb72a0413d9a60b355277df726.TenantAdminApi.Worker%5FIN%5F0?comp=config&type=sharedConfig&incarnation=16 http://##ip##:##port##/machine/374188df-b0a2-456a-a7b2-83f28b18d36f/7d2798bb72a0413d9a60b355277df726.TenantAdminApi.Worker%5FIN%5F0?comp=config&type=extensionsConfig&incarnation=16 http://##ip##:##port##/machine/374188df-b0a2-456a-a7b2-83f28b18d36f/7d2798bb72a0413d9a60b355277df726.TenantAdminApi.Worker%5FIN%5F0?comp=config&type=fullConfig&incarnation=16 http://##ip##:##port##/machine/374188df-b0a2-456a-a7b2-83f28b18d36f/7d2798bb72a0413d9a60b355277df726.TenantAdminApi.Worker%5FIN%5F0?comp=certificates&incarnation=16 7d2798bb72a0413d9a60b355277df726.132.7d2798bb72a0413d9a60b355277df726.78.TenantAdminApi.Worker_IN_0.1.xml "#; let goal_state_str = goal_state_str.replace("##ip##", &ip); let goal_state_str = goal_state_str.replace("##port##", &port.to_string()); content_type = "text/xml; charset=utf-8".to_string(); body_string = goal_state_str.to_string(); } else 
if path.starts_with("machine/") && path.contains("type=sharedConfig") { let shared_config_str = r#" "#; content_type = "text/xml; charset=utf-8".to_string(); body_string = shared_config_str.to_string(); } else if path.starts_with("metadata/instance") { let response_data = r#"{ "compute": { "azEnvironment": "AZUREPUBLICCLOUD", "additionalCapabilities": { "hibernationEnabled": "true" }, "hostGroup": { "id": "testHostGroupId" }, "extendedLocation": { "type": "edgeZone", "name": "microsoftlosangeles" }, "evictionPolicy": "", "isHostCompatibilityLayerVm": "true", "licenseType": "Windows_Client", "location": "westus", "name": "examplevmname", "offer": "WindowsServer", "osProfile": { "adminUsername": "admin", "computerName": "examplevmname", "disablePasswordAuthentication": "true" }, "osType": "Windows", "placementGroupId": "f67c14ab-e92c-408c-ae2d-da15866ec79a", "plan": { "name": "planName", "product": "planProduct", "publisher": "planPublisher" }, "platformFaultDomain": "36", "platformSubFaultDomain": "", "platformUpdateDomain": "42", "priority": "Regular", "publicKeys": [{ "keyData": "ssh-rsa 0", "path": "/home/user/.ssh/authorized_keys0" }, { "keyData": "ssh-rsa 1", "path": "/home/user/.ssh/authorized_keys1" } ], "publisher": "RDFE-Test-Microsoft-Windows-Server-Group", "resourceGroupName": "macikgo-test-may-23", "resourceId": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx/resourceGroups/macikgo-test-may-23/providers/Microsoft.Compute/virtualMachines/examplevmname", "securityProfile": { "secureBootEnabled": "true", "virtualTpmEnabled": "false", "encryptionAtHost": "true", "securityType": "TrustedLaunch" }, "sku": "2019-Datacenter", "storageProfile": { "dataDisks": [{ "bytesPerSecondThrottle": "979202048", "caching": "None", "createOption": "Empty", "diskCapacityBytes": "274877906944", "diskSizeGB": "1024", "image": { "uri": "" }, "isSharedDisk": "false", "isUltraDisk": "true", "lun": "0", "managedDisk": { "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx/resourceGroups/macikgo-test-may-23/providers/Microsoft.Compute/disks/exampledatadiskname", "storageAccountType": "StandardSSD_LRS" }, "name": "exampledatadiskname", "opsPerSecondThrottle": "65280", "vhd": { "uri": "" }, "writeAcceleratorEnabled": "false" }], "imageReference": { "id": "", "offer": "WindowsServer", "publisher": "MicrosoftWindowsServer", "sku": "2019-Datacenter", "version": "latest" }, "osDisk": { "caching": "ReadWrite", "createOption": "FromImage", "diskSizeGB": "30", "diffDiskSettings": { "option": "Local" }, "encryptionSettings": { "enabled": "false", "diskEncryptionKey": { "sourceVault": { "id": "/subscriptions/test-source-guid/resourceGroups/testrg/providers/Microsoft.KeyVault/vaults/test-kv" }, "secretUrl": "https://test-disk.vault.azure.net/secrets/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx" }, "keyEncryptionKey": { "sourceVault": { "id": "/subscriptions/test-key-guid/resourceGroups/testrg/providers/Microsoft.KeyVault/vaults/test-kv" }, "keyUrl": "https://test-key.vault.azure.net/secrets/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx" } }, "image": { "uri": "" }, "managedDisk": { "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx/resourceGroups/macikgo-test-may-23/providers/Microsoft.Compute/disks/exampleosdiskname", "storageAccountType": "StandardSSD_LRS" }, "name": "exampleosdiskname", "osType": "Windows", "vhd": { "uri": "" }, "writeAcceleratorEnabled": "false" }, "resourceDisk": { "size": "4096" } }, "subscriptionId": 
"xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx", "tags": "baz:bash;foo:bar", "userData": "Zm9vYmFy", "version": "15.05.22", "virtualMachineScaleSet": { "id": "/subscriptions/xxxxxxxx-xxxx-xxx-xxx-xxxx/resourceGroups/resource-group-name/providers/Microsoft.Compute/virtualMachineScaleSets/virtual-machine-scale-set-name" }, "vmId": "02aab8a4-74ef-476e-8182-f6d2ba4166a6", "vmScaleSetName": "crpteste9vflji9", "vmSize": "Standard_A3", "zone": "" }, "network": { "interface": [{ "ipv4": { "ipAddress": [{ "privateIpAddress": "10.144.133.132", "publicIpAddress": "" }], "subnet": [{ "address": "10.144.133.128", "prefix": "26" }] }, "ipv6": { "ipAddress": [ ] }, "macAddress": "0011AAFFBB22" }] } }"#; body_string = response_data.to_string(); } } else if request.method() == "POST" { if !segments.is_empty() && segments[0] == "secure-channel" { if segments.len() > 1 && segments[1] == "key" { // get key details let key_response = r#"{ "authorizationScheme": "Azure-HMAC-SHA256", "guid": "", "issued": "2021-05-05T 12:00:00Z", "key": "4A404E635266556A586E3272357538782F413F4428472B4B6250645367566B59" }"#; let mut key: Key = serde_json::from_str(key_response).unwrap(); unsafe { if *CURRENT_STATE == key_keeper::DISABLE_STATE { key.guid = EMPTY_GUID.to_string(); } else { key.guid = GUID.to_string(); } } body_string = serde_json::to_string(&key).unwrap(); } } else if !segments.is_empty() && segments[0] == "machine" && segments.len() > 1 && segments[1] == "?comp=telemetrydata" { content_type = String::new(); body_string = String::new(); } } let response = Response::builder() .status(StatusCode::OK) .header(hyper::header::CONTENT_TYPE, content_type) .body(hyper_client::full_body(body_string.as_bytes().to_vec())) .unwrap(); logger::write_information("WireServer processed request.".to_string()); Ok(response) } pub fn set_secure_channel_state(enabled: bool) { if enabled { unsafe { *CURRENT_STATE = key_keeper::MUST_SIG_WIRESERVER.to_string(); } } else { unsafe { *CURRENT_STATE = key_keeper::DISABLE_STATE.to_string(); } } } GuestProxyAgent-1.0.30/proxy_agent_extension/000077500000000000000000000000001500521614600213365ustar00rootroot00000000000000GuestProxyAgent-1.0.30/proxy_agent_extension/Cargo.toml000066400000000000000000000021521500521614600232660ustar00rootroot00000000000000[package] name = "ProxyAgentExt" version = "1.0.30" # always 3-number version edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" once_cell = "1.18.0" proxy_agent_shared = { path ="../proxy_agent_shared"} clap = { version = "4.5.17", features =["derive"] } # Command Line Argument Parser thiserror = "1.0.64" tokio = { version = "1", features = ["rt", "rt-multi-thread", "time", "macros", "sync"] } ctor = "0.3.6" # used for test setup and clean up [target.'cfg(windows)'.dependencies] windows-service = "0.7.0" # windows NT [target.'cfg(windows)'.build-dependencies] winres = "0.1.12" # Rust Windows resource helper to add file version static_vcruntime = "2.0.0" # Statically link the VCRuntime when using the MSVC toolchain [target.'cfg(not(windows))'.dependencies.nix] version = "0.29.0" features = [ "net", "signal" ] [target.'cfg(not(windows))'.dependencies] sysinfo = "0.30.13" # read process information for Linux GuestProxyAgent-1.0.30/proxy_agent_extension/build.rs000066400000000000000000000003561500521614600230070ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT fn 
main() { #[cfg(windows)] { static_vcruntime::metabuild(); let res = winres::WindowsResource::new(); res.compile().unwrap(); } } GuestProxyAgent-1.0.30/proxy_agent_extension/src/000077500000000000000000000000001500521614600221255ustar00rootroot00000000000000GuestProxyAgent-1.0.30/proxy_agent_extension/src/common.rs000066400000000000000000000514141500521614600237700ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT use crate::constants; use crate::error::Error; use crate::logger; use crate::result::Result; use crate::structs; use crate::structs::FormattedMessage; use crate::structs::HandlerEnvironment; use crate::structs::TopLevelStatus; use proxy_agent_shared::{misc_helpers, telemetry}; use std::fs; use std::path::Path; use std::path::PathBuf; use std::process; #[cfg(windows)] use proxy_agent_shared::service; pub fn get_handler_environment(exe_path: &Path) -> HandlerEnvironment { let mut handler_env_path: PathBuf = exe_path.to_path_buf(); handler_env_path.push(constants::HANDLER_ENVIRONMENT_FILE); let handler_env_file: Vec = match misc_helpers::json_read_from_file(&handler_env_path) { Ok(temp) => temp, Err(e) => { eprintln!("Error in reading handler env file: {e}"); process::exit(constants::EXIT_CODE_HANDLER_ENV_ERR); } }; if handler_env_file.is_empty() { eprintln!("Handler environment file is empty"); process::exit(constants::EXIT_CODE_HANDLER_ENV_ERR); } handler_env_file[0].handlerEnvironment.clone() } pub fn report_heartbeat(heartbeat_file_path: PathBuf, heartbeat_obj: structs::HeartbeatObj) { //Heartbeat Instance let root_heartbeat_obj = structs::TopLevelHeartbeat { version: constants::VERSION.to_string(), heartbeat: heartbeat_obj, }; let root_obj: Vec = vec![root_heartbeat_obj]; let root_heartbeat = match serde_json::to_string(&root_obj) { Ok(temp) => temp, Err(e) => { logger::write(format!("Error in serializing heartbeat object: {e}")); return; } }; match fs::write(&heartbeat_file_path, root_heartbeat) { Ok(_) => { logger::write(format!( "HeartBeat file created: {:?}", heartbeat_file_path.to_path_buf() )); } Err(e) => { logger::write(format!("Error in creating HeartBeat file: {:?}", e)); } } } pub fn get_file_path(status_folder: PathBuf, config_seq_no: &str, file_extension: &str) -> PathBuf { let mut file: PathBuf = status_folder; if let Err(e) = misc_helpers::try_create_folder(&file) { logger::write(format!("Error in creating folder: {:?}", e)); } file.push(config_seq_no); file.set_extension(file_extension); file } pub fn report_status( status_folder_path: PathBuf, config_seq_no: &str, status_obj: &structs::StatusObj, ) { //Status Instance let status_file: PathBuf = get_file_path( status_folder_path, config_seq_no, constants::STATUS_FILE_SUFFIX, ); let current_datetime: String = misc_helpers::get_date_time_string_with_milliseconds(); let root_status_obj = TopLevelStatus { version: constants::VERSION.to_string(), timestampUTC: current_datetime, status: status_obj.clone(), }; let root_vec: Vec = vec![root_status_obj]; let root_status = match serde_json::to_string(&root_vec) { Ok(temp) => temp, Err(e) => { logger::write(format!("Error in serializing status object: {e}")); return; } }; // TODO: retry if write failed match fs::write(&status_file, root_status) { Ok(_) => { logger::write(format!("Status file created: {:?}", status_file)); } Err(e) => { logger::write(format!("Error in creating status file: {:?}", e)); } } } /// Update the current seq no in the CURRENT_SEQ_NO_FILE /// If the seq no is different from the current seq no, update 
the seq no in the file /// If the seq no is same as the current seq no, do not update the seq no in the file /// Returns true if the seq no is updated in the file, false otherwise /// Returns error if there is an error in writing the seq no to the file pub fn update_current_seq_no(config_seq_no: &str, exe_path: &Path) -> Result { let mut should_report_status = true; logger::write(format!("enable command with new seq no: {config_seq_no}")); let current_seq_no_stored_file: PathBuf = exe_path.join(constants::CURRENT_SEQ_NO_FILE); match fs::read_to_string(¤t_seq_no_stored_file) { Ok(seq_no) => { if seq_no != *config_seq_no { logger::write(format!( "updating seq no from {} to {}", seq_no, config_seq_no )); if let Err(e) = fs::write(¤t_seq_no_stored_file, config_seq_no) { logger::write(format!("Error in writing seq no to file: {:?}", e)); return Err(Error::Io(e)); } } else { logger::write("no update on seq no".to_string()); should_report_status = false; } } Err(_e) => { logger::write(format!( "no seq no found, writing seq no {} to file '{}'", config_seq_no, current_seq_no_stored_file.display() )); if let Err(e) = fs::write(¤t_seq_no_stored_file, config_seq_no) { logger::write(format!("Error in writing seq no to file: {:?}", e)); return Err(Error::Io(e)); } } } Ok(should_report_status) } pub fn get_current_seq_no(exe_path: &Path) -> String { let current_seq_no_stored_file: PathBuf = exe_path.join(constants::CURRENT_SEQ_NO_FILE); match fs::read_to_string(current_seq_no_stored_file) { Ok(seq_no) => { logger::write(format!("Current seq no: {}", seq_no)); seq_no } Err(e) => { logger::write(format!("Error reading current seq no file: {:?}", e)); "".to_string() } } } pub fn get_proxy_agent_service_path() -> PathBuf { #[cfg(windows)] { service::query_service_executable_path(constants::PROXY_AGENT_SERVICE_NAME) } #[cfg(not(windows))] { // linux service hard-coded to this location PathBuf::from(proxy_agent_shared::linux::EXE_FOLDER_PATH).join("azure-proxy-agent") } } pub fn get_proxy_agent_exe_path() -> PathBuf { let exe_path = misc_helpers::get_current_exe_dir(); logger::write( "Current proxy agent exe path: ".to_string() + &misc_helpers::path_to_string(&exe_path), ); #[cfg(windows)] { exe_path.join("ProxyAgent/ProxyAgent/GuestProxyAgent.exe") } #[cfg(not(windows))] { exe_path.join("ProxyAgent/ProxyAgent/azure-proxy-agent") } } pub fn report_status_enable_command( status_folder: PathBuf, config_seq_no: &str, status: Option, ) { let message: &str = "Enabling the ProxyAgent Extension..."; //Report Status let handler_status = structs::StatusObj { name: constants::PLUGIN_NAME.to_string(), operation: constants::ENABLE_OPERATION.to_string(), configurationAppliedTime: misc_helpers::get_date_time_string(), code: constants::STATUS_CODE_OK, status: status.unwrap_or_else(|| constants::TRANSITIONING_STATUS.to_string()), formattedMessage: FormattedMessage { lang: constants::LANG_EN_US.to_string(), message: message.to_string(), }, substatus: Default::default(), }; report_status(status_folder, config_seq_no, &handler_status); } pub async fn start_event_logger() { logger::write("starting event logger".to_string()); tokio::spawn({ async move { let interval: std::time::Duration = std::time::Duration::from_secs(60); let max_event_file_count: usize = 50; let exe_path = misc_helpers::get_current_exe_dir(); let event_folder = PathBuf::from(get_handler_environment(&exe_path).eventsFolder.to_string()); telemetry::event_logger::start(event_folder, interval, max_event_file_count, |_| { async { // do nothing } }) .await; } }); } 
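// Editor's illustrative sketch (not part of the original source): a minimal example of the
// seq-no gating behavior documented on `update_current_seq_no` above -- a sequence number
// seen for the first time (or a changed one) asks the caller to report status, while a
// repeated sequence number does not. The folder name `seq_no_gating_example` is made up
// for this sketch only.
#[cfg(test)]
mod seq_no_gating_example {
    use super::update_current_seq_no;
    use proxy_agent_shared::misc_helpers;
    use std::{env, fs};

    #[test]
    fn repeated_seq_no_is_not_reported_again() {
        let dir = env::temp_dir().join("seq_no_gating_example");
        _ = fs::remove_dir_all(&dir);
        _ = misc_helpers::try_create_folder(&dir);
        // First time this seq no is seen: the file is written and status should be reported.
        assert!(update_current_seq_no("0", &dir).unwrap());
        // Same seq no again: nothing changes, so the caller can skip reporting status.
        assert!(!update_current_seq_no("0", &dir).unwrap());
        // A different seq no: the file is updated and status should be reported again.
        assert!(update_current_seq_no("1", &dir).unwrap());
        _ = fs::remove_dir_all(&dir);
    }
}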
pub fn stop_event_logger() { logger::write("stopping event logger".to_string()); telemetry::event_logger::stop(); } pub struct StatusState { current_state: String, consecutive_fail_count: u32, consecutive_success_count: u32, transition_to_error_threshold: u32, } pub fn setup_tool_exe_path() -> PathBuf { #[cfg(windows)] { misc_helpers::get_current_exe_dir().join("ProxyAgent/proxy_agent_setup.exe") } #[cfg(not(windows))] { misc_helpers::get_current_exe_dir().join("ProxyAgent/proxy_agent_setup") } } impl Default for StatusState { fn default() -> Self { Self::new() } } impl StatusState { const MAX_CONSECUTIVE_COUNT: u32 = 10000; pub fn new() -> StatusState { StatusState { current_state: constants::TRANSITIONING_STATUS.to_string(), consecutive_fail_count: 0, consecutive_success_count: 0, transition_to_error_threshold: 20, } } pub fn update_state(&mut self, operation_success: bool) -> String { if operation_success { self.consecutive_fail_count = 0; if self.consecutive_success_count < StatusState::MAX_CONSECUTIVE_COUNT { self.consecutive_success_count += 1; } } else { self.consecutive_success_count = 0; if self.consecutive_fail_count < StatusState::MAX_CONSECUTIVE_COUNT { self.consecutive_fail_count += 1; } } match self.current_state.as_str() { constants::SUCCESS_STATUS => { if self.consecutive_fail_count >= 1 { self.current_state = constants::TRANSITIONING_STATUS.to_string(); } } constants::TRANSITIONING_STATUS => { if self.consecutive_success_count >= 1 { self.current_state = constants::SUCCESS_STATUS.to_string(); } else if self.consecutive_fail_count >= self.transition_to_error_threshold { self.current_state = constants::ERROR_STATUS.to_string(); } } constants::ERROR_STATUS => { if self.consecutive_success_count >= 1 { self.current_state = constants::TRANSITIONING_STATUS.to_string(); } } _ => { self.current_state = constants::TRANSITIONING_STATUS.to_string(); } } self.current_state.clone() } } #[cfg(test)] mod tests { use crate::{common, constants, structs::*}; use proxy_agent_shared::misc_helpers; use std::env; use std::fs::{self}; use std::path::PathBuf; #[test] fn test_handler_env_file() { //Set the temp directory for handler environment json file let mut temp_test_path = env::temp_dir(); temp_test_path.push("test_handler_env_file"); //Clean up and ignore the clean up errors _ = fs::remove_dir_all(&temp_test_path); _ = misc_helpers::try_create_folder(&temp_test_path); //Add HandlerEnvironment.json in the temp directory let handler_env_file = temp_test_path.to_path_buf().join("HandlerEnvironment.json"); //Create raw handler environment json string let json_handler_linux: &str = r#"[{ "version": 1.0, "handlerEnvironment": { "logFolder": "log", "configFolder": "config", "statusFolder": "status", "heartbeatFile": "heartbeat.json", "eventsFolder": "test_kusto" } }]"#; //Deserialize handler environment json string let handler_env_obj: Vec = serde_json::from_str(json_handler_linux).unwrap(); //Write the deserialized json object to HandlerEnvironment.json file _ = misc_helpers::json_write_to_file(&handler_env_obj, &handler_env_file); let handler_env = super::get_handler_environment(&temp_test_path); assert_eq!(handler_env.logFolder, "log".to_string()); assert_eq!(handler_env.configFolder, "config".to_string()); assert_eq!(handler_env.statusFolder, "status".to_string()); assert_eq!(handler_env.heartbeatFile, "heartbeat.json".to_string()); assert_eq!(handler_env.eventsFolder, "test_kusto".to_string()); assert_eq!(handler_env.deploymentid, None); assert_eq!(handler_env.rolename, None); 
assert_eq!(handler_env.instance, None); assert_eq!(handler_env.hostResolverAddress, None); _ = fs::remove_dir_all(&temp_test_path); } #[tokio::test] async fn test_status_file() { // Create temp directory for status folder let mut temp_test_path = env::temp_dir(); temp_test_path.push("test_status_file"); //Clean up and ignore the clean up errors _ = fs::remove_dir_all(&temp_test_path); _ = misc_helpers::try_create_folder(&temp_test_path); let status_folder: PathBuf = temp_test_path.join("status"); //Set the config_seq_no value let seq_no = "0"; let expected_status_file: &PathBuf = &temp_test_path.join("status").join("0.status"); let handler_status = StatusObj { name: "test".to_string(), operation: "test".to_string(), configurationAppliedTime: "1-2-3".to_string(), code: 0, status: "test success".to_string(), formattedMessage: FormattedMessage { lang: "en-US".to_string(), message: "test status".to_string(), }, substatus: Default::default(), }; common::report_status(status_folder, &seq_no.to_string(), &handler_status); let status_obj = misc_helpers::json_read_from_file::>(&expected_status_file) .unwrap(); assert_eq!(status_obj.len(), 1); assert_eq!(status_obj[0].status.name, "test".to_string()); _ = fs::remove_dir_all(&temp_test_path); } #[test] fn test_get_file_path() { // Create temp directory for status folder let mut temp_test_path = env::temp_dir(); temp_test_path.push("test_get_file_path"); //Clean up and ignore the clean up errors _ = fs::remove_dir_all(&temp_test_path); _ = misc_helpers::try_create_folder(&temp_test_path); let status_folder: PathBuf = temp_test_path.join("status"); let config_seq_no = "0"; let expected_status_file: &PathBuf = &temp_test_path.join("status").join("0.status"); let status_file = common::get_file_path(status_folder, config_seq_no, "status"); assert_eq!(status_file, *expected_status_file); _ = fs::remove_dir_all(&temp_test_path); } #[tokio::test] async fn test_update_current_seq_no() { // Create temp directory for status folder let mut temp_test_path = env::temp_dir(); temp_test_path.push("test_update_current_seq_no"); //Clean up and ignore the clean up errors _ = fs::remove_dir_all(&temp_test_path); _ = misc_helpers::try_create_folder(&temp_test_path); // test invalid dir_path let exe_path = PathBuf::from("invalid_path"); let config_seq_no = "0"; let should_report_status = common::update_current_seq_no(config_seq_no, &exe_path); assert!( should_report_status.is_err(), "Error expected when update current seq no to an invalid_path" ); // test valid dir_path let exe_path = &temp_test_path; // test seq no file not found, first write let config_seq_no = "0"; let should_report_status = common::update_current_seq_no(config_seq_no, &exe_path).unwrap(); assert!(should_report_status); let seq_no = common::get_current_seq_no(&exe_path); assert_eq!(seq_no, "0".to_string()); // test seq no file found, write different seq no let config_seq_no = "1"; let should_report_status = common::update_current_seq_no(config_seq_no, &exe_path).unwrap(); assert!(should_report_status); let seq_no = common::get_current_seq_no(&exe_path); assert_eq!(seq_no, "1".to_string()); // test seq no file found, write same seq no let config_seq_no = "1"; let should_report_status = common::update_current_seq_no(config_seq_no, &exe_path).unwrap(); assert!(!should_report_status); let seq_no = common::get_current_seq_no(&exe_path); assert_eq!(seq_no, "1".to_string()); _ = fs::remove_dir_all(&temp_test_path); } #[tokio::test] async fn test_report_status_enable_command() { // Create temp directory for 
status folder let mut temp_test_path = env::temp_dir(); temp_test_path.push("test_report_status_enable_command"); //Clean up and ignore the clean up errors _ = fs::remove_dir_all(&temp_test_path); _ = misc_helpers::try_create_folder(&temp_test_path); let status_folder: PathBuf = temp_test_path.join("status"); let config_seq_no = "0"; let expected_status_file: &PathBuf = &temp_test_path.join("status").join("0.status"); super::report_status_enable_command(status_folder, config_seq_no, None); let status_obj = misc_helpers::json_read_from_file::>(&expected_status_file) .unwrap(); assert_eq!(status_obj.len(), 1); assert_eq!(status_obj[0].status.operation, "Enable"); _ = fs::remove_dir_all(&temp_test_path); } #[tokio::test] async fn test_heartbeat_file() { // Create temp directory for status folder let mut temp_test_path = env::temp_dir(); temp_test_path.push("test_heartbeat_file"); //Clean up and ignore the clean up errors _ = fs::remove_dir_all(&temp_test_path); _ = misc_helpers::try_create_folder(&temp_test_path); let expected_heartbeat_file: PathBuf = temp_test_path.join("heartbeat.json"); let heartbeat_obj = HeartbeatObj { status: "test".to_string(), code: "0".to_string(), formattedMessage: FormattedMessage { lang: constants::LANG_EN_US.to_string(), message: "test".to_string(), }, }; common::report_heartbeat(expected_heartbeat_file.to_path_buf(), heartbeat_obj); let heartbeat_obj = misc_helpers::json_read_from_file::>(&expected_heartbeat_file) .unwrap(); assert_eq!(heartbeat_obj.len(), 1); assert_eq!(heartbeat_obj[0].heartbeat.status, "test".to_string()); _ = fs::remove_dir_all(&temp_test_path); } #[test] fn test_StatusState() { let mut status_state_obj = super::StatusState::new(); // Case 1: Testing Success to Transitioning status_state_obj.current_state = constants::SUCCESS_STATUS.to_string(); status_state_obj.consecutive_success_count = 2; let updated_state = status_state_obj.update_state(false); assert_eq!(updated_state, constants::TRANSITIONING_STATUS.to_string()); // Case 2: Testing Transitioning to Success status_state_obj.current_state = constants::TRANSITIONING_STATUS.to_string(); status_state_obj.consecutive_fail_count = 2; let updated_state = status_state_obj.update_state(true); assert_eq!(updated_state, constants::SUCCESS_STATUS.to_string()); // Case 3: Testing Transitioning to Error status_state_obj.current_state = constants::TRANSITIONING_STATUS.to_string(); status_state_obj.consecutive_fail_count = 19; let updated_state = status_state_obj.update_state(false); assert_eq!(updated_state, constants::ERROR_STATUS.to_string()); // Case 4: Testing Error to Transitioning status_state_obj.current_state = constants::ERROR_STATUS.to_string(); status_state_obj.consecutive_fail_count = 2; let updated_state = status_state_obj.update_state(true); assert_eq!(updated_state, constants::TRANSITIONING_STATUS.to_string()); // Case 5: Testing report transitioning for the first time status_state_obj.current_state = "".to_string(); let updated_state = status_state_obj.update_state(false); assert_eq!(updated_state, constants::TRANSITIONING_STATUS.to_string()); // Case 6: Testing max consecutive count status_state_obj.consecutive_success_count = super::StatusState::MAX_CONSECUTIVE_COUNT; status_state_obj.current_state = status_state_obj.update_state(true); assert_eq!( status_state_obj.consecutive_success_count, super::StatusState::MAX_CONSECUTIVE_COUNT ); } } GuestProxyAgent-1.0.30/proxy_agent_extension/src/constants.rs000066400000000000000000000072101500521614600245070ustar00rootroot00000000000000// 
Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT pub const PLUGIN_NAME: &str = "ProxyAgentVMExtension"; pub const PLUGIN_CONNECTION_NAME: &str = "ProxyAgentConnectionSummary"; pub const PLUGIN_STATUS_NAME: &str = "ProxyAgentStatus"; pub const PLUGIN_FAILED_AUTH_NAME: &str = "ProxyAgentFailedAuthenticationSummary"; pub const HANDLER_ENVIRONMENT_FILE: &str = "HandlerEnvironment.json"; pub const HANDLER_LOG_FILE: &str = "ProxyAgentExtension.log"; pub const SERVICE_LOG_FILE: &str = "ProxyAgentExtensionService.log"; pub const EXTENSION_SERVICE_NAME: &str = "GuestProxyAgentVMExtension"; #[cfg(not(windows))] pub const EXTENSION_PROCESS_NAME: &str = "ProxyAgentExt"; #[cfg(windows)] pub const EXTENSION_PROCESS_NAME: &str = "ProxyAgentExt.exe"; pub const EXTENSION_SERVICE_DISPLAY_NAME: &str = "Microsoft Azure GuestProxyAgent VMExtension"; pub const PROXY_AGENT_SERVICE_NAME: &str = "GuestProxyAgent"; pub const UPDATE_TAG_FILE: &str = "update.tag"; pub const ENABLE_OPERATION: &str = "Enable"; pub const LANG_EN_US: &str = "en-US"; pub const STATUS_FILE_SUFFIX: &str = "status"; pub const CONFIG_FILE_SUFFIX: &str = "settings"; pub const HEARTBEAT_FILE_SUFFIX: &str = "json"; #[cfg(windows)] pub const TRANSITIONING_STATUS: &str = "Transitioning"; #[cfg(not(windows))] pub const TRANSITIONING_STATUS: &str = "transitioning"; #[cfg(windows)] pub const ERROR_STATUS: &str = "Error"; #[cfg(not(windows))] pub const ERROR_STATUS: &str = "error"; #[cfg(windows)] pub const SUCCESS_STATUS: &str = "Success"; #[cfg(not(windows))] pub const SUCCESS_STATUS: &str = "success"; #[cfg(windows)] pub const WARNING_STATUS: &str = "Warning"; #[cfg(not(windows))] pub const WARNING_STATUS: &str = "warning"; #[cfg(windows)] pub const HEARTBEAT_READY_STATUS: &str = "Ready"; #[cfg(not(windows))] pub const HEARTBEAT_READY_STATUS: &str = "ready"; pub const CURRENT_SEQ_NO_FILE: &str = "current_seq_no.txt"; pub const VERSION: &str = "1.0"; pub const WINDOWS_SUPPORTED_VERSIONS: &str = "10.0.17763"; pub const INVALID_FILE_VERSION: &str = "0.0.0.0"; pub const SERVICE_START_RETRY_COUNT: u32 = 5; pub const STATUS_CODE_OK: i32 = 0; // Non zero exit codes pub const EXIT_CODE_HANDLER_ENV_ERR: i32 = 1; pub const EXIT_CODE_SERVICE_UPDATE_ERR: i32 = 2; pub const EXIT_CODE_SERVICE_INSTALL_ERR: i32 = 3; pub const STATUS_CODE_NOT_OK: i32 = 4; pub const EXIT_CODE_SERVICE_UNINSTALL_ERR: i32 = 5; pub const EXIT_CODE_NOT_SUPPORTED_OS_VERSION: i32 = 6; pub const EXIT_CODE_SERVICE_START_ERR: i32 = 7; pub const EXIT_CODE_SERVICE_STOP_ERR: i32 = 8; pub const EXIT_CODE_UPDATE_TO_VERSION_ENV_VAR_NOTFOUND: i32 = 9; pub const EXIT_CODE_WRITE_CURRENT_SEQ_NO_ERROR: i32 = 10; pub const MIN_SUPPORTED_OS_BUILD: u32 = 17763; pub const STATE_KEY_READ_PROXY_AGENT_STATUS_FILE: &str = "ReadProxyAgentStatusFile"; pub const STATE_KEY_FILE_VERSION: &str = "FileVersion"; pub const EBPF_CORE: &str = "EbpfCore"; pub const EBPF_EXT: &str = "NetEbpfExt"; pub const EBPF_SUBSTATUS_NAME: &str = "EbpfStatus"; pub const MAX_CONNECTION_SUMMARY_LEN: usize = 100; pub const MAX_FAILED_AUTH_SUMMARY_LEN: usize = 50; #[cfg(not(windows))] pub mod linux { pub const MIN_SUPPORTED_MARINER_OS_VERSION_MAJOR: u32 = 2; pub const MIN_SUPPORTED_UBUNTU_OS_VERSION_MAJOR: u32 = 20; pub const MIN_SUPPORTED_AZURE_LINUX_OS_VERSION_MAJOR: u32 = 3; pub const RED_HAT_OS_NAME: &str = "red hat enterprise linux"; pub const MIN_RED_HAT_OS_VERSION_MAJOR: u32 = 9; pub const ROCKY_OS_NAME: &str = "rocky linux"; pub const MIN_ROCKY_OS_VERSION_MAJOR: u32 = 9; pub const SUSE_OS_NAME: &str = "suse linux 
enterprise server"; pub const MIN_SUSE_OS_VERSION_MAJOR: u32 = 15; pub const MIN_SUSE_OS_VERSION_MINOR: u32 = 4; } GuestProxyAgent-1.0.30/proxy_agent_extension/src/error.rs000066400000000000000000000004331500521614600236240ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT #[derive(Debug, thiserror::Error)] pub enum Error { #[cfg(windows)] #[error(transparent)] WindowsService(#[from] windows_service::Error), #[error(transparent)] Io(#[from] std::io::Error), } GuestProxyAgent-1.0.30/proxy_agent_extension/src/handler_main.rs000066400000000000000000000363321500521614600251230ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT use crate::common; use crate::constants; use crate::logger; use crate::structs; use crate::ExtensionCommand; use once_cell::sync::Lazy; use proxy_agent_shared::misc_helpers; use proxy_agent_shared::version::Version; use std::fs::{self}; use std::path::{Path, PathBuf}; use std::process; use std::process::Command; use std::str; use std::time::Duration; #[cfg(windows)] use crate::windows::service_ext; #[cfg(windows)] use proxy_agent_shared::windows; #[cfg(not(windows))] use nix::sys::signal::{kill, SIGKILL}; #[cfg(not(windows))] use nix::unistd::Pid as NixPid; #[cfg(not(windows))] use proxy_agent_shared::linux; #[cfg(not(windows))] use sysinfo::{ProcessRefreshKind, RefreshKind, System, UpdateKind}; static HANDLER_ENVIRONMENT: Lazy = Lazy::new(|| { let exe_path = misc_helpers::get_current_exe_dir(); common::get_handler_environment(&exe_path) }); pub async fn program_start(command: ExtensionCommand, config_seq_no: String) { //Set up Logger instance let log_folder = HANDLER_ENVIRONMENT.logFolder.to_string(); logger::init_logger(log_folder, constants::HANDLER_LOG_FILE); logger::write(format!( "GuestProxyAgentExtension Version: {}, OS Arch: {}, OS Version: {}", misc_helpers::get_current_version(), misc_helpers::get_processor_arch(), misc_helpers::get_long_os_version() )); if !check_os_version_supported() { report_os_not_supported(config_seq_no); process::exit(constants::EXIT_CODE_NOT_SUPPORTED_OS_VERSION); } handle_command(command, config_seq_no).await; } #[cfg(windows)] fn check_windows_os_version(version: Version) -> bool { match version.build { Some(build) => { logger::write(format!("OS build version: {}", build)); build >= constants::MIN_SUPPORTED_OS_BUILD } None => false, } } fn check_os_version_supported() -> bool { #[cfg(windows)] { match windows::get_os_version() { Ok(version) => check_windows_os_version(version), Err(e) => { logger::write(format!("Error in getting OS version: {e}")); false } } } #[cfg(not(windows))] { match Version::from_string(linux::get_os_version()) { Ok(version) => check_linux_os_supported(version), Err(e) => { logger::write(format!("Error in getting OS version: {e}")); false } } } } #[cfg(not(windows))] fn check_linux_os_supported(version: Version) -> bool { let linux_type = linux::get_os_type().to_lowercase(); if linux_type.contains("ubuntu") { version.major >= constants::linux::MIN_SUPPORTED_UBUNTU_OS_VERSION_MAJOR } else if linux_type.contains("mariner") { return version.major >= constants::linux::MIN_SUPPORTED_MARINER_OS_VERSION_MAJOR; } else if linux_type.contains("azure linux") { return version.major >= constants::linux::MIN_SUPPORTED_AZURE_LINUX_OS_VERSION_MAJOR; } else if linux_type.contains(constants::linux::RED_HAT_OS_NAME) { return version.major >= constants::linux::MIN_RED_HAT_OS_VERSION_MAJOR; } else if 
linux_type.contains(constants::linux::ROCKY_OS_NAME) { return version.major >= constants::linux::MIN_ROCKY_OS_VERSION_MAJOR; } else if linux_type.contains(constants::linux::SUSE_OS_NAME) { // SUSE 15 SP4+ is supported return version.major > constants::linux::MIN_SUSE_OS_VERSION_MAJOR || (version.major == constants::linux::MIN_SUSE_OS_VERSION_MAJOR && version.minor >= constants::linux::MIN_SUSE_OS_VERSION_MINOR); } else { return false; } } fn report_os_not_supported(config_seq_no: String) { // report to status folder if the os version is not supported let status_folder = HANDLER_ENVIRONMENT.statusFolder.to_string(); let status_folder_path: PathBuf = Path::new(&status_folder).to_path_buf(); let message = format!( "OS version not supported: {}", misc_helpers::get_long_os_version() ); let status_obj = structs::StatusObj { name: constants::PLUGIN_NAME.to_string(), operation: "CheckOSVersionSupport".to_string(), configurationAppliedTime: misc_helpers::get_date_time_string(), status: constants::ERROR_STATUS.to_string(), code: constants::EXIT_CODE_NOT_SUPPORTED_OS_VERSION, formattedMessage: structs::FormattedMessage { lang: constants::LANG_EN_US.to_string(), message: message.to_string(), }, substatus: Default::default(), }; logger::write(message); common::report_status(status_folder_path, &config_seq_no, &status_obj); } fn get_update_tag_file() -> PathBuf { let exe_parent = get_exe_parent(); let update_tag_file = exe_parent.join(constants::UPDATE_TAG_FILE); update_tag_file.to_path_buf() } fn update_tag_file_exists() -> bool { let update_tag_file = get_update_tag_file(); if update_tag_file.exists() { logger::write(format!("update tag file exists: {:?}", update_tag_file)); true } else { logger::write(format!( "update tag file does not exist: {:?}", update_tag_file )); false } } fn get_exe_parent() -> PathBuf { let exe_path = misc_helpers::get_current_exe_dir(); let exe_parent = match exe_path.parent() { Some(parent) => parent, None => { logger::write("exe parent is None".to_string()); Path::new("") } }; logger::write(format!("exe parent: {:?}", exe_parent)); exe_parent.to_path_buf() } async fn handle_command(command: ExtensionCommand, config_seq_no: String) { logger::write(format!("entering handle command: {:?}", command)); let status_folder = HANDLER_ENVIRONMENT.statusFolder.to_string(); let status_folder_path: PathBuf = PathBuf::from(&status_folder); match command { ExtensionCommand::Install => install_handler(), ExtensionCommand::Uninstall => uninstall_handler(), ExtensionCommand::Enable => enable_handler(status_folder_path, config_seq_no).await, ExtensionCommand::Disable => disable_handler().await, ExtensionCommand::Reset => reset_handler(), ExtensionCommand::Update => update_handler().await, } } fn install_handler() { logger::write("Installing Handler".to_string()); #[cfg(windows)] { service_ext::install_extension_service(); } } fn uninstall_handler() { logger::write("Uninstalling Handler".to_string()); if !update_tag_file_exists() { let setup_tool = misc_helpers::path_to_string(&common::setup_tool_exe_path()); match Command::new(setup_tool).arg("uninstall").output() { Ok(output) => { match str::from_utf8(&output.stdout) { Ok(output_string) => { logger::write(format!( "uninstalling GuestProxyAgent, output: {}", output_string )); } Err(e) => { logger::write(format!("error in uninstalling GuestProxyAgent: {:?}", e)); } } match str::from_utf8(&output.stderr) { Ok(output_string) => { logger::write(format!( "output stderr for uninstall GuestProxyAgent: {}", output_string )); } Err(e) => { 
logger::write(format!("error in uninstalling GuestProxyAgent: {:?}", e)); } } } Err(e) => { logger::write(format!("error in uninstalling GuestProxyAgent: {:?}", e)); } } } } async fn enable_handler(status_folder: PathBuf, config_seq_no: String) { let exe_path = misc_helpers::get_current_exe_dir(); match common::update_current_seq_no(&config_seq_no, &exe_path) { Ok(should_report_status) => { if should_report_status { common::report_status_enable_command( status_folder.to_path_buf(), &config_seq_no, None, ); } } Err(e) => { logger::write(format!("error in updating current seq no: {:?}", e)); process::exit(constants::EXIT_CODE_WRITE_CURRENT_SEQ_NO_ERROR); } } #[cfg(windows)] { service_ext::start_extension_service().await; } #[cfg(not(windows))] { let process_running = get_linux_extension_long_running_process().is_some(); let mut count = 0; loop { if process_running { logger::write("ProxyAgentExt process running".to_string()); break; } if count > constants::SERVICE_START_RETRY_COUNT { common::report_status_enable_command( status_folder.to_path_buf(), &config_seq_no, Some(constants::ERROR_STATUS.to_string()), ); process::exit(constants::EXIT_CODE_SERVICE_START_ERR); } else { // start the process GuestProxyAgentVMExtension if process not started let exe_path = misc_helpers::get_current_exe_dir(); let service_exe_path = exe_path.join(constants::EXTENSION_PROCESS_NAME); match Command::new(service_exe_path).spawn() { Ok(child) => { let pid = child.id(); logger::write(format!( "ProxyAgentExt started with pid: {}, do not start new one.", pid )); break; } Err(e) => { logger::write(format!("error in starting ProxyAgentExt: {:?}", e)); } } } count += 1; tokio::time::sleep(Duration::from_secs(15)).await; } } if update_tag_file_exists() { let update_tag_file = get_update_tag_file(); match fs::remove_file(&update_tag_file) { Ok(_) => logger::write(format!( "update tag file removed: {:?}", update_tag_file.to_path_buf() )), Err(e) => logger::write(format!("error in removing update tag file: {:?}", e)), } } } #[cfg(not(windows))] fn get_linux_extension_long_running_process() -> Option { // check if the process GuestProxyAgentVMExtension running AND without parameters let system = System::new_with_specifics( RefreshKind::new().with_processes( ProcessRefreshKind::new() .with_cmd(UpdateKind::Always) .with_exe(UpdateKind::Always), ), ); for p in system.processes_by_exact_name(constants::EXTENSION_PROCESS_NAME) { let cmd = p.cmd(); logger::write(format!("cmd: {:?}", cmd)); if cmd.len() == 1 { logger::write(format!("ProxyAgentExt running with pid: {}", p.pid())); return Some(p.pid().as_u32() as i32); } } None } async fn disable_handler() { logger::write("Disabling Handler".to_string()); #[cfg(windows)] { service_ext::stop_extension_service().await; } #[cfg(not(windows))] { match get_linux_extension_long_running_process() { Some(pid) => { let p = NixPid::from_raw(pid); match kill(p, SIGKILL) { Ok(_) => { logger::write(format!("ProxyAgentExt process with pid: {} killed", pid)); } Err(e) => { logger::write(format!("error in killing ProxyAgentExt process: {:?}", e)); } } } None => { logger::write("ProxyAgentExt not running".to_string()); } } } } fn reset_handler() { let exe_path = misc_helpers::get_current_exe_dir(); let update_tag_file = get_update_tag_file(); let seq_no_file = exe_path.join(constants::CURRENT_SEQ_NO_FILE); match fs::remove_file(&update_tag_file) { Ok(_) => logger::write(format!( "update tag file removed: {:?}", update_tag_file.to_path_buf() )), Err(e) => logger::write(format!("error in removing 
update tag file: {:?}", e)), } match fs::remove_file(&seq_no_file) { Ok(_) => logger::write(format!( "seq no file removed: {:?}", seq_no_file.to_path_buf() )), Err(e) => logger::write(format!("error in removing seq no file: {:?}", e)), } } async fn update_handler() { #[cfg(windows)] { let version = match std::env::var("VERSION") { Ok(ver) => ver, Err(e) => { logger::write(format!("error in getting VERSION from env::var: {:?}", e)); process::exit(constants::EXIT_CODE_UPDATE_TO_VERSION_ENV_VAR_NOTFOUND); } }; let extension_dir = get_exe_parent(); let extension_dir = extension_dir.join(version); service_ext::update_extension_service(extension_dir); } let update_tag_file = get_update_tag_file(); let mut count = 0; loop { if count > constants::SERVICE_START_RETRY_COUNT { logger::write(format!( "service start retry count exceeded: {}", constants::SERVICE_START_RETRY_COUNT )); break; } else { match fs::write(&update_tag_file, misc_helpers::get_date_time_string()) { Ok(_) => { logger::write(format!( "update tag file created: {:?}", update_tag_file.to_path_buf() )); break; } Err(e) => { logger::write(format!("error in creating update tag file: {:?}", e)); } } } count += 1; tokio::time::sleep(Duration::from_secs(15)).await; } } #[cfg(test)] mod tests { #[cfg(windows)] use crate::handler_main; #[cfg(windows)] use proxy_agent_shared::version::Version; #[test] fn test_check_os_supported() { #[cfg(windows)] { let version = Version { major: 10, minor: 0, build: Some(17765), revision: None, }; assert!(handler_main::check_windows_os_version(version)); let version = Version { major: 10, minor: 0, build: Some(17762), revision: None, }; assert!(!handler_main::check_windows_os_version(version)); let version = Version { major: 10, minor: 0, build: None, revision: None, }; assert!(!handler_main::check_windows_os_version(version)); } } } GuestProxyAgent-1.0.30/proxy_agent_extension/src/linux.rs000066400000000000000000000005511500521614600236330ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT use crate::service_main; use std::time::Duration; #[cfg(not(windows))] pub async fn start_service_wait() { // start service service_main::run(); loop { // continue to sleep until the service is stopped tokio::time::sleep(Duration::from_secs(1)).await; } } GuestProxyAgent-1.0.30/proxy_agent_extension/src/linux/000077500000000000000000000000001500521614600232645ustar00rootroot00000000000000GuestProxyAgent-1.0.30/proxy_agent_extension/src/linux/HandlerManifest.json000066400000000000000000000010451500521614600272230ustar00rootroot00000000000000[{ "version": 1.0, "handlerManifest" : { "installCommand": "./install.sh", "uninstallCommand": "./uninstall.sh", "updateCommand": "./update.sh", "enableCommand": "./enable.sh", "disableCommand": "./disable.sh", "rebootAfterInstall": "false", "reportHeartbeat": "true", "resetStateCommand": "./reset.sh", "supportMultipleExtensions": "false" }, "resourceLimits": { "services": [{ "name": "azure-proxy-agent" }] } }]GuestProxyAgent-1.0.30/proxy_agent_extension/src/linux/disable.sh000077500000000000000000000002451500521614600252270ustar00rootroot00000000000000#!/bin/bash # Copyright (c) Microsoft Corporation # SPDX-License-Identifier: MIT script_dir="$(dirname "$(readlink -f "$0")")" "$script_dir/ProxyAgentExt" disable GuestProxyAgent-1.0.30/proxy_agent_extension/src/linux/enable.sh000077500000000000000000000002441500521614600250510ustar00rootroot00000000000000#!/bin/bash # Copyright (c) Microsoft Corporation # SPDX-License-Identifier: MIT 
script_dir="$(dirname "$(readlink -f "$0")")" "$script_dir/ProxyAgentExt" enable GuestProxyAgent-1.0.30/proxy_agent_extension/src/linux/install.sh000077500000000000000000000002451500521614600252720ustar00rootroot00000000000000#!/bin/bash # Copyright (c) Microsoft Corporation # SPDX-License-Identifier: MIT script_dir="$(dirname "$(readlink -f "$0")")" "$script_dir/ProxyAgentExt" install GuestProxyAgent-1.0.30/proxy_agent_extension/src/linux/reset.sh000077500000000000000000000002431500521614600247440ustar00rootroot00000000000000#!/bin/bash # Copyright (c) Microsoft Corporation # SPDX-License-Identifier: MIT script_dir="$(dirname "$(readlink -f "$0")")" "$script_dir/ProxyAgentExt" reset GuestProxyAgent-1.0.30/proxy_agent_extension/src/linux/uninstall.sh000077500000000000000000000002471500521614600256370ustar00rootroot00000000000000#!/bin/bash # Copyright (c) Microsoft Corporation # SPDX-License-Identifier: MIT script_dir="$(dirname "$(readlink -f "$0")")" "$script_dir/ProxyAgentExt" uninstall GuestProxyAgent-1.0.30/proxy_agent_extension/src/linux/update.sh000077500000000000000000000002441500521614600251050ustar00rootroot00000000000000#!/bin/bash # Copyright (c) Microsoft Corporation # SPDX-License-Identifier: MIT script_dir="$(dirname "$(readlink -f "$0")")" "$script_dir/ProxyAgentExt" update GuestProxyAgent-1.0.30/proxy_agent_extension/src/logger.rs000066400000000000000000000033641500521614600237600ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT use proxy_agent_shared::logger::{logger_manager, rolling_logger::RollingLogger, LoggerLevel}; use std::path::PathBuf; static LOGGER_KEY: tokio::sync::OnceCell = tokio::sync::OnceCell::const_new(); pub fn get_logger_key() -> String { LOGGER_KEY .get() .expect("You must set the LOGGER_KEY before this function is called") .to_string() } pub fn init_logger(log_folder: String, log_name: &str) { let logger = RollingLogger::create_new( PathBuf::from(log_folder), log_name.to_string(), 20 * 1024 * 1024, 30, ); let mut loggers = std::collections::HashMap::new(); loggers.insert(log_name.to_string(), logger); logger_manager::set_loggers(loggers, log_name.to_string()); if !LOGGER_KEY.initialized() { if let Err(e) = LOGGER_KEY.set(log_name.to_string()) { eprintln!("Failed to set logger key: {}", e); }; } } pub fn write(message: String) { logger_manager::write_log(LoggerLevel::Info, message); } #[cfg(test)] mod tests { use ctor::{ctor, dtor}; use std::env; use std::fs; const TEST_LOGGER_KEY: &str = "proxy_agent_extension_test"; fn get_temp_test_dir() -> std::path::PathBuf { let mut temp_test_path = env::temp_dir(); temp_test_path.push(TEST_LOGGER_KEY); temp_test_path } #[ctor] fn setup() { // Setup logger_manager for unit tests super::init_logger( get_temp_test_dir().to_string_lossy().to_string(), "test.log", ); } #[dtor] fn cleanup() { // clean up and ignore the clean up errors _ = fs::remove_dir_all(&get_temp_test_dir()); } } GuestProxyAgent-1.0.30/proxy_agent_extension/src/main.rs000066400000000000000000000062641500521614600234270ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT #![allow(non_snake_case)] pub mod common; pub mod constants; pub mod error; pub mod handler_main; pub mod logger; pub mod result; pub mod service_main; pub mod structs; #[cfg(not(windows))] pub mod linux; #[cfg(windows)] pub mod windows; use clap::{Parser, Subcommand}; use proxy_agent_shared::misc_helpers; use std::env; #[cfg(windows)] use std::ffi::OsString; #[cfg(windows)] use 
windows_service::{define_windows_service, service_dispatcher}; #[cfg(windows)] define_windows_service!(ffi_service_main, proxy_agent_extension_windows_service_main); // define_windows_service does not accept async function in fffi_service_main, // also it does not allow to pass tokio runtime or handle as arguments to the function. // we have to use the global variable to set the tokio runtime handle. #[cfg(windows)] static ASYNC_RUNTIME_HANDLE: tokio::sync::OnceCell = tokio::sync::OnceCell::const_new(); const CONFIG_SEQ_NO_ENV_VAR: &str = "ConfigSequenceNumber"; #[derive(Parser)] #[command()] struct Cli { /// GPA VM Extension commands #[command(subcommand)] command: Option, } #[derive(Subcommand, Debug)] pub enum ExtensionCommand { /// enable the GPA VM Extension Enable, /// disable the GPA VM Extension Disable, /// uninstall the GPA VM Extension Uninstall, /// install the GPA VM Extension Install, /// update the GPA VM Extension Update, /// reset the GPA VM Extension state Reset, } #[tokio::main(flavor = "multi_thread")] async fn main() { // set the tokio runtime handle #[cfg(windows)] ASYNC_RUNTIME_HANDLE .set(tokio::runtime::Handle::current()) .unwrap(); let cli = Cli::parse(); if let Some(command) = cli.command { // extension commands let config_seq_no = env::var(CONFIG_SEQ_NO_ENV_VAR).unwrap_or_else(|_e| "no seq no".to_string()); handler_main::program_start(command, config_seq_no).await; } else { // no arguments, start it as a service let exe_path = misc_helpers::get_current_exe_dir(); let log_folder = common::get_handler_environment(&exe_path) .logFolder .to_string(); logger::init_logger(log_folder, constants::SERVICE_LOG_FILE); common::start_event_logger().await; #[cfg(windows)] { if let Err(e) = service_dispatcher::start(constants::PLUGIN_NAME, ffi_service_main) { logger::write(format!("Failed to start the service: {}", e)); } } #[cfg(not(windows))] { linux::start_service_wait().await; } } } #[cfg(windows)] fn proxy_agent_extension_windows_service_main(args: Vec) { // Pass the tokio runtime handle here to launch the windows service. 
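// Editor's note (illustrative, not part of the original source): the pattern used in this
// file is to capture the multi-threaded Tokio runtime created by #[tokio::main] once in
// main() via ASYNC_RUNTIME_HANDLE.set(tokio::runtime::Handle::current()), and then re-enter
// that runtime from this synchronous service callback with handle.block_on(...), so no
// second runtime has to be constructed inside ffi_service_main.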
let handle = ASYNC_RUNTIME_HANDLE .get() .expect("You must provide the Tokio runtime handle before this function is called"); handle.block_on(async { if let Err(e) = service_main::windows_main::run_service(args).await { logger::write(format!("Failed to start the service: {}", e)); } }); } GuestProxyAgent-1.0.30/proxy_agent_extension/src/result.rs000066400000000000000000000002271500521614600240120ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT use crate::error::Error; pub type Result = core::result::Result; GuestProxyAgent-1.0.30/proxy_agent_extension/src/service_main.rs000066400000000000000000001206611500521614600251450ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT use crate::common; use crate::constants; use crate::logger; use crate::structs::*; use proxy_agent_shared::logger::LoggerLevel; use proxy_agent_shared::proxy_agent_aggregate_status::{ self, GuestProxyAgentAggregateStatus, ProxyConnectionSummary, }; use proxy_agent_shared::telemetry::event_logger; use proxy_agent_shared::{misc_helpers, telemetry}; use service_state::ServiceState; use std::io::Error; use std::path::PathBuf; use std::process::Command; use std::process::Output; use std::time::Duration; pub mod service_state; #[cfg(windows)] pub mod windows_main; #[cfg(windows)] use proxy_agent_shared::service; const MAX_STATE_COUNT: u32 = 120; pub fn run() { let message = format!( "============== GuestProxyAgentExtension Enabling Agent, Version: {}, OS Arch: {}, OS Version: {}", misc_helpers::get_current_version(), misc_helpers::get_processor_arch(), misc_helpers::get_long_os_version() ); telemetry::event_logger::write_event( LoggerLevel::Info, message, "run", "service_main", &logger::get_logger_key(), ); tokio::spawn({ async { monitor_thread().await; } }); tokio::spawn({ async { heartbeat_thread().await; } }); } async fn heartbeat_thread() { let exe_path = misc_helpers::get_current_exe_dir(); let handler_environment = common::get_handler_environment(&exe_path); let heartbeat_file_path: PathBuf = handler_environment.heartbeatFile.to_string().into(); let duration = Duration::from_secs(5 * 60); loop { let heartbeat_obj = HeartbeatObj { status: constants::HEARTBEAT_READY_STATUS.to_string(), code: constants::STATUS_CODE_OK.to_string(), formattedMessage: FormattedMessage { lang: constants::LANG_EN_US.to_string(), message: "Extension is running".to_string(), }, }; common::report_heartbeat(heartbeat_file_path.to_path_buf(), heartbeat_obj); tokio::time::sleep(duration).await; } } async fn monitor_thread() { let exe_path = misc_helpers::get_current_exe_dir(); let handler_environment = common::get_handler_environment(&exe_path); let status_folder_path: PathBuf = handler_environment.statusFolder.to_string().into(); let mut cache_seq_no = String::new(); let proxyagent_file_version_in_extension = get_proxy_agent_file_version_in_extension(); let mut service_state = ServiceState::default(); let mut status = StatusObj { name: constants::PLUGIN_NAME.to_string(), operation: constants::ENABLE_OPERATION.to_string(), configurationAppliedTime: misc_helpers::get_date_time_string(), code: constants::STATUS_CODE_OK, status: constants::SUCCESS_STATUS.to_string(), formattedMessage: FormattedMessage { lang: constants::LANG_EN_US.to_string(), message: "Update Proxy Agent command output successfully".to_string(), }, substatus: Default::default(), }; let mut status_state_obj = common::StatusState::new(); let logger_key: &String = &logger::get_logger_key(); let 
mut restored_in_error = false; let mut proxy_agent_update_reported: Option = None; loop { let current_seq_no = common::get_current_seq_no(&exe_path); if cache_seq_no != current_seq_no { telemetry::event_logger::write_event( LoggerLevel::Info, format!( "Current seq_no: {} does not match cached seq no {}", current_seq_no, cache_seq_no ), "monitor_thread", "service_main", logger_key, ); cache_seq_no = current_seq_no.to_string(); let proxy_service_exe_file_path = common::get_proxy_agent_service_path(); let proxyagent_service_file_version = match misc_helpers::get_proxy_agent_version(&proxy_service_exe_file_path) { Ok(version) => version, Err(e) => { logger::write(format!( "Failed to get GuestProxyAgent version from file {} with error: {}", misc_helpers::path_to_string(&proxy_service_exe_file_path), e )); // return empty string if failed to get version "".to_string() } }; if proxyagent_file_version_in_extension != proxyagent_service_file_version { // Call setup tool to install or update proxy agent service telemetry::event_logger::write_event( LoggerLevel::Info, format!("Version mismatch between file versions. ProxyAgentService File Version: {}, ProxyAgent in Extension File Version: {}", proxyagent_service_file_version, proxyagent_file_version_in_extension), "monitor_thread", "service_main", logger_key, ); let setup_tool = misc_helpers::path_to_string(&common::setup_tool_exe_path()); backup_proxyagent(&setup_tool); let mut install_command = Command::new(&setup_tool); // Set the current directory to the directory of the current executable for the setup tool to work properly install_command.current_dir(misc_helpers::get_current_exe_dir()); let proxy_agent_update_command = telemetry::span::SimpleSpan::new(); proxy_agent_update_reported = Some(telemetry::span::SimpleSpan::new()); install_command.arg("install"); let output = install_command.output(); report_proxy_agent_service_status( output, exe_path.join("status"), &cache_seq_no, &mut status, &mut status_state_obj, ); // Time taken to update proxy agent service proxy_agent_update_command.write_event( "Update Proxy Agent command completed", "monitor_thread", "service_main", logger_key, ); } } // Read proxy agent aggregate status file and get ProxyAgentAggregateStatus object report_proxy_agent_aggregate_status( &proxyagent_file_version_in_extension, &mut status, &mut status_state_obj, &mut restored_in_error, &mut service_state, ); // Time taken to report success for proxy agent service after update if status.status == *constants::SUCCESS_STATUS { if let Some(proxy_agent_update_reported) = proxy_agent_update_reported.as_ref() { proxy_agent_update_reported.write_event( "Proxy Agent Service is updated and reporting successful status", "monitor_thread", "service_main", logger_key, ); } proxy_agent_update_reported = None; } #[cfg(windows)] { report_ebpf_status(&mut status); } common::report_status( status_folder_path.to_path_buf(), &cache_seq_no.to_string(), &status, ); tokio::time::sleep(Duration::from_secs(15)).await; } } fn write_state_event( state_key: &str, state_value: &str, message: String, method_name: &str, module_name: &str, logger_key: &str, service_state: &mut ServiceState, ) { if service_state.update_service_state_entry(state_key, state_value, MAX_STATE_COUNT) { event_logger::write_event( LoggerLevel::Info, message, method_name, module_name, logger_key, ); } } #[cfg(windows)] fn report_ebpf_status(status_obj: &mut StatusObj) { match service::check_service_installed(constants::EBPF_CORE) { (true, message) => { 
logger::write(message.to_string()); match service::check_service_installed(constants::EBPF_EXT) { (true, message) => { logger::write(message.to_string()); status_obj.substatus = { let mut substatus = status_obj.substatus.clone(); substatus.push(SubStatus { name: constants::EBPF_SUBSTATUS_NAME.to_string(), status: constants::SUCCESS_STATUS.to_string(), code: constants::STATUS_CODE_OK, formattedMessage: FormattedMessage { lang: constants::LANG_EN_US.to_string(), message: "Ebpf Drivers successfully queried.".to_string(), }, }); substatus }; } (false, message) => { logger::write(message.to_string()); status_obj.substatus = { let mut substatus = status_obj.substatus.clone(); substatus.push(SubStatus { name: constants::EBPF_SUBSTATUS_NAME.to_string(), status: constants::ERROR_STATUS.to_string(), code: constants::STATUS_CODE_NOT_OK, formattedMessage: FormattedMessage { lang: constants::LANG_EN_US.to_string(), message: format!( "Ebpf Driver: {} unsuccessfully queried.", constants::EBPF_EXT ), }, }); substatus }; } } } (false, message) => { logger::write(message.to_string()); status_obj.substatus = { let mut substatus = status_obj.substatus.clone(); substatus.push(SubStatus { name: constants::EBPF_SUBSTATUS_NAME.to_string(), status: constants::ERROR_STATUS.to_string(), code: constants::STATUS_CODE_NOT_OK, formattedMessage: FormattedMessage { lang: constants::LANG_EN_US.to_string(), message: format!( "Ebpf Driver: {} unsuccessfully queried.", constants::EBPF_CORE ), }, }); substatus }; } } } fn backup_proxyagent(setup_tool: &String) { match Command::new(setup_tool).arg("backup").output() { Ok(output) => { let event_level = if output.status.success() { LoggerLevel::Info } else { LoggerLevel::Warn }; telemetry::event_logger::write_event( event_level, format!( "Backup Proxy Agent command finished with stdoutput: {}, stderr: {}", String::from_utf8_lossy(&output.stdout), String::from_utf8_lossy(&output.stderr) ), "backup_proxyagent", "service_main", &logger::get_logger_key(), ); } Err(e) => { telemetry::event_logger::write_event( LoggerLevel::Info, format!("Error in running Backup Proxy Agent command: {}", e), "backup_proxyagent", "service_main", &logger::get_logger_key(), ); } } } fn report_proxy_agent_aggregate_status( proxyagent_file_version_in_extension: &String, status: &mut StatusObj, status_state_obj: &mut common::StatusState, restored_in_error: &mut bool, service_state: &mut ServiceState, ) { let aggregate_status_file_path = proxy_agent_aggregate_status::get_proxy_agent_aggregate_status_folder() .join(proxy_agent_aggregate_status::PROXY_AGENT_AGGREGATE_STATUS_FILE_NAME); let proxy_agent_aggregate_status_top_level: GuestProxyAgentAggregateStatus; match misc_helpers::json_read_from_file::( &aggregate_status_file_path, ) { Ok(ok) => { write_state_event( constants::STATE_KEY_READ_PROXY_AGENT_STATUS_FILE, constants::SUCCESS_STATUS, "Successfully read proxy agent aggregate status file".to_string(), "report_proxy_agent_aggregate_status", "service_main", &logger::get_logger_key(), service_state, ); proxy_agent_aggregate_status_top_level = ok; extension_substatus( proxy_agent_aggregate_status_top_level, proxyagent_file_version_in_extension, status, status_state_obj, service_state, ); } Err(e) => { let error_message = format!("Error in reading proxy agent aggregate status file: {}", e); write_state_event( constants::STATE_KEY_READ_PROXY_AGENT_STATUS_FILE, constants::ERROR_STATUS, error_message.to_string(), "report_proxy_agent_aggregate_status", "service_main", &logger::get_logger_key(), service_state, ); 
status.status = status_state_obj.update_state(false); status.configurationAppliedTime = misc_helpers::get_date_time_string(); status.substatus = { vec![ SubStatus { name: constants::PLUGIN_CONNECTION_NAME.to_string(), status: constants::TRANSITIONING_STATUS.to_string(), code: constants::STATUS_CODE_NOT_OK, formattedMessage: FormattedMessage { lang: constants::LANG_EN_US.to_string(), message: error_message.to_string(), }, }, SubStatus { name: constants::PLUGIN_STATUS_NAME.to_string(), status: constants::TRANSITIONING_STATUS.to_string(), code: constants::STATUS_CODE_NOT_OK, formattedMessage: FormattedMessage { lang: constants::LANG_EN_US.to_string(), message: error_message.to_string(), }, }, SubStatus { name: constants::PLUGIN_FAILED_AUTH_NAME.to_string(), status: constants::TRANSITIONING_STATUS.to_string(), code: constants::STATUS_CODE_NOT_OK, formattedMessage: FormattedMessage { lang: constants::LANG_EN_US.to_string(), message: error_message.to_string(), }, }, ] }; } } if !(*restored_in_error) { *restored_in_error = restore_purge_proxyagent(status); } } fn extension_substatus( proxy_agent_aggregate_status_top_level: GuestProxyAgentAggregateStatus, proxyagent_file_version_in_extension: &String, status: &mut StatusObj, status_state_obj: &mut common::StatusState, service_state: &mut ServiceState, ) { let proxy_agent_aggregate_status_obj = proxy_agent_aggregate_status_top_level.proxyAgentStatus; let proxy_agent_aggregate_status_file_version = proxy_agent_aggregate_status_obj.version.to_string(); if proxy_agent_aggregate_status_file_version != *proxyagent_file_version_in_extension { status.status = status_state_obj.update_state(false); let version_mismatch_message = format!("Proxy agent aggregate status file version {} does not match proxy agent file version in extension {}", proxy_agent_aggregate_status_file_version, proxyagent_file_version_in_extension); write_state_event( constants::STATE_KEY_FILE_VERSION, constants::ERROR_STATUS, version_mismatch_message.to_string(), "extension_substatus", "service_main", &logger::get_logger_key(), service_state, ); status.configurationAppliedTime = misc_helpers::get_date_time_string(); status.substatus = { vec![ SubStatus { name: constants::PLUGIN_CONNECTION_NAME.to_string(), status: constants::TRANSITIONING_STATUS.to_string(), code: constants::STATUS_CODE_NOT_OK, formattedMessage: FormattedMessage { lang: constants::LANG_EN_US.to_string(), message: version_mismatch_message.to_string(), }, }, SubStatus { name: constants::PLUGIN_STATUS_NAME.to_string(), status: constants::TRANSITIONING_STATUS.to_string(), code: constants::STATUS_CODE_NOT_OK, formattedMessage: FormattedMessage { lang: constants::LANG_EN_US.to_string(), message: version_mismatch_message.to_string(), }, }, SubStatus { name: constants::PLUGIN_FAILED_AUTH_NAME.to_string(), status: constants::TRANSITIONING_STATUS.to_string(), code: constants::STATUS_CODE_NOT_OK, formattedMessage: FormattedMessage { lang: constants::LANG_EN_US.to_string(), message: version_mismatch_message.to_string(), }, }, ] }; } // Success Status and report to status file for CRP to read from else { let substatus_proxy_agent_message = match serde_json::to_string(&proxy_agent_aggregate_status_obj) { Ok(proxy_agent_aggregate_status) => proxy_agent_aggregate_status, Err(e) => { let error_message = format!("Error in serializing proxy agent aggregate status: {}", e); logger::write(error_message.to_string()); error_message } }; let substatus_proxy_agent_connection_message: String; if !proxy_agent_aggregate_status_top_level 
.proxyConnectionSummary .is_empty() { let proxy_agent_aggregate_connection_status_obj = get_top_proxy_connection_summary( proxy_agent_aggregate_status_top_level .proxyConnectionSummary .clone(), constants::MAX_CONNECTION_SUMMARY_LEN, ); match serde_json::to_string(&proxy_agent_aggregate_connection_status_obj) { Ok(proxy_agent_aggregate_connection_status) => { substatus_proxy_agent_connection_message = proxy_agent_aggregate_connection_status; } Err(e) => { let error_message = format!( "Error in serializing proxy agent aggregate connection status: {}", e ); logger::write(error_message.to_string()); substatus_proxy_agent_connection_message = error_message; } } } else { logger::write("proxy connection summary is empty".to_string()); substatus_proxy_agent_connection_message = "proxy connection summary is empty".to_string(); } let substatus_failed_auth_message: String; if !proxy_agent_aggregate_status_top_level .failedAuthenticateSummary .is_empty() { let proxy_agent_aggregate_failed_auth_status_obj = get_top_proxy_connection_summary( proxy_agent_aggregate_status_top_level .failedAuthenticateSummary .clone(), constants::MAX_FAILED_AUTH_SUMMARY_LEN, ); match serde_json::to_string(&proxy_agent_aggregate_failed_auth_status_obj) { Ok(proxy_agent_aggregate_failed_auth_status) => { substatus_failed_auth_message = proxy_agent_aggregate_failed_auth_status; } Err(e) => { let error_message = format!( "Error in serializing proxy agent aggregate failed auth status: {}", e ); logger::write(error_message.to_string()); substatus_failed_auth_message = error_message; } } } else { logger::write("proxy failed auth summary is empty".to_string()); substatus_failed_auth_message = "proxy failed auth summary is empty".to_string(); } status.substatus = { vec![ SubStatus { name: constants::PLUGIN_CONNECTION_NAME.to_string(), status: constants::SUCCESS_STATUS.to_string(), code: constants::STATUS_CODE_OK, formattedMessage: FormattedMessage { lang: constants::LANG_EN_US.to_string(), message: substatus_proxy_agent_connection_message.to_string(), }, }, SubStatus { name: constants::PLUGIN_STATUS_NAME.to_string(), status: constants::SUCCESS_STATUS.to_string(), code: constants::STATUS_CODE_OK, formattedMessage: FormattedMessage { lang: constants::LANG_EN_US.to_string(), message: substatus_proxy_agent_message.to_string(), }, }, SubStatus { name: constants::PLUGIN_FAILED_AUTH_NAME.to_string(), status: constants::SUCCESS_STATUS.to_string(), code: constants::STATUS_CODE_OK, formattedMessage: FormattedMessage { lang: constants::LANG_EN_US.to_string(), message: substatus_failed_auth_message.to_string(), }, }, ] }; status.status = status_state_obj.update_state(true); status.configurationAppliedTime = misc_helpers::get_date_time_string(); write_state_event( constants::STATE_KEY_FILE_VERSION, constants::SUCCESS_STATUS, substatus_proxy_agent_connection_message.to_string(), "extension_substatus", "service_main", &logger::get_logger_key(), service_state, ); } } fn get_top_proxy_connection_summary( mut summary: Vec, max_count: usize, ) -> Vec { summary.sort_by(|a, b| a.count.cmp(&b.count)); let len = summary.len(); if len > max_count { summary = summary.split_off(len - max_count); } summary } fn restore_purge_proxyagent(status: &mut StatusObj) -> bool { let setup_tool = misc_helpers::path_to_string(&common::setup_tool_exe_path()); if status.status == *constants::ERROR_STATUS { let output = Command::new(&setup_tool).arg("restore").output(); match output { Ok(output) => { let event_level = if output.status.success() { LoggerLevel::Info } else 
{ LoggerLevel::Warn }; telemetry::event_logger::write_event( event_level, format!( "Restore Proxy Agent command finished with stdoutput: {}, stderr: {}", String::from_utf8_lossy(&output.stdout), String::from_utf8_lossy(&output.stderr) ), "restore_purge_proxyagent", "service_main", &logger::get_logger_key(), ); } Err(e) => { telemetry::event_logger::write_event( LoggerLevel::Info, format!("Error in running Restore Proxy Agent command: {}", e), "restore_purge_proxyagent", "service_main", &logger::get_logger_key(), ); } } true } else if status.status == *constants::SUCCESS_STATUS { let output = Command::new(setup_tool).arg("purge").output(); match output { Ok(output) => { let event_level = if output.status.success() { LoggerLevel::Info } else { LoggerLevel::Warn }; telemetry::event_logger::write_event( event_level, format!( "Purge Proxy Agent command finished with stdoutput: {}, stderr: {}", String::from_utf8_lossy(&output.stdout), String::from_utf8_lossy(&output.stderr) ), "restore_purge_proxyagent", "service_main", &logger::get_logger_key(), ); } Err(e) => { telemetry::event_logger::write_event( LoggerLevel::Info, format!("Error in running Purge Proxy Agent command: {}", e), "restore_purge_proxyagent", "service_main", &logger::get_logger_key(), ); } } return true; } else { return false; } } fn report_proxy_agent_service_status( output: Result, status_folder: PathBuf, seq_no: &str, status: &mut StatusObj, status_state_obj: &mut common::StatusState, ) { match output { Ok(output) => { logger::write(format!( "Update Proxy Agent command output: {}", String::from_utf8_lossy(&output.stdout) )); if output.status.success() { logger::write("Update Proxy Agent command output successfully".to_string()); status.configurationAppliedTime = misc_helpers::get_date_time_string(); status.code = constants::STATUS_CODE_OK; status.status = status_state_obj.update_state(false); status.formattedMessage.message = "Update Proxy Agent command output successfully".to_string(); status.substatus = Default::default(); common::report_status(status_folder, seq_no, status); } else { telemetry::event_logger::write_event( LoggerLevel::Info, format!( "Update Proxy Agent command failed with error: {}", String::from_utf8_lossy(&output.stderr) ), "report_proxy_agent_service_status", "service_main", &logger::get_logger_key(), ); status.configurationAppliedTime = misc_helpers::get_date_time_string(); status.code = output .status .code() .unwrap_or(constants::STATUS_CODE_NOT_OK); status.status = status_state_obj.update_state(false); status.formattedMessage.message = "Update Proxy Agent command failed with error".to_string(); status.substatus = Default::default(); common::report_status(status_folder, seq_no, status); } } Err(e) => { telemetry::event_logger::write_event( LoggerLevel::Info, format!("Error in running Update Proxy Agent command: {}", e), "report_proxy_agent_service_status", "service_main", &logger::get_logger_key(), ); // report proxyagent service update failed state status.configurationAppliedTime = misc_helpers::get_date_time_string(); status.code = constants::STATUS_CODE_NOT_OK; status.status = status_state_obj.update_state(false); status.formattedMessage.message = format!("Update Proxy Agent command failed with error: {}", e); status.substatus = Default::default(); common::report_status(status_folder, seq_no, status); } } } fn get_proxy_agent_file_version_in_extension() -> String { // File version of proxy agent service already downloaded by VM Agent let path = common::get_proxy_agent_exe_path(); match 
misc_helpers::get_proxy_agent_version(&path) { Ok(version) => version, Err(e) => { logger::write(format!( "Failed to get GuestProxyAgent version from file {} with error: {}", misc_helpers::path_to_string(&path), e )); // return empty string if failed to get version "".to_string() } } } // test report status #[cfg(test)] mod tests { use crate::constants; use crate::structs::*; use proxy_agent_shared::misc_helpers; use proxy_agent_shared::proxy_agent_aggregate_status::*; #[test] #[cfg(windows)] fn report_proxy_agent_service_status() { use std::env; use std::fs; use std::io::Write; use std::path::PathBuf; use std::process::Command; // Create temp directory for status folder let mut temp_test_path = env::temp_dir(); temp_test_path.push("test_status_file"); //Clean up and ignore the clean up errors _ = fs::remove_dir_all(&temp_test_path); _ = misc_helpers::try_create_folder(&temp_test_path); let status_folder: PathBuf = temp_test_path.join("status"); let mut test_good = temp_test_path.clone(); test_good.push("test.ps1"); let mut file = fs::File::create(&test_good).unwrap(); file.write_all(b"\"Hello World\"").unwrap(); let output = Command::new("powershell.exe").args(&test_good).output(); //Set the config_seq_no value let seq_no = "0"; let expected_status_file: &PathBuf = &temp_test_path.join("status").join("0.status"); let mut status = StatusObj { name: constants::PLUGIN_NAME.to_string(), operation: constants::ENABLE_OPERATION.to_string(), configurationAppliedTime: misc_helpers::get_date_time_string(), code: constants::STATUS_CODE_OK, status: constants::SUCCESS_STATUS.to_string(), formattedMessage: FormattedMessage { lang: constants::LANG_EN_US.to_string(), message: "Update Proxy Agent command output successfully".to_string(), }, substatus: Default::default(), }; let mut status_state_obj = super::common::StatusState::new(); super::report_proxy_agent_service_status( output, status_folder, &seq_no, &mut status, &mut status_state_obj, ); let handler_status = misc_helpers::json_read_from_file::>(&expected_status_file) .unwrap(); assert_eq!(handler_status.len(), 1); assert_eq!(handler_status[0].status.code, 0); let status_folder_bad = temp_test_path.join("status_bad"); let mut test_bad = temp_test_path.clone(); test_bad.push("&?@(random)?.ps1"); let output = Command::new("powershell.exe").args(&test_bad).output(); let expected_status_file_bad: &PathBuf = &temp_test_path.join("status_bad").join("0.status"); super::report_proxy_agent_service_status( output, status_folder_bad, &seq_no, &mut status, &mut status_state_obj, ); let handler_status_bad = misc_helpers::json_read_from_file::>(expected_status_file_bad) .unwrap(); assert_eq!(handler_status_bad.len(), 1); assert_eq!(handler_status_bad[0].status.code, 1); //Clean up and ignore the clean up errors _ = fs::remove_dir_all(&temp_test_path); } #[test] fn test_proxyagent_service_success_status() { let proxy_agent_status_obj = ProxyAgentStatus { version: "1.0.0".to_string(), status: OverallState::SUCCESS, monitorStatus: ProxyAgentDetailStatus { status: ModuleState::RUNNING, message: "test".to_string(), states: None, }, keyLatchStatus: ProxyAgentDetailStatus { status: ModuleState::RUNNING, message: "test".to_string(), states: None, }, ebpfProgramStatus: ProxyAgentDetailStatus { status: ModuleState::RUNNING, message: "test".to_string(), states: None, }, proxyListenerStatus: ProxyAgentDetailStatus { status: ModuleState::RUNNING, message: "test".to_string(), states: None, }, telemetryLoggerStatus: ProxyAgentDetailStatus { status: ModuleState::RUNNING, 
message: "test".to_string(), states: None, }, proxyConnectionsCount: 1, }; let proxy_connection_summary_obj = ProxyConnectionSummary { userName: "test".to_string(), ip: "test".to_string(), port: 1, processCmdLine: "test".to_string(), responseStatus: "test".to_string(), count: 1, processFullPath: Some("test".to_string()), userGroups: Some(vec!["test".to_string()]), }; let proxy_failedAuthenticateSummary_obj = ProxyConnectionSummary { userName: "test".to_string(), ip: "test".to_string(), port: 1, processCmdLine: "test".to_string(), responseStatus: "test".to_string(), count: 1, processFullPath: Some("test".to_string()), userGroups: Some(vec!["test".to_string()]), }; let toplevel_status = GuestProxyAgentAggregateStatus { timestamp: misc_helpers::get_date_time_string(), proxyAgentStatus: proxy_agent_status_obj, proxyConnectionSummary: vec![proxy_connection_summary_obj], failedAuthenticateSummary: vec![proxy_failedAuthenticateSummary_obj], }; let mut status = StatusObj { name: constants::PLUGIN_NAME.to_string(), operation: constants::ENABLE_OPERATION.to_string(), configurationAppliedTime: misc_helpers::get_date_time_string(), code: constants::STATUS_CODE_OK, status: constants::SUCCESS_STATUS.to_string(), formattedMessage: FormattedMessage { lang: constants::LANG_EN_US.to_string(), message: "Update Proxy Agent command output successfully".to_string(), }, substatus: Default::default(), }; let mut status_state_obj = super::common::StatusState::new(); let proxyagent_file_version_in_extension: &String = &"1.0.0".to_string(); let mut service_state = super::service_state::ServiceState::default(); super::extension_substatus( toplevel_status, proxyagent_file_version_in_extension, &mut status, &mut status_state_obj, &mut service_state, ); assert_eq!(status.status, constants::SUCCESS_STATUS.to_string()); } #[tokio::test] #[cfg(windows)] async fn test_report_ebpf_status() { let mut status = StatusObj { name: constants::PLUGIN_NAME.to_string(), operation: constants::ENABLE_OPERATION.to_string(), configurationAppliedTime: misc_helpers::get_date_time_string(), code: constants::STATUS_CODE_OK, status: constants::SUCCESS_STATUS.to_string(), formattedMessage: FormattedMessage { lang: constants::LANG_EN_US.to_string(), message: "Update Proxy Agent command output successfully".to_string(), }, substatus: { vec![ SubStatus { name: constants::PLUGIN_CONNECTION_NAME.to_string(), status: constants::SUCCESS_STATUS.to_string(), code: constants::STATUS_CODE_OK, formattedMessage: FormattedMessage { lang: constants::LANG_EN_US.to_string(), message: "test".to_string(), }, }, SubStatus { name: constants::PLUGIN_STATUS_NAME.to_string(), status: constants::SUCCESS_STATUS.to_string(), code: constants::STATUS_CODE_OK, formattedMessage: FormattedMessage { lang: constants::LANG_EN_US.to_string(), message: "test".to_string(), }, }, SubStatus { name: constants::PLUGIN_FAILED_AUTH_NAME.to_string(), status: constants::SUCCESS_STATUS.to_string(), code: constants::STATUS_CODE_OK, formattedMessage: FormattedMessage { lang: constants::LANG_EN_US.to_string(), message: "test".to_string(), }, }, ] }, }; super::report_ebpf_status(&mut status); assert_eq!( status.substatus[0].name, constants::PLUGIN_CONNECTION_NAME.to_string() ); assert_eq!( status.substatus[1].name, constants::PLUGIN_STATUS_NAME.to_string() ); assert_eq!( status.substatus[2].name, constants::PLUGIN_FAILED_AUTH_NAME.to_string() ); assert_eq!( status.substatus[3].name, constants::EBPF_SUBSTATUS_NAME.to_string() ); } #[tokio::test] async fn get_top_proxy_connection_summary_tests() 
{ let mut summary = Vec::new(); let mut proxy_connection_summary_obj = ProxyConnectionSummary { userName: "test".to_string(), ip: "test".to_string(), port: 1, processCmdLine: "test".to_string(), responseStatus: "test".to_string(), count: 1, processFullPath: Some("test".to_string()), userGroups: Some(vec!["test".to_string()]), }; summary.push(proxy_connection_summary_obj.clone()); proxy_connection_summary_obj.count = 5; summary.push(proxy_connection_summary_obj.clone()); proxy_connection_summary_obj.count = 2; summary.push(proxy_connection_summary_obj.clone()); proxy_connection_summary_obj.count = 4; summary.push(proxy_connection_summary_obj.clone()); proxy_connection_summary_obj.count = 2; summary.push(proxy_connection_summary_obj.clone()); let max_len = 3; let result = super::get_top_proxy_connection_summary(summary, max_len); assert_eq!(result.len(), max_len); assert_eq!(result[0].count, 2); // lowest count assert_eq!(result[1].count, 4); // 2nd highest count assert_eq!(result[2].count, 5); // 3rd highest count } } GuestProxyAgent-1.0.30/proxy_agent_extension/src/service_main/000077500000000000000000000000001500521614600245715ustar00rootroot00000000000000GuestProxyAgent-1.0.30/proxy_agent_extension/src/service_main/service_state.rs000066400000000000000000000037541500521614600300100ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT use std::collections::HashMap; /// Global state variables for the extension service. #[derive(Clone, Default)] pub struct ServiceState { state_map: HashMap, } impl ServiceState { /// Update the service state entry with the given key and value. /// If the state value is the same as the previous value, increment the count. /// If the count reaches the maximum value, update the state value and reset the count. /// Return true if the state value is updated, false otherwise. /// # Arguments /// * `service_state` - The service state to update. /// * `state_key` - The key of the state entry. /// * `state_value` - The value of the state entry. /// * `max_count` - The maximum count before reset the state value count. /// # Returns /// * `bool` - True if the state value is updated or state value count reset, false otherwise. 
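    /// # Example
    /// A minimal illustrative sketch (not from the original docs); it only assumes the
    /// `Default` construction already derived for `ServiceState`:
    /// ```ignore
    /// let mut state = ServiceState::default();
    /// // The first observation of a key is always stored and reported (returns true).
    /// assert!(state.update_service_state_entry("read_status_file", "Success", 3));
    /// // Repeating the same value only bumps the internal count and is suppressed...
    /// assert!(!state.update_service_state_entry("read_status_file", "Success", 3));
    /// assert!(!state.update_service_state_entry("read_status_file", "Success", 3));
    /// // ...until max_count is reached, which resets the count and reports again.
    /// assert!(state.update_service_state_entry("read_status_file", "Success", 3));
    /// // A changed value is reported immediately.
    /// assert!(state.update_service_state_entry("read_status_file", "Error", 3));
    /// ```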
pub fn update_service_state_entry( &mut self, state_key: &str, state_value: &str, max_count: u32, ) -> bool { match self.state_map.get_mut(state_key) { Some(entry) => { let value = entry.0.to_string(); let count = entry.1; // State value changed or max count reached if value != state_value || count >= max_count { // Update the state value and reset the count to 1 self.state_map .insert(state_key.to_string(), (state_value.to_string(), 1)); true } else { self.state_map .insert(state_key.to_string(), (state_value.to_string(), count + 1)); false } } None => { self.state_map .insert(state_key.to_string(), (state_value.to_string(), 1)); true } } } } GuestProxyAgent-1.0.30/proxy_agent_extension/src/service_main/windows_main.rs000066400000000000000000000057131500521614600276430ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT use crate::common; use crate::constants; use crate::logger; use crate::result::Result; use crate::service_main; use std::ffi::OsString; use std::time::Duration; use windows_service::service::{ ServiceControl, ServiceControlAccept, ServiceExitCode, ServiceState, ServiceStatus, ServiceType, }; use windows_service::service_control_handler::{ self, ServiceControlHandlerResult, ServiceStatusHandle, }; // The private global variable to store the windows service status handle. // It is used to set the windows service status to Running and Stopped. // Its event handler does not support async + await, which it is not allow to get it via async mpsc. static SERVICE_STATUS_HANDLE: tokio::sync::OnceCell = tokio::sync::OnceCell::const_new(); pub async fn run_service(_args: Vec) -> Result<()> { let event_handler = move |control_event| -> ServiceControlHandlerResult { match control_event { ServiceControl::Stop => { common::stop_event_logger(); if let Some(status_handle) = SERVICE_STATUS_HANDLE.get() { let stop_state = ServiceStatus { service_type: ServiceType::OWN_PROCESS, current_state: ServiceState::Stopped, controls_accepted: ServiceControlAccept::STOP, exit_code: ServiceExitCode::Win32(0), checkpoint: 0, wait_hint: Duration::default(), process_id: None, }; if let Err(e) = status_handle.set_service_status(stop_state) { logger::write(format!("Failed to set service status to Stopped: {}", e)); } } else { // workaround to stop the service by exiting the process logger::write("Force exit the process to stop the service.".to_string()); std::process::exit(0); } ServiceControlHandlerResult::NoError } ServiceControl::Interrogate => ServiceControlHandlerResult::NoError, _ => ServiceControlHandlerResult::NotImplemented, } }; // start service service_main::run(); // set the service state to Running let status_handle = service_control_handler::register(constants::PLUGIN_NAME, event_handler)?; let running_state = ServiceStatus { service_type: ServiceType::OWN_PROCESS, current_state: ServiceState::Running, controls_accepted: ServiceControlAccept::STOP, exit_code: ServiceExitCode::Win32(0), checkpoint: 0, wait_hint: Duration::default(), process_id: None, }; status_handle.set_service_status(running_state)?; // set the windows service status handle SERVICE_STATUS_HANDLE.set(status_handle).unwrap(); Ok(()) } GuestProxyAgent-1.0.30/proxy_agent_extension/src/structs.rs000066400000000000000000000163061500521614600242100ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT use serde::{Deserialize, Serialize}; #[derive(Serialize, Deserialize, Debug)] #[allow(non_snake_case)] pub struct HandlerEnvironment { pub 
logFolder: String, pub statusFolder: String, pub configFolder: String, pub heartbeatFile: String, pub deploymentid: Option, pub rolename: Option, pub instance: Option, pub hostResolverAddress: Option, pub eventsFolder: String, } impl Clone for HandlerEnvironment { fn clone(&self) -> Self { HandlerEnvironment { logFolder: self.logFolder.clone(), statusFolder: self.statusFolder.clone(), configFolder: self.configFolder.clone(), heartbeatFile: self.heartbeatFile.clone(), deploymentid: self.deploymentid.clone(), rolename: self.rolename.clone(), instance: self.instance.clone(), hostResolverAddress: self.hostResolverAddress.clone(), eventsFolder: self.eventsFolder.clone(), } } } #[derive(Serialize, Deserialize)] #[allow(non_snake_case)] pub struct Handler { pub handlerEnvironment: HandlerEnvironment, } #[derive(Serialize, Deserialize)] #[allow(non_snake_case)] #[derive(Clone)] pub struct StatusObj { pub name: String, pub operation: String, pub configurationAppliedTime: String, pub status: String, pub code: i32, pub formattedMessage: FormattedMessage, pub substatus: Vec, } #[derive(Serialize, Deserialize)] #[allow(non_snake_case)] #[derive(Clone)] pub struct FormattedMessage { pub lang: String, pub message: String, } #[derive(Serialize, Deserialize)] #[allow(non_snake_case)] #[derive(Clone)] pub struct SubStatus { pub name: String, pub status: String, pub code: i32, pub formattedMessage: FormattedMessage, } impl Default for SubStatus { fn default() -> Self { SubStatus { name: "".to_string(), status: "".to_string(), code: 0, formattedMessage: FormattedMessage { lang: "".to_string(), message: "".to_string(), }, } } } #[derive(Serialize, Deserialize)] #[allow(non_snake_case)] pub struct TopLevelStatus { pub version: String, pub timestampUTC: String, pub status: StatusObj, } #[derive(Serialize, Deserialize)] #[allow(non_snake_case)] pub struct TopLevelHeartbeat { pub version: String, pub heartbeat: HeartbeatObj, } #[derive(Serialize, Deserialize)] #[allow(non_snake_case)] pub struct HeartbeatObj { pub status: String, pub code: String, pub formattedMessage: FormattedMessage, } #[cfg(test)] mod tests { #[test] fn handler_env_test() { // test handler env, init, serialize, deserialize and compare original and deserialized //Create raw handler environment json string let json_handler: &str = r#"[{ "version": 1.0, "handlerEnvironment": { "logFolder": "log", "configFolder": "config", "statusFolder": "status", "heartbeatFile": "heartbeat.json", "deploymentid": "000", "rolename": "test_rolename", "instance": "test_instance", "hostResolverAddress": "000", "eventsFolder": "test_kusto" } }]"#; //Deserialize handler environment json string let handler_env_obj: Vec = serde_json::from_str(json_handler).unwrap(); let handlerEnvironment = handler_env_obj[0].handlerEnvironment.clone(); assert_eq!( "log".to_string(), handlerEnvironment.logFolder, "logFolder mismatch" ); assert_eq!( "config".to_string(), handlerEnvironment.configFolder, "configFolder mismatch" ); assert_eq!( "status".to_string(), handlerEnvironment.statusFolder, "statusFolder mismatch" ); assert_eq!( "heartbeat.json".to_string(), handlerEnvironment.heartbeatFile, "heartbeatFile mismatch" ); assert_eq!( "000".to_string(), handlerEnvironment.deploymentid.unwrap(), "deploymentid mismatch" ); assert_eq!( "test_rolename".to_string(), handlerEnvironment.rolename.unwrap(), "rolename mismatch" ); } #[test] fn status_obj_test() { // test status obj, init, serialize, deserialize and compare original and deserialized //Create raw status obj json string let 
json_status: &str = r#"{ "version": "1.0", "timestampUTC": "2021-01-01T00:00:00.000Z", "status": { "name": "test_status_name", "operation": "test_operation", "configurationAppliedTime": "2021-01-01T00:00:00.000Z", "code": 0, "status": "test_status", "formattedMessage": { "lang": "en-US", "message": "test_formatted_message" }, "substatus": [{ "name": "test_substatus_name", "status": "test_substatus", "code": 0, "formattedMessage": { "lang": "en-US", "message": "test_substatus_formatted_message" } }] } }"#; //Deserialize status obj json string let status_obj: super::TopLevelStatus = serde_json::from_str(json_status).unwrap(); let status = status_obj.status; assert_eq!("1.0".to_string(), status_obj.version, "version mismatch"); assert_eq!( "2021-01-01T00:00:00.000Z".to_string(), status_obj.timestampUTC, "timestampUTC mismatch" ); assert_eq!("test_status_name".to_string(), status.name, "name mismatch"); assert_eq!(0, status.code, "code mismatch"); assert_eq!("test_status".to_string(), status.status, "status mismatch"); } #[test] fn heartbeat_obj_test() { // test heartbeat obj, init, serialize, deserialize and compare original and deserialized //Create raw heartbeat obj json string let json_heartbeat: &str = r#"{ "version": "1.0", "heartbeat": { "status": "test_status", "code": "0", "formattedMessage": { "lang": "en-US", "message": "test_formatted_message" } } }"#; //Deserialize heartbeat obj json string let heartbeat_obj: super::TopLevelHeartbeat = serde_json::from_str(json_heartbeat).unwrap(); let heartbeat = heartbeat_obj.heartbeat; assert_eq!("1.0".to_string(), heartbeat_obj.version, "version mismatch"); assert_eq!( "test_status".to_string(), heartbeat.status, "status mismatch" ); assert_eq!("0".to_string(), heartbeat.code, "code mismatch"); } } GuestProxyAgent-1.0.30/proxy_agent_extension/src/windows.rs000066400000000000000000000001341500521614600241630ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT pub mod service_ext; GuestProxyAgent-1.0.30/proxy_agent_extension/src/windows/000077500000000000000000000000001500521614600236175ustar00rootroot00000000000000GuestProxyAgent-1.0.30/proxy_agent_extension/src/windows/HandlerManifest.json000066400000000000000000000013501500521614600275550ustar00rootroot00000000000000[{ "version": 1.0, "handlerManifest" : { "installCommand": "install.cmd", "uninstallCommand": "uninstall.cmd", "updateCommand": "update.cmd", "enableCommand": "enable.cmd", "disableCommand": "disable.cmd", "rebootAfterInstall": "false", "reportHeartbeat": "true", "resetStateCommand": "reset.cmd", "supportMultipleExtensions": "false" }, "resourceLimits": { "processes": [{ "name": "ProxyAgentExt", "cpuQuotaPercentage": 5, "memoryQuotaMB": 75 }, { "name": "GuestProxyAgent", "cpuQuotaPercentage": 2, "memoryQuotaMB": 17 }] } }]GuestProxyAgent-1.0.30/proxy_agent_extension/src/windows/disable.cmd000066400000000000000000000002051500521614600257040ustar00rootroot00000000000000REM Copyright (c) Microsoft Corporation REM SPDX-License-Identifier: MIT set script_dir=%~dp0 %script_dir%\ProxyAgentExt.exe disableGuestProxyAgent-1.0.30/proxy_agent_extension/src/windows/enable.cmd000066400000000000000000000002041500521614600255260ustar00rootroot00000000000000REM Copyright (c) Microsoft Corporation REM SPDX-License-Identifier: MIT set script_dir=%~dp0 %script_dir%\ProxyAgentExt.exe enableGuestProxyAgent-1.0.30/proxy_agent_extension/src/windows/install.cmd000066400000000000000000000002051500521614600257470ustar00rootroot00000000000000REM Copyright (c) 
Microsoft Corporation REM SPDX-License-Identifier: MIT set script_dir=%~dp0 %script_dir%\ProxyAgentExt.exe installGuestProxyAgent-1.0.30/proxy_agent_extension/src/windows/reset.cmd000066400000000000000000000002031500521614600254210ustar00rootroot00000000000000REM Copyright (c) Microsoft Corporation REM SPDX-License-Identifier: MIT set script_dir=%~dp0 %script_dir%\ProxyAgentExt.exe resetGuestProxyAgent-1.0.30/proxy_agent_extension/src/windows/service_ext.rs000066400000000000000000000111471500521614600265110ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT use crate::constants; use crate::logger; use proxy_agent_shared::{misc_helpers, service}; use std::path::PathBuf; use std::process; pub fn install_extension_service() { //Get executable file path let exe_root_path: PathBuf = misc_helpers::get_current_exe_dir(); let service_exe_path = exe_root_path.join(constants::EXTENSION_PROCESS_NAME); match service::install_service( constants::EXTENSION_SERVICE_NAME, constants::EXTENSION_SERVICE_DISPLAY_NAME, vec![], service_exe_path, ) { Ok(_service) => { logger::write(format!( "Service {} successfully installed", constants::EXTENSION_SERVICE_NAME )); } Err(e) => { logger::write(format!( "Service {} install failed: {}", constants::EXTENSION_SERVICE_NAME, e )); eprintln!( "Service {} install failed: {}", constants::EXTENSION_SERVICE_NAME, e ); process::exit(constants::EXIT_CODE_SERVICE_INSTALL_ERR); } } } pub async fn uninstall_extension_service() { // Stop and Delete the service given match service::stop_and_delete_service(constants::EXTENSION_SERVICE_NAME).await { Ok(_service) => { logger::write(format!( "Service {} successfully uninstalled", constants::EXTENSION_SERVICE_NAME )); } Err(e) => { logger::write(format!( "Service {} uninstall failed: {}", constants::EXTENSION_SERVICE_NAME, e )); eprintln!( "Service {} update failed: {}", constants::EXTENSION_SERVICE_NAME, e ); process::exit(constants::EXIT_CODE_SERVICE_UNINSTALL_ERR); } } } pub async fn start_extension_service() { match service::start_service( constants::EXTENSION_SERVICE_NAME, constants::SERVICE_START_RETRY_COUNT, std::time::Duration::from_secs(15), ) .await { Ok(_) => { logger::write(format!( "Service {} successfully started", constants::EXTENSION_SERVICE_NAME )); } Err(e) => { logger::write(format!( "Service {} start failed: {}", constants::EXTENSION_SERVICE_NAME, e )); eprintln!( "Service {} start failed: {}", constants::EXTENSION_SERVICE_NAME, e ); process::exit(constants::EXIT_CODE_SERVICE_START_ERR); } } } pub async fn stop_extension_service() { match service::stop_service(constants::EXTENSION_SERVICE_NAME).await { Ok(_service) => { logger::write(format!( "Service {} successfully stopped", constants::EXTENSION_SERVICE_NAME )); } Err(e) => { logger::write(format!( "Service {} stop failed: {}", constants::EXTENSION_SERVICE_NAME, e )); eprintln!( "Service {} stop failed: {}", constants::EXTENSION_SERVICE_NAME, e ); process::exit(constants::EXIT_CODE_SERVICE_STOP_ERR); } } } pub fn update_extension_service(exe_root_path: PathBuf) { let service_exe_path = exe_root_path.join(constants::EXTENSION_PROCESS_NAME); logger::write(format!( "Updating service {} with exe_path {}", constants::EXTENSION_SERVICE_NAME, misc_helpers::path_to_string(&service_exe_path) )); match service::update_service( constants::EXTENSION_SERVICE_NAME, constants::EXTENSION_SERVICE_DISPLAY_NAME, vec![], service_exe_path, ) { Ok(_service) => { logger::write(format!( "Service {} successfully updated", 
constants::EXTENSION_SERVICE_NAME )); } Err(e) => { logger::write(format!( "Service {} update failed: {}", constants::EXTENSION_SERVICE_NAME, e )); eprintln!( "Service {} update failed: {}", constants::EXTENSION_SERVICE_NAME, e ); process::exit(constants::EXIT_CODE_SERVICE_UPDATE_ERR); } } } GuestProxyAgent-1.0.30/proxy_agent_extension/src/windows/uninstall.cmd000066400000000000000000000002071500521614600263140ustar00rootroot00000000000000REM Copyright (c) Microsoft Corporation REM SPDX-License-Identifier: MIT set script_dir=%~dp0 %script_dir%\ProxyAgentExt.exe uninstallGuestProxyAgent-1.0.30/proxy_agent_extension/src/windows/update.cmd000066400000000000000000000002041500521614600255620ustar00rootroot00000000000000REM Copyright (c) Microsoft Corporation REM SPDX-License-Identifier: MIT set script_dir=%~dp0 %script_dir%\ProxyAgentExt.exe updateGuestProxyAgent-1.0.30/proxy_agent_setup/000077500000000000000000000000001500521614600204625ustar00rootroot00000000000000GuestProxyAgent-1.0.30/proxy_agent_setup/Cargo.toml000066400000000000000000000012051500521614600224100ustar00rootroot00000000000000[package] name = "proxy_agent_setup" version = "1.0.30" edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] proxy_agent_shared = { path ="../proxy_agent_shared"} clap = { version = "4.5.17", features =["derive"] } # Command Line Argument Parser thiserror = "1.0.64" tokio = { version = "1", features = ["rt", "rt-multi-thread", "time", "macros"] } [target.'cfg(windows)'.build-dependencies] winres = "0.1.12" # Rust Windows resource helper to add file version static_vcruntime = "2.0.0" # Statically link the VCRuntime when using the MSVC toolchain GuestProxyAgent-1.0.30/proxy_agent_setup/build.rs000066400000000000000000000003561500521614600221330ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT fn main() { #[cfg(windows)] { static_vcruntime::metabuild(); let res = winres::WindowsResource::new(); res.compile().unwrap(); } } GuestProxyAgent-1.0.30/proxy_agent_setup/src/000077500000000000000000000000001500521614600212515ustar00rootroot00000000000000GuestProxyAgent-1.0.30/proxy_agent_setup/src/args.rs000066400000000000000000000033621500521614600225570ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT use clap::{Parser, Subcommand, ValueEnum}; use std::fmt::{Display, Formatter}; #[derive(Parser)] #[command()] pub(crate) struct Cli { /// GPA VM Extension commands #[command(subcommand)] pub command: Command, } #[derive(Subcommand)] pub(crate) enum Command { /// backup the GPA service Backup, /// restore the GPA service Restore { #[arg(default_value_t = true)] delete_backup: bool, }, /// uninstall the GPA service Uninstall { #[arg(default_value_t = UninstallMode::Service)] uninstall_mode: UninstallMode, }, /// install the GPA VM service Install, /// purge the backup GPA service files Purge, } #[derive(ValueEnum, Clone, Debug, PartialEq)] pub(crate) enum UninstallMode { Service, Package, } impl Display for Cli { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { write!(f, "{}", self.command) } } impl Display for Command { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { match self { Command::Backup => write!(f, "backup"), Command::Restore { delete_backup } => { write!(f, "restore delete_backup={}", delete_backup) } Command::Uninstall { uninstall_mode } => write!(f, "uninstall {}", uninstall_mode), Command::Install => write!(f, 
"install"), Command::Purge => write!(f, "purge"), } } } impl Display for UninstallMode { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { match self { UninstallMode::Service => write!(f, "service"), UninstallMode::Package => write!(f, "package"), } } } GuestProxyAgent-1.0.30/proxy_agent_setup/src/backup.rs000066400000000000000000000005341500521614600230660ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT use crate::setup; use std::path::PathBuf; pub fn proxy_agent_backup_folder() -> PathBuf { let path = setup::proxy_agent_folder_in_setup(); path.join("Backup") } pub fn proxy_agent_backup_package_folder() -> PathBuf { proxy_agent_backup_folder().join("Package") } GuestProxyAgent-1.0.30/proxy_agent_setup/src/error.rs000066400000000000000000000002701500521614600227470ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT #[derive(Debug, thiserror::Error)] pub enum Error { #[error(transparent)] Io(#[from] std::io::Error), } GuestProxyAgent-1.0.30/proxy_agent_setup/src/linux.rs000066400000000000000000000100251500521614600227540ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT #![cfg(not(windows))] use crate::{backup, logger, result::Result, running}; use proxy_agent_shared::misc_helpers; use std::{fs, path::PathBuf}; const SERVICE_CONFIG_FILE_NAME: &str = "azure-proxy-agent.service"; const CONFIG_FILE: &str = "proxy-agent.json"; const EBPF_FILE: &str = "ebpf_cgroup.o"; const CONFIG_PATH: &str = "/etc/azure/proxy-agent.json"; const EBPF_PATH: &str = "/usr/lib/azure-proxy-agent/ebpf_cgroup.o"; pub fn setup_service(service_name: &str, service_file_dir: PathBuf) -> Result { copy_service_config_file(service_name, service_file_dir) } fn copy_service_config_file(service_name: &str, service_file_dir: PathBuf) -> Result { let service_config_name = format!("{}.service", service_name); let src_config_file_path = service_file_dir.join(&service_config_name); let dst_config_file_path = PathBuf::from(proxy_agent_shared::linux::SERVICE_CONFIG_FOLDER_PATH) .join(&service_config_name); fs::copy(src_config_file_path, dst_config_file_path).map_err(Into::into) } fn backup_service_config_file(backup_folder: PathBuf) { let backup_service_file = backup_folder.join(SERVICE_CONFIG_FILE_NAME); match fs::copy( PathBuf::from(proxy_agent_shared::linux::SERVICE_CONFIG_FOLDER_PATH) .join(SERVICE_CONFIG_FILE_NAME), &backup_service_file, ) { Ok(_) => { logger::write(format!( "Copied service config file to {:?}", backup_service_file )); } Err(e) => { logger::write(format!( "Failed to copy service config file to {:?}, error: {:?}", backup_service_file, e )); } } } fn copy_file(src_file: PathBuf, dst_file: PathBuf) { if let Some(p) = dst_file.parent() { if let Err(e) = misc_helpers::try_create_folder(p) { logger::write(format!("Failed to create folder {:?}, error: {:?}", p, e)); } } match fs::copy(&src_file, &dst_file) { Ok(_) => { logger::write(format!("Copied file {:?} to {:?}", src_file, dst_file)); } Err(e) => { logger::write(format!( "Failed to copy file {:?} to {:?}, error: {:?}", src_file, dst_file, e )); } } } fn delete_file(file_to_be_delete: PathBuf) { match fs::remove_file(&file_to_be_delete) { Ok(_) => { logger::write(format!("Deleted file {:?}", file_to_be_delete)); } Err(e) => { logger::write(format!( "Failed to delete file {:?}, error: {:?}", file_to_be_delete, e )); } } } // copy azure-proxy-agent, proxy-agent.json, ebpf_cgroup.o, service config files to backup 
folder pub fn backup_files() { let backup_folder = backup::proxy_agent_backup_package_folder(); copy_file(PathBuf::from(CONFIG_PATH), backup_folder.join(CONFIG_FILE)); copy_file(PathBuf::from(EBPF_PATH), backup_folder.join(EBPF_FILE)); copy_file( running::proxy_agent_running_folder("").join("azure-proxy-agent"), backup_folder.join("azure-proxy-agent"), ); backup_service_config_file(backup::proxy_agent_backup_folder()); } // copy azure-proxy-agent, proxy-agent.json, ebpf_cgroup.o to different destination folders pub fn copy_files(src_folder: PathBuf) { let dst_folder = crate::running::proxy_agent_running_folder(""); copy_file( src_folder.join("azure-proxy-agent"), dst_folder.join("azure-proxy-agent"), ); copy_file(src_folder.join(CONFIG_FILE), PathBuf::from(CONFIG_PATH)); copy_file(src_folder.join(EBPF_FILE), PathBuf::from(EBPF_PATH)); } pub fn delete_files() { let proxy_agent_running_folder = crate::running::proxy_agent_running_folder("azure-proxy-agent"); delete_file(proxy_agent_running_folder.join("azure-proxy-agent")); delete_file(PathBuf::from(crate::linux::CONFIG_PATH)); delete_file(PathBuf::from(crate::linux::EBPF_PATH)); } GuestProxyAgent-1.0.30/proxy_agent_setup/src/linux/000077500000000000000000000000001500521614600224105ustar00rootroot00000000000000GuestProxyAgent-1.0.30/proxy_agent_setup/src/linux/azure-proxy-agent.service000066400000000000000000000024241500521614600273750ustar00rootroot00000000000000[Unit] Description=Microsoft Azure GuestProxyAgent After=local-fs.target Before=network-pre.target Wants=network-pre.target [Service] Type=simple ExecStart=/usr/sbin/azure-proxy-agent Restart=always RestartSec=5 ProtectSystem=strict LogsDirectory=azure-proxy-agent StateDirectory=azure-proxy-agent RestrictNamespaces=~user RestrictNamespaces=~pid RestrictNamespaces=~net RestrictNamespaces=~uts RestrictNamespaces=~mnt CapabilityBoundingSet=~CAP_LEASE CapabilityBoundingSet=~CAP_MKNOD RestrictNamespaces=~cgroup RestrictSUIDSGID=yes RestrictNamespaces=~ipc ProtectHostname=yes CapabilityBoundingSet=~CAP_CHOWN CAP_FSETID SETFCAP CapabilityBoundingSet=~CAP_SETUID CAP_SETGID CAP_SETPCAP ProtectHome=tmpfs ProtectClock=yes NoNewPrivileges=yes DevicePolicy=closed DeviceAllow=/dev/console w IPAddressAllow=127.0.0.1 168.63.129.16 169.254.169.254 IPAddressDeny=any CapabilityBoundingSet=~CAP_SYS_MODULE CapabilityBoundingSet=~CAP_SYS_TTY_CONFIG CapabilityBoundingSet=~CAP_SYS_BOOT CapabilityBoundingSet=~CAP_SYS_CHROOT SystemCallFilter=~@clock SystemCallFilter=~@cpu-emulation SystemCallFilter=~@module SystemCallFilter=~@mount SystemCallFilter=~@obsolete SystemCallFilter=~@raw-io SystemCallFilter=~@reboot SystemCallFilter=~@resources SystemCallFilter=~@swap [Install] WantedBy=multi-user.target GuestProxyAgent-1.0.30/proxy_agent_setup/src/logger.rs000066400000000000000000000014721500521614600231020ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT use proxy_agent_shared::{ logger::{logger_manager, rolling_logger::RollingLogger, LoggerLevel}, misc_helpers, }; use std::path::PathBuf; const LOGGER_KEY: &str = "setup.log"; pub fn init_logger() { force_init_logger(misc_helpers::get_current_exe_dir(), LOGGER_KEY); } fn force_init_logger(log_folder: PathBuf, log_name: &str) { let logger = RollingLogger::create_new(log_folder, log_name.to_string(), 20 * 1024 * 1024, 30); let mut loggers = std::collections::HashMap::new(); loggers.insert(log_name.to_string(), logger); logger_manager::set_loggers(loggers, log_name.to_string()); } pub fn write(message: String) { 
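    // Echo the message to stdout and also append it to the rolling "setup.log" file
    // registered in init_logger().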
println!("{}", message); logger_manager::log(LOGGER_KEY.to_string(), LoggerLevel::Info, message); } GuestProxyAgent-1.0.30/proxy_agent_setup/src/main.rs000066400000000000000000000230301500521614600225410ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT mod args; pub mod backup; pub mod error; pub mod logger; pub mod result; pub mod running; pub mod setup; #[cfg(not(windows))] mod linux; use clap::Parser; use proxy_agent_shared::misc_helpers; use proxy_agent_shared::service; use std::process; use std::time::Duration; use std::{fs, path::PathBuf}; #[cfg(windows)] const SERVICE_NAME: &str = "GuestProxyAgent"; const SERVICE_DISPLAY_NAME: &str = "Microsoft Azure Guest Proxy Agent"; #[cfg(not(windows))] const SERVICE_NAME: &str = "azure-proxy-agent"; #[tokio::main] async fn main() { logger::init_logger(); let cli = args::Cli::parse(); logger::write(format!( "\r\n\r\n============== ProxyAgent Setup Tool ({}) is starting with args: {} ==============", misc_helpers::get_current_version(), cli )); match cli.command { args::Command::Backup => { backup_proxy_agent(); } args::Command::Restore { delete_backup } => { if !check_backup_exists() { logger::write("Backup check failed, skip the restore operation.".to_string()); return; } stop_service().await; let proxy_agent_target_folder = restore_proxy_agent(); setup_service( proxy_agent_target_folder, backup::proxy_agent_backup_folder(), ) .await; if delete_backup { delete_backup_folder(); } } args::Command::Uninstall { uninstall_mode } => { let proxy_agent_running_folder = uninstall_service().await; if uninstall_mode == args::UninstallMode::Package { delete_package(proxy_agent_running_folder); } } args::Command::Purge => { delete_backup_folder(); } args::Command::Install => { stop_service().await; let proxy_agent_target_folder = copy_proxy_agent(); setup_service( proxy_agent_target_folder, misc_helpers::get_current_exe_dir(), ) .await; } } } fn copy_proxy_agent() -> PathBuf { let src_folder = setup::proxy_agent_folder_in_setup(); let dst_folder = running::proxy_agent_version_target_folder(&setup::proxy_agent_exe_in_setup()); #[cfg(windows)] { copy_proxy_agent_files(src_folder, dst_folder.to_path_buf()); } #[cfg(not(windows))] { linux::copy_files(src_folder); } dst_folder } fn backup_proxy_agent() { #[cfg(windows)] { copy_proxy_agent_files( running::proxy_agent_running_folder(SERVICE_NAME), backup::proxy_agent_backup_package_folder(), ); } #[cfg(not(windows))] { linux::backup_files(); } } fn restore_proxy_agent() -> PathBuf { let src_folder = backup::proxy_agent_backup_package_folder(); let dst_folder = running::proxy_agent_version_target_folder(&setup::proxy_agent_exe_path(&src_folder)); #[cfg(windows)] { copy_proxy_agent_files(src_folder, dst_folder.to_path_buf()); } #[cfg(not(windows))] { linux::copy_files(src_folder); } dst_folder } #[cfg(windows)] fn copy_proxy_agent_files(src_folder: PathBuf, dst_folder: PathBuf) { match misc_helpers::try_create_folder(&dst_folder) { Ok(_) => {} Err(e) => { logger::write(format!( "Failed to create folder {:?}, error: {:?}", dst_folder, e )); } } match misc_helpers::get_files(&src_folder) { Ok(files) => { for file in files { let file_name = misc_helpers::get_file_name(&file); let dst_file = dst_folder.join(&file_name); match fs::copy(&file, &dst_file) { Ok(_) => { logger::write(format!("Copied {:?} to {:?}", file, dst_file)); } Err(e) => { logger::write(format!( "Failed to copy {:?} to {:?}, error: {:?}", file, dst_file, e )); } } } } Err(e) => { logger::write(format!( 
"Failed to get files from {:?}, error: {:?}", src_folder, e )); } } } async fn stop_service() { match service::stop_service(SERVICE_NAME).await { Ok(_) => { logger::write(format!("Stopped service {} successfully", SERVICE_NAME)); } Err(e) => { logger::write(format!( "Stopped service {} failed, error: {:?}", SERVICE_NAME, e )); } } } async fn setup_service(proxy_agent_target_folder: PathBuf, _service_config_folder_path: PathBuf) { #[cfg(windows)] { // delete the existing proxy agent service folder let proxy_agent_running_folder = running::proxy_agent_running_folder(SERVICE_NAME); if proxy_agent_running_folder.exists() && proxy_agent_running_folder != proxy_agent_target_folder { delete_folder(proxy_agent_running_folder); } } #[cfg(not(windows))] { match linux::setup_service(SERVICE_NAME, _service_config_folder_path) { Ok(_) => { logger::write(format!("Setup service {} successfully", SERVICE_NAME)); } Err(e) => { logger::write(format!( "Setup service {} failed, error: {:?}", SERVICE_NAME, e )); process::exit(1); } } } match service::install_service( SERVICE_NAME, SERVICE_DISPLAY_NAME, vec!["EbpfCore", "NetEbpfExt"], setup::proxy_agent_exe_path(&proxy_agent_target_folder), ) { Ok(_) => { logger::write(format!("Install service {} successfully", SERVICE_NAME)); } Err(e) => { logger::write(format!( "Install service {} failed, error: {:?}", SERVICE_NAME, e )); process::exit(1); } } #[cfg(windows)] { // check if eBPF setup script exists, if exist then try launch the eBPF setup scripts let ebpf_setup_script_file = setup::ebpf_setup_script_file(); if ebpf_setup_script_file.exists() && ebpf_setup_script_file.is_file() { let setup_script_file_str = misc_helpers::path_to_string(&ebpf_setup_script_file); match misc_helpers::execute_command( "powershell.exe", vec![ "-ExecutionPolicy", "Bypass", "-File", &setup_script_file_str, ], 1, ) { Ok(output) => { logger::write(format!( "ebpf_setup: invoked script file '{}' with result: '{}'.", setup_script_file_str, output.message() )); } Err(e) => { logger::write(format!( "ebpf_setup: failed to invoke script file '{}', error: '{:?}'.", setup_script_file_str, e )); } } } } match service::start_service(SERVICE_NAME, 5, Duration::from_secs(15)).await { Ok(_) => { logger::write(format!("Service {} start successfully", SERVICE_NAME)); } Err(e) => { logger::write(format!( "Service {} start failed, error: {:?}", SERVICE_NAME, e )); process::exit(1); } } logger::write(format!("Service {} start successfully", SERVICE_NAME)); } fn check_backup_exists() -> bool { let proxy_agent_exe = setup::proxy_agent_exe_path(&backup::proxy_agent_backup_package_folder()); if !proxy_agent_exe.exists() { logger::write(format!( "GuestProxyAgent ({:?}) does not exists.", proxy_agent_exe )); return false; } true } async fn uninstall_service() -> PathBuf { let proxy_agent_running_folder = running::proxy_agent_running_folder(SERVICE_NAME); match service::stop_and_delete_service(SERVICE_NAME).await { Ok(_) => { logger::write(format!("Uninstall service {} successfully", SERVICE_NAME)); } Err(e) => { logger::write(format!( "Uninstall service {} failed, error: {:?}", SERVICE_NAME, e )); process::exit(1); } } proxy_agent_running_folder } fn delete_package(_proxy_agent_running_folder: PathBuf) { #[cfg(windows)] { delete_folder(_proxy_agent_running_folder); } #[cfg(not(windows))] { linux::delete_files(); } } fn delete_folder(folder_to_be_delete: PathBuf) { match fs::remove_dir_all(&folder_to_be_delete) { Ok(_) => { logger::write(format!("Deleted folder {:?}", folder_to_be_delete)); } Err(e) => { 
logger::write(format!( "Failed to delete folder {:?}, error: {:?}", folder_to_be_delete, e )); } } } fn delete_backup_folder() { let backup_folder = backup::proxy_agent_backup_folder(); delete_folder(backup_folder); } GuestProxyAgent-1.0.30/proxy_agent_setup/src/result.rs000066400000000000000000000002271500521614600231360ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT use crate::error::Error; pub type Result = core::result::Result; GuestProxyAgent-1.0.30/proxy_agent_setup/src/running.rs000066400000000000000000000034711500521614600233040ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT use crate::logger; use proxy_agent_shared::misc_helpers; use std::path::{Path, PathBuf}; #[cfg(windows)] use proxy_agent_shared::service; pub fn proxy_agent_running_folder(_service_name: &str) -> PathBuf { let path; #[cfg(windows)] { path = match service::query_service_executable_path(_service_name).parent() { Some(p) => p.to_path_buf(), None => proxy_agent_parent_folder().join("Package"), }; } #[cfg(not(windows))] { path = PathBuf::from(proxy_agent_shared::linux::EXE_FOLDER_PATH); } path } pub fn proxy_agent_parent_folder() -> PathBuf { #[cfg(windows)] { let path = misc_helpers::resolve_env_variables("%SYSTEMDRIVE%\\WindowsAzure\\ProxyAgent") .unwrap_or("C:\\WindowsAzure\\ProxyAgent".to_string()); PathBuf::from(path) } #[cfg(not(windows))] { panic!("Not implemented") } } pub fn proxy_agent_version_target_folder(proxy_agent_exe: &Path) -> PathBuf { let proxy_agent_version = match misc_helpers::get_proxy_agent_version(proxy_agent_exe) { Ok(v) => v, Err(e) => { // This should not happen, if failed to get version, we should not proceed logger::write(format!( "Failed to get proxy agent version with error: {}", e )); panic!("Failed to get proxy agent version with error: {}", e); } }; logger::write(format!("Proxy agent version: {}", &proxy_agent_version)); #[cfg(windows)] { let path = proxy_agent_parent_folder(); path.join(format!("Package_{}", proxy_agent_version)) } #[cfg(not(windows))] { PathBuf::from(proxy_agent_shared::linux::EXE_FOLDER_PATH) } } GuestProxyAgent-1.0.30/proxy_agent_setup/src/setup.rs000066400000000000000000000015411500521614600227600ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT use proxy_agent_shared::misc_helpers; use std::path::{Path, PathBuf}; pub fn proxy_agent_folder_in_setup() -> PathBuf { let path: PathBuf = misc_helpers::get_current_exe_dir(); path.join("ProxyAgent") } pub fn proxy_agent_exe_in_setup() -> PathBuf { proxy_agent_exe_path(&proxy_agent_folder_in_setup()) } pub fn proxy_agent_exe_path(proxy_agent_package_dir: &Path) -> PathBuf { #[cfg(windows)] { proxy_agent_package_dir.join("GuestProxyAgent.exe") } #[cfg(not(windows))] { proxy_agent_package_dir.join("azure-proxy-agent") } } fn ebpf_folder() -> PathBuf { let path: PathBuf = misc_helpers::get_current_exe_dir(); path.join("eBPF-For-Windows") } pub fn ebpf_setup_script_file() -> PathBuf { ebpf_folder().join("setup.ps1") } GuestProxyAgent-1.0.30/proxy_agent_shared/000077500000000000000000000000001500521614600205705ustar00rootroot00000000000000GuestProxyAgent-1.0.30/proxy_agent_shared/Cargo.toml000066400000000000000000000024261500521614600225240ustar00rootroot00000000000000[package] name = "proxy_agent_shared" version = "1.0.30" edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] concurrent-queue = 
"2.1.0" # for event queue once_cell = "1.17.0" # use Lazy time = { version = "0.3.30", features = ["formatting"] } thread-id = "4.0.0" serde = "1.0.152" serde_derive = "1.0.152" serde_json = "1.0.91" # json Deserializer regex = "1.11" # match file name thiserror = "1.0.64" tokio = { version = "1", features = ["rt", "macros", "sync", "time"] } log = { version = "0.4.26", features = ["std"] } ctor = "0.3.6" # used for test setup and clean up [target.'cfg(windows)'.dependencies] windows-service = "0.7.0" # windows NT service winreg = "0.11.0" # windows reg read/write [target.'cfg(windows)'.dependencies.windows-sys] version = "0.42.0" features = [ "Win32_Foundation", "Win32_Networking_WinSock", "Win32_System_IO", "Win32_Security", "Win32_System_WindowsProgramming", "Win32_Security_Authentication_Identity", "Win32_System_Diagnostics_Debug", "Win32_System_SystemInformation", "Win32_Storage_FileSystem", ] [target.'cfg(not(windows))'.dependencies] os_info = "3.7.0" # read Linux OS version and archGuestProxyAgent-1.0.30/proxy_agent_shared/src/000077500000000000000000000000001500521614600213575ustar00rootroot00000000000000GuestProxyAgent-1.0.30/proxy_agent_shared/src/error.rs000066400000000000000000000045751500521614600230710ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT #[derive(Debug, thiserror::Error)] pub enum Error { // windows_service::Error is a custom error type from the windows-service crate // it does not display the IO error message, so we need to add it manually #[cfg(windows)] #[error("{0}: {1}")] WindowsService(windows_service::Error, std::io::Error), #[error(transparent)] Io(#[from] std::io::Error), #[error(transparent)] Json(#[from] serde_json::Error), #[error("Failed to create regex with error: {0}")] Regex(#[from] regex::Error), #[cfg(windows)] #[error("WindowsApi '{0}' failed with error: {1}")] WindowsApi(String, std::io::Error), #[error("{0}")] ParseVersion(ParseVersionErrorType), #[error("{0} command: {1}")] Command(CommandErrorType, String), } #[derive(Debug, thiserror::Error)] pub enum ParseVersionErrorType { #[error("Invalid version string '{0}'")] InvalidString(String), #[error("Cannot read Major build from {0}")] MajorBuild(String), #[error("Cannot read Minor build from {0}")] MinorBuild(String), } #[derive(Debug, thiserror::Error)] pub enum CommandErrorType { #[error("Findmnt")] Findmnt, #[error("{0}")] CommandName(String), } #[cfg(test)] mod test { use super::{CommandErrorType, Error, ParseVersionErrorType}; use std::fs; #[test] fn error_formatting_test() { let mut error: Error = fs::metadata("file.txt").map_err(Into::into).unwrap_err(); let expected_err = if cfg!(windows) { "The system cannot find the file specified. 
(os error 2)" } else { "No such file or directory (os error 2)" }; assert_eq!(error.to_string(), expected_err); error = regex::Regex::new(r"abc(").map_err(Into::into).unwrap_err(); assert!(error .to_string() .contains("Failed to create regex with error: regex parse error:")); error = Error::ParseVersion(ParseVersionErrorType::MajorBuild("1.5.0".to_string())); assert_eq!(error.to_string(), "Cannot read Major build from 1.5.0"); error = Error::Command( CommandErrorType::Findmnt, format!("Failed with exit code: {}", 5), ); assert_eq!( error.to_string(), "Findmnt command: Failed with exit code: 5" ); } } GuestProxyAgent-1.0.30/proxy_agent_shared/src/lib.rs000066400000000000000000000004561500521614600225000ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT pub mod error; pub mod logger; pub mod misc_helpers; pub mod proxy_agent_aggregate_status; pub mod result; pub mod service; pub mod telemetry; pub mod version; #[cfg(windows)] pub mod windows; #[cfg(not(windows))] pub mod linux; GuestProxyAgent-1.0.30/proxy_agent_shared/src/linux.rs000066400000000000000000000126421500521614600230710ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT use crate::error::{CommandErrorType, Error}; use crate::logger::logger_manager; use crate::misc_helpers; use crate::result::Result; use once_cell::sync::Lazy; use os_info::Info; use serde_derive::{Deserialize, Serialize}; use std::path::PathBuf; use std::{fs, str}; pub const SERVICE_CONFIG_FOLDER_PATH: &str = "/usr/lib/systemd/system/"; pub const EXE_FOLDER_PATH: &str = "/usr/sbin"; pub const OS_RELEASE_PATH: &str = "/etc/os-release"; pub const OS_VERSION: &str = "VERSION_ID="; pub const OS_NAME: &str = "NAME="; #[derive(Serialize, Deserialize)] struct FileMount { filesystems: Vec, } #[derive(Serialize, Deserialize)] struct FileSystem { target: String, source: String, fstype: String, options: String, } static OS_INFO: Lazy = Lazy::new(os_info::get); pub fn get_os_version() -> String { let linux_type = OS_INFO.os_type().to_string().to_lowercase(); if linux_type == "linux" { match fs::read_to_string(OS_RELEASE_PATH) { Ok(output) => { for line in output.lines() { if line.starts_with(OS_VERSION) { let version = line .trim_start_matches(OS_VERSION) .trim_matches('"') .to_string(); return version; } } } Err(e) => { let message = format!( "Failed to read os-release file in get_os_version(): {} with error: {}", OS_RELEASE_PATH, e ); logger_manager::write_warn(message); return "Unknown".to_string(); } } } OS_INFO.version().to_string() } pub fn get_long_os_version() -> String { format!("Linux:{}-{}", get_os_type(), get_os_version()) } pub fn get_os_type() -> String { let linux_type = OS_INFO.os_type().to_string().to_lowercase(); if linux_type == "linux" { match fs::read_to_string(OS_RELEASE_PATH) { Ok(output) => { for line in output.lines() { if line.starts_with(OS_NAME) { let name = line .trim_start_matches(OS_NAME) .trim_matches('"') .to_string(); return name; } } } Err(e) => { let message = format!( "Failed to read os-release file in get_os_type(): {} with error: {}", OS_RELEASE_PATH, e ); logger_manager::write_warn(message); return "Unknown".to_string(); } } } OS_INFO.os_type().to_string() } pub fn get_processor_arch() -> String { match OS_INFO.architecture() { Some(arch) => arch.to_string(), None => "Unknown".to_string(), } } pub fn get_cgroup2_mount_path() -> Result { let output = misc_helpers::execute_command("findmnt", vec!["-t", "cgroup2", "--json"], -1)?; if 
!output.is_success() { return Err(Error::Command(CommandErrorType::Findmnt, output.message())); } let mount: FileMount = serde_json::from_str(&output.stdout())?; if !mount.filesystems.is_empty() { let cgroup2_path = mount.filesystems[0].target.to_string(); return Ok(PathBuf::from(cgroup2_path)); } Err(Error::Command( CommandErrorType::Findmnt, format!("Cannot find cgroup2 file mount: {}.", output.message()), )) } #[cfg(test)] mod tests { use crate::misc_helpers; #[test] fn get_os_version_tests() { let os_version = super::get_os_version(); assert_ne!("", os_version, "os version cannot be empty"); let long_os_version = super::get_long_os_version(); assert!( long_os_version.starts_with("Linux"), "long_os_version must starts with 'Linux'" ); assert!( long_os_version.ends_with(&os_version), "long_os_version must ends with os_version" ) } #[test] fn get_processor_arch_test() { let processor_arch = super::get_processor_arch(); assert_ne!( "unknown", processor_arch, "processor arch cannot be 'unknown'" ); } #[test] fn get_cgroup2_mount_path_test() { match super::get_cgroup2_mount_path() { Ok(cgroup2_path) => { println!( "Got cgroup2 mount path: '{}'", misc_helpers::path_to_string(&cgroup2_path) ); assert!( cgroup2_path.is_dir(), "cgroup2_path {} must be a dir", misc_helpers::path_to_string(&cgroup2_path) ); assert!( cgroup2_path.exists(), "cgroup2_path {} must be exists", misc_helpers::path_to_string(&cgroup2_path) ); } Err(e) => { // This test is not critical, so just print the error message. // This test could fail in some cases, like running in a container/VM without CGROUP2 mounted. println!("Failed to get the cgroup2 mount path {}.", e); } }; } } GuestProxyAgent-1.0.30/proxy_agent_shared/src/logger.rs000066400000000000000000000017631500521614600232130ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT use crate::misc_helpers; pub mod logger_manager; pub mod rolling_logger; pub type LoggerLevel = log::Level; pub fn get_log_header(level: LoggerLevel) -> String { format!( "{} [{}] ", misc_helpers::get_date_time_string_with_milliseconds(), level )[..34] .to_string() } #[cfg(test)] mod tests { use log::Level; use std::str::FromStr; #[test] fn logger_level_test() { let info_level = Level::Info; assert_eq!(Level::from_str("Info").unwrap(), Level::Info); let trace_level = Level::from_str("Trace").unwrap(); assert_eq!(trace_level, Level::Trace); assert!( info_level < trace_level, "Info level should be lower than Trace level" ); assert!( Level::from_str("Trace").unwrap() >= trace_level, "Trace level should be greater than or equal to Trace level" ); } } GuestProxyAgent-1.0.30/proxy_agent_shared/src/logger/000077500000000000000000000000001500521614600226365ustar00rootroot00000000000000GuestProxyAgent-1.0.30/proxy_agent_shared/src/logger/logger_manager.rs000066400000000000000000000113501500521614600261550ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT use super::rolling_logger::RollingLogger; use log::Level; use std::collections::HashMap; static LOGGERS: tokio::sync::OnceCell> = tokio::sync::OnceCell::const_new(); static DEFAULT_LOGGER_KEY: tokio::sync::OnceCell = tokio::sync::OnceCell::const_new(); static MAX_LOG_LEVEL: tokio::sync::OnceCell = tokio::sync::OnceCell::const_new(); /// Setup the loggers and set the default logger key /// # Arguments /// * `loggers` - A hashmap of loggers /// * `default_logger_key` - The default logger key /// # Panics /// * If the default logger key is not found in the 
loggers hashmap pub fn set_loggers(loggers: HashMap, default_logger_key: String) { if LOGGERS.initialized() { return; } if !loggers.contains_key(&default_logger_key) { panic!("Default logger key not found in the loggers hashmap"); } // set the loggers once LOGGERS.set(loggers).unwrap(); DEFAULT_LOGGER_KEY.set(default_logger_key).unwrap(); } pub fn set_logger_level(log_level: Level) { if MAX_LOG_LEVEL.initialized() { return; } MAX_LOG_LEVEL.set(log_level).unwrap(); } pub fn get_logger_level() -> Level { let level = match MAX_LOG_LEVEL.get() { Some(l) => *l, // No need to use `clone` on type `Level` which implements the `Copy` trait None => Level::Trace, }; level } fn get_logger(logger_key: Option) -> Option<&'static RollingLogger> { if let Some(loggers) = LOGGERS.get() { let key = match logger_key { Some(k) => k, None => DEFAULT_LOGGER_KEY.get().unwrap().clone(), }; return loggers.get(&key); } None } pub fn log(logger_key: String, log_level: Level, message: String) { if log_level > get_logger_level() { return; } if let Some(logger) = get_logger(Some(logger_key)) { if let Err(e) = logger.write(log_level, message) { eprintln!("Error writing to log: {}", e); } } } pub fn write_log(log_level: Level, message: String) { let level = match MAX_LOG_LEVEL.get() { Some(l) => *l, // No need to use `clone` on type `Level` which implements the `Copy` trait None => Level::Trace, }; if log_level > level { return; } if let Some(logger) = get_logger(None) { if let Err(e) = logger.write(log_level, message) { eprintln!("Error writing to log: {}", e); } } } pub fn write_info(message: String) { write_log(Level::Info, message); } pub fn write_warn(message: String) { write_log(Level::Warn, message); } pub fn write_err(message: String) { write_log(Level::Error, message); } pub fn write_many(logger_key: Option, messages: Vec) { if let Some(logger) = get_logger(logger_key) { if let Err(e) = logger.write_many(messages) { eprintln!("Error writing to log: {}", e); } } } #[cfg(test)] mod tests { use crate::misc_helpers; use ctor::{ctor, dtor}; use log::Level; use std::env; use std::fs; const TEST_LOGGER_KEY: &str = "logger_manager_test"; fn get_temp_test_dir() -> std::path::PathBuf { let mut temp_test_path = env::temp_dir(); temp_test_path.push(TEST_LOGGER_KEY); temp_test_path } #[ctor] fn setup() { // Setup logger_manager for unit tests let logger = crate::logger::rolling_logger::RollingLogger::create_new( get_temp_test_dir(), "test.log".to_string(), 200, 6, ); let mut loggers = std::collections::HashMap::new(); loggers.insert(TEST_LOGGER_KEY.to_string(), logger); crate::logger::logger_manager::set_loggers(loggers, TEST_LOGGER_KEY.to_string()); } #[dtor] fn cleanup() { // clean up and ignore the clean up errors _ = fs::remove_dir_all(&get_temp_test_dir()); } #[test] fn logger_manager_test() { for _ in [0; 20] { super::write_log( Level::Trace, String::from("This is a test message This is a test message"), ); super::write_log( Level::Debug, String::from("This is a test message This is a test message"), ); super::write_log(Level::Info, "message from write_info".to_string()); super::write_log(Level::Warn, "message from write_warn".to_string()); super::write_log(Level::Error, "message from write_err".to_string()); } let file_count = misc_helpers::get_files(&get_temp_test_dir()).unwrap(); assert_eq!(6, file_count.len(), "log file count mismatch"); } } GuestProxyAgent-1.0.30/proxy_agent_shared/src/logger/rolling_logger.rs000066400000000000000000000154041500521614600262150ustar00rootroot00000000000000// Copyright (c) Microsoft 
Corporation // SPDX-License-Identifier: MIT use crate::misc_helpers; use crate::result::Result; use log::Level; use std::fs::{self, File, OpenOptions}; use std::io::{LineWriter, Write}; use std::path::PathBuf; #[derive(Debug)] pub struct RollingLogger { log_dir: PathBuf, log_file_name: String, log_file_extension: String, max_log_file_size: u64, // max log file size in bytes max_log_file_count: u16, // max log file count, if exceed the count, the older log files will be removed. } impl RollingLogger { pub fn new(dir: String, file_name: String) -> RollingLogger { RollingLogger::create_new(PathBuf::from(dir), file_name, 20 * 1024 * 1024, 20) } pub fn create_new( dir: PathBuf, file_name: String, log_size: u64, log_count: u16, ) -> RollingLogger { RollingLogger { log_dir: dir, log_file_name: file_name, log_file_extension: String::from("log"), max_log_file_size: log_size, max_log_file_count: log_count, } } fn open_file(&self) -> Result<LineWriter<File>> { misc_helpers::try_create_folder(&self.log_dir)?; let file_full_path = self.get_current_file_full_path(None); let f = if file_full_path.exists() { OpenOptions::new().append(true).open(file_full_path)? } else { File::create(file_full_path)? }; Ok(LineWriter::new(f)) } /// write a message to the log file, and roll the log file if needed /// the message will be prefixed with the log level and timestamp pub fn write(&self, level: Level, message: String) -> Result<()> { let message = format!("{}{}", crate::logger::get_log_header(level), message); self.write_line(message) } /// write list of messages to the log file, and roll the log file if needed pub fn write_many(&self, messages: Vec<String>) -> Result<()> { self.roll_if_needed()?; if let Ok(mut writer) = self.open_file() { for message in messages { writer.write_all(message.as_bytes())?; writer.write_all(b"\n")?; } writer.flush()?; } Ok(()) } fn write_line(&self, message: String) -> Result<()> { self.roll_if_needed()?; if let Ok(mut writer) = self.open_file() { writer.write_all(message.as_bytes())?; writer.write_all(b"\n")?; writer.flush()?; } Ok(()) } fn archive_file(&self) -> Result<()> { let new_file_name = self.get_current_file_full_path(Some(format!( "{}-{}", misc_helpers::get_date_time_string_with_milliseconds(), misc_helpers::get_date_time_unix_nano() ))); let current_name = self.get_current_file_full_path(None); fs::rename(current_name, new_file_name)?; let log_files = self.get_log_files()?; // delete oldest files let max_count: usize = self.max_log_file_count.into(); let file_count = log_files.len(); if file_count >= max_count { let mut count = max_count; for log in log_files { fs::remove_file(log)?; count += 1; if count > file_count { break; } } } Ok(()) } pub fn get_log_files(&self) -> Result<Vec<PathBuf>> { // search log files let mut log_files: Vec<PathBuf> = Vec::new(); for entry in fs::read_dir(&self.log_dir)? 
{ let entry = entry?; let file_full_path = entry.path(); let metadata = fs::metadata(&file_full_path)?; if !metadata.is_file() && file_full_path.ends_with(&self.log_file_extension) { continue; } // log file name should able convert to string safely; if not, ignore this file entry if let Ok(file_name) = entry.file_name().into_string() { if !file_name.starts_with(&self.log_file_name) { continue; } } log_files.push(file_full_path); } log_files.sort(); Ok(log_files) } fn get_current_file_full_path(&self, timestamp: Option) -> PathBuf { let mut full_path = PathBuf::from(&self.log_dir); let mut file_name = String::from(&self.log_file_name); if let Some(time) = timestamp { file_name.push('.'); file_name.push_str(&time.replace(':', ".")); file_name.push_str(".log") } full_path.push(&file_name); full_path.set_extension(&self.log_file_extension); full_path } fn roll_if_needed(&self) -> Result<()> { self.open_file()?; let file = self.get_current_file_full_path(None); let file_length = file.metadata()?.len(); let should_roll = file_length >= self.max_log_file_size; if should_roll { self.archive_file()?; self.open_file()?; } Ok(()) } } #[cfg(test)] mod tests { use super::RollingLogger; use std::env; use std::fs; #[test] fn logger_new() { let mut temp_test_path = env::temp_dir(); temp_test_path.push("logger_new_tests"); let logger = RollingLogger::create_new(temp_test_path.clone(), String::from("proxyagent"), 1024, 10); logger .write(log::Level::Info, String::from("This is a test message")) .unwrap(); // clean up and ignore the clean up errors _ = fs::remove_dir_all(temp_test_path); } #[test] fn logger_roll_if_needed() { let mut temp_test_path = env::temp_dir(); temp_test_path.push("logger_roll_if_needed"); // clean up and ignore the clean up errors _ = fs::remove_dir_all(&temp_test_path); let logger = RollingLogger::create_new(temp_test_path.clone(), String::from("proxyagent"), 100, 6); // test without deleting old files for _ in [0; 10] { logger .write(log::Level::Info, String::from("This is a test message")) .unwrap(); } let file_count = logger.get_log_files().unwrap(); assert_eq!(5, file_count.len(), "log file count mismatch"); // test with deleting old files for _ in [0; 10] { logger .write(log::Level::Trace, String::from("This is a test message")) .unwrap(); } let file_count = logger.get_log_files().unwrap(); assert_eq!(6, file_count.len(), "log file count mismatch"); // clean up and ignore the clean up errors _ = fs::remove_dir_all(&temp_test_path); } } GuestProxyAgent-1.0.30/proxy_agent_shared/src/misc_helpers.rs000066400000000000000000000407211500521614600244060ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT use crate::{ error::{CommandErrorType, Error}, result::Result, }; use regex::Regex; use serde::de::DeserializeOwned; use serde::Serialize; use std::{ fs::{self, File}, path::{Path, PathBuf}, process::Command, }; use thread_id; use time::{format_description, OffsetDateTime}; #[cfg(windows)] use super::windows; #[cfg(not(windows))] use super::linux; pub fn get_thread_identity() -> String { format!("{:0>8}", thread_id::get()) } pub fn get_date_time_string_with_milliseconds() -> String { let date_format = format_description::parse("[year]-[month]-[day]T[hour]:[minute]:[second].[subsecond]") .unwrap(); let time_str = OffsetDateTime::now_utc().format(&date_format).unwrap(); time_str.chars().take(23).collect() } pub fn get_date_time_string() -> String { let date_format = 
format_description::parse("[year]-[month]-[day]T[hour]:[minute]:[second]Z").unwrap(); let time_str = OffsetDateTime::now_utc().format(&date_format).unwrap(); time_str.chars().collect() } // This format is also the preferred HTTP date format. https://httpwg.org/specs/rfc9110.html#http.date pub fn get_date_time_rfc1123_string() -> String { let date_format = format_description::parse( "[weekday repr:short], [day] [month repr:short] [year] [hour]:[minute]:[second] GMT", ) .unwrap(); let time_str = OffsetDateTime::now_utc().format(&date_format).unwrap(); time_str.chars().collect() } pub fn get_date_time_unix_nano() -> i128 { OffsetDateTime::now_utc().unix_timestamp_nanos() } pub fn try_create_folder(dir: &Path) -> Result<()> { match dir.try_exists() { Ok(exists) => { if !exists { fs::create_dir_all(dir)?; // Recursively create a directory and all of its parent components if they are missing } } Err(error) => panic!( "Problem check the directory '{}' exists: {:?}", dir.display(), error ), }; Ok(()) } pub fn json_write_to_file<T>(obj: &T, file_path: &Path) -> Result<()> where T: ?Sized + Serialize, { // write to a temp file and rename to avoid corrupted file let temp_file_path = file_path.with_extension("tmp"); let file = File::create(&temp_file_path)?; serde_json::to_writer_pretty(file, obj)?; std::fs::rename(temp_file_path, file_path)?; Ok(()) } pub fn json_read_from_file<T>(file_path: &Path) -> Result<T> where T: DeserializeOwned, { let file = File::open(file_path)?; let obj: T = serde_json::from_reader(file)?; Ok(obj) } pub fn json_clone<T>(obj: &T) -> Result<T> where T: Serialize + DeserializeOwned, { let json = serde_json::to_string(obj)?; serde_json::from_str(&json).map_err(Into::into) } pub fn get_current_exe_dir() -> PathBuf { let mut path = std::env::current_exe().unwrap(); path.pop(); path } pub fn get_long_os_version() -> String { // os let os; #[cfg(windows)] { os = windows::get_long_os_version(); } #[cfg(not(windows))] { os = linux::get_long_os_version(); } os } pub fn get_processor_arch() -> String { //arch let arch; #[cfg(windows)] { arch = windows::get_processor_arch(); } #[cfg(not(windows))] { arch = linux::get_processor_arch() } arch } pub fn path_to_string(path: &Path) -> String { path.display().to_string() } pub fn get_file_name(path: &Path) -> String { match path.file_name() { Some(s) => s.to_str().unwrap_or("InvalidPath").to_string(), None => "InvalidPath".to_string(), } } const VERSION: &str = env!("CARGO_PKG_VERSION"); pub fn get_current_version() -> String { VERSION.to_string() } pub fn get_files(dir: &Path) -> Result<Vec<PathBuf>> { // search files let mut files: Vec<PathBuf> = Vec::new(); for entry in fs::read_dir(dir)? 
{ let entry = entry?; let file_full_path = entry.path(); let metadata = fs::metadata(&file_full_path)?; if !metadata.is_file() { continue; } files.push(file_full_path); } files.sort(); Ok(files) } /// Search files in a directory with a regex pattern /// # Arguments /// * `dir` - The directory to search /// * `search_regex_pattern` - The regex pattern to search /// # Returns /// A vector of PathBufs that match the search pattern in ascending order /// # Errors /// Returns an error if the regex pattern is invalid or if there is an IO error /// # Example /// ```rust /// use std::path::PathBuf; /// use proxy_agent_shared::misc_helpers; /// let dir = PathBuf::from("C:\\"); /// let search_regex_pattern = r"^(.*\.log)$"; // search for files with .log extension /// let files = misc_helpers::search_files(&dir, search_regex_pattern).unwrap(); /// /// let search_regex_pattern = r"^MyFile.*\.json$"; // Regex pattern to match "MyFile*.json" /// let files = misc_helpers::search_files(&dir, search_regex_pattern).unwrap(); /// ``` pub fn search_files(dir: &Path, search_regex_pattern: &str) -> Result> { let mut files = Vec::new(); let regex = Regex::new(search_regex_pattern)?; for entry in fs::read_dir(dir)? { let entry = entry?; let file_full_path = entry.path(); let metadata = fs::metadata(&file_full_path)?; if !metadata.is_file() { continue; } let file_name = get_file_name(&file_full_path); if regex.is_match(&file_name) { files.push(file_full_path); } } files.sort(); Ok(files) } pub struct CommandOutput { exit_code: i32, stdout: String, stderr: String, } impl CommandOutput { pub fn new(exit_code: i32, stdout: String, stderr: String) -> Self { Self { exit_code, stdout, stderr, } } pub fn is_success(&self) -> bool { self.exit_code == 0 } pub fn stdout(&self) -> String { self.stdout.to_string() } pub fn stderr(&self) -> String { self.stderr.to_string() } pub fn exit_code(&self) -> i32 { self.exit_code } pub fn message(&self) -> String { format!( "exit code: '{}', stdout: '{}', stderr: '{}'", self.exit_code, self.stdout, self.stderr ) } } pub fn execute_command( program: &str, args: Vec<&str>, default_error_code: i32, ) -> Result { let output = Command::new(program).args(args).output()?; Ok(CommandOutput::new( output.status.code().unwrap_or(default_error_code), String::from_utf8_lossy(&output.stdout).to_string(), String::from_utf8_lossy(&output.stderr).to_string(), )) } pub fn get_proxy_agent_version(proxy_agent_exe: &Path) -> Result { let proxy_agent_exe_str = path_to_string(proxy_agent_exe); if !proxy_agent_exe.exists() { return Err(Error::Io(std::io::Error::new( std::io::ErrorKind::NotFound, format!("File '{}' does not found", proxy_agent_exe_str), ))); } if !proxy_agent_exe.is_file() { return Err(Error::Io(std::io::Error::new( std::io::ErrorKind::InvalidInput, format!("'{}' is not a file", proxy_agent_exe_str), ))); } let output = execute_command(&path_to_string(proxy_agent_exe), vec!["--version"], -1)?; if output.is_success() { Ok(output.stdout().trim().to_string()) } else { Err(Error::Command( CommandErrorType::CommandName(proxy_agent_exe_str), output.message(), )) } } /// This function replaces all occurrences of %VAR% in the input string with the value of the environment variable VAR /// If the environment variable is not set, it returns the original string with VAR unchanged. 
/// # Arguments /// * `input` - The input string to resolve environment variables in /// # Returns /// A Result containing the resolved string or an error if the regex pattern is invalid pub fn resolve_env_variables(input: &str) -> Result<String> { let re = Regex::new(r"%(\w+)%")?; let ret = re .replace_all(input, |caps: &regex::Captures| { std::env::var(&caps[1]).unwrap_or_else(|_| caps[1].to_string()) }) .to_string(); Ok(ret) } #[cfg(test)] mod tests { use serde_derive::{Deserialize, Serialize}; use std::env; use std::fs; use std::path::PathBuf; #[derive(Serialize, Deserialize)] struct TestStruct { thread_id: String, date_time_string_with_milliseconds: String, date_time_string: String, date_time_rfc1123_string: String, date_time_unix_nano: i128, long_os_version: String, current_exe_dir: String, } #[test] fn json_write_read_from_file_test() { let mut temp_test_path = env::temp_dir(); temp_test_path.push("json_Write_read_from_file_test"); // clean up and ignore the clean up errors _ = fs::remove_dir_all(&temp_test_path); super::try_create_folder(&temp_test_path).unwrap(); let json_file = temp_test_path.as_path(); let json_file = json_file.join("test.json"); let test = TestStruct { thread_id: super::get_thread_identity(), date_time_string_with_milliseconds: super::get_date_time_string_with_milliseconds(), date_time_string: super::get_date_time_string(), date_time_rfc1123_string: super::get_date_time_rfc1123_string(), date_time_unix_nano: super::get_date_time_unix_nano(), long_os_version: super::get_long_os_version(), current_exe_dir: super::get_current_exe_dir().to_str().unwrap().to_string(), }; super::json_write_to_file(&test, &json_file).unwrap(); let json = super::json_read_from_file::<TestStruct>(&json_file).unwrap(); assert_eq!(test.thread_id, json.thread_id); assert_eq!( test.date_time_string_with_milliseconds, json.date_time_string_with_milliseconds ); assert_eq!(test.date_time_string, json.date_time_string); assert_eq!(test.date_time_rfc1123_string, json.date_time_rfc1123_string); assert_eq!(test.date_time_unix_nano, json.date_time_unix_nano); assert_eq!(test.long_os_version, json.long_os_version); assert_eq!(test.current_exe_dir, json.current_exe_dir); _ = fs::remove_dir_all(&temp_test_path); } #[test] fn path_to_string_test() { let path = "path_to_string_test"; let path_str = super::path_to_string(&PathBuf::from(path)); assert_eq!(path_str, path, "path_str mismatch"); } #[test] fn execute_command_test() { let mut temp_test_path = env::temp_dir(); temp_test_path.push("execute_command_test"); // clean up and ignore the clean up errors _ = fs::remove_dir_all(&temp_test_path); super::try_create_folder(&temp_test_path).unwrap(); let program: &str; let script_content: &str; let script_file_name: &str; #[cfg(windows)] { program = "powershell.exe"; script_file_name = "test.ps1"; script_content = r#"write-host "this is stdout message" write-error "This is stderr message" exit 1 "#; } #[cfg(not(windows))] { program = "sh"; script_file_name = "test.sh"; script_content = r#"echo "this is stdout message" >&2 echo "This is stderr message" exit 1 "#; } let script_file_path = temp_test_path.join(script_file_name); _ = fs::write(&script_file_path, script_content); let default_error_code = -1; let output = super::execute_command( program, vec![&super::path_to_string(&script_file_path)], default_error_code, ) .unwrap(); assert_eq!(1, output.exit_code(), "exit code mismatch"); assert_eq!( "this is stdout message", output.stdout().trim(), "stdout message mismatch" ); assert!( output.stderr().contains("This is stderr 
message"), "stderr message mismatch" ); _ = fs::remove_dir_all(&temp_test_path); } #[test] fn get_file_name_test() { let path = PathBuf::from("test.txt"); let file_name = super::get_file_name(&path); assert_eq!("test.txt", file_name, "file_name mismatch"); let path = PathBuf::new(); let file_name = super::get_file_name(&path); assert_eq!("InvalidPath", file_name, "file_name mismatch"); } #[test] fn search_files_test() { let mut temp_test_path = env::temp_dir(); temp_test_path.push("search_files_test"); // clean up and ignore the clean up errors _ = fs::remove_dir_all(&temp_test_path); super::try_create_folder(&temp_test_path).unwrap(); let test = TestStruct { thread_id: super::get_thread_identity(), date_time_string_with_milliseconds: super::get_date_time_string_with_milliseconds(), date_time_string: super::get_date_time_string(), date_time_rfc1123_string: super::get_date_time_rfc1123_string(), date_time_unix_nano: super::get_date_time_unix_nano(), long_os_version: super::get_long_os_version(), current_exe_dir: super::get_current_exe_dir().to_str().unwrap().to_string(), }; // write 2 json files to the temp_test_path let json_file = temp_test_path.as_path(); let json_file = json_file.join("test.json"); super::json_write_to_file(&test, &json_file).unwrap(); let json_file = temp_test_path.as_path(); let json_file = json_file.join("test_1.json"); super::json_write_to_file(&test, &json_file).unwrap(); let files = super::search_files(&temp_test_path, "test.json").unwrap(); assert_eq!( 1, files.len(), "file count mismatch with 'test.json' search" ); let files = super::search_files(&temp_test_path, r"^test.*\.json$").unwrap(); assert_eq!( 2, files.len(), "file count mismatch with 'test*.json' search" ); assert_eq!( "test.json", super::get_file_name(&files[0]), "First file name mismatch" ); assert_eq!( "test_1.json", super::get_file_name(&files[1]), "Second file name mismatch" ); _ = fs::remove_dir_all(&temp_test_path); } #[test] fn json_clone_test() { let test = TestStruct { thread_id: super::get_thread_identity(), date_time_string_with_milliseconds: super::get_date_time_string_with_milliseconds(), date_time_string: super::get_date_time_string(), date_time_rfc1123_string: super::get_date_time_rfc1123_string(), date_time_unix_nano: super::get_date_time_unix_nano(), long_os_version: super::get_long_os_version(), current_exe_dir: super::get_current_exe_dir().to_str().unwrap().to_string(), }; let cloned = super::json_clone(&test).unwrap(); assert_eq!(test.thread_id, cloned.thread_id); assert_eq!( test.date_time_string_with_milliseconds, cloned.date_time_string_with_milliseconds ); assert_eq!(test.date_time_string, cloned.date_time_string); assert_eq!( test.date_time_rfc1123_string, cloned.date_time_rfc1123_string ); assert_eq!(test.date_time_unix_nano, cloned.date_time_unix_nano); assert_eq!(test.long_os_version, cloned.long_os_version); assert_eq!(test.current_exe_dir, cloned.current_exe_dir); } #[test] fn resolve_env_variables_test() { let input = r"%SYSTEMDRIVE%\%WindowsAzure%\ProxyAgent\Package_1.0.0"; let expected = format!( "{}\\WindowsAzure\\ProxyAgent\\Package_1.0.0", env::var("SYSTEMDRIVE").unwrap_or("SYSTEMDRIVE".to_string()) ); let resolved = super::resolve_env_variables(input).unwrap(); assert_eq!(expected, resolved, "resolved string mismatch"); let input = "/var/log/azure-proxy-agent/"; let expected = "/var/log/azure-proxy-agent/".to_string(); let resolved = super::resolve_env_variables(input).unwrap(); assert_eq!(expected, resolved, "resolved string mismatch"); } } 
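A note on how the misc_helpers functions above compose: a typical caller creates a working folder, persists a serde-derived struct through json_write_to_file (which writes a .tmp file and then renames it into place), reads it back with json_read_from_file, and locates what was written with search_files. The sketch below is illustrative only and is not part of the repository; the ExampleState struct and the folder name are hypothetical, and it assumes proxy_agent_shared, serde_derive, and their dependencies are referenced by the calling crate.

use proxy_agent_shared::misc_helpers;
use proxy_agent_shared::result::Result;
use serde_derive::{Deserialize, Serialize};
use std::path::PathBuf;

// Hypothetical payload type, used only for this illustration.
#[derive(Serialize, Deserialize)]
struct ExampleState {
    version: String,
    saved_at: String,
}

fn save_and_find_state() -> Result<Vec<PathBuf>> {
    // Work under the OS temp folder so the sketch runs on both Windows and Linux.
    let dir = std::env::temp_dir().join("proxy_agent_shared_example");
    misc_helpers::try_create_folder(&dir)?;

    let state = ExampleState {
        version: misc_helpers::get_current_version(),
        saved_at: misc_helpers::get_date_time_string(),
    };
    // Writes state.json via a .tmp file that is renamed into place.
    misc_helpers::json_write_to_file(&state, &dir.join("state.json"))?;

    // Round-trip the file to confirm the payload deserializes.
    let _read_back: ExampleState = misc_helpers::json_read_from_file(&dir.join("state.json"))?;

    // Return every state*.json found, sorted ascending by path.
    misc_helpers::search_files(&dir, r"^state.*\.json$")
}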
GuestProxyAgent-1.0.30/proxy_agent_shared/src/proxy_agent_aggregate_status.rs000066400000000000000000000055261500521614600277050ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT use crate::misc_helpers; use serde_derive::{Deserialize, Serialize}; use std::{collections::HashMap, path::PathBuf}; #[cfg(windows)] const PROXY_AGENT_AGGREGATE_STATUS_FOLDER: &str = "%SYSTEMDRIVE%\\WindowsAzure\\ProxyAgent\\Logs\\"; #[cfg(not(windows))] const PROXY_AGENT_AGGREGATE_STATUS_FOLDER: &str = "/var/log/azure-proxy-agent/"; pub const PROXY_AGENT_AGGREGATE_STATUS_FILE_NAME: &str = "status.json"; pub fn get_proxy_agent_aggregate_status_folder() -> std::path::PathBuf { let path = misc_helpers::resolve_env_variables(PROXY_AGENT_AGGREGATE_STATUS_FOLDER) .unwrap_or(PROXY_AGENT_AGGREGATE_STATUS_FOLDER.to_string()); PathBuf::from(path) } #[derive(Deserialize, Serialize, Debug, PartialEq, Clone)] pub enum ModuleState { UNKNOWN, RUNNING, STOPPED, } #[derive(Deserialize, Serialize, Debug, PartialEq, Clone)] pub enum OverallState { SUCCESS, ERROR, UNKNOWN, } #[derive(Deserialize, Serialize)] #[allow(non_snake_case)] pub struct ProxyAgentDetailStatus { pub status: ModuleState, // ModuleState, RUNNING|STOPPED pub message: String, // detail message #[serde(skip_serializing_if = "Option::is_none")] pub states: Option<HashMap<String, String>>, // module specific states } #[derive(Serialize, Deserialize)] #[allow(non_snake_case)] pub struct ProxyAgentStatus { pub version: String, pub status: OverallState, // OverallState, SUCCESS|ERROR pub monitorStatus: ProxyAgentDetailStatus, pub keyLatchStatus: ProxyAgentDetailStatus, pub ebpfProgramStatus: ProxyAgentDetailStatus, pub proxyListenerStatus: ProxyAgentDetailStatus, pub telemetryLoggerStatus: ProxyAgentDetailStatus, pub proxyConnectionsCount: u128, } #[derive(Serialize, Deserialize)] #[allow(non_snake_case)] pub struct ProxyConnectionSummary { pub userName: String, pub ip: String, pub port: u16, pub processCmdLine: String, pub responseStatus: String, pub count: u64, pub userGroups: Option<Vec<String>>, pub processFullPath: Option<String>, } impl Clone for ProxyConnectionSummary { fn clone(&self) -> Self { ProxyConnectionSummary { userName: self.userName.clone(), userGroups: self.userGroups.clone(), ip: self.ip.clone(), port: self.port, processFullPath: self.processFullPath.clone(), processCmdLine: self.processCmdLine.clone(), responseStatus: self.responseStatus.clone(), count: self.count, } } } #[derive(Serialize, Deserialize)] #[allow(non_snake_case)] pub struct GuestProxyAgentAggregateStatus { pub timestamp: String, pub proxyAgentStatus: ProxyAgentStatus, pub proxyConnectionSummary: Vec<ProxyConnectionSummary>, pub failedAuthenticateSummary: Vec<ProxyConnectionSummary>, } GuestProxyAgent-1.0.30/proxy_agent_shared/src/result.rs000066400000000000000000000002271500521614600232440ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT use super::error::Error; pub type Result<T> = core::result::Result<T, Error>; GuestProxyAgent-1.0.30/proxy_agent_shared/src/service.rs000066400000000000000000000125421500521614600233710ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT #[cfg(not(windows))] mod linux_service; #[cfg(windows)] mod windows_service; use std::path::PathBuf; #[cfg(windows)] use crate::logger::logger_manager; use crate::result::Result; pub fn install_service( service_name: &str, _service_display_name: &str, _service_dependencies: Vec<&str>, _exe_path: PathBuf, ) -> Result<()> { #[cfg(windows)] { 
windows_service::install_or_update_service( service_name, _service_display_name, _service_dependencies, _exe_path, ) } #[cfg(not(windows))] { linux_service::install_or_update_service(service_name) } } pub async fn stop_and_delete_service(service_name: &str) -> Result<()> { #[cfg(windows)] { windows_service::stop_and_delete_service(service_name).await } #[cfg(not(windows))] { linux_service::stop_service(service_name)?; linux_service::uninstall_service(service_name) } } pub async fn start_service( service_name: &str, _retry_count: u32, _duration: std::time::Duration, ) -> Result<()> { #[cfg(windows)] { windows_service::start_service_with_retry(service_name, _retry_count, _duration).await } #[cfg(not(windows))] { linux_service::start_service(service_name) } } pub async fn stop_service(service_name: &str) -> Result<()> { #[cfg(windows)] { windows_service::stop_service(service_name) .await .map(|_| ()) } #[cfg(not(windows))] { linux_service::stop_service(service_name) } } pub fn update_service( _service_name: &str, _service_display_name: &str, _service_dependencies: Vec<&str>, _exe_path: PathBuf, ) -> Result<()> { #[cfg(windows)] { windows_service::update_service( _service_name, _service_display_name, _service_dependencies, _exe_path, ) } #[cfg(not(windows))] { println!("Not support update service on this platform"); Ok(()) } } pub fn query_service_executable_path(_service_name: &str) -> PathBuf { #[cfg(windows)] { match windows_service::query_service_config(_service_name) { Ok(service_config) => { logger_manager::write_info(format!( "Service {} successfully queried", _service_name )); service_config.executable_path.to_path_buf() } Err(e) => { logger_manager::write_info(format!( "Service {} query failed: {}", _service_name, e )); eprintln!("Service {} query failed: {}", _service_name, e); PathBuf::new() } } } #[cfg(not(windows))] { println!("Not support query service on this platform"); PathBuf::new() } } pub fn check_service_installed(_service_name: &str) -> (bool, String) { let message; #[cfg(windows)] { match windows_service::query_service_config(_service_name) { Ok(_service_config) => { message = format!( "check_service_installed: Ebpf Driver: {} successfully queried.", _service_name ); (true, message) } Err(e) => { message = format!( "check_service_installed: Ebpf Driver: {} unsuccessfully queried with error: {}.", _service_name, e ); (false, message) } } } #[cfg(not(windows))] { message = "Not support query service on this platform".to_string(); (false, message) } } #[cfg(windows)] pub use windows_service::set_default_failure_actions; #[cfg(test)] mod tests { #[test] fn test_update_service() { #[cfg(not(windows))] { let service_name = "test_update_service"; let exe_path = std::env::current_exe().unwrap(); let result = super::update_service(service_name, service_name, vec![], exe_path); assert!(result.is_ok()); } } #[tokio::test] async fn test_install_service() { #[cfg(not(windows))] { let service_name = "test_install_service"; let exe_path = std::env::current_exe().unwrap(); let result = super::install_service(service_name, service_name, vec![], exe_path); assert!(result.is_ok()); } } #[tokio::test] async fn test_check_service_installed() { #[cfg(windows)] { let service_name = "test_check_service_installed"; // try delete the service if it exists _ = super::stop_and_delete_service(service_name).await; let exe_path = std::env::current_exe().unwrap(); let result = super::install_service(service_name, service_name, vec![], exe_path); assert!(result.is_ok()); let (is_installed, message) = 
super::check_service_installed(service_name); assert!(is_installed); assert!(message.contains("successfully queried")); // clean up _ = super::stop_and_delete_service(service_name).await.unwrap(); } } } GuestProxyAgent-1.0.30/proxy_agent_shared/src/service/000077500000000000000000000000001500521614600230175ustar00rootroot00000000000000GuestProxyAgent-1.0.30/proxy_agent_shared/src/service/linux_service.rs000066400000000000000000000056241500521614600262530ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT use crate::linux; use crate::logger::logger_manager; use crate::misc_helpers; use crate::result::Result; use std::fs; use std::path::PathBuf; pub fn stop_service(service_name: &str) -> Result<()> { let output = misc_helpers::execute_command("systemctl", vec!["stop", service_name], -1)?; logger_manager::write_info(format!( "stop_service: {} result: {}", service_name, output.message() )); Ok(()) } pub fn start_service(service_name: &str) -> Result<()> { let output = misc_helpers::execute_command("systemctl", vec!["start", service_name], -1)?; logger_manager::write_info(format!( "start_service: {} result: {}", service_name, output.message() )); Ok(()) } pub fn install_or_update_service(service_name: &str) -> Result<()> { unmask_service(service_name)?; reload_systemd_daemon()?; enable_service(service_name) } fn unmask_service(service_name: &str) -> Result<()> { let output = misc_helpers::execute_command("systemctl", vec!["unmask", service_name], -1)?; logger_manager::write_info(format!( "unmask_service: {} result: {}", service_name, output.message() )); Ok(()) } pub fn uninstall_service(service_name: &str) -> Result<()> { disable_service(service_name)?; delete_service_config_file(service_name) } fn disable_service(service_name: &str) -> Result<()> { let output = misc_helpers::execute_command("systemctl", vec!["disable", service_name], -1)?; logger_manager::write_info(format!( "disable_service: {} result: {}", service_name, output.message() )); Ok(()) } fn reload_systemd_daemon() -> Result<()> { let output = misc_helpers::execute_command("systemctl", vec!["daemon-reload"], -1)?; logger_manager::write_info(format!( "reload_systemd_daemon result: {}", output.message() )); Ok(()) } fn enable_service(service_name: &str) -> Result<()> { let output = misc_helpers::execute_command("systemctl", vec!["enable", service_name], -1)?; logger_manager::write_info(format!( "enable_service: {} result: {}", service_name, output.message() )); Ok(()) } fn delete_service_config_file(service_name: &str) -> Result<()> { let config_file_path = PathBuf::from(linux::SERVICE_CONFIG_FOLDER_PATH).join(format!("{}.service", service_name)); match fs::remove_file(&config_file_path) { Ok(_) => { reload_systemd_daemon()?; } Err(e) => { let message = format!( "delete_service_config_file: {} failed to delete service config file '{}': {}", service_name, misc_helpers::path_to_string(&config_file_path), e ); logger_manager::write_info(message); } } Ok(()) } GuestProxyAgent-1.0.30/proxy_agent_shared/src/service/windows_service.rs000066400000000000000000000400121500521614600265740ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT use crate::error::Error; use crate::logger::logger_manager; use crate::result::Result; use std::ffi::OsString; use std::path::PathBuf; use std::str; use std::time::Duration; use windows_service::service::{ ServiceAccess, ServiceAction, ServiceActionType, ServiceConfig, ServiceErrorControl, 
ServiceFailureResetPeriod, ServiceInfo, ServiceStartType, ServiceState, ServiceStatus, ServiceType, }; use windows_service::service::{ServiceDependency, ServiceFailureActions}; use windows_service::service_manager::{ServiceManager, ServiceManagerAccess}; pub async fn start_service_with_retry( service_name: &str, retry_count: u32, duration: std::time::Duration, ) -> Result<()> { for i in 0..retry_count { logger_manager::write_info(format!("Starting service {} attempt {}", service_name, i)); match start_service_once(service_name).await { Ok(service) => { if service.current_state == ServiceState::Running { logger_manager::write_info(format!( "Service {} is at Running state", service_name )); return Ok(()); } logger_manager::write_info( format!( "Service {} failed to start with current state {:?}", service_name, service.current_state ) .to_string(), ); } Err(e) => { logger_manager::write_warn( format!( "Extension service {} start failed with error: {}", service_name, e ) .to_string(), ); if (i + 1) == retry_count { logger_manager::write_err( format!( "Service {} failed to start after {} attempts", service_name, i ) .to_string(), ); return Err(e); } } } tokio::time::sleep(duration).await; } Ok(()) } async fn start_service_once(service_name: &str) -> Result { // Start service if it already isn't running let service = query_service_status(service_name)?; if service.current_state == ServiceState::Running { logger_manager::write_info(format!("Service '{}' is already running", service_name)); Ok(service) } else { logger_manager::write_info(format!("Starting service '{}'", service_name)); let service_manager: ServiceManager = ServiceManager::local_computer(None::<&str>, ServiceManagerAccess::CONNECT) .map_err(|e| Error::WindowsService(e, std::io::Error::last_os_error()))?; let service = service_manager .open_service( service_name, ServiceAccess::START | ServiceAccess::QUERY_STATUS, ) .map_err(|e| Error::WindowsService(e, std::io::Error::last_os_error()))?; service .start(&[""]) .map_err(|e| Error::WindowsService(e, std::io::Error::last_os_error()))?; logger_manager::write_info("Wait for 1 second before querying service status".to_string()); tokio::time::sleep(std::time::Duration::from_secs(1)).await; service .query_status() .map_err(|e| Error::WindowsService(e, std::io::Error::last_os_error())) } } pub async fn stop_and_delete_service(service_name: &str) -> Result<()> { stop_service(service_name).await?; delete_service(service_name) } pub async fn stop_service(service_name: &str) -> Result { // Stop service if it already isn't stopped let service = query_service_status(service_name)?; if service.current_state == ServiceState::Running { let service_manager: ServiceManager = ServiceManager::local_computer(None::<&str>, ServiceManagerAccess::CONNECT) .map_err(|e| Error::WindowsService(e, std::io::Error::last_os_error()))?; let service = service_manager .open_service( service_name, ServiceAccess::STOP | ServiceAccess::QUERY_STATUS, ) .map_err(|e| Error::WindowsService(e, std::io::Error::last_os_error()))?; match service.stop() { Ok(service) => { logger_manager::write_info(format!( "Stopped service {} successfully with current status {:?}", service_name, service.current_state )); tokio::time::sleep(std::time::Duration::from_secs(1)).await; } Err(e) => { logger_manager::write_info(format!( "Stopped service {} failed, error: {:?}", service_name, e )); } } service .query_status() .map_err(|e| Error::WindowsService(e, std::io::Error::last_os_error())) } else { Ok(service) } } fn delete_service(service_name: 
&str) -> Result<()> { // Delete the service let service_manager: ServiceManager = ServiceManager::local_computer(None::<&str>, ServiceManagerAccess::CONNECT) .map_err(|e| Error::WindowsService(e, std::io::Error::last_os_error()))?; let service = service_manager .open_service(service_name, ServiceAccess::DELETE) .map_err(|e| Error::WindowsService(e, std::io::Error::last_os_error()))?; service .delete() .map_err(|e| Error::WindowsService(e, std::io::Error::last_os_error())) } pub fn install_or_update_service( service_name: &str, service_display_name: &str, service_dependencies: Vec<&str>, service_exe_path: PathBuf, ) -> Result<()> { // if query_service returns Ok, then the service needs to be updated otherwise create a service match query_service_status(service_name) { Ok(_service) => update_service( service_name, service_display_name, service_dependencies, service_exe_path, ), Err(_e) => create_service( service_name, service_display_name, service_dependencies, service_exe_path, ), } } fn query_service_status(service_name: &str) -> Result { let service_manager = ServiceManager::local_computer(None::<&str>, ServiceManagerAccess::CONNECT) .map_err(|e| Error::WindowsService(e, std::io::Error::last_os_error()))?; let service = service_manager .open_service(service_name, ServiceAccess::QUERY_STATUS) .map_err(|e| Error::WindowsService(e, std::io::Error::last_os_error()))?; service .query_status() .map_err(|e| Error::WindowsService(e, std::io::Error::last_os_error())) } #[allow(dead_code)] pub fn query_service_config(service_name: &str) -> Result { let service_manager = ServiceManager::local_computer(None::<&str>, ServiceManagerAccess::CONNECT) .map_err(|e| Error::WindowsService(e, std::io::Error::last_os_error()))?; let service = service_manager .open_service(service_name, ServiceAccess::QUERY_CONFIG) .map_err(|e| Error::WindowsService(e, std::io::Error::last_os_error()))?; service .query_config() .map_err(|e| Error::WindowsService(e, std::io::Error::last_os_error())) } pub fn update_service( service_name: &str, service_display_name: &str, service_dependencies: Vec<&str>, service_exe_path: PathBuf, ) -> Result<()> { // update the service with the new executable path let service_manager = ServiceManager::local_computer(None::<&str>, ServiceManagerAccess::CONNECT) .map_err(|e| Error::WindowsService(e, std::io::Error::last_os_error()))?; let service = service_manager .open_service(service_name, ServiceAccess::CHANGE_CONFIG) .map_err(|e| Error::WindowsService(e, std::io::Error::last_os_error()))?; let mut vec_service_dependencies: Vec = Vec::new(); for src_dep in service_dependencies { vec_service_dependencies.push(ServiceDependency::Service(OsString::from(src_dep))); } let service_info = ServiceInfo { name: OsString::from(service_name), display_name: OsString::from(service_display_name), service_type: ServiceType::OWN_PROCESS, start_type: ServiceStartType::AutoStart, error_control: ServiceErrorControl::Normal, executable_path: service_exe_path, launch_arguments: vec![], //TODO: add arguments dependencies: vec_service_dependencies, account_name: None, // run as System account_password: None, }; service .change_config(&service_info) .map_err(|e| Error::WindowsService(e, std::io::Error::last_os_error())) } fn create_service( service_name: &str, service_display_name: &str, service_dependencies: Vec<&str>, exe_path: PathBuf, ) -> Result<()> { let service_manager = ServiceManager::local_computer(None::<&str>, ServiceManagerAccess::CREATE_SERVICE) .map_err(|e| Error::WindowsService(e, 
std::io::Error::last_os_error()))?; let mut vec_service_dependencies: Vec = Vec::new(); for src_dep in service_dependencies { vec_service_dependencies.push(ServiceDependency::Service(OsString::from(src_dep))); } let service_info = ServiceInfo { name: OsString::from(service_name), display_name: OsString::from(service_display_name), service_type: ServiceType::OWN_PROCESS, start_type: ServiceStartType::AutoStart, error_control: ServiceErrorControl::Normal, executable_path: exe_path, launch_arguments: vec![], dependencies: vec_service_dependencies, account_name: None, // run as System account_password: None, }; service_manager .create_service(&service_info, ServiceAccess::QUERY_STATUS) .map_err(|e| Error::WindowsService(e, std::io::Error::last_os_error()))?; set_default_failure_actions(service_name) } /// Setup the default failure actions for the service /// The default failure actions are: /// - Reset period: 30 minutes /// - First restart service after 15 seconds /// - Second restart service after 60 seconds /// - Third restart service after 120 seconds pub fn set_default_failure_actions(service_name: &str) -> Result<()> { let service_manager = ServiceManager::local_computer(None::<&str>, ServiceManagerAccess::CONNECT) .map_err(|e| Error::WindowsService(e, std::io::Error::last_os_error()))?; let service = service_manager .open_service( service_name, ServiceAccess::START | ServiceAccess::CHANGE_CONFIG, ) .map_err(|e| Error::WindowsService(e, std::io::Error::last_os_error()))?; let failure_actions = ServiceFailureActions { reset_period: ServiceFailureResetPeriod::After(Duration::from_secs(1800)), // Reset period 30 minutes reboot_msg: None, command: None, actions: Some(vec![ ServiceAction { action_type: ServiceActionType::Restart, delay: Duration::from_secs(15), // Delay before restart }, ServiceAction { action_type: ServiceActionType::Restart, delay: Duration::from_secs(60), }, ServiceAction { action_type: ServiceActionType::Restart, delay: Duration::from_secs(120), }, ]), }; service .update_failure_actions(failure_actions) .map_err(|e| Error::WindowsService(e, std::io::Error::last_os_error())) } #[cfg(test)] mod tests { use std::{path::PathBuf, process::Command}; #[tokio::test] async fn test_install_service() { const TEST_SERVICE_NAME: &str = "test_nt_service"; // Delete Service if it exists _ = super::stop_and_delete_service(TEST_SERVICE_NAME).await; // Install Service let service_exe_path: PathBuf = PathBuf::from("notepad.exe"); super::install_or_update_service( TEST_SERVICE_NAME, TEST_SERVICE_NAME, vec![], service_exe_path.to_path_buf(), ) .unwrap(); // check the exe path let config = super::query_service_config(TEST_SERVICE_NAME).unwrap(); assert_eq!(config.executable_path, service_exe_path.to_path_buf()); // Update Service let updated_service_exe_path: PathBuf = PathBuf::from("calc.exe"); super::install_or_update_service( TEST_SERVICE_NAME, TEST_SERVICE_NAME, vec![], updated_service_exe_path.to_path_buf(), ) .unwrap(); // check exe path has been updated let config = super::query_service_config(TEST_SERVICE_NAME).unwrap(); assert_eq!( config.executable_path, updated_service_exe_path.to_path_buf() ); //Check if service is running let output = Command::new("sc") .args(["query", TEST_SERVICE_NAME]) .output() .expect("Failed to execute command"); let output_str = String::from_utf8_lossy(&output.stdout); print!("SC query output: {}", output_str); // Check if the output contains the desired information indicating the service is running assert!( !output_str.contains("The specified service does 
not exist as an installed service") ); let result = super::start_service_with_retry( TEST_SERVICE_NAME, 2, std::time::Duration::from_millis(15), ) .await; assert!(result.is_err(), "Test Service should not be able to start"); let service_status = super::query_service_status(TEST_SERVICE_NAME).unwrap(); assert_ne!( service_status.current_state, windows_service::service::ServiceState::Running, "Test service should not be able to run" ); // Check if service is stopped let expected_stop_service = super::stop_service(TEST_SERVICE_NAME).await.unwrap(); let actual_stop_service = super::query_service_status(TEST_SERVICE_NAME).unwrap(); assert_eq!(expected_stop_service, actual_stop_service); // //Clean up - delete service super::stop_and_delete_service(TEST_SERVICE_NAME) .await .unwrap(); //Check if service is running let output = Command::new("sc") .args(["query", TEST_SERVICE_NAME]) .output() .expect("Failed to execute command"); let output_str = String::from_utf8_lossy(&output.stdout); println!("SC query output: {}", output_str); // Check if the output contains the desired information indicating the service is running assert!(output_str.contains("The specified service does not exist as an installed service")); } #[tokio::test] async fn test_create_service() { let service_name = "test_create_service"; // try delete service if it exists _ = super::stop_and_delete_service(service_name).await; let exe_path = PathBuf::from("notepad.exe"); super::create_service(service_name, service_name, vec![], exe_path).unwrap(); //Check if service is running let output = Command::new("sc") .args(["query", service_name]) .output() .expect("Failed to execute command"); let output_str = String::from_utf8_lossy(&output.stdout); print!("SC query output: {}", output_str); // Check if the output contains the desired information indicating the service is running assert!(output_str.contains("STOPPED")); //Clean up - delete service super::stop_and_delete_service(service_name).await.unwrap(); } } GuestProxyAgent-1.0.30/proxy_agent_shared/src/telemetry.rs000066400000000000000000000031111500521614600237330ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT pub mod event_logger; pub mod span; use crate::misc_helpers; use serde_derive::{Deserialize, Serialize}; #[derive(Serialize, Deserialize)] #[allow(non_snake_case)] pub struct Event { pub EventLevel: String, // Critical/Error/Warning/Verbose/Informational/LogAlways pub Message: String, pub Version: String, pub TaskName: String, pub EventPid: String, pub EventTid: String, pub OperationId: String, pub TimeStamp: String, } impl Event { pub fn new(level: String, message: String, task_name: String, operation_id: String) -> Self { Event { EventLevel: level, Message: message, Version: misc_helpers::get_current_version(), TaskName: task_name, EventPid: std::process::id().to_string(), EventTid: misc_helpers::get_thread_identity(), OperationId: operation_id, TimeStamp: misc_helpers::get_date_time_string_with_milliseconds(), } } } #[cfg(test)] mod tests { #[test] fn test_telemetry_new() { let event = super::Event::new( "Critical".to_string(), "test message".to_string(), "test task name".to_string(), "test operation id".to_string(), ); assert_eq!(event.EventLevel, "Critical".to_string()); assert_eq!(event.Message, "test message".to_string()); assert_eq!(event.TaskName, "test task name".to_string()); assert_eq!(event.OperationId, "test operation id".to_string()); } } 
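Because Event derives Serialize with the PascalCase field names shown above, the payload that the telemetry event logger later flushes to its *.json files is a flat JSON object per event. The snippet below is illustrative only and not taken from the crate; the level, task name, and operation id values are placeholders, and serde_json is assumed to be available to the calling code as it is elsewhere in this crate.

use proxy_agent_shared::telemetry::Event;

fn main() {
    // All argument values here are example placeholders.
    let event = Event::new(
        "Informational".to_string(),
        "proxy listener started".to_string(),
        "start_listener".to_string(),
        "00000000-0000-0000-0000-000000000000".to_string(),
    );
    // Prints EventLevel, Message, Version, TaskName, EventPid, EventTid,
    // OperationId and TimeStamp as one JSON object.
    println!("{}", serde_json::to_string_pretty(&event).unwrap());
}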
GuestProxyAgent-1.0.30/proxy_agent_shared/src/telemetry/000077500000000000000000000000001500521614600233715ustar00rootroot00000000000000GuestProxyAgent-1.0.30/proxy_agent_shared/src/telemetry/event_logger.rs000066400000000000000000000161531500521614600264250ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT use crate::logger::logger_manager; use crate::misc_helpers; use crate::telemetry::Event; use concurrent_queue::ConcurrentQueue; use log::Level; use once_cell::sync::Lazy; use std::path::PathBuf; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use std::time::Duration; pub const MAX_MESSAGE_LENGTH: usize = 1024 * 4; // 4KB static EVENT_QUEUE: Lazy> = Lazy::new(|| ConcurrentQueue::::bounded(1000)); static SHUT_DOWN: Lazy> = Lazy::new(|| Arc::new(AtomicBool::new(false))); pub async fn start( event_dir: PathBuf, mut interval: Duration, max_event_file_count: usize, set_status_fn: F, ) where F: Fn(String) -> Fut, Fut: std::future::Future, { let message = "Telemetry event logger thread started."; set_status_fn(message.to_string()); logger_manager::write_log(Level::Info, message.to_string()); if let Err(e) = misc_helpers::try_create_folder(&event_dir) { let message = format!("Failed to create event folder with error: {}", e); set_status_fn(message.to_string()); } let shutdown = SHUT_DOWN.clone(); if interval == Duration::default() { interval = Duration::from_secs(60); } loop { if EVENT_QUEUE.is_closed() { let message = "Event queue already closed, stop processing events."; set_status_fn(message.to_string()); logger_manager::write_log(Level::Info, message.to_string()); break; } tokio::time::sleep(interval).await; if shutdown.load(Ordering::Relaxed) { let message = "Stop signal received, exiting the event logger thread."; set_status_fn(message.to_string()); logger_manager::write_log(Level::Info, message.to_string()); EVENT_QUEUE.close(); } if EVENT_QUEUE.is_empty() { // no event in the queue, skip this loop continue; } let mut events: Vec = Vec::new(); events.reserve_exact(EVENT_QUEUE.len()); for event in EVENT_QUEUE.try_iter() { events.push(event); } // Check the event file counts, // if it exceeds the max file number, drop the new events match misc_helpers::get_files(&event_dir) { Ok(files) => { if files.len() >= max_event_file_count { logger_manager::write_log( Level::Warn,format!( "Event files exceed the max file count {}, drop and skip the write to disk.", max_event_file_count )); continue; } } Err(e) => { logger_manager::write_log( Level::Warn, format!("Failed to get event files with error: {}", e), ); } } let mut file_path = event_dir.to_path_buf(); file_path.push(format!("{}.json", misc_helpers::get_date_time_unix_nano())); match misc_helpers::json_write_to_file(&events, &file_path) { Ok(()) => { logger_manager::write_log( Level::Trace, format!( "Write events to the file {} successfully", file_path.display() ), ); } Err(e) => { logger_manager::write_log( Level::Warn, format!( "Failed to write events to the file {} with error: {}", file_path.display(), e ), ); } } } } pub fn stop() { SHUT_DOWN.store(true, Ordering::Relaxed); } pub fn write_event( level: Level, message: String, method_name: &str, module_name: &str, logger_key: &str, ) { let event_message = if message.len() > MAX_MESSAGE_LENGTH { message[..MAX_MESSAGE_LENGTH].to_string() } else { message.to_string() }; let logger_key = logger_key.to_string(); match EVENT_QUEUE.push(Event::new( level.to_string(), event_message, method_name.to_string(), module_name.to_string(), 
)) { Ok(()) => { // wrap file log within event log logger_manager::log(logger_key, level, message); } Err(e) => { logger_manager::log( logger_key, Level::Warn, format!("Failed to push event to the queue with error: {}", e), ); } }; } #[cfg(test)] mod tests { use crate::misc_helpers; use std::env; use std::fs; use std::time::Duration; #[tokio::test] async fn event_logger_test() { let mut temp_test_path = env::temp_dir(); let logger_key = "event_logger_test"; temp_test_path.push(logger_key); // clean up and ignore the clean up errors _ = fs::remove_dir_all(&temp_test_path); let mut events_dir: std::path::PathBuf = temp_test_path.to_path_buf(); events_dir.push("Events"); let cloned_events_dir = events_dir.to_path_buf(); tokio::spawn(async { super::start(cloned_events_dir, Duration::from_millis(100), 3, |_| { async { // do nothing } }) .await; }); // write some events to the queue and flush to disk write_events(logger_key).await; let files = misc_helpers::get_files(&events_dir).unwrap(); let file_count = files.len(); assert!( file_count > 0, "It should write some files to the event folder" ); // write some events to the queue and flush to disk 3 times for _ in [0; 3] { write_events(logger_key).await; } let files = misc_helpers::get_files(&events_dir).unwrap(); let file_count = files.len(); assert_eq!( 3, file_count, "Cannot write more files to the event folder after 3 times" ); // stop it and no more files write to event folder super::stop(); // wait for stop signal responded tokio::time::sleep(Duration::from_millis(500)).await; write_events(logger_key).await; let files = misc_helpers::get_files(&events_dir).unwrap(); assert_eq!( file_count, files.len(), "No more files could write to event folder after stop()" ); _ = fs::remove_dir_all(&temp_test_path); } async fn write_events(logger_key: &str) { for _ in [0; 10] { super::write_event( log::Level::Info, "This is test event".to_string(), "event_logger_test", "event_logger_test", logger_key, ); } // wait for the queue write to event folder tokio::time::sleep(Duration::from_millis(500)).await; } } GuestProxyAgent-1.0.30/proxy_agent_shared/src/telemetry/span.rs000066400000000000000000000054411500521614600247040ustar00rootroot00000000000000use std::fmt::{Display, Formatter}; // Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT use super::event_logger; use serde_derive::{Deserialize, Serialize}; use std::time::Instant; pub struct SimpleSpan { start: Instant, } #[derive(Serialize, Deserialize)] struct ElapsedMessage { elapsed: u128, message: String, } impl ElapsedMessage { fn new(elapsed: u128, message: String) -> Self { ElapsedMessage { elapsed, message } } fn to_json_string(&self) -> String { format!( "{{\"elapsed\":{}, \"message\":\"{}\"}}", self.elapsed, self.message ) } } impl Display for ElapsedMessage { fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { write!(f, "{} - {}", self.message, self.elapsed) } } impl Default for SimpleSpan { fn default() -> Self { Self::new() } } impl SimpleSpan { pub fn new() -> Self { SimpleSpan { start: Instant::now(), } } pub fn start_new(&mut self) { self.start = Instant::now(); } pub fn get_elapsed_time_in_millisec(&self) -> u128 { self.start.elapsed().as_millis() } pub fn get_elapsed_json_message(&self, message: &str) -> String { let elapsed_massage = ElapsedMessage::new(self.get_elapsed_time_in_millisec(), message.to_string()); elapsed_massage.to_json_string() } pub fn write_event( &self, message: &str, method_name: &str, module_name: &str, logger_key: &str, ) -> String { let 
elapsed_massage = ElapsedMessage::new(self.get_elapsed_time_in_millisec(), message.to_string()); event_logger::write_event( log::Level::Info, elapsed_massage.to_json_string(), method_name, module_name, logger_key, ); elapsed_massage.to_string() } } #[cfg(test)] mod tests { use std::time::Duration; use tokio::time::sleep; #[tokio::test] async fn span_test() { let mut span = super::SimpleSpan::new(); sleep(Duration::from_millis(1)).await; let elapsed = span.get_elapsed_time_in_millisec(); assert!(elapsed > 0); let duration = Duration::from_millis(100); sleep(duration).await; let message: String = span.get_elapsed_json_message("test"); let elapsed_message: super::ElapsedMessage = serde_json::from_str(&message).unwrap(); assert_eq!(elapsed_message.message, "test"); assert!(elapsed_message.elapsed > duration.as_millis()); span.start_new(); sleep(Duration::from_millis(1)).await; let elapsed = span.get_elapsed_time_in_millisec(); assert!(elapsed > 0); assert!(elapsed < duration.as_millis()); } } GuestProxyAgent-1.0.30/proxy_agent_shared/src/version.rs000066400000000000000000000066111500521614600234160ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT use crate::error::{Error, ParseVersionErrorType}; use crate::result::Result; use std::fmt::{Display, Formatter}; #[derive(Clone)] pub struct Version { pub major: u32, pub minor: u32, pub build: Option<u32>, pub revision: Option<u32>, } impl Version { pub fn from_major_minor(major: u32, minor: u32) -> Self { Version::from_major_minor_build_revision(major, minor, None, None) } pub fn from_major_minor_build(major: u32, minor: u32, build: Option<u32>) -> Self { Version::from_major_minor_build_revision(major, minor, build, None) } pub fn from_major_minor_build_revision( major: u32, minor: u32, build: Option<u32>, revision: Option<u32>, ) -> Self { Version { major, minor, build, revision, } } pub fn from_string(version_string: String) -> Result<Version> { let version_parts = version_string.split('.').collect::<Vec<&str>>(); if version_parts.len() < 2 || version_parts.len() > 4 { return Err(Error::ParseVersion(ParseVersionErrorType::InvalidString( version_string, ))); } let major = version_parts[0].parse::<u32>().map_err(|_| { Error::ParseVersion(ParseVersionErrorType::MajorBuild( version_string.to_string(), )) })?; let minor = version_parts[1].parse::<u32>().map_err(|_| { Error::ParseVersion(ParseVersionErrorType::MinorBuild( version_string.to_string(), )) })?; if version_parts.len() == 2 { return Ok(Version::from_major_minor(major, minor)); } let mut build = None; let mut revision = None; if version_parts.len() > 2 { match version_parts[2].parse::<u32>() { Ok(u) => build = Some(u), Err(_) => build = None, }; if version_parts.len() > 3 { match version_parts[3].parse::<u32>() { Ok(u) => revision = Some(u), Err(_) => revision = None, }; } } Ok(Version::from_major_minor_build_revision( major, minor, build, revision, )) } } impl Display for Version { fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { let mut ver = format!("{}.{}", self.major, self.minor); if let Some(b) = self.build { ver = format!("{}.{}", ver, b); if let Some(r) = self.revision { ver = format!("{}.{}", ver, r); } } write!(f, "{}", ver) } } #[cfg(test)] mod tests { use super::*; #[test] fn test_version_from_string() { let version = Version::from_string("1.0".to_string()).unwrap(); assert_eq!(version.major, 1); assert_eq!(version.minor, 0); assert_eq!(version.build, None); assert_eq!(version.revision, None); let version = Version::from_string("1.0.0".to_string()).unwrap(); assert_eq!(version.major, 1);
assert_eq!(version.minor, 0); assert_eq!(version.build, Some(0)); assert_eq!(version.revision, None); let version = Version::from_string("0".to_string()); assert!(version.is_err()); } } GuestProxyAgent-1.0.30/proxy_agent_shared/src/windows.rs000066400000000000000000000301241500521614600234170ustar00rootroot00000000000000// Copyright (c) Microsoft Corporation // SPDX-License-Identifier: MIT use crate::error::{Error, ParseVersionErrorType}; use crate::result::Result; use crate::version::Version; use std::ffi::OsStr; use std::mem::MaybeUninit; use std::os::windows::ffi::OsStrExt; use std::path::Path; use windows_service::service::{ServiceAccess, ServiceState}; use windows_service::service_manager::{ServiceManager, ServiceManagerAccess}; use windows_sys::Win32::Storage::FileSystem::{ GetFileVersionInfoSizeW, // version.dll GetFileVersionInfoW, VerQueryValueW, VS_FIXEDFILEINFO, }; use windows_sys::Win32::System::SystemInformation::SYSTEM_INFO; use winreg::enums::*; use winreg::RegKey; fn read_reg_int(key_name: &str, value_name: &str, default_value: Option<u32>) -> Option<u32> { let hklm = RegKey::predef(HKEY_LOCAL_MACHINE); match hklm.open_subkey(key_name) { Ok(key) => match key.get_value(value_name) { Ok(val) => return Some(val), Err(e) => { print!("{}", e); } }, Err(e) => { print!("{}", e); } } default_value } fn read_reg_string(key_name: &str, value_name: &str, default_value: String) -> String { let hklm = RegKey::predef(HKEY_LOCAL_MACHINE); if let Ok(key) = hklm.open_subkey(key_name) { if let Ok(val) = key.get_value(value_name) { return val; } } default_value } const OS_VERSION_REGISTRY_KEY: &str = "Software\\Microsoft\\Windows NT\\CurrentVersion"; const PRODUCT_NAME_VAL_STRING: &str = "ProductName"; const CURRENT_MAJOR_VERSION_NUMBER_STRING: &str = "CurrentMajorVersionNumber"; const CURRENT_MINOR_VERSION_NUMBER_STRING: &str = "CurrentMinorVersionNumber"; const CURRENT_BUILD_NUMBER_STRING: &str = "CurrentBuildNumber"; const UBRSTRING: &str = "UBR"; pub fn get_os_version() -> Result<Version> { let major; match read_reg_int( OS_VERSION_REGISTRY_KEY, CURRENT_MAJOR_VERSION_NUMBER_STRING, None, ) { Some(m) => major = m, None => { let major_str = read_reg_string( OS_VERSION_REGISTRY_KEY, CURRENT_MAJOR_VERSION_NUMBER_STRING, "".to_string(), ); match major_str.parse::<u32>() { Ok(u) => major = u, Err(_) => { return Err(Error::ParseVersion(ParseVersionErrorType::MajorBuild( format!("{} ({})", major_str, CURRENT_MAJOR_VERSION_NUMBER_STRING), ))); } } } } let minor; match read_reg_int( OS_VERSION_REGISTRY_KEY, CURRENT_MINOR_VERSION_NUMBER_STRING, None, ) { Some(m) => minor = m, None => { let major_str = read_reg_string( OS_VERSION_REGISTRY_KEY, CURRENT_MINOR_VERSION_NUMBER_STRING, "".to_string(), ); match major_str.parse::<u32>() { Ok(u) => minor = u, Err(_) => { return Err(Error::ParseVersion(ParseVersionErrorType::MinorBuild( format!("{} ({})", major_str, CURRENT_MINOR_VERSION_NUMBER_STRING), ))); } } } } let build; let build_str = read_reg_string( OS_VERSION_REGISTRY_KEY, CURRENT_BUILD_NUMBER_STRING, "".to_string(), ); if build_str.is_empty() { build = read_reg_int(OS_VERSION_REGISTRY_KEY, CURRENT_BUILD_NUMBER_STRING, None); } else { match build_str.parse::<u32>() { Ok(u) => build = Some(u), Err(_) => build = None, } } let revision_str = read_reg_string(OS_VERSION_REGISTRY_KEY, UBRSTRING, "".to_string()); let revision; if revision_str.is_empty() { revision = read_reg_int(OS_VERSION_REGISTRY_KEY, UBRSTRING, None); } else { match revision_str.parse::<u32>() { Ok(u) => revision = Some(u), Err(_) => revision = None, } }
Ok(Version::from_major_minor_build_revision( major, minor, build, revision, )) } pub fn get_os_name() -> String { let os_name = read_reg_string( OS_VERSION_REGISTRY_KEY, PRODUCT_NAME_VAL_STRING, "".to_string(), ); // Win11 CurrentVersion Registry Shows Wrong ProductName Key // https://docs.microsoft.com/en-us/answers/questions/555857/windows-11-product-name-in-registry.html if let Ok(ver) = get_os_version() { if let Some(build) = ver.build { if build >= 22000 { return os_name.replace("Windows 10 ", "Windows 11 "); } } } os_name } pub fn get_long_os_version() -> String { match get_os_version() { Ok(ver) => format!("Windows:{}-{}", get_os_name(), ver), Err(_) => format!("Windows:{}-{}", get_os_name(), ""), } } pub fn get_processor_arch() -> String { unsafe { let mut data = MaybeUninit::<SYSTEM_INFO>::uninit(); windows_sys::Win32::System::SystemInformation::GetSystemInfo(data.as_mut_ptr()); // Ref: https://learn.microsoft.com/en-us/windows/win32/api/sysinfoapi/ns-sysinfoapi-system_info match data .assume_init() .Anonymous .Anonymous .wProcessorArchitecture { windows_sys::Win32::System::Diagnostics::Debug::PROCESSOR_ARCHITECTURE_INTEL => "x86", // 0 windows_sys::Win32::System::Diagnostics::Debug::PROCESSOR_ARCHITECTURE_ARM => "ARM", // 5 windows_sys::Win32::System::Diagnostics::Debug::PROCESSOR_ARCHITECTURE_IA64 => "IA64", // 6 windows_sys::Win32::System::Diagnostics::Debug::PROCESSOR_ARCHITECTURE_AMD64 => "AMD64", // 9 12 => "ARM64", // 12 - ARM64 is missed here _ => "unknown", } .to_owned() } } pub fn ensure_service_running(service_name: &str) -> (bool, String) { let mut message = String::new(); let service_manager = match ServiceManager::local_computer(None::<&str>, ServiceManagerAccess::CONNECT) { Ok(manager) => manager, Err(e) => { message = format!( "ensure_service_running:: Failed to connect to service manager with error {e}." ); return (false, message); } }; let service = match service_manager.open_service( service_name, ServiceAccess::QUERY_STATUS | ServiceAccess::START, ) { Ok(s) => s, Err(e) => { message = format!( "ensure_service_running:: Failed to open service {service_name} with error {e}." ); return (false, message); } }; match service.query_status() { Ok(status) => { if status.current_state == ServiceState::Stopped { match service.start(&[OsStr::new("Started by GuestProxyAgent")]) { Ok(()) => { message = format!( "ensure_service_running:: service {service_name} started by GuestProxyAgent successfully." ); } Err(e) => { message = format!( "ensure_service_running:: Failed to start service {service_name} with error {e}." ); return (false, message); } } } } Err(e) => { message = format!( "ensure_service_running:: Failed to query service state for {service_name} with error {e}."
); return (false, message); } } (true, message) } pub fn get_file_product_version(file_path: &Path) -> Result<Version> { if !file_path.exists() { return Err(Error::ParseVersion(ParseVersionErrorType::InvalidString( format!("File path does not exist: {}", file_path.display()), ))); } if !file_path.is_file() { return Err(Error::ParseVersion(ParseVersionErrorType::InvalidString( format!("File path is not a file: {}", file_path.display()), ))); } if !file_path.is_absolute() { return Err(Error::ParseVersion(ParseVersionErrorType::InvalidString( format!("File path is not absolute: {}", file_path.display()), ))); } let file_path = file_path .as_os_str() .encode_wide() .chain(Some(0)) .collect::<Vec<u16>>(); let size = unsafe { GetFileVersionInfoSizeW(file_path.as_ptr(), std::ptr::null_mut()) }; if size == 0 { return Err(Error::WindowsApi( "GetFileVersionInfoSizeW".to_string(), std::io::Error::last_os_error(), )); } let mut buffer = vec![0u8; size as usize]; if unsafe { GetFileVersionInfoW(file_path.as_ptr(), 0, size, buffer.as_mut_ptr() as *mut _) } == 0 { return Err(Error::WindowsApi( "GetFileVersionInfoW".to_string(), std::io::Error::last_os_error(), )); } // get VS_FIXEDFILEINFO let mut fixed_file_info = MaybeUninit::<*mut VS_FIXEDFILEINFO>::uninit(); let mut fixed_file_info_size = 0; let result = unsafe { VerQueryValueW( buffer.as_mut_ptr() as *mut _, "\\".encode_utf16() .chain(Some(0)) .collect::<Vec<u16>>() .as_ptr(), fixed_file_info.as_mut_ptr() as *mut _, &mut fixed_file_info_size, ) }; if result == 0 { return Err(Error::WindowsApi( "VerQueryValueW".to_string(), std::io::Error::last_os_error(), )); } if fixed_file_info_size != std::mem::size_of::<VS_FIXEDFILEINFO>() as u32 { return Err(Error::ParseVersion(ParseVersionErrorType::InvalidString( format!( "Invalid VS_FIXEDFILEINFO size '{}' returned", fixed_file_info_size ), ))); } // get the product version from VS_FIXEDFILEINFO let fixed_file_info = unsafe { *fixed_file_info.assume_init() }; let major = fixed_file_info.dwProductVersionMS >> 16; let minor = fixed_file_info.dwProductVersionMS & 0xFFFF; let build = fixed_file_info.dwProductVersionLS >> 16; let revision = fixed_file_info.dwProductVersionLS & 0xFFFF; let version = Version::from_major_minor_build_revision(major, minor, Some(build), Some(revision)); Ok(version) } #[cfg(test)] mod tests { #[test] fn get_os_version_tests() { let os_name = super::get_os_name(); assert_ne!("", os_name, "os name cannot be empty"); let os_version = super::get_os_version().unwrap(); assert_ne!(None, os_version.build, "os version.build cannot be None."); assert_ne!( None, os_version.revision, "os version.revision cannot be None."
); let long_os_version = super::get_long_os_version(); assert_eq!( format!("Windows:{}-{}", os_name, os_version), long_os_version, "long_os_version mismatch" ) } #[test] fn get_processor_arch_test() { let processor_arch = super::get_processor_arch(); assert_ne!( "unknown", processor_arch, "processor arch cannot be 'unknown'" ); } #[test] fn get_file_product_version_test() { let system_path = std::env::var("SystemRoot").unwrap_or("C:\\Windows".to_string()); let file_path = std::path::Path::new(&system_path) .join("System32") .join("kernel32.dll"); let version = match super::get_file_product_version(&file_path) { Ok(v) => v, Err(e) => { println!("Failed to get file product version: {}", e); assert!(false, "Failed to get file product version"); return; } }; println!("kernel32.dll File product version: {}", version); assert_eq!(version.major, 10, "major version mismatch"); } } GuestProxyAgent-1.0.30/rpmbuild/000077500000000000000000000000001500521614600165215ustar00rootroot00000000000000GuestProxyAgent-1.0.30/rpmbuild/SPECS/000077500000000000000000000000001500521614600173765ustar00rootroot00000000000000GuestProxyAgent-1.0.30/rpmbuild/SPECS/azure-proxy-agent.spec000066400000000000000000000024731500521614600236610ustar00rootroot00000000000000%global __os_install_post %{nil} Name: azure-proxy-agent Version: %{pkgversion} Release: 0 Summary: Azure Proxy Agent License: MIT URL: https://github.com/Azure/GuestProxyAgent Source0: %{name}_%{version}.tar.gz %description Microsoft Azure Guest Proxy Agent. %define _buildshell /bin/bash %prep %setup -n %{name}_%{version} %install mkdir -p %{buildroot}/usr/sbin/ mkdir -p %{buildroot}/etc/azure/ mkdir -p %{buildroot}/usr/lib/systemd/system/ mkdir -p %{buildroot}/usr/lib/azure-proxy-agent/ cp -f ./package/ProxyAgent/proxy-agent.json %{buildroot}/etc/azure/ cp -f ./package/azure-proxy-agent.service %{buildroot}/usr/lib/systemd/system/ cp -f ./package/ProxyAgent/ebpf_cgroup.o %{buildroot}/usr/lib/azure-proxy-agent/ cp -f ./package/ProxyAgent/azure-proxy-agent %{buildroot}/usr/sbin/ %post %systemd_post azure-proxy-agent.service systemctl unmask azure-proxy-agent.service systemctl daemon-reload systemctl start azure-proxy-agent.service systemctl enable azure-proxy-agent.service %files %defattr(-,root,root,-) /usr/lib/systemd/system/azure-proxy-agent.service /usr/sbin/azure-proxy-agent /etc/azure/proxy-agent.json /usr/lib/azure-proxy-agent/ebpf_cgroup.o %changelog * Fri Sep 13 23:43:30 UTC 2024 - ARTProxyAgentVTeam@microsoft.com - Initial release GuestProxyAgent-1.0.30/vs-init.cmd000066400000000000000000000014721500521614600167650ustar00rootroot00000000000000REM Copyright (c) Microsoft Corporation REM SPDX-License-Identifier: MIT @echo off SET VS_DEV_CMD_COMMUNITY="%ProgramFiles%\\Microsoft Visual Studio\\2022\\Community\\Common7\\Tools\\VsDevCmd.bat" SET VS_DEV_CMD_PROFESSIONAL="%ProgramFiles%\\Microsoft Visual Studio\\2022\\Professional\\Common7\\Tools\\VsDevCmd.bat" SET VS_DEV_CMD_ENTERPRISE="%ProgramFiles%\\Microsoft Visual Studio\\2022\\Enterprise\\Common7\\Tools\\VsDevCmd.bat" IF EXIST %VS_DEV_CMD_ENTERPRISE% ( SET VS_DEV_CMD=%VS_DEV_CMD_ENTERPRISE% ) ELSE ( IF EXIST %VS_DEV_CMD_PROFESSIONAL% ( SET VS_DEV_CMD=%VS_DEV_CMD_PROFESSIONAL% ) ELSE ( IF EXIST %VS_DEV_CMD_COMMUNITY% ( SET VS_DEV_CMD=%VS_DEV_CMD_COMMUNITY% ) ELSE ( ECHO "No VS 2022 found!" EXIT /b 1 ) ) ) %VS_DEV_CMD%
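A minimal usage sketch of the shared helpers listed above (illustrative only; it is not part of the packaged sources, and the crate name proxy_agent_shared plus the public module paths are assumed from the directory layout shown in the archive):
use proxy_agent_shared::telemetry::span::SimpleSpan;
use proxy_agent_shared::version::Version;

fn sketch() {
    // Parse a dotted version string; the build and revision parts are optional.
    let ver = Version::from_string("1.0.30".to_string()).expect("valid version string");
    assert_eq!(ver.major, 1);

    // Time a unit of work and render the elapsed milliseconds as the JSON payload
    // that SimpleSpan::write_event would forward to event_logger::write_event.
    let span = SimpleSpan::new();
    // ... do some work here ...
    let json = span.get_elapsed_json_message("work completed");
    println!("{}", json); // e.g. {"elapsed":12, "message":"work completed"}
}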