accelerate-1.9.0/.devcontainer/devcontainer.json
// File only needed for VSCode users to have proper Docker based interpreters
{
    "name": "accelerate_dev_environment",
    "build": {
        // ACTION NEEDED: comment/uncomment the relevant line depending on whether you are in a CPU/GPU environment
        "dockerfile": "../docker/accelerate-cpu/Dockerfile"
        // "dockerfile": "../docker/accelerate-gpu/Dockerfile"
    },
    "runArgs": [
        // ACTION NEEDED: uncomment the next line if your local machine has GPUs available
        // "--gpus", "all",
        // Enable the docker container to access system resources
        "--ipc", "host"
    ],
    "remoteEnv": {
        "PYTHONPATH": "${containerEnv:PATH}:${containerWorkspaceFolder}"
    },
    "customizations": {
        "vscode": {
            "extensions": [
                // Ensure we have IntelliSense in VSCode when running inside container
                "ms-python.python"
            ]
        }
    },
    "workspaceFolder": "/workspaces/accelerate",
    // Need git for VSCode to color code modifications. Only runs when building environment.
    "onCreateCommand": "apt-get update && apt-get install -y git && pip install -e '.[dev]'"
}
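For contributors who do not use VSCode, roughly the same environment can be reproduced with plain Docker. This is only a sketch, not part of the repository: the `accelerate-dev` image tag is an arbitrary choice, and the Dockerfile path is the CPU one referenced above (swap in `docker/accelerate-gpu/Dockerfile` and add `--gpus all` on a GPU machine).

```bash
# Build the CPU development image referenced by the dev container.
docker build -f docker/accelerate-cpu/Dockerfile -t accelerate-dev .

# Mirror the devcontainer runArgs: share IPC with the host, mount the repo at the
# same workspace path, then install the library in editable dev mode.
docker run -it --ipc host \
    -v "$(pwd)":/workspaces/accelerate -w /workspaces/accelerate \
    accelerate-dev bash -c "pip install -e '.[dev]' && bash"
```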
accelerate-1.9.0/.github/ISSUE_TEMPLATE/bug-report.yml
name: "\U0001F41B Bug Report"
description: Submit a bug report to help us improve Accelerate
body:
  - type: markdown
    attributes:
      value: |
        Thanks for taking the time to submit a bug report! 🐛
        If this is not a bug related to the Accelerate library directly, but instead a general question about your code or the library specifically please use the [forums](https://discuss.huggingface.co/c/accelerate/18).
  - type: textarea
    id: system-info
    attributes:
      label: System Info
      description: Please share your accelerate configuration with us. You can run the command `accelerate env` and copy-paste its outputs below
      render: Shell
      placeholder: accelerate version, OS, python version, numpy version, torch version, and accelerate's configuration
    validations:
      required: true
  - type: checkboxes
    id: information-scripts-examples
    attributes:
      label: Information
      description: 'The problem arises when using:'
      options:
        - label: "The official example scripts"
        - label: "My own modified scripts"
  - type: checkboxes
    id: information-tasks
    attributes:
      label: Tasks
      description: "The tasks I am working on are:"
      options:
        - label: "One of the scripts in the examples/ folder of Accelerate or an officially supported `no_trainer` script in the `examples` folder of the `transformers` repo (such as `run_no_trainer_glue.py`)"
        - label: "My own task or dataset (give details below)"
  - type: textarea
    id: reproduction
    validations:
      required: true
    attributes:
      label: Reproduction
      description: |
        Please provide a code sample that reproduces the problem you ran into. It can be a Colab link or just a code snippet.
        If you have code snippets, error messages, stack traces please provide them here as well.
        Important! Use code tags to correctly format your code. See https://help.github.com/en/github/writing-on-github/creating-and-highlighting-code-blocks#syntax-highlighting
        Do not use screenshots, as they are hard to read and (more importantly) don't allow others to copy-and-paste your code.
      placeholder: |
        Steps to reproduce the behavior:

          1.
          2.
          3.
  - type: textarea
    id: expected-behavior
    validations:
      required: true
    attributes:
      label: Expected behavior
      description: "A clear and concise description of what you would expect to happen."
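The System Info field above expects the output of the `accelerate env` command mentioned in the description. A typical way to collect it for a report looks like the following; redirecting to a file is just a convenience and not part of the template.

```bash
# Print the local accelerate/PyTorch/Python environment and the current
# accelerate configuration, then copy the output into the bug report.
accelerate env

# Optionally capture it to a file to paste from.
accelerate env > accelerate_env.txt
```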
accelerate-1.9.0/.github/PULL_REQUEST_TEMPLATE.md
# What does this PR do?

Fixes # (issue)

## Before submitting
- [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case).
- [ ] Did you read the [contributor guideline](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md#submitting-a-pull-request-pr), Pull Request section?
- [ ] Was this discussed/approved via a Github issue or the [forum](https://discuss.huggingface.co/)? Please add a link to it if that's the case.
- [ ] Did you make sure to update the documentation with your changes? Here are the [documentation guidelines](https://github.com/huggingface/accelerate/tree/main/docs), and [here are tips on formatting docstrings](https://github.com/huggingface/accelerate/tree/main/docs#writing-documentation---specification).
- [ ] Did you write any new necessary tests?

## Who can review?

Anyone in the community is free to review the PR once the tests have passed. Feel free to tag members/contributors who may be interested in your PR.

accelerate-1.9.0/.github/workflows/build-docker-images-release.yml
name: Build Docker images (releases)

on:
  workflow_dispatch:
  release:
    types: [published]

concurrency:
  group: docker-image-builds
  cancel-in-progress: false

jobs:
  get-version:
    runs-on: ubuntu-latest
    outputs:
      version: ${{ steps.step1.outputs.version }}
    steps:
      - uses: actions/checkout@v4
      - id: step1
        run: echo "version=$(python setup.py --version)" >> $GITHUB_OUTPUT

  version-cpu:
    name: "Latest Accelerate CPU [version]"
    runs-on:
      group: aws-general-8-plus
    needs: get-version
    steps:
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Login to DockerHub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_PASSWORD }}
      - name: Build and Push CPU
        uses: docker/build-push-action@v4
        with:
          file: docker/accelerate-cpu/Dockerfile
          push: true
          tags: huggingface/accelerate:cpu-release-${{ needs.get-version.outputs.version }}

  version-cuda:
    name: "Latest Accelerate GPU [version]"
    runs-on:
      group: aws-g6-4xlarge-plus
    needs: get-version
    steps:
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Login to DockerHub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_PASSWORD }}
      - name: Build and Push GPU
        uses: docker/build-push-action@v4
        with:
          file: docker/accelerate-gpu/Dockerfile
          push: true
          tags: huggingface/accelerate:gpu-release-${{ needs.get-version.outputs.version }}

  version-cuda-deepspeed:
    name: "Latest Accelerate GPU DeepSpeed [version]"
    runs-on:
      group: aws-g6-4xlarge-plus
    needs: get-version
    steps:
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Login to DockerHub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_PASSWORD }}
      - name: Build and Push GPU
        uses: docker/build-push-action@v4
        with:
          file: docker/accelerate-gpu-deepspeed/Dockerfile
          push: true
          tags: huggingface/accelerate:gpu-deepspeed-release-${{ needs.get-version.outputs.version }}

  version-cuda-fp8-transformerengine:
    name: "Latest Accelerate GPU FP8 TransformerEngine [version]"
    runs-on:
      group: aws-g6-4xlarge-plus
    needs: get-version
    steps:
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Login to DockerHub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_PASSWORD }}
      - name: Build and Push GPU
        uses: docker/build-push-action@v4
        with:
          file: docker/accelerate-gpu/Dockerfile
          push: true
          tags: huggingface/accelerate:gpu-fp8-transformerengine-release-${{ needs.get-version.outputs.version }}
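For reference, the tag computed by the `get-version` job above can be reproduced locally, which is handy when checking what a release image would be named. The build command is only a sketch of what `docker/build-push-action` does for the CPU job; it is not part of the workflow itself.

```bash
# Same version lookup the get-version job performs.
VERSION=$(python setup.py --version)

# Build the CPU release image under the tag the workflow would push.
docker build -f docker/accelerate-cpu/Dockerfile \
    -t huggingface/accelerate:cpu-release-"$VERSION" .
```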
accelerate-1.9.0/.github/workflows/build_and_run_tests.yml
name: Trigger docker images and run tests

on:
  push:
    branches:
      - main
  workflow_dispatch:

env:
  GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

jobs:
  check-for-source:
    runs-on: ubuntu-latest
    name: Check if setup was changed
    outputs:
      changed: ${{ steps.was_changed.outputs.changed }}
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: "2"
      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@3f54ebb830831fc121d3263c1857cfbdc310cdb9 #v42
      - name: Was setup changed
        id: was_changed
        run: |
          for file in ${{ steps.changed-files.outputs.all_changed_files }}; do
            if [ `basename "${file}"` == "setup.py" ]; then
              echo "changed=1" >> $GITHUB_OUTPUT
            fi
          done

  build-docker-containers:
    needs: check-for-source
    if: (github.event_name == 'push') && (needs.check-for-source.outputs.changed == '1')
    uses: ./.github/workflows/build_docker_images.yml
    secrets: inherit

  run-merge-tests:
    needs: build-docker-containers
    if: always()
    uses: ./.github/workflows/run_merge_tests.yml

  run-integration-tests:
    needs: build-docker-containers
    if: always()
    uses: ./.github/workflows/self_hosted_integration_tests.yml

accelerate-1.9.0/.github/workflows/build_docker_images.yml
name: Build Docker images (scheduled)

on:
  workflow_dispatch:
  workflow_call:
  schedule:
    - cron: "0 1 * * *"

concurrency:
  group: docker-image-builds
  cancel-in-progress: false

jobs:
  latest-cpu:
    name: "Latest Accelerate CPU [dev]"
    runs-on:
      group: aws-general-8-plus
    steps:
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Login to DockerHub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_PASSWORD }}
      - name: Get current date
        id: date
        run: |
          echo "date=$(date '+%Y-%m-%d')" >> $GITHUB_ENV
      - name: Build and Push CPU
        uses: docker/build-push-action@v4
        with:
          file: docker/accelerate-cpu/Dockerfile
          push: true
          tags: |
            huggingface/accelerate:cpu-nightly
            huggingface/accelerate:cpu-nightly-${{ env.date }}

  latest-cuda:
    name: "Latest Accelerate GPU [dev]"
    runs-on:
      group: aws-g6-4xlarge-plus
    steps:
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Login to DockerHub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_PASSWORD }}
      - name: Get current date
        id: date
        run: |
          echo "date=$(date '+%Y-%m-%d')" >> $GITHUB_ENV
      - name: Build and Push GPU
        uses: docker/build-push-action@v4
        with:
          file: docker/accelerate-gpu/Dockerfile
          push: true
          tags: |
            huggingface/accelerate:gpu-nightly
            huggingface/accelerate:gpu-nightly-${{ env.date }}

  latest-cuda-deepspeed:
    name: "Latest Accelerate GPU DeepSpeed [dev]"
    runs-on:
      group: aws-g6-4xlarge-plus
    steps:
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Login to DockerHub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_PASSWORD }}
      - name: Get current date
        id: date
        run: |
          echo "date=$(date '+%Y-%m-%d')" >> $GITHUB_ENV
      - name: Build and Push GPU
        uses: docker/build-push-action@v4
        with:
          file: docker/accelerate-gpu-deepspeed/Dockerfile
          push: true
          tags: |
            huggingface/accelerate:gpu-deepspeed-nightly
            huggingface/accelerate:gpu-deepspeed-nightly-${{ env.date }}

  latest-cuda-fp8-transformerengine:
    name: "Latest Accelerate GPU FP8 TransformerEngine [dev]"
    runs-on:
      group: aws-g6-4xlarge-plus
    steps:
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Login to DockerHub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_PASSWORD }}
      - name: Get current date
        id: date
        run: |
          echo "date=$(date '+%Y-%m-%d')" >> $GITHUB_ENV
          # Get the previous month
          echo "base_year=$(date -d 'last month' '+%y')" >> $GITHUB_ENV
          echo "base_month=$(date -d 'last month' '+%m')" >> $GITHUB_ENV
      - name: Build and Push GPU
        uses: docker/build-push-action@v4
        with:
          file: benchmarks/fp8/transformer_engine/Dockerfile
          push: true
          tags: huggingface/accelerate:gpu-fp8-transformerengine-nightly-${{ env.date }}
          build-args: |
            BASE_YEAR=${{ env.base_year }}
            BASE_MONTH=${{ env.base_month }}

accelerate-1.9.0/.github/workflows/build_documentation.yml
name: Build documentation

on:
  push:
    branches:
      - main
      - doc-builder*
      - v*-release

jobs:
  build:
    uses: huggingface/doc-builder/.github/workflows/build_main_documentation.yml@main
    with:
      commit_sha: ${{ github.sha }}
      package: accelerate
      custom_container: huggingface/transformers-doc-builder
    secrets:
      hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }}

accelerate-1.9.0/.github/workflows/build_pr_documentation.yml
name: Build PR Documentation

on:
  pull_request:

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  build:
    uses: huggingface/doc-builder/.github/workflows/build_pr_documentation.yml@main
    with:
      commit_sha: ${{ github.event.pull_request.head.sha }}
      pr_number: ${{ github.event.number }}
      package: accelerate
      custom_container: huggingface/transformers-doc-builder
accelerate-1.9.0/.github/workflows/fp8_runner.yml
name: Test FP8 Runner

on:
  workflow_dispatch:

env:
  GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

jobs:
  set-prev-day:
    runs-on: ubuntu-latest
    outputs:
      prev-day: ${{ steps.set-prev-day.outputs.prev-day }}
    steps:
      - name: Set PREV_DAY
        id: set-prev-day
        run: |
          PREV_DAY=$(date -d "yesterday" '+%Y-%m-%d')
          echo "prev-day=$PREV_DAY" >> $GITHUB_OUTPUT

  run-fp8-tests:
    needs: set-prev-day
    runs-on:
      group: aws-g6e-12xlarge
    container:
      image: huggingface/accelerate:gpu-fp8-transformerengine-nightly-${{ needs.set-prev-day.outputs.prev-day }}
      options: --gpus all --shm-size "16gb"
    steps:
      - uses: actions/checkout@v3
      - name: Install the library
        run: |
          pip install -e .[test_prod,test_fp8]
      - name: Show installed libraries
        run: |
          pip freeze
      - name: Run TE FP8 tests
        run: |
          python -m pytest -s -v ./tests/test_fp8.py
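Outside of CI, the same FP8 check can be run in the corresponding nightly container. This is only a sketch: the image tag follows the naming scheme used by the image-build workflow and may not exist for every date, and cloning the repository inside the container stands in for the checkout step above.

```bash
# Pick yesterday's FP8 TransformerEngine nightly image, as the workflow does.
IMG=huggingface/accelerate:gpu-fp8-transformerengine-nightly-$(date -d "yesterday" '+%Y-%m-%d')

# Install the FP8 test extras and run the TransformerEngine FP8 tests on GPU.
docker run --gpus all --shm-size 16gb -it "$IMG" bash -c \
    "git clone https://github.com/huggingface/accelerate && cd accelerate && \
     pip install -e .[test_prod,test_fp8] && \
     python -m pytest -s -v ./tests/test_fp8.py"
```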
accelerate-1.9.0/.github/workflows/gaudi3_scheduled.yml
name: Gaudi3 tests (scheduled)

on:
  workflow_dispatch:
  schedule: # every day at 6 AM UTC
    - cron: "0 6 * * *"

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  run-gaudi3-tests:
    runs-on:
      group: itac-bm-emr-gaudi3-dell-2gaudi
    container:
      image: docker://vault.habana.ai/gaudi-docker/1.21.1/ubuntu22.04/habanalabs/pytorch-installer-2.6.0:latest
      options: --runtime=habana --shm-size=64G --cap-add=sys_nice --env HABANA_VISIBLE_DEVICES
    env:
      OMPI_MCA_btl_vader_single_copy_mechanism: none
      PT_ENABLE_INT64_SUPPORT: 1
      PT_HPU_LAZY_MODE: 0
      RUN_SLOW: 1
    steps:
      - name: HL-SMI (1)
        run: |
          hl-smi
          echo "HABANA_VISIBLE_DEVICES=${HABANA_VISIBLE_DEVICES}"
          echo "HABANA_VISIBLE_MODULES=${HABANA_VISIBLE_MODULES}"
      - name: Extract HPU visible modules
        id: add-modules
        run: |
          export HABANA_VISIBLE_MODULES=$(hl-smi -Q module_id -f csv,noheader | tr '\n' ',' | sed 's/,$//')
          echo "HABANA_VISIBLE_MODULES=${HABANA_VISIBLE_MODULES}" >> $GITHUB_ENV
      - name: HL-SMI (2)
        run: |
          hl-smi
          echo "HABANA_VISIBLE_DEVICES=${HABANA_VISIBLE_DEVICES}"
          echo "HABANA_VISIBLE_MODULES=${HABANA_VISIBLE_MODULES}"
      - name: Checkout to Accelerate
        uses: actions/checkout@v4
      - name: Install Accelerate with Transformers & DeepSpeed
        run: |
          pip install -e .[testing] \
            git+https://github.com/HabanaAI/DeepSpeed.git@1.20.0 \
            git+https://github.com/huggingface/transformers.git
      - name: Run CLI tests
        if: ${{ !cancelled() && (success() || failure()) }}
        run: |
          make test_cli
      - name: Run Core tests
        if: ${{ !cancelled() && (success() || failure()) }}
        run: |
          make test_core
      - name: Run Big Modeling tests
        if: ${{ !cancelled() && (success() || failure()) }}
        run: |
          make test_big_modeling
      - name: Run DeepSpeed integration tests
        if: ${{ !cancelled() && (success() || failure()) }}
        run: |
          make test_deepspeed
      - name: Run FSDP integration tests
        if: ${{ !cancelled() && (success() || failure()) }}
        run: |
          make test_fsdp
      - name: Run TP integration tests
        if: ${{ !cancelled() && (success() || failure()) }}
        run: |
          make test_tp
      - name: Run Examples tests
        if: ${{ !cancelled() && (success() || failure()) }}
        run: |
          make test_examples

accelerate-1.9.0/.github/workflows/integration_tests.yml
# CI for specifically ensuring integrations work fine (`transformers` mainly)
# Useful tips:
# - New integrations to test should have its own job, and follow a strategy method where we check both
#   the pypi and github versions.
# - When checking the latest release of the integration, use
#   git checkout $(git describe --tags `git rev-list --tags --max-count=1`) to get the latest release.
name: Integration Tests

on:
  pull_request:
    paths:
      - "src/**"
      - "tests/**"
      - ".github/**"
      - "examples/**"
      - "setup.py"
    types: [opened, synchronize, reopened]

env:
  HF_HOME: ~/hf_cache

jobs:
  run-trainer-tests:
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
    steps:
      - uses: actions/checkout@v4
      - name: Set up python 3.9
        uses: actions/setup-python@v5
        with:
          python-version: 3.9
          cache: 'pip'
          cache-dependency-path: 'setup.py'
      - name: Install Accelerate from source
        run: |
          pip install --upgrade pip
          pip install -e .
      - name: Clone and install transformers
        run: |
          cd ..
          git clone https://github.com/huggingface/transformers
          cd transformers
          pip install .[torch,testing]
      - name: Show installed libraries
        run: |
          pip freeze
      - name: Run Trainer tests
        env:
          WANDB_DISABLED: true
        run: |
          cd ../transformers
          pytest -sv tests/trainer
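To reproduce this integration check locally, the same steps can be chained by hand. The directory layout (cloning transformers next to the accelerate checkout) is carried over from the workflow above; run it from the root of an accelerate clone.

```bash
# Install accelerate from source, then the transformers Trainer test suite against it.
pip install --upgrade pip
pip install -e .
cd .. && git clone https://github.com/huggingface/transformers && cd transformers
pip install .[torch,testing]
WANDB_DISABLED=true pytest -sv tests/trainer
```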
accelerate-1.9.0/.github/workflows/nightly.yml
name: Self-hosted runner with slow tests (scheduled)

on:
  workflow_dispatch:
  schedule:
    - cron: "0 2 * * *"

env:
  RUN_SLOW: "yes"
  IS_GITHUB_CI: "1"
  SLACK_API_TOKEN: ${{ secrets.SLACK_API_TOKEN }}

jobs:
  run_core_tests_single_gpu:
    runs-on:
      group: aws-g6-4xlarge-plus
    env:
      CUDA_VISIBLE_DEVICES: "0"
      TEST_TYPE: "single_gpu"
    container:
      image: huggingface/accelerate:gpu-nightly
      options: --gpus all --shm-size "16gb"
    defaults:
      run:
        shell: bash
    steps:
      - name: Update clone & pip install
        run: |
          source activate accelerate
          git clone https://github.com/huggingface/accelerate;
          cd accelerate;
          git checkout ${{ github.sha }};
          pip install -e . --no-deps
          pip install pytest-reportlog tabulate
      - name: Show installed libraries
        run: |
          source activate accelerate;
          pip freeze
      - name: Run test on GPUs
        working-directory: accelerate
        run: |
          source activate accelerate
          make test
      - name: Run examples on GPUs
        working-directory: accelerate
        if: always()
        run: |
          source activate accelerate
          pip uninstall comet_ml -y
          make test_examples
      - name: Generate Report
        working-directory: accelerate
        if: always()
        run: |
          pip install slack_sdk tabulate
          python utils/log_reports.py >> $GITHUB_STEP_SUMMARY

  run_deepspeed_tests_single_gpu:
    runs-on:
      group: aws-g6-4xlarge-plus
    env:
      CUDA_VISIBLE_DEVICES: "0"
      TEST_TYPE: "single_gpu_deepspeed"
    container:
      image: huggingface/accelerate:gpu-deepspeed-nightly
      options: --gpus all --shm-size "16gb"
    defaults:
      run:
        shell: bash
    steps:
      - name: Update clone & pip install
        run: |
          source activate accelerate
          git clone https://github.com/huggingface/accelerate;
          cd accelerate;
          git checkout ${{ github.sha }};
          pip install -e . --no-deps
          pip install pytest-reportlog tabulate
      - name: Show installed libraries
        run: |
          source activate accelerate;
          pip freeze
      - name: Run test on GPUs
        working-directory: accelerate
        run: |
          source activate accelerate
          make test_deepspeed
      - name: Run Integration tests on GPUs
        working-directory: accelerate
        if: always()
        run: |
          source activate accelerate
          make test_integrations
      - name: Run examples on GPUs
        working-directory: accelerate
        if: always()
        run: |
          source activate accelerate
          pip uninstall comet_ml -y
          make test_examples
      - name: Generate Report
        working-directory: accelerate
        if: always()
        run: |
          pip install slack_sdk tabulate
          python utils/log_reports.py >> $GITHUB_STEP_SUMMARY

  run_core_tests_multi_gpu:
    runs-on:
      group: aws-g6-12xlarge-plus
    env:
      CUDA_VISIBLE_DEVICES: "0,1"
      TEST_TYPE: "multi_gpu"
    container:
      image: huggingface/accelerate:gpu-nightly
      options: --gpus all --shm-size "16gb"
    defaults:
      run:
        shell: bash
    steps:
      - name: Update clone
        run: |
          source activate accelerate
          git clone https://github.com/huggingface/accelerate;
          cd accelerate;
          git checkout ${{ github.sha }};
          pip install -e . --no-deps
          pip install pytest-reportlog tabulate
      - name: Show installed libraries
        run: |
          source activate accelerate;
          pip freeze
      - name: Run core and big modeling tests on GPUs
        working-directory: accelerate
        run: |
          source activate accelerate
          make test_core
          make test_big_modeling
          make test_cli
      - name: Run Integration tests on GPUs
        working-directory: accelerate
        if: always()
        run: |
          source activate accelerate
          make test_integrations
      - name: Run examples on GPUs
        working-directory: accelerate
        if: always()
        run: |
          source activate accelerate
          pip uninstall comet_ml -y
          make test_examples
      - name: Generate Report
        working-directory: accelerate
        if: always()
        run: |
          pip install slack_sdk tabulate
          python utils/log_reports.py >> $GITHUB_STEP_SUMMARY

  run_deepspeed_tests_multi_gpu:
    runs-on:
      group: aws-g6-12xlarge-plus
    env:
      CUDA_VISIBLE_DEVICES: "0,1"
      TEST_TYPE: "multi_gpu_deepspeed"
    container:
      image: huggingface/accelerate:gpu-deepspeed-nightly
      options: --gpus all --shm-size "16gb"
    defaults:
      run:
        shell: bash
    steps:
      - name: Update clone
        run: |
          source activate accelerate
          git clone https://github.com/huggingface/accelerate;
          cd accelerate;
          git checkout ${{ github.sha }};
          pip install -e . --no-deps
          pip install pytest-reportlog tabulate
      - name: Show installed libraries
        run: |
          source activate accelerate;
          pip freeze
      - name: Run DeepSpeed tests
        working-directory: accelerate
        run: |
          source activate accelerate
          make test_deepspeed
      - name: Run Integration tests on GPUs
        working-directory: accelerate
        if: always()
        run: |
          source activate accelerate
          make test_integrations
      - name: Run examples on GPUs
        working-directory: accelerate
        if: always()
        run: |
          source activate accelerate
          pip uninstall comet_ml -y
          make test_examples
      - name: Generate Report
        working-directory: accelerate
        if: always()
        run: |
          pip install slack_sdk tabulate
          python utils/log_reports.py >> $GITHUB_STEP_SUMMARY

  run-integration-tests:
    if: always()
    uses: ./.github/workflows/self_hosted_integration_tests.yml
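The nightly jobs above differ from the PR CI mainly in that they enable the slow tests through the `RUN_SLOW` environment variable and run inside the nightly GPU images. A rough local equivalent, assuming one of those images is available and contains the `accelerate` conda environment the workflow activates, might be:

```bash
# Run the slow suite inside the nightly GPU image, mirroring the scheduled job.
docker run --gpus all --shm-size 16gb -it huggingface/accelerate:gpu-nightly bash -c \
    "source activate accelerate && \
     git clone https://github.com/huggingface/accelerate && cd accelerate && \
     pip install -e . --no-deps && pip install pytest-reportlog tabulate && \
     RUN_SLOW=yes make test"
```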
accelerate-1.9.0/.github/workflows/pr_style_bot.yml
# To run this bot, comment "@bot /style" on a PR
name: Style Bot

on:
  issue_comment:
    types: [created]

permissions:
  contents: write
  pull-requests: write

jobs:
  style:
    uses: huggingface/huggingface_hub/.github/workflows/style-bot-action.yml@main
    with:
      python_quality_dependencies: "[quality]"
      style_command_type: "default"
    secrets:
      bot_token: ${{ secrets.GITHUB_TOKEN }}

accelerate-1.9.0/.github/workflows/quality.yml
name: Quality Check

on: [pull_request]

jobs:
  quality:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python 3.9
        uses: actions/setup-python@v5
        with:
          python-version: 3.9
          cache: 'pip'
          cache-dependency-path: 'setup.py'
      - name: Install Python dependencies
        run: pip install -e .[quality]
      - name: Run Quality check
        run: make quality
      - name: Check if failure
        if: ${{ failure() }}
        run: |
          echo "Quality check failed. Please ensure the right dependency versions are installed with 'pip install -e .[quality]' and rerun 'make style; make quality;'" >> $GITHUB_STEP_SUMMARY
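The same check can be run before pushing, using the `quality` extra and the Makefile targets this job relies on:

```bash
# Install the linting/formatting dependencies, then run what CI runs.
pip install -e ".[quality]"
make quality   # the check the Quality Check job performs
make style     # auto-fix most findings, then re-run `make quality`
```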
accelerate-1.9.0/.github/workflows/run_merge_tests.yml
name: Self-hosted runner tests (push to "main")

on:
  workflow_call:
  workflow_dispatch:

env:
  TESTING_MOCKED_DATALOADERS: "1"
  IS_GITHUB_CI: "1"

jobs:
  run_core_tests_single_gpu:
    runs-on:
      group: aws-g6-4xlarge-plus
    env:
      CUDA_VISIBLE_DEVICES: "0"
    container:
      image: huggingface/accelerate:gpu-nightly
      options: --gpus all --shm-size "16gb"
    defaults:
      run:
        shell: bash
    steps:
      - name: Install accelerate
        run: |
          source activate accelerate;
          git clone https://github.com/huggingface/accelerate;
          cd accelerate;
          git checkout ${{ github.sha }};
          pip install -e .[testing,test_trackers] -U;
          pip install pytest-reportlog tabulate ;
      - name: Show installed libraries
        run: |
          source activate accelerate;
          pip freeze
      - name: Run CLI tests (use make cli)
        working-directory: accelerate
        run: |
          source activate accelerate;
          make test_cli
      - name: Run test on GPUs
        working-directory: accelerate
        if: always()
        run: |
          source activate accelerate;
          make test
      - name: Run examples on GPUs
        working-directory: accelerate
        if: always()
        run: |
          source activate accelerate;
          pip uninstall comet_ml -y;
          make test_examples
      - name: Generate Report
        working-directory: accelerate
        if: always()
        run: |
          pip install tabulate;
          python utils/log_reports.py >> $GITHUB_STEP_SUMMARY

  run_deepspeed_tests_single_gpu:
    runs-on:
      group: aws-g6-4xlarge-plus
    env:
      CUDA_VISIBLE_DEVICES: "0"
    container:
      image: huggingface/accelerate:gpu-deepspeed-nightly
      options: --gpus all --shm-size "16gb"
    defaults:
      run:
        shell: bash
    steps:
      - name: Install accelerate
        run: |
          source activate accelerate;
          git clone https://github.com/huggingface/accelerate;
          cd accelerate;
          git checkout ${{ github.sha }};
          pip install -e .[testing,test_trackers] -U;
          pip install pytest-reportlog tabulate ;
      - name: Show installed libraries
        run: |
          source activate accelerate;
          pip freeze
      - name: Run test on GPUs
        working-directory: accelerate
        if: always()
        run: |
          source activate accelerate;
          make test_deepspeed
      - name: Generate Report
        working-directory: accelerate
        if: always()
        run: |
          pip install tabulate;
          python utils/log_reports.py >> $GITHUB_STEP_SUMMARY

  run_core_tests_multi_gpu:
    runs-on:
      group: aws-g6-12xlarge-plus
    env:
      CUDA_VISIBLE_DEVICES: 0,1
    container:
      image: huggingface/accelerate:gpu-nightly
      options: --gpus all --shm-size "16gb"
    defaults:
      run:
        shell: bash
    steps:
      - name: Update clone
        run: |
          source activate accelerate;
          git clone https://github.com/huggingface/accelerate;
          cd accelerate;
          git checkout ${{ github.sha }};
          pip install -e .[testing,test_trackers] -U;
          pip install pytest-reportlog tabulate
      - name: Show installed libraries
        run: |
          source activate accelerate;
          pip freeze
      - name: Run test on GPUs
        working-directory: accelerate
        run: |
          source activate accelerate;
          make test
      - name: Run examples on GPUs
        working-directory: accelerate
        if: always()
        run: |
          source activate accelerate;
          pip uninstall comet_ml -y;
          make test_examples
      - name: Generate Report
        working-directory: accelerate
        if: always()
        run: |
          source activate accelerate;
          python utils/log_reports.py >> $GITHUB_STEP_SUMMARY

  run_deepspeed_tests_multi_gpu:
    runs-on:
      group: aws-g6-12xlarge-plus
    container:
      image: huggingface/accelerate:gpu-deepspeed-nightly
      options: --gpus all --shm-size "16gb"
    defaults:
      run:
        shell: bash
    steps:
      - name: Install accelerate
        run: |
          source activate accelerate;
          git clone https://github.com/huggingface/accelerate;
          cd accelerate;
          git checkout ${{ github.sha }};
          pip install -e .[testing,test_trackers] -U;
          pip install pytest-reportlog tabulate ;
      - name: Show installed libraries
        run: |
          source activate accelerate;
          pip freeze
      - name: Run test on GPUs
        working-directory: accelerate
        if: always()
        run: |
          source activate accelerate;
          make test_deepspeed
      - name: Generate Report
        working-directory: accelerate
        if: always()
        run: |
          pip install tabulate;
          python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
accelerate-1.9.0/.github/workflows/self_hosted_integration_tests.yml
# CI for specifically ensuring integrations work fine (`transformers` mainly) on GPUs
# Useful tips:
# - `working-directory` should be set to the root of the repo, which is cloned on the actual CI runner.
#   It follows the directory structure of `actions-runner/_work/{repo_name}/{repo_name}/{cloned_repo} on
#   prem, but in Actions setting `working-directory` looks just in the `{repo_name}` level.
# - New integrations to test should have its own job, and follow a strategy method where we check both
#   the pypi and github versions.
# - Workflow call lets this be called from `build_and_run_tests.yml`
# - When using a docker container, it's recommended to set `--shm-size`, we use 16gb.
name: Integration Tests (push to "main")

on:
  workflow_call:
  workflow_dispatch:

env:
  HF_HOME: ~/hf_cache

defaults:
  run:
    shell: bash

jobs:
  run-trainer-tests:
    container:
      image: huggingface/accelerate:gpu-deepspeed-nightly
      options: --gpus all --shm-size "16gb"
    runs-on:
      group: aws-g6-12xlarge-plus
    strategy:
      fail-fast: false
      matrix:
        cuda_visible_devices: [
          "0",
          "0,1"
        ]
    steps:
      - name: Install transformers
        run: |
          source activate accelerate;
          git clone https://github.com/huggingface/transformers --depth 1;
          cd transformers;
          pip install .[torch,deepspeed-testing];
          cd ..;
      - name: Install accelerate
        run: |
          source activate accelerate;
          git clone https://github.com/huggingface/accelerate;
          cd accelerate;
          git checkout ${{ github.sha }} ;
          pip install -e .[testing];
          pip uninstall comet_ml wandb dvclive -y
          cd ..;
      - name: Show installed libraries
        run: |
          source activate accelerate;
          pip freeze
      - name: Run trainer tests
        working-directory: transformers/
        env:
          CUDA_VISIBLE_DEVICES: ${{ matrix.cuda_visible_devices }}
          WANDB_DISABLED: true
        run: |
          source activate accelerate;
          pytest -sv tests/trainer
      - name: Run deepspeed tests
        working-directory: transformers/
        env:
          CUDA_VISIBLE_DEVICES: ${{ matrix.cuda_visible_devices }}
          WANDB_DISABLED: true
        if: always()
        run: |
          source activate accelerate;
          pytest -sv tests/deepspeed
      - name: Run transformers examples tests
        working-directory: transformers/
        env:
          CUDA_VISIBLE_DEVICES: ${{ matrix.cuda_visible_devices }}
          WANDB_DISABLED: true
        run: |
          source activate accelerate
          pip install -r examples/pytorch/_tests_requirements.txt
          pytest -sv examples/pytorch/test_accelerate_examples.py examples/pytorch/test_pytorch_examples.py

  run-skorch-tests:
    container:
      image: huggingface/accelerate:gpu-nightly
      options: --gpus all --shm-size "16gb"
    runs-on:
      group: aws-g6-12xlarge-plus
    strategy:
      fail-fast: false
    steps:
      - name: Install accelerate
        run: |
          source activate accelerate;
          git clone https://github.com/huggingface/accelerate;
          cd accelerate;
          git checkout ${{ github.sha }};
          pip install -e .[testing];
          cd ..
      - name: Install skorch
        run: |
          source activate accelerate
          git clone https://github.com/skorch-dev/skorch;
          cd skorch;
          git config --global --add safe.directory '*'
          git checkout master && git pull
          pip install .[testing]
          pip install flaky
      - name: Show installed libraries
        run: |
          source activate accelerate;
          pip freeze
      - name: Run skorch tests
        working-directory: skorch/
        run: |
          source activate accelerate;
          pytest -sv -k TestAccelerate

accelerate-1.9.0/.github/workflows/stale.yml
name: Stale Bot

on:
  schedule:
    - cron: "0 15 * * *"
  workflow_dispatch:

jobs:
  close_stale_issues:
    name: Close Stale Issues
    if: github.repository == 'huggingface/accelerate'
    runs-on: ubuntu-latest
    permissions:
      issues: write
      pull-requests: write
    env:
      GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
    steps:
      - uses: actions/checkout@v4
      - name: Setup Python
        uses: actions/setup-python@v5
        with:
          python-version: 3.9
          cache: 'pip'
          cache-dependency-path: 'setup.py'
      - name: Install requirements
        run: |
          pip install PyGithub
      - name: Close stale issues
        run: |
          python utils/stale.py

accelerate-1.9.0/.github/workflows/test.yml
name: Run Tests

on:
  pull_request:
    paths:
      - "src/**"
      - "tests/**"
      - ".github/**"
      - "examples/**"
      - "setup.py"
    types: [opened, synchronize, reopened]

env:
  HF_HOME: ~/hf_cache
  TESTING_MOCKED_DATALOADERS: "1"
  IS_GITHUB_CI: "1"

jobs:
  run-tests:
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        pytorch-version: [
          latest,
          minimum,
        ]
        test-kind: [
          test_prod,
          test_core,
          test_cli,
          test_big_modeling,
          test_deepspeed,
          test_fsdp,
          test_example_differences,
          test_checkpoint_step,
          test_checkpoint_epoch,
          test_rest
        ]
    steps:
      - uses: actions/checkout@v4
      - name: Set up python 3.9
        uses: actions/setup-python@v5
        with:
          python-version: 3.9
          cache: 'pip'
          cache-dependency-path: 'setup.py'
      - name: Install the library
        run: |
          if [[ ${{ matrix.test-kind }} = test_prod ]]; then pip install -e .[test_prod]; fi
          if [[ ${{ matrix.test-kind }} != test_prod ]]; then pip install -e .[testing,test_trackers]; fi
          if [[ ${{ matrix.test-kind }} = test_rest ]]; then pip uninstall comet_ml -y; fi
          if [[ ${{ matrix.pytorch-version }} = minimum ]]; then pip install torchvision==0.18.1 torch==2.3.1; fi
          pip install pytest-reportlog tabulate setuptools importlib_metadata
      - name: Show installed libraries
        run: |
          pip freeze
      - name: Run Tests
        env:
          PYTORCH_VERSION: ${{ matrix.pytorch-version }}
        run: |
          make ${{ matrix.test-kind }}
      - name: Generate Report
        if: always()
        run: |
          python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
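Each cell of the matrix above can be approximated locally. The snippet below sketches the `minimum` PyTorch leg for `test_core`; the pinned versions come straight from the workflow, and everything else is the standard editable install run from a clone of the repository.

```bash
# Install the testing extras, then pin the oldest supported torch/torchvision
# exactly as the "minimum" matrix entry does, and run one test kind.
pip install -e .[testing,test_trackers]
pip install pytest-reportlog tabulate setuptools importlib_metadata
pip install torchvision==0.18.1 torch==2.3.1
PYTORCH_VERSION=minimum make test_core
```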
accelerate-1.9.0/.github/workflows/test_imports.yml
name: Run Import Tests

on:
  pull_request:
    paths:
      - "src/**"
      - "tests/**"
      - ".github/**"
      - "examples/**"
      - "setup.py"
    types: [opened, synchronize, reopened]

env:
  HF_HOME: ~/hf_cache
  TESTING_MOCKED_DATALOADERS: "1"
  IS_GITHUB_CI: "1"

jobs:
  run-tests:
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        pytorch-version: [
          latest,
          minimum,
        ]
    steps:
      - uses: actions/checkout@v4
      - name: Set up python 3.9
        uses: actions/setup-python@v5
        with:
          python-version: 3.9
          cache: 'pip'
          cache-dependency-path: 'setup.py'
      - name: Install the library
        run: |
          pip install -e .
          pip install pytest-reportlog tabulate setuptools git+https://github.com/muellerzr/import-timer
      - name: Show installed libraries
        run: |
          pip freeze
      - name: Run Import Tests
        env:
          PYTORCH_VERSION: ${{ matrix.pytorch-version }}
        run: |
          pytest -sv tests/test_imports.py
      - name: Generate Report
        if: always()
        run: |
          python utils/log_reports.py >> $GITHUB_STEP_SUMMARY

accelerate-1.9.0/.github/workflows/trufflehog.yml
on:
  push:

name: Secret Leaks

jobs:
  trufflehog:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Secret Scanning
        uses: trufflesecurity/trufflehog@main

accelerate-1.9.0/.github/workflows/upload_pr_documentation.yml
name: Upload PR Documentation

on:
  workflow_run:
    workflows: ["Build PR Documentation"]
    types:
      - completed

jobs:
  build:
    uses: huggingface/doc-builder/.github/workflows/upload_pr_documentation.yml@main
    with:
      package_name: accelerate
    secrets:
      hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }}
      comment_bot_token: ${{ secrets.COMMENT_BOT_TOKEN }}
accelerate-1.9.0/.gitignore
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
#  Usually these files are written by a python script from a template
#  before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
#   However, in case of collaboration, if having platform-specific dependencies or dependencies
#   having no cross-platform support, pipenv may install dependencies that don't work, or not
#   install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# VSCode
.vscode

# IntelliJ
.idea

# Mac
.DS_Store

# More test things
wandb

# ruff
.ruff_cache

accelerate-1.9.0/.pre-commit-config.yaml
repos:
  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.2.1
    hooks:
      - id: ruff
        args:
          - --fix
      - id: ruff-format
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.5.0
    hooks:
      - id: check-merge-conflict
      - id: check-yaml
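With this configuration in place, the hooks can be installed and exercised locally; `pre-commit run --all-files` is the standard way to apply them to the whole tree rather than only to staged files.

```bash
pip install pre-commit
pre-commit install            # run ruff/ruff-format and the basic checks on every commit
pre-commit run --all-files    # one-off pass over the entire repository
```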
accelerate-1.9.0/CODE_OF_CONDUCT.md
# Contributor Covenant Code of Conduct

## Our Pledge

We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community.

## Our Standards

Examples of behavior that contributes to a positive environment for our community include:

* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience
* Focusing on what is best not just for us as individuals, but for the overall community

Examples of unacceptable behavior include:

* The use of sexualized language or imagery, and sexual attention or advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful.

Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at feedback@huggingface.co. All complaints will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series of actions.

**Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within the community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.0, available at https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.

Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/diversity).

[homepage]: https://www.contributor-covenant.org

For answers to common questions about this code of conduct, see the FAQ at https://www.contributor-covenant.org/faq. Translations are available at https://www.contributor-covenant.org/translations.

accelerate-1.9.0/CONTRIBUTING.md
# How to contribute to 🤗 Accelerate?

Everyone is welcome to contribute, and we value everybody's contribution. Code is thus not the only way to help the community. Answering questions, helping others, reaching out and improving the documentation are immensely valuable to the community.

It also helps us if you spread the word: reference the library from blog posts on the awesome projects it made possible, shout out on Twitter every time it has helped you, or simply star the repo to say "thank you".

Whichever way you choose to contribute, please be mindful to respect our [code of conduct](https://github.com/huggingface/accelerate/blob/main/CODE_OF_CONDUCT.md).
## You can contribute in so many ways!

Some of the ways you can contribute to Accelerate:
* Fixing outstanding issues with the existing code;
* Contributing to the examples or to the documentation;
* Submitting issues related to bugs or desired new features.

## Submitting a new issue or feature request

Do your best to follow these guidelines when submitting an issue or a feature request. It will make it easier for us to come back to you quickly and with good feedback.

### Did you find a bug?

The 🤗 Accelerate library is robust and reliable thanks to the users who notify us of the problems they encounter. So thank you for reporting an issue.

First, we would really appreciate it if you could **make sure the bug was not already reported** (use the search bar on Github under Issues).

Did not find it? :( So we can act quickly on it, please follow these steps:

* Include your **OS type and version**, the versions of **Python** and **PyTorch**.
* A short, self-contained, code snippet that allows us to reproduce the bug in less than 30s;
* Provide us with your Accelerate configuration (located by default in `~/.cache/huggingface/accelerate/default_config.yaml`)

### Do you want a new feature?

A good feature request addresses the following points:

1. Motivation first:
  * Is it related to a problem/frustration with the library? If so, please explain why. Providing a code snippet that demonstrates the problem is best.
  * Is it related to something you would need for a project? We'd love to hear about it!
  * Is it something you worked on and think could benefit the community? Awesome! Tell us what problem it solved for you.
2. Write a *full paragraph* describing the feature;
3. Provide a **code snippet** that demonstrates its future use;
4. In case this is related to a paper, please attach a link;
5. Attach any additional information (drawings, screenshots, etc.) you think may help.

If your issue is well written we're already 80% of the way there by the time you post it.

## Submitting a pull request (PR)

Before writing code, we strongly advise you to search through the existing PRs or issues to make sure that nobody is already working on the same thing. If you are unsure, it is always a good idea to open an issue to get some feedback.

You will need basic `git` proficiency to be able to contribute to 🤗 Accelerate. `git` is not the easiest tool to use but it has the greatest manual. Type `git --help` in a shell and enjoy. If you prefer books, [Pro Git](https://git-scm.com/book/en/v2) is a very good reference.

Follow these steps to start contributing:

1. Fork the [repository](https://github.com/huggingface/accelerate) by clicking on the 'Fork' button on the repository's page. This creates a copy of the code under your GitHub user account.

2. Clone your fork to your local disk, and add the base repository as a remote. The following command assumes you have your public SSH key uploaded to GitHub. See the following guide for more [information](https://docs.github.com/en/repositories/creating-and-managing-repositories/cloning-a-repository).

   ```bash
   $ git clone git@github.com:<your GitHub handle>/accelerate.git
   $ cd accelerate
   $ git remote add upstream https://github.com/huggingface/accelerate.git
   ```

3. Create a new branch to hold your development changes, and do this for every new PR you work on.
   Start by synchronizing your `main` branch with the `upstream/main` branch (more details in the [GitHub Docs](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/syncing-a-fork)):

   ```bash
   $ git checkout main
   $ git fetch upstream
   $ git merge upstream/main
   ```

   Once your `main` branch is synchronized, create a new branch from it:

   ```bash
   $ git checkout -b a-descriptive-name-for-my-changes
   ```

   **Do not** work on the `main` branch.

4. Set up a development environment by running the following command in a conda or a virtual environment you've created for working on this library:

   ```bash
   $ pip install -e ".[dev]"
   ```

   This will install all testing and linting/code quality dependencies for the library (see `quality`, `test_dev`, `test_prod` targets in [`setup.py`](./setup.py)).

   (If accelerate was already installed in the virtual environment, remove it with `pip uninstall accelerate` before reinstalling it in editable mode with the `-e` flag).

   Alternatively, if you are using [Visual Studio Code](https://code.visualstudio.com/Download), the fastest way to get set up is by using the provided Dev Container. Documentation on how to get started with dev containers is available [here](https://code.visualstudio.com/docs/remote/containers).

5. Develop the features on your branch.

   As you work on the features, you should make sure that the test suite passes. You should run the tests impacted by your changes like this (see below an explanation regarding the environment variable):

   ```bash
   $ pytest tests/<TEST_TO_RUN>.py
   ```

   > For the following commands leveraging the `make` utility, we recommend using the WSL system when running on
   > Windows. More information [here](https://docs.microsoft.com/en-us/windows/wsl/about).

   You can also run the full suite with the following command.

   ```bash
   $ make test
   ```

   `accelerate` relies on `ruff` to format its source code consistently. After you make changes, apply automatic style corrections and code verifications that can't be automated in one go with the commands below. They are also optimized to only work with files modified by the PR you're working on.

   If you prefer to run the checks one after the other, the following command applies the style corrections:

   ```bash
   $ make style
   ```

   `accelerate` also uses a few custom scripts to check for coding mistakes. Quality control runs in CI, however you can also run the same checks with:

   ```bash
   $ make quality
   ```

   You can also set up [`pre-commit`](https://pre-commit.com/) to run these checks automatically as Git commit hooks.

   ```bash
   $ pip install pre-commit
   $ pre-commit install
   ```

   Once you're happy with your changes, add changed files using `git add` and make a commit with `git commit` to record your changes locally:

   ```bash
   $ git add modified_file.py
   $ git commit
   ```

   Please write [good commit messages](https://chris.beams.io/posts/git-commit/).

   It is a good idea to sync your copy of the code with the original repository regularly. This way you can quickly account for changes:

   ```bash
   $ git fetch upstream
   $ git rebase upstream/main
   ```

   Push the changes to your account using:

   ```bash
   $ git push -u origin a-descriptive-name-for-my-changes
   ```

6. Once you are satisfied (**and the checklist below is happy too**), go to the webpage of your fork on GitHub. Click on 'Pull request' to send your changes to the project maintainers for review.

7. It's ok if maintainers ask you for changes. It happens to core contributors too! So everyone can see the changes in the Pull request, work in your local branch and push the changes to your fork.
   They will automatically appear in the pull request.

### Checklist

1. The title of your pull request should be a summary of its contribution;
2. If your pull request addresses an issue, please mention the issue number in the pull request description to make sure they are linked (and people consulting the issue know you are working on it);
3. To indicate a work in progress please prefix the title with `[WIP]`, or mark the PR as a draft PR. These are useful to avoid duplicated work, and to differentiate it from PRs ready to be merged;
4. Make sure existing tests pass;
5. Add high-coverage tests. No quality testing = no merge.

See an example of a good PR here: https://github.com/huggingface/accelerate/pull/255

### Tests

An extensive test suite is included to test the library behavior and several examples. Library tests can be found in the [tests folder](https://github.com/huggingface/accelerate/tree/main/tests).

We use `pytest` in order to run the tests. From the root of the repository, here's how to run tests with `pytest` for the library:

```bash
$ python -m pytest -sv ./tests
```

In fact, that's how `make test` is implemented (sans the `pip install` line)!

You can specify a smaller set of tests in order to test only the feature you're working on.

accelerate-1.9.0/LICENSE
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. accelerate-1.9.0/Makefile000066400000000000000000000072261503574341000152620ustar00rootroot00000000000000.PHONY: quality style test docs utils check_dirs := . 
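# Directories scanned by the ruff-based quality/style targets below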
# Check that source code meets quality standards extra_quality_checks: python utils/check_copies.py python utils/check_dummies.py python utils/check_repo.py doc-builder style src/accelerate docs/source --max_len 119 # this target runs checks on all files quality: ruff check $(check_dirs) ruff format --check $(check_dirs) doc-builder style src/accelerate docs/source --max_len 119 --check_only # Format source code automatically and check if there are any problems left that need manual fixing style: ruff check $(check_dirs) --fix ruff format $(check_dirs) doc-builder style src/accelerate docs/source --max_len 119 # Run tests for the library test_core: python -m pytest -s -v ./tests/ \ --ignore=./tests/test_big_modeling.py \ --ignore=./tests/test_modeling_utils.py \ --ignore=./tests/test_examples.py \ --ignore=./tests/test_cli.py \ --ignore=./tests/deepspeed \ --ignore=./tests/fsdp \ --ignore=./tests/tp \ $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_core.log",) test_cli: python -m pytest -s -v ./tests/test_cli.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_cli.log",) test_big_modeling: python -m pytest -s -v ./tests/test_big_modeling.py ./tests/test_modeling_utils.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_big_modeling.log",) test_deepspeed: python -m pytest -s -v ./tests/deepspeed $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_deepspeed.log",) test_fsdp: python -m pytest -s -v ./tests/fsdp $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_fsdp.log",) test_tp: python -m pytest -s -v ./tests/tp $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_tp.log",) # Since the new version of pytest will *change* how things are collected, we need `deepspeed` to # run after test_core and test_cli test: $(MAKE) test_core $(MAKE) test_cli $(MAKE) test_big_modeling $(MAKE) test_deepspeed $(MAKE) test_fsdp $(MAKE) test_tp test_examples: python -m pytest -s -v ./tests/test_examples.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_examples.log",) # Broken down example tests for the CI runners test_integrations: python -m pytest -s -v ./tests/deepspeed ./tests/fsdp ./tests/tp $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_integrations.log",) test_example_differences: python -m pytest -s -v ./tests/test_examples.py::ExampleDifferenceTests $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_example_diff.log",) test_checkpoint_epoch: python -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k "by_epoch" $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_checkpoint_epoch.log",) test_checkpoint_step: python -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k "by_step" $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_checkpoint_step.log",) # Same as test but used to install only the base dependencies test_prod: $(MAKE) test_core test_rest: python -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k "not by_step and not by_epoch" $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_rest.log",) # For developers to prepare a release prepare_release: rm -rf dist build python setup.py bdist_wheel sdist # Make sure this is run in a fresh venv of some form install_test_release: pip uninstall accelerate -y pip install -i https://testpypi.python.org/pypi --extra-index-url https://pypi.org/simple accelerate$(if $(version),==$(version),) # Run as `make target=testpypi upload_release` upload_release: @if [ "$(target)" != "testpypi" ] && [ "$(target)" != "pypi" ]; then \ echo "Error: target must be either 'testpypi' or 
'pypi'"; \ exit 1; \ fi twine upload dist/* -r $(target)accelerate-1.9.0/README.md000066400000000000000000000355231503574341000151020ustar00rootroot00000000000000



License Documentation GitHub release Contributor Covenant

Run your *raw* PyTorch training script on any kind of device

## Easy to integrate 🤗 Accelerate was created for PyTorch users who like to write the training loop of PyTorch models but are reluctant to write and maintain the boilerplate code needed to use multi-GPUs/TPU/fp16. 🤗 Accelerate abstracts exactly and only the boilerplate code related to multi-GPUs/TPU/fp16 and leaves the rest of your code unchanged. Here is an example: ```diff import torch import torch.nn.functional as F from datasets import load_dataset + from accelerate import Accelerator + accelerator = Accelerator() - device = 'cpu' + device = accelerator.device model = torch.nn.Transformer().to(device) optimizer = torch.optim.Adam(model.parameters()) dataset = load_dataset('my_dataset') data = torch.utils.data.DataLoader(dataset, shuffle=True) + model, optimizer, data = accelerator.prepare(model, optimizer, data) model.train() for epoch in range(10): for source, targets in data: source = source.to(device) targets = targets.to(device) optimizer.zero_grad() output = model(source) loss = F.cross_entropy(output, targets) - loss.backward() + accelerator.backward(loss) optimizer.step() ``` As you can see in this example, by adding 5-lines to any standard PyTorch training script you can now run on any kind of single or distributed node setting (single CPU, single GPU, multi-GPUs and TPUs) as well as with or without mixed precision (fp8, fp16, bf16). In particular, the same code can then be run without modification on your local machine for debugging or your training environment. 🤗 Accelerate even handles the device placement for you (which requires a few more changes to your code, but is safer in general), so you can even simplify your training loop further: ```diff import torch import torch.nn.functional as F from datasets import load_dataset + from accelerate import Accelerator - device = 'cpu' + accelerator = Accelerator() - model = torch.nn.Transformer().to(device) + model = torch.nn.Transformer() optimizer = torch.optim.Adam(model.parameters()) dataset = load_dataset('my_dataset') data = torch.utils.data.DataLoader(dataset, shuffle=True) + model, optimizer, data = accelerator.prepare(model, optimizer, data) model.train() for epoch in range(10): for source, targets in data: - source = source.to(device) - targets = targets.to(device) optimizer.zero_grad() output = model(source) loss = F.cross_entropy(output, targets) - loss.backward() + accelerator.backward(loss) optimizer.step() ``` Want to learn more? Check out the [documentation](https://huggingface.co/docs/accelerate) or have a look at our [examples](https://github.com/huggingface/accelerate/tree/main/examples). ## Launching script 🤗 Accelerate also provides an optional CLI tool that allows you to quickly configure and test your training environment before launching the scripts. No need to remember how to use `torch.distributed.run` or to write a specific launcher for TPU training! On your machine(s) just run: ```bash accelerate config ``` and answer the questions asked. This will generate a config file that will be used automatically to properly set the default options when doing ```bash accelerate launch my_script.py --args_to_my_script ``` For instance, here is how you would run the GLUE example on the MRPC task (from the root of the repo): ```bash accelerate launch examples/nlp_example.py ``` This CLI tool is **optional**, and you can still use `python my_script.py` or `python -m torchrun my_script.py` at your convenience. 
You can also directly pass in the arguments you would to `torchrun` as arguments to `accelerate launch` if you wish to not run` accelerate config`. For example, here is how to launch on two GPUs: ```bash accelerate launch --multi_gpu --num_processes 2 examples/nlp_example.py ``` To learn more, check the CLI documentation available [here](https://huggingface.co/docs/accelerate/package_reference/cli). Or view the configuration zoo [here](https://github.com/huggingface/accelerate/blob/main/examples/config_yaml_templates/) ## Launching multi-CPU run using MPI 🤗 Here is another way to launch multi-CPU run using MPI. You can learn how to install Open MPI on [this page](https://www.open-mpi.org/faq/?category=building#easy-build). You can use Intel MPI or MVAPICH as well. Once you have MPI setup on your cluster, just run: ```bash accelerate config ``` Answer the questions that are asked, selecting to run using multi-CPU, and answer "yes" when asked if you want accelerate to launch mpirun. Then, use `accelerate launch` with your script like: ```bash accelerate launch examples/nlp_example.py ``` Alternatively, you can use mpirun directly, without using the CLI like: ```bash mpirun -np 2 python examples/nlp_example.py ``` ## Launching training using DeepSpeed 🤗 Accelerate supports training on single/multiple GPUs using DeepSpeed. To use it, you don't need to change anything in your training code; you can set everything using just `accelerate config`. However, if you desire to tweak your DeepSpeed related args from your Python script, we provide you the `DeepSpeedPlugin`. ```python from accelerate import Accelerator, DeepSpeedPlugin # deepspeed needs to know your gradient accumulation steps beforehand, so don't forget to pass it # Remember you still need to do gradient accumulation by yourself, just like you would have done without deepspeed deepspeed_plugin = DeepSpeedPlugin(zero_stage=2, gradient_accumulation_steps=2) accelerator = Accelerator(mixed_precision='fp16', deepspeed_plugin=deepspeed_plugin) # How to save your 🤗 Transformer? accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(save_dir, save_function=accelerator.save, state_dict=accelerator.get_state_dict(model)) ``` Note: DeepSpeed support is experimental for now. In case you get into some problem, please open an issue. ## Launching your training from a notebook 🤗 Accelerate also provides a `notebook_launcher` function you can use in a notebook to launch a distributed training. This is especially useful for Colab or Kaggle notebooks with a TPU backend. Just define your training loop in a `training_function` then in your last cell, add: ```python from accelerate import notebook_launcher notebook_launcher(training_function) ``` An example can be found in [this notebook](https://github.com/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_nlp_example.ipynb). [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_nlp_example.ipynb) ## Why should I use 🤗 Accelerate? You should use 🤗 Accelerate when you want to easily run your training scripts in a distributed environment without having to renounce full control over your training loop. This is not a high-level framework above PyTorch, just a thin wrapper so you don't have to learn a new library. In fact, the whole API of 🤗 Accelerate is in one class, the `Accelerator` object. 
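To make that concrete, here is a minimal, self-contained sketch (a toy model and random data, not a realistic training script) showing that the calls you interact with all live on that one object:

```python
import torch
from accelerate import Accelerator

accelerator = Accelerator()  # one object holding the device/distributed/mixed-precision setup

# Toy setup so the sketch stays self-contained
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
data = torch.utils.data.DataLoader(
    torch.utils.data.TensorDataset(torch.randn(64, 4), torch.randint(0, 2, (64,))),
    batch_size=8,
)

# Everything is routed through the same object
model, optimizer, data = accelerator.prepare(model, optimizer, data)

for source, targets in data:
    optimizer.zero_grad()
    loss = torch.nn.functional.cross_entropy(model(source), targets)
    accelerator.backward(loss)  # replaces loss.backward()
    optimizer.step()

accelerator.wait_for_everyone()  # synchronize processes before saving
accelerator.save(accelerator.unwrap_model(model).state_dict(), "model.pt")
```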
## Why shouldn't I use 🤗 Accelerate? You shouldn't use 🤗 Accelerate if you don't want to write a training loop yourself. There are plenty of high-level libraries above PyTorch that will offer you that; 🤗 Accelerate is not one of them. ## Frameworks using 🤗 Accelerate If you like the simplicity of 🤗 Accelerate but would prefer a higher-level abstraction around its capabilities, some frameworks and libraries that are built on top of 🤗 Accelerate are listed below: * [Amphion](https://github.com/open-mmlab/Amphion) is a toolkit for Audio, Music, and Speech Generation. Its purpose is to support reproducible research and help junior researchers and engineers get started in the field of audio, music, and speech generation research and development. * [Animus](https://github.com/Scitator/animus) is a minimalistic framework to run machine learning experiments. Animus highlights common "breakpoints" in ML experiments and provides a unified interface for them within [IExperiment](https://github.com/Scitator/animus/blob/main/animus/core.py#L76). * [Catalyst](https://github.com/catalyst-team/catalyst#getting-started) is a PyTorch framework for Deep Learning Research and Development. It focuses on reproducibility, rapid experimentation, and codebase reuse so you can create something new rather than write yet another train loop. Catalyst provides a [Runner](https://catalyst-team.github.io/catalyst/api/core.html#runner) to connect all parts of the experiment: hardware backend, data transformations, model training, and inference logic. * [fastai](https://github.com/fastai/fastai#installing) is a PyTorch framework for Deep Learning that simplifies training fast and accurate neural nets using modern best practices. fastai provides a [Learner](https://docs.fast.ai/learner.html#Learner) to handle the training, fine-tuning, and inference of deep learning algorithms. * [Finetuner](https://github.com/jina-ai/finetuner) is a service that enables models to create higher-quality embeddings for semantic search, visual similarity search, cross-modal text<->image search, recommendation systems, clustering, duplication detection, anomaly detection, or other uses. * [InvokeAI](https://github.com/invoke-ai/InvokeAI) is a creative engine for Stable Diffusion models, offering an industry-leading WebUI and terminal usage support, and serving as the foundation for many commercial products. * [Kornia](https://kornia.readthedocs.io/en/latest/get-started/introduction.html) is a differentiable library that allows classical computer vision to be integrated into deep learning models. Kornia provides a [Trainer](https://kornia.readthedocs.io/en/latest/x.html#kornia.x.Trainer) with the specific purpose of training and fine-tuning the supported deep learning algorithms within the library. * [Open Assistant](https://projects.laion.ai/Open-Assistant/) is a chat-based assistant that understands tasks, can interact with third-party systems, and retrieve information dynamically to do so. * [pytorch-accelerated](https://github.com/Chris-hughes10/pytorch-accelerated) is a lightweight training library, with a streamlined feature set centered around a general-purpose [Trainer](https://pytorch-accelerated.readthedocs.io/en/latest/trainer.html), that places a huge emphasis on simplicity and transparency, enabling users to understand exactly what is going on under the hood, but without having to write and maintain the boilerplate themselves! 
* [Stable Diffusion web UI](https://github.com/AUTOMATIC1111/stable-diffusion-webui) is an open-source browser-based easy-to-use interface based on the Gradio library for Stable Diffusion. * [torchkeras](https://github.com/lyhue1991/torchkeras) is a simple tool for training pytorch model just in a keras style, a dynamic and beautiful plot is provided in notebook to monitor your loss or metric. * [transformers](https://github.com/huggingface/transformers) as a tool for helping train state-of-the-art machine learning models in PyTorch, Tensorflow, and JAX. (Accelerate is the backend for the PyTorch side). ## Installation This repository is tested on Python 3.8+ and PyTorch 1.10.0+ You should install 🤗 Accelerate in a [virtual environment](https://docs.python.org/3/library/venv.html). If you're unfamiliar with Python virtual environments, check out the [user guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/). First, create a virtual environment with the version of Python you're going to use and activate it. Then, you will need to install PyTorch: refer to the [official installation page](https://pytorch.org/get-started/locally/#start-locally) regarding the specific install command for your platform. Then 🤗 Accelerate can be installed using pip as follows: ```bash pip install accelerate ``` ## Supported integrations - CPU only - multi-CPU on one node (machine) - multi-CPU on several nodes (machines) - single GPU - multi-GPU on one node (machine) - multi-GPU on several nodes (machines) - TPU - FP16/BFloat16 mixed precision - FP8 mixed precision with [Transformer Engine](https://github.com/NVIDIA/TransformerEngine) or [MS-AMP](https://github.com/Azure/MS-AMP/) - DeepSpeed support (Experimental) - PyTorch Fully Sharded Data Parallel (FSDP) support (Experimental) - Megatron-LM support (Experimental) ## Citing 🤗 Accelerate If you use 🤗 Accelerate in your publication, please cite it by using the following BibTeX entry. ```bibtex @Misc{accelerate, title = {Accelerate: Training and inference at scale made simple, efficient and adaptable.}, author = {Sylvain Gugger and Lysandre Debut and Thomas Wolf and Philipp Schmid and Zachary Mueller and Sourab Mangrulkar and Marc Sun and Benjamin Bossan}, howpublished = {\url{https://github.com/huggingface/accelerate}}, year = {2022} } ``` accelerate-1.9.0/benchmarks/000077500000000000000000000000001503574341000157305ustar00rootroot00000000000000accelerate-1.9.0/benchmarks/README.md000066400000000000000000000002241503574341000172050ustar00rootroot00000000000000# Benchmarks The folders below contain suites to test various functionalities in Accelerate. See their relevant README.md's for more information. accelerate-1.9.0/benchmarks/big_model_inference/000077500000000000000000000000001503574341000216675ustar00rootroot00000000000000accelerate-1.9.0/benchmarks/big_model_inference/README.md000066400000000000000000000037641503574341000231600ustar00rootroot00000000000000# Big model inference benchmarks Running inference with Accelerate on big models. ## Setup These benchmarks use the `transformers` library: ```bash pip install transformers ``` To reproduce or test a new setup, run ```py python big_model_inference.py model_name ``` This script supports `gpt-j-6b`, `gpt-neox`, `opt` (30B version) and `T0pp` out of the box, but you can specify any valid checkpoint for `model_name`. To force a different `torch_dtype` than the one in the config: `--torch_dtype xxx`. 
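For example (an illustrative invocation, not the only supported one), benchmarking the built-in `gpt-j-6b` alias in half precision looks like:

```bash
# Any valid checkpoint name can be used in place of the alias
python big_model_inference.py gpt-j-6b --torch_dtype float16
```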
If you get an error linked to disk offload, you need to add the option `--disk_offload` ## Results On a setup with two Titan RTXs (24GB of RAM) and 32GB of RAM, we get the following benchmarks (T0pp does not run in float16, which is why it's not included). | Model | Model load time | Generation time | dtype | GPU 0 use | GPU 1 use | CPU use | Disk offload | |:-----:|:---------------:|:---------------:|:-----:|:---------:|:---------:|:-------:|:------------:| | GPT-J-6B | 8.7s | 0.05s per token | float16 | 11.7GB | 0GB | 0GB | no | | GPT-J-6B | 12.4s | 0.06s per token | float32 | 21.9GB | 1.5GB | 0GB | no | | GPT-Neo-X-20B | 30.9s | 0.08s per token | float16 | 21.5GB | 18GB | 0GB | no | | GPT-Neo-X-20B | 78.2s | 10.72s per token | float32 | 20.3GB | 22.7GB | 24.4GB | yes | | T0pp (11B) | 29.4s | 0.05s per token | float32 | 21.1GB | 21.3GB | 0GB | no | | OPT-30B | 34.5s | 2.37s per token | float16 | 20.7GB | 22.3GB | 14.1GB | no | | OPT-30B | 112.3s | 33.9s per token | float32 | 20.2GB | 21.2GB | 23.5GB | yes | Note on the results: - using two GPUs instead of one does not slow down generation - using CPU offload slows down a bit (see OPT-30B) - using disk offload slows down a lot (need to implement prefetching) You will also note that Accelerate does not use any more GPU and CPU RAM than necessary: - peak GPU memory is exactly the size of the model put on a given GPU - peak CPU memory is either the size of the biggest checkpoint shard or the part of the model offloaded on CPU, whichever is bigger. accelerate-1.9.0/benchmarks/big_model_inference/big_model_inference.py000066400000000000000000000133371503574341000262070ustar00rootroot00000000000000# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import time import torch import transformers from measures_util import end_measure, log_measures, start_measure from transformers import AutoConfig, AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer from accelerate.utils import compute_module_sizes DEFAULT_MODELS = { "gpt-j-6b": {"is_causal": True, "model": "sgugger/sharded-gpt-j-6B", "tokenizer": "EleutherAI/gpt-j-6B"}, "gpt-neox": {"is_causal": True, "model": "EleutherAI/gpt-neox-20b"}, "opt": {"is_causal": True, "model": "facebook/opt-30b"}, "T0pp": {"is_causal": False, "model": "bigscience/T0pp", "model_revision": "sharded"}, } PROMPTS = [ "Hello, my name is", "Are unicorns real? Unicorns are", "For the first time in several years,", "My name is Julien and I am", "The goal of life is", "Whenever I'm sad, I like to", ] def parse_args(): parser = argparse.ArgumentParser(description="Run and time generations on a big model using Accelerate.") parser.add_argument("model_name", type=str, default=None, help="The name of the model to try.") parser.add_argument( "--tokenizer_name", type=str, default=None, help="The name of the tokenizer (if different from the model)." 
) parser.add_argument("--is_causal", type=bool, default=None, help="Whether or not the model is causal.") parser.add_argument( "--model_revision", type=str, default=None, help="The revision to use for the model checkpoint." ) parser.add_argument("--torch_dtype", type=str, default=None, help="The dtype for the model.") parser.add_argument("--disk_offload", action="store_true") args = parser.parse_args() # Sanitize args if args.model_name in DEFAULT_MODELS: defaults = DEFAULT_MODELS[args.model_name] args.model_name = defaults["model"] if args.tokenizer_name is None: args.tokenizer_name = defaults.get("tokenizer", args.model_name) if args.is_causal is None: args.is_causal = defaults["is_causal"] if args.model_revision is None: args.model_revision = defaults.get("model_revision", "main") if args.is_causal is None: raise ValueError("Could not infer the default for `--is_causal`, pass either True or False for it.") if args.tokenizer_name is None: args.tokenizer_name = args.model_name if args.model_revision is None: args.model_revision = "main" return args def main(): transformers.utils.logging.set_verbosity_error() args = parse_args() if args.torch_dtype is None: config = AutoConfig.from_pretrained(args.model_name) torch_dtype = getattr(config, "torch_dtype", torch.float32) else: torch_dtype = getattr(torch, args.torch_dtype) model_cls = AutoModelForCausalLM if args.is_causal else AutoModelForSeq2SeqLM kwargs = { "torch_dtype": torch_dtype, "revision": args.model_revision, } if args.disk_offload: kwargs["offload_folder"] = "tmp_offload" kwargs["offload_state_dict"] = True start_measures = start_measure() model = model_cls.from_pretrained(args.model_name, device_map="auto", **kwargs) end_measures = end_measure(start_measures) log_measures(end_measures, "Model loading") module_sizes = compute_module_sizes(model) device_size = {v: 0 for v in model.hf_device_map.values()} for module, device in model.hf_device_map.items(): device_size[device] += module_sizes[module] message = "\n".join([f"- {device}: {size // 2**20}MiB" for device, size in device_size.items()]) print(f"\nTheoretical use:\n{message}") tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name) start_measures = start_measure() generation_times = [] gen_tokens = [] texts_outs = [] for prompt in PROMPTS: inputs = tokenizer(prompt, return_tensors="pt").to(0) tokens = inputs["input_ids"][0].tolist() before_generate = time.time() outputs = model.generate(inputs["input_ids"]) after_generate = time.time() outputs = outputs[0].tolist() num_gen_tokens = len(outputs) if outputs[: len(tokens)] != tokens else len(outputs) - len(tokens) generation_time = after_generate - before_generate text_out = tokenizer.decode(outputs, skip_special_tokens=True) texts_outs.append(text_out) generation_times.append(generation_time) gen_tokens.append(num_gen_tokens) print(f"Prompt: {prompt}\nGeneration {text_out}\nIn {generation_time:.2f}s for {num_gen_tokens} tokens\n") end_measures = end_measure(start_measures) log_measures(end_measures, "Model generation") generation_times_per_token = [gen / tok for gen, tok in zip(generation_times, gen_tokens)] avg_gen = sum(generation_times_per_token) / len(generation_times) print(f"Average time of generation per token: {avg_gen:.2f}s") print(f"First generation (avg time per token): {generation_times_per_token[0]:.2f}s") avg_gen = sum(generation_times_per_token[1:]) / (len(generation_times_per_token) - 1) print(f"Average time of generation per token (excluding the first): {avg_gen:.2f}s") if __name__ == "__main__": main() 
accelerate-1.9.0/benchmarks/big_model_inference/measures_util.py000066400000000000000000000062761503574341000251350ustar00rootroot00000000000000# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import threading import time import psutil import torch from accelerate.test_utils.testing import get_backend torch_device_type, _, _ = get_backend() torch_accelerator_module = getattr(torch, torch_device_type, torch.cuda) class PeakCPUMemory: def __init__(self): self.process = psutil.Process() self.peak_monitoring = False def peak_monitor(self): self.cpu_memory_peak = -1 while True: self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak) # can't sleep or will not catch the peak right (this comment is here on purpose) if not self.peak_monitoring: break def start(self): self.peak_monitoring = True self.thread = threading.Thread(target=self.peak_monitor) self.thread.daemon = True self.thread.start() def stop(self): self.peak_monitoring = False self.thread.join() return self.cpu_memory_peak cpu_peak_tracker = PeakCPUMemory() def start_measure(): # Time measures = {"time": time.time()} gc.collect() torch_accelerator_module.empty_cache() # CPU mem measures["cpu"] = psutil.Process().memory_info().rss cpu_peak_tracker.start() # GPU mem for i in range(torch_accelerator_module.device_count()): measures[str(i)] = torch_accelerator_module.memory_allocated(i) torch_accelerator_module.reset_peak_memory_stats() return measures def end_measure(start_measures): # Time measures = {"time": time.time() - start_measures["time"]} gc.collect() torch_accelerator_module.empty_cache() # CPU mem measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20 measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20 # GPU mem for i in range(torch_accelerator_module.device_count()): measures[str(i)] = (torch_accelerator_module.memory_allocated(i) - start_measures[str(i)]) / 2**20 measures[f"{i}-peak"] = (torch_accelerator_module.max_memory_allocated(i) - start_measures[str(i)]) / 2**20 return measures def log_measures(measures, description): print(f"{description}:") print(f"- Time: {measures['time']:.2f}s") for i in range(torch_accelerator_module.device_count()): print(f"- {torch_device_type} {i} allocated: {measures[str(i)]:.2f}MiB") peak = measures[f"{i}-peak"] print(f"- {torch_device_type} {i} peak: {peak:.2f}MiB") print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB") print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB") accelerate-1.9.0/benchmarks/fp8/000077500000000000000000000000001503574341000164255ustar00rootroot00000000000000accelerate-1.9.0/benchmarks/fp8/ms_amp/000077500000000000000000000000001503574341000177015ustar00rootroot00000000000000accelerate-1.9.0/benchmarks/fp8/ms_amp/Dockerfile000066400000000000000000000003311503574341000216700ustar00rootroot00000000000000FROM ghcr.io/azure/msamp RUN pip install transformers evaluate datasets RUN git clone https://github.com/huggingface/accelerate 
RUN cd accelerate && \ pip install -e . && \ cd benchmarks/fp8 CMD ["bash"] accelerate-1.9.0/benchmarks/fp8/ms_amp/ddp.py000066400000000000000000000124661503574341000210330ustar00rootroot00000000000000# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This script tests to ensure that `accelerate` performs at the same level as raw `MS-AMP`. This particular script verifies this for DDP training. """ import evaluate import msamp import torch from fp8_utils import evaluate_model, get_training_utilities from torch.nn.parallel import DistributedDataParallel as DDP from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.utils import FP8RecipeKwargs, get_grad_scaler, set_seed MODEL_NAME = "bert-base-cased" METRIC = evaluate.load("glue", "mrpc") def train_baseline(opt_level="O2"): set_seed(42) scaler = get_grad_scaler() model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(MODEL_NAME) accelerator = Accelerator() device = accelerator.device model, optimizer = msamp.initialize(model, optimizer, opt_level=opt_level) model.to(device) # Convert the model to DDP device_ids, output_device = [accelerator.local_process_index], accelerator.local_process_index model = DDP(model, device_ids=device_ids, output_device=output_device) base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) model.train() for i, batch in enumerate(train_dataloader): with torch.autocast(device_type="cuda", dtype=torch.bfloat16): outputs = model(**batch) loss = outputs.loss scaler.scale(loss).backward() optimizer.step() optimizer.zero_grad() lr_scheduler.step() trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) assert trained_model_results["accuracy"] > base_model_results["accuracy"], ( f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}" ) assert trained_model_results["f1"] > base_model_results["f1"], ( f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}" ) return base_model_results, trained_model_results def train_integration(opt_level="O2"): kwargs_handlers = [FP8RecipeKwargs(backend="msamp", opt_level=opt_level)] AcceleratorState()._reset_state(True) accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs_handlers) set_seed(42) model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities( MODEL_NAME, accelerator=accelerator ) model, optimizer = accelerator.prepare(model, optimizer) base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) model.train() for i, batch in enumerate(train_dataloader): with torch.autocast(device_type="cuda", dtype=torch.bfloat16): outputs = model(**batch) loss = outputs.loss accelerator.backward(loss) optimizer.step() optimizer.zero_grad() lr_scheduler.step() trained_model_results = 
evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) assert trained_model_results["accuracy"] > base_model_results["accuracy"], ( f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}" ) assert trained_model_results["f1"] > base_model_results["f1"], ( f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}" ) return base_model_results, trained_model_results if __name__ == "__main__": for opt_level in ["O1", "O2"]: baseline_not_trained, baseline_trained = train_baseline(opt_level) accelerator_not_trained, accelerator_trained = train_integration(opt_level) assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], ( f"Accuracy not the same for untrained baseline and accelerator using opt_level={opt_level}: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}" ) assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], ( f"F1 not the same for untrained baseline and accelerator using opt_level={opt_level}: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}" ) assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], ( f"Accuracy not the same for trained baseline and accelerator using opt_level={opt_level}: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}" ) assert baseline_trained["f1"] == accelerator_trained["f1"], ( f"F1 not the same for trained baseline and accelerator using opt_level={opt_level}: {baseline_trained['f1']} == {accelerator_trained['f1']}" ) accelerate-1.9.0/benchmarks/fp8/ms_amp/distrib_deepspeed.py000066400000000000000000000146501503574341000237370ustar00rootroot00000000000000# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This script tests to ensure that `accelerate` performs at the same level as raw `MS-AMP`. This particular script verifies this for DeepSpeed training. NOTE: MS-AMP does *not* support ZeRO-3. 
""" # import msamp.deepspeed as msamp_deepspeed import evaluate import torch from fp8_utils import evaluate_model, get_training_utilities from msamp import deepspeed as msamp_deepspeed from accelerate import Accelerator, DeepSpeedPlugin from accelerate.state import AcceleratorState from accelerate.utils import set_seed MODEL_NAME = "bert-base-cased" METRIC = evaluate.load("glue", "mrpc") def train_baseline(zero_stage: int = 1, opt_level: str = "O1"): set_seed(42) accelerator = Accelerator() model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities( MODEL_NAME, accelerator=accelerator ) import numpy as np config = { "train_batch_size": 32, "train_micro_batch_size_per_gpu": 16, "gradient_accumulation_steps": 1, "zero_optimization": { "stage": zero_stage, "offload_optimizer": {"device": "none", "nvme_path": None}, "offload_param": {"device": "none", "nvme_path": None}, }, "gradient_clipping": 1.0, "steps_per_print": np.inf, "bf16": {"enabled": True}, "fp16": {"enabled": False}, "zero_allow_untested_optimizer": True, "msamp": { "enabled": True, "opt_level": opt_level, }, } ( model, optimizer, _, _, ) = msamp_deepspeed.initialize( model=model, optimizer=optimizer, config_params=config, ) base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) model.train() for _ in range(2): for batch in train_dataloader: outputs = model(**batch) loss = outputs.loss model.backward(loss) model.step() for _ in range(accelerator.num_processes): lr_scheduler.step() trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) model.destroy() torch.cuda.empty_cache() AcceleratorState()._reset_state(True) assert trained_model_results["accuracy"] > base_model_results["accuracy"], ( f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}" ) assert trained_model_results["f1"] > base_model_results["f1"], ( f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}" ) return base_model_results, trained_model_results def train_integration(zero_stage: int = 1, opt_level: str = "O1"): set_seed(42) deepspeed_plugin = DeepSpeedPlugin( zero_stage=zero_stage, enable_msamp=True, msamp_opt_level=opt_level, ) accelerator = Accelerator(mixed_precision="fp8", deepspeed_plugin=deepspeed_plugin) accelerator.state.deepspeed_plugin.deepspeed_config["train_micro_batch_size_per_gpu"] = 16 model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities( MODEL_NAME, accelerator=accelerator ) model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler) base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) model.train() for _ in range(2): for batch in train_dataloader: outputs = model(**batch) loss = outputs.loss accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) model.destroy() torch.cuda.empty_cache() assert trained_model_results["accuracy"] > base_model_results["accuracy"], ( f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}" ) assert trained_model_results["f1"] > base_model_results["f1"], ( f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}" ) 
AcceleratorState()._reset_state(True) return base_model_results, trained_model_results if __name__ == "__main__": for zero_stage in [1, 2]: for opt_level in ["O1", "O2", "O3"]: baseline_not_trained, baseline_trained = train_baseline(zero_stage, opt_level) accelerator_not_trained, accelerator_trained = train_integration(zero_stage, opt_level) assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], ( f"ZERO stage {zero_stage}, opt_level={opt_level}:\nAccuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}" ) assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], ( f"ZERO stage {zero_stage}, opt_level={opt_level}:\nF1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}" ) assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], ( f"ZERO stage {zero_stage}, opt_level={opt_level}:\nAccuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}" ) assert baseline_trained["f1"] == accelerator_trained["f1"], ( f"ZERO stage {zero_stage}, opt_level={opt_level}:\nF1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}" ) torch.distributed.destroy_process_group() accelerate-1.9.0/benchmarks/fp8/ms_amp/fp8_utils.py000066400000000000000000000106501503574341000221720ustar00rootroot00000000000000# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch def get_dataloaders(model_name: str, batch_size: int = 16): from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(model_name) datasets = load_dataset("glue", "mrpc") def tokenize_function(examples): # max_length=None => use the model max length (it's actually the default) outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: tokenized_datasets = datasets.map( tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library tokenized_datasets = tokenized_datasets.rename_column("label", "labels") def collate_fn(examples): return tokenizer.pad( examples, padding="longest", pad_to_multiple_of=16, # Specific for FP8 return_tensors="pt", ) # Instantiate dataloaders. 
train_dataloader = DataLoader( tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True ) eval_dataloader = DataLoader( tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=16, drop_last=True, ) return train_dataloader, eval_dataloader def get_training_utilities(model_name: str, batch_size: int = 16, accelerator=None): """ Returns a tuple of: - Model - Optimizer - Train dataloader (prepared) - Eval dataloader (prepared) - LR Scheduler Suitable for training on the MRPC dataset """ from torch.optim import AdamW from transformers import AutoModelForSequenceClassification, get_linear_schedule_with_warmup from accelerate import Accelerator if accelerator is None: accelerator = Accelerator() model = AutoModelForSequenceClassification.from_pretrained(model_name) train_dataloader, eval_dataloader = get_dataloaders(model_name, batch_size) optimizer = AdamW(model.parameters(), lr=0.0001) lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=100, num_training_steps=len(train_dataloader) * 2, ) train_dataloader, eval_dataloader = accelerator.prepare(train_dataloader, eval_dataloader) return model, optimizer, train_dataloader, eval_dataloader, lr_scheduler def get_named_parameters(model): """ Same thing as `Accelerator.get_named_parameters` Returns a list of the named parameters of the model (extracted from parallel) """ from accelerate.utils import extract_model_from_parallel model = extract_model_from_parallel(model) return {n: p for n, p in model.named_parameters()} def evaluate_model(model, dataloader, metric, accelerator=None): "Turns model to .eval(), runs dataloader, calculates metric, then turns eval back on" model.eval() for step, batch in enumerate(dataloader): with torch.no_grad(): # W/ MS-AMP, we need to cast while evaluating with torch.autocast(device_type="cuda", dtype=torch.bfloat16): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) references = batch["labels"] if accelerator is not None and accelerator.num_processes > 1: predictions, references = accelerator.gather_for_metrics((predictions, references)) metric.add_batch(predictions=predictions, references=references) return metric.compute() accelerate-1.9.0/benchmarks/fp8/ms_amp/non_distributed.py000066400000000000000000000114211503574341000234460ustar00rootroot00000000000000# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This script tests to ensure that `accelerate` performs at the same level as raw `MS-AMP`. This particular script verifies this for single GPU training. 
""" import evaluate import msamp import torch from fp8_utils import evaluate_model, get_training_utilities from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.utils import FP8RecipeKwargs, get_grad_scaler, set_seed MODEL_NAME = "bert-base-cased" METRIC = evaluate.load("glue", "mrpc") def train_baseline(opt_level="O2"): set_seed(42) model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(MODEL_NAME) model, optimizer = msamp.initialize(model, optimizer, opt_level=opt_level) model.to("cuda") base_model_results = evaluate_model(model, eval_dataloader, METRIC) model.train() scaler = get_grad_scaler() for batch in train_dataloader: batch = batch.to("cuda") with torch.autocast(device_type="cuda", dtype=torch.bfloat16): outputs = model(**batch) loss = outputs.loss loss = scaler.scale(loss) loss.backward() optimizer.step() optimizer.zero_grad() lr_scheduler.step() trained_model_results = evaluate_model(model, eval_dataloader, METRIC) assert trained_model_results["accuracy"] > base_model_results["accuracy"], ( f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}" ) assert trained_model_results["f1"] > base_model_results["f1"], ( f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}" ) return base_model_results, trained_model_results def train_integration(opt_level="O2"): kwargs_handlers = [FP8RecipeKwargs(backend="msamp", opt_level=opt_level)] AcceleratorState()._reset_state(True) accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs_handlers) set_seed(42) model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities( MODEL_NAME, accelerator=accelerator ) model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler) base_model_results = evaluate_model(model, eval_dataloader, METRIC) model.train() for batch in train_dataloader: outputs = model(**batch) loss = outputs.loss accelerator.backward(loss) optimizer.step() optimizer.zero_grad() lr_scheduler.step() trained_model_results = evaluate_model(model, eval_dataloader, METRIC) assert trained_model_results["accuracy"] > base_model_results["accuracy"], ( f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}" ) assert trained_model_results["f1"] > base_model_results["f1"], ( f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}" ) return base_model_results, trained_model_results if __name__ == "__main__": for opt_level in ["O1", "O2"]: baseline_not_trained, baseline_trained = train_baseline(opt_level) accelerator_not_trained, accelerator_trained = train_integration(opt_level) assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], ( f"Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}" ) assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], ( f"F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}" ) assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], ( f"Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}" ) assert baseline_trained["f1"] == accelerator_trained["f1"], ( 
f"F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}" ) accelerate-1.9.0/benchmarks/fp8/torchao/000077500000000000000000000000001503574341000200645ustar00rootroot00000000000000accelerate-1.9.0/benchmarks/fp8/torchao/Dockerfile000066400000000000000000000003531503574341000220570ustar00rootroot00000000000000FROM nvcr.io/nvidia/pytorch:24.07-py3 RUN pip install transformers evaluate datasets RUN git clone https://github.com/huggingface/accelerate.git RUN cd accelerate && \ pip install -e . && \ cd benchmarks/fp8 RUN /bin/bash accelerate-1.9.0/benchmarks/fp8/torchao/README.md000066400000000000000000000020651503574341000213460ustar00rootroot00000000000000# FP8 Benchmarks Comparing and running [torchao](https://github.com/pytorch/ao/tree/main/torchao/float8) FP8 with accelerate ## Overview This repo provides scripts which compare native `torchao` model training against `accelerate`'s own integration. Each modeling type is segmented out via a script, supporting the following: * Single GPU training (`non_distributed.py`) * Multi-GPU training via DistributedDataParallelism (`ddp.py`) * Fully Sharded Data Parallelism (`fsdp.py`) * DeepSpeed ZeRO 1-3 (`deepspeed.py`) To run them, it's recommended to use a docker image (see the attached `Dockerfile`) and not install `torchao` manually. ## Running: There are official Docker images located at `huggingface/accelerate:gpu-fp8-torchao-nightly` which can be used. You can run all scripts using the core `accelerate launch` command without any `accelerate config` being needed. For single GPU, run it via `python`: ```bash python non_distributed.py ``` For the rest, run it via `accelerate launch`: ```bash accelerate launch ddp.py # or distrib_deepspeed.py, ddp.py ```accelerate-1.9.0/benchmarks/fp8/torchao/ddp.py000066400000000000000000000146031503574341000212110ustar00rootroot00000000000000# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This script tests to ensure that `accelerate` performs at the same level as raw `torchao`. This particular script verifies this for DDP training. 
""" from functools import partial import evaluate import torch from fp8_utils import get_training_utilities from torch.nn.parallel import DistributedDataParallel as DDP from torchao.float8 import convert_to_float8_training from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.utils import AORecipeKwargs, set_seed MODEL_NAME = "bert-base-cased" METRIC = evaluate.load("glue", "mrpc") def evaluate_model(model, dataloader, metric, accelerator=None): "Turns model to .eval(), runs dataloader, calculates metric, then turns eval back on" model.eval() for step, batch in enumerate(dataloader): with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) references = batch["labels"] if accelerator is not None and accelerator.num_processes > 1: predictions, references = accelerator.gather_for_metrics((predictions, references)) metric.add_batch(predictions=predictions, references=references) return metric.compute() def filter_linear_layers(module, fqn, first_layer_name=None, last_layer_name=None): if isinstance(module, torch.nn.Linear): if module.in_features % 16 != 0 or module.out_features % 16 != 0: return False # For stability reasons, we skip the first and last linear layers # Otherwise can lead to the model not training or converging properly if fqn in (first_layer_name, last_layer_name): return False return True def train_baseline(): set_seed(42) model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(MODEL_NAME) first_linear = None last_linear = None for name, module in model.named_modules(): if isinstance(module, torch.nn.Linear): if first_linear is None: first_linear = name last_linear = name func = partial(filter_linear_layers, first_layer_name=first_linear, last_layer_name=last_linear) accelerator = Accelerator() device = accelerator.device model.to(device) convert_to_float8_training(model, module_filter_fn=func) # Convert the model to DDP device_ids, output_device = [accelerator.local_process_index], accelerator.local_process_index model = DDP(model, device_ids=device_ids, output_device=output_device) base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) model.train() for batch in train_dataloader: with torch.autocast(device_type="cuda", dtype=torch.bfloat16): batch = batch.to(device) outputs = model(**batch) loss = outputs.loss loss.backward() optimizer.step() optimizer.zero_grad() lr_scheduler.step() trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) assert trained_model_results["accuracy"] > base_model_results["accuracy"], ( f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}" ) assert trained_model_results["f1"] > base_model_results["f1"], ( f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}" ) return base_model_results, trained_model_results def train_integration(): AcceleratorState()._reset_state(True) accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=[AORecipeKwargs()]) set_seed(42) model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities( MODEL_NAME, accelerator=accelerator ) model, optimizer = accelerator.prepare(model, optimizer) base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) model.train() for batch in train_dataloader: outputs = model(**batch) loss = outputs.loss 
accelerator.backward(loss) optimizer.step() optimizer.zero_grad() lr_scheduler.step() trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) assert trained_model_results["accuracy"] > base_model_results["accuracy"], ( f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}" ) assert trained_model_results["f1"] > base_model_results["f1"], ( f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}" ) return base_model_results, trained_model_results if __name__ == "__main__": baseline_not_trained, baseline_trained = train_baseline() accelerator_not_trained, accelerator_trained = train_integration() assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], ( f"Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}" ) assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], ( f"F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}" ) assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], ( f"Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}" ) assert baseline_trained["f1"] == accelerator_trained["f1"], ( f"F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}" ) torch.distributed.destroy_process_group() accelerate-1.9.0/benchmarks/fp8/torchao/distrib_deepspeed.py000066400000000000000000000203201503574341000241110ustar00rootroot00000000000000# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This script tests to ensure that `accelerate` performs at the same level as raw `torchao`. This particular script verifies this for deepspeed training. 
""" from functools import partial from unittest.mock import patch import deepspeed import evaluate import torch from fp8_utils import evaluate_model, get_training_utilities from torchao.float8 import convert_to_float8_training from transformers.integrations import HfDeepSpeedConfig from accelerate import Accelerator, DeepSpeedPlugin from accelerate.state import AcceleratorState from accelerate.utils import AORecipeKwargs, set_seed MODEL_NAME = "bert-base-cased" METRIC = evaluate.load("glue", "mrpc") def filter_linear_layers(module, fqn, first_layer_name=None, last_layer_name=None): if isinstance(module, torch.nn.Linear): if module.in_features % 16 != 0 or module.out_features % 16 != 0: return False # For stability reasons, we skip the first and last linear layers # Otherwise can lead to the model not training or converging properly if fqn in (first_layer_name, last_layer_name): return False return True def train_baseline(zero_stage: int = 1): set_seed(42) # This forces transformers to think Zero-3 Init should be used with patch("transformers.integrations.deepspeed.is_deepspeed_zero3_enabled") as mock: mock.return_value = zero_stage == 3 config = HfDeepSpeedConfig( { "train_micro_batch_size_per_gpu": 16, "gradient_accumulation_steps": 1, "zero_optimization": {"stage": zero_stage}, } ) plugin = DeepSpeedPlugin(hf_ds_config=config) accelerator = Accelerator(deepspeed_plugin=plugin) model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities( MODEL_NAME, accelerator=accelerator ) first_linear = None last_linear = None for name, module in model.named_modules(): if isinstance(module, torch.nn.Linear): if first_linear is None: first_linear = name last_linear = name func = partial(filter_linear_layers, first_layer_name=first_linear, last_layer_name=last_linear) convert_to_float8_training(model, module_filter_fn=func) import numpy as np config = { "train_batch_size": 32, "train_micro_batch_size_per_gpu": 16, "gradient_accumulation_steps": 1, "zero_optimization": { "stage": zero_stage, "offload_optimizer": {"device": "none", "nvme_path": None}, "offload_param": {"device": "none", "nvme_path": None}, "stage3_gather_16bit_weights_on_model_save": False, }, "gradient_clipping": 1.0, "steps_per_print": np.inf, "bf16": {"enabled": True}, "fp16": {"enabled": False}, "zero_allow_untested_optimizer": True, } ( model, optimizer, _, lr_scheduler, ) = deepspeed.initialize( model=model, optimizer=optimizer, lr_scheduler=lr_scheduler, config_params=config, ) base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) model.train() model_outputs = [] data = [] for batch in train_dataloader: outputs = model(**batch) data.append(batch.to("cpu")) model_outputs.append(outputs.logits.to("cpu")) loss = outputs.loss model.backward(loss) model.step() for _ in range(accelerator.num_processes): lr_scheduler.step() trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) model.destroy() assert trained_model_results["accuracy"] > base_model_results["accuracy"], ( f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}" ) assert trained_model_results["f1"] > base_model_results["f1"], ( f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}" ) del config return base_model_results, trained_model_results, model_outputs, data def train_integration(zero_stage: int = 1): set_seed(42) 
AcceleratorState()._reset_state(True) config = HfDeepSpeedConfig( { "train_micro_batch_size_per_gpu": 16, "gradient_accumulation_steps": 1, "zero_optimization": {"stage": zero_stage}, } ) deepspeed_plugin = DeepSpeedPlugin( hf_ds_config=config, ) # This forces transformers to think Zero-3 Init should be used with patch("transformers.integrations.deepspeed.is_deepspeed_zero3_enabled") as mock: mock.return_value = zero_stage == 3 accelerator = Accelerator( mixed_precision="fp8", kwargs_handlers=[AORecipeKwargs()], deepspeed_plugin=deepspeed_plugin ) model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities( MODEL_NAME, accelerator=accelerator ) model, optimizer, lr_scheduler, train_dataloader, eval_dataloader = accelerator.prepare( model, optimizer, lr_scheduler, train_dataloader, eval_dataloader ) base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) model.train() model_outputs = [] data = [] for batch in train_dataloader: outputs = model(**batch) data.append(batch.to("cpu")) model_outputs.append(outputs.logits.to("cpu")) loss = outputs.loss accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) model.destroy() assert trained_model_results["accuracy"] > base_model_results["accuracy"], ( f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}" ) assert trained_model_results["f1"] > base_model_results["f1"], ( f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}" ) del config return base_model_results, trained_model_results, model_outputs, data if __name__ == "__main__": for zero_stage in [1, 2, 3]: baseline_not_trained, baseline_trained, baseline_outputs, baseline_data = train_baseline(zero_stage) accelerator_not_trained, accelerator_trained, accelerator_outputs, accelerator_data = train_integration( zero_stage ) assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], ( f"ZERO stage {zero_stage}: Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}" ) assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], ( f"ZERO stage {zero_stage}: F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}" ) assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], ( f"ZERO stage {zero_stage}: Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}" ) assert baseline_trained["f1"] == accelerator_trained["f1"], ( f"ZERO stage {zero_stage}: F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}" ) AcceleratorState()._reset_state(True) torch.distributed.destroy_process_group() accelerate-1.9.0/benchmarks/fp8/torchao/fp8_utils.py000066400000000000000000000104551503574341000223600ustar00rootroot00000000000000# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch def get_dataloaders(model_name: str, batch_size: int = 16): from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(model_name) datasets = load_dataset("glue", "mrpc") def tokenize_function(examples): # max_length=None => use the model max length (it's actually the default) outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: tokenized_datasets = datasets.map( tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library tokenized_datasets = tokenized_datasets.rename_column("label", "labels") def collate_fn(examples): return tokenizer.pad( examples, padding="longest", pad_to_multiple_of=16, # Specific for FP8 return_tensors="pt", ) # Instantiate dataloaders. train_dataloader = DataLoader( tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True ) eval_dataloader = DataLoader( tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=16, drop_last=True, ) return train_dataloader, eval_dataloader def get_training_utilities(model_name: str, batch_size: int = 16, accelerator=None, prepare=True): """ Returns a tuple of: - Model - Optimizer - Train dataloader (prepared) - Eval dataloader (prepared) - LR Scheduler Suitable for training on the MRPC dataset """ from torch.optim import AdamW from transformers import AutoModelForSequenceClassification, get_linear_schedule_with_warmup from accelerate import Accelerator if accelerator is None: accelerator = Accelerator() model = AutoModelForSequenceClassification.from_pretrained(model_name) train_dataloader, eval_dataloader = get_dataloaders(model_name, batch_size) optimizer = AdamW(model.parameters(), lr=0.0001) lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=100, num_training_steps=len(train_dataloader) * 2, ) train_dataloader, eval_dataloader = accelerator.prepare(train_dataloader, eval_dataloader) return model, optimizer, train_dataloader, eval_dataloader, lr_scheduler def get_named_parameters(model): """ Same thing as `Accelerator.get_named_parameters` Returns a list of the named parameters of the model (extracted from parallel) """ from accelerate.utils import extract_model_from_parallel model = extract_model_from_parallel(model) return {n: p for n, p in model.named_parameters()} def evaluate_model(model, dataloader, metric, accelerator=None): "Turns model to .eval(), runs dataloader, calculates metric, then turns eval back on" model.eval() for step, batch in enumerate(dataloader): with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) references = batch["labels"] if accelerator is not None and accelerator.num_processes > 1: predictions, references = 
accelerator.gather_for_metrics((predictions, references)) metric.add_batch(predictions=predictions, references=references) return metric.compute() accelerate-1.9.0/benchmarks/fp8/torchao/fsdp.py000066400000000000000000000157321503574341000214020ustar00rootroot00000000000000# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This script tests to ensure that `accelerate` performs at the same level as raw `torchao`. This particular script verifies this for FSDP training. """ from functools import partial import evaluate import torch from fp8_utils import get_training_utilities from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp import MixedPrecision from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torchao.float8 import convert_to_float8_training from transformers.models.bert import BertLayer from accelerate import Accelerator from accelerate import FullyShardedDataParallelPlugin as FSDPPlugin from accelerate.state import AcceleratorState from accelerate.utils import AORecipeKwargs, set_seed MODEL_NAME = "bert-base-cased" METRIC = evaluate.load("glue", "mrpc") FSDP_WRAP_POLICY = partial(transformer_auto_wrap_policy, transformer_layer_cls={BertLayer}) def filter_linear_layers(module, fqn, first_layer_name=None, last_layer_name=None): if isinstance(module, torch.nn.Linear): if module.in_features % 16 != 0 or module.out_features % 16 != 0: return False # For stability reasons, we skip the first and last linear layers # Otherwise can lead to the model not training or converging properly if fqn in (first_layer_name, last_layer_name): return False return True def evaluate_model(model, dataloader, metric, accelerator=None): "Turns model to .eval(), runs dataloader, calculates metric, then turns eval back on" model.eval() for step, batch in enumerate(dataloader): with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) references = batch["labels"] if accelerator is not None and accelerator.num_processes > 1: predictions, references = accelerator.gather_for_metrics((predictions, references)) metric.add_batch(predictions=predictions, references=references) return metric.compute() def train_baseline(): set_seed(42) model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(MODEL_NAME) first_linear = None last_linear = None for name, module in model.named_modules(): if isinstance(module, torch.nn.Linear): if first_linear is None: first_linear = name last_linear = name func = partial(filter_linear_layers, first_layer_name=first_linear, last_layer_name=last_linear) accelerator = Accelerator() device = accelerator.device model.to(device) convert_to_float8_training(model, module_filter_fn=func) # Convert the model to FSDP model = FSDP( model, use_orig_params=True, mixed_precision=MixedPrecision(param_dtype=torch.bfloat16, reduce_dtype=torch.float32), auto_wrap_policy=FSDP_WRAP_POLICY, ) base_model_results = evaluate_model(model, 
eval_dataloader, METRIC, accelerator=accelerator) model.train() for batch in train_dataloader: with torch.autocast(device_type="cuda", dtype=torch.bfloat16): batch = batch.to(device) outputs = model(**batch) loss = outputs.loss loss.backward() optimizer.step() optimizer.zero_grad() lr_scheduler.step() trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) assert trained_model_results["accuracy"] > base_model_results["accuracy"], ( f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}" ) assert trained_model_results["f1"] > base_model_results["f1"], ( f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}" ) return base_model_results, trained_model_results def train_integration(): AcceleratorState()._reset_state(True) fsdp_plugin = FSDPPlugin( auto_wrap_policy=FSDP_WRAP_POLICY, use_orig_params=True, mixed_precision_policy=MixedPrecision(param_dtype=torch.bfloat16, reduce_dtype=torch.float32), ) accelerator = Accelerator(mixed_precision="fp8", fsdp_plugin=fsdp_plugin, kwargs_handlers=[AORecipeKwargs()]) set_seed(42) model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities( MODEL_NAME, accelerator=accelerator ) model, optimizer = accelerator.prepare(model, optimizer) base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) model.train() for batch in train_dataloader: outputs = model(**batch) loss = outputs.loss accelerator.backward(loss) optimizer.step() optimizer.zero_grad() lr_scheduler.step() trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) assert trained_model_results["accuracy"] > base_model_results["accuracy"], ( f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}" ) assert trained_model_results["f1"] > base_model_results["f1"], ( f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}" ) return base_model_results, trained_model_results if __name__ == "__main__": baseline_not_trained, baseline_trained = train_baseline() accelerator_not_trained, accelerator_trained = train_integration() assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], ( f"Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}" ) assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], ( f"F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}" ) assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], ( f"Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}" ) assert baseline_trained["f1"] == accelerator_trained["f1"], ( f"F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}" ) torch.distributed.destroy_process_group() accelerate-1.9.0/benchmarks/fp8/torchao/non_distributed.py000066400000000000000000000135411503574341000236360ustar00rootroot00000000000000# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This script tests to ensure that `accelerate` performs at the same level as raw `torchao`. This particular script verifies this for single GPU training. """ from functools import partial import evaluate import torch from fp8_utils import get_training_utilities from torchao.float8 import convert_to_float8_training from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.utils import AORecipeKwargs, set_seed MODEL_NAME = "bert-base-cased" METRIC = evaluate.load("glue", "mrpc") def evaluate_model(model, dataloader, metric, accelerator=None): "Turns model to .eval(), runs dataloader, calculates metric, then turns eval back on" model.eval() for step, batch in enumerate(dataloader): with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) references = batch["labels"] if accelerator is not None and accelerator.num_processes > 1: predictions, references = accelerator.gather_for_metrics((predictions, references)) metric.add_batch(predictions=predictions, references=references) return metric.compute() def filter_linear_layers(module, fqn, first_layer_name=None, last_layer_name=None): if isinstance(module, torch.nn.Linear): if module.in_features % 16 != 0 or module.out_features % 16 != 0: return False # For stability reasons, we skip the first and last linear layers # Otherwise can lead to the model not training or converging properly if fqn in (first_layer_name, last_layer_name): return False return True def train_baseline(): set_seed(42) model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(MODEL_NAME) first_linear = None last_linear = None for name, module in model.named_modules(): if isinstance(module, torch.nn.Linear): if first_linear is None: first_linear = name last_linear = name func = partial(filter_linear_layers, first_layer_name=first_linear, last_layer_name=last_linear) model.to("cuda") convert_to_float8_training(model, module_filter_fn=func) base_model_results = evaluate_model(model, eval_dataloader, METRIC) model.train() for batch in train_dataloader: with torch.autocast(device_type="cuda", dtype=torch.bfloat16): outputs = model(**batch) loss = outputs.loss loss.backward() optimizer.step() optimizer.zero_grad() lr_scheduler.step() trained_model_results = evaluate_model(model, eval_dataloader, METRIC) assert trained_model_results["accuracy"] > base_model_results["accuracy"], ( f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}" ) assert trained_model_results["f1"] > base_model_results["f1"], ( f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}" ) return base_model_results, trained_model_results def train_integration(): set_seed(42) accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=[AORecipeKwargs()]) model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities( MODEL_NAME, accelerator=accelerator ) model = accelerator.prepare(model) base_model_results = evaluate_model(model, eval_dataloader, METRIC) 
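# Single-GPU integration path: only the model is passed to accelerator.prepare here (the
# dataloaders were already prepared inside get_training_utilities), and the training loop below
# keeps the baseline's plain loss.backward()/optimizer.step() calls; the distributed variants of
# this benchmark use accelerator.backward instead.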
model.train() for batch in train_dataloader: outputs = model(**batch) loss = outputs.loss loss.backward() optimizer.step() optimizer.zero_grad() lr_scheduler.step() trained_model_results = evaluate_model(model, eval_dataloader, METRIC) assert trained_model_results["accuracy"] > base_model_results["accuracy"], ( f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}" ) assert trained_model_results["f1"] > base_model_results["f1"], ( f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}" ) return base_model_results, trained_model_results if __name__ == "__main__": baseline_not_trained, baseline_trained = train_baseline() AcceleratorState._reset_state(True) accelerator_not_trained, accelerator_trained = train_integration() assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], ( f"Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}" ) assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], ( f"F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}" ) assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], ( f"Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}" ) assert baseline_trained["f1"] == accelerator_trained["f1"], ( f"F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}" ) accelerate-1.9.0/benchmarks/fp8/transformer_engine/000077500000000000000000000000001503574341000223145ustar00rootroot00000000000000accelerate-1.9.0/benchmarks/fp8/transformer_engine/Dockerfile000066400000000000000000000004571503574341000243140ustar00rootroot00000000000000ARG BASE_YEAR=25 ARG BASE_MONTH=03 FROM nvcr.io/nvidia/pytorch:${BASE_YEAR}.${BASE_MONTH}-py3 RUN pip install transformers evaluate datasets RUN git clone https://github.com/huggingface/accelerate.git RUN cd accelerate && \ pip install -e .[deepspeed] && \ cd benchmarks/fp8 RUN /bin/bash accelerate-1.9.0/benchmarks/fp8/transformer_engine/README.md000066400000000000000000000021201503574341000235660ustar00rootroot00000000000000# FP8 Benchmarks Comparing and running [TransformerEngine](https://github.com/NVIDIA/TransformerEngine) FP8 with accelerate ## Overview This repo provides scripts which compare native TransformerEngine model training against `accelerate`'s own integration. Each modeling type is segmented out via a script, supporting the following: * Single GPU training (`non_distributed.py`) * Multi-GPU training via DistributedDataParallelism (`ddp.py`) * Fully Sharded Data Parallelism (`fsdp.py`) * DeepSpeed ZeRO 1-3 (`deepspeed.py`) To run them, it's recommended to use a docker image (see the attached `Dockerfile`) and not install `TransformerEngine` manually. ## Running: There are official Docker images located at `huggingface/accelerate:gpu-fp8-transformerengine-nightly` which can be used. You can run all scripts using the core `accelerate launch` command without any `accelerate config` being needed. 
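For reference, the `accelerate` variants of these scripts turn FP8 on in code rather than through `accelerate config`. The sketch below (model, optimizer and dataloader preparation omitted) mirrors the recipe values used by the scripts in this folder; it is an illustrative snippet, not a replacement for those scripts:

```python
from accelerate import Accelerator
from accelerate.utils import FP8RecipeKwargs

# Delayed-scaling recipe matching the values used throughout these benchmarks
kwargs_handlers = [FP8RecipeKwargs(backend="TE", fp8_format="HYBRID", amax_history_len=32, amax_compute_algo="max")]
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs_handlers)
# model, optimizer, dataloaders and scheduler are then passed through accelerator.prepare(...)
```

The raw-TransformerEngine baselines instead call `convert_model` on the network and wrap the forward pass in `te.fp8_autocast` themselves.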
For single GPU, run it via `python`: ```bash python non_distributed.py ``` For the rest, run it via `accelerate launch`: ```bash accelerate launch ddp.py # or distrib_deepspeed.py, ddp.py ```accelerate-1.9.0/benchmarks/fp8/transformer_engine/ddp.py000066400000000000000000000137441503574341000234460ustar00rootroot00000000000000# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This script tests to ensure that `accelerate` performs at the same level as raw `TransformersEngine`. This particular script verifies this for DDP training. """ import evaluate import torch import transformer_engine.common.recipe as te_recipe import transformer_engine.pytorch as te from fp8_utils import evaluate_model, get_named_parameters, get_training_utilities from torch.nn.parallel import DistributedDataParallel as DDP from transformer_engine.common.recipe import DelayedScaling from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.utils import FP8RecipeKwargs, set_seed from accelerate.utils.transformer_engine import convert_model MODEL_NAME = "bert-base-cased" METRIC = evaluate.load("glue", "mrpc") def train_baseline(): set_seed(42) model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(MODEL_NAME) accelerator = Accelerator() device = accelerator.device model.to(device) # Convert the model to TE old_named_params = get_named_parameters(model) with torch.no_grad(): convert_model(model) FP8_RECIPE_KWARGS = {"fp8_format": te_recipe.Format.HYBRID, "amax_history_len": 32, "amax_compute_algo": "max"} fp8_recipe = DelayedScaling(**FP8_RECIPE_KWARGS) new_named_params = get_named_parameters(model) # Convert the model to DDP device_ids, output_device = [accelerator.local_process_index], accelerator.local_process_index model = DDP(model, device_ids=device_ids, output_device=output_device) mapping = {p: new_named_params[n] for n, p in old_named_params.items()} for param_group in optimizer.param_groups: param_group["params"] = [mapping[p] for p in param_group["params"]] base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) model.train() for _ in range(2): for batch in train_dataloader: with te.fp8_autocast(enabled=True, fp8_recipe=fp8_recipe): with torch.autocast(device_type="cuda", dtype=torch.bfloat16): batch = batch.to(device) outputs = model(**batch) loss = outputs.loss loss.backward() optimizer.step() optimizer.zero_grad() lr_scheduler.step() trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) assert trained_model_results["accuracy"] > base_model_results["accuracy"], ( f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}" ) assert trained_model_results["f1"] > base_model_results["f1"], ( f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}" ) return base_model_results, trained_model_results 
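# NOTE: train_integration below runs the same experiment as train_baseline above, but lets
# accelerate own the FP8 setup: passing mixed_precision="fp8" together with
# FP8RecipeKwargs(backend="TE", ...) means Accelerator.prepare takes care of the
# TransformerEngine conversion and fp8 autocasting, so no manual convert_model, te.fp8_autocast,
# DDP wrapping, or optimizer-parameter remapping appears in it. The assertions in __main__ check
# that both paths produce identical metrics.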
def train_integration(): FP8_RECIPE_KWARGS = {"fp8_format": "HYBRID", "amax_history_len": 32, "amax_compute_algo": "max"} kwargs_handlers = [FP8RecipeKwargs(backend="TE", **FP8_RECIPE_KWARGS)] AcceleratorState()._reset_state(True) accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs_handlers) set_seed(42) model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities( MODEL_NAME, accelerator=accelerator ) model, optimizer = accelerator.prepare(model, optimizer) base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) model.train() for _ in range(2): for batch in train_dataloader: outputs = model(**batch) loss = outputs.loss accelerator.backward(loss) optimizer.step() optimizer.zero_grad() lr_scheduler.step() trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) assert trained_model_results["accuracy"] > base_model_results["accuracy"], ( f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}" ) assert trained_model_results["f1"] > base_model_results["f1"], ( f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}" ) return base_model_results, trained_model_results if __name__ == "__main__": baseline_not_trained, baseline_trained = train_baseline() accelerator_not_trained, accelerator_trained = train_integration() assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], ( f"Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}" ) assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], ( f"F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}" ) assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], ( f"Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}" ) assert baseline_trained["f1"] == accelerator_trained["f1"], ( f"F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}" ) torch.distributed.destroy_process_group() accelerate-1.9.0/benchmarks/fp8/transformer_engine/distrib_deepspeed.py000066400000000000000000000171751503574341000263570ustar00rootroot00000000000000# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This script tests to ensure that `accelerate` performs at the same level as raw `TransformersEngine`. This particular script verifies this for DDP training. 
""" from unittest.mock import patch import deepspeed import evaluate import torch import transformer_engine.common.recipe as te_recipe import transformer_engine.pytorch as te from fp8_utils import evaluate_model, get_named_parameters, get_training_utilities from transformer_engine.common.recipe import DelayedScaling from accelerate import Accelerator, DeepSpeedPlugin from accelerate.state import AcceleratorState from accelerate.utils import FP8RecipeKwargs, set_seed from accelerate.utils.transformer_engine import convert_model MODEL_NAME = "bert-base-cased" METRIC = evaluate.load("glue", "mrpc") def train_baseline(zero_stage: int = 1): # This forces transformers to think Zero-3 Init should be used with patch("transformers.integrations.deepspeed.is_deepspeed_zero3_enabled") as mock: mock.return_value = zero_stage == 3 set_seed(42) accelerator = Accelerator() model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities( MODEL_NAME, accelerator=accelerator ) # Convert the model to TE old_named_params = get_named_parameters(model) with torch.no_grad(): convert_model(model) new_named_params = get_named_parameters(model) mapping = {p: new_named_params[n] for n, p in old_named_params.items()} for param_group in optimizer.param_groups: param_group["params"] = [mapping[p] for p in param_group["params"]] FP8_RECIPE_KWARGS = {"fp8_format": te_recipe.Format.HYBRID, "amax_history_len": 32, "amax_compute_algo": "max"} fp8_recipe = DelayedScaling(**FP8_RECIPE_KWARGS) import numpy as np config = { "train_batch_size": 16, "train_micro_batch_size_per_gpu": 16, "gradient_accumulation_steps": 1, "zero_optimization": { "stage": zero_stage, "offload_optimizer": {"device": "none", "nvme_path": None}, "offload_param": {"device": "none", "nvme_path": None}, "stage3_gather_16bit_weights_on_model_save": False, }, "gradient_clipping": 1.0, "steps_per_print": np.inf, "bf16": {"enabled": True}, "fp16": {"enabled": False}, "zero_allow_untested_optimizer": True, } ( model, optimizer, _, _, ) = deepspeed.initialize( model=model, optimizer=optimizer, config_params=config, ) base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) model.train() model_outputs = [] data = [] for _ in range(2): for batch in train_dataloader: with te.fp8_autocast(enabled=True, fp8_recipe=fp8_recipe): outputs = model(**batch) data.append(batch.to("cpu")) model_outputs.append(outputs.logits.to("cpu")) loss = outputs.loss model.backward(loss) model.step() for _ in range(accelerator.num_processes): lr_scheduler.step() trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) model.destroy() assert trained_model_results["accuracy"] > base_model_results["accuracy"], ( f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}" ) assert trained_model_results["f1"] > base_model_results["f1"], ( f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}" ) return base_model_results, trained_model_results, model_outputs, data def train_integration(zero_stage: int = 1): set_seed(42) FP8_RECIPE_KWARGS = {"fp8_format": "HYBRID", "amax_history_len": 32, "amax_compute_algo": "max"} kwargs_handlers = [FP8RecipeKwargs(backend="TE", **FP8_RECIPE_KWARGS)] AcceleratorState()._reset_state(True) deepspeed_plugin = DeepSpeedPlugin( zero_stage=zero_stage, zero3_init_flag=zero_stage == 3, ) accelerator = Accelerator( mixed_precision="fp8", 
kwargs_handlers=kwargs_handlers, deepspeed_plugin=deepspeed_plugin ) accelerator.state.deepspeed_plugin.deepspeed_config["train_micro_batch_size_per_gpu"] = 16 model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities( MODEL_NAME, accelerator=accelerator ) model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler) base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) model.train() model_outputs = [] data = [] for _ in range(2): for batch in train_dataloader: outputs = model(**batch) data.append(batch.to("cpu")) model_outputs.append(outputs.logits.to("cpu")) loss = outputs.loss accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) model.destroy() assert trained_model_results["accuracy"] > base_model_results["accuracy"], ( f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}" ) assert trained_model_results["f1"] > base_model_results["f1"], ( f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}" ) return base_model_results, trained_model_results, model_outputs, data if __name__ == "__main__": for zero_stage in [1, 2, 3]: baseline_not_trained, baseline_trained, baseline_outputs, baseline_data = train_baseline(zero_stage) accelerator_not_trained, accelerator_trained, accelerator_outputs, accelerator_data = train_integration( zero_stage ) assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], ( f"ZERO stage {zero_stage}: Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}" ) assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], ( f"ZERO stage {zero_stage}: F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}" ) assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], ( f"ZERO stage {zero_stage}: Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}" ) assert baseline_trained["f1"] == accelerator_trained["f1"], ( f"ZERO stage {zero_stage}: F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}" ) torch.distributed.destroy_process_group() accelerate-1.9.0/benchmarks/fp8/transformer_engine/fp8_utils.py000066400000000000000000000104371503574341000246100ustar00rootroot00000000000000# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
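# Shared helpers for the TransformerEngine FP8 benchmarks: GLUE/MRPC dataloaders (padding to a
# multiple of 16 is specific to FP8), model/optimizer/scheduler construction, and a
# distributed-aware evaluate_model.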
import torch def get_dataloaders(model_name: str, batch_size: int = 16): from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(model_name) datasets = load_dataset("glue", "mrpc") def tokenize_function(examples): # max_length=None => use the model max length (it's actually the default) outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: tokenized_datasets = datasets.map( tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library tokenized_datasets = tokenized_datasets.rename_column("label", "labels") def collate_fn(examples): return tokenizer.pad( examples, padding="longest", pad_to_multiple_of=16, # Specific for FP8 return_tensors="pt", ) # Instantiate dataloaders. train_dataloader = DataLoader( tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True ) eval_dataloader = DataLoader( tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=16, drop_last=True, ) return train_dataloader, eval_dataloader def get_training_utilities(model_name: str, batch_size: int = 16, accelerator=None): """ Returns a tuple of: - Model - Optimizer - Train dataloader (prepared) - Eval dataloader (prepared) - LR Scheduler Suitable for training on the MRPC dataset """ from torch.optim import AdamW from transformers import AutoModelForSequenceClassification, get_linear_schedule_with_warmup from accelerate import Accelerator if accelerator is None: accelerator = Accelerator() model = AutoModelForSequenceClassification.from_pretrained(model_name) train_dataloader, eval_dataloader = get_dataloaders(model_name, batch_size) optimizer = AdamW(model.parameters(), lr=0.0001) lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=100, num_training_steps=len(train_dataloader) * 2, ) train_dataloader, eval_dataloader = accelerator.prepare(train_dataloader, eval_dataloader) return model, optimizer, train_dataloader, eval_dataloader, lr_scheduler def get_named_parameters(model): """ Same thing as `Accelerator.get_named_parameters` Returns a list of the named parameters of the model (extracted from parallel) """ from accelerate.utils import extract_model_from_parallel model = extract_model_from_parallel(model) return {n: p for n, p in model.named_parameters()} def evaluate_model(model, dataloader, metric, accelerator=None): "Turns model to .eval(), runs dataloader, calculates metric, then turns eval back on" model.eval() for step, batch in enumerate(dataloader): with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) references = batch["labels"] if accelerator is not None and accelerator.num_processes > 1: predictions, references = accelerator.gather_for_metrics((predictions, references)) metric.add_batch(predictions=predictions, references=references) return metric.compute() accelerate-1.9.0/benchmarks/fp8/transformer_engine/fsdp.py000066400000000000000000000151421503574341000236250ustar00rootroot00000000000000# Copyright 2024 The HuggingFace Inc. team. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This script tests to ensure that `accelerate` performs at the same level as raw `TransformersEngine`. This particular script verifies this for FSDP training. """ from functools import partial import evaluate import torch import transformer_engine.common.recipe as te_recipe import transformer_engine.pytorch as te from fp8_utils import evaluate_model, get_named_parameters, get_training_utilities from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp import MixedPrecision from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from transformer_engine.common.recipe import DelayedScaling from transformers.models.bert import BertLayer from accelerate import Accelerator from accelerate import FullyShardedDataParallelPlugin as FSDPPlugin from accelerate.state import AcceleratorState from accelerate.utils import FP8RecipeKwargs, set_seed from accelerate.utils.transformer_engine import convert_model MODEL_NAME = "bert-base-cased" METRIC = evaluate.load("glue", "mrpc") FSDP_WRAP_POLICY = partial(transformer_auto_wrap_policy, transformer_layer_cls={BertLayer}) def train_baseline(): set_seed(42) model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(MODEL_NAME) accelerator = Accelerator() device = accelerator.device model.to(device) # Convert the model to TE old_named_params = get_named_parameters(model) with torch.no_grad(): convert_model(model) FP8_RECIPE_KWARGS = {"fp8_format": te_recipe.Format.HYBRID, "amax_history_len": 32, "amax_compute_algo": "max"} fp8_recipe = DelayedScaling(**FP8_RECIPE_KWARGS) new_named_params = get_named_parameters(model) # Convert the model to FSDP model = FSDP( model, use_orig_params=True, mixed_precision=MixedPrecision(param_dtype=torch.bfloat16, reduce_dtype=torch.float32), auto_wrap_policy=FSDP_WRAP_POLICY, ) mapping = {p: new_named_params[n] for n, p in old_named_params.items()} for param_group in optimizer.param_groups: param_group["params"] = [mapping[p] for p in param_group["params"]] base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) model.train() for _ in range(2): for batch in train_dataloader: with te.fp8_autocast(enabled=True, fp8_recipe=fp8_recipe): with torch.autocast(device_type="cuda", dtype=torch.bfloat16): batch = batch.to(device) outputs = model(**batch) loss = outputs.loss loss.backward() optimizer.step() optimizer.zero_grad() lr_scheduler.step() trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) assert trained_model_results["accuracy"] > base_model_results["accuracy"], ( f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}" ) assert trained_model_results["f1"] > base_model_results["f1"], ( f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}" ) return base_model_results, trained_model_results def train_integration(): 
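    # Same experiment as train_baseline above, but the FSDP wrapping and the TransformerEngine
    # conversion are both delegated to accelerate (FullyShardedDataParallelPlugin +
    # FP8RecipeKwargs(backend="TE")), so no manual convert_model, FSDP(...) wrapping, or
    # optimizer-parameter remapping is needed in this path.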
FP8_RECIPE_KWARGS = {"fp8_format": "HYBRID", "amax_history_len": 32, "amax_compute_algo": "max"} kwargs_handlers = [FP8RecipeKwargs(backend="TE", **FP8_RECIPE_KWARGS)] AcceleratorState()._reset_state(True) fsdp_plugin = FSDPPlugin( auto_wrap_policy=FSDP_WRAP_POLICY, use_orig_params=True, mixed_precision_policy=MixedPrecision(param_dtype=torch.bfloat16, reduce_dtype=torch.float32), ) accelerator = Accelerator(mixed_precision="fp8", fsdp_plugin=fsdp_plugin, kwargs_handlers=kwargs_handlers) set_seed(42) model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities( MODEL_NAME, accelerator=accelerator ) model, optimizer = accelerator.prepare(model, optimizer) base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) model.train() for _ in range(2): for batch in train_dataloader: outputs = model(**batch) loss = outputs.loss accelerator.backward(loss) optimizer.step() optimizer.zero_grad() lr_scheduler.step() trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) assert trained_model_results["accuracy"] > base_model_results["accuracy"], ( f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}" ) assert trained_model_results["f1"] > base_model_results["f1"], ( f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}" ) return base_model_results, trained_model_results if __name__ == "__main__": baseline_not_trained, baseline_trained = train_baseline() accelerator_not_trained, accelerator_trained = train_integration() assert baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], ( f"Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}" ) assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], ( f"F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}" ) assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], ( f"Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}" ) assert baseline_trained["f1"] == accelerator_trained["f1"], ( f"F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}" ) torch.distributed.destroy_process_group() accelerate-1.9.0/benchmarks/fp8/transformer_engine/non_distributed.py000066400000000000000000000126661503574341000260750ustar00rootroot00000000000000# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This script tests to ensure that `accelerate` performs at the same level as raw `TransformersEngine`. This particular script verifies this for single GPU training. 
""" import evaluate import torch import transformer_engine.common.recipe as te_recipe import transformer_engine.pytorch as te from fp8_utils import evaluate_model, get_named_parameters, get_training_utilities from transformer_engine.common.recipe import DelayedScaling from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.utils import FP8RecipeKwargs, set_seed from accelerate.utils.transformer_engine import convert_model MODEL_NAME = "bert-base-cased" METRIC = evaluate.load("glue", "mrpc") def train_baseline(): set_seed(42) model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(MODEL_NAME) # Convert the model to TE old_named_params = get_named_parameters(model) with torch.no_grad(): convert_model(model) new_named_params = get_named_parameters(model) mapping = {p: new_named_params[n] for n, p in old_named_params.items()} for param_group in optimizer.param_groups: param_group["params"] = [mapping[p] for p in param_group["params"]] FP8_RECIPE_KWARGS = {"fp8_format": te_recipe.Format.HYBRID, "amax_history_len": 32, "amax_compute_algo": "max"} fp8_recipe = DelayedScaling(**FP8_RECIPE_KWARGS) model.to("cuda") base_model_results = evaluate_model(model, eval_dataloader, METRIC) model.train() for batch in train_dataloader: with te.fp8_autocast(enabled=True, fp8_recipe=fp8_recipe): with torch.autocast(device_type="cuda", dtype=torch.bfloat16): batch = batch.to("cuda") outputs = model(**batch) loss = outputs.loss loss.backward() optimizer.step() optimizer.zero_grad() lr_scheduler.step() trained_model_results = evaluate_model(model, eval_dataloader, METRIC) assert trained_model_results["accuracy"] > base_model_results["accuracy"], ( f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}" ) assert trained_model_results["f1"] > base_model_results["f1"], ( f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}" ) return base_model_results, trained_model_results def train_integration(): FP8_RECIPE_KWARGS = {"fp8_format": "HYBRID", "amax_history_len": 32, "amax_compute_algo": "max"} kwargs_handlers = [FP8RecipeKwargs(backend="TE", **FP8_RECIPE_KWARGS)] AcceleratorState()._reset_state(True) accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs_handlers) set_seed(42) model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities( MODEL_NAME, accelerator=accelerator ) model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler) base_model_results = evaluate_model(model, eval_dataloader, METRIC) model.train() for batch in train_dataloader: outputs = model(**batch) loss = outputs.loss accelerator.backward(loss) optimizer.step() optimizer.zero_grad() lr_scheduler.step() trained_model_results = evaluate_model(model, eval_dataloader, METRIC) assert trained_model_results["accuracy"] > base_model_results["accuracy"], ( f"Accuracy should be higher for the trained model: {trained_model_results['accuracy']} > {base_model_results['accuracy']}" ) assert trained_model_results["f1"] > base_model_results["f1"], ( f"F1 score should be higher for the trained model: {trained_model_results['f1']} > {base_model_results['f1']}" ) return base_model_results, trained_model_results if __name__ == "__main__": baseline_not_trained, baseline_trained = train_baseline() accelerator_not_trained, accelerator_trained = train_integration() assert 
baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"], ( f"Accuracy should be the same for the baseline and accelerator: {baseline_not_trained['accuracy']} == {accelerator_not_trained['accuracy']}" ) assert baseline_not_trained["f1"] == accelerator_not_trained["f1"], ( f"F1 score should be the same for the baseline and accelerator: {baseline_not_trained['f1']} == {accelerator_not_trained['f1']}" ) assert baseline_trained["accuracy"] == accelerator_trained["accuracy"], ( f"Accuracy should be the same for the baseline and accelerator: {baseline_trained['accuracy']} == {accelerator_trained['accuracy']}" ) assert baseline_trained["f1"] == accelerator_trained["f1"], ( f"F1 score should be the same for the baseline and accelerator: {baseline_trained['f1']} == {accelerator_trained['f1']}" ) accelerate-1.9.0/benchmarks/fsdp2/000077500000000000000000000000001503574341000167465ustar00rootroot00000000000000accelerate-1.9.0/benchmarks/fsdp2/README.md000066400000000000000000000073531503574341000202350ustar00rootroot00000000000000# FSDP2 Benchmarks This benchmark showcases `FSDP2` in 🤗 `accelerate` and compares it to `torch` baseline. ## Overview This benchmark consists of two parts: - `main.py` is the main script that runs the benchmark - `visualize.py` is the script that visualizes the results (if `--output_dir` was specified for the previous command) ## Motivation We want to showcase that 🤗 `accelerate`'s integration of `FSDP2` is on par raw PyTorch, and highlight a "broken" part in PyTorch that creating an optimizer before applying `FSDP2` **doesn't result in a working training loop**. (more on this later) This script showcases **matching memory usage and convergence between `accelerate` and `torch`'s baseline.** To deal with this breaking change (and maintain backward compatibility with FSDP1 in terms of an API), `accelerate` had to come up with a workaround since `accelerate` assumes that the user will nearly always create a model, optimizer, scheduler, etc beforehand and bring them themselves. This lead to an issue of a stark increase in memory as well as the model not even training if the user creates an optimizer beforehand. To workaround this, we replace the parameters inside the optimizer with the newly created FSDP2 sharded ones. More about this can be found in this [blog post (TBD)](TODO) > [!WARNING] > This script is intended to fit on 2x 24GB GPUs, though on so few GPUs it's not possible to see the memory difference (discrepancies in grad allocation result in lower memory usage in the non-fixed case), only the difference in convergence. Below are attached results from 8x H100 GPUs where the difference is visible. > TLDR: more GPUs = bigger memory difference between fixed and non-fixed cases. ## Results Here are the results from running the benchmark on 8x H100 GPUs:

![Allocated Memory Usage](imgs/allocated_memory.png)

![Reserved Memory Usage](imgs/reserved_memory.png)

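To make the Motivation section above concrete: the "fix" referred to throughout this README amounts to re-pointing the already-created optimizer at the new sharded parameters. The sketch below is illustrative only; the helper name and the `old_named_params` bookkeeping are assumptions for the example, not `accelerate`'s actual implementation:

```python
import torch


def swap_optimizer_parameters(model: torch.nn.Module, optimizer: torch.optim.Optimizer, old_named_params: dict):
    """Point an optimizer created before sharding at the parameters that now live on the model."""
    new_named_params = dict(model.named_parameters())
    # Map each stale (pre-shard) parameter object to its post-shard replacement by name
    mapping = {old_p: new_named_params[name] for name, old_p in old_named_params.items()}
    for group in optimizer.param_groups:
        group["params"] = [mapping.get(p, p) for p in group["params"]]
```

The `torch_pre_shard_fixed` baseline applies the same idea by hand to the raw `torch` setup.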
As you can see, the memory usage of `accelerate` and `torch_post_shard` (the **intended** way) is very similar, while `torch_pre_shard_not_fixed` uses significantly more memory. Our fix in `torch_pre_shard_fixed` brings the memory usage back in line with the **intended** approach.

> [!WARNING]
> Timing discrepancies are due to the benchmarks being run in a single script.

## Running

To run the benchmark, you can either use `accelerate launch` or `torchrun`:

```bash
accelerate launch main.py
```

```bash
# For two GPUs
torchrun --nproc_per_node 2 main.py
```

This supports multiple configurable options; you can learn about them by running:

```bash
python3 main.py --help
```

This script will run 4 different benchmarks:
- `torch_optimizer_after_fsdp`: `torch` baseline where the optimizer is created after applying `FSDP2`; this is the **intended** way to do it
- `torch_optimizer_before_fsdp_not_fixed`: `torch` baseline where the optimizer is created before applying `FSDP2` without fixing the optimizer parameters
- `torch_optimizer_before_fsdp_fixed`: `torch` baseline where the optimizer is created before applying `FSDP2` with our fix to the optimizer
- `accelerate`: `accelerate`'s own integration of `FSDP2` where the optimizer is created before applying `FSDP2`, but we apply our fix to the optimizer

Memory results are saved in a folder specified by the `--output_dir` argument. Optionally, you can specify `--save_memory_snapshot` to save the torch memory snapshot, which can then be viewed using [`torch memory viz`](https://pytorch.org/memory_viz).

## Visualizing results

To visualize the results, you can run:

```bash
python3 visualize.py --dir
```

This will then create two plots, showcasing allocated and reserved memory usage between all the different benchmarks discussed above.
accelerate-1.9.0/benchmarks/fsdp2/imgs/000077500000000000000000000000001503574341000177055ustar00rootroot00000000000000accelerate-1.9.0/benchmarks/fsdp2/imgs/allocated_memory.png000066400000000000000000003710251503574341000237430ustar00rootroot00000000000000
accelerate-1.9.0/benchmarks/fsdp2/imgs/000077500000000000000000000000001503574341000177055ustar00rootroot00000000000000
accelerate-1.9.0/benchmarks/fsdp2/imgs/allocated_memory.png000066400000000000000000003710251503574341000237430ustar00rootroot00000000000000
8m]s56lZ_^}U544թSTPP:;np=ԩ$o߾ncĴDr[}sڷo*++kСCu!%''O>Ç.!?4WVVvh[4&((bi64&56%7~o_]%)!!A|/nĉg$I*g5;[S<99YIIIͶ?ئ;-66VӞ={oֶmۚ5ڛ w^O>:xz=ۭ[fwرC\s"""dZP%&&~S~$]J%i%e׌3_kٲe*../E)11z؆ 4sL}7??+++K]tQ޽])((Հ;pp^w}j*XBVUzwp8<7l0]Vyyy2d$^k4;vLaaaի(Ir:ڶmd58NI:ul6{ >/ГO>ӧ Ē2ZՂ wi˖-UU,YiӦle;>L?>cٳwߕ*|p,)h`;h`;h`;h`y!B%#IENDB`accelerate-1.9.0/benchmarks/fsdp2/imgs/reserved_memory.png000066400000000000000000001604671503574341000236400ustar00rootroot00000000000000PNG  IHDR',:tEXtSoftwareMatplotlib version3.10.1, https://matplotlib.org/so pHYsaa?iIDATxy9sf1N]dϖHvBBW)dP(IJdK%lmB+Pؗaݿ?99gÙ9}_u\MaH@wP]DDDDDDDDDDTpEDDDDDDDDDD|@wP]DDDDDDDDDDTpEDDDDDDDDDD|@wP]DDDDDDDDDDTpEDDDDDDDDDD|@w""""q9GNq[pp0%Ky׏ em7ɓ'iժfРA:t(_5%KdӦM>svcOsT¢E=FÆ iݺ5ak֬cǎԨQ&M0aWQ1jԨAY|yW\I L0 -[ƃ>HZS=+Vpoǎ1ϹY&[vh">j׮<?|VN:ԬYv1}t\X|ҤI)Pn]Ŕ)ShӦ 5jԠA<ܹ3fPJoΓO>Iڵټys#-ZlI˖-]'nܸӲeKj֬IL>͛swҽ{wfZGDDDD:p<@ٲe]]tGyX @ gΜ9<,[ի/w1|pWfcL:/+}|7E0k,~'VZEXXqqqL2nJ*ꫯ~znJDD?8&)]߸q#?}: &""kײ|r^y~}t=]c+V`رݛÇݻ]kӫW/Ë/HRxӧk֬XbOSX1&Lm3~U~gv;ڵKv76oxժU݊<lذدJxx  Ydd$ӧOٳ7n;wn̙@ ~7 ,*'JSN$** Oo?xL.]0Ln׽a_+RNWرc4i±cǒ-t8H J]ɪ4]DDD$xi߾g2p@.ΝlSNaZ0""3c ѣ7nժUQFtܙ+pi aɑeGO0DwuJb 0HX^%..z(ZdvUOnݺ1j(qn:bbbԩS-ԙ3gܾOZvBuœ#qvpb6t҅~˗ך5k J( #F̝;9sP|y.~Y߆a石zj>̕+W%? \*UN$qI+W$М_ҥiР~-/2AAA믜:uޭ#&:}ۛ^v׾RFیdU*Eu[ʢjժٓKf:u䶿dTR fu^{m6o΂ ?>cǎW^5۵kGSl&ݮW'wqk׮%j*Uvxϸ{Yr%u֥|)86o&+WNqg']c=-_/i96Rk}:sΥ~˔*Uʵ{h! NZj|.]~p[dlݺVZvZ,S$} >봧{vdHVH5tP+&OLӦMf,YsQrAIIZUܹs<䓼<î#bccӴ7]ta̙lذB ?ӫW/ `6-p /$$;rJ˾}4i&(((cs%N*qp%֣>ʪU/^:W\N2e`69r}oLҥKӳgOzj套^bڵ<#4lؐ˗S`A.\vM_*P/^Lcǒ=߉ڷoϻL<Yv-Ŋs[ m۶lذ{wof͚K,ѣG)Zh;""""ri w*O<<\t)Smk޼9QQQnktC¬ &֒>vcǎ?tnAXV)\05j`۶mnkh;w_|{߉klܸ idzX۷o.\poISRcx oM,W\lۗ_~o6~f4jԈ doZUիWgڵZerwz\i޼9?3۶mKnb6]|ɓ]7 MD֭\yq+[V/^j?Q9w[~i~t?sήO?7|Ø1c8wuҥK|4hHXf˖-O5#?/{u-1K/ѷo_{1{9J,Ǚ3g/_LJXl5jHvZ /30`BBBꫯ8~xǵzTV={еkW7ʬ\2zbٲe 2G}~g>4imEyg>}:#FSNX,|rڵkG#'229s/еkWʗ/СC|g=zqƹs!у>_|i(RH ?ìZw}X^Yz56l^H'#""""7 """"9+B׮]yWXf (P>K2m4rE5;wʕ+9s&… RD 3<:OzO5k(Pロg}rʥωk?+ɶ;w&66 2l0G֭>}:w_ǵ[nL8R7n~;+V`8ʔ)Àׯ_SX1.]ʐ!C0 e2b7LIǎ2e V]E_xb6nG}fH"ԯ_ &$ܹsY`Ig}ug}ƒ%KT ̙3\aÆW_ѴiS|MΝ /@i۶-uq{!=ה'LHׯwZFDDDDhѢE\pwW2dǎ 6;v=i&dEDDDD2KKʈKdd$75k={VZV(Q~ݻw3l0ʔ)_{G|ի(""""9_{yңG B@@a3gdDDD7o^. B =aTp.""""""""""* """"""""""">.""""""""""* """"""""""">.""""""""""* """"""""""">.""""""""""* """"""""""">XOnʨQhذ!ӧOwrmOҽ{w%Kl2Ο?O*U3f 5j >>I&eiذ!&L@3a~wBCCi߾=#FlN{Gh ,Xv ߽D "(#D僈x|o2E kQldȑ#2d;w䥗^bĉlڴ3fom6ZhA`߿˗7`Gv?x`-ƍYp!7nd7gs&@L&""YADQF'D "3#d=88+WXpguX,5kFʕ]˗ӵkWj׮MHHO=7onrJHɟ??Æ c˖-={}2rHCr۷/˗/?2 X+Z(Dex|O"?3"K.)ӧOۚ6mJӦM]vΟ?OѢEؿ?۷wm7TZ}QjU"##^k{Ŋ a;w%K/_>իs1 K&91 \?\9*Nma`)ڱii7#&n3L\%v};M7gި͍C_m2ʈvs1TF$Wvs1p8/UFNO4Y=#dU]VɈʒ:ukuH`/_>._LDDyu۞7o^%u4܃,=fىa6 IvիD@bcl rf;ndd,!!X,nlXv,BCu8DGn0~EEtX v Ld6ɝ;80 wd8 ի 411VvBBMɔ&lRC+V%\av=!@|x;P1t:DE%m -kډKy lBC11fskdJ$ } At;0 $01LxÀ׷1U3"?8+qqTPeD"eDe5ʈʈ9=#0 oe2"2"AAW>Ƶ[HH|FŸyOvTHS_d*T@51c-Zp;rHxGѣw&wܮM6eС?o?ܵw}lܸҥK "ow&BC]wVyM('3 fḢ85ʈvs1TF$Wvs111 ̷>i rF\Q]0'gDBO$[pw:={' (ɞ(""o OZpr pxdrjJ'v q* 4'lbI~ĐNIjX>]ch7{afMi }ӧծ0f1TFdvu}g]m7s}2e2"lndNl߬\b3߮ƶ>ݘH,yԼ>|8Yf߿Ԯ]ҥK/_>jRF jԨӧtk}T[>;W\neLrm\rﮈHfڵ5kְaϟl{=xر#UTaѼys޽;s̡f͚0m4ڴiC…)\05kfќ={ Od kE/_L$"YGHH(W\ﶈd5k֬ ݞ0kbIx_`߾}K|5hЀ??yqEj֬\2Vɓ'~zv;-Z`ɓ3g0n8~xG4hP iGfn+WgʋHy}RX *TdIf@l>x,D "(DěfD"y2|,Yp<tμ"DzyDD\v;Z$""""""""{\]D$kIL`` BDDDDDDDD$R]bg?vmٲ1;wnqnw}6mGݕ,套aذ>0 ƎEMt 8q=8Vkә&ͅ٬uE$9ex|O"?3"4Un?<Ȓ%wbcc)T0͛w'\kߧ+fx ӧp6mږcc_2?AAAPmtЉ^OSW^aŊOxi4iFvH{/O<4}1ׯԩS8*THOдis,ˢEaPr:v}j3~ Lv[QڷH^b?W^ũSL89CϱNz>ز{/+ѦMT)S,=8ǏaŘi{R3EDDDDDDDnM*W;wng/8Z֭[h֬e+""""""""-)#9Nzk2]v7o>L&ʕק˒% ذa-<҅uVӥn݄7ߜngӦ?ӲecO2h̞=H%=jp,Kv-ܹ-? 
ҵkڵk \jҤ>۷o̙Ӵl_&y@jMB׮hݺ ?ñcGY|;eEln+W'?|C]tؚwޙnĉ}{h@^Ѫ=Af֬xGy9sQӡC+ڵkLttTF~ܹƍFX,ر3&L`B)Spڴi3ٰa wqUA.㏛RgyVtځ~ѵ}'~:tha?Sju=!i׮9:bǧ3| zGjgy'x App0u`UކUDDDDDDDDD3ųCԩp~d!֭pyϧ~3:?U{;v;~v̈́޾}{i֮i'ӴiK>s6m7xN@c+?_r-2{ y'O,˘1ϳlJLn ?vk/l'<$ 8FLn2u{?ѣGK >x?܉E>lrӏ̞oի61q8ʕ@Ŋ5_{m UV0 ^|q5kfժ0~]YQLY6oHVmܖfi֬EVP iF֭q?Ӊٜr2vwq-7SN=c/M4%>> GHHu`2سg5jb`ZyAkׁz .v fڴ2ku_ȑy啗h>Xj9f}/XfK|Hv9kwWRg3*$|Orx>T .E|- 0iHv1nMCMAߒS7<$!!!)r[˔)ӧ\U[7ʕ+Omٶm+ݻH\ n޿'88&Mp'<>#lUpVժ%;vٳBHHO<4=7^xAz<Ӝ:PP!jּ ѪU4-TLY?6/?g)sQb}z ذa-AX,8ug\۷ON%n[TiΞ= MS*-V8 ꕰ~',寿$,Y3gp%V-ն#"26H"#\$Y׿$ig6 $.Άөq8b8h=%'RF'$CɒoEĿ5 7[J)] +V|ʰa#ݶv֮z,::$H7{O1o"W ,,|qaӧ(^Dkq,YS‰!44+W" Lqy%Kq?n8M~zɒԩ11l6׬􈉉f޼Yt@hhnj֬ũS^߹sС#M޼rcugϞjԨa89x`Ϟ_YN,J?ŋHv0xٳ7.0yTYI%Hɜ(g,OC`ߜ;Kf6agN\UD2L "(D$5C dbѬYg ""08~ HXX=z\[;(( DZcGٸ[^ aŋz VofGDD0q8 Ir4ԩ+/,_~s={E>/_~6lD9xƽȬY3Rv;6رju_WP5kS 227|W_}9C Wq_vl6;vƍФIgӦL0=z{@jʗѣG[֭t2ʕ-Z Q^<{rw7bp|V+'NÊ V}ŋѣ7 d߾ټykcGN""Y_S)*vD3ūz0k|.OϞG"EhѢ5}!!IgBJ<ȃDEEҶm{tyM+ڵӧI~'Ο?ԩ:ښ۵kaQQQ $6oԩ9-7o>4i3< bayݩtԖܹhҤ) K֭ri^{e.]Dٲe6}J.2iӦDzw1ft=LƇcQ\pe3``:v/f+2h0ڵsԫwok',Sn}&N#tWfmV^{tյ=447|hC޼i׮iuEfz_'00@ ;Ezw7o^EZwfҋd5$K^\usS Ck[""""""E}OUtٓdÆ̙>k|HgZ޽3CeNV+ݺ=h֬eDZyFc٬tPfaa!DEEüt#ׂC餌O"ADlF)'""D߾Otvw'UWp"{osw%[3 8n<+r8q6QF'D "3#TpK(V}x'Xt!&;Վ^ I1g˂2BDR|O"?3BkOo?nH62yTw!UKaoݍb nw"9A{6A2BD'_)""rÙ&BC0g!P$s&)g%e"AD7EDDD츤HNH6羆^މ"r""""""9 ""rS8NwA$rr+;L "(De/g[inXKd2BD'ƟÇ?3tl˖ٹs{t۴iӔ>Zd)/<|0'm Æ Lq/]L}AO{mjһwwtCD$+1tT,A3%Uy%K>d=RPa7oIO'O4q~a3<gf?6m{ ҟ (R6:tD^Wb'44iz# $sR=֗'H(@Ǭ_SNt:P"{?AӦX`.}@`` aBUر3wͤ%0qmEi߾#z=N@@@\zN3q =:uꥸm7noߧĘ16ljIѢIDXX0QQ8FH-) pWF'D "3#Tpv#)^|q,y9 xy;vvڵ]䫯6v (Q$-ZNؘJ.s&)䓏X&N|;_P֝TZypyOgϞ݌5^N'y1c^d2ӻwdZ,ZbI޽1c^Iq[tt4Jo︣*kO2lG+dND "(DdDvLn[oMk~88MB׮hݺ ?ñcGY|;eEln+W'?|C]tؚwޙnĉ}{h@^Ѫ=Af֬xGy9sQӡC+ڵkLttTF~ܹƍFX,ر3&L`B)Spڴi3ٰa wqUA.㏛RgyVtځ~ѵ}'~:tha?Sju6V+-[6cʔX`.O?0v(;v}5̙3tׯfy:,k*GɩS<ɶҥCls~> >`)ߏ| ZlM>OPju6mFɒo^ *ڵҸqS~{2O>g|<._vLbٴiߤIo;wnڴi38|/[ZcO? cxg8qG:ƍ0jX| GpmXj=| _Oʔ)֭[سg͚T^P 4dӦ^s:)/'k/ygr3u?Ox\x8~}u6d2g.jԨlf|PPk7ޘƨQcݶ:ݻe_<̳+LwbZ9p`%"U9p7."""""7ZRό[qbn CB1?S&~-eʔSO~ \nݖm۶ҽ{TϕP@Fv<ӤI3񄇇S@p<&gє.]Ɇ 8q2 _\zhDÁd'S|Em À޽%JңGo.]H>O$kk/iӦkUӏز{*V@j5V-ر#̞BBBx≧yAKZ'ԩp~B Q4l؈Vڤiy2er9 _~9={NqcǎRn}5`Æ8Wbpԭ[{s-߳{nǦU4hu o|tk{av[QڵLw""&ͧ*8MN0{m~%bb=z[Νq܋+jߒ'25N~˗/3q L~RSNQnJ,ӧ\'SxI:r;p6I%I/ӦM;˛oNԩpvaw;w&3Jn:kŜĥ F}ŋGzn\zW7o^ugڴ)v~}5kl6woԪu'֌߽W:vo|gpɮ|%uNATTnf$r|2BD'Ɵ7wҴk2eZ:20'Nt鲮¥nN60lN E>ʕNJppgwTxl&ia6)[ `Ȑ 2`ʗ%ԆfMߤ} &WPǔMS(QΝҹsWbb4/^Cz45>bŊS`!~M^ |RO/@ɒ8s /_;\V+.b pʕ+]$"7wPFgD "+#Tp3S&im~UTҥ˰bŧ 6mng/Ǣ @>)zCg.>t-rȗ/Gv+>}KdaGɒ8u*BCCr%g)YǏ؉дipT8%J &&F|h͛E=]fZ:;wo :~͛+W=Vn}͟FZﻩ_.5L~{Z-Y!V[oM棏>s%\)b@2d2b!.ή76Dn'NXnߔ"AD7-)L&F͚5_0{ """0 aذѣǵuXpq;v{fj_xW`<;"""8qÆL6[S,^n|2gF={E>/_~6lD9xƽȬY3Rv;6رju_WP5kS 227|W_}9C Wq_vl6;vƍФIgӦL0=z{@jʗѣG[֭t2ʕ-Z Q^<{Nz:cGYc-ZSl9yߵ=&&sRB ?/d@K?""K(#D僈x|o.^ի׀Yp|z|8)B I:<*TG$**mӥC4mڜ?_A׮>}O?q9Nԩ]׮]gҷSDEE1plvn2SHz޼hҤ<,ɓwҩS[rI 4,ŶZn˙3yte˖eڴ)]$M?@PPŘ12e~8cGqʖ-πرkkfʗȠAh׮C P]|Nݺ8q,<ӵ_͚YzkݿҩSt0 |sݺ=B2e>yEmUN~}AAATV=SMD_nΆwds7>u|d'̪NINaZy5kwW$Ztޙ!CFвekwѣGPh1 {>CDZyFc٬tPfaa! Z'BG飌O"ADlF)'""D߾OtvwCdϞn#d7nKs""""""~t]'_2 ۲ecveѣӣGWZnﮈdiNAddN^m]_Ϗ=ex|O"?3r(Οdɒٻw*T[һɓ'Mm\z~t`ϦM2|lF| 8=m[f5aayX3~a+NR}rI7X.]%K…̛7;wpJ!u 0bŊЭ? 
(HݺٳWpy~Tx; Nz>yN""{moMaK^?FDDDDD֥Ν4ժUWqO;;v$&&:MڵkW ֭Uذamm11є(QU9Ç1tڵkNǎy睩vטݝ3Ӻu.\8bt"GMnPpL&V?fKvb\ 4n7^p:Уc.\;~NwDҍEnˮKX 'O"ADWF.:'NÏ&H.eΟ?ρ/?+hٲ5}g|<._vLbf|OsMv̞=Çb޼E_U1fn7Lغ.{<1Xv5M4@4oޒutm7n"m۶w@_xc/ qqq9b7o1{/qq-.\$-?7'_|,[ .ϟ??mڴ#88<ғӧ믃^s8͊ (~ǒ~쉈h w?z2Fc9 ]-&%CR7<$!!!)r[˔)ӧ\l5~Briݺ-۶m{+ Fa/x If8 @ .gҤ9ҥt:ٰa'Nvʕq~W@)X0[V+ƎMޮ]^|qC$W\>m~0wDћKҧDGGѫWhO ĉoڵ=WN{osk ",,OZɶİz*"".Ө=꣈X܋."""""W*٪r78Xy(:MwHXV0?t8@}۶ 60{lNx &M*TG}'x=ns͝ޭr4[LYV+N ̉)]t^ƁAZJ2ӺɢEpJSz,889{qGUxܶvDF^[Z_&M`ʗ%˽?_-Z1adPԮ}'ػ7|;ѨQǝm +NAtteذnv;k~wt=MDD̙)RqϞ]|2[*/_>9Vp?}ŋHSٽ{'SLLkӧ>YN8xJJ,ũS +7=R{75k;wnbcX*TIDq8H8=B^sKeUD "(DeD bppǂ+h͚֬5#88N:Qre֬Y۷/+V$,,Çs~w.\ƍ>|8 hѢ 8UVaزe 6JyY d21rh֬ٳga?Æ $,,=z byqQ6n{H^xW`Z}ҿ&Nǰa#)W|:ue?v˗ѯ_֯_C hܸ JvkР!wQפx\PP0'O :: ?f|(.^q/2k֌L?DyO?b\t˗/3o,L&uM 3c4n¨Qc}OL&S/"6VyJLq @!"(D僈xόȒ3qi֬cժUc߾}SZ5׶0ʖ-˾}$ *UW^=R2#ժUcŊ>|vKz 5k> ΧgχH"hњ>}ϵ@XX*T#&֥KWfz_7ޘoSԩ-sѤIS T y9,Y!}$**\rQVf&kAhh(ży)_#d"88͑"qg8I*Mex|O"?3ddTzw[ým۶Ӈ^COή]xiڴ)֭owmѣ5|L4ۯnSzu/^r km?<5/\L60 ..mٓdÆ̙>k|ﮈtql޼Xl6+]vKNa&)fDbzM<6ma`)ڱii7#^{&;f`p82֧5M7gt o7#}C_9} K+_S,c UFd]eD͙cx3"1bbq8>hW;=}fNv'aNΈB’VYr{jR{gYt{fULD$ ɘ BC 11Nkl6MWް˕+7bcl rf;IX)v##c1  bqp\ Վ@h{ 32** $B``3& k# LnAddBX,{N\nJccnwh!$Ľ14RğMchju`+1LnJc(>F|Lh:N6۵Zť+!1j5+;j ol듁Ñ4j5+;j olӍɈJ۴,{l۶-[ʕ+ԩ^%K8rQQQL:URfM ,H۶mywtgΜa̙t Bf c￳rJzg-"Al|CRF:a@.SoD "(DfD^fM ᆦ7nfW\S2ydéTsΥH"<補?޽{MÆ y]mO8W^yVZHǎ>|8AAA̙3W^yyQpaNoəfO)QX:"T, 2BD'_a2VO? k'Qdoql޼Xl6+]o=]ɴl޼8WfF"0-N!"(D僈xόMvjZ!>>ɔ~̖-"""b7pK:,AI\O|ͮmqqqL2[ѱckLjMu[||ӦMkn݄ر)+L8ΝҦMS^|9ΟOQ\3_|ow} ޽3mK^M|R̙ӲecG""c6ȝ;ﮈI 0e%e"AD7%U_J*St֭qm;w&seVt Ou38|/[Zc}bŊW=e (K/BdU ta:&NLEo +W~Ɓ)Xܗ/_ʲe+ɛ7/⡇:r~u w0jպ/9i.>Z@ɒ^&6塇!**%JTv"""x .ʕOGx֩SW/^`Y|!..έaaajugǹsgcΜ6Y75-V8}8;ʲe …s@¬gnOs_D$svG;HI !/"AD7p֭[C6l[3yVZGll,zu#00{mOLf}(|Wf Ivßgڴ7ݻ;&5jo{|>OˣС*TdW(R(ӧEhӦ7o[,^<]}1 0Ђf{e"Nfgە"AD7id)>0[:qp9ؼy#ѱlVtyHo\l6BTT>)#es N(#D僈x|o2E3|#qIHNDDDD!ג2d%eDDDDDDr EDDD!;k圈HVDDEr*ߒ292BD'_aYEDrݠ'5NALQp0e%e"AD7̈1%*08zU$'̊4092BD'_3B,@c1 ̈́I skf7f7-)sL&ϕ+X HeŋػwY o94UDDDDD$P& /_!Cb3Үͦ5`;l ((O=[#I=,)#"""""~L&*TMdd4fs#d V#"F5]Dn8)TpB6m_1뺛&t$VKPƵOhHҨ=Mt\n2BD'ƟB8.)*Tp1Ma6kjq|-;I3ܕ"AD7̈d)fىHʔ"vrN!"(D僈x㯌Bp""""""Y """"ٌø%,)#"""""S/4lƞt^Ήd M|0 ﮈH|XuǞ2BD'ƟB,0jur"RB~o(#D僈x|o.>c2A``&{""YAķκTԏ= ex|O"?3BwD\A\G []_ ͏= ex|O"?3§K\psUK…)R/O!"""rKqsz EmsDDDDDD$Q /_?ɶ-[}R`̞NDDDvz' 7\Ͻ2Up裏xw0L4jԈ^zQHիW9<;wO?eٲe 6޽{""""cODDDDDDz.=M63лwoBBR8s޽㣏>b̙8pɓ'gÒ9NwAD(僈o][r*+#D僈x|o.;v/bŊoHHO=<C)%s: ɂ"ss SA!")D僈xό0ad@ݎŒz}F.ΟwDDD$:i@3#${=yɓc=0h~]8q{;Ooxܒğt}(W?DDDDDDDRsٳdq۵{"~ɍO"ADWFdx w9s+W.v 7o^c1119d#& qqv}AD(D2a88f=@ြCR9"{QF'D "3#2Up_ti>d^7ӨQ#^z% ,?oѣG)^8< :urd-[R cƌqEϤIزe 4lؐ &P@?2 0B|_'"I)D24FՂJ7O"ADgFdifyڵ+|ь13vXȘ1cxصk |ԬYM61c >T’%K߿?~-L>|rrŸq=z4ss8j;fHY?DDDDDDDvk?ӹsg(Pmڴ]rѭ[7iܸ1-[dŊ,_]RvmBBBxꩧؼy3v+W2p@/N6l[lٳ|"""rE&SK8Nkuua]7jԈE}aѢEZ*˗/gСҼysOjV_}}mfUo>VJdd$իWwmX"!!!߿?dJaZ+lNN)m3 HԎMK96qd"i7v co1LnF35##ioW;=}cxvm ,,ZbN2vVFݬ77:#1M>jW;=}fHkW;=}*^*_K>n8bcc]WVUV8a61c}eu]1&+ϟ˗/A|ܶ˗˗/vO nlvbcm&’Մʕ+ZnwcHX BBXٰܶZX,p8`gib!0%d2%kfrv;0 "#֟͝;85\nJccnwh!$Ľ14RğMchju`+1Lޮ1ob1>Nvap vV;qq)aҟMhhIDnw86X+&Sjw cx}<]qAHH av}{[5# +#.ʈ2$߃?OnʈʈʈRF;w0qq ׷2BHV͈ + PF$RF$PFd>#dd6{gϞ4oޜ{ìSk֬t9rnݺ1qDxLu4jyߟ&Ll&&&*U0j(+V`֬Yl޼5j0c Zh>rHxGѣw&wk6mڔCC.DRݘw MO4eo͙wN7Ev}*]eDbʈ̷|9s ʈvs1TF$|9(T(,>iK,OŊٳ'_} ?3'O# (CnΟwlNqED)D2,O2Pg?PF'D "d6#ɓsg￟Yfq9>|yѩSF3DD@ FPs{r)#D僈x|o.Ջ ХK>Co;tޝʕ+ӭ[7_Y( **^,H2q& {C0ex|O"?3"kpB^yz-z-tڕQFa6g/"""rrrkEDDDDDr ϻ˩Sعs'gϞl6SD 6lHB|OfswEĝA$cwSΝO"ADgFdRtiJ(AiW$14NDR|I?4D "(DeDJuڕ7똍7j=w42n5EDDDDDr pW9r$uaԯ_?w `Ν̙3ݻwke"""" 55KDDDDD$pcǎTT^{޽{/_>ԩC"E #**sαg"##[.| UVeEDDDr,4UDDDDD${MS>#ow}Ν;߉$O<)RN:Ѻukn_Y(JD$EILN^]!"(D僈xόT=w߭p8SIDnIs[d D "(DeDMn*DHH."(D2ɵ9y wex|O"?3Bw ,:A$c n%e"AD7EDDD(d'*dQ*d'>)GFFI"MS>H瓂{&Mxعs/l0j""A$ 
wex|O"?3'g}Cѻwoڶmx/l0 lND|m 2BD'Ɵᓿܞ~iV^ر#_|͚5cРA [ٜsg߉H(DρuN_RF!"(D僈x㯌T+2x`6lʕ+)Q>,-[dѢEtŘ&B$|ɘ[妩D "(Df l;Xl_~%s>SN8qFRDDD$q_=.)#"""""SX|Pxx8_|Wɓy睌=DLL Ceܸq,^WɱƵ%er w'޽{k.BCCԩ>(+Wv'44QF""""9^%e2S>Uڜrp1֑}M9ӑtNADd}1Fa8O>w%j!dS>)0a:vH\Gkbxf/fe2}^H|OYک+gsfn_ sZ[(WXv:ORJѺukv0SJtDF")D2inEwDDDDDB({(egCk.N>M|ќpݒ2Yݰ3t+Uh vz>zl"(THg!EwǏ[oE߾}QyIOPP/N%Yl"44+Nk"㶤)3/܍;0rD[@,b2w 2BD'Ɵᓂѣ1L8p,lֻ"2H>ZÞ?ܞŜ~+#D僈x|o>))H2:sKX}iwG $$J|RpyUi."""""/X6nȁ&O<ԪU-[enzm;_XTo[O 'No߾:u`BCCjRR%-ZD…}q*œNxݬDDQ>dMdؒ|sp7ҷ2BD'ƟU5NJɱ y͹Ԏ9.Zd>.vʲQگ{!ڢz{9~!WN ߝ,I r̘|}I㓂SO=Ett4K,aٲeÀx'|q|5-cLqXODr8q{@U3EA?1p⨿{-0tPѣG"o޼/_g,0fs`\"rH +ܝA4Yi )G&ut}dv dX[ (4 S{? q؝(" ݋ >RJ_6)وaZ ɂ"sXoMaKތ5 R5]*o",HPFxoi1?>ؿ}#HE|O7O gΜDFFb\ցdbƍ8dq&)_D"q&n&˰&m92LA%7GUFR=Ĩڔ"ADWFQػw/ 6J*LZVd6 !**."I(DRVpk)G}{sTOg[21Lafm~H "(DfO {eΜ94l͉r.)c臣 ۵M70|3]Hr#ZIDDDD$RWEp¾hJDDD䖔t)K$-ߜ"-qHi-)#""""m>dz-Ν;DDDDn9I ^Rڬh&pw wK:]wl'ԩâEh֬  W\nuT _p<V{҂p.|؈#8|0͚5#i-4z5,H :OpOR 3mIn>ʈh w@ ")DfO^߿9sШQ#_4'"""rI:5o|qjo%Yݤ%eDDDDD |H"vmhJ1DA\G k3iAMif+#RВ2"|ϔ"?3'A$ "A;}gۜW-5ܕkh"ADWF_ÇiҤ K&444>~/N%"""#9do\}y+n vM@. _8xAEDDDD zD(Q͉r29`IR8b;@\>-8q$kHtR_4.gfٲeDEEqwkQT)~g~m=Jygԩ%Kl2Ο?O*U3f 5j$ϤIزe 4lؐ &P@DDD֒nJ2&#Z;5pj#_/z$wKY8y,tIoڪ-~쐈O";w~+V$}mٲeY%KO?QR%-ZĹs8p >(?3cƌaܸq۷M61c |MmF-߿?111L>|r `>NeV H}g(DR4-)ޗhƵl: qDq&AuR0$#*#Hz۫a*_So+(D僈xόIj2f7o3|8*T ,,c2vX֮]Kr֭4nܘ-[b /_N׮]]6!!! @BX"ӧOgԩ8gϞɓ\rsE6lٿ?ժUsۿZj|W߿ۻfVʾ}Z*T^ݵbŊ~-LH6kSyaFvlZȱL&Id2A@u1g]ߎvz\?IH{c׷0{dK63*#t}Og .)c1]]#IlIrgd(#Rgc8׮KUF1ѯ#nwt듮oFf|-2"=}1fH/׮]˄ h׮rb<Ӿ8 gΜ믿f…!C;v,qqq DDD/_>DDD7o^yuAAglvbcm&Bsj,rX+6BBܶbb)a@HH  8V%Pv' ׭#ib!0% vj,fܹݎ3 Ȅ5fsN\nJccnwh!$Ľ14RğMchju`+1Lޮ1ob1>Nvap vV;qq)aҟMhhfcLq m6VLԮ@x}<]qAHH a^V͈7bcMƦ>ezL|ɝ0KJ[F\+47Q/6ʈk2fe2_G$$..3:")eD[5#,|0k׷2BH@H//_63< uyꩧ\ӯ_?7n3=5VXbNATT\ G%zn|lJ&7.F?ēk;)1J|'.N|uFf+Lv9Rjcخfnw$۞ؾv37ɷyCv3>v۽v1vSv #o0؄?<{c0ުcF@aV,QM5c1H2=6&(},e#&s (#u_G$CLLB!n팸2"2ʇO$lSF\HPFd$#<K.͎;(]tmvx8 g,Y0l._L(P@~}"""ȝ;k+W(TP)I{wU}}f IH0l"*X7(ZxBq֖j^+V[qz"T/j+zw vLf@fάy=̜曓wf>tu_ͬOŰLv cv3S1]h_k!y4KJM)k˲MdD۵1oΒ91n dDWiavͬO"Йf"bߙfS\4OԜ9stwߕ$}'zumO0[ZjUvkܸqZreW\QFIjkkUWW jԨQoT]]O>WmmmH$vIFC1EZÖ)5Ï[bf̬#. [ \rN9:sڪK/T~N:$]|vFr4m4=/m6w}4iLk|Zt.]3H|J>3°2J(lҘ."MηV~ sƙr6,$/9(+}_;ׅmˇ&!zu$d{@{t?}Lip{.,CH8*+fؽ9FWpcJKd>ǝ @*̈ =zPMMM+JESpH w)9hjf.@IUW_}U=z 'SN9ECoPxYgeןl:sPpeᢩh@QK{_:,k:4yd=ڴi}(i+f$ƙZ1EZmORl#ˊ]R;Pt2z^{sSO=Ecџ'M0A{|I544W# w#v_=!c%ecۤCkѫx@]wݥc9ۻ ӴJtF>}_ 5ܳ?={w2"M!όSCNSn[G`P;w0(P.|{bvzK|!IF3Y @*ޑ]Vߴh"mڴI9眣ɓ'kv0 8Zn㐮7;ݾw]ɐ1h*| |fDFZ[[멧[o޽{Oɓ5rH"a",ҶPr2=h$%dd2OeڰL2]0!όHU^^xAn[ƍӼyt1L=l^0f+s~Qp炩Y#<` O?z!Chƍz'}tP"ۥ4/*EgK`Oq5"K8\@n]p?ys;=L^ fAr2@1Jng?P,b%"3/jnHk2dDP,'H"$G>H%Ʋ$?v %e8kĊ|jIɈ(fqɐRgF0ur8$F>m 5D_Rdi/۹ X@ @* q8 {kA>rTE \4զUɈ%e(ɑRgFPpȣ؂{2Fg[9Z-0 "o deI(QpȣHBtg[nټ;bD/ wl]zQGJij~j o^z\ȴ.sQqIm-ܬ^i=\]R֪U=iE>5 *|t K |Yvu-CjkP}3I?B8`X;@2TiKz衇  &WEE|r-[L7p-Ea3MKMM|w@"6ikۖL[DNFY_~qcu( d3#~o;}W9I&/-Z;.^ۃ}yFkrmd60z屿uSP>hmDŲ'ڼ!i_; ]LKZt.o0(p }2M+P@ PpwmMߊ,2kQϬiћ eO3²,Ys+7Lӻ_JA!όnwV7ҕut/Yݮrex%ãP% ['I;}/N]!04rU{3OeԾzTH=k(rE|J2– tkժU>|֦_O>$$WpO\ݭ8Qpxb+wfxE*zre4Pl)tMr8zt\HrM7daJJ})مB {.ZH1tPN,Ӝ1G ]Zee~7 555\ REE()~+::IRQ ûOpWg.wv ;uZR Ё2;G3MKMMm\@'ٿ[>p~T0zy#K˜}F^aߺJ d |fm~}[ҩ;O[lі-[4m4u8~H|It{w=PpՒ2E!fT +#l)?C;wN_^o#n[twq80TV"_:!ξon\yDמlEpwأrLgD w'3;}>@*[<䓺_R| իjKv0$%~t1T/I۹z;{vّ%er1=K}FĮឃ Н}>@*[ 6mҘ1c8pviaJ l$vund(R\451`[j⺤%쵖M5ʮ=9~r2:@0Y@-Ӣ=XtMjmmرcevܩիW[nI'daJ'5.S PmIp@f˻3gj͚5={ ÐeY:3dY=X͜9ӎàYeqp x6EW}kOឋ%e?)k*1ȖYC>H%a˻JO￯>@MMMa#Fq˲w7 jNgjGyLd7>#dM!ό]iҤI5jFeG(RNCϞ@;{mMtѣcg+Cl_035܁*|UTL;tq .?|>;Eq8 UTxpd9C>.? 
accelerate-1.9.0/benchmarks/fsdp2/main.py000066400000000000000000000072111503574341000202450ustar00rootroot00000000000000# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from typing import Callable

import torch
from accelerate import Accelerator
from utils import parse_args, prepare_accelerate, prepare_torch


MODEL_NAME = "Qwen/Qwen2.5-1.5B-Instruct"
LEARNING_RATE = 3e-5

CONFIG = {
    "model_name": MODEL_NAME,
    "learning_rate": LEARNING_RATE,
}


def train(
    model: torch.nn.Module,
    optimizer: torch.optim.Optimizer,
    train_dataloader: torch.utils.data.DataLoader,
    accelerator: Accelerator,
) -> torch.Tensor:
    losses = []
    for batch in train_dataloader:
        optimizer.zero_grad()
        outputs = model(**batch, use_cache=False)
        loss = outputs.loss
        losses.append(loss.item())

        accelerator.backward(loss)
        optimizer.step()

    return torch.tensor(losses)


def evaluate(args, config: dict, init_fn: Callable, run_name: str) -> torch.Tensor:
    model, optimizer, dataloader, accelerator, memory_tracker = init_fn(args, config)

    loss = train(model, optimizer, dataloader, accelerator)
    memory_tracker.stop()

    msg = f"""Results for {run_name} (rank 0):
    Loss: {loss[-1].item()}
    Peak Allocated Memory: {float(memory_tracker.peak_allocated_memory):.2f} MB
    Peak Reserved Memory: {float(memory_tracker.peak_reserved_memory):.2f} MB
    {"-" * 34}"""
    accelerator.print(msg)
    return loss


def main():
    args = parse_args()

    evaluations = [
        functools.partial(
            evaluate,
            init_fn=functools.partial(prepare_torch, post_shard_optimizer=False, apply_optimizer_fix=True),
            run_name="Optimizer Before FSDP (w/ fix)",
        ),
        functools.partial(
            evaluate,
            init_fn=functools.partial(prepare_torch, post_shard_optimizer=False, apply_optimizer_fix=False),
            run_name="Optimizer Before FSDP (w/o fix)",
        ),
        functools.partial(
            evaluate,
            init_fn=functools.partial(prepare_torch, post_shard_optimizer=True),
            run_name="Optimizer After FSDP",
        ),
        functools.partial(evaluate, init_fn=prepare_accelerate, run_name="Accelerate"),
    ]
    labels = [
        "Optimizer Before FSDP (w/ fix)",
        "Optimizer Before FSDP (w/o fix)",
        "Optimizer After FSDP",
        "Accelerate",
    ]

    results = {}

    torch.use_deterministic_algorithms(True)
    for evaluation, label
in zip(evaluations, labels): results[label] = evaluation(args, CONFIG) torch.testing.assert_close( results["Optimizer After FSDP"], results["Optimizer Before FSDP (w/ fix)"], msg="Optimizer After FSDP and Optimizer Before FSDP (w/ fix) should be the same", ) torch.testing.assert_close( results["Optimizer After FSDP"], results["Accelerate"], msg="Optimizer After FSDP and Accelerate should be the same", ) torch.testing.assert_close( results["Accelerate"], results["Optimizer Before FSDP (w/ fix)"], msg="Accelerate and Optimizer Before FSDP (w/ fix) should be the same", ) torch.distributed.destroy_process_group() if __name__ == "__main__": main() accelerate-1.9.0/benchmarks/fsdp2/measure_utils.py000066400000000000000000000110171503574341000222010ustar00rootroot00000000000000# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import json import os import threading import time import psutil import torch from accelerate import PartialState class MemoryTracker: def __init__( self, device: torch.device, output_directory: str, run_name: str, save_memory_snapshot: bool, log_interval: float = 0.01, ): """Class for tracking gpu and cpu memory usage of the process. Args: device (`torch.device`): PyTorch device to monitor. output_directory (`str`): Directory to save the memory usage data to, will be created if it doesn't exist. run_name (`str`): Name of the run, will be used to name the output files. save_memory_snapshot (`bool`): Whether to also save `torch.cuda.memory._dump_snapshot` to the output directory. log_interval (`float`, *optional*): Interval in seconds between memory measurements. Defaults to 0.01. 
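        Example (illustrative usage; the device, output directory and run name below are
        placeholders, mirroring how the benchmark scripts drive the tracker):

            tracker = MemoryTracker(torch.device("cuda"), "benchmark_results", "accelerate", save_memory_snapshot=False)
            tracker.start()
            ...  # build the model and run the training loop
            tracker.stop()
            print(tracker.peak_allocated_memory, tracker.peak_reserved_memory)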
""" self.log_interval = log_interval self.save_memory_snapshot = save_memory_snapshot self.output_directory = output_directory self.run_name = run_name self.timestamps = [] self.allocated_memory = [] self.reserved_memory = [] self.virtual_memory = [] self.start_time = None self.running = False self._thread = None self._state = PartialState() self._process = psutil.Process() self._device = device self.torch_accelerator_module = getattr(torch, device.type, torch.cuda) def _monitor(self): self.start_time = time.time() while self.running: allocated = self.torch_accelerator_module.memory_allocated(self._device) / (1024 * 1024) reserved = self.torch_accelerator_module.memory_reserved(self._device) / (1024 * 1024) virtual_memory = self._process.memory_info().rss / (1024 * 1024) self.allocated_memory.append(allocated) self.reserved_memory.append(reserved) self.virtual_memory.append(virtual_memory) self.timestamps.append(time.time() - self.start_time) time.sleep(self.log_interval) def start(self): gc.collect() self.torch_accelerator_module.empty_cache() if self.output_directory: os.makedirs(self.output_directory, exist_ok=True) if self.save_memory_snapshot: self.torch_accelerator_module.memory._record_memory_history() self.running = True self._thread = threading.Thread(target=self._monitor) self._thread.daemon = True self._thread.start() def stop(self): self.running = False if self._thread: self._thread.join() if self.save_memory_snapshot and self._state.is_main_process and self.output_directory: output_file = os.path.join(self.output_directory, f"{self.run_name}_memory_snapshot.pkl") self.torch_accelerator_module.memory._dump_snapshot(output_file) if self._state.is_main_process and self.output_directory: path = os.path.join(self.output_directory, f"{self.run_name}_memory_usage.json") with open(path, "w") as f: json.dump( { "timestamps": self.timestamps, "allocated_memory": self.allocated_memory, "reserved_memory": self.reserved_memory, "virtual_memory": self.virtual_memory, }, f, ) if self.save_memory_snapshot: self.torch_accelerator_module.memory._record_memory_history(False) self.torch_accelerator_module.empty_cache() @property def peak_allocated_memory(self): return max(self.allocated_memory) @property def peak_reserved_memory(self): return max(self.reserved_memory) accelerate-1.9.0/benchmarks/fsdp2/utils.py000066400000000000000000000272131503574341000204650ustar00rootroot00000000000000# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import argparse from types import MethodType from typing import Union import torch from datasets import load_dataset from measure_utils import MemoryTracker from torch.distributed.fsdp import MixedPrecisionPolicy, fully_shard from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, DataCollatorForLanguageModeling from transformers.models.qwen2.modeling_qwen2 import Qwen2DecoderLayer from accelerate import Accelerator, FullyShardedDataParallelPlugin from accelerate.state import AcceleratorState, is_initialized from accelerate.utils import convert_outputs_to_fp32, set_seed SEED = 421 def get_named_parameters(model: torch.nn.Module, drop_refs: bool = False) -> dict[str, Union[torch.Tensor, int]]: """ This function returns a dictionary mapping the parameter names to their data pointers or the original parameters if `drop_refs` is `False`. It is used to get the original parameter names before `fully_shard` is applied. We only return the data pointers, so we drop the references to the original parameters and `fully_shard` will then trigger a new allocation for the sharded ones. Args: model (`torch.nn.Module`): Model instance to get the named parameters from drop_refs (`bool`, *optional*, defaults to `False`): Whether to drop the references to the original parameters Returns: `dict[str, Union[torch.Tensor, int]]`: Dictionary mapping the parameter names to their data pointers or the original parameters if `drop_refs` is `False` """ named_parameters = {} for n, p in model.named_parameters(): # We only preserve the data pointers to have the unique 1:1 mapping between the original and the sharded parameters named_parameters[n] = p.data_ptr() if drop_refs else p return named_parameters def replace_optimizer_params(optimizer: torch.optim.Optimizer): """ This function is called before using `fully_shard` on the model. It replaces the parameters of the optimizer with empty tensors, so `fully_shard` can trigger a new allocation for the sharded ones. After this, we swap the parameters `data_ptr` to the original one, so we can reuse that later to map the sharded parameters to the original ones. This function modifies the optimizer in-place. Args: optimizer (torch.optim.Optimizer): Optimizer instance which contains the original model parameters """ for param_group in optimizer.param_groups: for i, p in enumerate(param_group["params"]): # We drop a reference to the original param here, so that _move_states_to_device triggers a reallocation # This is required or else the `fully_shard` -> `_move_states_to_device` uses the original memory address # for the sharded parameters, and we get a weird/undefined behavior. param_group["params"][i] = torch.empty_like(p) # We save the original data_ptr, so we can swap back the parameters later param_group["params"][i].data_ptr = p.data_ptr() def swap_back_optimizer_params( model: torch.nn.Module, optimizer: torch.optim.Optimizer, old_named_parameter_pointers: dict[str, int] ): """ This function is the counterpart of `replace_optimizer_params`. It is called after `fully_shard` being applied to the model. It swaps the parameters of the optimizer to their sharded counterparts. It is done using the `data_ptr` mapping prepared in `replace_optimizer_params` and `get_named_parameters`. 
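    Typical call order, as used in `prepare_torch` below (illustrative sketch; the real
    benchmark applies `fully_shard` per decoder layer and then to the whole model):

        old_ptrs = get_named_parameters(model, drop_refs=True)
        replace_optimizer_params(optimizer)
        fully_shard(model)  # reallocates the parameters as sharded tensors
        swap_back_optimizer_params(model, optimizer, old_ptrs)
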
Args: model (`torch.nn.Module`): Model instance to get the new named parameters from optimizer (`torch.optim.Optimizer`): Optimizer instance to swap the parameters of old_named_parameter_pointers (`dict[str, int]`): Dictionary mapping the original parameter names: data_ptrs to the new ones """ # We get the new named parameters after `fully_shard` being applied # We don't drop the references as we need the sharded parameters now new_named_parameters = get_named_parameters(model, drop_refs=False) # We create a mapping from the original data_ptr to the new sharded param corresponding to it mapping = {p: new_named_parameters[n] for n, p in old_named_parameter_pointers.items()} for param_group in optimizer.param_groups: # We swap the parameters of the optimizer to the new sharded ones param_group["params"] = [mapping[p.data_ptr] for p in param_group["params"]] def parse_args(): parser = argparse.ArgumentParser() parser.add_argument( "--output_dir", type=str, help="Directory to save the benchmarking results.", ) parser.add_argument( "--save_memory_snapshot", action="store_true", default=False, help="If True, `torch.cuda.memory._dump_snapshot` will be used to additionaly save the memory trace.", ) ###################### # Training arguments # ###################### parser.add_argument( "--batch_size", type=int, default=2, help="Batch size for the training loop.", ) parser.add_argument( "--block_size", type=int, default=128, help="The maximum sequence length to use with the model.", ) parser.add_argument( "--dataset_fraction", type=float, default=1.0, help="Fraction of the dataset to use.", ) return parser.parse_args() def prepare_dataloader(tokenizer, args, accelerator: Accelerator) -> DataLoader: dataset = load_dataset("tiny_shakespeare", split="train", trust_remote_code=True) def tokenize_function(example): return tokenizer( example["text"], ) dataset = dataset.map( tokenize_function, batched=True, remove_columns=["text"], ) block_size = min(tokenizer.model_max_length, args.block_size) def group_texts(examples): concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()} total_length = len(concatenated_examples[list(examples.keys())[0]]) total_length = (total_length // block_size) * block_size result = { k: [t[i : i + block_size] for i in range(0, total_length, block_size)] for k, t in concatenated_examples.items() } result["labels"] = result["input_ids"].copy() return result dataset = dataset.map(group_texts, batched=True) dataset = dataset.select(range(int(len(dataset) * args.dataset_fraction))) def collate_fn(examples): return DataCollatorForLanguageModeling( tokenizer=tokenizer, mlm=False, )(examples) dataloader = DataLoader( dataset, batch_size=args.batch_size, collate_fn=collate_fn, ) dataloader = accelerator.prepare(dataloader) return dataloader def get_model(model_name: str): # We reguire model to be loaded in fp32, otherwise benchmarks don't match as accelerate does upcasting of parameters to fp32 config = AutoConfig.from_pretrained(model_name, trust_remote_code=True, torch_dtype=torch.float32) model = AutoModelForCausalLM.from_config(config) return model def get_tokenizer(model_name: str): tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True) tokenizer.pad_token = tokenizer.eos_token return tokenizer def prepare_torch( args, config: dict, post_shard_optimizer: bool = False, apply_optimizer_fix: bool = False ) -> tuple[torch.nn.Module, torch.optim.Optimizer, torch.utils.data.DataLoader, Accelerator]: mp_policy = MixedPrecisionPolicy( 
param_dtype=torch.bfloat16, reduce_dtype=torch.bfloat16, output_dtype=torch.bfloat16, ) accelerator = Accelerator(mixed_precision="bf16") set_seed(SEED) is_fixed = "fixed" if apply_optimizer_fix else "not_fixed" is_post_shard = "optimizer_after_fsdp" if post_shard_optimizer else "optimizer_before_fsdp" run_name = f"torch_{is_post_shard}" if post_shard_optimizer else f"torch_{is_post_shard}_{is_fixed}" tokenizer = get_tokenizer(config["model_name"]) train_dataloader = prepare_dataloader(tokenizer, args, accelerator) memory_tracker = MemoryTracker(accelerator.device, args.output_dir, run_name, args.save_memory_snapshot) memory_tracker.start() model = get_model(config["model_name"]) optimizer = None if not post_shard_optimizer: optimizer = AdamW(model.parameters(), lr=config["learning_rate"]) if apply_optimizer_fix: # We drop the references to the original parameters, so that `fully_shard` can trigger a new allocation # Then we get the `module_name: data_ptr` mapping, so we can swap back the parameters later old_named_parameters = get_named_parameters(model, drop_refs=True) # We replace the parameters of the optimizer with empty tensors, so that `fully_shard` can trigger a new allocation # We also change the `data_ptr` of the parameters to the original ones, so we can swap back the parameters later replace_optimizer_params(optimizer) for module in model.modules(): if isinstance(module, Qwen2DecoderLayer): fully_shard(module, mp_policy=mp_policy) fully_shard(model, mp_policy=mp_policy) # We do this to imitate how accelerate forces outputs to be in fp32 via `convert_outputs_to_fp32` autocast_context = torch.autocast(device_type=accelerator.state.device.type, dtype=torch.bfloat16) model_forward_func = model.forward.__func__ new_forward = autocast_context(model_forward_func) model.forward = MethodType(new_forward, model) model.forward = MethodType(convert_outputs_to_fp32(model.forward.__func__), model) if post_shard_optimizer: optimizer = AdamW(model.parameters(), lr=config["learning_rate"]) if not post_shard_optimizer and apply_optimizer_fix: # We swap back the parameters of the optimizer to the original ones swap_back_optimizer_params(model, optimizer, old_named_parameters) return model, optimizer, train_dataloader, accelerator, memory_tracker def prepare_accelerate( args, config: dict ) -> tuple[torch.nn.Module, torch.optim.Optimizer, torch.utils.data.DataLoader, Accelerator]: if is_initialized(): AcceleratorState()._reset_state(True) fsdp_plugin = FullyShardedDataParallelPlugin( fsdp_version=2, auto_wrap_policy="transformer_based_wrap", transformer_cls_names_to_wrap=["Qwen2DecoderLayer"], ) accelerator = Accelerator( fsdp_plugin=fsdp_plugin, mixed_precision="bf16", ) set_seed(SEED) tokenizer = get_tokenizer(config["model_name"]) train_dataloader = prepare_dataloader(tokenizer, args, accelerator) memory_tracker = MemoryTracker(accelerator.device, args.output_dir, "accelerate", args.save_memory_snapshot) memory_tracker.start() model = get_model(config["model_name"]) optimizer = AdamW(model.parameters(), lr=config["learning_rate"]) model, optimizer = accelerator.prepare(model, optimizer) return model, optimizer, train_dataloader, accelerator, memory_tracker accelerate-1.9.0/benchmarks/fsdp2/visualize.py000066400000000000000000000104531503574341000213360ustar00rootroot00000000000000# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import json import matplotlib.pyplot as plt def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--dir", type=str, help="Directory containing the memory usage data") parser.add_argument( "--memory_threshold", type=int, default=0, help="Memory threshold to filter data that is below this value (only filters 1st `--filter_partition` of the points which should roughtly correspond to the model loading)", ) parser.add_argument( "--filter_partition", type=float, default=1 / 3, help="Partition to drop data from that are below the memory threshold", ) return parser.parse_args() def filter_data(data, memory_threshold, filter_partition, key): timestamps = data["timestamps"] memory = data[key] mid_point = int(len(timestamps) * filter_partition) filtered_times = [] filtered_memory = [] for i, (t, m) in enumerate(zip(timestamps, memory)): if i < mid_point and m < memory_threshold: continue filtered_times.append(t) filtered_memory.append(m) return filtered_times, filtered_memory def compare_memory_usage(data, labels, memory_threshold, filter_partition): plt.style.use("seaborn-v0_8") colors = ["#2ecc71", "#e74c3c", "#3498db", "#f1c40f"] fig1, ax1 = plt.subplots(figsize=(15, 5)) for data_item, label, color in zip(data, labels, colors): timestamps, allocated = filter_data(data_item, memory_threshold, filter_partition, "allocated_memory") ax1.plot(timestamps, allocated, label=label, color=color, linewidth=2) ax1.set_xlabel("Time (s)", fontsize=12) ax1.set_ylabel("Allocated Memory (GB)", fontsize=12) ax1.set_title("Allocated Memory Usage Over Time", fontsize=14, pad=15) ax1.grid(True, linestyle="--", alpha=0.7) ax1.legend(frameon=True, fancybox=True, shadow=True, fontsize=10) ax1.spines["top"].set_visible(False) ax1.spines["right"].set_visible(False) plt.tight_layout() fig2, ax2 = plt.subplots(figsize=(15, 5)) for data_item, label, color in zip(data, labels, colors): timestamps, reserved = filter_data(data_item, memory_threshold, filter_partition, "reserved_memory") ax2.plot(timestamps, reserved, label=label, color=color, linewidth=2) ax2.set_xlabel("Time (s)", fontsize=12) ax2.set_ylabel("Reserved Memory (GB)", fontsize=12) ax2.set_title("Reserved Memory Usage Over Time", fontsize=14, pad=15) ax2.grid(True, linestyle="--", alpha=0.7) ax2.legend(frameon=True, fancybox=True, shadow=True, fontsize=10) ax2.spines["top"].set_visible(False) ax2.spines["right"].set_visible(False) plt.tight_layout() return fig1, fig2 if __name__ == "__main__": args = parse_args() DIR = args.dir with open(f"{DIR}/torch_optimizer_before_fsdp_not_fixed_memory_usage.json") as f: optimizer_before_fsdp_not_fixed = json.load(f) with open(f"{DIR}/torch_optimizer_after_fsdp_memory_usage.json") as f: optimizer_after_fsdp = json.load(f) with open(f"{DIR}/torch_optimizer_before_fsdp_fixed_memory_usage.json") as f: optimizer_before_fsdp_fixed = json.load(f) with open(f"{DIR}/accelerate_memory_usage.json") as f: accelerate = json.load(f) data = [optimizer_before_fsdp_not_fixed, optimizer_before_fsdp_fixed, optimizer_after_fsdp, accelerate] labels = [ "Optimizer Before FSDP (w/o fix)", "Optimizer Before FSDP (w/ 
fix)", "Optimizer After FSDP", "Accelerate", ] fig1, fig2 = compare_memory_usage(data, labels, args.memory_threshold, args.filter_partition) fig1.savefig(f"{DIR}/allocated_memory.png") fig2.savefig(f"{DIR}/reserved_memory.png") accelerate-1.9.0/benchmarks/torch.compile/000077500000000000000000000000001503574341000204765ustar00rootroot00000000000000accelerate-1.9.0/benchmarks/torch.compile/README.md000066400000000000000000000150201503574341000217530ustar00rootroot00000000000000# Regional Compilation Benchmark This benchmark compares different compilation strategies using PyTorch's `torch.compile` and Accelerate's `compile_regions` utility, which is based on the recipe in [PyTorch documentation](https://pytorch.org/tutorials/recipes/regional_compilation.html). ## Overview The benchmark evaluates three approaches: - **Baseline**: No compilation, standard PyTorch eager execution. - **Full compilation**: Using PyTorch's `torch.compile()` on the entire model. - **Regional compilation**: Using `accelerate.utils.compile_regions()` which targets specific blocks of the model to optimize compilation time. Each approach is tested with different batch sizes (1 and 4) and sequence lengths (128) on various LLaMA-based models ranging from 1B to 13B parameters. We purposefully run the forward pass outside of the `torch.no_grad()` context to simulate performance in a training environment, where gradients are needed. ## Usage To run this benchmark: ```bash python regional_compilation.py ``` The script will automatically download the model configurations, create models, and benchmark both compilation and inference times across different scenarios. ## Requirements - Suitable GPU memory for the models being tested. - PyTorch with CUDA support. - Transformers library. - Accelerate library. ## Results The benchmark results are summarized in the following figures: - Compilation time is how long it takes to run the first forward pass. - Speedup factor is the ratio of non-compiled baseline inference time to the fully/regionally compiled inference time.
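
The snippet below sketches how the three strategies and the two reported quantities can be reproduced. It is illustrative only: the timing helper is a simplification of what `regional_compilation.py` does, and it assumes `compile_regions` can be imported from `accelerate.utils` and applied directly to the loaded model.

```python
import time

import torch
from accelerate.utils import compile_regions
from transformers import AutoModelForCausalLM


def first_and_steady_state_ms(model, input_ids, steps: int = 10):
    """Return (first forward pass latency, mean steady-state latency) in milliseconds."""
    torch.cuda.synchronize()
    start = time.perf_counter()
    model(input_ids)  # first call: includes compilation time for compiled models
    torch.cuda.synchronize()
    first_ms = (time.perf_counter() - start) * 1e3

    timings = []
    for _ in range(steps):
        torch.cuda.synchronize()
        start = time.perf_counter()
        model(input_ids)
        torch.cuda.synchronize()
        timings.append((time.perf_counter() - start) * 1e3)
    return first_ms, sum(timings) / len(timings)


model_name = "NousResearch/Llama-3.2-1B"  # smallest model in the tables below
input_ids = torch.randint(0, 128, (1, 128), device="cuda")

baseline = AutoModelForCausalLM.from_pretrained(model_name).cuda()                    # no compilation
full = torch.compile(AutoModelForCausalLM.from_pretrained(model_name).cuda())         # full compilation
regional = compile_regions(AutoModelForCausalLM.from_pretrained(model_name).cuda())   # regional compilation

for name, m in [("baseline", baseline), ("full", full), ("regional", regional)]:
    first_ms, steady_ms = first_and_steady_state_ms(m, input_ids)
    print(f"{name:>8}: first call {first_ms:8.1f} ms | steady-state inference {steady_ms:6.1f} ms")
```

The figures and tables below report these two quantities (and the derived speedup factor) for each model and batch size.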

Compilation Time

Speedup Factor

Full results are available in the tables below: ```markdown [-------------------------------------------------- NousResearch/Llama-3.2-1B ---------------------------------------------------] | Inference time (1x128) | Inference time (4x128) | Compile time (1x128) | Compile time (4x128) 1 threads: ----------------------------------------------------------------------------------------------------------------------- Baseline | 18.3 | 18.4 | | Full compilation | 6.3 | 10.0 | 10696.4 | 10248.0 Regional compilation | 9.7 | 10.0 | 1952.7 | 2903.9 Times are in milliseconds (ms). [---------------------------------------------- NousResearch/Hermes-3-Llama-3.2-3B ----------------------------------------------] | Inference time (1x128) | Inference time (4x128) | Compile time (1x128) | Compile time (4x128) 1 threads: ----------------------------------------------------------------------------------------------------------------------- Baseline | 33.4 | 33.6 | | Full compilation | 11.2 | 23.9 | 17857.5 | 17736.5 Regional compilation | 17.3 | 23.7 | 2993.2 | 2478.8 Times are in milliseconds (ms). [---------------------------------------------- NousResearch/Hermes-3-Llama-3.1-8B ----------------------------------------------] | Inference time (1x128) | Inference time (4x128) | Compile time (1x128) | Compile time (4x128) 1 threads: ----------------------------------------------------------------------------------------------------------------------- Baseline | 40.3 | 59.5 | | Full compilation | 18.9 | 54.4 | 20437.8 | 20152.3 Regional compilation | 19.7 | 54.0 | 2903.1 | 2438.0 Times are in milliseconds (ms). [--------------------------------------------- NousResearch/Nous-Hermes-Llama2-13b ----------------------------------------------] | Inference time (1x128) | Inference time (4x128) | Compile time (1x128) | Compile time (4x128) 1 threads: ----------------------------------------------------------------------------------------------------------------------- Baseline | 45.5 | 100.4 | | Full compilation | 29.4 | 89.7 | 23099.4 | 22885.9 Regional compilation | 29.4 | 87.5 | 2945.5 | 2526.2 Times are in milliseconds (ms). ``` ## Results Summary ### Compilation Time Regional compilation provides significantly faster compilation times compared to full model compilation: - **Full compilation**: Takes ~10-23 seconds depending on model size. - **Regional compilation**: Takes only ~2-3 seconds across all model sizes. - **Speed improvement**: Regional compilation is **5-9x faster** to compile. ### Inference Time Regional compilation delivers inference performance close to full compilation: - For batch size 1: - For smaller models (1B-3B): Full compilation has a slight edge over regional compilation. - For larger models (8B-13B): Regional compilation performs similarly to full compilation. - For batch size 4: Regional compilation performs similarly to full compilation across all models. ## Key Takeaways 1. **Comparable Performance**: Regional compilation delivers performance speedups similar to full compilation, especially for larger models. 2. **Faster Compilation**: Regional compilation significantly reduces the time taken to compile models, making it a more efficient choice for deployment. 3. **Batch Size Impact**: At batch size 4, full compilation and regional compilation perform nearly identically. 4. **Model Size Impact**: Even with a small batch size, full compilation and regional compilation perform similarly for larger models (8B-13B). 5. 
**Practical Application**: For real-world applications, regional compilation is a practical choice for optimizing training cold start times, especially when working with large models.
Zߏ1zr_r޶m8ӣO>QfJXbE1jԨxwK?qqQGgѲÇ8p`|ux駟> JJJʽnݺqizˆ׿of,[kիsL 0 nM!Z}FD}k#A_\}P9;Sz޼yQRR999ϚAڵ+e sV6sŋWlnUV|3gΌ ʕ+Yf뮻?z3뮻uvv*5蚗W;S笈?<v!y(**m޼yGDĒ%KJ_/"c3 ,⊘<{*4h՞孷ފwtҵ^|6lXk&z:}zuY3qw,F6l$I*UsTUVt=wkNڵZСC1"co}5Y'OjY|x73N=TAN禛nEEEoTԩSO>cr5}/o$I 2$>2dH/WX]w]VGE߾}|yE?<xM /o~())+VW_/~m6o9U@el2yرczoȐ!qI's=VN:7n\QPPP*ǿ*Rwŕ`h~~~^&M*}ZtiL4)cX]v٥ޚ=WĦ|N's[[nTg&M /ÇرcW_[o5uq… ctݸq8c}<3o|#G+:>8*DYwqG̚5+k[xq{B>f袋bС1zbĈ1x8cJ=@Zɓ'ǕW^wJW^1dȐx뭷bԨQR7|s9r 9ʭ*UV1tC$I?*?n]tMFÆ FY{'f>[֭[G?O;6~WƍŒC9dg=xwo&-Z7p/T-ZT|ekQPP^o6qO͍ԩSTSf7sP}ի]t.]D׮]K.zO>$z行w9xhٲezQFѶm8c9sf <8 w}SO^zEZJ}ߠAhݺu+>ø袋bǎCSO=2[ɓ'g}ec Jģ>w'Gnu7j(w=ztzoѷo ^xqL2%cooO?-bŊڝ;w5kV#coq'gG*\'@vu-vN~c}4(cƲ~q}e|X*w1k֬x7Ox󩧞kѢEtС~:cHA_Bg9昌;3gάpCf="y.:kذaquI':tucȑ{]tQjjgoᆨSNz]PPWիWo>coرӺ <8cݮ] ٳԋ(,,jA ,**wq̙3\=׹s8C3㎘7o:xQPP^׮];W^xazرV>SN;3*tFػKvN8h۶mz]RR^{mW_}w}w^\ѩSϝwY~ul^z饌uzw:ۢE83^y啵QF\É'Xj?ܠ{Μ93{qo=#}٧R{3|6_~e|'{~xfלoZE L$.?I.^dI׿.73I馛^կ~U?:蠌뮻Ԁꚞ~[25kv:kGqq:-**!C+WLZ͚5/{w/ϸ8c\p߳d0 x72ֽz T';wnL4)+eg͟?yuץ_D_V6~X^F*u&Md 7Y? k׎{e?ޜii"ҚI4A/Y95xcY&eyPh2H6' [X?\?}^ga뺸zܲbq#F}٧5ZnΘ9sf??z{DDqqqt!ڷo(@#c5YfY^^ǏopO~1f̘;r%KisNp >Ć _+.\X1a„F?F7n\v8qbQF!~XhQ7^x!F6mo%W_}5n֭[wq1lذ'?{wk.֯_V{.yx77tSr)?)q9OS,X =8裣gϞm۶x7c̙q{5jTr)y @s0\2.]yyqGIII"gC9$kSVV7o΍<ύ/ov]֨}t֯_w}WZ$|p5z~&aÆ\g}{'#>7l7|sA۵kO<0"(@,YӦMkϏY>}1iҤx7駟***bʔ)1eʔzv%~_[n'ޒO'Kqqq|ߏ#Gڵk|s v[/d'???,XX"~7j1(u^{FmȐ!yQVVtPK,u֨HԺu֤֮]|pd2D/zhר~g7߿|T>|ӟ{!C$>WX[lj{{n1?$ްf#q=X?N:)2(8n=zt^>O jҾGyd{ѿu=N/Ҥ}(կog}Nǎ;/}KykӦM3&NM^'@xo^go+mkܹ̈NG~s㫮*ڶm+%t9>D{Fkqw!="s֬Yf#g?={lU%%%qUWŽsLOS~n8?_ǤIqٲ!rJwyq{83bq5פwsLiӦg;v_җ/gh(˖-K{}+1~7c[]z81:>l̘1#7>bĈ_u'NL\bE|ߌy/R\ve',=F }^'t-F6mW_ݤ D4jԨ5jT0tиcݺuK/o7oѣG|1hРT,--1cĘ1cbٲeꫯիc۶mQ^^]vC9$=]vY\vexw"*++4w 7[#bĈaÆx饗⭷ފ?ڵk]tǡMi0|xRv}&: ǚ5k;FD;1޶m[_>uj裏&jÆ ;u6oW]uUnܹsiԧ>W\qE\s5ڳ>_җk_Z}ѯ_(--7+3f̈?qYz]aӦMyӫWd2fsիWr{="sݺu-tǾcͺGǎcС1tfݧ>{W{-?{>: ǖ-[;6i:ԺvA۷o+"oߞO0!&O+W̍oE׮]wǺ}ѥKcrO~vſƄ m۶͛k:twgIIIFeeeV{P;v$x` uк P805i+L .LN91bD^,Z(#38#s)g>㡇e˖|&ѣGǿۿ5kGj ܕO5+j<1kZͽN$m۶m:BcԩS=zĕW^:UUUqWBKJJkL&ZYpa̝;7yfq}׿xG߅JKKcA@kS 4Fcڵ Z˽N>N ./Ѯ]ذaCkH_{8qbHԯ:th^k_"7ׯ_|v8sbٲeZcҤI|%JKKkyc#jc1uhӦM}V.D4u8hN:)l̞=Fh:: G;y:thZYbEwy~Dv~yf+LP~ߏ]1o'B>kxk_Z!ɓ'7D瞋oY-3O#> x`n\YYӧOoZ): GIl޼{yVJ/;vl9sƧrJ|3irs=?O|?a„g?M:޻`֦SUUT >{DgDw󝈈d2fn q0#1XnbܶmܹsZ~}3&|DΊ_]6~ӟƝ:u;~$ O>95o$ƛ6m'xb{w!i߇UVE6mpmBV 4ֈ#bq뭷F&;vls=ѱcǖnp0 0 1^zuر#Zz~mڴɻ?ƍK,IԿ/ǕW^zK.7nM6ը=c'jƍ+Qf1o޼D3Lc?3,X /V}"9p֩]1A\rIlڴ)d2ꫯƗbذa-@T[ ѽ[^'z~of|Keee|߈E%G?Qd2׬ŋcMƳ>y5>7m6lH~EEEѻwDի^!]vΝ;s7x#߅L&Ӥ,m۶L&+Vs=79xᇛ 5 g;k,X538ضm[L0!̙s1,ZowԨ5iv%۶mk: }Xpak⋉q>}Cfr&ݺu+WF&l6͋y>ڵkGQѮgf2wyB^'@aݻw 4(.]͙3'k,X ;o&M{.Q:th|ѶmF;kj֭[פ***Ν;7i|ٳgƋ/[Fiii`g| 9gΜd2~ѥЏ{{|>l* : lj'9sf\yѡCF͟>}zbܫW8C57w9sf~!ĭڵk:>|xkM|81jԨ疕Eǎ.\wo'j=z{8?in\YY/B{챍?{Xre5Z]-,ygώ<0禛njԞ]tQbo̙3O?]pQRRҨ} orwt P8&NU޴iSu}fٸj;}ѳg:ujL:5QӧOu]ѽ{&`4jԨڵk6cƌ۶mwkw^_>Q׾;vL՜O>9>Dٳg9gΜ9q'j={ѣG7Kn.np9c5Yf֭[k<Ə'c̘1qwjK,N;-98b} 6/wuW,\0Fc„ s__O}*צu!.w?1z9rd'bÆ +?3f̈ļ>}|,k3_UU7xco:+ mڴ7|3z衘>}zL&W_}u6{#AŽNdɒ6mZ?~̟? 4iROj1eʔ2eJst/cǎ f͚ZVq`6ŨQ⭷ފ_?N.]bڴi^{b rH\wuq饗Ǝ;"""ƟI&m춊ZeM7Gk'>XOIDATcРAٞK.'ѩS&>|xL>=rgu'O:4zNYYY\{1nܸf `'ꪫ{c9&9ׯ_|ߍz(o=ԗ9sfL4)/--?>Mw}wc2c믿>/D|]vqgČ3+_!aԨQ1jԨaСqǺu⥗^~;6o%%%ѣG8cРAMZ{w8_c7^{ڵk?>Ə׿5֬Y7nm۶Eǎ<t|KQv{lKhmڴO}S1rȖn`W @! 
) ) ) )(nM=ZL&| `O^'@zZ$sΜ9dl6[МCC}HO}~$FDu9A½=֭[cƍ$zvX/ս~XhQZ*< C )3hgS.\ >:wիcѢEpXhQ˱y}􉧞zjo9zزeK|qEI'K{=</^\۷QFń k׮Mާ-[t {t { l?ηu6͋/wyg,\0֬Y͞=;=֭[Ws/ &i?m۶ym۶w?\h-[WU<#q7ưa'0@"A_|2t_X7o^?>*++۷olذ!VZ۷o}6}زeKx㍑dώ;bĉO&mڴ}':u˗/M6>qŝw~x>hl۷-[ֆ K.Ib'Ɖ' \jU?7fΜwuW3Q{vm51≮袋gϞ'|2~ʕ+#">'ƟԩS<DDt'СCy1vؘ2eJ'W_jUy睭LQܒ{qwƙL&xlݺ5ϟzlܸ1JJJW^qaE~RߎEŪU*c1dȐ(--MmB<4{;cDQQQ˗GEEEs=ڵ^ .{.Ν6m;FXfuv[l޼976lX7{?_zv]w9]tiUghƍcŊd"&޾MV^- ƢE_N\DӧOx&u;nj3w #GwN& &$*|8qb;v_`ȑ1tИ7o^DDl޼9}8묳Zՙ#E ?_/8>g?ٸcڴi1{DgZfϞrJ{ubFD,^8&L]vYl۶-}mַb„ ubFDlٲ%~_ũ{#{ L@az'38QF}͍K/TϏܸ_~1|FU=WWg<֬Y{͛7/Ə֭K㠃>}D6mM>=&Mlر#&N?pަM۷o <8:u"ƍ ,hg ͛c޼yGݨL&:Dgg}61>ꨣ"4j̞=Η`g> HS[kÆ q%Deee֧O[bΜ9CSO=O=T|_M̝9sfu]n3<3~x'c1gΜ曣w޹g>8qblڴ՞ (\K.ܸo߾{7z!CW_}gV}n}z}ɍbҥ>[g Cu!<;vlL2%z/~{Y&7۷o׿N:)&^z5\\rIb-6lhpu{ҤIqWGϞ=s>U;lg ײeA5_޹ [;9bƌ1o޼{v|OE{k6RY\Æ ˍ7mwqG{vmyxذa1nܸ:ٳg?LXn]o$ƽzk>\2nZXresRU/3}a 1cFlٲ%76lX99L&&L=f뜳cǎ]|ődkȑ1tx裏;k&bv=s;vk_g /9h׮]^3LƇ~m޼}|yu۷wͺpL`wWYY-,Zo-`ww}yDdDGݨL&:xrgyPg}61>ꨣ}򨣎JgϞ[lq20yQVV¬-zPfS>ApZdI4_[(X~k4Zu+jؽ,]4r}{C I_}:Yٳg'7Kl! (|[nMKJJ^m۶qeeegmQg $aٲeA?pzY$sR L@+--Mw~ycUfD@ڧz! vV {y7^z5}IW\[nq2V\Y܆Tzu L@k߾}buּרw͈:>ԶOmB8SSpQ\:7iZ#  eee-h^~kкE[ ޢ>XdIf k׮M ޽{LJ~;vGϞ=ϭ[.ln\RRݺukkVョz!iWTVV~!мh>~k+hՃ%?ga0kۧ2ld2Fe˖z׬^gj())IuMTVVڵk6 Zo-;j}PBbf2(--M\X޼ysuq2ekU/3튖L믿-,[o-Z/_jU^{ǢҥKtL&󪪪Xvmt޽{^:1ex&nyt 5d2[E`Ҽ(++KaVY=()TؠB8|lYb5{~cYYY;VX\2P{ 0 L*3ܹs[l6Llݺ51.)){m&ƕ5ٶmDzODa {-zҥKlٲz\lYz衩UgVAhMA?RUUUyQ=𲶷Wl}j™vŻG[ P eee- h>~kZ4/h֢y>XdIK::[֠A$w'qŊf͚ѣGϟ??1[gvi}֭[^5#":tTS> L,JJJR_9EvZ @{ 쮚r={-cǎ1txsYfil6fJԎ?:?bڴi}l_Ç׸WB<쬨`R=X>k~6VYۥTVVF6k-[Իf]B8:p ~F{bq!CD.]rwy'fϞݨT L.3VZnݺ%ƫVk{~an\TT .]$ކ^UUk׮kիW'{^gCKl:#"N=ċΝ?|slr-ڨQEEEq'jrKݟ7o^nܡCPB<|Y&W_}hwk~{5+++޽{NJ+r+WFݛ׀j}ͽCnώiӦjw_ٳ9zk̝;77ԩS;ƍlٲ%""̙ӦMի+H=ڵk;|WX*U\ti^-[Vz;Y}nZ{Ë/fͪgʼnnZsfjn{wn|=zt<䓑fsUVŕW^'ON̿ s k׮q&j_}\uUz\mǎOęgxAz=kpB=DDt^ %%%QUU+V5kD=5|vs ,O?QY&q$X볅x&`p饗&ދ1c駟]w]s;w'Oc֭[#!/(//}ƍwߍ۷'x1vFgܸq`xs_D޽SN|ظqcb^YYYp Q^^ި} LQ {رc :4Q5kVfu>qg7/Kb<|СCeذaq뭷FΝ7+˗//~1nd2ާ(Lzj}xwW^ٹs:ujqL N8w] |ܸ{qa!CK.;gn^{sux&u9rd̘1#Fڵ:(n馸m۶ySZZ?o\㬳ΊGy$>y&Znϩ'O-[DDܹs矏#G9'-ܒ5*Β-**O?=\[nÇ>͛w!/3-逸X޽{\uUqǂ bٲeq())={ao*{|q'[o .իWGUUUcȐ!QZZh}RCnώiӦjw_ٳ9zk̝;77ԩS;ƍ.sΜ91mڴ?~|ϯ^:Dsύ]3WYYY9ޗe}M-h>x&Zn1k֬/^xn֭>7k֬Xti{{˗/ѣGǓO>l6W_jU\y1y /0:wYv^xavUW]WvO|xDW zcݯ{qUW_ ,e˖ƍ$zvXᄅu''oV,\0V^UUUQ^^!CDii.SgZO,F#GlwԂ6SgZFQK7`f-^'(n9lp -ٱcرcK@ n OHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOHOcNa s$( *8jA-穢@qBZGb[jժ*uZS*$LIH]ĝq}{_羠]&@- @'AP }@- @'AP }@- @'AP }@- @'AP }@- @'AP }@- @'AP }@- @'AP }@- @'AP }@- @'AP }@- @'AP }@- m{o1mڴ)fϞ}Y]6ӣuѭ[h׮]/ދe˖Eaaa4k,{8CAg[;AbԨQ5^mV5WQFɓc9K/c9{xcTyƍqeEVVVl{`畺78f̘'tR43">.,SPP^{m\ve|FD_>&L'|r ҶM6ٳ>kFzzzn:uڵ^_|E{lٲ(,,f͚{rH4hРl{袋bƍ f͚E۶m#???-[7o.gȑ##%%%>qUWŴiՋv-222b_~z2dH<#qp>}FO?=njYzu5*&O\Ip@\zq1xo/r=7n.,jg[s:tht9kRꫯN}cذaqG^.[,FO=T_|1Əw^Rzr!K/4Zj 6mZz_FDĆ ⪫g}6222v{A f̘W^yeYy|A\veqiã~SPPÆ U[~}L0!N#G=zTOĶ'`rDϞ=k?pXtܶmxK7պu曣M6qw߿4o޼>k֬1c$Ԯ暸袋jqF׮]3ό%KDC9y䑸+v{on @NNN\tE15kQ^Ϟy晸k$>qUW W^m6or'^: ;=ԆիWc=P1bD@S{=Cnݺq=bȐ!oժUr- o0myO;wNzԼcƍw= G}tDO^=zt[.t{@m2eJ_tܣGիWkRRR.s=OUW]UdY=vW:[zݻGNNNDD[.{8묳=w $)p@ٳ֯NJ+Jm۶I[o9ڴiw}wiGͫf͚3fLBk.({lt5=wn ZzuګW޽{xݺusUf[@mOtܶme]^!$?Jڪj*v}qaaa|ݖ%fڴi &mۖW\NgϞW.k.zT{*粶=;?~Ļ . 
6ZOwرZ;tP>r햔[U'vN֭ڔݻwL8+\W_}5aܻwHIIIW޽3f̈GƍݖnXhQlڴ)p@':Jz ,HnݺZm_~elڴ)4hP߸qc|Uݒ{+uuOPV;O?4 Km۶]v%rH?tnʮJVbw/Ƨ~ZmyOO?-QTTnw}ѷo߸{c[ުU lٲe#//ܼ5kDIII8===իUV {^Wemy WAAA,Z(֬YѢEu]QFվ;v:TyϪ\%;v%K$\k׮m{ƍcёcƌ&MT:w > 4ںuاaÆR^7Ꞷƍ۸qZuw-#f $)7|s,ZiiiqD>}⬳Ί`quOIm_~elڴ)4hP߸qc|Uݒ{+uuO'%%%:8#k׮ѡCh޼yƚ5kbܹ1}xg9sfo=:իWˆbņ &aVY6(&}ʮI6賮ik̛7VP>,ߵZuw- $)~ixwwߍ|0?+}[VJW7e˖V[\\yyyѪUyk֬qzzzdggWWk{ewĩ uw-@] Qy.t){Wj*Zj}K/4={v篼J<_eOOOׯ0ۤOĶ'(K'fƍ1zɉ1cD&M*[DFUWJJJ4h 貧WԧaÆR^7Ꞷ߁6ߵ[uw-vFrHs[nǏ< $ Dz!eQ>8TT^]>TJJJtAqGF׮]CѼyHMM5kܹsc3$z>s=ztԫWk ŬɃ 6L¬(lPfM]lg]@U4h~{tIZ*1Sn~Cѿ\h6nX5#!5SvME}*=m}'U_}U8++y;v%KbŊI={vxtnrssbŊXdI8===:tPmyOUcڵӧOO>hܸqߎ7|%%%q'?~zB*{曑S:nҤIxUV3jլYƭ[p^ӦM{ ?I()))7o߾?#à0ٳg4iҤ¹øqOz~QQQvm->xu]+]g}vBm,_5cǎ~t\p8dȐΙ3gƃ>X˗_P;s*=[[>5k׮_|1֫WJuQ V歷ފŋ[lݺut!EŌ3UvOe\ֶ'`ǀb%͛EEE}GIl=.l.e]JNj/3<3Mpe}} /hѢdee%\P뮻oL,.._~9K,)qo϶'V?~{]t}t'w}w_>""~x7 -))?ֿHM<6555N?\ZgϞR7|3rrrJM4O<KnnnFDDu֑K,rvexML- . 6mK,K/45km۶kҥKc k> !CDnnnL>OĉM6/Nx65"aÆq=Df͒- ""$ RθqOz~QQQvm->xu]+]g}vBmIe;6~qFFFRP2$7n\:9sf<_|y\ s9'- 9_||A̙3'-ZTaO~?{gѣG;6ZhP_vm̝;7/^\.SN{JMM{7N>͛cѢE1wr!-Zqŏ~l{AT_/K̛7/g#<{]v{]xᅱ./^gyfL6-JJJJ˖-qw'K=tYKjwuWx !/cɒ%]w5?-ٖ]r%1xԩSԫWo7n'pBL0!ƍ٫W2eJyѨQJq}]wׯv ğ9rd~kܸquY1uٳgDl{v\׏֭[GFFFF^^^,Ywex#33s=Zhw}w\piӦXdI\zѬYh۶m]6.]Z>:.gȐ!ӧO/=1qhӦMdddŋ˝ްaø{YfIٖ~qGDĆ O?%Kʕ+cQ\\͚5f͚Eǎc}I*tKZl7xc\wuϏkFzzzj*u۷>|pxwcQXX͚5:! 4>y $)_lqO~P{cǎ*Jk׮sVSN?tԸ{7ML2yXhQkZh#GGIv<5j]t.]l 6^zE^WI涼'v>{x.C;?<?ذaC|駱dɒXre_>YfѬYرc>InI˖-o뮻.rsscvHOOVZEnj?>?Xpa|(,,f͚EC lumyO%*5j(t]tf=6lz^zyomyO7?>Z jO>Z jO>Z jO>Z jO>Z jO>Z jO>Z jO>Z jO>Z jO>Z jO>Z jO>Z jO>Z jO>Z7lٲ7o^,]4#"y摝]tVZz;wn,X V^ׯ&MDk׮Y=#">ObժU7DFm۶ur+W/".]W 6DDDFFFh":w{WjߚϏO>$.\k׮M6EFFFdffƾ{WQ O?f͊3fĪU߮]8p`qU.]cƌ^x!֬YSٳg\xqGlU5kC=?c֭[{qI'ը믿gώ裏*ȈO<1 x`Ԇ _[o^-Zĉ'/Cf=#`' oQ$u-;ƍÆ ֨>#G*Ǜoof|1|hҤIz>31|o8wߍ:zӟu͛믿'ƤIb1th֬YQ]yyyqM7+ׯֺ'x"|k~uS h(Ν[֭믿>̙7tS늋cذa1yj2eJ,Z(yhڴi{1zj|뭷b1a„hժUDIIIo9s#++zZ*NZ%%%1~5kV?/eٲe1o޼XtiGDD#;;;tR'?)..sƂ bձ~hҤIo>vA;G޽C:Dvvv4h V\1qxw<䓑W\qER=r!ՋI':tFʕ+㭷ފG}4>ҹ^\{DJJJR=Wy 'io4k,VZ999_:/_OFFE׮]GѩSs=#+++4i6mիW|/BL>=K~q饗O<ֆ-ZGzhpѲehڴi]6>2eJ {3gN򗿌/ c֬Y%rrrbժUUo׮] 808㌭>Xeҥ1f̘xb͚5IMM={ƅ^GqVے8sb̙>{GgϞI_{7=w[}-bԨQ[}1cD߾}kaGٙ^(((wy'>Ϗ̌N:E.]^zkʕ_ҥKcձaÆȈ-ZDΝcڦl1b>v2ՋN8!zhs222bSO=Çǎ'xbtԩ^7bܸqѥKzf͢C,nxK?>}z<ӟthѢ馛j5{7~$ԛ6m۷O?=ト~?9.-0`@uQqWڤIʊ;F~wߍ/#G*Ǜoof|1|hҤIznɤI*  ; /1cĔ)Sbi޼y?.믿gώ裏*ȈO<1 x`{֖@ IF޽cqQGEFZ7hРHKKaÆ֊{ʵFqZZZ;\wկ_?nXzuL>>r8"==ʞ>`lܸ1vw Ը+#///C=-ZT3"b[SVn⡇$uW_}u35rH<1xFgqFV{oٲe1o޼XtiG_Ύ.]DVjgqqq̝;7,XWG&M}ѵk̬;g}ѩSҥKԫWz\2Xti^:6l}Eѹsk"%%zOüExH>3Ѻu믏9s; *1lذP/86o\5_uL81&M CFfͪu#;A;{նFk J222OAŁXힵeƍ1wܘ3gN? .Lx)C{lڷo{C=4:tѠAXreĉwIXOFvvv\qI^zѿ8餓CѨQXre[裏^:ދk6xZ 1bD]֮WnrJpu-g}{neg׭[qtҥ:묤j*>xKkSN23''' jgyf=7n~sigDaVEe˖Igzji1{lA-sK,!qK-Zw}w7. Q|0FYPYoƛo'|r ><4iR&M#Fƍ|xA2cǎO<1:uTe=ztB-+++ƍWa͚5:~駟.lOn^~xJGqDvVvkk֥8{_!0};jqi駟{gg}O>d⋥(..CF}BFk׮ѣGԩS瞑M4M6ի>^x!Oťk?K'C&*;"A$sj+Wt~NNNx]w:Tg^>gΜ`홖={v}.\0͛OS-[,W[~}ڵ&MĺuJkUv EU#D֭믿>̙7tS늋cذa^MƔ)SbѢE#DӦM;ZkK.+N:js̉GVVV|GBS_~p }>2`8ꨣW8I&;v~Ż_~yXtNnnnL4)8㌤Ďz R~hذaچ *|qM2ˮ)**2l϶mVxWuZvmZvvveLwPRڷogyf};rrr瞋[o5:rk|9rd=r!Ջg#kovL:5~Ǟ{0ދk6dL>=΄Zzzzg?<ī;ƀL>=}٤qӦMq 7$ffddğ1bDG?hݺu;FwuW}{0 ƌ3gΌG}4~_ ';w]w54iYYYѱcׯ_=:|u]IOO? 
#FΝ;o=Z=ti%׭[quYIkժU} S&"f͊'|t<`vjjg9x0ƍP;cs9'Z2[c}SN9%{E~~~QOonS_ި ϯӞZlY< N;jo]Ϲ΢$%5;K/M:tiРAqM7%Ԋz xԨQQ\\\:NKKcF.]*]S~[o߾ #GFaaaR{~c n u'ǵ^P{㥗^Jão߾Q~GDt-zrA"s=~oxꩧb֬Y1yg?YB(d{ʕIѡCjիWx̙5\)((w?ʊ_P-; 3f(w9眓Tr{c…I;\mٲeukG<vt>آ͛7%:t{k7o0Iڵkժ ݬY`ABVmYhQ\p~Z˖-/^e 㔔8#n`DZvCc!gG֭vB2_{(((t͂ 7H <8v~)))qM7%`~3lqmaaa7.vG1ŵsN dMƾrJBޫqVn/⠃ i?j 6,W۰aCˆ>YvMQQQo;vlĆ-Zѵ ӦMKi&ztϓO>ρ^lٲ\Ԧ@ > M4).]P;餓\0OO>)W+PUŋǦMM"//ZשȦMbٲeꫯ~8S\5k<@dffnu's=PѣG.iGIٺ_vi u\?jguVZjG}tBmԩ[\7cƌXzuBsIgjjj=~ᇱp¤aVlٲ: XrJ9Pfͪݳ4}Q3;vlC-`G}9x^K#RRRٸq8jJkS>3Q[v1 m{o۲e;L5o<~W[n/'\?=3oVƍ8RSˇiu-a\XX999qoUψmUQ; Jzm۶Iٖ6o_}l޼9~emKΝV\Y:tV^z?_:9sfWJiӦ%۴i={L'ÇOc6mZylٲ\muj{GKJJ w5$(,,馛je/[jժ͍%KD~~~4l0ZhZ>Fuiҥ˗/kFƍEѶm֭[4nx{ovJ; '|r׭[/Nr!{!ĬYJͫ5c h׮]/}'J5\_uBꫯ-ZTGjO>d\wuIꫯ⥗^*W/))GӦM}ֽ{ {&iӦTg5EEEoTRk0ѣG7nx`ίJ}]\-;;;͛7%:t{kʆWoEAGnnn.޻{ٳgG޽+<555o83_~Q~:SUV3v-~_B@>gU`֬Y&lEM,[,~iVk^e(bԭ999 #8"ŵtPt)6a„裏}7GAAAU٪UӧOB_,HU{'V\Y[+777.9rd|7uck=1nܸڮӎ/,Wʪt~^^^¸YfY6*"nݺXxqBCv߲k͛WkTٳƙѮ]: 8&MK.MtIU) 駟V'|RV6*˗/?O=3.j&8>osLd_~ #'''8S?F;>mݪݷu[S- . -[_\뽾'>c=_j[;#RRR>%%%.„Zaaa 20͛7Lj#^ڕ袋ծ9sf}szVUV1dȐ<8˽f͚_~;\]w]3aycJR^:tPe׬Y\pimYlY< N;۲e;L5o<~W[nW[oUq(..NjM7ݔp( 7ׯ˗=JRϟ *6Ծ +VHDVݷM6ꛬM6ŲeW_wq)$4k,x:p |ߤm 2eJz ;vldee%}~?ӧO/X"N:):t5+W[oǏǑ5k$\YfѣG} !|MO/odddիc֬Y1a„5kV=322v-J?;wn3&!tqO<{wՅE]6lH1"wンv?<7o.4ԡCrռyq~~~]\ϊ>mݪݷu^G?QUE%\ׯ/l2.Z옊k:~WG-\ۣGr'|2zWK/iӦUcڴiѻwzoMG~xѶmȈ͛7ǚ5kbܹ?񏄟wq>}^zENbvشiSY&̙ӧO{. Kmڴ)~.zf/'\?=3UZ 9"aÆtoUze%N 4(?omVOum @^{-ť Ƙ1csպVJJJvmq9\Z/,,{,{*{ѦM_RZ_~4hРu]w]|go֊c1y*tAq1ǔ{騪pњcȑSO 7%%%cĉe%Kyyy _4v&MK&N:*dgg'?jOՖ/_^+V$SRRUVۦMjMַNqqϛ5k<@dffnu/`w뭷FNNNB#oqA:uJ4a„8Ӓ7~ϵkƈ#J͛77[5v}[?i?wkԨQi&9昸+㷿mBiD~ڵktСN={l-===6mڵN:)ٳKŕW^/P˾o/ h]u>uǰaâk׮ur}KJW\{{5f-}VkݩwqGlذ!޼y-_~o 4( P3gNL>N%㫯??-[P .Ne˖ o޼y?r]n]?Vz\mƍ U FzzzzFDR/Ro;o?]v'qEĉB>ɓKc_ׄZ֭;-OII /0VXXC yUn1bĈxvU?ʕ+K^{mֳg8p`a6YYY1zr?{+,,{キ.㎋O<ϊm6{ӧOB=???ƍW[Zm]&L>([:x"eIigÆ rss㢋.#G7|S'=k{O""ߏ/8%Ը#ܪk7m4ƌ>`t=RS+Tǎ;#5j w!1|8qbӧ6m7=Pdeeٮ]̓_ѸqԩS_U+vYgn=UQQQ\s5_'ԯhѢEk{QO&^z\җjkuu"&Mz(ڶm['=˔)S[oMeddرc#+++ׯ5+VLj#bّdɒxN{t~fffVu̙3o[gIy[bK⋱dɒʥwKB'M6m]AVbȐ! yq0w͚5qG~?}/Pi{l|I\p7$o8kO>}O>999|XzuDnݺ>쓰f .]TgnuENNN,Y$V^Ѳe8K. /lmjܸq+MVZ{wgE _ׯ_[o5rrrjGqD d#GSO=7pCayyy_:&N[7|/,]4;XreBWUwyiW(***G=K.MݻN{FD%ɳ- .3g&ԏ8ӟTno֙2eJЧ;vldee%}~E߾}j+VLj#bّdɒxNKx633u5kVaF%kƍժn^{m?7xc92ɓOXx8s>Ѿ_|qlذ~{y[uMƘ1cݻWұcǸ;FE~~~:tp'|?pxw.lǁP[bv͖뮱'Ԗ/_v? uyBٟoɩwqGFT}BٟOoܸ19d}7[vm4hPð̙SC*jb[7vR_}U{dɒ_~vU 㣎:ZW5&<ܹsrau3"bq5oP޽{5*ׯM;^{-ť Ƙ1c#%%%n8s?.c=Yc=6ڴi_Jk׏ T8 5 h]mȑ#㩧n!JJJ"""///~_ĉ ?>䓸 tMq'Z>}D>}"///rrrbzꈈhݺut-g5Owҥkϙ3'!m۶q'DDL81^ڵ^jsQeee%LtG/ȶ Z;6fϞرc\yqqED$}BXlYm۶Z^6򗿌 &KkSN>zFl Ϗ!CP4hP :tl˗?P;3cҤI۴fΜ0KC5QRRo_Lx1vhԨQv&999qW$8ǽݻw5[h?x\{1}םzq7ǭޚPo޼ykʆqnܸ1"-zr,UѵkˠAb1nܸڜ9sbu3P.\w^%ԯ:N-c⼂r?{,賬3fČ3j(MD}~:Uo޻\mҥYA7^zŴiJkN+^YlY̛7/.]Zz\#;;;tZ;z6mO?4ϟyyynݺhҤI4k,k#==bŊcѢE7Djjj4i$ڴi{Wo~=YXX ,O?4V^_u4j(7om۶.]DÆ `[ ٸqc\ve1o޼7tvUyv[_tܥKݻw=zt,[tܦMOZ=#"^zr/yuo }'~hڴi_|qlذ~V{iӦ1f̘xbر1{(..pnǎ+;."oU0.))e˖e]~_Ƅ [N:U'|-]4;XreBWUwyiW(***G=ΪUƙi')߬!PQp}Y833]v -l11O?f͊3flUڵgQg~_̙3kRgώ&M$58f͚z͛+ߠA8sΉ^zm^kƍ1iҤx?rn-{qQGŀju3gN[o;MOOG<:ꨨW^A;;cܹ ;.nv:SYFSbĈZg='O?pBoƍoqҥKcvQEj[UbԨQ G}4zŵwuW< =3yhѢŖ7$O> . 
曄M7'|rӧO'"'''/_W֭[Gnb}IX3qu_v^ti}V07nziӦy:ԽUV\K,Iqo]%?0>ꨣ~~X![k徏.i7C;xiӦѶmXxqi-777\fJy|]HKK|%hGy";Ӂ?t ň?0JJJ^hѢcܸq1lذ8p`ry3ϔhU6m׿_W~?!Zj{6mZtM|/Z}?>{채߷nIaaa[[owqGWۓOE{˽xߧO뮻ģG}4>*CwuWL6-nXbEBsύ6m$}7n\7.e˖uvF .;/]w]quҳEq1lq^AAA| }Ο?/~g ̬5]v - 츾書}4hP :tl˗?P_55cƌ8sjS^}X~}BmGjyvzB>}|sÆ 'Ԏ߿;6;Z}$?~1r?MEcȑ5'xbs^z饸[b}A:uHKKUV… csϕ ۷o\y̙3cѦM۷or!Fvvvԯ_?֬Y/^{-N˖-KXÓWPP&L &DΝ?q~ѱchѢE4m4믿 Ɯ9sbʔ)1o޼rڵk\zպx7⮻*WҥK55k]tn ~p.]w^m_y睷vɉqzzz!GM6m&>777\fJo JKKB7Oz;ԃv[Be.]wqGuŸ+qa7[vxj 6 I!𭣏>:!sɒ%1cƌQ:ujlڴ5̙3m۶5QӿؙO?f͊3flv3ΨM6ũZ;`:O/~r]֡=X7n*6o8쳣cǎ[Y|yddggW\Q?OjNJzzzկ_?z=z>8Zj~X`AL>=zꩄgH׬Y~<5:C?>ְa8c㨣N:E˖-#555֮]~i̙3'^y啭 czECݻG֭#;;;6m-_=xXzu隍7ƥ^?xty꒠ODrj=X駟Šo-X ,XPqwFzj/k,w<@4nܸF=?裏jzƽbŊ e_w]'jժ bɒ% ??.D?Gu_ӧOovznذ!ڑGYkDٰ: Ԯͅqw }nY͛Gvo߾qW_|&MVϚjժU 4(Xwꌌhٲe|qyů׿Uuo'|rAp4iW]uURzzz\xqGZڶYÆ .?j7|7pC] C>*c傾8-;>,Y3fHe爈SƦM]͜93aL)g=sPӧOu]uRţ>|ptes㮻z*~Ч_r%Ѷm8Ne+V_ZB====:/:8ssk9s$ԛ5k_|q]l~PvcȐ!q^z饘>}z۷ʵ&L> vemҥKcv⼊,Z(PKOO;nk5jTBGM};ybܹ5^nݺcΜ9G?\׬MzJj+_~yBxD =ܳM>3fLůꫯ .W~~~<1k֬1cFZڵgӷƦMSO0X}ڴiz_ٳgGnnn|Gf͚-ȈO<1 x`u^ƍg'YYY͛7O>9>رV/1cĔ)S͛G㢋.-QIJj;GuT4j(u 6lXi({h5צMꪫI?ڪUx衇b'֧L^zi߁꫸jzj~Im=k&5C>;4hԚ .zj'?6'5:mXoK.r瑝]gyf4l0l8SsΩq+j?wy'/_5iii~ũ?O+< ٸqc~^x>Skqm%<إKݻ3bե{,kBmWcK/zGiOveb4-򙖖۷#Gp@xUy饗[n+tPt)233#---VZ .ӧs=yyy W^yeNj//bi&x`ムQTTyyy1wxgr>|xtСN[TTO?t<ӥ/K&M ֬Ys̉_/͛{D/}1~׿x w^ᚩSwܑP;cb;.88cNzӦMo1VXP?sM6I_&v1`Gо}ݻwzѡCΎ ʕ+#777&NNš'|2+=~cVSNnݺx??qimq… +(w{GuYѳghݺuԯ_?V^~Ld̛7/aԩSӧN ;ybĈ?LJ~%%%I[hQ}1nܸ6lX 8N7z C>k/.{-cĉ1iҤ0`@ :45kV9991lذ-xcĉqyŕW^MտkҤI1bĈظqcGɓ'GSk84.?f0 y晘9sfi^_~mmV<5hӦMw]s9 iӦ%9bĈ/xqmp=FFiiiq7lj']vڴiw"`'7""OZ~x~'Ē%K"???qѺuܹs瞵ҳ[n^:͛-شiS4j(vuԩS>Uvm"ʊ/v[<#xqG4@ErA̞=׬Y>H?xSڳ g> {챨W^1dȐRŃ>}Yq;,*rݵ=+W . F'pB9MMzꩉ5kĜ9s,v54i .,M6X{?x饗 iii1lذԲԢE8+3]Ovޡg֫W/~vfÆ aÆѥKv&P7EٛG#G,L_tPnfXjU92zD}Ѯ]byYgc=K,)]uUqǡ=/b#8;83⬳Ί֭[{_K.M;hܸqD͚5K/-U֭[nN:U(ҹs1bDjժG]ž=zW\#G,SO=UͥLj#C%>??"sC=5j(S;BAZzvaӟ4zQ7222ⷿma-///޸;4ӗ_~ѽ{իW>bرE+Vop1bĈ^p tҸ kK/K0}[lٲDsbUT)yk?dԯ_?.䒔̐^{m1zѩSҥKѲe˨W^ԬY36l+VYf/&L½s̉ /0+,|q%y͛73<3v5*UĊ+?1cĉW㮻+"56L0!nDrqꩧF޽y'?O~cСѢER}$mwX6ndwжmDgqO?XweOڵkwM'Q9Z|fddD-⮻*U:l_y啸UVq1DΝc}uFFFF|W10aBKrc9&~_hU_/ kW;/zjfǧz*QJ_6nODVVVԪU+믿|ƍO>HN:Ņ^X[V|p?_`A7~ߠA"333esDӦMwމEmM4SN9%>_쮿x7ދٳg7|=5k֌.]D~裏./w&MSƜ9sbݺuݓݺu:˃y`7/ <8֭7pC_X{233[nc9LgDj⢋.DFA{1V7A 9ivisŻ[X8qblܸ1TRꙆ k׮o6lXL:j̘1u:uPw}76mڔ(vYgsO|׉9iҤ:--DgL}FDL}z9nᆨZj^Vh޼yp /UW]|}83Yf%y%aÆ6lX^ڵk}W5;,ƍC-uƍqu?rժU+R+w߻mK^^^N<Ĕε3ڱ8Jv޿]DUEjyyy㏗g޽>,>?n+/|_=+#Grss㩧zj{cĈzqEDt9z表QF_͠-ZhׯOhKKqDDQzr?{ѽ{ogX$>XpaZ*FѨQh׮]lٲْ]Ɯ9sJ:("" //"-ZWuEJvڑ[mFzzz}1Xpa,^8V^ׯʕ+Gfffdffƾ[.ӬW^8ӣ[nqǸqKѻw8^z%:_7x#&M3f̈%KlwOFFFo>N>8餓nݺ%:* ]멧\fM̙3':vX~cǎ'/hѢE}Ι3'f͚ٳ/oɖJ0ʕ+ǁ~Vm>oVA߱cǨZj"l<]AAZzvaӟ4zQ {oqe/L޽{իAJz(N;Dfq>WXoFvĈ#{O 'K.o/RGDo+r5bĈFD+-[?W^yX}Ej%]V7㫯J;찔ε3)gAYf\zq疩nݺq 7답'333n8ctvITV-.0`@ddQt#;;(WiiiѬYh֬YER,ѪUhժUE= M6o11cF̝;7V^iiiQnw}cǎy~E~"fr/ԪU+j׮-ZۯXVr_\;? ]vEj˖-+U+WM7Tnݺu(l0f̘"N;X{^lfee%J-iii,y`WbșMKN{.>qظqcTR3 6,֮]UVaÆUXzD~'xbk̙jժm^iӦDE<Yg=_ugy}ƟDcG{ƿ={va{ e{5o޼} O>?0/_֭ʊ֭[G}j_u|GZq̙35jDӦ/2L .ʊ !K뚟_zxWo. 
.M;S)kҤIr)qg ~zt-7nQ&_@ݻw%~믿oFL4)f̘Q[۷O>9N:館[n`P^Zjt5vZ4l06l]t)sRߪVZ~{F_}Uze *x6māX[7a%_bժUu 6a%_PPPd^{U`^zj"s͚51gΜرc;6&NX EHŋ8KVʕzT nӦMEDČ3bȑѿ8S+,䴸&M/NԎ< ʇO(%%~8""^z۽{޽3,Y~i,\0VZyyyQFhԨQk.ZlY.34m4nƸbƌ1wXzuEݺuc};FFFjr~E~"ۛ9ϟ .+WƺuRJQV]vh"oZj__|q6m4̙S`[">|3_~S>}?Af+'޽{{P6u8㊵?///D.%ձc;vl>+VYbZy {v-[TV\7tSu_ԳҖ׮]=[ V0dee%ֹ%_ǏO7n\+?$Ə Hٌ'ycѢE 'P^ 6__~99昸馛JYImذ!FժUX!K333 xcӦM駟oxf͚ q=$j*U'4AΎ ;jժѵkX 6 yԚ7o^ׯaÆ׿ug+F|KrJo>ڴis-=(ӷ'իWӣO>>~px衇⪫*?ωuӦM UVHmK1bĈꫯ Ç*UzT5wׯ=[ Zzv}ߪU-ĉ.]DZZZרQ#8:uja_ x8 &D޽cԨQ[ uO;#.\?Zjmwʕ+5kƭ}?AfKiӦ%unPˋ?Q;$֬Y_|E|'|R`OW\qE|׉e]YYY%ղe߿u]K/{f͊ɓ' /ÇCP/^%KlĉGMԚ5k *?{/^{СCcܸq1}:uj_K/bŊKo)ݛ)'qz뭉Z:u+]O?-JN8={v/)\w%铒bرݻ}~gy&N=s΍uEnnn,]4Ə_|q\xᅉ0?~ \qѥKuAAA\s51px饗bѢEqX~}̟??駟<@឴裏.y`w{5o޼D=֯_Æ +\geeů2ϖ*/کiii׾}hӦMG~~~}'cՅr7o^{rrrbʕ H7tSL2%Q;ã_~ѣG/r7?>ZnuԉʊmF~kD%Ky@L|qWFAAAaRJ1bĈ^zzlbŊ~ǿ0`@>QFUVk.~_K/;wNKRlŊqƆ aÆA?Ȩ[xqvmZfff}iӦk"///""*WÇnӎ0f̘:+++=Rꪫbn-֯_s΍Çowofff\x||.ժU|0nxg !zx뭷e˖q5GQޣnoӦM'jmڴ&M]w W]uUԫW/%3Ղ .k4hv_]tQamƌqưaâJ*[;~[;/ZhQw}s-R{KܧQF[}!j.<1zDQF?D߻߿Dv5y۷o|嗅)S믿҇VJ/8C!׶m۸yU^xO ֟}2dH^Ş|\qy[t:---KܧqEjK,)\'7n\tMZڵcԨQ;$aÆqwg|GRf͚}b%UF"a@ԪU+2dHamƍsŠAJ4K*ť^ӦMK?k+h*(>`'wM7Ŕ)S?}!_}U'Ɛ!CUV;`J}-^8nD-333>bشiS\s5>}g}sNafDD bСѫW--|Gq-;SX{'bݺuE>T{WK/W_}/ ^o9,Xׯ:SO-y`wtM7Ŕ)S?=gyf 0TƔ)Sb}}nnnw}1y⋣[nؾpQjժ O8===n8+dΝ;-Y ꫯW^y%QԩShԨQ̝;7Qk۶mlgʕ;6vo9昸뮻J*[ܓ{lt%;85kVD|2q!͚5+y`wѣFHKK+vaÆ%RgqWƌ38QFQJs͛WSyg}vwҽY`駟oIԇ^n߭GVXQ~Ç_Z۶m㡇5ko֭{ҞUVms(/HԲG 9" O_fnե+z*)SK.Zʕ;C=DnXre/ j2ɉ &$jGqD~O iiiqwl3;q̙3cΜ9;3N8}ȑ#`{W9r!WN{'?Ilذ!""֭[tM)vGƍ+ڵkǨQ^zs%BN<޽{,;ʭ|/\X/΋Ѻug9眓VoРA :4z*U*磏>[n%yO<֭+ի']/jԨQ^;?~?TDo~83*fHz=Qkٲe<#Yg}Զ4ѣcȑZzGM9yD>8q-G]^oV;pݹs۷oF-c&L#"N;R?Xw=:vX1hРDm1uRϴ5+VW^y%Q;sf͚Ѹq8Sqƍ, 3q:th֪U<@k׮}VX7|sN:qWtT㮻+WUW],dƍq%$B>6m>l|[ h߾}<#q'?cܸqexG[ygi>O'$-Z(F⋣4﫯*R+Iwxƍǣ>{WΖAW^]>IKK+siq=?!QSN<#ѦM2,2*z[~i 80D}ѫW>}zbݠAӟT|Iڃ>k.\שS'\⹾3f̘ĺnݺ%3ΪUbΜ9ڑGY>GqDC-\[3mڴشiSVy &ı}O?t1---q̙3lwygp T|xիOX7h ?k'|RFڵ u)uԨQ#uǏ/͘1c_jU̙3'Q;#K|GQ6}}nݺHmѢE) lӦM㢋.*rMs'+hOZvAEZܸq1lذDVZCE۶mS6wz'O]k"/va)qsn\r%[X\ru]\φO@˗/;/.\0 RASxGK333H-''}cʕZݺuK;VթSH-'''6lX>+V(R*Xۚ?~߿ȿg3^}2'&M[gDDf믾j/ Q+ihfķ)kN/Y}gKAs΍.]ϼyu˵!J6nv[̞=;Q?bĈ h/n0-Z(.">Tg„ qUWE~~~az1jԨرc)nݺE.]+ ><}>fժUq-$jYYYqI'ms_|{lֻw1bv9sf\pTT)n8裷vg=VZ?DOÇWT;Ƙ1cʕ+I'TYYYѺu7o^a^+2VZ"\:wiiiQPPPX{G?Q{s=eee>쓪1`|8b…bȐ!4a-'33H-''gʕ+u떸Oqyp{hu?LԎ>Ts Ȉ-Z]wU:t={nV;8[nѾ}hٲeԩS'jԨk֬Kƴibܸq1cƌ"}5j~vɉ_aÆD}1iҤ4iRcDĠAxMΝ;9?xamqǹGuT4n8/ &?_uOnb8 P亮UV7nR۷o4k,A ;ƍnٳg'w\1"*h򗓓&LHԎ>W^{uQe˖wW^yeϜ93zDQF~ms߳>W_}uv7G>}g?f͚UX{W7ވ:X9s$jGqDk?VZ ?SL)S&MĨQ";;{]fMYHߏg_~ :th,Z(^}ڊ+;;c;tӟRJw.]Z6o޼ws%uGdZؑ7ŝwY$##G/ Ǝ[$i[%qF*U|0w}7?"_".((.,5?xr-Zzzz <83jqE'||1| j+ĢE>zʊ֭['j/R 3fLڡZյk"a !ϏѣG'j۷-Zl>ũ?|=NU\9߿r'ѣSP2*zؓԟZFFFh"RСC3㕻Ý4hGydJz7n8sO~wǸq_~ѵkhذaTR%rrr>q㣠 O?=%mIϞ='ɓ'֬Y]tQt=N98~iӦXlYL:5ĬY;쳣m۶6/*6nv[`;.FQn!;ҢEb. DrqqmsQG+\/[,+u̙3㩧J5j6=qW'j7|v+W J+1a„8c'(ruEmszZ#Gɓc1o޼شiv5i$=8w٠o~'tR5*^{"Nj . 
<<)%>"///X`AĆ vѲe8VZ=bmذ!ܹscʕf͚YfԩS'ZjT\حd_U?x{{3gNO>922RkC '͋n}7oF*߯IKK{':O?{.vc=6~ߤzDo;SHG#GJ*U`uqgguVnݺƏ_},]4Q?q{ѣcƍ|0U^xaouկSzYgc=K,)]uUqǡ=/bL8\nsbAb쭗GDիW/U'xbxmVg} .Kƚ5kbÆ QFSNԫW/:t 6,M69s-Y⮻kƌ3>:222^zѸq8蠃jժ_3}F3 rrr?> fΜ~a,[,!C_3Νw_7l5+W:*?ԩS9sw-˨1mڴYf^SN7|3y睘5k6RQj޽{{ѭ[2Z=z g޽cĈ) @3Hw)=#---nh߾}~ ٳg ><233S:ۖԩS'Ǎ7>lgddą^\pn\eQb'jѢE뮻JճC;]ƍ''x"ڵkGqDo>gʊZjE^^^|1bܸqE#":u^xvlܸq <8DqE~k׮ѰaèRJ;~(((H;O/UV믿>~_V^w^'N=hٲeƼy⩧߉YTuNxVڵvU(;\5;GA'7n\̛7/,X_|;wӦMq뭷?ˈxWcq9UW]UOLn-{"A۲aÆx^ݻ78eS(_yyy /$j:tmۦ0`@3|x/枚5k1gyfr!)i[jժ7|suY1zxWcܳ^{I'gyf4klM ;/u޽{tA\;wz(jԨQ2$?ϛ7/nbۼy5j=zW\#G,SO=O=6Lj#C=& tF\~}\tE[oh_AAA w߽S}3&/_^ovr)׿\nuV#Ē[/?W^mY>}O>)뗑ov{]vY\vedɒ⫯իWǦMvڑOm6KuN>8 n馛b޼y1gΜɉo&ҢN:Q^iӦe>sUV-.0`@dd?&---nh߾}~qݳg>|xdffxoi <8֭7pC_X{233[nc9v7>"/YZ~?yQJXhQ%-[V &7]w]g8ꨣJYp*UDnݢK.qAEvvvԭ[7֮]}YL0!zXvmវ0`@R[\SGeщk׮?S1rTIKK6mD6m*z`'o&M3fĒ%K'###ڷo'|rtIQnR ={ƓO>?||Sf8c3όC9T駟ݺu?ƍ֭233wqGzv@'ӫQFpqDǎ.KiqŘ1c-ZC=͛7O333]v,.처8qbF'xb|'?IJp5-[gq)lZjEÆ k׮ѿկ~ӧO/e[n?O:KwΜ9&j'pBTVc[rP{G+x-ӧO~x~E~""bҥ1Xpa\2֭[*UZjEڵE~EժUSb.,.XdI|W_իcӦMQv̌}'ڶm:5m4nƸbƌ1wXzuEݺuc};FFXi&ZnGqDr!Ѻuė ,(({7QUV<ѤIUV{ѷoߘ={va}ȑ1z͗J7K/4*U*֞x衇_~駟Ǎ^xa>5nlѻw _Æ aÆѥK 9?;;;+ҨZjt5vZѣ='OTov̝;7Q袋*U)SĤIR:ck׮EOMV֑ĉcZ>}*h`O"أM:5СCԩSD=uXk_v6m۶M-[VAlۘ1cJ*)RA{"-Y$<̲86W_̙3semSZzڵ4Ą ;,+h"}{[DD,~aҍ=LzEPV\XשS=?.H;E%ׯInرӧ4=@E*((HJcK{Ν[Y>cnݺ̌hݺut!*UT⾩_G}5o޼Bfٖ1c$֙4>=Z:ubٲeUVիJyuE^^V^f8cqY⋱aÆD#ء3lϜ9sb֬YZ^J*4{[DD,~aҍ=-H 4Hǧ~ZdɒVgDĚ5k_38#.XbE( 6ĨQZjőGC/1cݻ&T=@EԩS|G?0曨UV{_ڵkS2L0!zFvڕYwqG,\0Q^Ϩ /ѩS Rn NK7V.@'G~O=TzÆ 1f̘8s>3f$e˖q1Ao4h jԨ|M,]4M/bL<9o1xx#;;ĉGMԚ5k *Jkĉ|Dw4  ю=ب[n8Æ ֬Yݳ{]vY|[yVVVdeeE۶m_~1yK,)|͒%K/ѣGo+ZJbĈQzWcƌI322SNi=UzEPWg}vjժ0`@,^x֯_]vY[yZZv߿VC>k׮3Dƍ)S믿^>ő~Z*QKCMYe&LHԎ8hРAM}{Gq 'wf͊o&6ly<q '׭[7N:2kÆ ;,$#5k?ON92vMSA{UR%8s/,Y&sύŋ'?k׮]nv)~'Λ6mZ]6jԨQ7n!C̙3c=6n2./cƌI裏a=ZzE3h֬YΝ;{Ozzz 0 XfMg)0OsssceiӦcҤIz׮]oJ*y3gN̚5+Q;餓J*4' ^OƟ<0СC#===VZy6ms-.Y ꫯW^y%QԩSQjR.Ocƌ)RӧOLQLҢW^ѫWXtiL:5-[9997C9$7o^ ͛ӱcr~Ej+V(uÇǿDm۶CE͚5Kݷ< /~ i(/>aÆѳg/k&jYPPPV^zkOZl{WƌXW\9N> @'@=)zӋJcw]ǣ>7.|-'''&LuQ ;HAe0{2eJv駗/Rb]rܹso9QʊGy$Zl رc#777QݻwM-A#??vG֭ӧktAQVb7n\ 6,QUV P q kk׎w77|Ss-Z\rIz„ qUW%IWF;jygcKg-u9sĬYN:)222:*@x Ν[.^8 0 @RFEyɓA˕I&ņ :uG~G;v}'233#"b1o޼?~|˱vgѷoboZ*{nݺEe˖QNQFY&.]ӦMqŌ3iԨQ~=/'''~_ڷo&MI&{4hPg# /u!o6v: ,(-ӦMiӦmgM4)VwfϞg.#"=o[=Ϗ)SĔ)SJI&1jԨk׬Yk֬)R/ׯ 8qb,_=͛6m&Mıg}vhѢV\&LHzYYY3f};:*:z;̞={FϞ=#"b1o޼Xpa|7iӦYf4n8hܸqΪVZxq'FDĆ > ҥKc͚5aÆQFԩS'ի:t ̦MƜ9s4wi'S>YYY`"!`3͚5f͚Vڵv3Hv> }@  ) R@'OH`s߬AtcE+z ) R@'OHA> }@  ) R@'OHA> }@  ) R@'OHA> }@  ) R@'OHA> }@  ) R@'OHA> }@  ) R@'OHA> }@  ) R@'OHA> }@  ) R@'OHA> }@  ) R@'OHA> }@  ) R@'OHA> }@  ) R@'OHA> }@  ) R@'OHA> }@  ) R =9sf,^8rssN:Ѻu8jժ=}Aq}ŬY5jD>}⢋.z}G [s_';O('=P?>Qׯ_L0!Ə=\q=Dƍ _nݺK믿# "ANNN<W\ÇZzzz?'|24iRX_xq<;l^ݕ:ؑ}B9x衇b͚5.]ĠA{,rrrmF=:ؑ}Bdz> 2$Ҷ[nq衇׬Y/R'p_';OHiӦŊ+ ͚5]kOz) `ONv4AboFb}amM<9֮]$`G )G%|pfggG&M ׹7eI &Rl޼yu6mJ}f?}h=N֯__~e{GF>啺lIVUѤ^~嬊TnnnEP"((/]k-ʋ-ʃk-`WZZzTҽY[ qv:Ɏc+qEyqEyqJ\kQ^\kQ^\k[ZZRE)R('''ʕ+GK#;;;ꫯJ=ϦMfϞ]~%iA *zvS+gάJĵŵqEyqEypj\kQ^\kQ^\oQtLu(/!6nܘXW\=TX_L3 @EȨ`w͜%MyzZJ"--=( 6mڔ/v: T}>!j֬Xoذ=6SFRϓիW/~]t_{:=N6ysQPPPk׮fO}TABu֍unnn|W%dɒĺ~) `ON*OHjժEƍ/D=-ZXnݺsi @E )֪Uzܹ%ڿ P:`G )־}z޻tXparѦM'q_';OH>:4iRkoXw5j֬(`G )vGݺu ,ɓ'k3ԫW/.Dmȑqǒ%K kꫯF~b… ƀvؼ+u#T;Ϗ /0&LWT)7nk׎/"V^yjGC9dG r_';O(G6l:ƍWgee]w]v-,`G ;/|G[y5SO!CDwt{uP}4xcɒ%uԉ6mUV<H`w ת("8<Za,''d 3ʹѬ&krL)+;C7(Of *'@A3lجc a}ֽzu/ @0'}@hwey3iҤރ>x 3fHSSS v!~ջ<vMPKZ{>8{KM4)W]uU~ߥi766Cͱ#Gn :9j! 
zr(˲w@0{<3gNw>k?nܸ\pyꩧzK.3fL>dU .3iҤL:5/r5jk*z5n͛c=:vکڭE_2eYfxSQkEQCP>ϥwޛfFZד`IDAT-ulZrD@~\pcmɉ'c9ŗ/8dkVWEv\ya*`s~|8ֽ{|Ιg=z$Y[}_M7ݴ«{-"^zi7Uܹs<?pʲ|^jmV?(+[VZ3@{"gP-@u:6=NgOR7|s/fŊ%(ko%In|K_[eklРAo~mtDz-\r%袋QF'?IvK.$kǒ5 |--ܒ1ڕ+V䨣$+{,ӥK>y[ߚ]w5KϞ=x+:uj&N'fŊd/~񋵮TKPNMOuL TG$k~ _Wsa… [;t 0 ˖-ˌ32gΜG>/˛EPqON?Y+}s{޼HSSSX޽3bĈ֔)S|?̮\p-?:ZD@e??EQK.?c9&[ou'O|;7n\ݻgr-^ eY__϶vyN)"ӦM?^ }C袋REz葳>;b9s{^ m٦ Y(2tPf͚U r}9c7x*_k6lWP5@!gP-@5:CL TG>ڗ^z_rk׮I+V[oW,X I;'LQ߼~|_̯$+W򗿜|#>@swgر^k뭷E]QF8g^x!I2p̜93]vO5o۹K?#|%9k?yߟ}k=ޫmg?[63~ :r! z]]1~IR[;N IҥK~wUs?S)"t: I__r-k_ x `$+{]?ZÝ{w~K6sE]g4IN??SֆOOڻSN9U}LZSOj<LZ-uԇ\'@ɓ7)o{^/Oʲ}3fzݧgϞuSLi]^ :ZGΈ#^w=p@_J?|Аw]kNڊڿٳg׶ CjCf~kΜ9=3~ :r! z&*O׶yx-7Fd*^ :>lm{_Gn{V[e^y#޽{?߿8m2&@{$gP-@u:CL Tbwq-z׀z_JP Զ _}V[mZ=:!CԶ2@[ՑN3'PE($I>}yѣz߫gϞe˖u^ :/Z{^zj!+Z4I/_tDFJ׮]Se̜9U͜93=\k׮5jT[ .TKPNh=}(˲ݥ˺[9Zƪ`hk`wIV+Z5ޏe(w3k*'9j6 NMGkPwӻw$~r-5/\sMH>}-:ZD@ <8ӳgϬX"}-[͛;/,ӫW\|4hPŕt,rPk}ao;{nOgrAo}kvu/=zȒ%K/τ 2nܸ,]4eYf򗿜CfBTL TCIQ|]Q)2K,wܑ;u/˲vs''Ot0r>M:5 v<-Ծ˫LZչ뮻 /9Od^~ls=3r<o1/bcwqe].Ch;EYehyܜg֭[o13k֬5N->GO}*/B^+TEv?>g+h:Nh2{AX @Qe<={v[gvzPw˖-ĉOgj2bĈ >5ijjW__WygZkll[y9#.`^3ԙ$]v׿}K<3>y%Y\UcE#<2_۸z^L'ƓX7N' P }=ܓc&Ynu`ӟo=ӦM/}f=̑G#G;#72cƌѣG>SOMϞ=,`|\~˲p59s跞y晜r)yꩧ^w5$m?nL]|Ź⋓/ZoZ\u]},_Z/"[nedܹV?|%7tv2rU\'@uL {sON8$+ N2l3g?L8/#̹瞛nݺ>wG}φn|ʒ%KO~2{Fڭ[\tEy;ޑ$yoo5kV^4Gߵ[?y ֆOyz'7I.k=̰aòt9GuT.~z=lI c{-"wqGjz>:@! tfrՐ\'@Ի6̙3s1dĉ)˲(X=tPenƜy晵~?aeAXg>˛K_R5zUV}lٲ|ͳ>$̚5E?$}̀ҵkZߵjٳg笳Ϊ":*˗/[r[VgWEꫯnqͯ﯅Aw9s7gb ,7߼NI:rՐVQr}ݷWYB("w^o{̙S;ֻw~:thd{2o޼׿=lg+'xb>!#FȠAdɒ&ɻwqЇ>Tvqǜz9kAL0!^zi&LТ:3f̘z=6@VX}7 ,Esu7xc>}YϞ=.̝;7vX^y啔eÇ禛njG`-d:JrՐ^C `~kA{q6lX.]I&嗿eΝ,(]vmG>~ӧO񛚚0?яjr-9{6;/nmo馜{YdIt֭v~7=zhqMccc<x7ǵ~k:iӦe)"EQSO}0hy䑹3mڴ?Z^W4Ir|c$c= wm<\'@u:! P.. _"?V{?_"G}t|g>䷿mvu$s=1N8|+_Y# *{>$~4Zpa?#GK.Y# $cƌ7͔e˗oN 80g?p6SNM;l׼KIws,3y ֟\'@5:# PztL7t&O<Po .̽[ )tAԧ>s oyޗ<3IC欳Z罎=z_d_I6?쓉'֮9c{ESO=5wuW!I&hSl^yvm.>|z oxCz &I>}=@G" PNuTGz&*/|Rh@G#(r';S5а~>я/rOA 6_O?tmO>y[ߺk9Z 4I<ߛlߧO/ ,HV[m^۷^vmW`fUtTG:rՑ^ztleYn?bB󖷼eᆵz%I?SLYkڛy%Y 2d6lXn}\R:ܥEubqٻw:3N' PNuTD@ؤtt5tzyP;ׯ_X!Μ9sֿXvf :н{ YMCe˖նWtV2mO:rՑX-2ݻwȑ#+׼y2u؜,YKӫW[ls /ThUTefϞ^׼3/R|饗j}YڛU=RY1c&yjz-3\'@u:# P=}~O'Is嗯jY{ 'P={ֶ.]^׬kK=66lS۞>}z^|&@]o?$7n\odWzBF:rՒ\'@L TbwOnݲ|eI&ջ$a[l $IuB{Ço&Obvڨqڋ:*/=EQ,ˌ7nªKȇ?MoHsss?iF!C!/b̘1M=zc&VMmc=]wݵCeҤI~ko{'2Ց\'@u:eOݺu'Iy,X }se{/EQ{_ٽV[Uv/Acccoϼyj!UUVEweuYI/~0aB.\,3wq-[=Hg}vn w]zWZ@o{؜tTK:rՑ>ʌ5*k>hFysQFKY{hѢի|vmBhwUW]:ujáeY7#I2dȐ\|'?%KԮ[=EQ3L>mG>s\جtTGZr\'Y嫗`O[nȐ!C>կr9$Y^7?ZbE~;SO=sO>}w;Z{'/9'N|ͱ{uYg}*:6 N}Vv[^xۿO>u}:uj tw@C hKMMM={vmРAu:#}g}67|smNc5ˬYr]wnjSb:رc$EQdu}Zrn :Df_(xqЖx| _Zm{-CY.Zrn ` t6]]P@uZt4}TKP> ڀ> 4Ի馛6ٽxMv/FZ&*/|!EQlE,7 Njd6UHsSO#Njtw@Q&f@ .V@˲L3r5o޼L:3Njl 2$vZ萶fy.C^ `~7M_{{+Т(2eʔJuTK :#G衇T @& P-}[-"@(@uTD@v}t-IRe&MT:'Nj\n2|3< Ա"IZ .\.\3gfׯ_n466jYf宻3U_K.ӯ_44裏gV JӦMˏ$IQ׿^7jԨ?5jTgwt$2Փh?:H:EYe:o1]w]eŊ3zϻ{ '($SLiڋ &˟+ :(GydFQcs=;vl!ɓ'E{-CZrw[/:fϞSO=5=Ps/]4Ǐ{7^eY^t/qI쵞\{ks衇K_RN @`S\'@mK :ٳg裏C=T E_?|Y6@`;Re-DU~#mVMLZrL TO?$-ݻwπҥK5 ˗/7͜uYY|y`uyo[ͫV/2˙go}[ud:% OjN8CO|"#FH,_<_s[nɊ+ja7o^G|ĉsM77xF}^ߟn!'Nlqe]^z)w^sc\'j]]1]uUIR[m3Z 4I> . 7ܐÇV/23v,X.Oև?Wa::thƌ:\rIآ38#˗/\'jE_… ַ5+VHr!뚚ryWU-PEv}?Nz=ܓN8!IRELvijjʛ477'IFj+ϝ;7g?Eu/N=z^ lwl{-]4gNX_>lniVj62Ւ\'M5Ի4iR-PEN=1]v%~N<9rWf L4Z{Q?0hl喹Ks'?I-z]wO%\>}T]>f矯J¦@Gs/x}2Ւ\'Muw@sնjt1|;֭[HQy'я~4>l[ Ю<ӵ-"FZk|_ex w\^~6Z^?6Njulr7N5hsKnQcz衹ҫWX?|>L6jhoV Q!c=6^xavZ >#9c3k֬.*˲?L'@:6Nړzt`hOk$+{$4iR=V\'@:hL XBm]~g]wCr5^ttC V[mUƍ&>Ц{4iRmӟt:6… s)dĉI" L'@:ՙ@z> @C : f̙Y`A.s֬Y뮻jcƌie\'@(ʲ,]йx㍹-+V}ؘѣGwmsON8E$2eJ uztgΩz(Iu.]gs=syeu,kPZ>vn_Eש/^N;t<3k6˲?|93s'nj2Ւ5^ :S:>|x /SLsU*z-:N:)ӟZ_#}/$Yw .HCú6{r 'kTOZk\'@uG:U/_k.6^ L?=CO|"#FH,_<_s[nɊ+REʲơ7/ңGz> ԍ@uZ@g" P..ʲ86OW]uU(3Z4I> . 
7ܐÇV~/23v,X.AZ .5.k… 3qڪrHN9׽f/;/կjп9c8d:%kPuT(- T{ '$)"_}sꫯ矟,k+ǿ oȕW^~SLih:ץs=Wj6( $sL[n)"EQ'G?:'On{tdr3'P}&Iʲٳ[5ֈ#rWg뭷N2+*K-X 9t}<袋rme-u5OƎm6[z-sgoL3ӟҧO6{޼y?I& IZkNjԂ }-7tZw5'O}]w]=477ڕ(^{K/M߾}۲t(]vY8[ _Bƌq3L-Z^\ƶ.`h{o|*wqm6Er)d„ V~X NjTG:eO2ɓ'^~╤gGuT9$ɟtI/g>qV}{kIϞ=}(DSSSN??~>gyf>O$I~uQYlYx׊K.$]v6z-j,^8/^$0`@~߷555ӟt?Z`52Ւ5^ r2'Pߞd͗߫%/C=4ӧOoql:thd|K_BWyo~3ojv5rKvqz<2&3iҤ|e޽{lk=^iԨQ9S{[>L8vnsgm#l2z-:NjTG@{eOԧrw&YտۿsIcccM9sfd=#<('^xav+3v|s6k?I<0\pA{''Io})"~{kGY>[n%ʞN˧> Zt42Ւ5^ D@[ti򖷤9I^{k-GjeYrW^{3&OZ,l喹[5ڒ%Kw;3fHY~s3l.Zt42Ւ5^ K :)SdY5)^y[ߚ,Se?uBk׮>V /Yf!6S=X,YRN= ($A#G赎67o޼$+CC}ձVaZN}: ew#[2dHzk^[n}g-Ϙ1choZt42Ւ5^ ŋk[lz]ӻw_ K/$Y`] О,\=ӫ{ f̝;7I2 =klc_Դc3 hd:%kPI`hslٲK..zmJhkuM׮][74l+:2cŹ_:'O^L'@d |ul8}mnծ͛AEQ\hQm{P*@Gz`Mr%KԶWIt4z-Ä }./b,ˍ(e tTK:z-̓\'1'n$+͜93˖-KnڍBzo߾|=@{1p$+ӧgŊҥK\Z=$o'?+Voo:$c,rL'@d ?NgO 6ܜ'|2^n/ި-\0/BmȐ!5@{z|׽fwgQ[ti^kРA5@{_|1guVkϡCː!Cr9>?3lذ̝;7?x&N3g֎9ӵk׺=@Zkԗ\'@hsCMϞ=dɒ$߿Phws7eʔ65@{;1˖-K< )l96~SNMssstTz-+3Z`9t-Ir9=~˲̝wޙ . >lM7~>}l،tTK:z-h..蘆^ jN0{?6dm~Եk.^kĉsm_~{̝;㜓O>yիW~dOY{s׶eL'@d Nkw@t衇^?a>zk{$+Wsxܼo#&M;!R=MPJ?2dH8(%K^wC9$={ŋgԩmڴfBZkԇ\'@h׬PSN9%tXz-.\Xr-zNϞ=hѢE x9eY(,[,ַ2zxw}'ƍKy'tRth-^ `uuwVk0aBM9cǎ;[ {e̞=;sέ ($+C{&}Nu]sM7eٲeIjsr饗+_JneY,t%GuT>MR;@G" :&6 :޽{?wq[楗^JYz7)}{7aT 1ul<}3|zuԥ@G`Oh&6`Oh .7O萺wAջ I`,]4eYn3֑@uZmO`݊rc P9s[n3eʔ̙3g*"'On:N P,\zя~%K>`Ӓx&ꮹ9guVnZ(E! u>+rm%I-Ze ]v%KϞ=\%@' :EiZd~eѢEUG38#{w}D:Zgɓ/g޼yijj1N; *hc_73fLZ… SEȡ.(]twiL'@d hN3'Pw/B~oK/BaŹ_:'ONsssR@yg$eYK.җ$ P2Ւ5^ u>YhQʲ|H<(eY;d„ >_|1kRX^ m-X hv\@# P-Yږ\'@o1_{,xO~2+VXzu/Wez-5^߾}k}c%L'@d h{rgO.y|_0/2FGxd]vܹs{̓>dKw9guVz]'|9묳\멆;,C 9SO̰ajĉ3sw9|vZ؜뮻ֶgϞ]J:NjTGP N3'P^zijsꩧÇgkٔ)Sr̴i3pJh̟?4?s9'ݺuKs9s<VY;sgʹi&?@U{?sɌ32} 4et 2Ւ5^ rץOsss/EQ(x}-{~}{_ʲ̴ir'gٲeU >e_׵ȑ#կ~PX(w37|se'OlvAP]㏯_q+@uZՑh=}#<ŋ,$ɉ'Qt%~sϔeSbtz=XΝ[N>W^~>eY{͵^ۖ;z-j;6#FHYk%tx2Ւ5^ ZrcO`{'kۍy[/_5u%gqF+o]}յc=Vڵk<=ŊyG93.)kT!^ziv}4773w,XޥtX2Ւ5^ ZrPgܹI(2dȐtEQԶ.]ӧO,X^z)=PFٶEJZkzdɒCIϞ=x<:ujvm6kTo/~|k_u]GO8 {Gݻocƌib:NjTGP=NgO`[xqm{-X9z… SEϟu5 … k[nZٳg-Z(ֹjVccc'x"I2e!klݻw;L81O=T-Zc L'@d 4:6>Mw޵ӧO 3fsWwx饗ZY!@ճgezN޽hѢ$̙39f^j۳gne^ z/r>NExY=VQ)r1NjTGP=Nץ[o]~݇Z~G9Q_άW&]ݠAjۏ>:ǜ5kVm{Ŋ}kTk޼y9crw~eYz>@g$ P-Y% : .|vy$+͘1#˖-KnZ3|/IY0aB/^beM4)gϮAjj`3N;%GbŊtrv-jаDӦMˌ3jV~*`sօ^'x"EQVm۷o8.ׯk!GZkTKuL lr;czE9O|<f1{wmZC9${\ seesMO~7n\z3bĈʟ`s}ݗ˗'I- O}Jb2Ւ5^ :rgO.>57443ΨPe~_f̘1k2jԨ|yGZpim§Zlٲ|[ѣsf}{7.?V"=餓ҥI@?deؘN8t2Ւ5^ r[%P],L4)&MZ~9#j/œ_W[y|{M ?Ù &dڴik3v-~ܜٳggܹ)2Ijc=v>HP]&Y#ٳg+{ޗ)2%tJrSeYֻ+VE7M9ӳ~ջ4NE`>=a„<)"Iҷo߿7hܢ(":ZDfeř9sf,XKfcZ}٧:VZ *A:Ç@WYzQeȔ)SZ]@g# P=YZGu]c=n!Ǐ3<+VlXEQdmX@6gΜr-?~|L9slXz-Zۘ(OZkTO`%K_zeQaveYK/͏~,Y >d:%kP@uUD@],Y$s׾ԭZ<innYgoEU^ Zչ;]@$ P-Y# :&n򗿴xY^Esׯ_zY2ڥ+"v[z,3`.z-VkTg.S@uZՑh-\0_-[V[ #gֹ:)_-ZTF38#{w}hZt42Ւ5^ YC :{7MMMߏ:ꨜs9. CpZu衇梋.J.]]@@uZl|;6>IRev38t uktTK:z-6g .|^~X @dz뮻ֶgϞ]J:@}455/秩i3fLtTK:z-X?&6#FdРA>}zf͚iӦevwY{gΜ91cFOAջ,A,^87|s~_ginnnxk' P-Y6N ץ'\я~TJ:]㏯_q+kl&Las=7=P/_,7/I6NTGiul}u8tPʲ-ܒ%tcǎ͈#RewwI^ ZǏ '_|q0gQW[ت GZkTK`utҜ~?~|GN;-Æ wi/cfʔ)ihhI'O<1}wi^ /b?̟??EQ, 6,vX s9;vl sĉ3sv)|v$9#\L'@d \'@,se?A.](2|߿7xN;JڟKk_Z$IϞ=szݻocƌi*'@ۻ rWBr9[nIÇ׎]qojזe;3\pA}E}'?p`=tTK:z-' :&ꪹ9/rgW}SLi:qFz$Zm,ˌ=:K92/[zU-ZN;-w}wѣsWVL'@d h;rP駟i'x"I_e1:_~9gsw'Yڐ_kF{̝;7Oިqzyޗ~:{ouQmY.@# P-Yڞ\'@uw@ /cXx/˲?V7o^9}ݯkm?cնvMnĉ)2>lP6r}e嵕HO?z. 
CkTgϞe˖޽{gѢEI3gs^zնgϞ :&NjTGPNRg/g={̿ۿձOeƜp ukTW_ݠAjۏ>:ǜ5kVm{Ŋ@uZՑh=}\=\}On\@ѵk$+{vءŊY^ :;S?1cZ[ &d9޴i2cƌE$ׯ_ tTK:z-u>Mn.Zjhhc%^ :;s$˖-SO=9Gmϙ3'^{Z*2^xam;Gd:%kP@u:ZD&{$Yk/a.de5s:Wбؘ޻]wq!޽{(Z.kL6-'3nܸڪ[ouFQ3G2Ւ5^ :rgO`:th$I͛'ֹ"c]vdW9eY(,[,ַ2zxw}'ƍKy'tRtXNjTGP-N:.>OԶ7uc9k]tQZ9kO0!ӦM[㜱cf}=˲LsssfϞs,$~cݤtTK:z-u>8蠃O|"eYfҤIg>%KԻ,}{_,׿za3ptMsueZ㜆\z9#[|^E-*,я~4w7IL'@d \'@/Ϸ477gС'>I߾}]@bŊwy(7)~zoz6>hn>_߼cEkkki}'.R݊N'@~t #kTN()]1w܈׉ #GAE}}}M$暲 |k_ˮN---٩W1zNg-'=@ȗ@~d-*>B455e7a4Ϸ$MH$fΜY|V)9ke-Y /]ZTPg,ZT#Ͽ#kG(̆'bP^@~d-g#kPT1bDnktT62+zzr)~~$q5:@%6r3uHNӴ4z]=P4-zB+$mduE1cƌX`A,^8VZ5&LdߪUg Ē%K:;Pd-N'@t #kP$MӴ!mܹ1iҤ%7s2LP/_w]̘1#JZOY j /]ZTz?q믿[ulL2%{챈Xo[s9ѷo#ꫯ9mmmY5jTbȑqf?ScvȲ?s~n'?ɨ-$@rhiiM0tK ?@ D6 ,VZgufmjjcn3gy=X477+cذa ?%Kd?8scms=7(kiwyg\x3㦛nѯ_E*@ȗ@~d-*UM=O[[[O$I"I8-mo{[|i477gWij!M~kַL$w;?Ƹq"Mx|ct F _d6ܓO>˗/4M#"SO:555~7ƌiO?W]uU9GvyXhQ8NӧO;ciSLnt;F _d6ܬY8x5kYMMM|K_u'o]wuygɮkkkk׮mg qgGĺuWgHnJtK ?F@[hQDD$I#GM#I$ʕ+޸q_~O""V^?&̮ϟ7pfJ4.:_HtK ?F@?{}or!m]D{ ӟ4oO?Ve˖}GN5jTvr<@OT__x;>&qѷoH$+"\~dɒ83{Ͳo{@6:5ȏ@%'P;,8""׿u^7qĸ曳]6I'7;wnvaY[n]wu{֬Yzjkڴi1qĈXfΜe tK ?@uF@a/^Bz4MK/+"VXuguVz꩹ Н<ӱzꈈ>|x 4h{V\7CVRxI'_bm*9MMMY&*G!`t:kY s:eO[hmm;3{hmm4Mc>QGnm#t[O=TlzK#t[455u%I N|GN|ȝ*^'@I P:=T}@ʠrGIɓ  /rJ$I\s5Y j /]Ztw6ʪ%$4M $I \0uNPd-N'@t #kE*]c1qĈXIN<*N'N(Ç=@U0*ʕ+%"٨;ȟ@~d-ʧ >}eu]w=@zꩧjZTN|G)z62'om񶷽-F]8=N'@D]CEPO'N$IbOP=d-N'@~t #kPN6]+WXWR|d-#t #kPN5EF@$I#zcO[IӴNͩ+z~ŝwY=^'@l t 1bĈ:WSP l ePW1gΜcڴi`0`@3&>ѣ}{^swܱoW+W4M#Ix㗿eviqg>7@(>Wn)$QFm #&NW$I"Immm,f0aBTN=<]{[ .իWgE4M7Z'=:@U(M]zꩧ3gNw]=O>r8c _&M$I-Pt:(>\|+?'={v}ּy*PYfe555񶷽{o戈H4jkk.v-"" _ף5?GD_ַlM@ȗ@~d-buF@ٕTq '@1ZZZ"b]>~{xge qY!_gy&ƌ݇N'@>t #kCt6r , t@O#ktKf׍7o޼xuQGmhllŋGDij> xIG ?@(>\ >@~d-bŊzK?cx$cǶ{#B… 2'@w ?]Z]Ot6rq]w=@UHzY 544d˖-kSfC#G{o^ =N'@t #kt=NyꩧjZhllu/g'у>I57,n4y(@~d-bumvˮWXӦMgy&A5[[[SLt6 {ʞ^z&?uGD?^{?~zȑjuW^qQGM7IĔ)SSTp 6M7wqGVobĈoz]vu~j P:}0aBvmzH4LSL4M#I83ɓ>H45E;iiN8?~|X"H$$8 ^'@i7}8qbD{IXF;. \pA477gO4~zq[\㷿m,Y$";,ϑd:Ol \2ZZZ"_'fP@iƏӟ9Xre :4{隷)>CJkY 4zcO뮱뮻v}A(^'@=T}@ʠ6gҥqǴiӢ9-ZK,4Mc})zDA`(-K/4~믿4$IbŊ}gv[DDlq]w>/@踚Xoqqu]˖-4M;SO=544MW^o9M P::F=X$EPd-{'SO9sl$Ib[Us=c@5ȏqzgOfd-YlYy晱|Xwď6mZÛ>:[oh(#]Z P~qw=@U:+yeϏ}cQSǎ?""%^{2dHYr5ȏ9z'@T__#F(z $ktί~ w3wzxBX:G4|hmm4M#" _BIǰaò/rIz'P{.nll}٧5K@O P:}?~DD$I#F(˚zʮWZU5^'@K1}7o^c=w߽=Y `뵵e559xѢEu˲&I ?(>;9,X1f̘xÆ {xG7#GƩ'tRTm7qyE[[[lP${~hll,OZ_ӟtDJ<@VPSSS$IW]uU;vM2%>Of|ͱn3<@d- P,Y'CiI MXfM\r%qgsύ5kDĺ{ic='cRBzogo.i믿>nAȕ@~d- P:}tC_'4M7*lXXXn!hkks=7{=yO==33gΌ]Mz qAe/ tj+&Oe#<t #kt=N=׿5nDi1">abʕ=XfMi?я-fϞ;5jF??qM7e^|$vy"~e.#k糟l$I̟??<~[ƽ'NuY.N?F]Z(>믿>"";1p{Q__}qiŜ9sgE]64tи袋bqUWE$vڸJ(~G}t:?x{>9۷f?xL4)nhkk$Ix9+ z]Z(MiZl+W~={㗿e?qI'e4!C}gq=\ioy[{)Pd--_<>яƌ3"Iujkkcwg}6"֕=wmH4ZZZbŊ?I4:555EJT1]Z<t#3gΌ5k[>n;e5wxJ%*P޽{W\5kdeЈu{.{X|yw+.RePrkY xz_<nd֬Yu]]]?M{ƍ՟wGFĺ3ft7@e4hP\s5/9'I׆ߋXW߿|_'ѯ_B5ȏP::z/uGl͛gv՟7xn5$q)Pd-ʑ$I|3SN9%?ĽӧOEmroCCCq! @5ȏP9::F˳o{t3ח"".]ڡt'@ihhN:)N:餈7o^,\0/^{N kY ul=t# ի=555[|f]oX6@NuH(Ԇ'.^CMSg$t@~d-3}t#o}DDisI׿36,C466vj @:B ?Y]va캭-f͚{߳{%\ҩ[lY̝;7{=rNZ@GG;'@72jԨݻwX"""M%AŻN}̙37:tw:݁t@~d-3}t3MMM裏F$1u8s/:th :4@ehkk{'リ9sf̞=;-[NG$I3faJ5ȓP::F~>hi^:f\>[o$I""b}3*P[n%.˜7o^DDiZD>]Z}t3GuT̝;7{;S?stC9Pid-b}{ߋk&+&IMGI(;]Z<}t3{w޹… SN^~&@d-xqWGDlT#FĶn N5ȏPN$! 
,uE]w5:8ãO>O3uGg'pq;um֬Yo}[ʠ(>B-^8""$QFN;T@=^'@dɒxꩧp@TY ]o 5ȏqz'@#>OGĺӴf̘QDC]w5^paq@t #kt^'@jbi}P^@~ѯ_H4^~hmm-z$R:Ft6 U__'xbDnz&N(Yg;ciW\qEL>z$N(\߾}/>VX~zx㍑iѣ(z+zv!nW>`|߈.,:=zt 4(zu8^zgOblvqgGsss5\$3fqBAsl TUVŷ#MH$""4-x2El nժUqꩧ#< P N(x#IH$4x{lѻwzz'@p@cT%Y .]/"hDĿǗ:th@t #kt^'@t9o<@ϣ Pzi':JgOPk׮$Ibw<@Ϥ PrK`hllQFE]G"koذauCCC@5ȏ>NL*-ޛi\uUqWǼy766?}mVF(w1~W O ?@u.I4-z痿ewyp-;_;=&J$vicРA ]Z9_$IOS.E%5ȏNl( n-44Mcر[,(L4)nH4$$I%Ii?&L*~e]V$P> P}TxDz{v]re?M4sL;6jjj£>7|sWI>7n\irK\}E%5ȏ/N1jԨv#""M0`@|߈{WL0!.ˆXw" b9MPd-|]r%{Ƴ>---q뭷Ɵc=b̘11hР&L(t #kK46"s΍$I-oyKi=Pv | .tә3gƸqrZK .wii̙3㩧 A ?@:JSS믿]ׯyXhQV>x׻޵ o}zNzY %I}uLE5ȏ:>H[[fGͮkkkck.^dIt_@֟^/(@~d-ut^]P>}u,X}SNu'g̖N׵^[PV P}T!Cds̉%KD7g͚5D$qlqŋg׽{.݋<t #kK45E@455EĺSF׮]~&L<9/^iFĖ]6Νd-#t #kPl PEvia"""M~/B9s$>}ĸq]9VX5jT>t@~~0aB#=L ?@~:JWWׇ?#Ihmm|qAE]]]<#dɒH4$c=6]롇ʮkkkcw_bZx'bɑ$IG5ȏN=O~2vy爈H$V\}ݱx~Ygŵnl-zY mmmi >;@1dȐX G ?@>:JWW㦛n뮻.nxbŊ1t?~|zꩱ;nq*֮]>#[-oyKv='5ȏP~zK4MW^7558 @u6jժ8cő$I{1tТkY `cz'Ps=7~_G$qYg?G:JcOps΍c=6.]z>=@ P6lX?XbEvizN$iE@י;wn,\0,YiFSSS߿ygώ3f7͘7o^$Io}s1^{ȑ#_~QWWסP2]Z P}TUVM7wqG<裱tҍ~~UWرc7y= .Ašt+@455E$}o}$Ibƌ%kY |:Jӱ-VnmA̙3G?QDDH޽_,knX|vm˲&@w$kt֘4iRq1k֬M~~UWm6^y啈6lX kY kut>H49s攼^kkk+$IDD 25+Y \q(V^injs^K"I? B ?@踚|vezҥO]wJI^{Uzݙ0aB|ߏUVm-A?4M-n9"wiL>}@W5ȏNʜqѷoH$֮]VѢEq9o~!CBӏ~XW۷o\wuwSkU/ˌQ P _WF$I˗//qqcƌu֓$sO</RL2%?˳SHkkknkTY -iӦe4sgƮĀbѢE1k֬c=2+t@~d-|uFU㏏O~IDcc=}i׾M$I'pz&kߴiӢ-"_ /y=,]Z P _җ;N444dKt"Ⱦ_|q|,7\@y./92K^eJ^JkY :JgO*}(n8W^XJqM713t@?="bm-˚Vʮʲ&B ?@uοtUnԨQq9SNGy$c…dɒhhhȑ#㠃qvmW݂PˮuJ{kkkv=p 5ȏPz'@1`8ꨣ⨣*z#kfРAiڵk+keZPNJ P PMMM+b%wme׵{@O P:}ycȑ$IDD?ZK.:$$Ib=~kT P:};"MH45+_J̝;74O>ܣT5N(ܧ?n"IH4W\mmm['>w}wvN;{ru&Ioq @J4f͚J,]4VXѩu;Pd-c„ vH4$>=3fL|_$IsΉ#GK/SL)Sd'GD444/~=ztlB ?@ut>=7xc<.&lh̙e :Zկ~wF$I""bêﭷ@iE]sL oB ?@>::(ĉkDl?DvJ׉';CL81͛Qtrl5xGȗ^'@$i) @Ypa|o@I8d-dɒbҤIꫯ SN9%>OD~`Bxs^'@=uƬY6:1~x[nmݻ1%Y ?>iO=T<# .%KDCCC 80FtP^QW@e5ȏu::&IK=p?~|]6;s\|3d-#t #kPlu PELmmmI&L}sEPd-#t #kPj3gNDDizSO=t@~d-*>ȪU"""Iqt@~d-*>'>d6"o#""MӘ3gNTY ]ZT|v}m瞋 O<(P#:?H4x  jZK˾hJ }T??N8H4?=PcTY 4W)=:O ?I_>u뭷\GAE}}}J$.<d-:uj' /uykgx~/@~d-W]ŋ?XjUvբEbʔ)^+MS% ZRf[[[IJkY :eO*cń SH$)x"!kkY JdO*vZ,]4"֟F @id-#t #kPl Pe?Xtiviq!G{ 80 {5ȏ@'@y㡇N 0`@\r%q=@'kkY JVS#b Iy睧P&d6"uccc}NP]d-#t #kPl PEIȑ##I'd6"Æ ˮW^]$G:B ?FUo{Fk֬)z$!kkY JfO*2lذ7n\DDq}<@5ȏ@%'@>߱lٲ'T62x;""^38#͛WTA:B ?FUO/8ӦM0zHӴ5Y ]ZT$_U#̮,Y/$I"". 
zкIɓ7(@7$ktކYjk\2͛;E5ȏyz+zʫ%$4M#IիWg;b:=yfJ4ZZZ:E5ȏyz'@zR|w!=@5:+ B ?@uFUfEPd-(hG(^'@~Կ@ΦNZgx}6@ȗ> ikk˗g{lMTY ]ZT|~}=#EP5d-#t #kPl PEZ[[#MH4cǎ-z$!kkY JfO*ҧOH$^4E:B ?(!C=@ՒW\qEDC .} >}T]w5""4W^yiPW_}5~G$ T4]Zh_MP>{xx zZ#MӢG7kY rulFUNȮ8 @5ȏ@'@9c}4Mk\xEP5d-#t #kPl Pejkk'?I~i\}я~4z衢Gd-#t #kP4MӢ|.䒈XfMx1o޼H$""ns=cȑѯ_&L(݉PoO}*""$3g<l@~d-buFU)+%#7#<`z:Y X t7 о7@TJ1!b]ɡ5t@~d-*>ІP^D62^{m#T-Y ]ZT$u4 kd?8 @ϥ >}@=T}@=@g1mڴX`A 0 ƌ~x=@ >z4M'iӦEsss,Z(,Yk׮~1zGd-;wnC9$ /B| _g}v 2$oǡˬP ]Z P}TիW/~kc,MH$-Z{FD}^D(I&W\Ǣ54$I"b]{W㬳Ί_~[A ?@yuNj/'xb|{ߋxDltݞhiiGy$<Vd-;,7z^xa̛7/"6.nX]fM|_ ;8l]Z P}Tq'̙3SG#P|-;vl[o5yY /^?|;vmii[n%$4MW^qg_]tQoϊ/++~h@~d-(>̪UO{Ç[o5}ѭ.*=ɮx\Nd-|<ܾ{7k׮r8C\1f̘XG;7tS9 P:}T_ /d%:*??e]w[֡Ϛ5+,X݅_~92dH߿{瞈H$~F?/~W_}5 [I ?@>:JgO*sfѣG_ Zmo{F677eFJ?I$}˖-Y7nܸիW駟.t #kCt6"1{H4"">fm:^>}bذa^z+Y ?+VȮ{}c͚5Y&;C7{_]]]5*{=o޼2M [O ?@~:JgO*2cƌO>1nܸlll̮/^\zݕzʕ7mڴ_~ˮ-[Vq P:}TGDD$1rȨ)φ'mmx@O#ki+-"e}gknʑt #kGt"U___5,Y]oxZ@O#kgw̮-Zfyţ>IDDā5.\]ZA ?@~:JgO*2hРz%ֶI[,yMJѣ&+{^wu3iҤXvmicǎmwKܹse P:}T!CDDD1{N|N;Tzݙ;H44_q%c1iҤ+;cƌiw3fdkEl|<t]Z(>Ⱦuuu$Iiv[I̮Z:#"u饗㠃|;f͚H4$O}S[\ή 5ȏ/N?gTg&=X|͑$I$IrH^d-|;6>яfψȲ׆{Z~{gg5ȏ/Nʬ?*IxW3gΌ֖=lg>Ǹ݊o|~6jkk#MӍ~i?>.첨mw;fϞCuf]Z輺88$I⮻N8!رc޹sƤIbҤI)Ix[T4Y _җ#Hy /Ċ+bС1~:GE 9I-ϟ|̞=;"";Qt1f̘{"b]᠃^zŋ//Fi{GW~"kkY JdO* /Ygf 4ѣGe]Æ *t@~d-*MMv)~ʼn'BB$zO<1n͐5ȏ@I *͙3'&Mw_<#w19'>F)'Y ]ZT}0/>.\/޽{ǶnFr(@d-#t #kP}={vnk><^'@il )$)IČ3ʾ.@(M]bʠ 967T P N}t;?pa}]A(?g11cF,]4"֕CGGMG(^'@i6JSS&'Yu$Ibƌ]{ liy(GHD(3 B$*Я $AQ B"4(HJy  C=d^ 4uը&V$yPJSmmm 4z׮]9N#k{-$ T#E@ũ/\>|8I\'Pm}뺺'nr@Q T͛7ѣG 4KF>q?~$IRwܑDIV>wcҤIwވH$n2dHTN=?ީΝ;GDi$IRw޼yEutO w7o$I:uoaЈYfESSSQr](kO~0`@K1a„'.r>q W!z1|hjj`B y>ܵ=! U5y@'A]1[l[ZZz֭i=zNP.5Ȏ*I;)6lX$Iҡ{/{}wԽ^ YkP-WJѣqĉ8}t3z7pCՉA/YkN'!@ʩ%T{-Ұo߾Xzulٲ%mmmS__Ç1cĔ)SoaR@vJ\'S Pfbk~-ϟ6l(Csc{__ˑRٱ( rJ_=\8qM+N$IwޱhѢ{Gs}%a۶m1mڴ8{lD\ w^7-}=zDkkk?~<~8|w=VX#G(cr]kkkL0!9Rv&I<@L<9ow_[[[l۶-> O4nX~}tZ~+eMk}[dI,]0`@,^8Ϋ^cΝ1{8x`!P:sxgutO wMMMqH4k׮\;wpP5 mmmEYutO W4MEY}eMJ' u>\ 8p}رcG۱cG;v𺱱KT NS jԨQIDDĂ … 1jԨ.P-:N'nݺرc#MH4n/b\xCiJl޼9$$IbܸqАE}knnH$4O?4L|UƔ)SbՅ5jjj9*\'@$iy Έ$Ibq=m}=zDkkk7oGĥߓ$iӦżyr~ʕ\'@)J /֭+: ?}O1iҤx2rutO\2.\gϞSNzn矏G} P::N'Pr:~aY&=c1u0`5:utOOs_ɓ=z믿> #F!C=&@œw>j*O(EPuyT?#g+gNl%iyTaÆE$IݻsJ\'@$( rQ \Y韞! :\ccc8|>|8)@9ȖO s6ll/ƺuB [>~XdIݻ7"p{1~f&r(ƍcűgϞB3IرccΜ9q9&@œ$mٲ%x" WA뮘;wn=:1*\'S ]vśo6mA/_>d<ѫW\rQ@9sL|bŊ8uT\>8IH4uSNӧG>}uOikk>(-[ǎkGy$f̘yZ% E@.\k֬KơCAkjjbĉ1s 6,$(AGs̉Çg^ze>@9ȖO s FDu$ݻwgr! _C}wS# P<>566=! 
$Uo ]VP }@("P E@'O(EP>}@("P E@'O(EP>}@("P E@'O(EP>}@("P E_+RB~IENDB`accelerate-1.9.0/benchmarks/torch.compile/imgs/speedup_factor.png000066400000000000000000006632711503574341000251650ustar00rootroot00000000000000PNG  IHDRiu:tEXtSoftwareMatplotlib version3.10.1, https://matplotlib.org/so pHYs.#.#x?vIDATxw7;3t@EPqŬbVT kbZ\p(*&bvTP$ |x9sVխ=]Uc봼BY[A> }@  ) R@'OHA> }@  ) R@'OHA> }@  ) R@'OHA> }@  ) R@'OHA> }@  ) R@'OHA> }@  ) R@'OHA> }@ T*͛~xBߏM?&/u떒aK2bĈ[6edd|PSqLn+Ǝw^B}e0 lz8p&;Ɛ!Ch"JڹƍֳۤgիWMe' [N=3ѩSbͶqm SA`رqy>^}mJt%?~k&---U5k֌5kFFM6Qz >+W~:~Gڵ`"ʃyŀݧB ;rJ)(z(v'GӦM`v T e=[+W'|O(a\#Bͯ.ʥܘ5kV̚5+ {Gu]Ѯ] ɮ:v(JYL2%8xz(5zt):w^ 6LI6n?pDD\{e; A@~ŕW^Yc[nݺEnzmBNbe=)ԳgիW˳bŊ1};vl꫑t݇~8ڴiG}t MmիfF[3 iӦk( e=Yfddd[wq#6lP@ [j/8>|R JO ehٲeeq)ORYlϟӧO իrѠAu]u֑V#EPBѣG~ F^^V9ӧOo6.]QFhҤIi&222zDRxob+VG-bw+%F'e"+++>?~||7`̌G*UFѨQu]M6qD֭Scƌ)SܹscʕUT N;{g|^{d5nܸxbĉ?ʕ+B Q^_O<1֭[~k׮> >obѢEz]vlv{Gx`deeŋ//R̜936l'tRѰaeee_|_u̚5+fϞ˖-իWGvvvԮ];ԩ 4Gbڵki WZtaÆEZ~Wڵ{E81;cƌ=zt|1k֬XbEEc::u *leK.I˖-+V_Z͋?,リ6m^z3.|.6lO?t<z{u]Ѿ}]7s=7ƍI~{,Z=kԨ7xcuYEiĈq-lRȈ>@ۧ☌Xpa[;ĤIbƍ6"Zjqgą^X_2js]~ӧO/1;s۷o|G޽sE-lM_[s۷oׯO=ȡ1rˊ[yyyO<k׮-vo}78(wdϟ ^{-rrr ]ΝW^ѡC"'#">}]zyq7+LpGśo7T/Ν;_(sTΝOvX?"=kSO=Gyd%0eSو>N<|F|#~ SNza1}8Swޅ c=6iɉn!B|F}K/TJʰa㎋^zP!?֟s9?9֭[rrrs\~xwO.Pb2Ϗ8 zG.RgO=/秬mذ!z8cg-tg CO=u{o#~v_'|r kذaqꩧu=X#knժUqmV;wܸ袋bذa >8s |衇/bQFAwuW;Hɉ#FqO>d LYcߏ;.z|FDqW z͛7/N=xg JJ\p/N)Shs=#D^^^}wO>.\ݻw &߆ kczѧOb_~;R6SiZti~ɬ[.x衇Rү*?})tZ}Ct&(жJ*ѣ3({ŋ/#Fi曋q#;ᄏ_ݻ95ꪫ_,YYYqĉ u"++HM7ވs9'/_e-333?8p`BO>3<o:+>Mw6l(_~e\s5lrʸ ?,d/3)]rPs΍ݻǏ?:իW}74i 4XbE,\0&O˖-+y睸[ Zjt9222vڱtҘ={v|IYti\xG͋q=@y~XjUeu-T7|3nƤA?6瞱;Gݺu#///,Y_}U̚5+a7m 4C9P :4ߐϺu8#bwGfff̟??>xbѢE{c9P/)}^x! 4;FƍJ*hѢ:uj!m&M.({bOFDqGmRVZ~ՋիWܹs>5k$Xvmvm1bĈHOO/L+V͛Ǯu֍ZjEJbժUO?ɓ -ۄ~yyyqF.]~z||o5jsLѠAX|y̝;7}9rdkO?4|ӟTʋjժŮ͛7ڵkGZ"777VZ}L<9@#FĮ^xa] 1;f̘6LKK6m׏ *ŋ?O;>v횒2 deee]oggϞo6h1jԨx7+-Z~{իǵ^rJԬY3鶏?x 2$6Y{{.*VXy^zq5DnݢJ*IӧO|' ]v/MkCq7^{t{/K,٤>gΜxg/./6lsLy _o}le]7|s7.vX4jԨXsk#?N;EZZZ>hL2%a{W4nx>}=o޼8yiӦx4%cK8fMfѣGIC⭷ފ}&c֭/~"PM0!8ΌoN٪UK裏\r̙3}M깹q뭷믿^࠻1&]vGG>}z5j(5j{w\~ѿx#ϧ^z@.I}Q<3I5m4n8C~N6-M<97\F5o\sMwqI֭<y䑄P3fİaâGEe]ƑG{gsߚ>}z 6,p~ưas), W^|6o<8CzYpaoĉ իWǍ7ޘ400===.޽{c<333}4hPdq7ȑ#wfoիe]ݻwOwٲe׿5^{eN;-}M[j7tSt%*T_|zk|333㡇>}ED\u9a6YqѸqСC\~ѧOx <8=hӦM*+{t5vkҟ/O?ǘ1c?vm>[NXgN:Q-ᘽ~}Mg^zi⮻J8Nz<m(}̙3*U+^|8C6s@̕W^of<#Ѿ}[o+V$ԛ5k#G??ƍmO#"vO=TԪU+a m)<@@ȈlժUѷobU*WrJDž^;fC>#~9cqW$,_re<%5vٵkFg#Fk&tZZZwq1tnGhѢbP^I{f~kڵqGvvv² /0^ys-Z;H%K r׷oXzuB[nn`h5⮻/̸ے.;bȑqݦM9ߟ{ժUgϞ3k_, ۷z(>ۣcǎ9wCMz#lr|={7x#=͆|FڣG>|x 6,<_~1w܄zݺu^:nݺqWK/t#333-[??Ɨ_~9.|gׯ38#aYVVV 80 kOW^y%:|W;tC Iތx-߾~`tII.LbŊqQGW^*ZnuiiiqASO=}IX?''/͖p u֍CƟ~F.]瞋͛',6mZ|Ś -OcРA3mڴ~۬*^~zzzz4OS;*UT^sN;& _?c =#B^wuqaxOŋO 0 jԨ=.K.$aYnnn?%v1yqG$ L9sfq7oÇ#4_o@}6f͚1dȐe] 75J?䓿qѯ_=Ǎ7o9jժx 4Ȉ|059{ok.>qbSO=5|!I2g}vz {ob-阭RJ<ӛҔjРAMz.+{L>rkر1`b:ujY?mC=޳g8K|Æ KZ[cw,T;,iHcEᄄSNoܸq}.oѢEp 3όU&?BI&---nBԬY3z^^^JR@u&fffemK:fhܸq9c~Ş<+eq 'Offf< :*."֣G863<۾KP/9ND-R@sJ*ѿRJ]wuѪUI믿.?߿W\\/(pT˵m6=܄P?㬳*?쳿~o]q{Wv⪫JlСEnGDT^=N?|ׯ_?B uQID0xEzzz0HY^={ a6lz+%ږtp ѦMBm^{E.]]>H~!$w}KJ|Ϗ ;?"kv >iSA~ѹsBoԋ/8*W\~իW?7zu&;sB}ĈŜjSjդh_}Ub[1ۮ]"wq 5bŊ'OX^BӧO#_{Xf&5W^P9rd`zjoEcv#(i_Q""֭4d1cƌ"=ӊ]6m֫W4\ vmw}W^UԠmO?Ŝ9swKԩSĉ`ڒ̔,vѢEr"(Iy 4(?;O?=t^:OZZZ|q!xoFB/Ro~Im͚5 [lY󎢞sH&3(r?7oP裏^Tq>۵kPK9nQ4o<5jImٲe1w2'FsYf?UV%O9B/*W4dq͚5g/3UPѲe˨TRec "5d*x/\~[\[ޖt̞vi^m]mذ!fϞ]yr  >䓄Z:ud ժU]Ǥ=yDFFF=3TR;SB-33H~#ۤIwK԰aÄڷ~[֖tvܹ.I+V(rO4v0`@{x믿=֭ ?]lYL<9~QG~P?~|wqk.]J~~}v!vab=jE:(Vk-jeٛ|G*^Ŋ N :vX*WXqqE;'V\4p0?,X4D{K%Źy睓~*l+@9гgիWYA!Ɣ)SoT\TfHG5w=ʕ+Gvv&:+W,QYڒ|TT)Wz`$?~Y&޾}Rvvv| vۭؽӣe˖ AP3f(R kdA/Vz"hժU\r4o5kVlذX6nYYYw1Ϩ@yB|FDL>=Vz"u&Ԗ-[/Nh"Ҋ=K˖-$oMZOo6m"---6op|Z*~x뭷bĉ) ݒsrrbΜ9 ҺދHZJk>{WUV-,ɮϊs_PիټyHOO/Lhݺuݖz7x7wމ3f$|@Q\2~LW6cZjŚf͚>A… wuRʕ+B"~ I]v%!46 
(WUBL/77""nbmd6E}̙3{1cƤ?lm}O#rnݺxG'k%}OI-[ ^K[ѣ[n5k֔H-=mIly`UP ֭KZ/ /$///%6UVMIjժ%J*D%՞|ɸ{K,sK2U^=va8CO>;đGƍo3fw~%is ʕ+dSXe0l̙qyX疦{kwsֹދx뭷b-mYUnRU쿬ǖ&UA]k׮MU^=%Kĉ_~.o׮]~ѶmhҤI4i$jԨUTU&͛~xI].8f,׏M7&֮]_} ܸqcIY$BV^ީ*>7qƸbɒ%I7j(:wO4k,~W^T\9T ۜ{1nܸ}[6 ĭ6lH|]w;F####4i5kzB ۴nݺ.l}RUVMZ_reVZIYYY)jժ.\d}Y'sO%<馛 ⒓ ,֣B %f͚SLdٌ3wTR%Vf͸袋R6]v˒}VmAMlZX>ԩS쾥^ӧ'ׯ~{uQQRnK3/ %ye~Kö~׿!{Gvm_vvvF+7l}ƍz2W^ /!U3l.j[tҔYlYB?ӦM'&ԏ;*T(t+Vbr1 uRJ7N9ذa&{֭[l}:///ʔZP>.\ީSX}\24iR[/PW^˱;gfff1*[]yj*UJ*~M:wL>XbŨQFJomxw_ <8ihl 8V"---[[c(Cawߕ 3gNJϞ=$by$jժwuWB>#"/_^ܱ,֧M6qg'׮](^:rrrR2_a sƚ5k{ڴiQ}~/.>˫ 7|P馛5n8*VP9sf͐T玵jJ"f͚d׍z>J *}Wψ-!?YE>mV5j YlY4m4WP8qbrIØRaÆZ*vՂ bʕ1s̤1ڵ+Vߒ6yUV{N28#KYW\|W_p-Z$Ϛ5+Zsܤ1w2 ukٲez`š6mZ%wub.MλTG}t{.X 2331U٫TR4o<^Z{_K Mv 8w_NNNC^WX ,Ho{O4k֬=NZ-,"A6f͚ +VgiWiiiI9.kڔ)Sbڵ;qX~}>(V{/i}[Җ.]P+nՄ } oKU[矟P߰aC 4h>=nܸTVhZU&G]F*Ѹqay{I[%Kj}AA\HvŋKe^K ,Ebܹ_"o{Ǐ/?N=,[;A6nݺ 3ggY7tPB-33aH{&֬YS#G& ؼ7x#7l0vyb-i :uߢEbرŘ( JY[=z$ 7bnN;%wR9^UT)QJZqG+d_|E,ZX}_Zݺuy[ڒծ]X=g*ɮ6n/R?k7#GLٛoY*W\T__>~bLd|z/1 M'jٲeBmҥ1o޼"1cF|kp1DZZZB79䐨P!֘^x=W\oVB=###ZjUۂO?4&NXm|͘3gNB[nI_cIjj` gbLQF̕ YS:usMoܸ1mv?>6nܸ0aB+SO=5lٲxNJ綾^caP-q>C9ܟT͞=;>bLT~r!I+{XzuZj%_~"/$ *^ztܹH=3g,zL0!$ԏ<Ȥ?${ˋoȑ).KvlcmO`{$' +|8lU6m|pB}ʔ)1xFFFz qo]>`X"~g& (ˋ}Ɔ  ]VVVoKiJLj_~ez}wOwHOOu&gϞ]z]pQf̈́ofw38#*WP{bݺu) =hذaB}1iҤBGF+c9&O< .,t۷oesNdK.-t;"///5jĉ'P_hQׯ_z8餓} 2H= ?&}cSC^7l_.RƏ_^K,)""gUY_9f[fիW/ZlPg"++P^xxwS5VW^P㣏>*&t=Ă ?g}6^J-"<0aBӧD^eʔݻw󓓓|WRk֭K.$O^͟??.X|ygMʕ+'\\vm; oI˶mƞ{Y9Jsxg 롇qƥbr.*U$_x>|xNzCԩS kԩ~qG!͛7/B7~I۶m:uJx%"{ķ~cƌ)Tݻw>cmO`vI'%/^~{lܸ@=}ٸ;S;V]vq'srr⪫*rˇ~^{wƁP_xq\xkIzˢ^z곭TүK/u]fqE߄e5k֌n-c.]$6lӟbʕhѢ8bڴi)+Y{/RQY\5kL[ ꪫ{=:=<ӊ+bѵkxG ͩ_B}qꩧƠAbŊI]zu :4N8ᄘ;wVZȅ^ 6L;6nXn]<#$+T7tS, -Z>>xO #777m6vKդIٳge~{ 4Կ_E].I7׬YSӧ%\k֬IXv)ĮZ>۪^}q… 7Mfff\s5+$w& ,o]DD|vYjU\z駟ld{SNL,A6N*U$G^zi̙3'm'O_|qu]-[,QH{N~{~Gvvf̙3'~8c/'h}:u$gϞ'pB 6,i8DDO?w_\pIi߾}\~c[v)l3x뭷c~cő?c3&8cرI]ѸqX:u=aڴiѭ[裏"///+V<9MB>۶m[?&n?>+s΍Ç_]t*,4---ިUV²0`@t%.袸;cq=_p@s=z_Gyd_֭_.{O>(pӧE]~t)e󖶳>;W\qE 80VZt7Ƨ~ݺu{z͚5cw.qKE]e N8!Fk׮l SO=zjs9=ݻw0%K) KVX?pzꩱdɒq뭷hmY׮]cwcƌ;.3fL㏑/G~cѣG'ףGmJkbi֬YҰ1N99rdlذ!k֬ÇQGcƌ뽣>:*WP衇Ck68p`ޠAq-l6l={ >8裣]vѶmۨ[n[./^_~e,X ׽gyfi_ըQ#y8s(}q%DգCѤI_~Ŋ+b…1iҤXlY߸q{k5YYYqw;w]v,]4fϞ_|E6[n?*VX% 6}F^~}>-[Oo}TT)-[s̉/2!aÆѧO8ӊ5׮zhNX6o޼7o^fdd$ jK, .CFVV&QFŕW^o`~cѫWX~&֮]Æ aÆE b FݺuB jժXre,\0M+W,y_W\veI srr RتU'LX,$r!qy3-ZZjʕ+cѢE1nܸX|yB;#^z~ĖbŊ1`8sc gΜ^{mTR%gȈG ~}LRߪQFq'?ĀbРAѩSh֬YԫW/233~cFNNNҾUT?j֬Y%ժUqƺu""b / /P^pqƘ1c-[7xc/whڴiTZ5-[͋q%fWc=X3խ[7N;6lX²ŋǨQ6W^YocmO`wƇ~ǏOX'N'nGڵ*DӦMcСqe̙3f͚KdGyd{qw$'F_͛r̭Z׮];;3 s'ǽޛJG=z􈧞z* $'SVxǢVZ)o߾qiS/[uFG٤x m9x'ꫯK&]gҥr|u)~իW,ZG͚5c͚5 K;3"[ouŋ/tŋ㭷*pmJ*LT^=EB"~O ڳg8㥗^*QL:u駟+25uDc̘1%;\sM㩠ỿ^z<r̭Zkװ:cQҖO\ڵk7|s/I|ɒ%onxE)릛n*L~`[Q(k*U<:uT5k?|i&œm=5kÇSO=5Ҋݯ=N9x'b/~{>|xm۶X}EgqF_4jԨPU^=?_רXb MWn8s}f瞋v-e35h F||^[ . i0ѣcƌݶC1r8S:S:ub})v{g92>HOO/6u֍n! Ջ+W&˲ҔsO~QZb:ScذaQn Wڷo *IOOo=zʑz3<^zi)=\o˖-瞋C9X}EvX 2$veBmW^ye<~)+w^\E_~{n \v-ز*eddDϞ=;v,jԨ?x :4~ᤁ;jժqYgW_իW/)l5jԈ/qgģ>|AlܸP=222[nqiz:u7x#z2dHdffx۝w9.8餓B߭[T|p5*xc֬Y۠A8 .(t8hySbŸۣcǎ#fϞ]j֬sN\q%zSn{ѣGɓctXzu]6R߂rl}֭ݻwGydz^^^ 80|nvŀ⫯'|2):uc9&?\r{ҧO>qUWo}Y̘1#.]999QzhҤIi&:ڵkԨQcK,I۰a"͓ {nz1hРx v)z_ NX6/2f̘ .UVƍZjQF~c]vv-t-ZHɾO?1eʔ7o^X"rrrjժQ~i}q>쓒n=7n&={F^?6mZ,\0֬Y*U D֭u֑Vc裏?O?>~̌QFdddDV? 
a`999?$q#==Fs̪UO>c|X~}TT)jԨ5k֌&M.-Z+ڶm[nXl&~I'T6Ɗ+ދ?__?͋+WzVFhԨQh"Znp@n)͍c'|&MsFfff_>Tu֍f͚E۶m㠃N:EŊSn7|s+N>_tŋԩSc+VzE˖-cwJ*إnرG_|?cdffFnnnԨQ#4i-[Ν;9sfBmv(/(/l})QؠOm̙3cMkѦM2  ؖ؆UT)@ I+!`KWOHA> }@  ) R@'OHA> }@  ) R@'OHA> }@  ) R@'OHA> }@  ) R@'OHA> }@  ) R@'OHA> }@  ) R@'OHA>1"Zno.96lN8!=ye=l3ck)^PýPp viW֣A@իWǏ?3gΌDzebݺue=zgcƌZhݺu+ؚlܸ1233cܹcƍe=f[.~駘5kV,X J&+2WIm1nܸ/eǽ@+WyٳcٲeaÆkcɒ%1{7o^\2rssKtݻw&MlR8qb%_RY㫯'ɓcɒ%5jDFFF4m4vic=]vN;EZZZ)O#F-Rj;n+lٲeC%?EŊKd1cƌ4iR,Y$:;vN:YYY1eʔ2eJ^::'|r4mڴfZlY|g1eʔoo &dEڵcv;gܹsT\fEfff|_ĉヒ*TѮ]h׮]|Ѳe˔ͱvy<&{7^y;(y,OnnnuYW_NǎcȐ!7T9nݺ:ujL<9VXt]nVj3Z*ƍ&Mo6f̘KYfhժկp _z^֬Y~aL0!&MӧOk&5vEcۧl*UDϞ=oߤ=8∨Yf@jmŋc1|X`A[zu̘1#Xk׮:t>89aR=2+Wܤ{G׮]S?777f͚&MϴiӒ)VϞ=}cڴi,>쳈hذaqqgFÆ Kl_L<9y=zt_w͍sܹs㭷ފ~>gyfp QBbt?֦O?|s9@r/(PJ^xg7-ɉov{A|QASLwy'ƌ'O7h?~|?>jՊSN9%w͚5+љ#"fϞ""O{'eغ̛7/^y啄zFFFt֭ &$|w1bĈW\Q?c?>&M'Oɓ'ǚ5k36i1q_3fDNNNYiӦ+FEŋc 7ݻwu?xg~7(rs?_b]v)VZjEc'\&r@ۆ-Z?J}&Lɓ'ǤIo)s/F9s椤U⩧!Cĥ^W]uUomܸ1=Pdgg״ig}6{gUX1.Ҹ[62$;hҤIZ>`pӟǏ/}-]k As;vtsVL:lݺur!Eyw_ѶysNZנA.֭{7GA)=wܸK6T?~|tIq}qW^sN ?;Sz/_<⊘0aBzF{gƍ7^xazp 1`eggǠA.P(y?CuYrc'ԩSwI_|e0 $OsΉEߢE{)nQFOSOM<8^P4{AyxJ}oSL>;ʊ=z<ѯ_bG=#Fy7UJ֪UK. nvw18}ݣUVQ^YfTX1VXxK,K/4 4(Lv[L6-iiiѥKڵk^ѸqUV_>VXӦMqū߿m6:vXYO?׿V }/%r}9s仼]vѻwo]aÆѰau]>eʔ=zt=:~T M6VZM9sfNzQRoUnVbߨ[n4id7-Fʼn'7޸qhܸqq饗'|}']ʔ)3n"oֱcǸiӦ jժjՊw19昸kKI{[.x'(F6mD~M6 ӣf͚~x\wuǣ>ɗ"///oFTXH֨Q#?x692hذaZzL:5^y|_~1|ع9{G\ưa# eO?t7WX1N=_zzzqg=j|ϗV5jԈ/.?n;6n2kw 7P:xWSN3dȐ={v۸qc<.?駟NLZ/fž9NK-[,FY{AVgIUV-YUT+wz+0`@εN;w}Q+ߐdsxww}7ƌS~|u1^|Ť!TZ5x=5kV;Eɉg}X}HT= 7WK.뮻.eСCt!Ν&MJY_`˱|xtP4iҤ+V-Zvgvʕ+jժ.D۶m}wertZj^{m~UN4hPuY1cƌ7n'x"BLUVq}y=cڴi1bĈ_h߾}.,YI}Ȑ!~#6^P4wqGyQGrH<6KyTRm~m۶ѪUTDdddW_'pBTXXT˗ߤ+r߈8'ܤ>mڴ7n\tرXH -G}4Mj5jԈnT(^|r}/']w]J&:*kt̘1xhذan^.,j׮]Yqs=cǎHKK+rnݺ%}.]4|8䓋ԨP%c„ vڤN;4+PVZ|)_oHܒm۶%2c۶mOlժU1iҤB>}zzC)tjԨQI͞=;6nXGydz^P1{xG\sM4nܸ(mڴ)s&MgtYnnnH={A#"9BUơtYVVVG& u/(@ RͥyEi͍Ks̉KHEʊ̙3c…]#[I&ŬYx`TV &DGqD˾B[hQz۶m#=={+V(vdZ;${J{Ar/( ^;#}l=ݻ _tԩ߽ѨQB_ xbnܸqo>>nܸb?;v=rqqQ|||/&^z΂Jҋ/}ٌkٲeСcO209oo$i9aԴiO:aÆiӦM նm۴m67N͛7Wnݔ5kV{ڵk;w.]ׯ۷oɓ?4f=cj߾5j5k,ZJo6=.$$D!!!:v֬YaÆ\rz;X>}._lxLdduOU-ԱcG<[j $$IWʕm6n:xJ`DEEiʔ)?~111:qN8 |O wzſi&ڶm}|Oxxu5-///Jx^:j֬@A _~~4i&Nh8תU+}_#K,,F3>>>+֮];BrJ4,(YP#dA,(Y JdAsu5*x͝u55nX7nH0秥K*_gҥ ֭S:|S~~~ I0޲~~~ר]fժU, ,NX (888;ŋꫯ>4(͟?_5Ҟ={,^`׬Y v9q_]Gǰ0}WDfݫ.]]vvr .y}5Μ9>p.o޼6lC%) ܹs;\3=W:\`? ʖ-3B2g_v׸|ڴi?ò._-Zh„ pޡCԼyst@5iDw8X"I[lk;vX?_sfʹ~k\tIڵgH´dݹsҺOo%KXZ7&&FFRVhYݨ(YVf̘mٳ׺y> 8ЩmݺU-Zǝv +矆*UJNROU^=ùUVvHH>_;6,>zO=C,ym0{^|ykK|O>d0>8iYЄȂ}Tu. dAfƍZjU}*K,)C͚5ծ];ùݻw뫯vTTzsnnn=zt𯘘Yt% eoP֬Y/I+VL0o  ʙ3O?8n͚59r%"##ս{wܹZ'O믿Zٿ޽޽{kʕڹs7onIn޼{O۷owɓ'նm[ݼy"##իW/8pZ͛7'Yr,]TC .&&F|VXaIHuUfͲkl3f]'W޽ci]I:|:w0k[,fȾ (Ptɓv߿._l87`/^ܮÇ,X0Vj8M6%]v=kڵ_+W6' ,1 ~MȂZ,sf4<<\}YիAN[e˖51co5J2ԩ#uĉ'L2 Z~YPSO?ܦM|r5l0;ϵk4lذ!yASϯ9s͛ 9sưfttve˖)wvu%iF׮]3=W+WVJTpae˖MqΟ??S[n5ܙ>66V ,21{g=R*V+[lruuՍ7t)mܸQ;w4ܵݻڵ~'*T(۷MՋ/ʕ++O<ʒ%_/?֭[mzP=&>m<==UN5h@ʛ7bccÇkٲe ߪTRz7RKH<<>^דO>UݵaÆDQժUUjUɓGʔ)nݺ8qBu-II .̙3=@_|IɓG ҩSrDw4dÀ޽={&x Pʕsϩ`ʙ3ǏkÆ )s6it)Kj˒%Ǝ~;AtJrͅ jڵs 6TӦMQqFӹRJ%^ժUUHL0A5k֔GJ`O]5Dx# J,($,hAԹR; :a]xfS rᡱcW_Uhh\\\>C/Orks+VTΝQk=Y"EJ*N>]M6oJҜ9s vwwomWM#dA mbO2ҥK4ѯ_?]rER1cM8Mu&OOOcɣSN:yM6%8֭[ݻ{&ǡC&sܥKy{{o0`./^|hQqkȑ*\pfͪ%Jq:p>#:uʰaTjUUOrݻ5a$РApB{=zt&uM͚53}]hm޼Y+Vof=Ç?޶mZnJ*ivr}3gNsjԨvҥKT?vNʕ+=zh̘1 5l0 <8I=#F.\8uPZp4uYqcǎ1b닙Eiņs}I{<ǎStt G,(YPMydAM>IsA{W"Ezm3 Ґ!CԫWsW^U߾}5}$eԷo_ùlٲii}\Z~pU+Wn׮]m۶ _~]]vՌ3_֭ӤI }]ϟ߮^(QB... q._|Yv-@%_@FGGGkѪWΝW`wI`sҤIӧC@vi…ȍ7j髯Ç y/j߾}B]yȑ#3.^'&?\/_6_~Eof9s:ud8~}o׮] 2eʤyO>{˧YfgϞ6f_`g h"`ʔ)Y+V4רQ,3#T˖-n:͘1Cjv͔)z--[L˗7F\\>%/T޼yѳj*;wpbŊʕ+]u+V-[mݺUoT+**J'NTn? 
accelerate-1.9.0/benchmarks/torch.compile/regional_compilation.py000066400000000000000000000056551503574341000252550ustar00rootroot00000000000000# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
import torch from torch.utils.benchmark import Compare, Timer from transformers import AutoConfig, AutoModelForCausalLM from accelerate.test_utils.testing import get_backend from accelerate.utils import compile_regions torch.set_float32_matmul_precision("high") COMPILE_ITERS = 2 INFERENCE_ITERS = 100 BASELINE = "Baseline" COMPILE_TIME = "Compile time" INFRENCE_TIME = "Inference time" FULL_COMPILATION = "Full compilation" REGIONAL_COMPILATION = "Regional compilation" INFRENCE_STMT = "model(input_ids, use_cache=False)" COMPILE_STMT = f"torch._dynamo.reset(); torch._inductor.utils.clear_inductor_caches(); {INFRENCE_STMT}" torch_device_type, _, _ = get_backend() results = [] for model_id in [ # non-gated llama models "NousResearch/Llama-3.2-1B", "NousResearch/Hermes-3-Llama-3.2-3B", "NousResearch/Hermes-3-Llama-3.1-8B", "NousResearch/Nous-Hermes-Llama2-13b", ]: with torch.device(torch_device_type): config = AutoConfig.from_pretrained(model_id) model = AutoModelForCausalLM.from_config(config).to(dtype=torch.float16).eval() full_compilation_model = torch.compile(model) regional_compilation_model = compile_regions(model) for model, sub_label, description, stmt, iters in [ (model, BASELINE, INFRENCE_TIME, INFRENCE_STMT, INFERENCE_ITERS), (full_compilation_model, FULL_COMPILATION, COMPILE_TIME, COMPILE_STMT, COMPILE_ITERS), (full_compilation_model, FULL_COMPILATION, INFRENCE_TIME, INFRENCE_STMT, INFERENCE_ITERS), (regional_compilation_model, REGIONAL_COMPILATION, COMPILE_TIME, COMPILE_STMT, COMPILE_ITERS), (regional_compilation_model, REGIONAL_COMPILATION, INFRENCE_TIME, INFRENCE_STMT, INFERENCE_ITERS), ]: for batch_size, sequence_length in [(1, 128), (4, 128)]: input_ids = torch.randint( 0, 1000, size=(batch_size, sequence_length), dtype=torch.int64, device=torch_device_type ) results.append( Timer( label=model_id, sub_label=sub_label, description=f"{description} ({batch_size}x{sequence_length})", globals={"model": model, "input_ids": input_ids}, stmt=stmt, ).timeit(number=iters) ) compare = Compare(results) compare.colorize() compare.print() accelerate-1.9.0/docker/000077500000000000000000000000001503574341000150620ustar00rootroot00000000000000accelerate-1.9.0/docker/README.md000066400000000000000000000063501503574341000163450ustar00rootroot00000000000000 # Official Hugging Face Accelerate Docker Images Accelerate publishes a variety of docker versions as part of our CI that users can also use. These are stable images that Accelerate can run off of, which come with a variety of different setup configurations, all of which are officially hosted on [Docker Hub](https://hub.docker.com/r/huggingface/accelerate). A breakdown of each is given below. ## Naming Conventions Accelerate docker images follow a tagging convention of: ```bash huggingface/accelerate:{accelerator}-{nightly,release} ``` `accelerator` in this instance is one of many applicable pre-configured backends: * `gpu`: Comes compiled off of the `nvidia/cuda` image and includes core parts like `bitsandbytes`. Runs off python 3.9. * `cpu`: Comes compiled off of `python:3.9-slim` and is designed for non-CUDA based workloads. * More to come soon * `gpu-deepspeed`: Comes compiled off of the `nvidia/cuda` image and includes core parts like `bitsandbytes` as well as the latest `deepspeed` version. Runs off python 3.10.
* `gpu-fp8-transformerengine`: Comes compiled off of `nvcr.io/nvidia/pytorch` and is specifically for running the `benchmarks/fp8` scripts on devices which support FP8 operations using the `TransformerEngine` library (RTX 4090, H100, etc) ## Nightlies vs Releases Each release a new build is pushed with a version number included in the name. For a GPU-supported image of version 0.28.0 for instance, it would look like the following: ```bash huggingface/accelerate:gpu-release-0.28.0 ``` Nightlies contain two different image tags. There is a general `nightly` tag which is built each night, and a `nightly-YYYY-MM-DD` which corresponds to a build from a particular date. For instance, here is an example nightly CPU image from 3/14/2024 ```bash huggingface/accelerate:cpu-nightly-2024-03-14 ``` ## Running the images Each image comes compiled with `conda` and an `accelerate` environment contains all of the installed dependencies. To pull down the latest nightly run: ```bash docker pull huggingface/accelerate:gpu-nightly ``` To then run it in interactive mode with GPU-memory available, run: ```bash docker container run --gpus all -it huggingface/accelerate:gpu-nightly ``` ## DEPRECATED IMAGES CPU and GPU docker images were hosted at `huggingface/accelerate-gpu` and `huggingface/accelerate-cpu`. These builds are now outdated and will not receive updates. The builds at the corresponding `huggingface/accelerate:{gpu,cpu}` contain the same `Dockerfile`, so it's as simple as changing the docker image to the desired ones from above. We will not be deleting these images for posterity, but they will not be receiving updates going forward.accelerate-1.9.0/docker/accelerate-cpu/000077500000000000000000000000001503574341000177375ustar00rootroot00000000000000accelerate-1.9.0/docker/accelerate-cpu/Dockerfile000066400000000000000000000017511503574341000217350ustar00rootroot00000000000000# Builds CPU-only Docker image of PyTorch # Uses multi-staged approach to reduce size # Stage 1 FROM python:3.9-slim as compile-image ARG DEBIAN_FRONTEND=noninteractive RUN apt update RUN apt-get install -y --no-install-recommends \ build-essential \ git \ gcc # Setup virtual environment for Docker ENV VIRTUAL_ENV=/opt/venv RUN python3 -m venv ${VIRTUAL_ENV} # Make sure we use the virtualenv ENV PATH="${VIRTUAL_ENV}/bin:$PATH" WORKDIR /workspace # Install specific CPU torch wheel to save on space RUN python3 -m pip install --upgrade --no-cache-dir pip RUN python3 -m pip install --no-cache-dir \ jupyter \ git+https://github.com/huggingface/accelerate#egg=accelerate[testing,test_trackers] \ --extra-index-url https://download.pytorch.org/whl/cpu # Stage 2 FROM python:3.9-slim AS build-image COPY --from=compile-image /opt/venv /opt/venv RUN useradd -ms /bin/bash user USER user # Make sure we use the virtualenv ENV PATH="/opt/venv/bin:$PATH" CMD ["/bin/bash"]accelerate-1.9.0/docker/accelerate-gpu-deepspeed/000077500000000000000000000000001503574341000216775ustar00rootroot00000000000000accelerate-1.9.0/docker/accelerate-gpu-deepspeed/Dockerfile000066400000000000000000000030601503574341000236700ustar00rootroot00000000000000# Builds GPU docker image of PyTorch specifically # Uses multi-staged approach to reduce size # Stage 1 # Use base conda image to reduce time FROM continuumio/miniconda3:latest AS compile-image # Specify py version # Note: DeepSpeed beyond v0.12.6 requires py 3.10 ENV PYTHON_VERSION=3.10 # Install apt libs RUN apt-get update && \ apt-get install -y curl git wget && \ apt-get clean && \ rm -rf /var/lib/apt/lists* # 
Create our conda env RUN conda create --name accelerate python=${PYTHON_VERSION} ipython jupyter pip # We don't install pytorch here yet since CUDA isn't available # instead we use the direct torch wheel ENV PATH /opt/conda/envs/accelerate/bin:$PATH # Activate our bash shell RUN chsh -s /bin/bash SHELL ["/bin/bash", "-c"] # Activate the conda env, install mpy4pi, and install torch + accelerate RUN source activate accelerate && conda install -c conda-forge mpi4py RUN source activate accelerate && \ python3 -m pip install --no-cache-dir \ git+https://github.com/huggingface/accelerate#egg=accelerate[testing,test_trackers,deepspeed] \ --extra-index-url https://download.pytorch.org/whl/cu126 RUN python3 -m pip install --no-cache-dir bitsandbytes # Stage 2 FROM nvidia/cuda:12.6.3-cudnn-devel-ubuntu22.04 AS build-image COPY --from=compile-image /opt/conda /opt/conda ENV PATH /opt/conda/bin:$PATH # Install apt libs RUN apt-get update && \ apt-get install -y curl git wget && \ apt-get clean && \ rm -rf /var/lib/apt/lists* RUN echo "source activate accelerate" >> ~/.profile # Activate the virtualenv CMD ["/bin/bash"]accelerate-1.9.0/docker/accelerate-gpu/000077500000000000000000000000001503574341000177435ustar00rootroot00000000000000accelerate-1.9.0/docker/accelerate-gpu/Dockerfile000066400000000000000000000027641503574341000217460ustar00rootroot00000000000000# Builds GPU docker image of PyTorch specifically # Uses multi-staged approach to reduce size # Stage 1 # Use base conda image to reduce time FROM continuumio/miniconda3:latest AS compile-image # Specify py version ENV PYTHON_VERSION=3.9 # Install apt libs RUN apt-get update && \ apt-get install -y curl git wget && \ apt-get clean && \ rm -rf /var/lib/apt/lists* # Create our conda env RUN conda create --name accelerate python=${PYTHON_VERSION} ipython jupyter pip # We don't install pytorch here yet since CUDA isn't available # instead we use the direct torch wheel ENV PATH /opt/conda/envs/accelerate/bin:$PATH # Activate our bash shell RUN chsh -s /bin/bash SHELL ["/bin/bash", "-c"] # Activate the conda env, install mpy4pi, and install torch + accelerate RUN source activate accelerate && conda install -c conda-forge mpi4py RUN source activate accelerate && \ python3 -m pip install --no-cache-dir \ git+https://github.com/huggingface/accelerate#egg=accelerate[testing,test_trackers] \ --extra-index-url https://download.pytorch.org/whl/cu126 RUN python3 -m pip install --no-cache-dir bitsandbytes # Stage 2 FROM nvidia/cuda:12.6.3-cudnn-devel-ubuntu22.04 AS build-image COPY --from=compile-image /opt/conda /opt/conda ENV PATH /opt/conda/bin:$PATH # Install apt libs RUN apt-get update && \ apt-get install -y curl git wget && \ apt-get clean && \ rm -rf /var/lib/apt/lists* RUN echo "source activate accelerate" >> ~/.profile # Activate the virtualenv CMD ["/bin/bash"]accelerate-1.9.0/docs/000077500000000000000000000000001503574341000145435ustar00rootroot00000000000000accelerate-1.9.0/docs/Makefile000066400000000000000000000011111503574341000161750ustar00rootroot00000000000000# Minimal makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build SOURCEDIR = source BUILDDIR = _build # Put it first so that "make" without argument is like "make help". help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) .PHONY: help Makefile # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
%: Makefile @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)accelerate-1.9.0/docs/README.md000066400000000000000000000243111503574341000160230ustar00rootroot00000000000000 # Generating the documentation To generate the documentation, you first have to build it. Several packages are necessary to build the doc, you can install them with the following command, at the root of the code repository: ```bash pip install -e ".[docs]" ``` Then you need to install our special tool that builds the documentation: ```bash pip install git+https://github.com/huggingface/doc-builder ``` --- **NOTE** You only need to generate the documentation to inspect it locally (if you're planning changes and want to check how they look before committing for instance). You don't have to commit the built documentation. --- ## Building the documentation Once you have setup the `doc-builder` and additional packages, you can generate the documentation by typing the following command: ```bash doc-builder build accelerate docs/source/ --build_dir ~/tmp/test-build ``` You can adapt the `--build_dir` to set any temporary folder that you prefer. This command will create it and generate the MDX files that will be rendered as the documentation on the main website. You can inspect them in your favorite Markdown editor. ## Previewing the documentation To preview the docs, first install the `watchdog` module with: ```bash pip install watchdog ``` Then run the following command: ```bash doc-builder preview {package_name} {path_to_docs} ``` For example: ```bash doc-builder preview accelerate docs/source/ ``` The docs will be viewable at [http://localhost:3000](http://localhost:3000). You can also preview the docs once you have opened a PR. You will see a bot add a comment to a link where the documentation with your changes lives. --- **NOTE** The `preview` command only works with existing doc files. When you add a completely new file, you need to update `_toctree.yml` & restart `preview` command (`ctrl-c` to stop it & call `doc-builder preview ...` again). --- ## Adding a new element to the navigation bar Accepted files are Markdown (.md). Create a file with its extension and put it in the source directory. You can then link it to the toc-tree by putting the filename without the extension in the [`_toctree.yml`](https://github.com/huggingface/accelerate/blob/main/docs/source/_toctree.yml) file. ## Renaming section headers and moving sections It helps to keep the old links working when renaming the section header and/or moving sections from one document to another. This is because the old links are likely to be used in Issues, Forums, and Social media and it'd make for a much more superior user experience if users reading those months later could still easily navigate to the originally intended information. Therefore, we simply keep a little map of moved sections at the end of the document where the original section was. The key is to preserve the original anchor. So if you renamed a section from: "Section A" to "Section B", then you can add at the end of the file: ``` Sections that were moved: [ Section A ] ``` and of course, if you moved it to another file, then: ``` Sections that were moved: [ Section A ] ``` Use the relative style to link to the new file so that the versioned docs continue to work. 
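For instance, a minimal sketch of such an entry (the exact markup can vary, and `new_file` here is just a placeholder for the document the section moved to) pairs a link to the new location with an element that keeps the old anchor id, so existing links to `#section-a` still resolve:

```
Sections that were moved:

[ <a href="./new_file#section-b">Section A</a><a id="section-a"></a> ]
```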
## Writing Documentation - Specification The `huggingface/accelerate` documentation follows the [Google documentation](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html) style for docstrings, although we can write them directly in Markdown. ### Adding a new tutorial Adding a new tutorial or section is done in two steps: - Add a new file under `./source`. This file can either be ReStructuredText (.rst) or Markdown (.md). - Link that file in `./source/_toctree.yml` on the correct toc-tree. Make sure to put your new file under the proper section. It's unlikely to go in the first section (*Get Started*), so depending on the intended targets (beginners, more advanced users, or researchers) it should go in sections two, three, or four. ### Writing source documentation Values that should be put in `code` should either be surrounded by backticks: \`like so\`. Note that argument names and objects like True, None, or any strings should usually be put in `code`. When mentioning a class, function, or method, it is recommended to use our syntax for internal links so that our tool adds a link to its documentation with this syntax: \[\`XXXClass\`\] or \[\`function\`\]. This requires the class or function to be in the main package. If you want to create a link to some internal class or function, you need to provide its path. For instance: \[\`utils.gather\`\]. This will be converted into a link with `utils.gather` in the description. To get rid of the path and only keep the name of the object you are linking to in the description, add a ~: \[\`~utils.gather\`\] will generate a link with `gather` in the description. The same works for methods so you can either use \[\`XXXClass.method\`\] or \[~\`XXXClass.method\`\]. #### Defining arguments in a method Arguments should be defined with the `Args:` (or `Arguments:` or `Parameters:`) prefix, followed by a line return and an indentation. The argument should be followed by its type, with its shape if it is a tensor, a colon, and its description: ``` Args: n_layers (`int`): The number of layers of the model. ``` If the description is too long to fit in one line (more than 119 characters in total), another indentation is necessary before writing the description after the argument. Finally, to maintain uniformity if any *one* description is too long to fit on one line, the rest of the parameters should follow suit and have an indention before their description. Here's an example showcasing everything so far: ``` Args: gradient_accumulation_steps (`int`, *optional*, default to 1): The number of steps that should pass before gradients are accumulated. A number > 1 should be combined with `Accelerator.accumulate`. cpu (`bool`, *optional*): Whether or not to force the script to execute on CPU. Will ignore GPU available if set to `True` and force the execution on one process only. ``` For optional arguments or arguments with defaults we follow the following syntax: imagine we have a function with the following signature: ``` def my_function(x: str = None, a: float = 1): ``` then its documentation should look like this: ``` Args: x (`str`, *optional*): This argument controls ... and has a description longer than 119 chars. a (`float`, *optional*, defaults to 1): This argument is used to ... and has a description longer than 119 chars. ``` Note that we always omit the "defaults to \`None\`" when None is the default for any argument. 
Also note that even if the first line describing your argument type and its default gets long, you can't break it on several lines. You can however write as many lines as you want in the indented description (see the example above with `input_ids`). #### Writing a multi-line code block Multi-line code blocks can be useful for displaying examples. They are done between two lines of three backticks as usual in Markdown: ```` ```python # first line of code # second line # etc ``` ```` #### Writing a return block The return block should be introduced with the `Returns:` prefix, followed by a line return and an indentation. The first line should be the type of the return, followed by a line return. No need to indent further for the elements building the return. Here's an example of a single value return: ``` Returns: `List[int]`: A list of integers in the range [0, 1] --- 1 for a special token, 0 for a sequence token. ``` Here's an example of a tuple return, comprising several objects: ``` Returns: `tuple(torch.FloatTensor)` comprising various elements depending on the configuration ([`BertConfig`]) and inputs: - ** loss** (*optional*, returned when `masked_lm_labels` is provided) `torch.FloatTensor` of shape `(1,)` -- Total loss is the sum of the masked language modeling loss and the next sequence prediction (classification) loss. - **prediction_scores** (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`) -- Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). ``` ## Styling the docstring We have an automatic script running with the `make style` comment that will make sure that: - the docstrings fully take advantage of the line width - all code examples are formatted using black, like the code of the Transformers library This script may have some weird failures if you made a syntax mistake or if you uncover a bug. Therefore, it's recommended to commit your changes before running `make style`, so you can revert the changes done by that script easily. ## Writing documentation examples The syntax for Example docstrings can look as follows: ``` Example: ```python >>> import time >>> from accelerate import Accelerator >>> accelerator = Accelerator() >>> if accelerator.is_main_process: ... time.sleep(2) >>> else: ... print("I'm waiting for the main process to finish its sleep...") >>> accelerator.wait_for_everyone() >>> # Should print on every process at the same time >>> print("Everyone is here") ``` ``` The docstring should give a minimal, clear example of how the respective function is to be used in inference and also include the expected (ideally sensible) output. Often, readers will try out the example before even going through the function or class definitions. 
Therefore, it is of utmost importance that the example works as expected.accelerate-1.9.0/docs/source/000077500000000000000000000000001503574341000160435ustar00rootroot00000000000000accelerate-1.9.0/docs/source/_toctree.yml000066400000000000000000000103141503574341000203710ustar00rootroot00000000000000- sections: - local: index title: 🤗 Accelerate - local: basic_tutorials/install title: Installation - local: quicktour title: Quicktour title: Getting started - sections: - local: basic_tutorials/overview title: Overview - local: basic_tutorials/migration title: Add Accelerate to your code - local: basic_tutorials/execution title: Execution process - local: basic_tutorials/tpu title: TPU training - local: basic_tutorials/launch title: Launching Accelerate scripts - local: basic_tutorials/notebook title: Launching distributed training from Jupyter Notebooks title: Tutorials - sections: - isExpanded: true sections: - local: usage_guides/explore title: Start Here! - local: usage_guides/model_size_estimator title: Model memory estimator - local: usage_guides/quantization title: Model quantization - local: usage_guides/tracking title: Experiment trackers - local: usage_guides/profiler title: Profiler - local: usage_guides/checkpoint title: Checkpointing - local: basic_tutorials/troubleshooting title: Troubleshoot - local: usage_guides/training_zoo title: Example Zoo title: Accelerate - isExpanded: true sections: - local: usage_guides/gradient_accumulation title: Gradient accumulation - local: usage_guides/local_sgd title: Local SGD - local: usage_guides/low_precision_training title: Low precision (FP8) training - local: usage_guides/deepspeed title: DeepSpeed - local: usage_guides/deepspeed_multiple_model title: Using multiple models with DeepSpeed - local: usage_guides/ddp_comm_hook title: DDP Communication Hooks - local: usage_guides/fsdp title: Fully Sharded Data Parallel - local: usage_guides/megatron_lm title: Megatron-LM - local: usage_guides/sagemaker title: Amazon SageMaker - local: usage_guides/mps title: Apple M1 GPUs - local: usage_guides/intel_cpu title: Intel CPU - local: usage_guides/gaudi title: Intel Gaudi - local: usage_guides/compilation title: Compilation title: Training - isExpanded: true sections: - local: usage_guides/big_modeling title: Big Model Inference - local: usage_guides/distributed_inference title: Distributed inference title: Inference title: How to guides - sections: - local: concept_guides/internal_mechanism title: Accelerate's internal mechanism - local: concept_guides/big_model_inference title: Loading big models into memory - local: concept_guides/performance title: Comparing performance across distributed setups - local: concept_guides/deferring_execution title: Executing and deferring jobs - local: concept_guides/gradient_synchronization title: Gradient synchronization - local: concept_guides/fsdp_and_deepspeed title: FSDP vs DeepSpeed - local: concept_guides/fsdp1_vs_fsdp2 title: FSDP1 vs FSDP2 - local: concept_guides/low_precision_training title: Low precision training methods - local: concept_guides/training_tpu title: Training on TPUs title: Concepts and fundamentals - sections: - local: package_reference/accelerator title: Accelerator - local: package_reference/state title: Stateful classes - local: package_reference/cli title: The Command Line - local: package_reference/torch_wrappers title: DataLoaders, Optimizers, Schedulers - local: package_reference/tracking title: Experiment trackers - local: package_reference/launchers title: Launchers - local: 
package_reference/deepspeed title: DeepSpeed utilities - local: package_reference/logging title: Logging - local: package_reference/big_modeling title: Working with large models - local: package_reference/inference title: Pipeline parallelism - local: package_reference/kwargs title: Kwargs handlers - local: package_reference/fp8 title: FP8 - local: package_reference/utilities title: Utility functions and classes - local: package_reference/megatron_lm title: Megatron-LM utilities - local: package_reference/fsdp title: Fully Sharded Data Parallel utilities title: "Reference" accelerate-1.9.0/docs/source/basic_tutorials/000077500000000000000000000000001503574341000212325ustar00rootroot00000000000000accelerate-1.9.0/docs/source/basic_tutorials/execution.md000066400000000000000000000110061503574341000235550ustar00rootroot00000000000000 # Execution process When working with distributed training systems, it is important to manage how and when processes are executed across GPUs. Some processes are completed faster than others, and some processes shouldn't begin if others haven't finished yet. Accelerate provides tools for orchestrating when processes are executed to ensure everything remains synchronized across all devices. This tutorial will teach you how to execute a process on only one machine and how to delay execution until all processes have reached a certain point. ## Execute on one process Certain code only needs to be run once on a given machine, such as printing a log statement or only displaying one progress bar on the local main process. You should use `accelerator.is_local_main_process` to indicate code that should only be executed once. ```py from tqdm.auto import tqdm progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) ``` You could also wrap a statement with `accelerator.is_local_main_process`. > [!TIP] > For standalone `print` statements that aren't wrapped in `accelerator.is_local_main_process`, replace `print` with Accelerate's [`~Accelerator.print`] method to only print once per process. ```py if accelerator.is_local_main_process: print("Accelerate is the best") ``` For a function that should only be executed once, use [`~Accelerator.on_local_main_process`]. ```py @accelerator.on_local_main_process def do_my_thing(): "Something done once per server" do_thing_once_per_server() ``` You could also direct Accelerate to execute code once across *all processes* regardless of the number of machines. This is useful if you're uploading a final model to the Hub. You should use `accelerator.is_main_process` to indicate code that should only be executed once across all processes. ```py if accelerator.is_main_process: repo.push_to_hub() ``` For a function that should only be executed once across all processes, use [`~Accelerator.on_main_process`]. ```py @accelerator.on_main_process def do_my_thing(): "Something done once per server" do_thing_once() ``` ## Execute on a specific process Accelerate can also help you execute functions that should only be executed on a specific process or a local process index. Use the [`~Accelerator.on_process`] method and specify the process index to execute a function on. ```py @accelerator.on_process(process_index=0) def do_my_thing(): "Something done on process index 0" do_thing_on_index_zero() ``` Use the [`~Accelerator.on_local_process`] method and specify the local process index to execute a function on. 
```py
@accelerator.on_local_process(local_process_index=0)
def do_my_thing():
    "Something done on process index 0 on each server"
    do_thing_on_index_zero_on_each_server()
```

## Defer execution

When you run your script on several GPUs at the same time, some code may be executed faster than others. You might need to wait for all processes to reach a certain point before executing the next set of instructions. For instance, you shouldn't save a model before making sure every process is done with training.

To do this, add [`~Accelerator.wait_for_everyone`] in your code. This blocks all processes that have finished first from continuing until all remaining processes have reached the same point (this has no effect if you're running on a single GPU or CPU).

```py
accelerator.wait_for_everyone()
```
accelerate-1.9.0/docs/source/basic_tutorials/install.md000066400000000000000000000067421503574341000232310ustar00rootroot00000000000000
# Installation

Before you start, you will need to set up your environment, install the appropriate packages, and configure Accelerate. Accelerate is tested on **Python 3.8+**.

Accelerate is available on PyPI and conda, as well as on GitHub. Details to install from each are below:

## pip

To install Accelerate from PyPI, run:

```bash
pip install accelerate
```

## conda

Accelerate can also be installed with conda:

```bash
conda install -c conda-forge accelerate
```

## Source

New features are added every day that haven't been released yet. To try them out yourself, install from the GitHub repository:

```bash
pip install git+https://github.com/huggingface/accelerate
```

If you're working on contributing to the library or wish to play with the source code and see live results as you run the code, an editable version can be installed from a locally-cloned version of the repository:

```bash
git clone https://github.com/huggingface/accelerate
cd accelerate
pip install -e .
```

## Configuration

After installing, you need to configure Accelerate for how the current system is set up for training. To do so, run the following and answer the questions prompted to you:

```bash
accelerate config
```

To write a barebones configuration that doesn't include options such as DeepSpeed configuration or running on TPUs, you can quickly run:

```bash
python -c "from accelerate.utils import write_basic_config; write_basic_config(mixed_precision='fp16')"
```

Accelerate will automatically utilize the maximum number of GPUs available and set the mixed precision mode.
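If you prefer to script this step (for example, when bootstrapping a fresh machine or a CI job), a minimal sketch could look like the following. The `save_location` argument and the path used here are assumptions for illustration only, so verify them against the `write_basic_config` signature in your installed version:

```python
import os

from accelerate.utils import write_basic_config

# A sketch: write a basic config without going through the interactive
# `accelerate config` prompts. `save_location` and the path below are
# assumptions -- check them against your installed version of Accelerate.
config_path = os.path.expanduser("~/.cache/huggingface/accelerate/default_config.yaml")
write_basic_config(mixed_precision="fp16", save_location=config_path)
```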
To check that your configuration looks fine, run: ```bash accelerate env ``` An example output is shown below, which describes two GPUs on a single machine with no mixed precision being used: ```bash - `Accelerate` version: 1.2.0.dev0 - Platform: Linux-6.8.0-47-generic-x86_64-with-glibc2.35 - `accelerate` bash location: /home/zach/miniconda3/envs/accelerate/bin/accelerate - Python version: 3.10.13 - Numpy version: 1.26.4 - PyTorch version (GPU?): 2.5.1+cu124 (True) - PyTorch XPU available: False - PyTorch NPU available: False - PyTorch MLU available: False - PyTorch MUSA available: False - System RAM: 187.91 GB - GPU type: NVIDIA GeForce RTX 4090 - `Accelerate` default config: - compute_environment: LOCAL_MACHINE - distributed_type: MULTI_GPU - mixed_precision: no - use_cpu: False - debug: False - num_processes: 2 - machine_rank: 0 - num_machines: 1 - gpu_ids: all - rdzv_backend: static - same_network: True - main_training_function: main - enable_cpu_affinity: False - downcast_bf16: no - tpu_use_cluster: False - tpu_use_sudo: False - tpu_env: [] ``` accelerate-1.9.0/docs/source/basic_tutorials/launch.md000066400000000000000000000222161503574341000230310ustar00rootroot00000000000000 # Launching Accelerate scripts In the previous tutorial, you were introduced to how to modify your current training script to use Accelerate. The final version of that code is shown below: ```python from accelerate import Accelerator accelerator = Accelerator() model, optimizer, training_dataloader, scheduler = accelerator.prepare( model, optimizer, training_dataloader, scheduler ) for batch in training_dataloader: optimizer.zero_grad() inputs, targets = batch outputs = model(inputs) loss = loss_function(outputs, targets) accelerator.backward(loss) optimizer.step() scheduler.step() ``` But how do you run this code and have it utilize the special hardware available to it? First, you should rewrite the above code into a function, and make it callable as a script. For example: ```diff from accelerate import Accelerator + def main(): accelerator = Accelerator() model, optimizer, training_dataloader, scheduler = accelerator.prepare( model, optimizer, training_dataloader, scheduler ) for batch in training_dataloader: optimizer.zero_grad() inputs, targets = batch outputs = model(inputs) loss = loss_function(outputs, targets) accelerator.backward(loss) optimizer.step() scheduler.step() + if __name__ == "__main__": + main() ``` Next, you need to launch it with `accelerate launch`. It's recommended you run `accelerate config` before using `accelerate launch` to configure your environment to your liking. Otherwise Accelerate will use very basic defaults depending on your system setup. ## Using accelerate launch Accelerate has a special CLI command to help you launch your code in your system through `accelerate launch`. This command wraps around all of the different commands needed to launch your script on various platforms, without you having to remember what each of them is. If you are familiar with launching scripts in PyTorch yourself such as with `torchrun`, you can still do this. It is not required to use `accelerate launch`. You can launch your script quickly by using: ```bash accelerate launch {script_name.py} --arg1 --arg2 ... ``` Just put `accelerate launch` at the start of your command, and pass in additional arguments and parameters to your script afterward like normal! Since this runs the various torch spawn methods, all of the expected environment variables can be modified here as well. 
For example, here is how to use `accelerate launch` with a single GPU: ```bash # for cuda device: CUDA_VISIBLE_DEVICES="0" accelerate launch {script_name.py} --arg1 --arg2 ... # for xpu device: ZE_AFFINITY_MASK="0" accelerate launch {script_name.py} --arg1 --arg2 ... ``` You can also use `accelerate launch` without performing `accelerate config` first, but you may need to manually pass in the right configuration parameters. In this case, Accelerate will make some hyperparameter decisions for you, e.g., if GPUs are available, it will use all of them by default without the mixed precision. Here is how you would use all GPUs and train with mixed precision disabled: ```bash accelerate launch --multi_gpu {script_name.py} {--arg1} {--arg2} ... ``` Or by specifying a number of GPUs to use: ```bash accelerate launch --num_processes=2 {script_name.py} {--arg1} {--arg2} ... ``` To get more specific you should pass in the needed parameters yourself. For instance, here is how you would also launch that same script on two GPUs using mixed precision while avoiding all of the warnings: ```bash accelerate launch --multi_gpu --mixed_precision=fp16 --num_processes=2 {script_name.py} {--arg1} {--arg2} ... ``` For a complete list of parameters you can pass in, run: ```bash accelerate launch -h ``` Even if you are not using Accelerate in your code, you can still use the launcher for starting your scripts! For a visualization of this difference, that earlier `accelerate launch` on multi-gpu would look something like so with `torchrun`: ```bash MIXED_PRECISION="fp16" torchrun --nproc_per_node=2 --nnodes=1 {script_name.py} {--arg1} {--arg2} ... ``` You can also launch your script utilizing the launch CLI as a python module itself, enabling the ability to pass in other python-specific launching behaviors. To do so, use `accelerate.commands.launch` instead of `accelerate launch`: ```bash python -m accelerate.commands.launch --num_processes=2 {script_name.py} {--arg1} {--arg2} ``` If you want to execute the script with any other python flags, you can pass them in as well similar to `-m`, such as the below example enabling unbuffered stdout and stderr: ```bash python -u -m accelerate.commands.launch --num_processes=2 {script_name.py} {--arg1} {--arg2} ``` You can run your code on CPU as well! This is helpful for debugging and testing purposes on toy models and datasets. ```bash accelerate launch --cpu {script_name.py} {--arg1} {--arg2} ``` ## Why you should always use `accelerate config` Why is it useful to the point you should **always** run `accelerate config`? Remember that earlier call to `accelerate launch` as well as `torchrun`? Post configuration, to run that script with the needed parts you just need to use `accelerate launch` outright, without passing anything else in: ```bash accelerate launch {script_name.py} {--arg1} {--arg2} ... ``` ## Custom Configurations As briefly mentioned earlier, `accelerate launch` should be mostly used through combining set configurations made with the `accelerate config` command. These configs are saved to a `default_config.yaml` file in your cache folder for Accelerate. This cache folder is located at (with decreasing order of priority): - The content of your environment variable `HF_HOME` suffixed with `accelerate`. - If it does not exist, the content of your environment variable `XDG_CACHE_HOME` suffixed with `huggingface/accelerate`. - If this does not exist either, the folder `~/.cache/huggingface/accelerate`. 
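Following that lookup order, a quick shell sketch for locating the `default_config.yaml` file that `accelerate launch` will pick up (assuming you haven't passed `--config_file` explicitly) might look like this:

```bash
# A sketch: resolve where the default Accelerate config lives, following the
# priority order described above (highest priority first).
if [ -n "$HF_HOME" ]; then
    echo "$HF_HOME/accelerate/default_config.yaml"
elif [ -n "$XDG_CACHE_HOME" ]; then
    echo "$XDG_CACHE_HOME/huggingface/accelerate/default_config.yaml"
else
    echo "$HOME/.cache/huggingface/accelerate/default_config.yaml"
fi
```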
To have multiple configurations, the flag `--config_file` can be passed to the `accelerate launch` command paired with the location of the custom yaml. An example yaml may look something like the following for two GPUs on a single machine using `fp16` for mixed precision: ```yaml compute_environment: LOCAL_MACHINE deepspeed_config: {} distributed_type: MULTI_GPU fsdp_config: {} machine_rank: 0 main_process_ip: null main_process_port: null main_training_function: main mixed_precision: fp16 num_machines: 1 num_processes: 2 use_cpu: false ``` Launching a script from the location of that custom yaml file looks like the following: ```bash accelerate launch --config_file {path/to/config/my_config_file.yaml} {script_name.py} {--arg1} {--arg2} ... ``` ## Multi-node training Multi-node training with Accelerate is similar to [multi-node training with torchrun](https://pytorch.org/tutorials/intermediate/ddp_series_multinode.html). The simplest way to launch a multi-node training run is to do the following: - Copy your codebase and data to all nodes. (or place them on a shared filesystem) - Setup your python packages on all nodes. - Run `accelerate config` on the main single node first. After specifying the number of nodes, you will be asked to specify the rank of each node (this will be 0 for the main/master node), along with the IP address and port for the main process. This is required for the worker nodes to communicate with the main process. Afterwards, you can copy or send this config file across all of your nodes, changing the `machine_rank` to 1, 2,3, etc. to avoid having to run the command (or just follow their directions directly for launching with `torchrun` as well) Once you have done this, you can start your multi-node training run by running `accelerate launch` (or `torchrun`) on all nodes. It is required that the command be ran on all nodes for everything to start, not just running it from the main node. You can use something like SLURM or a different process executor to wrap around this requirement and call everything from a single command. It is recommended to use the intranet IP of your main node over the public IP for better latency. This is the `192.168.x.x` or the `172.x.x.x` address you see when you run `hostname -I` on the main node. To get a better idea about multi-node training, check out our example for [multi-node training with FSDP](https://huggingface.co/blog/ram-efficient-pytorch-fsdp). accelerate-1.9.0/docs/source/basic_tutorials/migration.md000066400000000000000000000254321503574341000235530ustar00rootroot00000000000000 # Add Accelerate to your code Each distributed training framework has their own way of doing things which can require writing a lot of custom code to adapt it to your PyTorch training code and training environment. Accelerate offers a friendly way to interface with these distributed training frameworks without having to learn the specific details of each one. Accelerate takes care of those details for you, so you can focus on the training code and scale it to any distributed training environment. In this tutorial, you'll learn how to adapt your existing PyTorch code with Accelerate and get you on your way toward training on distributed systems with ease! You'll start with a basic PyTorch training loop (it assumes all the training objects like `model` and `optimizer` have been setup already) and progressively integrate Accelerate into it. 
```python device = "cuda" model.to(device) for batch in training_dataloader: optimizer.zero_grad() inputs, targets = batch inputs = inputs.to(device) targets = targets.to(device) outputs = model(inputs) loss = loss_function(outputs, targets) loss.backward() optimizer.step() scheduler.step() ``` ## Accelerator The [`Accelerator`] is the main class for adapting your code to work with Accelerate. It knows about the distributed setup you're using such as the number of different processes and your hardware type. This class also provides access to many of the necessary methods for enabling your PyTorch code to work in any distributed training environment and for managing and executing processes across devices. That's why you should always start by importing and creating an [`Accelerator`] instance in your script. ```python from accelerate import Accelerator accelerator = Accelerator() ``` The [`Accelerator`] also knows which device to move your PyTorch objects to, so it is recommended to let Accelerate handle this for you. ```diff - device = "cuda" + device = accelerator.device model.to(device) ``` ## Prepare PyTorch objects Next, you need to prepare your PyTorch objects (model, optimizer, scheduler, etc.) for distributed training. The [`~Accelerator.prepare`] method takes care of placing your model in the appropriate container (like single GPU or multi-GPU) for your training setup, adapting the optimizer and scheduler to use Accelerate's [`~optimizer.AcceleratedOptimizer`] and [`~scheduler.AcceleratedScheduler`], and creating a new dataloader that can be sharded across processes. > [!TIP] > Accelerate only prepares objects that inherit from their respective PyTorch classes such as `torch.optim.Optimizer`. The PyTorch objects are returned in the same order they're sent. ```py model, optimizer, training_dataloader, scheduler = accelerator.prepare( model, optimizer, training_dataloader, scheduler ) ``` ## Training loop Finally, remove the `to(device)` calls to the inputs and targets in the training loop because Accelerate's DataLoader classes automatically places them on the right device. You should also replace the usual `backward()` pass with Accelerate's [`~Accelerator.backward`] method which scales the gradients for you and uses the appropriate `backward()` method depending on your distributed setup (for example, DeepSpeed or Megatron). ```diff - inputs = inputs.to(device) - targets = targets.to(device) outputs = model(inputs) loss = loss_function(outputs, targets) - loss.backward() + accelerator.backward(loss) ``` Put everything together and your new Accelerate training loop should now look like this! ```python from accelerate import Accelerator accelerator = Accelerator() device = accelerator.device model, optimizer, training_dataloader, scheduler = accelerator.prepare( model, optimizer, training_dataloader, scheduler ) for batch in training_dataloader: optimizer.zero_grad() inputs, targets = batch outputs = model(inputs) loss = loss_function(outputs, targets) accelerator.backward(loss) optimizer.step() scheduler.step() ``` ## Training features Accelerate offers additional features - like gradient accumulation, gradient clipping, mixed precision training and more - you can add to your script to improve your training run. Let's explore these three features. ### Gradient accumulation Gradient accumulation enables you to train on larger batch sizes by accumulating the gradients over multiple batches before updating the weights. This can be useful for getting around memory limitations. 
To enable this feature in Accelerate, specify the `gradient_accumulation_steps` parameter in the [`Accelerator`] class and add the [`~Accelerator.accumulate`] context manager to your script. ```diff + accelerator = Accelerator(gradient_accumulation_steps=2) model, optimizer, training_dataloader = accelerator.prepare(model, optimizer, training_dataloader) for input, label in training_dataloader: + with accelerator.accumulate(model): predictions = model(input) loss = loss_function(predictions, label) accelerator.backward(loss) optimizer.step() scheduler.step() optimizer.zero_grad() ``` ### Gradient clipping Gradient clipping is a technique to prevent "exploding gradients", and Accelerate offers: * [`~Accelerator.clip_grad_value_`] to clip gradients to a minimum and maximum value * [`~Accelerator.clip_grad_norm_`] for normalizing gradients to a certain value ### Mixed precision Mixed precision accelerates training by using a lower precision data type like fp16 (half-precision) to calculate the gradients. For the best performance with Accelerate, the loss should be computed inside your model (like in Transformers models) because computations outside of the model are computed in full precision. Set the mixed precision type to use in the [`Accelerator`], and then use the [`~Accelerator.autocast`] context manager to automatically cast the values to the specified data type. > [!WARNING] > Accelerate enables automatic mixed precision, so [`~Accelerator.autocast`] is only needed if there are other mixed precision operations besides those performed on loss by [`~Accelerator.backward`] which already handles the scaling. ```diff + accelerator = Accelerator(mixed_precision="fp16") + with accelerator.autocast(): loss = complex_loss_function(outputs, target) ``` ## Save and load Accelerate can also save and load a *model* once training is complete or you can also save the model and optimizer *state* which could be useful for resuming training. ### Model Once all processes are complete, unwrap the model with the [`~Accelerator.unwrap_model`] method before saving it because the [`~Accelerator.prepare`] method wrapped your model into the proper interface for distributed training. If you don't unwrap the model, saving the model state dictionary also saves any potential extra layers from the larger model and you won't be able to load the weights back into your base model. You should use the [`~Accelerator.save_model`] method to unwrap and save the model state dictionary. This method can also save a model into sharded checkpoints or into the [safetensors](https://hf.co/docs/safetensors/index) format. ```py accelerator.wait_for_everyone() accelerator.save_model(model, save_directory) ``` For models from the [Transformers](https://hf.co/docs/transformers/index) library, save the model with the [`~transformers.PreTrainedModel.save_pretrained`] method so that it can be reloaded with the [`~transformers.PreTrainedModel.from_pretrained`] method. ```py from transformers import AutoModel unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( "path/to/my_model_directory", is_main_process=accelerator.is_main_process, save_function=accelerator.save, ) model = AutoModel.from_pretrained("path/to/my_model_directory") ``` To load your weights, use the [`~Accelerator.unwrap_model`] method to unwrap the model first before loading the weights. All model parameters are references to tensors, so this loads your weights inside `model`. 
```py unwrapped_model = accelerator.unwrap_model(model) path_to_checkpoint = os.path.join(save_directory,"pytorch_model.bin") unwrapped_model.load_state_dict(torch.load(path_to_checkpoint)) ``` Set `safe_serialization=True` to save the model in the safetensor format. ```py accelerator.wait_for_everyone() accelerator.save_model(model, save_directory, max_shard_size="1GB", safe_serialization=True) ``` To load a sharded checkpoint or a safetensor formatted checkpoint, use the [`~accelerate.load_checkpoint_in_model`] method. This method allows you to load a checkpoint onto a specific device. ```py load_checkpoint_in_model(unwrapped_model, save_directory, device_map={"":device}) ``` ### State During training, you may want to save the current state of the model, optimizer, random generators, and potentially learning rate schedulers so they can be restored in the *same script*. You should add the [`~Accelerator.save_state`] and [`~Accelerator.load_state`] methods to your script to save and load states. To further customize where and how states are saved through [`~Accelerator.save_state`], use the [`~utils.ProjectConfiguration`] class. For example, if `automatic_checkpoint_naming` is enabled, each saved checkpoint is stored at `Accelerator.project_dir/checkpoints/checkpoint_{checkpoint_number}`. Any other stateful items to be stored should be registered with the [`~Accelerator.register_for_checkpointing`] method so they can be saved and loaded. Every object passed to this method to be stored must have a `load_state_dict` and `state_dict` function. > [!TIP] > If you have [`torchdata>=0.8.0`](https://github.com/pytorch/data/tree/main) installed, you can additionally pass `use_stateful_dataloader=True` into your [`~utils.DataLoaderConfiguration`]. This extends Accelerate's DataLoader classes with a `load_state_dict` and `state_dict` function, and makes it so `Accelerator.save_state` and `Accelerator.load_state` also track how far into the training dataset it has read when persisting the model. accelerate-1.9.0/docs/source/basic_tutorials/notebook.md000066400000000000000000000407551503574341000234070ustar00rootroot00000000000000 # Launching distributed training from Jupyter Notebooks This tutorial teaches you how to fine tune a computer vision model with 🤗 Accelerate from a Jupyter Notebook on a distributed system. You will also learn how to setup a few requirements needed for ensuring your environment is configured properly, your data has been prepared properly, and finally how to launch training. This tutorial is also available as a Jupyter Notebook [here](https://github.com/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_cv_example.ipynb) ## Configuring the Environment Before any training can be performed, an Accelerate config file must exist in the system. Usually this can be done by running the following in a terminal and answering the prompts: ```bash accelerate config ``` However, if general defaults are fine and you are *not* running on a TPU, Accelerate has a utility to quickly write your GPU configuration into a config file via [`utils.write_basic_config`]. The following code will restart Jupyter after writing the configuration, as CUDA code was called to perform this. CUDA can't be initialized more than once on a multi-GPU system. It's fine to debug in the notebook and have calls to CUDA, but in order to finally train a full cleanup and restart will need to be performed. 
```python import os from accelerate.utils import write_basic_config write_basic_config() # Write a config file os._exit(00) # Restart the notebook ``` ## Preparing the Dataset and Model Next you should prepare your dataset. As mentioned earlier, great care should be taken when preparing the `DataLoaders` and model to make sure that **nothing** is put on *any* GPU. If you do, it is recommended to put that specific code into a function and call that from within the notebook launcher interface, which will be shown later. Make sure the dataset is downloaded based on the directions [here](https://github.com/huggingface/accelerate/tree/main/examples#simple-vision-example) ```python import os, re, torch, PIL import numpy as np from torch.optim.lr_scheduler import OneCycleLR from torch.utils.data import DataLoader, Dataset from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor from accelerate import Accelerator from accelerate.utils import set_seed from timm import create_model ``` First you need to create a function to extract the class name based on a filename: ```python import os data_dir = "../../images" fnames = os.listdir(data_dir) fname = fnames[0] print(fname) ``` ```python out beagle_32.jpg ``` In the case here, the label is `beagle`. Using regex you can extract the label from the filename: ```python import re def extract_label(fname): stem = fname.split(os.path.sep)[-1] return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0] ``` ```python extract_label(fname) ``` And you can see it properly returned the right name for our file: ```python out "beagle" ``` Next a `Dataset` class should be made to handle grabbing the image and the label: ```python class PetsDataset(Dataset): def __init__(self, file_names, image_transform=None, label_to_id=None): self.file_names = file_names self.image_transform = image_transform self.label_to_id = label_to_id def __len__(self): return len(self.file_names) def __getitem__(self, idx): fname = self.file_names[idx] raw_image = PIL.Image.open(fname) image = raw_image.convert("RGB") if self.image_transform is not None: image = self.image_transform(image) label = extract_label(fname) if self.label_to_id is not None: label = self.label_to_id[label] return {"image": image, "label": label} ``` Now to build the dataset. Outside the training function you can find and declare all the filenames and labels and use them as references inside the launched function: ```python fnames = [os.path.join("../../images", fname) for fname in fnames if fname.endswith(".jpg")] ``` Next gather all the labels: ```python all_labels = [extract_label(fname) for fname in fnames] id_to_label = list(set(all_labels)) id_to_label.sort() label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)} ``` Next, you should make a `get_dataloaders` function that will return your built dataloaders for you. As mentioned earlier, if data is automatically sent to the GPU or a TPU device when building your `DataLoaders`, they must be built using this method. 
```python def get_dataloaders(batch_size: int = 64): "Builds a set of dataloaders with a batch_size" random_perm = np.random.permutation(len(fnames)) cut = int(0.8 * len(fnames)) train_split = random_perm[:cut] eval_split = random_perm[cut:] # For training a simple RandomResizedCrop will be used train_tfm = Compose([RandomResizedCrop((224, 224), scale=(0.5, 1.0)), ToTensor()]) train_dataset = PetsDataset([fnames[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id) # For evaluation a deterministic Resize will be used eval_tfm = Compose([Resize((224, 224)), ToTensor()]) eval_dataset = PetsDataset([fnames[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id) # Instantiate dataloaders train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4) eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size * 2, num_workers=4) return train_dataloader, eval_dataloader ``` Finally, you should import the scheduler to be used later: ```python from torch.optim.lr_scheduler import CosineAnnealingLR ``` ## Writing the Training Function Now you can build the training loop. [`notebook_launcher`] works by passing in a function to call that will be ran across the distributed system. Here is a basic training loop for the animal classification problem: The code has been split up to allow for explanations on each section. A full version that can be copy and pasted will be available at the end ```python def training_loop(mixed_precision="fp16", seed: int = 42, batch_size: int = 64): set_seed(seed) accelerator = Accelerator(mixed_precision=mixed_precision) ``` First you should set the seed and create an [`Accelerator`] object as early in the training loop as possible. If training on the TPU, your training loop should take in the model as a parameter and it should be instantiated outside of the training loop function. See the [TPU best practices](../concept_guides/training_tpu) to learn why Next you should build your dataloaders and create your model: ```python train_dataloader, eval_dataloader = get_dataloaders(batch_size) model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id)) ``` You build the model here so that the seed also controls the new weight initialization As you are performing transfer learning in this example, the encoder of the model starts out frozen so the head of the model can be trained only initially: ```python for param in model.parameters(): param.requires_grad = False for param in model.get_classifier().parameters(): param.requires_grad = True ``` Normalizing the batches of images will make training a little faster: ```python mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None] std = torch.tensor(model.default_cfg["std"])[None, :, None, None] ``` To make these constants available on the active device, you should set it to the Accelerator's device: ```python mean = mean.to(accelerator.device) std = std.to(accelerator.device) ``` Next instantiate the rest of the PyTorch classes used for training: ```python optimizer = torch.optim.Adam(params=model.parameters(), lr=3e-2 / 25) lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=3e-2, epochs=5, steps_per_epoch=len(train_dataloader)) ``` Before passing everything to [`~Accelerator.prepare`]. There is no specific order to remember, you just need to unpack the objects in the same order you gave them to the prepare method. 
```python model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) ``` Now train the model: ```python for epoch in range(5): model.train() for batch in train_dataloader: inputs = (batch["image"] - mean) / std outputs = model(inputs) loss = torch.nn.functional.cross_entropy(outputs, batch["label"]) accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() ``` The evaluation loop will look slightly different compared to the training loop. The number of elements passed as well as the overall total accuracy of each batch will be added to two constants: ```python model.eval() accurate = 0 num_elems = 0 ``` Next you have the rest of your standard PyTorch loop: ```python for batch in eval_dataloader: inputs = (batch["image"] - mean) / std with torch.no_grad(): outputs = model(inputs) predictions = outputs.argmax(dim=-1) ``` Before finally the last major difference. When performing distributed evaluation, the predictions and labels need to be passed through [`~Accelerator.gather`] so that all of the data is available on the current device and a properly calculated metric can be achieved: ```python accurate_preds = accelerator.gather(predictions) == accelerator.gather(batch["label"]) num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() ``` Now you just need to calculate the actual metric for this problem, and you can print it on the main process using [`~Accelerator.print`]: ```python eval_metric = accurate.item() / num_elems accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}") ``` A full version of this training loop is available below: ```python def training_loop(mixed_precision="fp16", seed: int = 42, batch_size: int = 64): set_seed(seed) # Initialize accelerator accelerator = Accelerator(mixed_precision=mixed_precision) # Build dataloaders train_dataloader, eval_dataloader = get_dataloaders(batch_size) # Instantiate the model (you build the model here so that the seed also controls new weight initializations) model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id)) # Freeze the base model for param in model.parameters(): param.requires_grad = False for param in model.get_classifier().parameters(): param.requires_grad = True # You can normalize the batches of images to be a bit faster mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None] std = torch.tensor(model.default_cfg["std"])[None, :, None, None] # To make these constants available on the active device, set it to the accelerator device mean = mean.to(accelerator.device) std = std.to(accelerator.device) # Instantiate the optimizer optimizer = torch.optim.Adam(params=model.parameters(), lr=3e-2 / 25) # Instantiate the learning rate scheduler lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=3e-2, epochs=5, steps_per_epoch=len(train_dataloader)) # Prepare everything # There is no specific order to remember, you just need to unpack the objects in the same order you gave them to the # prepare method. 
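    # `prepare` also wraps the dataloaders, so each batch is automatically placed on
    # the right device and sharded across the available processes.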
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # Now you train the model for epoch in range(5): model.train() for batch in train_dataloader: inputs = (batch["image"] - mean) / std outputs = model(inputs) loss = torch.nn.functional.cross_entropy(outputs, batch["label"]) accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() accurate = 0 num_elems = 0 for batch in eval_dataloader: inputs = (batch["image"] - mean) / std with torch.no_grad(): outputs = model(inputs) predictions = outputs.argmax(dim=-1) accurate_preds = accelerator.gather(predictions) == accelerator.gather(batch["label"]) num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() eval_metric = accurate.item() / num_elems # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}") ``` ## Using the notebook_launcher All that's left is to use the [`notebook_launcher`]. You pass in the function, the arguments (as a tuple), and the number of processes to train on. (See the [documentation](../package_reference/launchers) for more information) ```python from accelerate import notebook_launcher ``` ```python args = ("fp16", 42, 64) notebook_launcher(training_loop, args, num_processes=2) ``` In the case of running on multiple nodes, you need to set up a Jupyter session at each node and run the launching cell at the same time. For an environment containing 2 nodes (computers) with 8 GPUs each and the main computer with an IP address of "172.31.43.8", it would look like so: ```python notebook_launcher(training_loop, args, master_addr="172.31.43.8", node_rank=0, num_nodes=2, num_processes=8) ``` And in the second Jupyter session on the other machine: Notice how the `node_rank` has changed ```python notebook_launcher(training_loop, args, master_addr="172.31.43.8", node_rank=1, num_nodes=2, num_processes=8) ``` In the case of running on the TPU, it would look like so: ```python model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id)) args = (model, "fp16", 42, 64) notebook_launcher(training_loop, args, num_processes=8) ``` To launch the training process with elasticity, enabling fault tolerance, you can use the `elastic_launch` feature provided by PyTorch. This requires setting additional parameters such as `rdzv_backend` and `max_restarts`. Here is an example of how to use `notebook_launcher` with elastic capabilities: ```python notebook_launcher( training_loop, args, num_processes=2, max_restarts=3 ) ``` As it's running it will print the progress as well as state how many devices you ran on. This tutorial was ran with two GPUs: ```python out Launching training on 2 GPUs. epoch 0: 88.12 epoch 1: 91.73 epoch 2: 92.58 epoch 3: 93.90 epoch 4: 94.71 ``` And that's it! Please note that [`notebook_launcher`] ignores the Accelerate config file, to launch based on the config use: ```bash accelerate launch ``` ## Debugging A common issue when running the `notebook_launcher` is receiving a CUDA has already been initialized issue. This usually stems from an import or prior code in the notebook that makes a call to the PyTorch `torch.cuda` sublibrary. To help narrow down what went wrong, you can launch the `notebook_launcher` with `ACCELERATE_DEBUG_MODE=yes` in your environment and an additional check will be made when spawning that a regular process can be created and utilize CUDA without issue. 
(Your CUDA code can still be ran afterwards). ## Conclusion This notebook showed how to perform distributed training from inside of a Jupyter Notebook. Some key notes to remember: - Make sure to save any code that use CUDA (or CUDA imports) for the function passed to [`notebook_launcher`] - Set the `num_processes` to be the number of devices used for training (such as number of GPUs, CPUs, TPUs, etc) - If using the TPU, declare your model outside the training loop function accelerate-1.9.0/docs/source/basic_tutorials/overview.md000066400000000000000000000023551503574341000234270ustar00rootroot00000000000000 # Overview Welcome to the Accelerate tutorials! These introductory guides will help catch you up to speed on working with Accelerate. You'll learn how to modify your code to have it work with the API seamlessly, how to launch your script properly, and more! These tutorials assume some basic knowledge of Python and familiarity with the PyTorch framework. If you have any questions about Accelerate, feel free to join and ask the community on our [forum](https://discuss.huggingface.co/c/accelerate/18).accelerate-1.9.0/docs/source/basic_tutorials/tpu.md000066400000000000000000000047251503574341000223740ustar00rootroot00000000000000 # TPU training A [TPU (Tensor Processing Unit)](https://cloud.google.com/tpu/docs/intro-to-tpu) is a type of hardware specifically designed for training models efficiently. Accelerate supports TPU training, but there are a few things you should be aware of, namely graph compilation. This tutorial briefly discusses compilation, and for more details, take a look at the [Training on TPUs with Accelerate](../concept_guides/training_tpu) guide. ## Compilation A TPU creates a graph of all the operations in the training step such as the forward pass, backward pass and optimizer step. This is why the first training step always takes a while because building and compiling this graph takes time. But once compilation is complete, it is cached and all subsequent steps are much faster. The key is to avoid compiling your code again or else training is super slow. This means all your operations must be exactly the same: * all tensors in your batches must have the same length (for example, no dynamic padding for NLP tasks) * your code must be static (for example, no layers with for loops that have different lengths depending on the input such as a LSTM) ## Weight tying A common language model design is to tie the weights of the embedding and softmax layers. However, moving the model to a TPU (either yourself or passing it to the [`~Accelerator.prepare`] method) breaks the weight tying and you'll need to retie the weights. To add special behavior (like weight tying) in your script for TPUs, set [`~Accelerator.distributed_type`] to `DistributedType.TPU` first. Then you can use the [`~transformers.PreTrainedModel.tie_weights`] method to tie the weights. ```py if accelerator.distributed_type == DistributedType.TPU: model.tie_weights() ``` accelerate-1.9.0/docs/source/basic_tutorials/troubleshooting.md000066400000000000000000000252571503574341000250160ustar00rootroot00000000000000 # Troubleshoot This guide provides solutions to some issues you might encounter when using Accelerate. Not all errors are covered because Accelerate is an active library that is continuously evolving and there are many different use cases and distributed training setups. 
If the solutions described here don't help with your specific error, please take a look at the [Ask for help](#ask-for-help) section to learn where and how to get help. ## Logging Logging can help you identify where an error is coming from. In a distributed setup with multiple processes, logging can be a challenge, but Accelerate provides the [`~accelerate.logging`] utility to ensure logs are synchronized. To troubleshoot an issue, use [`~accelerate.logging`] instead of the standard Python [`logging`](https://docs.python.org/3/library/logging.html#module-logging) module. Set the verbosity level (`INFO`, `DEBUG`, `WARNING`, `ERROR`, `CRITICAL`) with the `log_level` parameter, and then you can either: 1. Export the `log_level` as the `ACCELERATE_LOG_LEVEL` environment variable. 2. Pass the `log_level` directly to `get_logger`. For example, to set `log_level="INFO"`: ```py from accelerate.logging import get_logger logger = get_logger(__name__, log_level="DEBUG") ``` By default, the log is called on main processes only. To call it on all processes, pass `main_process_only=False`. If a log should be called on all processes and in order, also pass `in_order=True`. ```py from accelerate.logging import get_logger logger = get_logger(__name__, log_level="DEBUG") # log all processes logger.debug("thing_to_log", main_process_only=False) # log all processes in order logger.debug("thing_to_log", main_process_only=False, in_order=True) ``` ## Hanging code and timeout errors There can be many reasons why your code is hanging. Let's take a look at how to solve some of the most common issues that can cause your code to hang. ### Mismatched tensor shapes Mismatched tensor shapes is a common issue that can cause your code to hang for a significant amount of time on a distributed setup. When running scripts in a distributed setup, functions such as [`Accelerator.gather`] and [`Accelerator.reduce`] are necessary to grab tensors across devices to collectively perform operations on them. These (and other) functions rely on `torch.distributed` to perform a `gather` operation, which requires tensors to have the **exact same shape** across all processes. When the tensor shapes don't match, your code hangs and you'll eventually hit a timeout exception. You can use Accelerate's operational debug mode to immediately catch this issue. We recommend enabling this mode during the `accelerate config` setup, but you can also enable it from the CLI, as an environment variable, or by manually editing the `config.yaml` file. ```bash accelerate launch --debug {my_script.py} --arg1 --arg2 ``` If enabling debug mode as an environment variable, you don't need to call `accelerate launch`. ```bash ACCELERATE_DEBUG_MODE="1" torchrun {my_script.py} --arg1 --arg2 ``` Add `debug: true` to your `config.yaml` file. ```yaml compute_environment: LOCAL_MACHINE debug: true ``` Once you enable debug mode, you should get a traceback that points to the tensor shape mismatch issue. ```py Traceback (most recent call last): File "/home/zach_mueller_huggingface_co/test.py", line 18, in main() File "/home/zach_mueller_huggingface_co/test.py", line 15, in main broadcast_tensor = broadcast(tensor) File "/home/zach_mueller_huggingface_co/accelerate/src/accelerate/utils/operations.py", line 303, in wrapper accelerate.utils.operations.DistributedOperationException: Cannot apply desired operation due to shape mismatches. All shapes across devices must be valid. 
Operation: `accelerate.utils.operations.broadcast` Input shapes: - Process 0: [1, 5] - Process 1: [1, 2, 5] ``` ### Early stopping For early stopping in distributed training, if each process has a specific stopping condition (e.g. validation loss), it may not be synchronized across all processes. As a result, a break can happen on process 0 but not on process 1 which will cause your code to hang indefinitely until a timeout occurs. If you have early stopping conditionals, use the `set_trigger` and `check_trigger` methods to make sure all the processes are ended correctly. ```py # Assume `should_do_breakpoint` is a custom defined function that returns a conditional, # and that conditional might be true only on process 1 if should_do_breakpoint(loss): accelerator.set_trigger() # Later in the training script when we need to check for the breakpoint if accelerator.check_trigger(): break ``` ### Low kernel versions on Linux On Linux with kernel version < 5.5, hanging processes have been reported. To avoid this problem, upgrade your system to a later kernel version. ### MPI If your distributed CPU training job using MPI is hanging, ensure that you have [passwordless SSH](https://www.open-mpi.org/faq/?category=rsh#ssh-keys) setup (using keys) between the nodes. This means that for all nodes in your hostfile, you should to be able to SSH from one node to another without being prompted for a password. Next, try to run the `mpirun` command as a sanity check. For example, the command below should print out the hostnames for each of the nodes. ```bash mpirun -f hostfile -n {number of nodes} -ppn 1 hostname ``` ## Out-of-Memory One of the most frustrating errors when it comes to running training scripts is hitting "Out-of-Memory" on devices like CUDA, XPU or CPU. The entire script needs to be restarted and any progress is lost. To address this problem, Accelerate provides the [`find_executable_batch_size`] utility that is heavily based on [toma](https://github.com/BlackHC/toma). This utility retries code that fails due to OOM (out-of-memory) conditions and automatically lowers batch sizes. For each OOM condition, the algorithm decreases the batch size by half and retries the code until it succeeds. To use [`find_executable_batch_size`], restructure your training function to include an inner function with `find_executable_batch_size` and build your dataloaders inside it. At a minimum, this only takes 4 new lines of code. The inner function **must** take batch size as the first parameter, but we do not pass one to it when called. The wrapper will handle this for you. Any object (models, optimizers) that consumes device memory and is passed to the [`Accelerator`] also **must** be declared inside the inner function. 
```diff def training_function(args): accelerator = Accelerator() + @find_executable_batch_size(starting_batch_size=args.batch_size) + def inner_training_loop(batch_size): + nonlocal accelerator # Ensure they can be used in our context + accelerator.free_memory() # Free all lingering references model = get_model() model.to(accelerator.device) optimizer = get_optimizer() train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size) lr_scheduler = get_scheduler( optimizer, num_training_steps=len(train_dataloader)*num_epochs ) model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) train(model, optimizer, train_dataloader, lr_scheduler) validate(model, eval_dataloader) + inner_training_loop() ``` ## Non-reproducible results between device setups If you changed the device setup and observe different model performance, it is likely you didn't update your script when moving from one setup to another. Even if you're using the same script with the same batch size, the results will still be different on a TPU, multi-GPU, and single GPU. For example, if you were training on a single GPU with a batch size of 16 and you move to a dual GPU setup, you need to change the batch size to 8 to have the same effective batch size. This is because when training with Accelerate, the batch size passed to the dataloader is the **batch size per GPU**. To make sure you can reproduce the results between the setups, make sure to use the same seed, adjust the batch size accordingly, and consider scaling the learning rate. For more details and a quick reference for batch sizes, check out the [Comparing performance between different device setups](../concept_guides/performance) guide. ## Performance issues on different GPUs If your multi-GPU setup consists of different GPUs, you may encounter some performance issues: - There may be an imbalance in GPU memory between the GPUs. In this case, the GPU with the smaller memory will limit the batch size or the size of the model that can be loaded onto the GPUs. - If you are using GPUs with different performance profiles, the performance will be driven by the slowest GPU you are using because the other GPUs will have to wait for it to complete its workload. Vastly different GPUs within the same setup can lead to performance bottlenecks. ## Ask for help If none of the solutions and advice here helped resolve your issue, you can always reach out to the community and Accelerate team for help. - Ask for help on the Hugging Face forums by posting your question in the [Accelerate category](https://discuss.huggingface.co/c/accelerate/18). Make sure to write a descriptive post with relevant context about your setup and reproducible code to maximize the likelihood that your problem is solved! - Post a question on [Discord](http://hf.co/join/discord), and let the team and the community help you. - Create an Issue on the Accelerate [GitHub repository](https://github.com/huggingface/accelerate/issues) if you think you've found a bug related to the library. Include context regarding the bug and details about your distributed setup to help us better figure out what's wrong and how we can fix it. 
accelerate-1.9.0/docs/source/concept_guides/000077500000000000000000000000001503574341000210365ustar00rootroot00000000000000accelerate-1.9.0/docs/source/concept_guides/big_model_inference.md000066400000000000000000000412771503574341000253320ustar00rootroot00000000000000 # Loading big models into memory When loading a pre-trained model in PyTorch, the usual workflow looks like this: ```py import torch my_model = ModelClass(...) state_dict = torch.load(checkpoint_file) my_model.load_state_dict(state_dict) ``` In plain English, those steps are: 1. Create the model with randomly initialized weights 2. Load the model weights (in a dictionary usually called a state dict) from the disk 3. Load those weights inside the model While this works very well for regularly sized models, this workflow has some clear limitations when we deal with a huge model: in step 1, we load a full version of the model in RAM, and spend some time randomly initializing the weights (which will be discarded in step 3). In step 2, we load another full version of the model in RAM, with the pre-trained weights. If you're loading a model with 6 billion parameters, this means you will need 24GB of RAM for each copy of the model, so 48GB in total (half of it to load the model in FP16). This API is quite new and still in its experimental stage. While we strive to provide a stable API, it's possible some small parts of the public API will change in the future. ## How the Process Works: A Quick Overview ## How the Process Works: Working with Code ### Instantiating an empty model The first tool Accelerate introduces to help with big models is a context manager [`init_empty_weights`] that helps you initialize a model without using any RAM so that step 1 can be done on models of any size. Here is how it works: ```py from accelerate import init_empty_weights with init_empty_weights(): my_model = ModelClass(...) ``` For instance: ```py with init_empty_weights(): model = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)]) ``` initializes an empty model with a bit more than 100B parameters. Behind the scenes, this relies on the meta device introduced in PyTorch 1.9. During the initialization under the context manager, each time a parameter is created, it is instantly moved to that device. You can't move a model initialized like this on CPU or another device directly, since it doesn't have any data. It's also very likely that a forward pass with that empty model will fail, as not all operations are supported on the meta device. ### Sharded checkpoints It's possible your model is so big that even a single copy won't fit in RAM. That doesn't mean it can't be loaded: if you have one or several GPUs, this is more memory available to store your model. In this case, it's better if your checkpoint is split into several smaller files that we call checkpoint shards. Accelerate will handle sharded checkpoints as long as you follow the following format: your checkpoint should be in a folder, with several files containing the partial state dicts, and there should be an index in the JSON format that contains a dictionary mapping parameter names to the file containing their weights. You can easily shard your model with [`~Accelerator.save_model`]. 
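A minimal sketch of doing so (the directory name and the `max_shard_size` value are illustrative, and the exact shard file names depend on the serialization backend):

```py
import torch.nn as nn

from accelerate import Accelerator

accelerator = Accelerator()
model = nn.Sequential(*[nn.Linear(1000, 1000) for _ in range(10)])

# Saves the weights as shards of at most ~1GB each, plus a JSON index
# mapping every parameter name to the shard file that contains it.
accelerator.save_model(model, save_directory="sharded_checkpoint", max_shard_size="1GB")
```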
For instance, we could have a folder containing: ```bash first_state_dict.bin index.json second_state_dict.bin ``` with index.json being the following file: ``` { "linear1.weight": "first_state_dict.bin", "linear1.bias": "first_state_dict.bin", "linear2.weight": "second_state_dict.bin", "linear2.bias": "second_state_dict.bin" } ``` and `first_state_dict.bin` containing the weights for `"linear1.weight"` and `"linear1.bias"`, `second_state_dict.bin` the ones for `"linear2.weight"` and `"linear2.bias"` ### Loading weights The second tool Accelerate introduces is a function [`load_checkpoint_and_dispatch`], that will allow you to load a checkpoint inside your empty model. This supports full checkpoints (a single file containing the whole state dict) as well as sharded checkpoints. It will also automatically dispatch those weights across the devices you have available (GPUs, CPU RAM), so if you are loading a sharded checkpoint, the maximum RAM usage will be the size of the biggest shard. If you want to use big model inference with Transformers models, check out this [documentation](https://huggingface.co/docs/transformers/main/en/main_classes/model#large-model-loading). Here is how we can use this to load the [GPT2-1.5B](https://huggingface.co/marcsun13/gpt2-xl-linear-sharded) model. Let's download the sharded version of this model. ```bash pip install huggingface_hub ``` ```py from huggingface_hub import snapshot_download checkpoint = "marcsun13/gpt2-xl-linear-sharded" weights_location = snapshot_download(repo_id=checkpoint) ``` In order to initialize the model, we will use the library minGPT. ```bash git clone https://github.com/karpathy/minGPT.git pip install minGPT/ ``` ```py from accelerate import init_empty_weights from mingpt.model import GPT model_config = GPT.get_default_config() model_config.model_type = 'gpt2-xl' model_config.vocab_size = 50257 model_config.block_size = 1024 with init_empty_weights(): model = GPT(model_config) ``` Then, load the checkpoint we just downloaded with: ```py from accelerate import load_checkpoint_and_dispatch model = load_checkpoint_and_dispatch( model, checkpoint=weights_location, device_map="auto", no_split_module_classes=['Block'] ) ``` By passing `device_map="auto"`, we tell Accelerate to determine automatically where to put each layer of the model depending on the available resources: - first, we use the maximum space available on the GPU(s) - if we still need space, we store the remaining weights on the CPU - if there is not enough RAM, we store the remaining weights on the hard drive as memory-mapped tensors #### `no_split_module_classes` This parameter will indicate that some of the modules with the name `"Block"` should not be split across different devices. You should set here all blocks that include a residual connection of some kind. #### The `device_map` You can see the `device_map` that Accelerate picked by accessing the `hf_device_map` attribute of your model: ```py model.hf_device_map ``` ```python out {'transformer.wte': 0, 'transformer.wpe': 0, 'transformer.drop': 0, 'transformer.h.0': 0, ... 'transformer.h.21': 0, 'transformer.h.22': 1, 'transformer.h.23': 1, 'transformer.h.24': 1, ... 
 'transformer.h.47': 1,
 'transformer.ln_f': 1,
 'lm_head': 1}
```

It's fully possible to create your own device map for the layers to use as well, specifying the GPU device to use (a number), `"cpu"`, or `"disk"` and pass this in:

```python
device_map = {
    "transformer.wte": "cpu",
    "transformer.wpe": 0,
    "transformer.drop": "cpu",
    "transformer.h.0": "disk"
}

model = load_checkpoint_and_dispatch(
    model, checkpoint=weights_location, device_map=device_map
)
```

### Run the model

Now that we have done this, our model lies across several devices, and possibly the hard drive. But it can still be used as a regular PyTorch model:

```py
from mingpt.bpe import BPETokenizer

tokenizer = BPETokenizer()
inputs = tokenizer("Hello, my name is").to(0)

outputs = model.generate(inputs, max_new_tokens=10, do_sample=False)[0]
tokenizer.decode(outputs.cpu().squeeze())
```

Behind the scenes, Accelerate added hooks to the model, so that:
- at each layer, the inputs are put on the right device (so even if your model is spread across several GPUs, it works)
- for the weights offloaded on the CPU, they are put on a GPU just before the forward pass and cleaned up just after
- for the weights offloaded on the hard drive, they are loaded in RAM then put on a GPU just before the forward pass and cleaned up just after

This way, your model can run for inference even if it doesn't fit on one of the GPUs or the CPU RAM!

This only supports the inference of your model, not training. Most of the computation happens behind `torch.no_grad()` context managers to avoid spending some GPU memory with intermediate activations.

### Designing a device map

You can let Accelerate handle the device map computation by setting `device_map` to one of the supported options (`"auto"`, `"balanced"`, `"balanced_low_0"`, `"sequential"`) or create one yourself if you want more control over where each layer should go.

You can derive all sizes of the model (and thus compute a `device_map`) on a model that is on the meta device.

All the options will produce the same result when you don't have enough GPU memory to accommodate the whole model (which is to fit everything that fits on the GPU, then offload weights to the CPU or even to the disk if there is not enough RAM).

When you have more GPU memory available than the model size, here is the difference between each option:
- `"auto"` and `"balanced"` evenly split the model on all available GPUs, making it possible for you to use a batch size greater than 1.
- `"balanced_low_0"` evenly splits the model on all GPUs except the first one, and only puts on GPU 0 what does not fit on the others. This option is great when you need to use GPU 0 for some processing of the outputs, like when using the `generate` function for Transformers models.
- `"sequential"` will fit what it can on GPU 0, then move on to GPU 1 and so forth (so it won't use the last GPUs if it doesn't need to).

The options `"auto"` and `"balanced"` produce the same results for now, but the behavior of `"auto"` might change in the future if we find a strategy that makes more sense, while `"balanced"` will stay stable.

First note that you can limit the memory used on each GPU by using the `max_memory` argument (available in [`infer_auto_device_map`] and in all functions using it). When setting `max_memory`, you should pass along a dictionary containing the GPU identifiers (for instance `0`, `1` etc.) and the `"cpu"` key for the maximum RAM you want to use for CPU offload.
The values can either be an integer (in bytes) or a string representing a number with its unit, such as `"10GiB"` or `"10GB"`.

Here is an example where we don't want to use more than 10GiB on each of the two GPUs and no more than 30GiB of CPU RAM for the model weights:

```python
from accelerate import infer_auto_device_map

device_map = infer_auto_device_map(my_model, max_memory={0: "10GiB", 1: "10GiB", "cpu": "30GiB"})
```

When a first allocation happens in PyTorch, it loads CUDA kernels which take about 1-2GB of memory depending on the GPU. Therefore you always have less usable memory than the actual size of the GPU. To see how much memory is actually used, run `torch.ones(1).cuda()` and look at the memory usage.

Therefore, when you create memory maps with `max_memory`, make sure to adjust the available memory accordingly to avoid out-of-memory errors.

Additionally, if you do some additional operations with your outputs without placing them back on the CPU (for instance inside the `generate` method of Transformers) and if you placed your inputs on a GPU, that GPU will consume more memory than the others (Accelerate always places the output back on the device of the input). Therefore, if you would like to optimize the maximum batch size and you have many GPUs, give the first GPU less memory. For example, with BLOOM-176B on an 8x80 A100 setup, the close-to-ideal map is:

```python
max_memory = {0: "30GiB", 1: "46GiB", 2: "46GiB", 3: "46GiB", 4: "46GiB", 5: "46GiB", 6: "46GiB", 7: "46GiB"}
```

As you can see, we gave the remaining 7 GPUs ~50% more memory than GPU 0.

If you opt to fully design the `device_map` yourself, it should be a dictionary with keys being module names of your model and values being a valid device identifier (for instance an integer for the GPUs), `"cpu"` for CPU offload, or `"disk"` for disk offload. The keys need to cover the whole model; you can then define your device map as you wish: for instance, if your model has two blocks (let's say `block1` and `block2`) which each contain three linear layers (let's say `linear1`, `linear2` and `linear3`), a valid device map can be:

```python
device_map = {"block1": 0, "block2": 1}
```

another one that is valid could be:

```python
device_map = {"block1": 0, "block2.linear1": 0, "block2.linear2": 1, "block2.linear3": 1}
```

On the other hand, this one is not valid as it does not cover every parameter of the model:

```python
device_map = {"block1": 0, "block2.linear1": 1, "block2.linear2": 1}
```

To be the most efficient, make sure your device map puts the parameters on the GPUs in a sequential manner (e.g. don't put one of the first weights on GPU 0, then weights on GPU 1 and the last weight back on GPU 0) to avoid making many transfers of data between the GPUs.

## CPU offload only

If you want to offload your model on CPU, you can use [`cpu_offload`]. As a result, all parameters of the model will be offloaded and only one copy of the state dict of the model will be kept. During the forward pass, parameters will be extracted from that state dict and put on the execution device and passed as they are needed, then offloaded again.

```python
cpu_offload(model, execution_device)
```

You can also use [`cpu_offload_with_hook`]. This function offloads a model on the CPU and puts it back on the execution device when executed. The difference with [`cpu_offload`] is that the model stays on the execution device after the forward pass and is only offloaded again when the `offload` method of the returned `hook` is called.
Furthermore, [`cpu_offload_with_hook`] is more performant but saves less memory. It is useful for pipelines running a model in a loop:

```python
model_1, hook_1 = cpu_offload_with_hook(model_1, execution_device)
model_2, hook_2 = cpu_offload_with_hook(model_2, execution_device, prev_module_hook=hook_1)
model_3, hook_3 = cpu_offload_with_hook(model_3, execution_device, prev_module_hook=hook_2)

hid_1 = model_1(input)
for i in range(50):
    # model1 is offloaded on the CPU at the first iteration, model 2 stays on the GPU for this whole loop.
    hid_2 = model_2(hid_1)
    # model2 is offloaded to the CPU just before this forward.
    hid_3 = model_3(hid_2)
# For model3, you need to manually call the hook offload method.
hook_3.offload()
```

## Disk offload only

To perform disk offload, you can use [`disk_offload`]. As a result, all parameters of the model will be offloaded as memory-mapped arrays in a given folder. During the forward pass, parameters will be accessed from that folder and put on the execution device and passed as they are needed, then offloaded again.

```python
disk_offload(model, offload_dir, execution_device)
```

## Limits and further development

We are aware of the current limitations in the API:

- [`infer_auto_device_map`] (or `device_map="auto"` in [`load_checkpoint_and_dispatch`]) tries to maximize GPU and CPU RAM it sees available when you execute it. While PyTorch is very good at managing GPU RAM efficiently (and giving it back when not needed), it's not entirely true with Python and CPU RAM. Therefore, an automatically computed device map might be too intense on the CPU. Move a few modules to the disk device if you get crashes due to a lack of RAM.
- [`infer_auto_device_map`] (or `device_map="auto"` in [`load_checkpoint_and_dispatch`]) assigns devices sequentially (to avoid moving things back and forth), so if your first layer is bigger than the size of the GPU you have, it will end up with everything on the CPU/Disk.
- [`load_checkpoint_and_dispatch`] and [`load_checkpoint_in_model`] do not perform any check on the correctness of your state dict compared to your model at the moment (this will be fixed in a future version), so you may get some weird errors if trying to load a checkpoint with mismatched or missing keys.
- The model parallelism used when your model is split on several GPUs is naive and not optimized, meaning that only one GPU works at a given time and the others sit idle.
- When weights are offloaded on the CPU/hard drive, there is no pre-fetching (yet, we will work on this for future versions), which means the weights are put on the GPU when they are needed and not before.
- Hard-drive offloading might be very slow if the hardware you run on does not have fast communication between disk and CPU (like NVMes).

accelerate-1.9.0/docs/source/concept_guides/deferring_execution.md000066400000000000000000000112311503574341000254060ustar00rootroot00000000000000

# Executing and deferring jobs

When you run your usual script, instructions are executed in order. Using Accelerate to deploy your script on several GPUs at the same time introduces a complication: while each process executes all instructions in order, some may be faster than others.

You might need to wait for all processes to have reached a certain point before executing a given instruction. For instance, you shouldn't save a model before being sure every process is done with training, and you wouldn't want to continue training before all the model weights have been loaded in.
To do this, just write the following line in your code: ``` accelerator.wait_for_everyone() ``` This instruction will block all the processes that arrive first until all the other processes have reached that point (if you run your script on just one GPU or CPU, this won't do anything). A few example cases of when to use this utility are listed below: Some of these are utilized with the [`~Accelerator.main_process_first`] context manager, which utilizes [`~Accelerator.wait_for_everyone`] to run a particular set of code on the main process beforehand before triggering and launching the other processes ## Downloading a Dataset When downloading a dataset, you should download it first on the main process and then load the cached dataset afterward `load_dataset` will perform a lock under the hood to stop multiple downloads from happening at once, but if you are downloading something not using this library you should use this method. ```python with accelerator.main_process_first(): datasets = load_dataset("glue", "mrpc") ``` Under the hood this is the same as calling: ```python # First do something on the main process if accelerator.is_main_process: datasets = load_dataset("glue", "mrpc") else: accelerator.wait_for_everyone() # And then send it to the rest of them if not accelerator.is_main_process: datasets = load_dataset("glue", "mrpc") else: accelerator.wait_for_everyone() ``` ## Saving the `state_dict` When saving the `state_dict` of the model, since you would normally save one file on just the main process you should specify that: ```python if accelerator.is_main_process: model = accelerator.unwrap_model(model) torch.save(model.state_dict(), "weights.pth") ``` ## Loading in the `state_dict` When loading in the `state_dict` to a model, optimizer, or scheduler, you should wait for all workers to have the weights loaded in before moving on to training ```python with accelerator.main_process_first(): state = torch.load("weights.pth") model.load_state_dict(state) ``` ## Applying a multi-worker CPU operation Applying a `map()` operation on multiple workers, such as tokenizing should be done on the main process first, and then propagated to each one. ```python datasets = load_dataset("glue", "mrpc") with accelerator.main_process_first(): tokenized_datasets = datasets.map( tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], ) ``` ## Applying checks such as Early Stopping To have a check that works with a flag set by a particular process, the `set_trigger` and `check_trigger` API should be used. Useful examples for doing so can include situations such as using early stopping and monitoring the loss (as each loss slightly differs on each process). Call [`Accelerator.set_trigger`] when your condition has been met, and [`Accelerator.check_trigger`] when checking if that condition has been met in any process: ```python for (x,y) in data_loader: logits = model(x) loss = loss_func(logits, y) # Assume `should_do_early_stopping` is a custom defined function that returns a conditional if should_do_early_stopping(loss): accelerator.set_trigger() # Later in the training script when we need to check for the breakpoint if accelerator.check_trigger(): break ``` accelerate-1.9.0/docs/source/concept_guides/fsdp1_vs_fsdp2.md000066400000000000000000000170231503574341000242060ustar00rootroot00000000000000 # FSDP1 vs FSDP2 This guide explains the key differences between `FSDP1` and `FSDP2` and helps you migrate your existing code to use `FSDP2` with minimal changes. 
## How is FSDP2 better than FSDP1? First, we want to understand how `FSDP1` and `FSDP2` work internally to understand the differences between them. This also helps us understand the limitations of `FSDP1` and how `FSDP2` solves them. We'll be discussing a scenario where we have a single `Layer` that contains 3 `Linear` layers and is wrapped using `FSDP` to be sharded across 2 GPUs.

Layer
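For concreteness, the toy `Layer` used throughout this guide could look like the following sketch (the class and sizes are illustrative, not taken from any library):

```py
import torch.nn as nn


class Layer(nn.Module):
    def __init__(self, hidden_size: int = 16):
        super().__init__()
        # Three Linear sub-modules, as in the figure above
        self.linear1 = nn.Linear(hidden_size, hidden_size)
        self.linear2 = nn.Linear(hidden_size, hidden_size)
        self.linear3 = nn.Linear(hidden_size, hidden_size)

    def forward(self, x):
        return self.linear3(self.linear2(self.linear1(x)))
```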
### FSDP1

First, we have to understand the original `FSDP1` and the limitations it brings. It represents each `FSDP` module as a single `FlatParameter`: a 1D tensor that contains all of the module parameters and is then sharded across ranks. For example, if you wrap the `Layer` with `FSDP1`, you end up with something like this:
FSDP1
You might notice a problem. The whole `Layer` gets flattened into a single `FlatParameter`, which then gets sharded across ranks. But if it's a single `FlatParameter` object, how do we store metadata? That is one of the limitations: properly storing per-parameter metadata such as `dtype`, `requires_grad`, etc. is not possible without some ugly hacks.

### FSDP2

This is why `FSDP2` was introduced. Instead of `FlatParameter`, it uses `DTensor`, which is short for "Distributed Tensor". Each `DTensor` represents a vanilla `torch.Tensor` that has been sharded across ranks, and carries metadata about the original `torch.Tensor`: how it's sharded, its [placement type](https://pytorch.org/docs/stable/distributed.tensor.html#module-torch.distributed.tensor.placement_types), and so on. This is why the approach is called `per-parameter sharding`. The following figure shows the difference:
FSDP2
Each Parameter of the original `Layer` is sharded across the 0th dimension, and split between 2 GPUs. Now, each `Linear` layer is a separate `DTensor` and storing metadata per-parameter is possible and straightforward. > [!TIP] > In the image above, the tensors were sharded across the 1st dimension for the sake of fitting the image on the screen, in reality, they are sharded across the 0th dimension as stated above ## What does FSDP2 offer? `FSDP2` is a new and improved version of PyTorch's fully-sharded data parallel training API. Its main advantage is using `DTensor` to represent sharded parameters. Compared to `FSDP1`, it offers: - Simpler internal implementation, where each `Parameter` is a separate `DTensor` - Enables simple partial parameter freezing because of the above, which makes methods as [`LORA`](https://arxiv.org/abs/2106.09685) work out of the box - With `DTensor`, `FSDP2` supports mixing `fp8` and other parameter types in the same model out of the box - Faster and simpler checkpointing without extra communication across ranks using `SHARDED_STATE_DICT` and [`torch.distributed.checkpoint`](https://pytorch.org/docs/stable/distributed.checkpoint.html), this way, each rank only saves its own shard and corresponding metadata - For loading, it uses a `state_dict` of the sharded model to directly load the sharded parameters - Support for asynchronous checkpointing, where parameters are first copied to CPU memory, after this, main thread continues training while another thread stores the parameters on disk - Memory efficiency and deterministic memory usage, `FSDP2` doesn't use `recordStream` anymore and uses stream-to-stream synchronization (for more technical details see [this forum post](https://dev-discuss.pytorch.org/t/fsdp-cudacachingallocator-an-outsider-newb-perspective/1486) and [this issue](https://github.com/pytorch/pytorch/issues/114299)) - In the future, optimizations of the communication patterns via `torch.compile` are planned, further improving the performance and memory efficiency ## API Differences We have already discussed the internal differences, now let's discuss the differences, you, as a user, will need to know. 
Here are the main changes in configuration options when using `FSDP2` through the `accelerate` CLI: Previous (`FSDP1`) | New (`FSDP2`) | What Changed -- | -- | -- `--fsdp_sharding_strategy` | `--fsdp_reshard_after_forward` | replaces `--fsdp_sharding_strategy`, changed to `true` (previously `FULL_SHARD`) or `false` (previously `SHARD_GRAD_OP`) `--fsdp_backward_prefetch` | \*\***REMOVED**\*\* | `FSDP2` uses previous `BACKWARD_PRE` option by default, as only this allows communication and computation overlap `--fsdp_forward_prefetch` | \*\***NOT YET IMPLEMENTED**\*\* | How to implement this is under active discussion, for now it is not supported in `FSDP2` `--fsdp_sync_module_states` | \*\***REMOVED**\*\* | with `FSDP2`, this parameter becomes redundant `--fsdp_cpu_ram_efficient_loading` | `--fsdp_cpu_ram_efficient_loading` | if `true`, `FSDP2` will similarly load the model only on rank 0, and then parameters get synced to other ranks, this is the same behavior as `FSDP1`, however, setting `--fsdp_sync_module_states` isn't required anymore `--fsdp_state_dict_type` | `--fsdp_state_dict_type` | `LOCAL_STATE_DICT` becomes obsolete and with `FSDP2` `SHARDED_STATE_DICT` is the default option, which results in no extra communication and each rank saving its own shard, other possible option is `FULL_STATE_DICT` which results in extra communication and spike in memory usage but saves the full model from rank 0. `--fsdp_use_orig_params` | \*\***REMOVED**\*\* | `FSDP2` uses a `DTensor` class on the background, which means it *always* uses the original parameters by default \*\***NEW**\*\* | `--fsdp_version` | `1` is the default option, to not break existing code, set to `2` to use `FSDP2` For all other options that remain unchanged, see the [`FSDP` documentation](../usage_guides/fsdp.md). ## How to Switch to FSDP2 ### If using Python code: Simply set `fsdp_version=2` when creating your plugin and replace options according to the table above. ```python from accelerate import FullyShardedDataParallelPlugin, Accelerator fsdp_plugin = FullyShardedDataParallelPlugin( fsdp_version=2 # other options... ) accelerator = Accelerator(fsdp_plugin=fsdp_plugin) ``` ### If using YAML config: Use our conversion tool: ```bash accelerate to-fsdp2 --config_file config.yaml --output_file new_config.yaml ``` This will automatically convert all FSDP1 settings to their FSDP2 equivalents. Use `--overwrite` to update the existing file instead of creating a new one. accelerate-1.9.0/docs/source/concept_guides/fsdp_and_deepspeed.md000066400000000000000000000263471503574341000251700ustar00rootroot00000000000000 # FSDP vs DeepSpeed Accelerate offers flexibilty of training frameworks, by integrating two extremely powerful tools for distributed training, namely [Pytorch FSDP](../usage_guides/fsdp) and [Microsoft DeepSpeed](../usage_guides/deepspeed). The aim of this tutorial is to draw parallels, as well as to outline potential differences, to empower the user to switch seamlessly between these two frameworks. To switch between the frameworks, we recommend launching code `accelerate launch` passing in the correct config file with `--config_file`, or passing in the respective arguments directly for [FSDP and DeepSpeed](../package_reference/cli#accelerate-launch) . 
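Once a model is running under `FSDP2`, you can sanity-check that its parameters really are `DTensor`s. A small sketch, assuming `model` has already been passed through [`~Accelerator.prepare`] with an FSDP2-enabled plugin (the `DTensor` import path varies between PyTorch releases):

```py
try:
    from torch.distributed.tensor import DTensor  # recent PyTorch releases
except ImportError:
    from torch.distributed._tensor import DTensor  # older releases

for name, param in model.named_parameters():
    if isinstance(param, DTensor):
        # Per-parameter metadata is available directly on the sharded tensor
        print(name, param.placements, param.device_mesh)
```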
Example Accelerate configurations can be found here for [DeepSpeed](../usage_guides/deepspeed#accelerate-deepspeed-plugin) and [FSDP](../usage_guides/fsdp#how-it-works-out-of-the-box), or in the [example zoo under "Launch Configurations"](../usage_guides/explore) This tutorial is for single-node, multi-GPU, scenarios only. ## Configuring Functionalities Model tensors are split into different GPUs in an attempt to scale up model sizes; this is termed *sharding* in FSDP, and *partitioning* in DeepSpeed. FSDP sharding and DeepSpeed ZeRO (partitioning) stages are configured by `--fsdp_sharding_strategy`, and `--zero_stage`, respectively. In particular, FSDP `FULL_SHARD` maps to DeepSpeed ZeRO stage `3`; see this [comprehensive mapping between FSDP sharding and DeepSpeed ZeRO settings](../usage_guides/fsdp#mapping-between-fsdp-sharding-strategies-and-deepspeed-zero-stages). The below table summarizes and groups similar settings: Group | Framework | Configuration | Example | Restrictions (if any) --|--|--|--|-- sharding / partitioning | FSDP
DeepSpeed | `--fsdp_sharding_strategy`
`--zero_stage` | `1` (`FULL_SHARD`)
`3` | offload | FSDP
DeepSpeed | `--fsdp_offload_params`
`--offload_param_device`
`--offload_optimizer_device` | `true`
`cpu`
`cpu` | all or nothing

model loading | FSDP
DeepSpeed | `--fsdp_cpu_ram_efficient_loading`
`--zero3_init_flag` | `true`
`true` |
only ZeRO 3 efficient checkpointing | FSDP
DeepSpeed | `--fsdp_state_dict_type`
`--zero3_save_16bit_model` | `SHARDED_STATE_DICT`
`true` |
only ZeRO 3 weights prefetching | FSDP

DeepSpeed | `--fsdp_forward_prefetch`
`--fsdp_backward_prefetch`
None | `true`
`BACKWARD_PRE` |

model | FSDP

DeepSpeed | `--fsdp_auto_wrap_policy`
`--fsdp_transformer_layer_cls_to_wrap`
None | `TRANSFORMER_BASED_WRAP`
|
Usually not needed
Transparent to user. parameters summoning | FSDP
DeepSpeed | `--fsdp_use_orig_params`
None | `true` | required for `torch.compile`
Transparent to user parameters syncing | FSDP
DeepSpeed | `--fsdp_sync_module_states`
None | `true` | training | FSDP
DeepSpeed | None
`--gradient_accumulation_steps`
`--gradient_clipping` |
`auto`
`auto` | Transparent to user For detailed descriptions of the above, refer to [`Accelerate` launch documentation](../package_reference/cli#accelerate-launch). To access other DeepSpeed configurations, such as mixed precision settings, you need to pass in a `--deepspeed_config_file`, see the [documentation](../usage_guides/deepspeed#deepspeed-config-file). DeepSpeed can be also configured via [`DeepSpeedPlugin`], e.g., `DeepSpeedPlugin.zero_stage` is equivalent of `--zero_stage`, and `DeepSpeedPlugin.hf_ds_config` can be used to pass `--deepeed_config_file.` FSDP can be also configured via [`FullyShardedDataParallelPlugin`], e.g., `FullyShardedDataParallelPlugin.sharding_strategy` is equivalent of `--fsdp_sharding_strategy`. ### Checkpointing Do note that while FSDP can be configured via `--fsdp_state_dict_type` to save either full / sharded checkpoints. For DeepSpeed Zero3, one could pass a `--zero3_save_16bit_model true`, which conveniently consolidates the model to a single rank and saves; this is the FSDP equivalent of `fsdp_state_dict_type: FULL_STATE_DICT`. For large models, consolidating the model to a single rank can be very slow. For quicker checkpointing, for FSDP use `fsdp_state_dict_type: SHARDED_STATE_DICT`, and for DeepSpeed Zero3 [use the `zero_to_fp32.py` script to post-convert sharded checkpoints](https://www.deepspeed.ai/tutorials/zero/#extracting-weights). ### Offloading FSDP only allows *all-or-nothing* offload (i.e., either offload parameters, gradients, and optimizer, or keep them all in GPU), but DeepSpeed can offload parameters and optimizer differently. Furthermore, DeepSpeed also supports [offloading to NVME](https://www.deepspeed.ai/docs/config-json/#parameter-offloading). ### Prefetching FSDP allows two prefetching configurations `--fsdp_forward_prefetch` and `--fsdp_backward_prefetch` to improve overlap of comms / computation at a cost of extra memory, see [FSDP documentation](https://pytorch.org/docs/stable/fsdp.html). For DeepSpeed, the prefetching will be turned on when needed, and it turns on depending on certain hyper-params like `stage3_param_persistence_threshold`, `stage3_max_reuse_distance`, etc, [that can be configured for Zero3](https://www.deepspeed.ai/docs/config-json/#parameter-offloading); `accelerate` may set these hyper-params automatically if you don't set those explicitly in the deepspeed config file. For FSDP set `fsdp_backward_prefetch: BACKWARD_PRE` for improved throughputs if memory allows. ### Model Loading While FSDP require an explicit `--fsdp_cpu_ram_efficient_loading true` to activate efficient model loading, `transformers` will activate the similar feature whenever DeepSpeed Zero3 is used. For FSDP, whenever setting `--fsdp_cpu_ram_efficient_loading true`, `accelerate` will automatically set `sync_module_states` to true. For RAM efficient loading the weights will be loaded only in a single rank, and thus requires `sync_module_states` to broadcast weights to other ranks. ### Model FSDP requires an explicit `--fsdp_auto_wrap_policy` for the algorithm to decide how to schedule the all-gather and reduce-scatter operations. But for DeepSpeed this is transparent to the user. For FSDP, simply set `fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP`. With the latest [`transformers`] versions, we try our best to figure out the suitable `fsdp_transformer_layer_cls_to_wrap` for HF transformers models. However, if you get an error regarding it, please specify this. 
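As mentioned above, both frameworks can also be configured in code instead of through the CLI flags; a minimal sketch using the plugin attributes referenced in this guide (the values are illustrative, and only one plugin should be passed depending on the framework you pick):

```py
from accelerate import Accelerator, DeepSpeedPlugin, FullyShardedDataParallelPlugin

# DeepSpeed ZeRO-3, the counterpart of `--zero_stage 3`
deepspeed_plugin = DeepSpeedPlugin(zero_stage=3)
accelerator = Accelerator(deepspeed_plugin=deepspeed_plugin)

# or FSDP full sharding, the counterpart of `--fsdp_sharding_strategy FULL_SHARD`
# fsdp_plugin = FullyShardedDataParallelPlugin(sharding_strategy="FULL_SHARD")
# accelerator = Accelerator(fsdp_plugin=fsdp_plugin)
```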
### Parameters Summoning FSDP requires an explicit `--fsdp_use_orig_params` flag if using `torch.compile`, see [the pytorch documentation](https://pytorch.org/docs/stable/fsdp.html#module-torch.distributed.fsdp). For DeepSpeed this is transparent to the user. For FSDP, when using `torch.compile` please set `fsdp_use_orig_params: True`. ## Training Deepspeed requires explicit `--gradient_accumulation_steps` and `--gradient_clipping` flags. For FSDP this is transparent to the user. When using DeepSpeed, set `gradient_accumulation_steps: "auto"` and `gradient_clipping: "auto"` to automatically pick up values set in the [`Accelerator`] or [`TrainingArguments`] (if using `transformers`). ## On Differences in Data Precision Handling To discuss how data precision is handled in both FSDP and Deepspeed, it is instructive to first give an overview of how model parameters are handled in these frameworks. Before the model / optimizer parameters are distributed across GPUs, parameter preparation is involved to first "flatten" them to one-dimensional [`torch.Tensor`](https://pytorch.org/docs/stable/tensors.html#torch-tensor). The implementation of FSDP / DeepSpeed varies in the respect of the `dtype` in which these "flattened" parameters are stored, and there are ramifications with regards to how [`torch.Optimizer`](https://pytorch.org/docs/stable/optim.html#module-torch.optim) allocate their `dtype`s. The table below outlines the processes for both frameworks; the "Local" column indicates the process occurring at a per-gpu level, therefore any memory overheads by upcasting should be understood to be amortized by the number of gpus used. As a rule of thumb, for stable training with automatic mixed precision, all the trainable parameters have to be in `torch.float32`. Process | Local | Framework | Details --|--|--|-- Loading, i.e., [`AutoModel.from_pretrained(..., torch_dtype=torch_dtype)`] | Preparation, i.e., creation of "flat params" | ✅ | FSDP
DeepSpeed | created in `torch_dtype`.
disregards `torch_dtype`, created in `float32`. Optimizer initialization | ✅ | FSDP
DeepSpeed | creates parameters in `torch_dtype`
creates parameters in `float32` Training Step, i.e, forward, backward, reduction | | FSDP
DeepSpeed | follows [`MixedPrecision`](https://pytorch.org/docs/stable/fsdp.html#torch.distributed.fsdp.MixedPrecision)
follows `deepspeed_config_file` mixed precision settings. Optimizer (Pre-Step) | ✅ | FSDP
DeepSpeed | upcasting (if any) to `torch_dtype`
upcasted to `float32` Optimizer (Actual Step) | ✅ | FSDP
DeepSpeed | occurs in `torch_dtype`
occurs in `float32`. Therefore when using DeepSpeed a small number of GPUs, be aware of potentially significant memory overheads due to the upcasting during preparation. With FSDP, in the absence of mixed precision, it is possible to operate the [`torch.Optimizer`](https://pytorch.org/docs/stable/optim.html#module-torch.optim) in low precision `torch_dtype`, which may be helpful when using small number of GPUs. With mixed precision, FSDP and DeepSpeed will upcast in the model preparation step (c.f. table above). But do note that FSDP will then save checkpoints in the upcasted precision; Deepspeed may still save low precision checkpoints if `--zero3_save_16bit_model` is specified. To clarify the above table consider the concrete examples below; the optimizer pre- and actual step combined for brevity. With FSDP it is possible to operate in the two modes shown below, but DeepSpeed can only operate in one. Framework | Model Loading (`torch_dtype`) | Mixed Precision | Preparation (Local) | Training | Optimizer (Local) --|--|--|--|--|-- FSDP | bf16 | default (none) | bf16 | bf16 | bf16 FSDP | bf16 | bf16 | fp32 | bf16 | fp32 DeepSpeed | bf16 | bf16 | fp32 | bf16 | fp32 accelerate-1.9.0/docs/source/concept_guides/gradient_synchronization.md000066400000000000000000000220061503574341000264760ustar00rootroot00000000000000 # Gradient synchronization PyTorch's distributed module operates by communicating back and forth between all of the GPUs in your system. This communication takes time, and ensuring all processes know the states of each other happens at particular triggerpoints when using the `ddp` module. These triggerpoints are added to the PyTorch model, specifically their `forward()` and `backward()` methods. This happens when the model is wrapped with `DistributedDataParallel`: ```python import torch.nn as nn from torch.nn.parallel import DistributedDataParallel model = nn.Linear(10, 10) ddp_model = DistributedDataParallel(model) ``` In Accelerate this conversion happens automatically when calling [`~Accelerator.prepare`] and passing in your model. ```diff + from accelerate import Accelerator + accelerator = Accelerator() import torch.nn as nn - from torch.nn.parallel import DistributedDataParallel model = nn.Linear(10,10) + model = accelerator.prepare(model) ``` ## The slowdown in gradient accumulation You now understand that PyTorch adds hooks to the `forward` and `backward` method of your PyTorch model when training in a distributed setup. But how does this risk slowing down your code? In DDP (distributed data parallel), the specific order in which processes are performed and ran are expected at specific points and these must also occur at roughly the same time before moving on. The most direct example is when you update model parameters through `optimizer.step()`. Without gradient accumulation, all instances of the model need to have updated their gradients computed, collated, and updated before moving on to the next batch of data. When performing gradient accumulation, you accumulate `n` loss gradients and skip `optimizer.step()` until `n` batches have been reached. As all training processes only need to synchronize by the time `optimizer.step()` is called, without any modification to your training step, this needless inter-process communication can cause a significant slowdown. How can you avoid this overhead? 
## Solving the slowdown problem Since you are skipping model parameter updates when training on these batches, their gradients do not need to be synchronized until the point where `optimizer.step()` is actually called. PyTorch cannot automagically tell when you need to do this, but they do provide a tool to help through the [`no_sync`](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html#torch.nn.parallel.DistributedDataParallel.no_sync) context manager that is added to your model after converting it to DDP. Under this context manager, PyTorch will skip synchronizing the gradients when `.backward()` is called, and the first call to `.backward()` outside this context manager will trigger the synchronization. See an example below: ```python ddp_model, dataloader, optimizer = accelerator.prepare(model, dataloader, optimizer) for index, batch in enumerate(dataloader): inputs, targets = batch # Trigger gradient synchronization on the last batch if index != (len(dataloader) - 1): with ddp_model.no_sync(): # Gradients only accumulate outputs = ddp_model(inputs) loss = loss_func(outputs) accelerator.backward(loss) else: # Gradients finally sync outputs = ddp_model(inputs) loss = loss_func(outputs) accelerator.backward(loss) optimizer.step() ``` In Accelerate to make this an API that can be called no matter the training device (though it may not do anything if you are not in a distributed system!), `ddp_model.no_sync` gets replaced with [`~Accelerator.no_sync`] and operates the same way: ```diff ddp_model, dataloader, optimizer = accelerator.prepare(model, dataloader, optimizer) for index, batch in enumerate(dataloader): inputs, targets = batch # Trigger gradient synchronization on the last batch if index != (len(dataloader)-1): - with ddp_model.no_sync(): + with accelerator.no_sync(model): # Gradients only accumulate outputs = ddp_model(inputs) loss = loss_func(outputs, targets) accelerator.backward(loss) else: # Gradients finally sync outputs = ddp_model(inputs) loss = loss_func(outputs) accelerator.backward(loss) optimizer.step() optimizer.zero_grad() ``` As you may expect, the [`~Accelerator.accumulate`] function wraps around this conditional check by keeping track of the current batch number, leaving you with the final gradient accumulation API: ```python ddp_model, dataloader, optimizer = accelerator.prepare(model, dataloader, optimizer) for batch in dataloader: with accelerator.accumulate(model): optimizer.zero_grad() inputs, targets = batch outputs = model(inputs) loss = loss_function(outputs, targets) accelerator.backward(loss) optimizer.step() optimizer.zero_grad() ``` As a result, you should either use *`accelerator.accumulate` or `accelerator.no_sync`* when it comes to API choice. ## Just how much of a slowdown is there, and easy mistakes you can make To set up a realistic example, consider the following setup: * Two single-GPU T4 nodes and one node with two GPUs * Each GPU is a T4, and are hosted on GCP * The script used is a modification of the [NLP Example](https://github.com/muellerzr/timing_experiments/blob/main/baseline.py) script * Batch size per GPU is 16, and gradients are accumulated every 4 steps All scripts are available in [this repository](https://github.com/muellerzr/timing_experiments). If not careful about gradient synchronization and GPU communication, a *large* amount of time can be wasted from when these GPUs communicate to each other during unnecessary periods. By how much? 
Reference: - Baseline: uses no synchronization practices discussed here - `no_sync` improperly: `no_sync` only around the `backward` call, not the `forward` - `no_sync`: using the `no_sync` pattern properly - `accumulate`: using [`~Accelerator.accumulate`] properly Below are the average seconds per batch iterating over 29 batches of data for each setup on both a single node and on the dual-node setup: | | Baseline | `no_sync` improperly | `no_sync` | `accumulate`| | :---------: | :-------: | :------------------: | :-------: | :---------: | | Multi-Node | 2±0.01s | 2.13±0.08s | **0.91±0.11s** | **0.91±0.11s** | | Single Node | 0.50±0.01s | 0.50±0.01s | **0.41±0.015s** | **0.41±0.015s** | As you can see, if you are not careful about how you set up your gradient synchronization, you can get upwards of more than a 2x slowdown during training! If you are worried about making sure everything is done properly, we highly recommend utilizing the [`~Accelerator.accumulate`] function and passing in `gradient_accumulation_steps` or `gradient_accumulation_plugin` to the [`Accelerator`] object so Accelerate can handle this for you. ### `no_sync` requires additional GPU memory when using FSDP Be aware that not syncing gradients can have adverse effects while performing FSDP training. As it has been warned in `torch`, the [`no_sync` context manager for FSDP](https://pytorch.org/docs/stable/fsdp.html#torch.distributed.fsdp.FullyShardedDataParallel.no_sync) will require additional memory. Therefore in memory intensive situations while using FSDP, we recommend to set `sync_each_batch` to `True` in the [`~utils.GradientAccumulationPlugin`] to disable `no_sync`. See the example below where we fine-tune Mixtral (47B parameters) on 8 A100-80GB GPUs. We see that even for a modest `gradient_accumulation_steps=2` we quickly go out-of-memory (OOM) if `no_sync` is enabled. Again, this is due to additional memory overheads due to FSDP's `no_sync`. However, if `no_sync` is disabled via `sync_each_batch=True`, then the memory consumption for `gradient_accumulation_steps=16` reverts to that of `gradient_accumulation_steps=1`. | Model | `no_sync` (accum=1) | `no_sync` (accum=2) | `no_sync` disabled (accum=16) | :-------------: | :-----------------: | :-----------------: | :-----------------: mixtral 8x7B | 69G | OOM | 69G > [!WARNING] > Disabling `no_sync` means there _will be slowdown_ due the extra data syncs, as explained by the earlier sections of this guide.accelerate-1.9.0/docs/source/concept_guides/internal_mechanism.md000066400000000000000000000105351503574341000252240ustar00rootroot00000000000000 # Accelerate's internal mechanisms Internally, Accelerate works by first analyzing the environment in which the script is launched to determine which kind of distributed setup is used, how many different processes there are and which one the current script is in. All that information is stored in the [`~AcceleratorState`]. This class is initialized the first time you instantiate an [`~Accelerator`] as well as performing any specific initialization your distributed setup needs. Its state is then uniquely shared through all instances of [`~state.AcceleratorState`]. 
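For illustration, that shared state can be read from anywhere in your script once an [`Accelerator`] exists; a minimal sketch (the attributes shown are the ones exposed by the library):

```py
from accelerate import Accelerator
from accelerate.state import AcceleratorState

accelerator = Accelerator()

# AcceleratorState behaves like a shared singleton: any new instance
# sees the exact same information about the current setup.
state = AcceleratorState()
print(state.distributed_type, state.num_processes, state.process_index, state.device)
```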
(The same can also be done with the [`PartialState`], a more barebones version it inherits) Then, when calling [`~Accelerator.prepare`], the library: - wraps your model(s) in the container adapted for the distributed setup, - wraps your optimizer(s) in an [`~optimizer.AcceleratedOptimizer`], - wraps your scheduler(s) in an [`~scheduler.AcceleratedScheduler`] - creates a new version of your dataloader(s) in a [`~data_loader.DataLoaderShard`] or [`~data_loader.DataLoaderDispatcher`] While the model(s), optimizer(s), and scheduler(s) are just put in simple wrappers, the dataloader(s) are re-created. This is mostly because PyTorch does not let the user change the `batch_sampler` of a dataloader once it's been created and the library handles the sharding of your data between processes by changing that `batch_sampler` to yield every other `num_processes` batches (if enabled). The [`~data_loader.DataLoaderShard`] subclasses `DataLoader` to add the following functionality: - it synchronizes the appropriate random number generator of all processes at each new iteration, to ensure any randomization (like shuffling) is done the exact same way across processes. - it puts the batches on the proper device before yielding them (unless you have opted out of `device_placement=True`). The [`~data_loader.DataLoaderDispatcher`] subclasses differs from the [`~data_loader.DataLoaderShard`] in that when iterating through the `DataLoader`, the data is all starting from process 0 and *then* split and sent off to each process rather than it happening at the dataset level. The random number generator synchronization will by default synchronize: - the `generator` attribute of a given sampler (like the PyTorch `RandomSampler`) for PyTorch >= 1.6 - the main random number generator in PyTorch <=1.5.1 You can choose which random number generator(s) to synchronize with the `rng_types` argument of the main [`Accelerator`]. In PyTorch >= 1.6, it is recommended to rely on a local `generator` to avoid setting the same seed in the main random number generator in all processes. Synchronization of the main torch (or CUDA or XLA) random number generator will affect any other potential random artifacts you could have in your dataset (like random data augmentation) in the sense that all processes will get the same random numbers from the torch random modules (so will apply the same random data augmentation if it's controlled by torch). The randomization part of your custom sampler, batch sampler or iterable dataset should be done using a local `torch.Generator` object (in PyTorch >= 1.6), see the traditional `RandomSampler`, as an example. If you have [`torchdata>=0.8.0`](https://github.com/pytorch/data/tree/main) installed, and you have passed `use_stateful_dataloader=True` into your [`~utils.DataLoaderConfiguration`], these classes will directly inherit from `StatefulDataLoader` instead, and maintain a `state_dict`. For more details about the internals, see the [Internals page](../package_reference/torch_wrappers). accelerate-1.9.0/docs/source/concept_guides/low_precision_training.md000066400000000000000000000131401503574341000261260ustar00rootroot00000000000000 # Low precision training methods The release of new kinds of hardware led to the emergence of new training paradigms that better utilize them. Currently, this is in the form of training in 8-bit precision using packages such as [TransformersEngine](https://github.com/NVIDIA/TransformerEngine) (TE) or [MS-AMP](https://github.com/Azure/MS-AMP/tree/main). 
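In Accelerate, both backends are reached through the same `mixed_precision` flag; a minimal sketch (backend-specific options are configured separately, see the usage guide linked below):

```py
from accelerate import Accelerator

# Requires supported hardware plus TransformerEngine and/or MS-AMP installed
accelerator = Accelerator(mixed_precision="fp8")
```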
For an introduction to the topics discussed today, we recommend reviewing the [low-precision usage guide](../usage_guides/low_precision_training) as this documentation will reference it regularly. ## A Quick Chart Below is a quick chart from the MS-AMP documentation showing the different bit-precisions for each solution during training: Optimization Level | Computation(GEMM) | Comm | Weight | Master Weight | Weight Gradient | Optimizer States -- | -- | -- | -- | -- | -- | -- FP16 AMP | FP16 | FP32 | FP32 | N/A | FP32 | FP32+FP32 Nvidia TE | FP8 | FP32 | FP32 | N/A | FP32 | FP32+FP32 MS-AMP O1 | FP8 | FP8 | FP16 | N/A | FP8 | FP32+FP32 MS-AMP O2 | FP8 | FP8 | FP16 | N/A | FP8 | FP8+FP16 MS-AMP O3 | FP8 | FP8 | FP8 | FP16 | FP8 | FP8+FP16 ## `TransformersEngine` `TransformersEngine` is the first solution to trying to train in 8-bit floating point. It works by using drop-in replacement layers for certain ones in a model that utilizes their FP8-engine to reduce the number of bits (such as 32 to 8) without degrading the final accuracy of the model. Specifically, Accelerate will find and replace the following layers with `TransformersEngine` versions: * `nn.LayerNorm` for `te.LayerNorm` * `nn.Linear` for `te.Linear` As a result we wind up with a model that has most of its layers in BF16, while some layers are in FP8 reducing some of the memory. Anecdotally, we have noticed that performance gains don't really start showing when using `TransformerEngine` until a large majority of the layers in the model are made up of those two layers to replace. As a result, only larger models have shown performance improvements when the number of parameters is around and upwards of a few billion. The `TransformerEngine` can receive many different arguments that customize how it performs FP8 calculations and what they do. A full list of the arguments is available below: * `margin`: The margin to use for the gradient scaling. * `interval`: The interval to use for how often the scaling factor is recomputed. * `fp8_format``: The format to use for the FP8 recipe. Must be one of `HYBRID` or `E4M3`. (Generally `HYBRID` for training, `E4M3` for evaluation) * `amax_history_len`: The length of the history to use for the scaling factor computation * `amax_compute_algo`: The algorithm to use for the scaling factor computation. Must be one of `max` or `most_recent`. * `override_linear_precision`: Whether or not to execute `fprop`, `dgrad`, and `wgrad` GEMMS in higher precision. You can customize each of these as part of [`utils.FP8RecipeKwargs`] to help optimize performance of your models. If we notice in the chart mentioned earlier, TE simply casts the computation layers into FP8, while everything else is in FP32. As a result this winds up utilizing the most memory but does so with the benefit of guaranteeing the least amount of loss in end accuracy during training. ## `MS-AMP` MS-AMP takes a different approach to `TransformersEngine` by providing three different optimization levels to convert more operations in FP8 or FP16. * The base optimization level (`O1`), passes communications of the weights (such as in DDP) in FP8, stores the weights of the model in FP16, and leaves the optimizer states in FP32. The main benefit of this optimization level is that we can reduce the communication bandwidth by essentially half. Additionally, more GPU memory is saved due to 1/2 of everything being cast in FP8, and the weights being cast to FP16. Notably, both the optimizer states remain in FP32. 
* The second optimization level (`O2`) improves upon this by also reducing the precision of the optimizer states. One is in FP8 while the other is in FP16. Generally it's been shown that this will only provide a net-gain of no degraded end accuracy, increased training speed, and reduced memory as now every state is either in FP16 or FP8. * Finally, MS-AMP has a third optimization level (`O3`) which helps during DDP scenarios such as DeepSpeed. The weights of the model in memory are fully cast to FP8, and the master weights are now stored in FP16. This fully reduces memory by the highest factor as now not only is almost everything in FP8, only two states are left in FP16. Currently, only DeepSpeed versions up through 0.9.2 are supported, so this capability is not included in the Accelerate integration ## Combining the two More experiments need to be performed but it's been noted that combining both MS-AMP and TransformersEngine can lead to the highest throughput by relying on NVIDIA's optimized FP8 operators and utilizing how MS-AMP reduces the memory overhead. accelerate-1.9.0/docs/source/concept_guides/performance.md000066400000000000000000000110051503574341000236560ustar00rootroot00000000000000 # Comparing performance across distributed setups Evaluating and comparing the performance from different setups can be quite tricky if you don't know what to look for. For example, you cannot run the same script with the same batch size across TPU, multi-GPU, and single-GPU with Accelerate and expect your results to line up. But why? There are three reasons for this that this tutorial will cover: 1. **Setting the right seeds** 2. **Observed Batch Sizes** 3. **Learning Rates** ## Setting the Seed While this issue has not come up as much, make sure to use [`utils.set_seed`] to fully set the seed in all distributed cases so training will be reproducible: ```python from accelerate.utils import set_seed set_seed(42) ``` Why is this important? Under the hood this will set **5** different seed settings: ```python random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) # or torch.xpu.manual_seed_all, etc # ^^ safe to call this function even if cuda is not available if is_torch_xla_available(): xm.set_rng_state(seed) ``` The random state, numpy's state, torch, torch's device state, and if TPUs are available torch_xla's cuda state. ## Observed Batch Sizes When training with Accelerate, the batch size passed to the dataloader is the **batch size per GPU**. What this entails is a batch size of 64 on two GPUs is truly a batch size of 128. As a result, when testing on a single GPU this needs to be accounted for, as well as similarly for TPUs. The below table can be used as a quick reference to try out different batch sizes: In this example, there are two GPUs for "Multi-GPU" and a TPU pod with 8 workers | Single GPU Batch Size | Multi-GPU Equivalent Batch Size | TPU Equivalent Batch Size | |-----------------------|---------------------------------|---------------------------| | 256 | 128 | 32 | | 128 | 64 | 16 | | 64 | 32 | 8 | | 32 | 16 | 4 | ## Learning Rates As noted in multiple sources[[1](https://aws.amazon.com/blogs/machine-learning/scalable-multi-node-deep-learning-training-using-gpus-in-the-aws-cloud/)][[2](https://docs.nvidia.com/clara/clara-train-sdk/pt/model.html#classification-models-multi-gpu-training)], the learning rate should be scaled *linearly* based on the number of devices present. 
## Learning Rates

As noted in multiple sources[[1](https://aws.amazon.com/blogs/machine-learning/scalable-multi-node-deep-learning-training-using-gpus-in-the-aws-cloud/)][[2](https://docs.nvidia.com/clara/clara-train-sdk/pt/model.html#classification-models-multi-gpu-training)], the learning rate should be scaled *linearly* based on the number of devices present. The snippet below shows doing so with Accelerate. (Since users can define their own learning rate schedulers, we leave it up to you to decide whether or not to scale your learning rate.)

```python
learning_rate = 1e-3
accelerator = Accelerator()
learning_rate *= accelerator.num_processes

optimizer = AdamW(params=model.parameters(), lr=learning_rate)
```

You will also find that `accelerate` will step the learning rate based on the number of processes being trained on. This is because of the observed batch size noted earlier. So in the case of 2 GPUs, the learning rate will be stepped twice as often as on a single GPU, to account for the batch size being twice as large (if no changes to the batch size on the single-GPU instance are made).

## Gradient Accumulation and Mixed Precision

When using gradient accumulation and mixed precision, some degradation in performance is expected due to how gradient averaging works (accumulation) and the precision loss (mixed precision). This will be seen explicitly when comparing the batch-wise loss between different compute setups. However, the overall loss, metric, and general performance at the end of training should be _roughly_ the same.
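For reference, here is a minimal sketch of what "using gradient accumulation and mixed precision" looks like with Accelerate, assuming `model`, `optimizer`, `train_dataloader`, and `loss_function` have already been built; the argument values are only illustrative:

```python
from accelerate import Accelerator

# Accumulate gradients over 2 batches and train in fp16
accelerator = Accelerator(gradient_accumulation_steps=2, mixed_precision="fp16")
model, optimizer, train_dataloader = accelerator.prepare(model, optimizer, train_dataloader)

for batch in train_dataloader:
    # Accelerate decides when gradients are synchronized and when the
    # optimizer actually steps, based on `gradient_accumulation_steps`
    with accelerator.accumulate(model):
        inputs, targets = batch
        outputs = model(inputs)
        loss = loss_function(outputs, targets)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()
```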
accelerate-1.9.0/docs/source/concept_guides/training_tpu.md

# Training on TPUs

Training on TPUs can be slightly different from training on multi-GPU, even with Accelerate. This guide aims to show you where you should be careful and why, as well as the best practices in general.

## Training in a Notebook

The main thing to be careful about when training on TPUs comes from the [`notebook_launcher`]. As mentioned in the [notebook tutorial](../usage_guides/notebook), you need to restructure your training code into a function that can get passed to the [`notebook_launcher`] function and be careful about not declaring any tensors on the GPU.

While on a TPU that last part is not as important, a critical part to understand is that when you launch code from a notebook you do so through a process called **forking**. When launching from the command line, you perform **spawning**, where a python process is not currently running and you *spawn* a new one. Since your Jupyter notebook is already utilizing a python process, you need to *fork* a new process from it to launch your code.

Where this becomes important is in regard to declaring your model. On forked TPU processes, it is recommended that you instantiate your model *once* and pass this into your training function. This is different from training on GPUs, where you create `n` models that have their gradients synced and back-propagated at certain moments. Instead, one model instance is shared between all the nodes and it is passed back and forth. This is especially important when training on low-resource TPUs such as those provided in Kaggle kernels or on Google Colaboratory.

Below is an example of a training function passed to the [`notebook_launcher`] if training on CPUs or GPUs. (This code snippet is based on the one from the `simple_nlp_example` notebook found [here](https://github.com/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_nlp_example.ipynb), with slight modifications for the sake of simplicity.)

```python
def training_function():
    # Initialize accelerator
    accelerator = Accelerator()
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=2)
    train_dataloader, eval_dataloader = create_dataloaders(
        train_batch_size=hyperparameters["train_batch_size"], eval_batch_size=hyperparameters["eval_batch_size"]
    )

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=hyperparameters["learning_rate"])

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader
    )

    num_epochs = hyperparameters["num_epochs"]
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
```

```python
from accelerate import notebook_launcher

notebook_launcher(training_function)
```

The `notebook_launcher` will default to 8 processes if Accelerate has been configured for a TPU.

If you use this example and declare the model *inside* the training loop, then on a low-resource system you will potentially see an error like:

```
ProcessExitedException: process 0 terminated with signal SIGSEGV
```

This error is *extremely* cryptic, but the basic explanation is that you ran out of system RAM. You can avoid this entirely by reconfiguring the training function to accept a single `model` argument, and declaring it in an outside cell:

```python
# In another Jupyter cell
model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=2)
```

```diff
+ def training_function(model):
      # Initialize accelerator
      accelerator = Accelerator()
-     model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=2)
      train_dataloader, eval_dataloader = create_dataloaders(
          train_batch_size=hyperparameters["train_batch_size"], eval_batch_size=hyperparameters["eval_batch_size"]
      )
      ...
```

And finally calling the training function with:

```diff
  from accelerate import notebook_launcher

- notebook_launcher(training_function)
+ notebook_launcher(training_function, (model,))
```

The above workaround is only needed when launching a TPU instance from a Jupyter Notebook on a low-resource server such as Google Colaboratory or Kaggle. If using a script or launching on a much beefier server, declaring the model beforehand is not needed.

## Mixed Precision and Global Variables

As mentioned in the [mixed precision tutorial](../usage_guides/mixed_precision), Accelerate supports fp16 and bf16, both of which can be used on TPUs. That being said, ideally `bf16` should be utilized as it is extremely efficient to use.

There are two "layers" when using `bf16` and Accelerate on TPUs: at the base level and at the operation level.
At the base level, this is enabled when passing `mixed_precision="bf16"` to `Accelerator`, such as:

```python
accelerator = Accelerator(mixed_precision="bf16")
```

By default, this will cast `torch.float` and `torch.double` to `bfloat16` on TPUs. Under the hood, this sets the `XLA_USE_BF16` environment variable to `1`.

There is a further configuration you can perform, which is setting the `XLA_DOWNCAST_BF16` environment variable. If set to `1`, then `torch.float` is `bfloat16` and `torch.double` is `float32`. This is performed in the `Accelerator` object by passing `downcast_bf16=True`:

```python
accelerator = Accelerator(mixed_precision="bf16", downcast_bf16=True)
```

Using downcasting instead of bf16 everywhere is good when you are trying to calculate metrics, log values, and so on, where raw bf16 tensors would be unusable.

## Training Times on TPUs

As you launch your script, you may notice that training seems exceptionally slow at first. This is because TPUs first run through a few batches of data to see how much memory to allocate, before finally utilizing this configured memory allocation extremely efficiently.

If you notice that the evaluation code used to calculate your model's metrics takes longer because of the larger batch size being used, it is recommended to keep the batch size the same as the training data if it is too slow. Otherwise the memory will reallocate to this new batch size after the first few iterations. Just because the memory is allocated does not mean it will be used, nor will the batch size increase when going back to your training dataloader.

accelerate-1.9.0/docs/source/imgs/accelerate_logo.png
accelerate-1.9.0/docs/source/imgs/course_banner.png
accelerate-1.9.0/docs/source/index.md

# Accelerate

Accelerate is a library that enables the same PyTorch code to be run across any distributed configuration by adding just four lines of code! In short, training and inference at scale made simple, efficient and adaptable.

```diff
+ from accelerate import Accelerator
+ accelerator = Accelerator()

+ model, optimizer, training_dataloader, scheduler = accelerator.prepare(
+     model, optimizer, training_dataloader, scheduler
+ )

  for batch in training_dataloader:
      optimizer.zero_grad()
      inputs, targets = batch
      inputs = inputs.to(device)
      targets = targets.to(device)
      outputs = model(inputs)
      loss = loss_function(outputs, targets)
+     accelerator.backward(loss)
      optimizer.step()
      scheduler.step()
```

Built on `torch_xla` and `torch.distributed`, Accelerate takes care of the heavy lifting, so you don't have to write any custom code to adapt to these platforms. Convert existing codebases to utilize [DeepSpeed](usage_guides/deepspeed), perform [fully sharded data parallelism](usage_guides/fsdp), and have automatic support for mixed-precision training!
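As a small illustration of that last point (the value `"fp16"` below is just an example), enabling mixed precision is a single-argument change, and the script above then runs unchanged on CPU, single GPU, multi-GPU, or TPU:

```python
from accelerate import Accelerator

# "fp16", "bf16", and "fp8" are accepted values for automatic mixed precision
accelerator = Accelerator(mixed_precision="fp16")

# The rest of the loop above stays exactly the same:
# prepare(), accelerator.backward(loss), optimizer.step(), scheduler.step()
```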
2k2m49/o.j{rJ֭eWK\sͤ<{9nH WPJ@ (%PJ@ (%K>lժUX ,0a|[p9^z۞L婧*_ͮCr +%2i?H}+ݻw[}7˳>+o> Ou,3veXY.W_}5U9!iFf̘!{챇viҼyy/V׺vjav-m'1WJ @ / n`"ArHLb /"Yp,^XƏ/ UW]Ks*e#8F7`ҥ]i뮻ZCi0_7~\}ұcGodԩrA n|ٍ3F^*fÛ ],3of."_c<\p/pnyH ۻ:xׁ:8『:8『6ڃ!7 -U9)ST}CQqN/vmr=[h'` oJ*"{g9_]vqGYym?]Ν;ёcǎ (ٚwѢE5fsn:͞=0Fӡl.R嬳Βgy&|3- q8w0 Q]*Ƭڀ׭/>M :Ce{キ!C_\|Ų;Xc,+K/dߺ[hX~wq''O>:|1yxγ}~x0 wӬY39/6ltM<@!`ח뮻Ζe(o֘x8l=z:.τ[Ư]WfסrRN:x『:8『:8『:8\%)?xf')"ƍǘ'"ߟ" E3ΐO?a>ny;L0A{nX.%`+~ p7xL8c8AϿu\VVfWca$Vnp߿ùT:m8ov^<iRqk3I'o2t0_guBmDC_ 2$϶ISw|\< #4 O?mWbwG8ǂ .Kr yajg3^n:%X]n،RɦR$ %P1X0L@dza sfΜiwׯ`1Lm6>K pl۶m[ *W*b sTn&aV7rÀp# HH!O>6_em5 b[A*wa`hgn*SHbt6mbc9J={tPqڥcaDrM6nA-$o00h #+4$ ^y[ȑ#Fcl݆ŏexxY}m\?HJ+ZhaWon5B_x]A`V5b^vja|&J~miڴ5,c;j:% vq>3+B{1$1o޼Xa#BCauKIUsV7bSy573w0 0 vp +10?#vop-p;|WApoa<wEX$_=X+ꎁAx 7 \ )6Vx p #o4.C]o6Mob 񄷛vXq#o%`#r^pRI7l̷뮻M[R0X#=AiM7+ualŅsp77Z<-]TT4qY?g'? fl#.^ah ʃ+V 4^?78~HW>8t|7fs1?y/p0Nz?hC}~ػX@ #^91Kp]̳bE$E1c\}F_asOC|j9:̇bd̋XoBg0LJϭ_,\"axaD N,HJ-[Zc!n71a܏k׍lGA/ ,3 m|L #E{ix5ª*a6O:> 6*IԽ{w{չXMA2B|E^ `al-|ptu5״`X5_%}58okamDZk- |x VոY8nM `#1čے &qy7,L2^7&86p02>%@~C<[8.nҢI G<8 0ӏ* H0?#xF^,0p{t`ï82~ןS~ >~~>HWPPCȘ:>()cUt zQ> 5QPp7^qP8k-X 4Fa Ud6VbN= 0;Qu[ebq}hm8g  lX:: :鄡h'8W眶06+M1ׇI;Fr^颸` s0UX6ُ,23>6aN00/`WD]G=H `ǜ;l 0Pέݼ0aN.1;y:QMT:,ptT6~h0&a Gk=n/`!&^^e{ǽ ^1q-mypѣG&W1 +[uP#laPA| &`a Fc`F||QM7FÖV4`O7Xx0/θaoV 4xX 7`3AÎ1cWb53 7~Wᐏ@Qh1fF pCqAM&(9a}A<~8o:gLja~;ۅ0E8`֐? / ?Q(%PJ@ (%PJ@ (+`pjGl@Lcnrٵ (0{=/s0O`BwX(b^1aq\%4ۈbW`,qWH8<),`9K,(Yn`FK6wpaòMp J{# >w]%9Դ L1X0c7J?` 6C,z\˝?,rAcsό 3=I0:`X4ǎXؖʥ*3U< SB6b92)3 /x /p|v'G6e1 ^TI GJcgYRB4[XU.:coa_}n߁7h t[bHo*+Keb[f8͗êP ȟuV}RQW27ŀ| B6ސ7x hWR哎1wE w4".o(]銗7L*&|K /IFx[[,uNzl#<- _w}`cpeÃ-7`n+Z^; [vgGn@hopOϧ/|)ܯB}E?B}GC ?j |2_}KZ\&9  . XD#-淰J7|sXjE(csf=zgc\|0G9>,stXhEGW=aJ/Lo8 $V`2V {֘ya|?!Fq1yN0+0Q*ҧ~'i8 9T 0xrQ=\7vzna~zH,Ž۶b.ch*6Pڮs%vI~0)%POd0>>vz|Jc5lm$ :'\mb>@5h { ( f(of["o[m vX)裏7.iQol 0F/8pwbãj2=0f}gX) ‘GiM[&&pl7!|`]*e:ސB }BY6OI~"A7('"J`cP/<`ou(?^q9F;pAa/ ͺtl8E o@ #8~(:cǍoT#?DZoo8 = <xExXwD?M*Gg+KW+ՏIdp XFzu9gT~gp~ ltW:8جbǜ7_n(vu\/f6 cӹ}^7Q`^^xvg_n=VJ 7^ox2t`e:>ba-^);gWWfƉx Yr o3 8c0{t08";``dn7EFe"a}~ݷ`CQނÃ*aFJ`oᆏVg}v3^u?~5*yf2#. pCW`b/"VP.Jd!=c:C2!>^w'pЯ >Cyu『:8『:8『:8@qF*mnS9GTXike+1`w wN0a~3qԛtqۤdp1ʥb 442Ta8izLڏ4`.?8;?{2şLʃ AO`̧K8!9ûqaNj|5v̤sm\u&.~q@Lcx&WgV՚o୭To]G.w-\m#`Ğ4 K.ľ3cƌtZ/o~}; q>37M`t(|B90,bU(͞[uү7Ao!_}r0#>Vbe2 ܯ[;SeMn |,XFs|IbGQJ@ (%PJ@ (%PJ A,+9l2kˣ=3b^o,˼2hlMɷQ7 }M(aLJ8nX>w{?oE15LӥcE\x nUl~>J:8*}]޶iWW˔֯~_fO8'&8;ׇ^I_7xiO?C߷|7z}&}2;Ejg~K&Cի[)%Px y|P܋Ǽyfѯ7wP> >ׇ:>ꃎ:>ノ>ル:>ꃎ:>PXe2!l(O'[VVHsU @8b>xoݧ-SJ@ (%PJ@ (%PJ@ (%PJ@ 4eZ#G޽{7V}^Zڊ[$LluDW=P=P=@tq@tq@tq 8`'t0*\gnbW>y(R&P뭸̦m駟ʔ)SJ6)z|'9mI%PJ@ (%PJ@ (%'T 8Clʪ/fYt{ɞV%uZ$P뭘e䠄xGPJ@ (%PJ@ (%PJ@ (%PJc+3J@ (%PJ@ (%PJ@ (%PJ@ (#Pel>\Y5VJ@ (%PJ@ (%PJ@ (%PJ@ (jfy VUVTH 4,;H>(m_nc@_[.F߼kAO+%)z{er~';찃}ҹsg ׼yLxJ@ x ѣGKϞ=JF)W^y\s52h jOrQPpc/"]vw]Ə/ .gL@j%zc^z>OsUUUzJ%;#W\q >\̙#~\wu6W/l`ꔀH&..0`F3@> qr7ڗomכ>W擾՘os='СC˹<٘@Z,SL/㱋v,yb@)e!jXnv.DPJ@ (zAcǎ֐L0AZjEodx-wvonvo$UP `5qHX]1s:ur#KNÔ@c'*?xX`߬z?KMŹ+ݖ +)%L;Z`nҤIG}J _CӬYN2`Zn27>W'4@`UWM7$}>@'CkvX:C삓G}Tz! ΛIwi\BTjJ@ (%l /ʦn&lri`?uJ@ dNXi'үZYg2~0}W2J@c#-g{vϋ/G y3ƂS_d·~q:LE&H^>c66#rnV{ʺ!Ou]+yg+&uuY ` Q W-5oԩSJaArg7i@/ܞJ@ 3<iJ@ #[::>qd %v;sdy띿K9VPJ@ (%PJ@ (%PJ@ (%P!VfvjR|5G%J@y-ʕWps,5nKj} UƷ4{_K[R8.5ZjU-,^j|7RֽKַZUj| K#R/o'*lEU˖*A֌@u h @{$PgXj|@鶥>_oa:,,_o>OCuFm\VkJ/3҉{t9_^gVΕ_oaږWכK@7FKzdҥIaÆ ;&.ͿiJ@ (%PJ@ (%PJ@ (%PJ@ ( p #J_PJtH 4X't0?DKmxy3uݰ'W!DU`cɀ3bY:4E_p֩ (eQ~DGC*iǔWn\daEڭ7b;'˂ Ya"dt|El{m: ^tig8,_us|cl:9c YdI$U&6ΙkI[n婌%7gΜgeǑHk=?e#Qo?gxF2S[g$:FJex*:xɀ+*!O,LKG2>N2YvN{əmϘm& ~&Mv_$!;(*,vqFfxldGŭ??DE# ??'%K?}?e%}g/(?óh; H,#/(?3%̶IAї;$ ?' 
-t4?qa:oOO!{Q-+)[5Le˝=D6B̘gdoC.[_ʃH/ roxɀ3 O/,tLǒA㙟 DeXϲspFܺϊ0a=ި~ؖpE~u}DfbaxEYX27ʲ.;߇RO̚X&u=qrbq7@z'm=1im~ƃ!ؗ8_g /߳PƂgHyܹsm5XS#̆_yҹsgi޼4駟O>)Ͽ['8ȣo,mZ2tP'6HvqG!Cm[{\ymm~G?(ᄏor-oVZI'pkFnv5{<jqr73 Hfj#kS_:4zxV8 Ȁ3f1:dچt2䏲k3Y' YI7rh3C;qNV2|l:oxa0O杋x'dQp W.m2 C6o"5U0O&? 8_ll\Cy}O&m]={.oE;UD.Q~g$R߃k t AşIҰq\BIuD,/YIOߣTGDrO>ǟue?Пxs\a>Yq&3o9.} XAs)wcW'6=y_x>qwxJ-6of(x>+i7hz\sXt>z7:LdH?ˋ-#'!GVwrTwd:O>eo[=WGV=̛9.}j1_"9/WɺoQ 99QG%x 6Lveydv ml9G:c[,PȊdR_|Q=Pfm+IaP|s _Y>`'`%K/d \kW"e8/nfWb-njcb[ ?][QQaKc+t뮳'O?cflVRgy_N:YX /VN>u{'Rtɧ| {キ޶w_0­ңGolO=T{/:q۴icނ|XG_2_?<({ӟJ<&]?9Og$g~6S":ǒ&/7=?ʀ3 ?\?d e$>%HzLo3?a~3<#p%2j0~FeYVJG8CWg6U\^ :1~Z1x/V2#'覛sH뻧HM%-̹n(Io=EFfUrn;6d ?? Ƕ!/8E"B:ǑHks[B%o*g%(C<ϹPp\!9 ͱQ| mIx[Mn)゚2q(Ϻqs@=w8YIog>g%J2<:퍯:\} IFrfė5o+y a2Ș|7lͬMK/P?%3_Ny*` jժ`{fZuƷn1Yt*Hv}Fva,7rp,V:μ`DM7nEp|+{is1k/q6xD5n89{e >#_}Uu2DrW8|'\pfǯyUfkZ7k,qR_X':AN\Dx^dѶf͚I͂yo o$SN[t7'h$c?Ƙ6.-D}r)ϟ7g,m3#q|l?kA?2{gUS'o[czϟdBNA0$iG9Y゚-w|*< 2b|;=D#>oxzBNywr%k";י 8Sه B}n5zL ,4\MD j"r@I)?g4N} !DP+]!Sb;ej&O?5Ν;מԫH ;[$c*FNJ4aaM8kkSoݦ|4#F A2:c|d0 ~.0~n82FwqGu޽ e0O?]sO˒/VwKj=ux3!X}!_ {0D;úrP(}o@7, uvwerLugx.y%w_NKKogf>gxNYϟYd J̘?B[ Yq\(o93:{ə? |c97yOl LǟXϼu1$ᱸ{{)wuǐܰlɗgx.ys\$Q~g, u˛Pϟ9Io,xC=mA8YKOߩ'omO0A}Y([A .oԓsdOfT&啕-6p}[si '|b Ɉү_?-ہdU *~Gn655go&vd$p1hտ9M2k2~v .anqߏBc{k8^cW_}n)vXe 7c ֭m p 1~aXJ0hٲtlU7A pWXatwe"'O`m;oߟ_g\J?CQ~g$d46߆$2''EgǶUVb /ǑH q'gxAe!{[c5oC<G"-\MxvX O%Yv>y3d6Bެ KfkM X2hۄ?yxYle\,lxHT/祍>s֟G̶2/ 6pc, eEhq2( _. ->UY\|E6]LG/n\^^n c9VtMOʹk (|``-±n=묳8s￿ 0>liӦO?~Cl׿H.]dԨQvg`y#+ia +x0M(?3ck97tK1G,Q~g, 5Cb3 O dYONlYJ=s0p?+Q M=g?FUpOߩL~\ DbIzLaV[zYǟǒsrf?[0ry~J?gL%wxH%/Y ׾~X3ڊc89";ź2e!{+<9k;9SU)mǛ 2k(kozu'CHL<^49/Щb'W:ϰoh٘; y0 C$0bY4-[nm7cǎvu+ +cZtRatСCh-/++t=zT@pK 6W .wZ(ꅺ`eBY{キ 6Wƪߣ>Z}-ّjF_ԅ1uOLbuFhFQ'v,1mw7[u Yͤf}-~` u3>$ma?ݻ[VڶKz`yM~+N+A"><#U7{'!?o5gDF)=~?"Ix'fט$/#B:D)ƙB hV-qh(?a8q!?Ë!Gm;) ;t͙=G33 l{-Ɩ}:W*Ҫ%'xעJ|Ga{[oѮ6l5u ^j Xa^{_  n +uV(YI!FkI>$ѕkg"O9PnǛ d5Or 7YSz;75+@BF(}8YIo/΢7+2XC7Jߜ>8Y+Qg>IHSx>+Mj-x7ߜ;#9YpqMj\?CO ?d1A(ĸr';!o273X| ҥK~bӧݐX4OƺvcVair\ح{왔բEd̘1ҫWH O!owlҗkԡ1xg)yPިǟǒ?Z1I굯゚2*T}^@extB}n&z90bFI=̛@f?K*h*g$׾~HGPz_]Fp=}'g#[o+x(g7XRGVp_}?5&I3^FR=Zk53;9ݚMxs\aČƓ{7b 7P&Md̰BQ믿.v 8p`ӻ{ ksV #%5JjL’kooyQ,79S?3 XvrfKbmWc.tOq~7ѪTFpHq(+=x縢7<W?9&wJo7X+oި>iwS}}8YIo(>Dv@UJOơGgz5$\V3/N:+|M:thxTl \^XM5 O,)˯7l#򴝟 _<̞Bx;Iu(@Xnڛ@^YK/=޾?G ,@I?Bgx2hӱU)#@,?˯?; ӟd{-oz#|g>oxŖbpLAFa>dFſ0ߐ? 3;ڿ2G8o?>Ӝ N*B 5PaRMD0_x~?ǟ3'gTe0g;&C$V~?W~\$3^%7ꉂ7~ Q,}^WjF'epۻ`KJ>9Ǜ K P8;Cǟud?Пy ?';?ŸzXҀMts: R֙ǐq\}޾r~׾~X0>7$O}Hu7uw{gxVRǛhTDϊsTBzT"k_ML XquGSW':Zo|Xܩ!'oGF.%nx W/9|.J0<2AϺ|(3uGt ?'Y=4! زP0ސ?'~BxSُ~52{LO%My+y0xCN9K? 
`?П3?3o^$=~?,6_bqwKNܙHK.0/ǝ(?sn]#:Zn׾~XJ߽ƒϟ9@VƐ8ސϟK&_5qK:#~Q>n5'seM_@=RJ@ (%PJ@ (%PJ@ (%PJ@ (4Z23].]yvURq@#é#hPN{ >N 4 #IaSz/YoqAa?1^J@ (%H&б6陯ҷ^YoȢ%՗l&%*ʛn i,^:f1kd֛˰ʅ&eJYsYi78 Yl~WY >+kOtKz3L՛ ϐK֯_sdߥSns886cB0|Ȏm9N}HT^ZV-*Ee6ekHDž9,jUd)ײ9RY\[[x\>tlU6>+t")VMג)G~M6}\Cy(Jצ+ɻ qneҿdi&Δ;OYh낝VZ}&L[.ôeFXBENܯdIg0RK7^A_KyvGf'GYHfrϛcI8uECd/3v=;wD=jզeڢ_xGF|W6j74"SU{ӳwf'dg6}Ce=ߧ+5Gwi#Slr]l|zet{֛3˖v6o|bQ-ް4ܶ#RYUI|Wh%vPn.,62X6AZm&7H~:L|i=otWLZ& M7.A谟,AYa= 'h!l-][,39˂uuFL/l"7[&.NH-YZ&Mem^^%||kbqw0疚styu?L* s}c3Ң`ikyfUYrgnYD^͍a;PNzئ,Y?i Wa43L@ǩ*^<ˊ dmL惶4jf5)Mon8v.su?VٲxNi)Ehw:;o尀ԚejfpF4#qբ41FTι mk Лԉ76טo.Mq(LZrLKwxwjFqx-k7qJs`jk$'Wa>?W}䔅O9Z\TTp\&-quZن&L.U)LYc3AsG5MŒWt%FY[`sS/s}5k#-TJ +kb&qeMImDffP¯Q,&5/O!iY"<8ckQ\ƨRi}ӞVK047F։5}zA$iĜo(:bpJ}:J@ 3WppmջkSN5~wq<=uf5CC w F1tٲer'#F-R~wFTD/CxC$C=&M$sIuF}&}7-NP\} K?_uSe|8[ӧ[o)ou^& iB%PJ >o1(C(96WOx/TsPJ!X2qyi7<%KnyP3QJ@ (%PJ@ (𛿙?)?SI'$n)ba\y TGqc鉬eO=*'OYҜ9Om[˚V.w''NѶK.Hub#΁{]{QG,<;m>^ol^l̜93)(Gyjcg͚U'[OWڥmlĉTm@?ggDO:eܹ;پ9C|N$%(E 5۪-2[vfZi/ee9\񊌜48mr8kP%аL7Qy\Yef+ex钥Gf ܨ!#c0HzB (%@ |?ynٗ9ggPJ@ (%PחQ:u5~Fƌ#"^5Y ]w>&:o'|ZzK>S9O!C_7Jk̻O߾v%5W_%GuzcoR̞#GqZI0r҉Iay鶜;[n{]c,^n6SNq.*oJF~ƚQ 6Ġ{]n]!0]vlsI% MPJQɂ%㯔'|' yH^CuSe-󍨋<(_OYl!r1?7u4pr¿cvdŏ?~)O}9ls֟aɈ:]WP^3y2ǤeSd^.^x.E3y@9q=wu_FˣG{ ô3峉d+IyNf̟#&^ lp RS䥋?Mxɷʒy e!w._|pi|[]塣=7ZYzF;uoyIUFm2*[lq2}:V_<Ҫc;ecw} i\k9riv巯ݖ9X=p2W~K2廉 v?-#L6M8rS,W&`{e>7e#UEmPJu$9S\q 乯ߗ{<0+ҹu{vdeϵ#&7Upǥgnrpߝ]BJ@ (%j!Jۭ#vW>} vZ)ҥIwYplڶmg \{urhW5o^F7NZL|{GrUWշ短k9֞ޣݻF\̟o&.oIi"@|xg.TڴikV+Z>QyKMn͢Kgc%h.h_4Fwv=R%ye6r{>W,9vBiu6}v+\rkmm8WMW^Cn]UU86dlve 9|Wdٯ্߳Ayu@ g^\Q2y9{dJ-G3J%W` [)۝5dVz?A/^-|2V>~-V|i ?$IUXCbcTue~˺?g_19ߌqx{h V"%ze|{Hcdz/S2lkuOfuFzmDj.9o>佟?xQYt<0yIQ4}rsA@2" IQ1!꫈9T&PPH9 mzwf8^tgzz榦Qip~g޻BETZ+WmY^MԶo[;VH_`^M*G'_۶7%[vҹ>)x{Kaz WΝ/ zIi:aCpN?t _ udYYi65_sM<@GJ\M֬^M w5/ߞf6&_Qfѳ/YzwP~Siq~s瘧iy?Ko:p[۾Ϝ9ܲe琧V9nWkխ>Ӏswصk/]amHK.k 6lPh~B@!o&p)2^|(VK0q<{?ueYbj)jlBXLģgaK&eOdO8F}ZCa$E3KD)XU*F+zq9I(]u5šU $4-0Q涋! ='tOGٹ9J'A\=껗.}9Fn AG{>?q ! V˗GEVHG8 ˖/[PZcQ^DD}qgݨQ#_V6mዯ})eUxJJnyp0jhԏ2曇cZR ۷WDU=?I)5l eE/sYl8JkN2<`ֳ]jͽ wq8]w݉nݻwkǏ+s6Y~чxbco򭫬=O˚(%dVS-ϘI/*ڥoZv:0c̝| ɓX0ʗ/sX|TKEEDD8}_*V„=DZ,v/IOXAkwŻO^/Q}8j64l=RU.K3@/O>Źs0~>iۻ?_|RZlv؞/L<B@hx\775cf;SaգuҺ(ɩ*J8dIJVãC5,MW裷ZwFĦ_%.glDGyDB@)в)Q|LIO?4ƽw,uw"C! B@3 Tŋc̘1JƤ?ڵ+xY$Zִx"<Á~< 1+l0p @3n%;n{}ʧ[v-Usd.ҋCc[n+b?GC+*7{`Y.['<_~,,7ovCxq ]-oYhw`Tk5oR1Q{5aS15/':5wy3&pR)R֤nzh԰>؊z8MIݹKWR{x;tkۮjxꀚ-dž1OIx-/+W3 &O= ZN_7h vjUd%<} [?WѺCu;tz#c85Jk!%ĕ/s:]=.B j `k'hH=rH9DkA,B"Bqr!U5:4LU6璅pf|`߄2Ow <~ X2'/+.Q1rfT.< ='o:i*ʩ$\W> "UK$55wzmaTd&@2pT/]qfK! +}AZp!Ƒ.d.X@Y 0O墵QG+Hf]Tإxe`Tl}yAa\M8͛p,ghMPZse}J)񥗔zʕ`+k;u+RSK#n)Coj޾ӧ1s g}Go޼^'>C||<232|5ew =_r?`ȪXk?Ԭ4 US*e}pa۸u7ohkw*wB@!A@Y.Nl &tmӦ|O:/^QuYI4x0|Oyu-KjJZ#izީ2HfWn:99 KVXq ċ5k)XVתUF*\5ş.kP\||#[ڲzwSk޽{Q#6WV]՟5/}([Sg:"11)na1w(%8["z }>WBEev_XX鯭 Kġ) /! >76ҨR-I&j5otoܴJp+;-.ލګM+[ݩ}^H_?6E7 9w mSsR@Jf7 hxm߶zⱣtjc%KJ妃< 防%B@; kQ1x>؎yGaw7͇7V;YG! J@[ l`[7x Ck׮v4+|tO[>󟧕/OsOߔ\ Vjzz$'M})Xy-Y^Yi/.Vj֪5yaja^aüupV~{r̙3j[Ӽi-{ȱ\|f+J*pΏ*41ilinT=Uir{_{kV?䡃K|iZT?wP>w;@vMXY+/Cm7x]MIja-~o)%wYBs~U3Wqqqd_}j i~ܝ[(Oѳr7˷OII~Mn׾ڇz"5/hyj燭/lC&MԴ<4x=`M:{.M|~u}mUh!=Hj^! B@! B@! B&Y jhDDD WZ~4띙ZA~G{(.Vz-d51+/V^~ us<3[~j.]Ʃӧpi4Kk{]wM+ ?PhUjy H{﫩/_XV`5JyYDI4nժ{@᧟rϣv:`)+K5y70lMJi|lYj5-ߞmӦ-vډǏSʩ6p,pzrX#+GʨZ"l-ym$ڴmիVmnҤ)y=`2i jM'/싲bjlڸaaOI s~&}f>IIIZoK Ofnz>-[)>!Jn&U'~TVYi,җ ,}T%-Uz}0*j4\fu[P֭;؉unޢ%,X%Ku洭Z! U(] m#GiUB@! B@! B@! UB p `7|fšdMI5$jǎ`(ceY0 #%[ixRzBW4oтx-|^eʔ9X)96r`v Chy^L1fZBWMoϛz$u欪cGsq*ּ-N$Z6ʗ#C ! B@! B@! 
B@'-XE3רQ孳67fGx{>H\`}BKR>1wE c@LYf:}.DϾ 8m+ iDq˰f{l5טĪ& 9ODd7E³1-)Po GeR{un'^ڒv.ltZ}??NxoFXxMٕ_Yyqwri7y|*!{IfHuw4t55~kWnļ :ӘkG`LFL'WvZez;,Vo#-h~@mpy*vi\#~4iEW*gsS]e:Mt^L6-{;Vkn;iA4*g}>0R^?w,8\pjlTOy,YtTos[3Hpg!S2W#9la4%7؜0&9ΐKW,M'YՐݻwJ**/5o`4hR/\0x_! j|P:R! Br#ί=;AȎ"'x诪@R~ք˱>)]iѺ148G/m 0=UnK/W/= St &H/\u< #}odRo震q Hb+*U|ǒ踿 ֎ޓ\Z&J1Rɒ6s"YX/ 88;Ċ^LM?.UB 6fvE[Fb.1ažZJZ)D(\SlJ# -h=2GzH __79BEM#"j[ڈ;:ttopn;)$tu42Q-Ą ?ANRgލs(Ik tL8TD<)X($Y :"q^J%CmHw L}t3e%jRSpDdìIRjFN QX1 N `Yw:x'ބ纖B I8شO &K-ƼHHYŚvs#A$Z+F,ƈ~ +ױqY2)%b@Zp4_U $o~s)lI .s&zZ!bv2J+vuؓa*I}]tYW ,0pƲH'\`74ߥ9NQ^e#]Ϲx<ދjp0ic 5bH5a+_^EtOd%vJ?^P"Tl F;;\ 6yZ=`e$7UIpsb&pm46x%u 轛k l^js`1H:YqnEt>fF]qp uBF4{aP~/)sjlWl0ttU޶mb|AEbtܣ1E@nh8C3ܳm-j0 I1k/8R:$"dZLfA;7OT㹾#1h"h]ʆO ݇tPM1.c&,Js)B)ޏBrӌPT#nڻHag!}c1EWn4Dt0ҽEuQo`?C ˫:AH4GiBq%h\h,vH{D*ClN%%/xZ|C! B@! H ԿQPzA5){cQQz>x^TB^fE˔^,{^ pza=gZ^ 9+3P;N^*1  H^!?P 3EI^,&D#^~pH!H&"xX-)r+njWtyX>}Em3è,?m2B}D %5}`'u f6[ }N =%qW\ZߢMJV? 3h0.Bub 6^H~m7VW(]cZ-{=՘BcKil`f +m,Wl Ejt]LW~j'߳QĞ R qŢ΍L}ǯq%VNC=nXŬ杖l%HQ뽶pUw8iKYycx(]0j^O~W$MI%t$LKKfsj/)<{߆357[/#Yk1-;W"X٫[5篋a-bf 8&}5©hB;Oyc `"ޱtݯ,q wzfF0ӱ>?)ȏ[o0B!dpȑ8|0ʗ/'_3%%%gϞҌ F|B@! @vڌ雿.4IC ! OYjs ^|O1pS^|R! B@!pu,luV߿]w/Q0`߱>2:/yB@! B@hͧI+~.f;U]lyl;4-;~+YNgS[ !/&0kR<riӌn]Nlk,NtA~΢Ǟ p*"B^\Mͮ[Ĵ5B@! gkkej*L2gԩS7l؀;v`%)iq! B@!O@}>|ٗlCxz$-l)tl nv~ qϹ-RalV^q=m'6`ɵxw[4o ɇcKUqk^V8{+V= 7͸I|'~3ϩ4O.㏘u':כav߫+CrҒzp{Ǵd/L`Hxr ؽ֥Wg}=qoeĽ'pԧQg,Iktq;2[yxN'J7n:ߨ:R?xuO/LB@!'0y$8|~EEKz^yh)NYcs+^S=:s3Qf*\L~̝{f2! 5オs222  :8xbB@! G?]sb= V[cW7uw,Elh Om{,)Z?>b+q$qW6L 1g-{ \Yp|-ކMwżK_Te8r2}&TzZWh?.F7*-Q 0`x+^:}vWk&'OVxhS% ,BjNRvO\,V߾YdNfW&qOt셀B mqo΃Цr]t߬X,x.$?#M`W`OƵJYvX<LY$;4c딮]g2hB@!P\˷ MY>vnL%S &ܮ}=sJ1q6+oyOBJ"[対MݻwLj#[oIvZMȡm4L<B@! %'p&,JiNq4@ַVRZ8ѥjG|Ke)0{e"hj?|nRض*/-}I%h4\7L)6u+cTF@V/њ}k$K4)PYs:WNlhZߗ-I5ki-3)G5/4)<\'[*<^M ! *GϢF|yL:kWTHV 7.3GA8{/1r8FV,zw`ͧ`-x{YC/8~g,0\|ڒILv.>q8qLX9B@@-a봣Bt9H:kl z6^~t%223u_|~*0Ϡs>+Opݾꧼ|tt-=sLZ8i!ejR s;UnGn+0mZ)tʁe5 -7O# \L/: ߩVnARu8! lZA_iiK5fdHjZ߷f*am0dDơۮJ}dx 7lyKs;l6h\n4Ť;f"#PTU?.RTјv[h|*Wݖ4SZxÉ}Xqx;Z'jxi*! @xx | oㄆTI&wyퟏi0C۴_Pay^~?.pJ2/Y: 1t8 _mIdIw!  YJBBf̘;',*:OI$B@! ,xcpxdhMڎלS@Mf&y~Vu~59wOq_Km쾮V/zNKDք㳔*I^мվY7h҄"B@\\Ѧ8WOw>*2[dgPyϯN =M ֦/ *;'9o*! fy_x{g}~B@w&а|3I&uo빗^^OmZ`O},D>c - ! !Y LQg0}{s0%`ִEĔ`! B@! B2"߾w\F! B@! @a֎ JӰaCaXz5ʗ/A- Z^(q%P! B@! B@! B@!  'v\P{Zj9s&;lڴ &C J7`tGP! B@! B@! B@! hڬ́2մ|R7n~嗂xY(l>0_8$-Aex=<9c| ,Ց7)'uR;9i97'RSggHk^x+U7Nv^CWWdygօr1f10 w;"\rSk^t+kN!^~ɼ}v_pyֵ\wun/|pP^6҃U&r2 ].Us3oo9+rw:-KN.gP6>r|`(Р13ʠ93^~^/k80F]|~-6Ϙb_ޱA ̐&J+ A]Y^ @QWnހ!H8o8;΃W T+rA'v =6{]cno|28A47SY*PP)?w\7{clOiSXi{D}L(4vfͰeRZZH|c<,tQ򘺉qHzLm&,\S731!ΐhvS4ilRJAHnMguga{.rF8גB˲~ʥ2Tt,Ȗ,Eʷ9r(}v! 4oྠvDEEq*vzA_N@OyNAB@@v ;)DZ/vJ[B@#`hsHiYߧ:VR^\TB8a_k#79 !P7rHt!LF9C&ڗv&ɍ#e)O&Rw ^m&ʐYup5T$O D'FK_܃J4t)u-Ih1w7~C._ k9m>P| "Br6Hb6G= umP)1QJQvEH~-˒4rp-UEKM@y'ҵZ(>U=L dWJ@jon^}q`䁝Oʄz ^g-j#ѫ3zޒ');N ta7hV0ORU#p>@NŽ!;!3!RZNn>N$i;xkFhwR`S <ā>}4W VY"#OIt8'AZHnF͞u|} c.싪K}aؐd!6͖.GJU>gbܤ4I3E`SP*}):wV.kʨ]+A,w["5/[k1!6pwj䪔|_8J%Y2PHil'Gfr8"9y0<܎RY; Tdi {ǃhawtueC3йaX 5pAZ{#6 ][ 0p6`l+Mo ] ȩXg nve0ۼUCH-^Oz˪Dd8΁95Mo3r?Rfݮڍy=f+ \Re{("qθ:"oƠUiL9u<^kRݡV.oMGlXn /V/vg◶ i6#kgd.͔Xs_]HA_-ot1KTeራ\#vU/vU+|9c$]Xd"]4.j7Zw q:lvy\*j=&^Y1 72rr2{bѦB2e1H2t/3ytޤrh J>K >'z࢏oΧA$b˜R.7!%ruCtPt./N_3q !p,GScN@w y ! 8" OˎˬjR! B2% b₋^&My"tl \IY) Ŗ>-םJ/D$+$r6 kw6 S'V"H>.b\/p75 .ʏ̠|мNf n*I}B/Iω/8mdr2u syۯ3ī UǒP`] t~YA(^fZ3XqTNbԍ! =v夺BJܴ>c R'[ͬ0)DUHt_%\tuΝ/e]/2$4ΥvŹSw è|YE}I ;ife?7oN}>d*3"z^2{ι;\Qž~3X8J[|O3h'ea9E<:> B飕X:Q_g%0+Yrbn2LtMp{vqs:N[hs apwy{GYnyc]/Ψ}h T7΃ˎk$q_kyz㓢mv5¨MWh8I &!:w(L"fBmq$XC*%`q|KGڸ*/B!v|[@53Kd(B@! ߄뾺&g&W" ! .dL՛3'"B@! B@&@Ϩ> `qA&&&b~Xv- )R/9B@! 
ٛ+Q+vZLϕc^?qN,0}Φn)0!pwl;歞Q9hsz8i_75;R]|r#ZPւ}/}k1j~>XRutx)F|:Tepy|=Rb˫--c SX9կgUY'ؾd?WE}/~w֕)Svs/ !P<GNØWxg5<݁x7^w =37?ON~9/xwXuj[c*g˾U*͌b_ yl;q}0qlKLx3p<.dSkVD?~/|yXk`d/8xnzoۉ?=;>_߀w kRLU ! )=Oྠ_-ZO7VNA'~4wk[.xh^XOYQ!"^-l:V|#=U{zǭ·c~F MҳGb Fr y2:1S ,۷=Ŭo^AՒq{סS\neI=zi4٘۳{"d[?{H_>*?FھV} wݐ7inNOۧW!&$l9YXJdd(kǾ1q*(6F'S/Uc_#"Ӗ On`?1h?&b#Koł?o~q@! @R5l*xD1tzߋjY7˥n 9mxsA;XB@+6+f\Tm6TSNĶmqݺuwTfM B@! Bo [N@zb3vڮq'B Yw %#E3"J"k6-rB­wFW:7p&+( [T/UKVQ~P_{5U#!:.Ϫ+p-)bV%omRUVeˀB! ]G6Fks*JYY!m;8ƠbA~ &BCƒEc("B"┌11t~~%wx PRe'! UJࣥйi}S=DC!D'Ö Z*E'B@f/Gҥ1k,x4h/jn;we ` r$B@!w8| U()l5a6]ϛ' ]NS5=Ko_S1o(a8!P4BMjvp:Ubݡxt~tlqܳxjiίntM]{Zø}Nqj}_.D>ͯ My|U'["21^D+SH(QY4rFg0r_ۂ {ZGi\KΞ1Ns7cݮ%M q|< kw/U[Y? `ϗ>*i/mwSke$3IմO~y*n WP'0g 5I_jG~TN+ߡgCθ`7<6ܣ/:+pH;4KTBj5;7MŌu_+VW5>XÛ6;wpa6y&)#K"! .#{>Dl=|HM\Z346Ά:.kg :<!Pi}q;zƠ1𭳫y-ޯ^IN20JJi_uWkqï+Fزrʿo0y^7.*MkGRZT+W*4T7^_xhT^̷ח}t3XlOϪ|G! }]DRf")o.ѳTj,3ozxa61ӤQV9k8|zr_S-t6-)aƽf!n rrrHGEwi `".! B@! B@! B@! %&d5 ࢲ߰aԩ*U`Μ9*ҥKq“L05t9HB@! B@! B@! B@F@e1{lXz5>C坚%KhQ.6鈯B@! B@! B@! B'drʡFHOOWΝ;삒xi `"bJB@! B@! B@! B@\7p_PVe˖Ÿq4;v@Ϟ=~z̝;$^ R,W^e! F٘ %i+M1;q2ѱ+{춓^/҅8Y+YlFTnFٹ7?zreeP} -,d}\Z{ 'D9+y\S-R6b?Ugv^m7ᦗ=Ή;+ 1f*swd2N868ݡ&Ny)3\vP^LHvjxtuh.s6 Cӈ|P7cN(D_J 7,Mjl%_﹅B w\BB_-'ݨKM0CDZӁUn+)혰w$VZrEp%Ț:Z&9?J "S) ݶO!6e Kydnی*Jĥ {>0!g*<܄8<ZzHߧ\)_<¾2ΪRV 3Lj=˱?ڪrиD.r7}AX14,MYwVFѹF[J%]/!ӰJ]aH¹O">uW;i]:`(Q<-#u+Gx)iܨGח&|q}4ؾ 9QpTI!zثlA[u]Æ S&)5Ꮟ&6sX&k}܏گ\@rYQ b<gz;2=:BEW˹ۨ:}cB]HI[u"m ֤48d, $^r;).3*Q$AA1bĜyg:=1{L("9(ArT2HX6ǯ^Mޞ݉./}+Uu0o8ΣqD{/Z ;?D jQvr:c͜ԙDi렞bDGSlJfB 9knWVկKmfԛWZ}1[Rʓc8.x%o0t5glݪ~o>O9^ӧO޽{5sLڵk釿Jd       %P ^MY"qI[B $rA3zrQ$i EjE[lZmWګb^`PUgЎՃ1%R'A}]KT+`S]lV_HQZZ@E*5ӳS$*xSėrD]P19ũG9F`!&Fr)$9F}ORC=)Ղ TZQ91TlʉSϊ#?=&!0HZ,V9nRrjyxs=:Q6bTZ;_trj[{GO7yYd+xpQIbځ!^}8lU~u΍t:9<VƔ:}ʲާ| $%%V %T~UGڪu/~H$eH.™@9הn Y|v[Nʿo?o^<{ @'p03w5GUn͊ iҌ%?#6o3H #PG|{.nٶE["hhe@M"`+Sȑ#iܹG[nMM-nC`̥di v[֙xyj˱>UnLvNW{>IaJ+׮p@JdZCm]l(R[*Gނ6M3Xٳlo;+mɂm:҃BBڞ#Z  p3]{GӠAiӦO>tqQ&M Ԟ<7Ia29~atP:鵻/B99Ow]>s. в!`X!G?P!COriVgs ]t ]ۅvkMo>WW*i?.n8MjӴ-Y5K2zh㶽}AT7˩y:Yv+=۳t]5z01u}m׌)7{q odSfQ wV鶋JUĎ^JoޫSa__r6zS۳]1rߒ638|=7zkle{3fT&@BU9ju[^e^K S-[1lWt\Їh)Nmr*zi4pzt>-"(ϨEVم94I4cӏA3/\J~( 0OIkߔ}7 e۲]+hhƾAN>;~U+'zNva@%+Y>l7\h9@u%Цm6X jѢEԸqcݻw8T A@@@@@97ӤǮ_޿j>)Ծfܙʑty7^]1_vN~:۵5TNۂBBN,hUejNmW{g,o|$S9>;ch/gHqzn{ MSKG_A+7jzvw~ήٹyKQmïPh֛w^j JtS9%wmJמ}<=|htᛛ_@O-ueV$H|H8hq_^N;B,2v`ه3sͻnv av̶ے&_J'ݣ|oEmE]!T&.˱oVA@jH[:PÐ  GYkJ}7i&}'oNsb{@@@@B [6vϏ5hǮUw?3Ie9[yulJR%&Q|ڐ%ptSSt /RtXZEˡaz)*޸uK WNI7Qlhr?.^kǨ [R&(V}1{1h#MBܢzMtBT;5KMg&i$@tUo^kֲXmTrF(2['䉥))3?fn9oT!5H6ꚝ8Ӭ *KguIDypV'6^nhq,ed?2?'mBfXHYH  p\rhL@VҭͫWt*((W_}>c3O}'^S.xӟiZ{YtM5W[|mH^q?zOZ7A9RizkAO崨 MKY9 vrfUj́mwUq-8%&SFt։=u:o_Fvdgdj.s#59,]-|03v @LWÞ uj֪N mDW\KKnmo&T&%R|b:;5ݜo"_(G4V]t~1ځ7y^iOoIuuWZ*gSQ?QiO V׻:n@@0ݭ!=z4sLիnݚ.\TL".    
ؽ/C9Ck/crӔ3rZv.:S m|#k?-\ŵ9[$ڪܒ+Qgذ];__,۠V[Mf!-Rr:{vjIϩժ}5O:owZxOf;dJ/OQ}O4\jP'5oݣhbZt.c6qzuR#^:7(R9=C+m۽\NUnEo՟]O[l*\^9\5OI׫vq{\L/.|.`pV醾WM_'K!mO~yV ($8 D O?CӒ7̡{Raaݻ6CRGJ<Xt7{GM Lzt;رcݲxcbST\T~"?n8Dw(nTtLt\:6/F۔3w_cKc5{?kL깛i:wr"s*+6ӹP尽hwK'yWޞ3޶+୛שEԙ:;nDy7K ̯KwU&k%=Gn/9zkǍHC˻өwVԲq=:VqUό?>j+Y;^˚ߵVx0 >G/iIJ!h )TX\H?D-ߧ/CmISQ64er>H}}/?Z߳IƹK:0^]Ӓ?紋#iܝf(?~Qi1'W$_TUNڦGS@@97K^hM_D+o1PG@ve6[KRRRogy:w.4UPBqMSQ]ogt˅C8v傓m/5o4h\61D\y rZ娕 NSUyaⷋhZnmhXtR99&x\ѩucJKڷh@0__9%HZaT\ m@ ,6dž?nGkGǡr_2}G7cg;m?r=0_=pZWӟQT?O7զز_0(Gچ|?gw<5jmC>;|=ܬxci?Lu:ŀ<Ш@eSb JbK?r/fob(gє۴]Sixj;b|˟}k)ϓj똓uY\b6m'ԝ5b(Ʃɴc%ޖBmS UB$$jS?σTʻKY'zyT񯯷R/PtT23fyMA=[([)%dRa|1dG9{SꒅDiuRooPW8OO*._ʄ"JܵR-SkQ=(Ÿq$V9p}Z]"=U.f*iѐ;ʾcmꝚA%I_C]+m[;̋xSR]rJte ;[r%8Fk-3V.AZK=\J(ԗ99mq\q!2/[qdפ2򗺆8߄q )DBctx?Bg:6D9m'w8 /vr0ǟ}s-בJb)Q4Qe'6őm6ROJJRʗ^WgV_s(v8Y|b3+ e*RNxŅ~9w?N5ZQ?zRS㕈\8**1Nbk:7yܛt}0~1``ojK¬K[ʏSL~I<+v:8s l(2H@B2lbc=zT0QIjXt+]'eP%&3/XND56=08Tvr)Is̤89/;R+vtfg';9HL/_N_DKk~FY~+)=2hz(IAZX>ھ@R/ r[)HHH  Ps lVComp4~Ď` w?=z+~Nm*s  P < ђ՝Ou_V2)8@@@@@ oє_PZhOӏ!^s9 'ʥK|;hZۮA#ƿ5ltm 'J}!=8m"qt/ۮ3>jQRu{j+Dr0o/vE˶lJ;2й.ь+hٶ'irȾҝqg4=vX:e;R߶ф˗P6/MAL>C$a~S5Yڻe-}TE;kVzCp4GͫiOSǟIczR7.GwLZ|%y9ԮIq񏔛AW5Xf}4iPJtvn{֗V%ڸNtu  Pv~~j֢ ]r7|s=pljS3iÚ4e#UeoAgnԎSNοAUۣi4@7mC3ms>/>G۷;,eˢBiS&kFMҼY_ξn:;.@@*7nhɪ#61)#uċs0"_JC`₟i-4ї7EjΌ'r{3Qk^bTY\yhݨ)MOOөdSrrT+s )Oŷ׀\MYȗ8~a:tY9oGJ9^|5x 5Y:ҡ}ΣZu3/4nE<1wߛ5k^N[vĔTHu=:<]M?t3C0?6mMW? g./cWT/=Iۀ?  5Gjuni³s*ϵvPۡ~|:ٷ(פYow7w_Ijg ?̜W7B_v螇h;/ 脡ԉ*gwk}Ϡ];@4hrt`3EFU     P ,}=vQ\zőZǢ-ܞ{iIvWѿo"s+eշ~Zeg(E~qd4mC4ˣRd޲V shlӑOVg-V[V_hCFM(юiut|:u)WmO'8 f9^+pͰmͯkrG~OzNU`CRjק,3Uu~39vLsd;fvf}+g/QNvQN/c^=-Nl^N}T>V}@@ٿRnkn1<XP\چ(&e@[9Y+ zv:[15V=zeU _pqIO9>[EmE]F:;W$#нji?BΛIcH;wx.Y9^ª[ CF&)39䇯-cy-(^Cs8}/>1,[&G߹4h]۾Q&B/= >Rj/}@5hI{v݆OP7bEǐ_ܱN?u0 oߡ= ܸ|'/>ǍSϸ^(IN/)??:uӁ}  Մo!Z`8̔]M_u@֭=4s2t@mNQ(gZu~/Ӻ'b/:VfpZ~X|^i~߾<@-!rB;sUЂ 5WgvG˛niunԖ\z҇ Bm[[]:oYA:8uo{ڿ}m~Zu"8-1Iv[N7yWۉجWMw(}'h_hE4'mja]i)QCqf5@ &л4ѺU?ZiuϜ&>˶Woټv\ڸn߿kЂrQaz+''P?Qv1sáT7j賩}ci4GimYq   Fg_}UPݔ`{v:}m唞rxq uj vRm r47J.Z=NS7_k]$$a m/ync3i늅ԤQ4ڹ?mQfmttWs `^{Ǥ:uzv Zuׅ$ Eu>qN8I*}'}>N?  53A'պߕ:{ {[6F;ަ{h岙tw䘿RfƟ~|9f9Z;ZJu5KzBKigޤ˹h)G4>zKѝP=銛^wq216gWЪ?QRrZ;8'++fML,EmYA@@@@8e;zv ]crqҖ;}}-s\|o08V:4kҔ'XZKw׈s6Ӻ~q}{'7)t~k6ifZvGrM[ Pn3r}-yy>A/1i kмɯ[I@@fc_BW/gNח^ҩO|snӟ=n}+y Gy:۶EmoLF'$Ⓧ_iGI1y/Aohm}Km퉋+7nDM"SPm6THIה[V8#_4Jqz<@e8XY޽{YRWF=\l~MZ<*#**Л`g߶1NKY٩4lŲ͓KCuR *"ոhH1X}V #\IIrZ4 N?IuR+(CV N=yKE_M ;=Z_D#VjɉGdTT9@V%ŖP oG *N(ef_i`,l),XmG9؜uq$;9j12Urꖼ@.N(~Nd#?;ƋWkgv5rCց=3)eUߢ<>vdƪqG<RmQyr{H%$7y VbY睔˒ .}vژr ps(YW\D/Cֶ܎1/9v=hɊvY.W&%)qQ@a %ꁏqS$ȂbO..=|uIWRsCǜГ2X 6'X5YWy (S:_J|uyG ŮVfkaa!PZZ[;ޓMIpH[SukDt>:ޢyݺuԮ];8q" :Ts{qn:u=?]xv7n;RZ"R]0.|֭[dž5}t?~|D_F>@@@,@ /ۙNCU#ֆ@,h`ǟh>Iǎ<(eKs>篷޲_7'_DT'?qr@<ץ?(NN[Ӱ.o@,Yf(RfLGH>'I_^99qTw岝lUݝNvrp{kR&6Zx<eǯO?7_Z3\>! +Myk<       P% ;ǖvϝi/>M7'JA^.}huΏir @@@@@ `8 M[(33ӼU^^f͈3M 5~AmKhÆ `X gwv}:3UIVAE<D;j~VSWPZUiCty>_+~_VāF|n_O"z(!Q3 `Kw^N8̫_~ԭ[7zg%OQqܦ;ӄgUyKJ(nv>ν>z5oפYow7w_I3ǟOsfN׼W;,?=M_O{юw^M C/7Їn#Աkg\T!O>$U"F@ve6[zjZr%tSRRuޝ ⒵zuhHXz 8<]\bRbm;g~%%jWΙ}}C:>~3Oݻ6ܟ&cA~ }Kӛ~Y%7x QhْԷ(J]_'w, :!0uTbgakkn_c`Gdٲe4aٳ'tMTn]i̙4bD}()&O       ] J 1\XOn]_Z4 ߰$r O߾C{qӶT;vO=z;μHNSGPͩNt`_f     U; yKh \F,3eĻ'xBݵk}4|?~C        Π{֭O?JK۷޳~j-׮K-U߿5hA9Y(;I?@}sԨi;P}*}͛uMamԾzlDm鏴yïQ+q.p! 
I%opsG@,ss=0ރ>nV,i+T|?Tcٹk/~(u /&o﮻Jmmm/}G?6fPJ@ (%PJ@ (%PJ@ (%PJ w4}ȕafqOPyy9S5)S~,C/ӳ>k{ihڴiALyW!ͱmȥIJ@ b̻ΝۋG]W\NߦPP*%PJ@ (%PJ@ (G2WtP˽m5n77Vw;,͹n:zw+3N(Iey{,UWWۅFĩ=̕A--Zc0a|6/.Y"*nu9!Eg+^>0+ts\F|w.Bs鑇PJ@ W5*%PJ@ (%PJ@ (@yԬ%zjA1bq@EEEaA8.cu7z?ڮڴ|ׁomq3,z=#ü3}xICKK }y-vE~]wFJヒ^}դMiy%PJ@ (%c>zWoP{;jkm·TsC==_uY6LUhW~J,Zm?ްj}|z{e6뮥_xu6Bx淗ӧ ނR (%PJ@ 34ߝ+T;Go_z8xm瞣+>&k+VԩS⋊oO?/1~{yC?d9?<}͞z6K[Y|l2奿ڂ,駟ҥ5~}zƮW *%PJ@ (%+_N<?ktUfԨ40WO=Eu-eB 7L/B{>E0}V|SA4O>IƎ>/[.>4},n v r+s.o1ҽLGv;UUuY&*%PJ@ (O&?IDAT"ׯ_ޛl.^S ٳi̙iavw΃vV2T ?ɏiʕ4tд 7r?nرiq| hWuwWh]i>1?zO]Qzhm1q$Ed/ F+%PJ@ (%@$Pb˙gّn;|I忡S7Wn x:\89D*1?Xh酫_{~91_ɇIO;-瞡Kʅo'oE'<87Ryٌv S* c:CYUygt7tUxo>ţovOS8}ggsǛ_\s<¼{M{GcRdN;?wیw>SOիW\ALD*%PJ@ (%@ f~ϏK^3gѨ:~~ʹ$>z5<{j{W,h_g.5 8/l4?їzu>m穥,nK?>?se+O_0Oz-;g}Cm5;{:.@Knx㾜 ><4 71|ĭ5(%PJ@ (%zdnf]`=i{G E%T|-;b(qWͯun6h~;zhDu)oF){O93w shjn]leo&:Si&ПIk뭷ޤοn;2te~a+fk?g~.RCCaY$޿Vf͘a*$)?>]־6`{9۬ޯ}Χ6GzPJ@ (%P}3?h_\J5#FBo^Hӧݱh,z⟴پSqI 3O<CQ;G7;@_Wn͏L]e6M~>qM7m-ϋċ5G`8ˋ]^5G:k÷"hK:*1Plv3h́w#5(%PJ@ (%zdf?NJĬvَv**s>K /;q'Ob& ^K.K:O]K.'odNzx¸ѭBgu^v~oy.^yls-[xG1I&iPJ@ (%PJ@ lq\ndv>z?IwJ#^m0?bjv֌1wJQbie?KYU.7]kby1/WWQm1wYe}Ԕ{6mD>Hu%TROV)%PJ@ (&hѢua„ kKJQ 4kCG旾dÇ}t!=ؓn:ۋ9j&|fzp@ZfٝjBrr7[^ 0vz͂O>Ej2|H;ͼ3aذav1xbćswu·SYPJ@ (%PJ@ (ӖB/]w ۜ-8_t! 8y!Yg{ݥ;8i67ϤE B1؝v7ۼw}b?>c2?ȼ3'70*;}Gj2OZ4<2M'MPJ@ (%P=M`yO B^{ 駟 S(WoxyrsסJ[[[mڛr_pI/Kt]`WN[^HЭoϺ]ws7~]>[e_]/yqzuͼa1ch 'ijjDJ%PJ@ (%Swcʡh܌P׭i /76B[zgcFћ mvʼ0/Г4f_>{՛ocAIbpȑ6m72o.KiCwist]ilFm>[٭ۇSW]7!&]_ھvӧM|`UԿ#J""J{.{?w`'I&f9h~}&ƪ*xdx{dSqjgO--fUuzmdVulU"R0LԜ*W*/q{[Tc    K&Pӯe{}o|kjTKqNukk|"(?|\,7o@ `v_)_+)-+ZZZ7.77O^ݼ׷cxڬ[}{M7ٻW.JBtQw(((UƬӹjZnťݻ"20?3quf@@@@@@R_@#ƨ>Du}ۨ`#v?^޽oxuj…Qv1omkq+W[Vǩ*,4N~Æ ͫWu4Twl1).7k_@@@@@@@ 5t_8*USNɝ;wdhh^blݶm[@$j#z=Y4#      t3<6֫ybeJz4م ݻR__/󌓉w!7:jٚx=@@@@@@@_IF{׭['V2s?QfɩG@&00r7mݬ@@@@ LG_H5wzŪϭ[$ JYYӐvB+@vvv/!@$sX"2,;.*|$ `//y\ {s/y\ D"˗wd d>Ȕz?5###M*++$jdYh~f<NӯYF:;;fܹsRPP8avI9R      @*\tI\] ف?f\@l:-DGn6Q|DmdB@@@@@@ %T`7 Mh-; 䉀>zdD%=jAA@`. ?΅@@@@;z*v֥YuhB>4̬w'i>IO!i &F8x.#0 nȣ@`Bl`Bojn,otoyl {sӡ oY(Qx{|8SK WNCF'MO̍      S3gLz5:znyS,[L QNsH      ̞@~~lݺUN>m.q1MMMfb D"}iYzuZ:ƻG233uL&_85*@@@@@@HUܪ*xJ#J4+m16mq]ܼys\Y G@P      @J ڵkѣG)//7q9J͛'j]T|DC=      !555Q]fQ'9-$''Glݾ}<ڶHko@@@@@@RFÇkI:Ƥ 褼:p_<ĉ,3 òk.)T8LA@@@@@@Hk:=,==]8 "Ϟ=/K.}Iff6S@@@@@@@ uw ji/%@^XTߍ7Juu[qmqmT       ,xUjY SLv'Ѫ<'O4F*|5soQQMT|bl       $G *#2v;=- JEEٳG:::nHDBӐ4#lAA@@@@@@H@lhlV;=mxb͕![ iYoę9UF@@@@@@@`zƑΪ{Yٳ*O>Ǐ9 sE-4JIENDB`accelerate-1.9.0/docs/source/index.md000066400000000000000000000101201503574341000174660ustar00rootroot00000000000000 # Accelerate Accelerate is a library that enables the same PyTorch code to be run across any distributed configuration by adding just four lines of code! In short, training and inference at scale made simple, efficient and adaptable. ```diff + from accelerate import Accelerator + accelerator = Accelerator() + model, optimizer, training_dataloader, scheduler = accelerator.prepare( + model, optimizer, training_dataloader, scheduler + ) for batch in training_dataloader: optimizer.zero_grad() inputs, targets = batch inputs = inputs.to(device) targets = targets.to(device) outputs = model(inputs) loss = loss_function(outputs, targets) + accelerator.backward(loss) optimizer.step() scheduler.step() ``` Built on `torch_xla` and `torch.distributed`, Accelerate takes care of the heavy lifting, so you don't have to write any custom code to adapt to these platforms. Convert existing codebases to utilize [DeepSpeed](usage_guides/deepspeed), perform [fully sharded data parallelism](usage_guides/fsdp), and have automatic support for mixed-precision training! 
To get a better idea of this process, make sure to check out the [Tutorials](basic_tutorials/overview)! This code can then be launched on any system through Accelerate's CLI interface: ```bash accelerate launch {my_script.py} ``` accelerate-1.9.0/docs/source/package_reference/000077500000000000000000000000001503574341000214545ustar00rootroot00000000000000accelerate-1.9.0/docs/source/package_reference/accelerator.md000066400000000000000000000021451503574341000242640ustar00rootroot00000000000000 # Accelerator The [`Accelerator`] is the main class for enabling distributed training on any type of training setup. Read the [Add Accelerator to your code](../basic_tutorials/migration) tutorial to learn more about how to add the [`Accelerator`] to your script. ## Accelerator[[api]] [[autodoc]] Accelerator ## Utilities [[autodoc]] accelerate.utils.gather_object accelerate-1.9.0/docs/source/package_reference/big_modeling.md000066400000000000000000000044341503574341000244220ustar00rootroot00000000000000 # Working with large models ## Dispatch and offload ### init_empty_weights [[autodoc]] big_modeling.init_empty_weights ### cpu_offload [[autodoc]] big_modeling.cpu_offload ### cpu_offload_with_hook [[autodoc]] big_modeling.cpu_offload_with_hook ### disk_offload [[autodoc]] big_modeling.disk_offload ### dispatch_model [[autodoc]] big_modeling.dispatch_model ### load_checkpoint_and_dispatch [[autodoc]] big_modeling.load_checkpoint_and_dispatch ### load_checkpoint_in_model [[autodoc]] big_modeling.load_checkpoint_in_model ### infer_auto_device_map [[autodoc]] utils.infer_auto_device_map ## Hooks ### ModelHook [[autodoc]] hooks.ModelHook ### AlignDevicesHook [[autodoc]] hooks.AlignDevicesHook ### SequentialHook [[autodoc]] hooks.SequentialHook ### LayerwiseCastingHook [[autodoc]] hooks.LayerwiseCastingHook ## Adding Hooks ### add_hook_to_module [[autodoc]] hooks.add_hook_to_module ### attach_execution_device_hook [[autodoc]] hooks.attach_execution_device_hook ### attach_align_device_hook [[autodoc]] hooks.attach_align_device_hook ### attach_align_device_hook_on_blocks [[autodoc]] hooks.attach_align_device_hook_on_blocks ### attach_layerwise_casting_hooks [[autodoc]] big_modeling.attach_layerwise_casting_hooks ## Removing Hooks ### remove_hook_from_module [[autodoc]] hooks.remove_hook_from_module ### remove_hook_from_submodules [[autodoc]] hooks.remove_hook_from_submodules ## Utilities ### has_offloaded_params [[autodoc]] utils.has_offloaded_params ### align_module_device [[autodoc]] utils.align_module_device accelerate-1.9.0/docs/source/package_reference/cli.md000066400000000000000000000453051503574341000225540ustar00rootroot00000000000000 # The Command Line Below is a list of all the available commands 🤗 Accelerate with their parameters ## accelerate config **Command**: `accelerate config` or `accelerate-config` Launches a series of prompts to create and save a `default_config.yml` configuration file for your training system. Should always be ran first on your machine. **Usage**: ```bash accelerate config [arguments] ``` **Optional Arguments**: * `--config_file CONFIG_FILE` (`str`) -- The path to use to store the config file. Will default to a file named default_config.yaml in the cache location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have such an environment variable, your cache directory (`~/.cache` or the content of `XDG_CACHE_HOME`) suffixed with `huggingface`. 
* `-h`, `--help` (`bool`) -- Show a help message and exit ## accelerate config default **Command**: `accelerate config default` or `accelerate-config default` Create a default config file for Accelerate with only a few flags set. **Usage**: ```bash accelerate config default [arguments] ``` **Optional Arguments**: * `--config_file CONFIG_FILE` (`str`) -- The path to use to store the config file. Will default to a file named default_config.yaml in the cache location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have such an environment variable, your cache directory (`~/.cache` or the content of `XDG_CACHE_HOME`) suffixed with `huggingface`. * `-h`, `--help` (`bool`) -- Show a help message and exit * `--mixed_precision {no,fp16,bf16}` (`str`) -- Whether or not to use mixed precision training. Choose between FP16 and BF16 (bfloat16) training. BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later. ## accelerate config update **Command**: `accelerate config update` or `accelerate-config update` Update an existing config file with the latest defaults while maintaining the old configuration. **Usage**: ```bash accelerate config update [arguments] ``` **Optional Arguments**: * `--config_file CONFIG_FILE` (`str`) -- The path to the config file to update. Will default to a file named default_config.yaml in the cache location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have such an environment variable, your cache directory (`~/.cache` or the content of `XDG_CACHE_HOME`) suffixed with `huggingface`. * `-h`, `--help` (`bool`) -- Show a help message and exit ## accelerate env **Command**: `accelerate env` or `accelerate-env` or `python -m accelerate.commands.env` Lists the contents of the passed 🤗 Accelerate configuration file. Should always be used when opening an issue on the [GitHub repository](https://github.com/huggingface/accelerate). **Usage**: ```bash accelerate env [arguments] ``` **Optional Arguments**: * `--config_file CONFIG_FILE` (`str`) -- The path to use to store the config file. Will default to a file named default_config.yaml in the cache location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have such an environment variable, your cache directory (`~/.cache` or the content of `XDG_CACHE_HOME`) suffixed with `huggingface`. * `-h`, `--help` (`bool`) -- Show a help message and exit ## accelerate launch **Command**: `accelerate launch` or `accelerate-launch` or `python -m accelerate.commands.launch` Launches a specified script on a distributed system with the right parameters. **Usage**: ```bash accelerate launch [arguments] {training_script} --{training_script-argument-1} --{training_script-argument-2} ... ``` **Positional Arguments**: - `{training_script}` -- The full path to the script to be launched in parallel - `--{training_script-argument-1}` -- Arguments of the training script **Optional Arguments**: * `-h`, `--help` (`bool`) -- Show a help message and exit * `--config_file CONFIG_FILE` (`str`)-- The config file to use for the default values in the launching script. * `-m`, `--module` (`bool`) -- Change each process to interpret the launch script as a Python module, executing with the same behavior as 'python -m'. * `--no_python` (`bool`) -- Skip prepending the training script with 'python' - just execute it directly. Useful when the script is not a Python script. 
* `--debug` (`bool`) -- Whether to print out the torch.distributed stack trace when something fails. * `-q`, `--quiet` (`bool`) -- Silence subprocess errors from the launch stack trace to only show the relevant tracebacks. (Only applicable to DeepSpeed and single-process configurations). The rest of these arguments are configured through `accelerate config` and are read in from the specified `--config_file` (or default configuration) for their values. They can also be passed in manually. **Hardware Selection Arguments**: * `--cpu` (`bool`) -- Whether or not to force the training on the CPU. * `--multi_gpu` (`bool`) -- Whether or not this should launch a distributed GPU training. * `--tpu` (`bool`) -- Whether or not this should launch a TPU training. * `--ipex` (`bool`) -- Whether or not this should launch an Intel Pytorch Extension (IPEX) training. **This argument is deprecated, will be removed in Accelerate v1.10** **Resource Selection Arguments**: The following arguments are useful for fine-tuning how available hardware should be used * `--mixed_precision {no,fp16,bf16,fp8}` (`str`) -- Whether or not to use mixed precision training. Choose between FP16 and BF16 (bfloat16) training. BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later. * `--num_processes NUM_PROCESSES` (`int`) -- The total number of processes to be launched in parallel. * `--num_machines NUM_MACHINES` (`int`) -- The total number of machines used in this training. * `--num_cpu_threads_per_process NUM_CPU_THREADS_PER_PROCESS` (`int`) -- The number of CPU threads per process. Can be tuned for optimal performance. * `--enable_cpu_affinity` (`bool`) -- Whether or not CPU affinity and balancing should be enabled. Currently only supported on NVIDIA hardware. **Training Paradigm Arguments**: The following arguments are useful for selecting which training paradigm to use. * `--use_deepspeed` (`bool`) -- Whether or not to use DeepSpeed for training. * `--use_fsdp` (`bool`) -- Whether or not to use FullyShardedDataParallel for training. * `--use_megatron_lm` (`bool`) -- Whether or not to use Megatron-LM for training. * `--use_xpu` (`bool`) -- Whether to use IPEX plugin to speed up training on XPU specifically. **This argument is deprecated and ignored, will be removed in Accelerate v1.10** **Distributed GPU Arguments**: The following arguments are only useful when `multi_gpu` is passed or multi-gpu training is configured through `accelerate config`: * `--gpu_ids` (`str`) -- What GPUs (by id) should be used for training on this machine as a comma-separated list * `--same_network` (`bool`) -- Whether all machines used for multinode training exist on the same local network. * `--machine_rank` (`int`) -- The rank of the machine on which this script is launched. * `--main_process_ip` (`str`) -- The IP address of the machine of rank 0. * `--main_process_port` (`int`) -- The port to use to communicate with the machine of rank 0. * `-t`, `--tee` (`str`) -- Tee std streams into a log file and also to console. * `--log_dir` (`str`) -- Base directory to use for log files when using torchrun/torch.distributed.run as launcher. Use with --tee to redirect std streams info log files. * `--role` (`str`) -- User-defined role for the workers. * `--rdzv_backend` (`str`) -- The rendezvous method to use, such as 'static' (the default) or 'c10d' * `--rdzv_conf` (`str`) -- Additional rendezvous configuration (=,=,...). * `--max_restarts` (`int`) -- Maximum number of worker group restarts before failing. 
* `--monitor_interval` (`int`) -- Interval, in seconds, to monitor the state of workers. **TPU Arguments**: The following arguments are only useful when `tpu` is passed or TPU training is configured through `accelerate config`: * `--tpu_cluster` (`bool`) -- Whether to use a GCP TPU pod for training. * `--tpu_use_sudo` (`bool`) -- Whether to use `sudo` when running the TPU training script in each pod. * `--vm` (`str`) -- List of single Compute VM instance names. If not provided we assume usage of instance groups. For TPU pods. * `--env` (`str`) -- List of environment variables to set on the Compute VM instances. For TPU pods. * `--main_training_function` (`str`) -- The name of the main function to be executed in your script (only for TPU training). * `--downcast_bf16` (`bool`) -- Whether when using bf16 precision on TPUs if both float and double tensors are cast to bfloat16 or if double tensors remain as float32. **DeepSpeed Arguments**: The following arguments are only useful when `use_deepspeed` is passed or `deepspeed` is configured through `accelerate config`: * `--deepspeed_config_file` (`str`) -- DeepSpeed config file. * `--zero_stage` (`int`) -- DeepSpeed's ZeRO optimization stage. * `--offload_optimizer_device` (`str`) -- Decides where (none|cpu|nvme) to offload optimizer states. * `--offload_param_device` (`str`) -- Decides where (none|cpu|nvme) to offload parameters. * `--offload_optimizer_nvme_path` (`str`) -- Decides Nvme Path to offload optimizer states. * `--gradient_accumulation_steps` (`int`) -- No of gradient_accumulation_steps used in your training script. * `--gradient_clipping` (`float`) -- Gradient clipping value used in your training script. * `--zero3_init_flag` (`str`) -- Decides Whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. Only applicable with DeepSpeed ZeRO Stage-3. * `--zero3_save_16bit_model` (`str`) -- Decides Whether (true|false) to save 16-bit model weights when using ZeRO Stage-3. Only applicable with DeepSpeed ZeRO Stage-3. * `--deepspeed_hostfile` (`str`) -- DeepSpeed hostfile for configuring multi-node compute resources. * `--deepspeed_exclusion_filter` (`str`) -- DeepSpeed exclusion filter string when using multi-node setup. * `--deepspeed_inclusion_filter` (`str`) -- DeepSpeed inclusion filter string when using multi-node setup. * `--deepspeed_multinode_launcher` (`str`) -- DeepSpeed multi-node launcher to use. * `--deepspeed_moe_layer_cls_names` (`str`) -- comma-separated list of transformer MoE layer class names (case-sensitive) to wrap, e.g, `MixtralSparseMoeBlock` `Qwen2MoeSparseMoeBlock`, `JetMoEAttention,JetMoEBlock` **Fully Sharded Data Parallelism Arguments**: The following arguments are only useful when `use_fsdp` is passed or Fully Sharded Data Parallelism is configured through `accelerate config`: * `--fsdp_offload_params` (`str`) -- Decides Whether (true|false) to offload parameters and gradients to CPU. * `--fsdp_min_num_params` (`int`) -- FSDP's minimum number of parameters for Default Auto Wrapping. * `--fsdp_sharding_strategy` (`int`) -- FSDP's Sharding Strategy. * `--fsdp_auto_wrap_policy` (`str`) -- FSDP's auto wrap policy. * `--fsdp_transformer_layer_cls_to_wrap` (`str`) -- Transformer layer class name (case-sensitive) to wrap, e.g, `BertLayer`, `GPTJBlock`, `T5Block` ... * `--fsdp_backward_prefetch_policy` (`str`) -- FSDP's backward prefetch policy. * `--fsdp_state_dict_type` (`str`) -- FSDP's state dict type. * `--fsdp_forward_prefetch` (`str`) -- FSDP forward prefetch. 
* `--fsdp_use_orig_params` (`str`) -- If True, allows non-uniform `requires_grad` mixed in a FSDP unit. * `--fsdp_cpu_ram_efficient_loading` (`str`) -- If true, only the first process loads the pretrained model checkoint while all other processes have empty weights. When using this, `--fsdp_sync_module_states` needs to True. * `--fsdp_sync_module_states` (`str`) -- If true, each individually wrapped FSDP unit will broadcast module parameters from rank 0. * `--fsdp_activation_checkpointing` (`bool`) -- Decides Whether intermediate activations are freed during the forward pass, and a checkpoint is left as a placeholder **Megatron-LM Arguments**: The following arguments are only useful when `use_megatron_lm` is passed or Megatron-LM is configured through `accelerate config`: * `--megatron_lm_tp_degree` (``) -- Megatron-LM's Tensor Parallelism (TP) degree. * `--megatron_lm_pp_degree` (``) -- Megatron-LM's Pipeline Parallelism (PP) degree. * `--megatron_lm_num_micro_batches` (``) -- Megatron-LM's number of micro batches when PP degree > 1. * `--megatron_lm_sequence_parallelism` (``) -- Decides Whether (true|false) to enable Sequence Parallelism when TP degree > 1. * `--megatron_lm_recompute_activations` (``) -- Decides Whether (true|false) to enable Selective Activation Recomputation. * `--megatron_lm_use_distributed_optimizer` (``) -- Decides Whether (true|false) to use distributed optimizer which shards optimizer state and gradients across Data Parallel (DP) ranks. * `--megatron_lm_gradient_clipping` (``) -- Megatron-LM's gradient clipping value based on global L2 Norm (0 to disable). **FP8 Arguments**: * `--fp8_backend` (`str`) -- Choose a backend to train with FP8 (`te` or `msamp`) * `--fp8_use_autocast_during_eval` (`bool`) -- Whether to use FP8 autocast during eval mode (useful only when `--fp8_backend=te` is passed). Generally better metrics are found when this is not passed. * `--fp8_margin` (`int`) -- The margin to use for the gradient scaling (useful only when `--fp8_backend=te` is passed). * `--fp8_interval` (`int`) -- The interval to use for how often the scaling factor is recomputed (useful only when `--fp8_backend=te` is passed). * `--fp8_format` (`str`) -- The format to use for the FP8 recipe (useful only when `--fp8_backend=te` is passed). * `--fp8_amax_history_len` (`int`) -- The length of the history to use for the scaling factor computation (useful only when `--fp8_backend=te` is passed). * `--fp8_amax_compute_algo` (`str`) -- The algorithm to use for the scaling factor computation. (useful only when `--fp8_backend=te` is passed). * `--fp8_override_linear_precision` (`Tuple[bool, bool, bool]`) -- Whether or not to execute `fprop`, `dgrad`, and `wgrad` GEMMS in higher precision. * `--fp8_opt_level` (`str`) -- What level of 8-bit collective communication should be used with MS-AMP (useful only when `--fp8_backend=msamp` is passed) **AWS SageMaker Arguments**: The following arguments are only useful when training in SageMaker * `--aws_access_key_id AWS_ACCESS_KEY_ID` (`str`) -- The AWS_ACCESS_KEY_ID used to launch the Amazon SageMaker training job * `--aws_secret_access_key AWS_SECRET_ACCESS_KEY` (`str`) -- The AWS_SECRET_ACCESS_KEY used to launch the Amazon SageMaker training job ## accelerate estimate-memory **Command**: `accelerate estimate-memory` or `accelerate-estimate-memory` or `python -m accelerate.commands.estimate` Estimates the total vRAM a particular model hosted on the Hub needs to be loaded in with an estimate for training. 
Requires that `huggingface_hub` be installed. When performing inference, typically add ≤20% to the result as overall allocation [as referenced here](https://blog.eleuther.ai/transformer-math/). We will have more extensive estimations in the future that will automatically be included in the calculation. **Usage**: ```bash accelerate estimate-memory {MODEL_NAME} --library_name {LIBRARY_NAME} --dtypes {dtype_1} {dtype_2} ... ``` **Required Arguments**: * `MODEL_NAME` (`str`)-- The model name on the Hugging Face Hub **Optional Arguments**: * `--library_name {timm,transformers}` (`str`) -- The library the model has an integration with, such as `transformers`, needed only if this information is not stored on the Hub * `--dtypes {float32,float16,int8,int4}` (`[{float32,float16,int8,int4} ...]`) -- The dtypes to use for the model, must be one (or many) of `float32`, `float16`, `int8`, and `int4` * `--trust_remote_code` (`bool`) -- Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be passed for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine. ## accelerate tpu-config `accelerate tpu-config` **Usage**: ```bash accelerate tpu-config [arguments] ``` **Optional Arguments**: * `-h`, `--help` (`bool`) -- Show a help message and exit **Config Arguments**: Arguments that can be configured through `accelerate config`. * `--config_file` (`str`) -- Path to the config file to use for accelerate. * `--tpu_name` (`str`) -- The name of the TPU to use. If not specified, will use the TPU specified in the config file. * `--tpu_zone` (`str`) -- The zone of the TPU to use. If not specified, will use the zone specified in the config file. **TPU Arguments**: Arguments for options ran inside the TPU. * `--command_file` (`str`) -- The path to the file containing the commands to run on the pod on startup. * `--command` (`str`) -- A command to run on the pod. Can be passed multiple times. * `--install_accelerate` (`bool`) -- Whether to install accelerate on the pod. Defaults to False. * `--accelerate_version` (`str`) -- The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub. * `--debug` (`bool`) -- If set, will print the command that would be run instead of running it. ## accelerate test `accelerate test` or `accelerate-test` Runs `accelerate/test_utils/test_script.py` to verify that 🤗 Accelerate has been properly configured on your system and runs. **Usage**: ```bash accelerate test [arguments] ``` **Optional Arguments**: * `--config_file CONFIG_FILE` (`str`) -- The path to use to store the config file. Will default to a file named default_config.yaml in the cache location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have such an environment variable, your cache directory (`~/.cache` or the content of `XDG_CACHE_HOME`) suffixed with `huggingface`. 
* `-h`, `--help` (`bool`) -- Show a help message and exit accelerate-1.9.0/docs/source/package_reference/deepspeed.md000066400000000000000000000023761503574341000237440ustar00rootroot00000000000000 # DeepSpeed utilities ## DeepSpeedPlugin ## get_active_deepspeed_plugin [[autodoc]] utils.get_active_deepspeed_plugin [[autodoc]] utils.DeepSpeedPlugin [[autodoc]] utils.deepspeed.DummyScheduler ## DeepSpeedEnginerWrapper [[autodoc]] utils.deepspeed.DeepSpeedEngineWrapper ## DeepSpeedOptimizerWrapper [[autodoc]] utils.deepspeed.DeepSpeedOptimizerWrapper ## DeepSpeedSchedulerWrapper [[autodoc]] utils.deepspeed.DeepSpeedSchedulerWrapper ## DummyOptim [[autodoc]] utils.deepspeed.DummyOptim ## DummyScheduleraccelerate-1.9.0/docs/source/package_reference/fp8.md000066400000000000000000000022111503574341000224670ustar00rootroot00000000000000 # FP8 Below are functions and classes relative to the underlying FP8 implementation ## FP8RecipeKwargs [[autodoc]] utils.FP8RecipeKwargs ## convert_model [[autodoc]] utils.convert_model ## has_transformer_engine_layers [[autodoc]] utils.has_transformer_engine_layers ## contextual_fp8_autocast [[autodoc]] utils.contextual_fp8_autocast ## apply_fp8_autowrap [[autodoc]] utils.apply_fp8_autowrap accelerate-1.9.0/docs/source/package_reference/fsdp.md000066400000000000000000000025571503574341000227430ustar00rootroot00000000000000 # Fully Sharded Data Parallel utilities ## enable_fsdp_ram_efficient_loading [[autodoc]] utils.enable_fsdp_ram_efficient_loading ## disable_fsdp_ram_efficient_loading [[autodoc]] utils.disable_fsdp_ram_efficient_loading ## merge_fsdp_weights [[autodoc]] utils.merge_fsdp_weights ## FullyShardedDataParallelPlugin [[autodoc]] utils.FullyShardedDataParallelPlugin ## fsdp2_load_full_state_dict [[autodoc]] utils.fsdp2_load_full_state_dict ## fsdp2_switch_optimizer_parameters [[autodoc]] utils.fsdp2_switch_optimizer_parameters ## fsdp2_prepare_model [[autodoc]] utils.fsdp2_prepare_model ## fsdp2_prepare_auto_wrap_policy accelerate-1.9.0/docs/source/package_reference/inference.md000066400000000000000000000017661503574341000237460ustar00rootroot00000000000000 # Pipeline parallelism Accelerate supports pipeline parallelism for large-scale training with the PyTorch [torch.distributed.pipelining](https://pytorch.org/docs/stable/distributed.pipelining.html) API. ## prepare_pippy [[autodoc]] inference.prepare_pippy accelerate-1.9.0/docs/source/package_reference/kwargs.md000066400000000000000000000024671503574341000233050ustar00rootroot00000000000000 # Kwargs handlers The following objects can be passed to the main [`Accelerator`] to customize how some PyTorch objects related to distributed training or mixed precision are created. ## AutocastKwargs [[autodoc]] AutocastKwargs ## DistributedDataParallelKwargs [[autodoc]] DistributedDataParallelKwargs ## FP8RecipeKwargs [[autodoc]] utils.FP8RecipeKwargs ## ProfileKwargs [[autodoc]] utils.ProfileKwargs ## GradScalerKwargs [[autodoc]] GradScalerKwargs ## InitProcessGroupKwargs [[autodoc]] InitProcessGroupKwargs ## KwargsHandler [[autodoc]] utils.KwargsHandler accelerate-1.9.0/docs/source/package_reference/launchers.md000066400000000000000000000016641503574341000237710ustar00rootroot00000000000000 # Launchers Functions for launching training on distributed processes. 
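As a quick illustration (a minimal sketch; `training_function` and its argument below are placeholders rather than part of the API), [`notebook_launcher`] spawns a training function across several processes directly from a notebook or script:

```python
from accelerate import notebook_launcher

def training_function(learning_rate):
    # Stand-in for your usual Accelerator-based training loop.
    print(f"Training with learning rate {learning_rate}")

# Run `training_function` on 2 processes (for example, 2 GPUs).
notebook_launcher(training_function, args=(1e-4,), num_processes=2)
```

[`debug_launcher`] follows the same calling pattern but runs the function on CPU processes, which is useful for quickly debugging distributed logic.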
## notebook_launcher [[autodoc]] accelerate.notebook_launcher ## debug_launcher [[autodoc]] accelerate.debug_launcheraccelerate-1.9.0/docs/source/package_reference/logging.md000066400000000000000000000016561503574341000234340ustar00rootroot00000000000000 # Logging Refer to the [Troubleshooting guide](../usage_guides/troubleshooting#logging) or to the example below to learn how to use Accelerate's logger. [[autodoc]] logging.get_loggeraccelerate-1.9.0/docs/source/package_reference/megatron_lm.md000066400000000000000000000024111503574341000243000ustar00rootroot00000000000000 # Megatron-LM utilities ## MegatronLMPlugin [[autodoc]] utils.MegatronLMPlugin ## MegatronLMDummyScheduler [[autodoc]] utils.MegatronLMDummyScheduler ## MegatronLMDummyDataLoader [[autodoc]] utils.MegatronLMDummyDataLoader ## AbstractTrainStep [[autodoc]] utils.AbstractTrainStep ## GPTTrainStep [[autodoc]] utils.GPTTrainStep ## BertTrainStep [[autodoc]] utils.BertTrainStep ## T5TrainStep [[autodoc]] utils.T5TrainStep ## avg_losses_across_data_parallel_group [[autodoc]] utils.avg_losses_across_data_parallel_group accelerate-1.9.0/docs/source/package_reference/state.md000066400000000000000000000023031503574341000231140ustar00rootroot00000000000000 # Stateful Classes Below are variations of a [singleton class](https://en.wikipedia.org/wiki/Singleton_pattern) in the sense that all instances share the same state, which is initialized on the first instantiation. These classes are immutable and store information about certain configurations or states. ## PartialState [[autodoc]] state.PartialState ## AcceleratorState [[autodoc]] state.AcceleratorState ## GradientState [[autodoc]] state.GradientStateaccelerate-1.9.0/docs/source/package_reference/torch_wrappers.md000066400000000000000000000026341503574341000250450ustar00rootroot00000000000000 # DataLoaders, Optimizers, and Schedulers The internal classes Accelerate uses to prepare objects for distributed training when calling [`~Accelerator.prepare`]. ## DataLoader utilities [[autodoc]] data_loader.prepare_data_loader [[autodoc]] data_loader.skip_first_batches ## BatchSamplerShard [[autodoc]] data_loader.BatchSamplerShard ## IterableDatasetShard [[autodoc]] data_loader.IterableDatasetShard ## DataLoaderShard [[autodoc]] data_loader.DataLoaderShard ## DataLoaderDispatcher [[autodoc]] data_loader.DataLoaderDispatcher ## AcceleratedOptimizer [[autodoc]] optimizer.AcceleratedOptimizer ## AcceleratedScheduler [[autodoc]] scheduler.AcceleratedScheduleraccelerate-1.9.0/docs/source/package_reference/tracking.md000066400000000000000000000025541503574341000236060ustar00rootroot00000000000000 # Experiment Trackers ## GeneralTracker [[autodoc]] tracking.GeneralTracker ## TensorBoardTracker [[autodoc]] tracking.TensorBoardTracker - __init__ ## WandBTracker [[autodoc]] tracking.WandBTracker - __init__ ## Trackio [[autodoc]] tracking.TrackioTracker - __init__ ## CometMLTracker [[autodoc]] tracking.CometMLTracker - __init__ ## AimTracker [[autodoc]] tracking.AimTracker - __init__ ## MLflowTracker [[autodoc]] tracking.MLflowTracker - __init__ ## ClearMLTracker [[autodoc]] tracking.ClearMLTracker - __init__ ## SwanLabTracker [[autodoc]] tracking.SwanLabTracker - __init__ accelerate-1.9.0/docs/source/package_reference/utilities.md000066400000000000000000000142511503574341000240140ustar00rootroot00000000000000 # Utility functions and classes Below are a variety of utility functions that 🤗 Accelerate provides, broken down by use-case. 
## Constants Constants used throughout 🤗 Accelerate for reference The following are constants used when utilizing [`Accelerator.save_state`] `utils.MODEL_NAME`: `"pytorch_model"` `utils.OPTIMIZER_NAME`: `"optimizer"` `utils.RNG_STATE_NAME`: `"random_states"` `utils.SCALER_NAME`: `"scaler.pt` `utils.SCHEDULER_NAME`: `"scheduler` The following are constants used when utilizing [`Accelerator.save_model`] `utils.WEIGHTS_NAME`: `"pytorch_model.bin"` `utils.SAFE_WEIGHTS_NAME`: `"model.safetensors"` `utils.WEIGHTS_INDEX_NAME`: `"pytorch_model.bin.index.json"` `utils.SAFE_WEIGHTS_INDEX_NAME`: `"model.safetensors.index.json"` ## Data Classes These are basic dataclasses used throughout 🤗 Accelerate and they can be passed in as parameters. ### Standalone These are standalone dataclasses used for checks, such as the type of distributed system being used [[autodoc]] utils.ComputeEnvironment [[autodoc]] utils.DistributedType [[autodoc]] utils.DynamoBackend [[autodoc]] utils.LoggerType [[autodoc]] utils.PrecisionType [[autodoc]] utils.RNGType [[autodoc]] utils.SageMakerDistributedType ### Kwargs These are configurable arguments for specific interactions throughout the PyTorch ecosystem that Accelerate handles under the hood. [[autodoc]] utils.AutocastKwargs [[autodoc]] utils.DistributedDataParallelKwargs [[autodoc]] utils.FP8RecipeKwargs [[autodoc]] utils.GradScalerKwargs [[autodoc]] utils.InitProcessGroupKwargs [[autodoc]] utils.KwargsHandler ## Plugins These are plugins that can be passed to the [`Accelerator`] object. While they are defined elsewhere in the documentation, for convenience all of them are available to see here: [[autodoc]] utils.DeepSpeedPlugin [[autodoc]] utils.FullyShardedDataParallelPlugin [[autodoc]] utils.GradientAccumulationPlugin [[autodoc]] utils.MegatronLMPlugin [[autodoc]] utils.TorchDynamoPlugin ## Configurations These are classes which can be configured and passed through to the appropriate integration [[autodoc]] utils.BnbQuantizationConfig [[autodoc]] utils.DataLoaderConfiguration [[autodoc]] utils.ProjectConfiguration ## Environmental Variables These are environmental variables that can be enabled for different use cases * `ACCELERATE_DEBUG_MODE` (`str`): Whether to run accelerate in debug mode. More info available [here](../usage_guides/debug.md). ## Data Manipulation and Operations These include data operations that mimic the same `torch` ops but can be used on distributed processes. [[autodoc]] utils.broadcast [[autodoc]] utils.broadcast_object_list [[autodoc]] utils.concatenate [[autodoc]] utils.convert_outputs_to_fp32 [[autodoc]] utils.convert_to_fp32 [[autodoc]] utils.gather [[autodoc]] utils.gather_object [[autodoc]] utils.get_grad_scaler [[autodoc]] utils.get_mixed_precision_context_manager [[autodoc]] utils.listify [[autodoc]] utils.pad_across_processes [[autodoc]] utils.recursively_apply [[autodoc]] utils.reduce [[autodoc]] utils.send_to_device [[autodoc]] utils.slice_tensors ## Environment Checks These functionalities check the state of the current working environment including information about the operating system itself, what it can support, and if particular dependencies are installed. 
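As a small sketch (how you branch on these flags is up to your own code), the checks are typically used to guard optional code paths:

```python
from accelerate.utils import is_bf16_available, is_torch_version

# Pick a mixed-precision mode based on what the current environment supports.
mixed_precision = "bf16" if is_bf16_available() else "fp16"

# Guard a feature that requires a minimum PyTorch version.
if is_torch_version(">=", "2.0.0"):
    print(f"PyTorch >= 2.0 detected; selected mixed_precision={mixed_precision}")
```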
[[autodoc]] utils.is_bf16_available [[autodoc]] utils.is_ipex_available [[autodoc]] utils.is_mps_available [[autodoc]] utils.is_npu_available [[autodoc]] utils.is_torch_version [[autodoc]] utils.is_torch_xla_available [[autodoc]] utils.is_xpu_available ## Environment Manipulation [[autodoc]] utils.patch_environment [[autodoc]] utils.clear_environment [[autodoc]] utils.write_basic_config When setting up 🤗 Accelerate for the first time, rather than running `accelerate config` [~utils.write_basic_config] can be used as an alternative for quick configuration. [[autodoc]] utils.set_numa_affinity [[autodoc]] utils.environment.override_numa_affinity [[autodoc]] utils.purge_accelerate_environment ## Memory [[autodoc]] utils.find_executable_batch_size ## Modeling These utilities relate to interacting with PyTorch models [[autodoc]] utils.calculate_maximum_sizes [[autodoc]] utils.compute_module_sizes [[autodoc]] utils.extract_model_from_parallel [[autodoc]] utils.get_balanced_memory [[autodoc]] utils.get_max_layer_size [[autodoc]] utils.infer_auto_device_map [[autodoc]] utils.load_checkpoint_in_model [[autodoc]] utils.load_offloaded_weights [[autodoc]] utils.load_state_dict [[autodoc]] utils.offload_state_dict [[autodoc]] utils.retie_parameters [[autodoc]] utils.set_module_tensor_to_device [[autodoc]] utils.get_module_children_bottom_up ## Parallel These include general utilities that should be used when working in parallel. [[autodoc]] utils.extract_model_from_parallel [[autodoc]] utils.save [[autodoc]] utils.load [[autodoc]] utils.wait_for_everyone ## Random These utilities relate to setting and synchronizing of all the random states. [[autodoc]] utils.set_seed [[autodoc]] utils.synchronize_rng_state [[autodoc]] utils.synchronize_rng_states ## PyTorch XLA These include utilities that are useful while using PyTorch with XLA. [[autodoc]] utils.install_xla ## Loading model weights These include utilities that are useful to load checkpoints. [[autodoc]] utils.load_checkpoint_in_model ## Quantization These include utilities that are useful to quantize model. [[autodoc]] utils.load_and_quantize_model accelerate-1.9.0/docs/source/quicktour.md000066400000000000000000000265441503574341000204260ustar00rootroot00000000000000 # Quicktour There are many ways to launch and run your code depending on your training environment ([torchrun](https://pytorch.org/docs/stable/elastic/run.html), [DeepSpeed](https://www.deepspeed.ai/), etc.) and available hardware. Accelerate offers a unified interface for launching and training on different distributed setups, allowing you to focus on your PyTorch training code instead of the intricacies of adapting your code to these different setups. This allows you to easily scale your PyTorch code for training and inference on distributed setups with hardware like GPUs and TPUs. Accelerate also provides Big Model Inference to make loading and running inference with really large models that usually don't fit in memory more accessible. This quicktour introduces the three main features of Accelerate: * a unified command line launching interface for distributed training scripts * a training library for adapting PyTorch training code to run on different distributed setups * Big Model Inference ## Unified launch interface Accelerate automatically selects the appropriate configuration values for any given distributed training framework (DeepSpeed, FSDP, etc.) through a unified configuration file generated from the [`accelerate config`](package_reference/cli#accelerate-config) command. 
You could also pass the configuration values explicitly to the command line which is helpful in certain situations like if you're using SLURM. But in most cases, you should always run [`accelerate config`](package_reference/cli#accelerate-config) first to help Accelerate learn about your training setup. ```bash accelerate config ``` The [`accelerate config`](package_reference/cli#accelerate-config) command creates and saves a default_config.yaml file in Accelerates cache folder. This file stores the configuration for your training environment, which helps Accelerate correctly launch your training script based on your machine. After you've configured your environment, you can test your setup with [`accelerate test`](package_reference/cli#accelerate-test), which launches a short script to test the distributed environment. ```bash accelerate test ``` > [!TIP] > Add `--config_file` to the `accelerate test` or `accelerate launch` command to specify the location of the configuration file if it is saved in a non-default location like the cache. Once your environment is setup, launch your training script with [`accelerate launch`](package_reference/cli#accelerate-launch)! ```bash accelerate launch path_to_script.py --args_for_the_script ``` To learn more, check out the [Launch distributed code](basic_tutorials/launch) tutorial for more information about launching your scripts. We also have a [configuration zoo](https://github.com/huggingface/accelerate/blob/main/examples/config_yaml_templates) which showcases a number of premade **minimal** example configurations for a variety of setups you can run. ## Adapt training code The next main feature of Accelerate is the [`Accelerator`] class which adapts your PyTorch code to run on different distributed setups. You only need to add a few lines of code to your training script to enable it to run on multiple GPUs or TPUs. ```diff + from accelerate import Accelerator + accelerator = Accelerator() + device = accelerator.device + model, optimizer, training_dataloader, scheduler = accelerator.prepare( + model, optimizer, training_dataloader, scheduler + ) for batch in training_dataloader: optimizer.zero_grad() inputs, targets = batch - inputs = inputs.to(device) - targets = targets.to(device) outputs = model(inputs) loss = loss_function(outputs, targets) + accelerator.backward(loss) optimizer.step() scheduler.step() ``` 1. Import and instantiate the [`Accelerator`] class at the beginning of your training script. The [`Accelerator`] class initializes everything necessary for distributed training, and it automatically detects your training environment (a single machine with a GPU, a machine with several GPUs, several machines with multiple GPUs or a TPU, etc.) based on how the code was launched. ```python from accelerate import Accelerator accelerator = Accelerator() ``` 2. Remove calls like `.cuda()` on your model and input data. The [`Accelerator`] class automatically places these objects on the appropriate device for you. > [!WARNING] > This step is *optional* but it is considered best practice to allow Accelerate to handle device placement. You could also deactivate automatic device placement by passing `device_placement=False` when initializing the [`Accelerator`]. If you want to explicitly place objects on a device with `.to(device)`, make sure you use `accelerator.device` instead. For example, if you create an optimizer before placing a model on `accelerator.device`, training fails on a TPU. 
> [!WARNING] > Accelerate does not use non-blocking transfers by default for its automatic device placement, which can result in potentially unwanted CUDA synchronizations. You can enable non-blocking transfers by passing a [`~utils.dataclasses.DataLoaderConfiguration`] with `non_blocking=True` set as the `dataloader_config` when initializing the [`Accelerator`]. As usual, non-blocking transfers will only work if the dataloader also has `pin_memory=True` set. Be wary that using non-blocking transfers from GPU to CPU may cause incorrect results if it results in CPU operations being performed on non-ready tensors. ```py device = accelerator.device ``` 3. Pass all relevant PyTorch objects for training (optimizer, model, dataloader(s), learning rate scheduler) to the [`~Accelerator.prepare`] method as soon as they're created. This method wraps the model in a container optimized for your distributed setup, uses Accelerates version of the optimizer and scheduler, and creates a sharded version of your dataloader for distribution across GPUs or TPUs. ```python model, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, lr_scheduler ) ``` 4. Replace `loss.backward()` with [`~Accelerator.backward`] to use the correct `backward()` method for your training setup. ```py accelerator.backward(loss) ``` Read [Accelerate’s internal mechanisms](concept_guides/internal_mechanism) guide to learn more details about how Accelerate adapts your code. ### Distributed evaluation To perform distributed evaluation, pass your validation dataloader to the [`~Accelerator.prepare`] method: ```python validation_dataloader = accelerator.prepare(validation_dataloader) ``` Each device in your distributed setup only receives a part of the evaluation data, which means you should group your predictions together with the [`~Accelerator.gather_for_metrics`] method. This method requires all tensors to be the same size on each process, so if your tensors have different sizes on each process (for instance when dynamically padding to the maximum length in a batch), you should use the [`~Accelerator.pad_across_processes`] method to pad you tensor to the largest size across processes. Note that the tensors needs to be 1D and that we concatenate the tensors along the first dimension. ```python for inputs, targets in validation_dataloader: predictions = model(inputs) # Gather all predictions and targets all_predictions, all_targets = accelerator.gather_for_metrics((predictions, targets)) # Example of use with a *Datasets.Metric* metric.add_batch(all_predictions, all_targets) ``` For more complex cases (e.g. 2D tensors, don't want to concatenate tensors, dict of 3D tensors), you can pass `use_gather_object=True` in `gather_for_metrics`. This will return the list of objects after gathering. Note that using it with GPU tensors is not well supported and inefficient. > [!TIP] > Data at the end of a dataset may be duplicated so the batch can be equally divided among all workers. The [`~Accelerator.gather_for_metrics`] method automatically removes the duplicated data to calculate a more accurate metric. ## Big Model Inference Accelerate's Big Model Inference has two main features, [`~accelerate.init_empty_weights`] and [`~accelerate.load_checkpoint_and_dispatch`], to load large models for inference that typically don't fit into memory. 
> [!TIP] > Take a look at the [Handling big models for inference](concept_guides/big_model_inference) guide for a better understanding of how Big Model Inference works under the hood. ### Empty weights initialization The [`~accelerate.init_empty_weights`] context manager initializes models of any size by creating a *model skeleton* and moving and placing parameters each time they're created to PyTorch's [**meta**](https://pytorch.org/docs/main/meta.html) device. This way, not all weights are immediately loaded and only a small part of the model is loaded into memory at a time. For example, loading an empty [Mixtral-8x7B](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1) model takes significantly less memory than fully loading the models and weights on the CPU. ```py from accelerate import init_empty_weights from transformers import AutoConfig, AutoModelForCausalLM config = AutoConfig.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1") with init_empty_weights(): model = AutoModelForCausalLM.from_config(config) ``` ### Load and dispatch weights The [`~accelerate.load_checkpoint_and_dispatch`] function loads full or sharded checkpoints into the empty model, and automatically distribute weights across all available devices. The `device_map` parameter determines where to place each model layer, and specifying `"auto"` places them on the GPU first, then the CPU, and finally the hard drive as memory-mapped tensors if there's still not enough memory. Use the `no_split_module_classes` parameter to indicate which modules shouldn't be split across devices (typically those with a residual connection). ```py from accelerate import load_checkpoint_and_dispatch model_checkpoint = "your-local-model-folder" model = load_checkpoint_and_dispatch( model, checkpoint=model_checkpoint, device_map="auto", no_split_module_classes=['Block'] ) ``` ## Next steps Now that you've been introduced to the main Accelerate features, your next steps could include: * Check out the [tutorials](basic_tutorials/overview) for a gentle walkthrough of Accelerate. This is especially useful if you're new to distributed training and the library. * Dive into the [guides](usage_guides/explore) to see how to use Accelerate for specific use-cases. * Deepen your conceptual understanding of how Accelerate works internally by reading the [concept guides](concept_guides/internal_mechanism). * Look up classes and commands in the [API reference](package_reference/accelerator) to see what parameters and options are available. accelerate-1.9.0/docs/source/usage_guides/000077500000000000000000000000001503574341000205075ustar00rootroot00000000000000accelerate-1.9.0/docs/source/usage_guides/big_modeling.md000066400000000000000000000133731503574341000234570ustar00rootroot00000000000000 # Big Model Inference One of the biggest advancements Accelerate provides is [Big Model Inference](../concept_guides/big_model_inference), which allows you to perform inference with models that don't fully fit on your graphics card. This tutorial will show you how to use Big Model Inference in Accelerate and the Hugging Face ecosystem. ## Accelerate A typical workflow for loading a PyTorch model is shown below. `ModelClass` is a model that exceeds the GPU memory of your device (mps or cuda or xpu). ```py import torch my_model = ModelClass(...) state_dict = torch.load(checkpoint_file) my_model.load_state_dict(state_dict) ``` With Big Model Inference, the first step is to init an empty skeleton of the model with the `init_empty_weights` context manager. 
This doesn't require any memory because `my_model` is "parameterless". ```py from accelerate import init_empty_weights with init_empty_weights(): my_model = ModelClass(...) ``` Next, the weights are loaded into the model for inference. The [`load_checkpoint_and_dispatch`] method loads a checkpoint inside your empty model and dispatches the weights for each layer across all available devices, starting with the fastest devices (GPU, MPS, XPU, NPU, MLU, SDAA, MUSA) first before moving to the slower ones (CPU and hard drive). Setting `device_map="auto"` automatically fills all available space on the GPU(s) first, then the CPU, and finally, the hard drive (the absolute slowest option) if there is still not enough memory. > [!TIP] > Refer to the [Designing a device map](../concept_guides/big_model_inference#designing-a-device-map) guide for more details on how to design your own device map. ```py from accelerate import load_checkpoint_and_dispatch model = load_checkpoint_and_dispatch( model, checkpoint=checkpoint_file, device_map="auto" ) ``` If there are certain “chunks” of layers that shouldn’t be split, pass them to `no_split_module_classes` (see [here](../concept_guides/big_model_inference#loading-weights) for more details). A models weights can also be sharded into multiple checkpoints to save memory, such as when the `state_dict` doesn't fit in memory (see [here](../concept_guides/big_model_inference#sharded-checkpoints) for more details). Now that the model is fully dispatched, you can perform inference. ```py input = torch.randn(2,3) device_type = next(iter(model.parameters())).device.type input = input.to(device_type) output = model(input) ``` Each time an input is passed through a layer, it is sent from the CPU to the GPU (or disk to CPU to GPU), the output is calculated, and the layer is removed from the GPU going back down the line. While this adds some overhead to inference, it enables you to run any size model on your system, as long as the largest layer fits on your GPU. Multiple GPUs, or "model parallelism", can be utilized but only one GPU will be active at any given moment. This forces the GPU to wait for the previous GPU to send it the output. You should launch your script normally with Python instead of other tools like torchrun and accelerate launch. > [!TIP] > You may also be interested in *pipeline parallelism* which utilizes all available GPUs at once, instead of only having one GPU active at a time. This approach is less flexbile though. For more details, refer to the [Memory-efficient pipeline parallelism](./distributed_inference#memory-efficient-pipeline-parallelism-experimental) guide. Take a look at a full example of Big Model Inference below. ```py import torch from accelerate import init_empty_weights, load_checkpoint_and_dispatch with init_empty_weights(): model = MyModel(...) model = load_checkpoint_and_dispatch( model, checkpoint=checkpoint_file, device_map="auto" ) input = torch.randn(2,3) device_type = next(iter(model.parameters())).device.type input = input.to(device_type) output = model(input) ``` ## Hugging Face ecosystem Other libraries in the Hugging Face ecosystem, like Transformers or Diffusers, supports Big Model Inference in their [`~transformers.PreTrainedModel.from_pretrained`] constructors. You just need to add `device_map="auto"` in [`~transformers.PreTrainedModel.from_pretrained`] to enable Big Model Inference. For example, load Big Sciences T0pp 11 billion parameter model with Big Model Inference. 
```py from transformers import AutoModelForSeq2SeqLM model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0pp", device_map="auto") ``` After loading the model, the empty init and smart dispatch steps from before are executed and the model is fully ready to make use of all the resources in your machine. Through these constructors, you can also save more memory by specifying the `torch_dtype` parameter to load a model in a lower precision. ```py from transformers import AutoModelForSeq2SeqLM model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0pp", device_map="auto", torch_dtype=torch.float16) ``` ## Next steps For a more detailed explanation of Big Model Inference, make sure to check out the [conceptual guide](../concept_guides/big_model_inference)! accelerate-1.9.0/docs/source/usage_guides/checkpoint.md000066400000000000000000000075121503574341000231650ustar00rootroot00000000000000 # Checkpointing When training a PyTorch model with Accelerate, you may often want to save and continue a state of training. Doing so requires saving and loading the model, optimizer, RNG generators, and the GradScaler. Inside Accelerate are two convenience functions to achieve this quickly: - Use [`~Accelerator.save_state`] for saving everything mentioned above to a folder location - Use [`~Accelerator.load_state`] for loading everything stored from an earlier `save_state` To further customize where and how states are saved through [`~Accelerator.save_state`] the [`~utils.ProjectConfiguration`] class can be used. For example if `automatic_checkpoint_naming` is enabled each saved checkpoint will be located then at `Accelerator.project_dir/checkpoints/checkpoint_{checkpoint_number}`. It should be noted that the expectation is that those states come from the same training script, they should not be from two separate scripts. - By using [`~Accelerator.register_for_checkpointing`], you can register custom objects to be automatically stored or loaded from the two prior functions, so long as the object has a `state_dict` **and** a `load_state_dict` functionality. This could include objects such as a learning rate scheduler. Below is a brief example using checkpointing to save and reload a state during training: ```python from accelerate import Accelerator import torch accelerator = Accelerator(project_dir="my/save/path") my_scheduler = torch.optim.lr_scheduler.StepLR(my_optimizer, step_size=1, gamma=0.99) my_model, my_optimizer, my_training_dataloader = accelerator.prepare(my_model, my_optimizer, my_training_dataloader) # Register the LR scheduler accelerator.register_for_checkpointing(my_scheduler) # Save the starting state accelerator.save_state() device = accelerator.device my_model.to(device) # Perform training for epoch in range(num_epochs): for batch in my_training_dataloader: my_optimizer.zero_grad() inputs, targets = batch inputs = inputs.to(device) targets = targets.to(device) outputs = my_model(inputs) loss = my_loss_function(outputs, targets) accelerator.backward(loss) my_optimizer.step() my_scheduler.step() # Restore the previous state accelerator.load_state("my/save/path/checkpointing/checkpoint_0") ``` ## Restoring the state of the DataLoader After resuming from a checkpoint, it may also be desirable to resume from a particular point in the active `DataLoader` if the state was saved during the middle of an epoch. You can use [`~Accelerator.skip_first_batches`] to do so. 
```python
from accelerate import Accelerator

accelerator = Accelerator(project_dir="my/save/path")

train_dataloader = accelerator.prepare(train_dataloader)
accelerator.load_state("my_state")

# Assume the checkpoint was saved 100 steps into the epoch
skipped_dataloader = accelerator.skip_first_batches(train_dataloader, 100)

# After the first iteration, go back to `train_dataloader`

# First epoch
for batch in skipped_dataloader:
    # Do something
    pass

# Second epoch
for batch in train_dataloader:
    # Do something
    pass
```

accelerate-1.9.0/docs/source/usage_guides/compilation.md

# Compilation

## Overview

PyTorch 2.0 introduced `torch.compile`, a powerful feature that makes PyTorch code run faster by JIT-compiling PyTorch code into optimized kernels. Key features of `torch.compile` include:

- **Performance Improvement**: Significantly speeds up model execution by optimizing the computation graph.
- **Ease of Use**: Requires minimal code changes to implement, making it highly accessible.
- **Compatibility**: Works seamlessly with existing PyTorch code and models.

When used with Accelerate, `torch.compile` integrates smoothly into distributed training workflows, allowing you to benefit from both distributed execution and compilation optimizations simultaneously.

The first execution of compiled code typically takes longer as it includes the compilation time, but subsequent runs are significantly faster. For optimal performance in different scenarios, `torch.compile` offers various modes like `"default"`, `"reduce-overhead"` (which uses CUDA graphs to further reduce overhead), and `"max-autotune"` (which performs extensive autotuning to find the best kernels for your model).

## Using `torch.compile` with Accelerate

Accelerate provides `TorchDynamoPlugin` for easy and seamless integration of `torch.compile` into your training scripts.

```python
from accelerate import Accelerator
from accelerate.utils import TorchDynamoPlugin

# Configure the compilation backend
dynamo_plugin = TorchDynamoPlugin(
    backend="inductor",  # Options: "inductor", "aot_eager", "aot_nvfuser", etc.
    mode="default",      # Options: "default", "reduce-overhead", "max-autotune"
    fullgraph=True,
    dynamic=False
)

# Initialize accelerator with the plugin
accelerator = Accelerator(dynamo_plugin=dynamo_plugin)

# This will apply torch.compile to your model
model = accelerator.prepare(model)
```

It is compatible with all other features and plugins of Accelerate, including mixed precision, distributed training (DDP, FSDP, Deepspeed), etc.

## Regional Compilation

Instead of trying to compile the whole model, which usually has a big problem space for optimization, regional compilation targets repeated blocks of the same class and compiles them sequentially to hit the compiler's cache. For example, in `GPT2LMHeadModel`, the repeated block/class is `GPT2Block`, and can be accessed as `model.transformer.h[0]`. The rest of the model (e.g. `model.lm_head`) is compiled separately. This allows us to reduce the compilation overhead / cold start of models like LLMs and Transformers in general. See the PyTorch recipe on regional compilation for more details.

### How to Use Regional Compilation

It can be enabled by setting `use_regional_compilation=True` in the `TorchDynamoPlugin` configuration:

```python # Configure the compilation backend dynamo_plugin = TorchDynamoPlugin( use_regional_compilation=True, ...
# other parameters ) # Initialize accelerator with the plugin accelerator = Accelerator(dynamo_plugin=dynamo_plugin) # This will apply compile_regions to your model model = accelerator.prepare(model) ``` You could also use the `accelerate.utils.compile_regions` utility directly the same way you would use `torch.compile`. ### Benefits of Regional Compilation We have conducted extensive benchmarks comparing full compilation and regional compilation using the `torch.compile` feature in PyTorch. The full results are available in the [accelerate repository](https://github.com/huggingface/accelerate/tree/main/benchmarks/torch.compile/regional_compilation). The key findings from our benchmarks are: 1. **Comparable Performance**: Regional compilation delivers performance speedups similar to full compilation, especially for larger models. 2. **Faster Compilation**: Regional compilation significantly reduces the time taken to compile models, making it a more efficient choice for deployment. 3. **Batch Size Impact**: The performance difference between compilation strategies diminishes with larger batch sizes, indicating that the overhead of compilation is less impactful in those scenarios. 4. **Model Size Consideration**: The benefits of regional compilation are more pronounced in larger models, where the compilation time savings can be substantial. 5. **Practical Application**: For real-world applications, regional compilation is a practical choice for optimizing training cold start times, especially when working with large models. ## Conclusion Both full and regional compilation can significantly speed up your models. Regional compilation offers a practical balance between compilation time and runtime performance, especially for training large models with substantial batch sizes. accelerate-1.9.0/docs/source/usage_guides/ddp_comm_hook.md000066400000000000000000000246061503574341000236430ustar00rootroot00000000000000 # DDP Communication Hooks Distributed Data Parallel (DDP) communication hooks provide a generic interface to control how gradients are communicated across workers by overriding the vanilla allreduce in `DistributedDataParallel`. A few built-in communication hooks are provided, and users can easily apply any of these hooks to optimize communication. - **FP16 Compression Hook**: Compresses gradients by casting them to half-precision floating-point format (`torch.float16`), reducing communication overhead. - **BF16 Compression Hook**: Similar to FP16, but uses the Brain Floating Point format (`torch.bfloat16`), which can be more efficient on certain hardware. - **PowerSGD Hook**: An advanced gradient compression algorithm that provides high compression rates and can accelerate bandwidth-bound distributed training. In this tutorial, you will see how to quickly set up DDP communication hooks and perform training with the utilities provided in Accelerate, which can be as simple as adding just one new line of code! This demonstrates how to use DDP communication hooks to optimize gradient communication in distributed training with the Accelerate library. 
## FP16 Compression Hook ```python import torch from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.algorithms.ddp_comm_hooks import default_hooks from accelerate.test_utils.testing import get_backend device_type, _, _ = get_backend() device_id = getattr(torch, device_type, torch.cuda).current_device() class MyModel(torch.nn.Module): def __init__(self): super().__init__() self.layer = torch.nn.Linear(10, 10) def forward(self, x): return self.layer(x) model = MyModel() model = DDP(model, device_ids=[device_id]) model.register_comm_hook(state=None, hook=default_hooks.fp16_compress_hook) # Training loop for data, targets in data_loader: outputs = model(data) loss = criterion(outputs, targets) loss.backward() optimizer.step() optimizer.zero_grad() ``` ```python from accelerate import Accelerator, DDPCommunicationHookType, DistributedDataParallelKwargs import torch class MyModel(torch.nn.Module): def __init__(self): super().__init__() self.layer = torch.nn.Linear(10, 10) def forward(self, x): return self.layer(x) # DDP Communication Hook setup ddp_kwargs = DistributedDataParallelKwargs(comm_hook=DDPCommunicationHookType.FP16) accelerator = Accelerator(kwargs_handlers=[ddp_kwargs]) model = MyModel() optimizer = torch.optim.Adam(model.parameters()) data_loader = DataLoader(dataset, batch_size=16) model, optimizer, data_loader = accelerator.prepare(model, optimizer, data_loader) # Training loop for data, targets in data_loader: outputs = model(data) loss = criterion(outputs, targets) accelerator.backward(loss) optimizer.step() optimizer.zero_grad() ``` ### BF16 Compression Hook BF16 Compression Hook API is experimental, and it requires NCCL version later than 2.9.6. ```python import torch from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.algorithms.ddp_comm_hooks import default_hooks from accelerate.test_utils.testing import get_backend device_type, _, _ = get_backend() device_id = getattr(torch, device_type, torch.cuda).current_device() class MyModel(torch.nn.Module): def __init__(self): super().__init__() self.layer = torch.nn.Linear(10, 10) def forward(self, x): return self.layer(x) model = MyModel() model = DDP(model, device_ids=[device_id]) model.register_comm_hook(state=None, hook=default_hooks.bf16_compress_hook) # Training loop for data, targets in data_loader: outputs = model(data) loss = criterion(outputs, targets) loss.backward() optimizer.step() optimizer.zero_grad() ``` ```python from accelerate import Accelerator, DDPCommunicationHookType, DistributedDataParallelKwargs import torch class MyModel(torch.nn.Module): def __init__(self): super().__init__() self.layer = torch.nn.Linear(10, 10) def forward(self, x): return self.layer(x) # DDP Communication Hook setup ddp_kwargs = DistributedDataParallelKwargs(comm_hook=DDPCommunicationHookType.BF16) accelerator = Accelerator(kwargs_handlers=[ddp_kwargs]) model = MyModel() optimizer = torch.optim.Adam(model.parameters()) data_loader = DataLoader(dataset, batch_size=16) model, optimizer, data_loader = accelerator.prepare(model, optimizer, data_loader) # Training loop for data, targets in data_loader: outputs = model(data) loss = criterion(outputs, targets) accelerator.backward(loss) optimizer.step() optimizer.zero_grad() ``` ### PowerSGD Hook PowerSGD typically requires extra memory of the same size as the model’s gradients to enable error feedback, which can compensate for biased compressed communication and improve accuracy. 
```python import torch from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.algorithms.ddp_comm_hooks import powerSGD_hook from accelerate.test_utils.testing import get_backend device_type, _, _ = get_backend() device_id = getattr(torch, device_type, torch.cuda).current_device() class MyModel(torch.nn.Module): def __init__(self): super().__init__() self.layer = torch.nn.Linear(10, 10) def forward(self, x): return self.layer(x) model = MyModel() model = DDP(model, device_ids=[device_id]) state = powerSGD_hook.PowerSGDState(process_group=None) model.register_comm_hook(state=state, hook=powerSGD_hook.powerSGD_hook) # Training loop for data, targets in data_loader: outputs = model(data) loss = criterion(outputs, targets) loss.backward() optimizer.step() optimizer.zero_grad() ``` ```python from accelerate import Accelerator, DDPCommunicationHookType, DistributedDataParallelKwargs import torch class MyModel(torch.nn.Module): def __init__(self): super().__init__() self.layer = torch.nn.Linear(10, 10) def forward(self, x): return self.layer(x) # DDP Communication Hook setup ddp_kwargs = DistributedDataParallelKwargs(comm_hook=DDPCommunicationHookType.POWER_SGD) accelerator = Accelerator(kwargs_handlers=[ddp_kwargs]) model = MyModel() optimizer = torch.optim.Adam(model.parameters()) data_loader = DataLoader(dataset, batch_size=16) model, optimizer, data_loader = accelerator.prepare(model, optimizer, data_loader) # Training loop for data, targets in data_loader: outputs = model(data) loss = criterion(outputs, targets) accelerator.backward(loss) optimizer.step() optimizer.zero_grad() ``` ## DDP Communication Hooks utilities There are two additional utilities for supporting optional functionalities with the communication hooks. ### comm_wrapper `comm_wrapper` is an option to wrap a communication hook with additional functionality. For example, it can be used to combine FP16 compression with other communication strategies. Currently supported wrappers are `no`, `fp16`, and `bf16`. ```python from accelerate import Accelerator, DDPCommunicationHookType, DistributedDataParallelKwargs import torch class MyModel(torch.nn.Module): def __init__(self): super().__init__() self.layer = torch.nn.Linear(10, 10) def forward(self, x): return self.layer(x) # DDP Communication Hook setup ddp_kwargs = DistributedDataParallelKwargs( comm_hook=DDPCommunicationHookType.POWER_SGD, comm_wrapper=DDPCommunicationHookType.FP16 ) accelerator = Accelerator(kwargs_handlers=[ddp_kwargs]) model = MyModel() optimizer = torch.optim.Adam(model.parameters()) data_loader = DataLoader(dataset, batch_size=16) model, optimizer, data_loader = accelerator.prepare(model, optimizer, data_loader) # Training loop for data, targets in data_loader: outputs = model(data) loss = criterion(outputs, targets) accelerator.backward(loss) optimizer.step() optimizer.zero_grad() ``` ### comm_state_option `comm_state_option` allows you to pass additional state information required by certain communication hooks. This is particularly useful for stateful hooks like `PowerSGD`, which require maintaining hyperparameters and internal states across training steps. Below is an example showcasing the use of `comm_state_option` with the `PowerSGD` hook. 
```python from accelerate import Accelerator, DDPCommunicationHookType, DistributedDataParallelKwargs import torch class MyModel(torch.nn.Module): def __init__(self): super().__init__() self.layer = torch.nn.Linear(10, 10) def forward(self, x): return self.layer(x) # DDP Communication Hook setup ddp_kwargs = DistributedDataParallelKwargs( comm_hook=DDPCommunicationHookType.POWER_SGD, comm_state_option={"matrix_approximation_rank": 2} ) accelerator = Accelerator(kwargs_handlers=[ddp_kwargs]) model = MyModel() optimizer = torch.optim.Adam(model.parameters()) data_loader = DataLoader(dataset, batch_size=16) model, optimizer, data_loader = accelerator.prepare(model, optimizer, data_loader) # Training loop for data, targets in data_loader: outputs = model(data) loss = criterion(outputs, targets) accelerator.backward(loss) optimizer.step() optimizer.zero_grad() ``` For more advanced usage and additional hooks, refer to the [PyTorch DDP Communication Hooks documentation](https://pytorch.org/docs/stable/ddp_comm_hooks.html). accelerate-1.9.0/docs/source/usage_guides/deepspeed.md000066400000000000000000000747121503574341000230020ustar00rootroot00000000000000 # DeepSpeed [DeepSpeed](https://github.com/deepspeedai/DeepSpeed) implements everything described in the [ZeRO paper](https://arxiv.org/abs/1910.02054). Some of the salient optimizations are: 1. Optimizer state partitioning (ZeRO stage 1) 2. Gradient partitioning (ZeRO stage 2) 3. Parameter partitioning (ZeRO stage 3) 4. Custom mixed precision training handling 5. A range of fast CUDA-extension-based optimizers 6. ZeRO-Offload to CPU and Disk/NVMe 7. Hierarchical partitioning of model parameters (ZeRO++) ZeRO-Offload has its own dedicated paper: [ZeRO-Offload: Democratizing Billion-Scale Model Training](https://arxiv.org/abs/2101.06840). And NVMe-support is described in the paper [ZeRO-Infinity: Breaking the GPU Memory Wall for Extreme Scale Deep Learning](https://arxiv.org/abs/2104.07857). DeepSpeed ZeRO-2 is primarily used only for training, as its features are of no use to inference. DeepSpeed ZeRO-3 can be used for inference as well since it allows huge models to be loaded on multiple GPUs, which won't be possible on a single GPU. Accelerate integrates [DeepSpeed](https://github.com/deepspeedai/DeepSpeed) via 2 options: 1. Integration of the DeepSpeed features via `deepspeed config file` specification in `accelerate config` . You just supply your custom config file or use our template. Most of this document is focused on this feature. This supports all the core features of DeepSpeed and gives user a lot of flexibility. User may have to change a few lines of code depending on the config. 2. Integration via `deepspeed_plugin`.This supports subset of the DeepSpeed features and uses default options for the rest of the configurations. User need not change any code and is good for those who are fine with most of the default settings of DeepSpeed. ## What is integrated? Training: 1. Accelerate integrates all features of DeepSpeed ZeRO. This includes all the ZeRO stages 1, 2 and 3 as well as ZeRO-Offload, ZeRO-Infinity (which can offload to disk/NVMe) and ZeRO++. 
Below is a short description of Data Parallelism using ZeRO (Zero Redundancy Optimizer), along with a diagram from this [blog post](https://www.microsoft.com/en-us/research/blog/zero-deepspeed-new-system-optimizations-enable-training-models-with-over-100-billion-parameters/)

![ZeRO Data Parallelism](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-zero.png)

(Source: [link](https://www.microsoft.com/en-us/research/blog/zero-deepspeed-new-system-optimizations-enable-training-models-with-over-100-billion-parameters/))

a. **Stage 1**: Shards optimizer states across data parallel workers/GPUs

b. **Stage 2**: Shards optimizer states + gradients across data parallel workers/GPUs

c. **Stage 3**: Shards optimizer states + gradients + model parameters across data parallel workers/GPUs

d. **Optimizer Offload**: Offloads the gradients + optimizer states to CPU/Disk, building on top of ZeRO Stage 2

e. **Param Offload**: Offloads the model parameters to CPU/Disk, building on top of ZeRO Stage 3

f. **Hierarchical Partitioning**: Enables efficient multi-node training with data-parallel training across nodes and ZeRO-3 sharding within a node, built on top of ZeRO Stage 3.

Note: With respect to Disk Offload, the disk should be an NVMe drive for decent speed, but it technically works on any disk.

Inference:

1. DeepSpeed ZeRO Inference supports ZeRO stage 3 with ZeRO-Infinity. It uses the same ZeRO protocol as training, but it doesn't use an optimizer or a learning rate scheduler, and only stage 3 is relevant. For more details see: [deepspeed-zero-inference](#deepspeed-zero-inference).

## How it works?

**Pre-Requisites**: Install DeepSpeed version >=0.6.5. Please refer to the [DeepSpeed Installation details](https://github.com/deepspeedai/DeepSpeed#installation) for more information.

We will first look at the easy-to-use integration via `accelerate config`, followed by the more flexible and feature-rich `deepspeed config file` integration.

### Accelerate DeepSpeed Plugin

On your machine(s) just run:

```bash
accelerate config
```

and answer the questions asked. It will ask whether you want to use a config file for DeepSpeed, to which you should answer no. Then answer the following questions to generate a basic DeepSpeed config.
This will generate a config file that will be used automatically to properly set the default options when doing ```bash accelerate launch my_script.py --args_to_my_script ``` For instance, here is how you would run the NLP example `examples/nlp_example.py` (from the root of the repo) with DeepSpeed Plugin: **ZeRO Stage-2 DeepSpeed Plugin Example** ```bash compute_environment: LOCAL_MACHINE deepspeed_config: gradient_accumulation_steps: 1 gradient_clipping: 1.0 offload_optimizer_device: none offload_param_device: none zero3_init_flag: true zero_stage: 2 distributed_type: DEEPSPEED fsdp_config: {} machine_rank: 0 main_process_ip: null main_process_port: null main_training_function: main mixed_precision: fp16 num_machines: 1 num_processes: 2 use_cpu: false ``` ```bash accelerate launch examples/nlp_example.py --mixed_precision fp16 ``` **ZeRO Stage-3 with CPU Offload DeepSpeed Plugin Example** ```bash compute_environment: LOCAL_MACHINE deepspeed_config: gradient_accumulation_steps: 1 gradient_clipping: 1.0 offload_optimizer_device: cpu offload_param_device: cpu zero3_init_flag: true zero3_save_16bit_model: true zero_stage: 3 distributed_type: DEEPSPEED fsdp_config: {} machine_rank: 0 main_process_ip: null main_process_port: null main_training_function: main mixed_precision: fp16 num_machines: 1 num_processes: 2 use_cpu: false ``` ```bash accelerate launch examples/nlp_example.py --mixed_precision fp16 ``` Currently, `Accelerate` supports following config through the CLI: ```bash `zero_stage`: [0] Disabled, [1] optimizer state partitioning, [2] optimizer+gradient state partitioning and [3] optimizer+gradient+parameter partitioning `gradient_accumulation_steps`: Number of training steps to accumulate gradients before averaging and applying them. `gradient_clipping`: Enable gradient clipping with value. `offload_optimizer_device`: [none] Disable optimizer offloading, [cpu] offload optimizer to CPU, [nvme] offload optimizer to NVMe SSD. Only applicable with ZeRO >= Stage-2. `offload_optimizer_nvme_path`: Decides Nvme Path to offload optimizer states. If unspecified, will default to 'none'. `offload_param_device`: [none] Disable parameter offloading, [cpu] offload parameters to CPU, [nvme] offload parameters to NVMe SSD. Only applicable with ZeRO Stage-3. `offload_param_nvme_path`: Decides Nvme Path to offload parameters. If unspecified, will default to 'none'. `zero3_init_flag`: Decides whether to enable `deepspeed.zero.Init` for constructing massive models. Only applicable with ZeRO Stage-3. `zero3_save_16bit_model`: Decides whether to save 16-bit model weights when using ZeRO Stage-3. `mixed_precision`: `no` for FP32 training, `fp16` for FP16 mixed-precision training and `bf16` for BF16 mixed-precision training. `deepspeed_moe_layer_cls_names`: Comma-separated list of transformer Mixture-of-Experts (MoE) layer class names (case-sensitive) to wrap ,e.g, `MixtralSparseMoeBlock`, `Qwen2MoeSparseMoeBlock`, `JetMoEAttention,JetMoEBlock` ... `deepspeed_hostfile`: DeepSpeed hostfile for configuring multi-node compute resources. `deepspeed_exclusion_filter`: DeepSpeed exclusion filter string when using mutli-node setup. `deepspeed_inclusion_filter`: DeepSpeed inclusion filter string when using mutli-node setup. `deepspeed_multinode_launcher`: DeepSpeed multi-node launcher to use, e.g. `pdsh`, `standard`, `openmpi`, `mvapich`, `mpich`, `slurm`, `nossh` (requires DeepSpeed >= 0.14.5). If unspecified, will default to `pdsh`. `deepspeed_config_file`: path to the DeepSpeed config file in `json` format. 
See the next section for more details on this. ``` To be able to tweak more options, you will need to use a DeepSpeed config file. ### DeepSpeed Config File On your machine(s) just run: ```bash accelerate config ``` and answer the questions asked. It will ask whether you want to use a config file for deepspeed to which you answer yes and provide the path to the deepspeed config file. This will generate a config file that will be used automatically to properly set the default options when doing ```bash accelerate launch my_script.py --args_to_my_script ``` For instance, here is how you would run the NLP example `examples/by_feature/deepspeed_with_config_support.py` (from the root of the repo) with DeepSpeed Config File: **ZeRO Stage-2 DeepSpeed Config File Example** ```bash compute_environment: LOCAL_MACHINE deepspeed_config: deepspeed_config_file: /home/ubuntu/accelerate/examples/deepspeed_config_templates/zero_stage2_config.json zero3_init_flag: true distributed_type: DEEPSPEED fsdp_config: {} machine_rank: 0 main_process_ip: null main_process_port: null main_training_function: main mixed_precision: fp16 num_machines: 1 num_processes: 2 use_cpu: false ``` with the contents of `zero_stage2_config.json` being: ```json { "fp16": { "enabled": true, "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 }, "optimizer": { "type": "AdamW", "params": { "lr": "auto", "weight_decay": "auto", "torch_adam": true, "adam_w_mode": true } }, "scheduler": { "type": "WarmupDecayLR", "params": { "warmup_min_lr": "auto", "warmup_max_lr": "auto", "warmup_num_steps": "auto", "total_num_steps": "auto" } }, "zero_optimization": { "stage": 2, "allgather_partitions": true, "allgather_bucket_size": 2e8, "overlap_comm": true, "reduce_scatter": true, "reduce_bucket_size": "auto", "contiguous_gradients": true }, "gradient_accumulation_steps": 1, "gradient_clipping": "auto", "steps_per_print": 2000, "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "wall_clock_breakdown": false } ``` ```bash accelerate launch examples/by_feature/deepspeed_with_config_support.py \ --config_name "gpt2-large" \ --tokenizer_name "gpt2-large" \ --dataset_name "wikitext" \ --dataset_config_name "wikitext-2-raw-v1" \ --block_size 128 \ --output_dir "./clm/clm_deepspeed_stage2_accelerate" \ --learning_rate 5e-4 \ --per_device_train_batch_size 24 \ --per_device_eval_batch_size 24 \ --num_train_epochs 3 \ --with_tracking \ --report_to "wandb"\ ``` **ZeRO Stage-3 with CPU offload DeepSpeed Config File Example** ```bash compute_environment: LOCAL_MACHINE deepspeed_config: deepspeed_config_file: /home/ubuntu/accelerate/examples/deepspeed_config_templates/zero_stage3_offload_config.json zero3_init_flag: true distributed_type: DEEPSPEED fsdp_config: {} machine_rank: 0 main_process_ip: null main_process_port: null main_training_function: main mixed_precision: fp16 num_machines: 1 num_processes: 2 use_cpu: false ``` with the contents of `zero_stage3_offload_config.json` being: ```json { "fp16": { "enabled": true, "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 }, "optimizer": { "type": "AdamW", "params": { "lr": "auto", "weight_decay": "auto" } }, "scheduler": { "type": "WarmupDecayLR", "params": { "warmup_min_lr": "auto", "warmup_max_lr": "auto", "warmup_num_steps": "auto", "total_num_steps": "auto" } }, "zero_optimization": { "stage": 3, "offload_optimizer": { "device": "cpu", "pin_memory": true }, "offload_param": { 
"device": "cpu", "pin_memory": true }, "overlap_comm": true, "contiguous_gradients": true, "reduce_bucket_size": "auto", "stage3_prefetch_bucket_size": "auto", "stage3_param_persistence_threshold": "auto", "sub_group_size": 1e9, "stage3_max_live_parameters": 1e9, "stage3_max_reuse_distance": 1e9, "stage3_gather_16bit_weights_on_model_save": "auto" }, "gradient_accumulation_steps": 1, "gradient_clipping": "auto", "steps_per_print": 2000, "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "wall_clock_breakdown": false } ``` ```bash accelerate launch examples/by_feature/deepspeed_with_config_support.py \ --config_name "gpt2-large" \ --tokenizer_name "gpt2-large" \ --dataset_name "wikitext" \ --dataset_config_name "wikitext-2-raw-v1" \ --block_size 128 \ --output_dir "./clm/clm_deepspeed_stage3_offload_accelerate" \ --learning_rate 5e-4 \ --per_device_train_batch_size 32 \ --per_device_eval_batch_size 32 \ --num_train_epochs 3 \ --with_tracking \ --report_to "wandb"\ ``` **ZeRO++ Config Example** You can use the features of ZeRO++ by using the appropriate config parameters. Note that ZeRO++ is an extension for ZeRO Stage 3. Here is how the config file can be modified, from [DeepSpeed's ZeRO++ tutorial](https://www.deepspeed.ai/tutorials/zeropp/): ```json { "zero_optimization": { "stage": 3, "reduce_bucket_size": "auto", "zero_quantized_weights": true, "zero_hpz_partition_size": 8, "zero_quantized_gradients": true, "contiguous_gradients": true, "overlap_comm": true } } ``` For hierarchical partitioning, the partition size `zero_hpz_partition_size` should ideally be set to the number of GPUs per node. (For example, the above config file assumes 8 GPUs per node) **Important code changes when using DeepSpeed Config File** 1. DeepSpeed Optimizers and Schedulers. For more information on these, see the [DeepSpeed Optimizers](https://deepspeed.readthedocs.io/en/latest/optimizers.html) and [DeepSpeed Schedulers](https://deepspeed.readthedocs.io/en/latest/schedulers.html) documentation. We will look at the changes needed in the code when using these. a. DS Optim + DS Scheduler: The case when both `optimizer` and `scheduler` keys are present in the DeepSpeed config file. In this situation, those will be used and the user has to use `accelerate.utils.DummyOptim` and `accelerate.utils.DummyScheduler` to replace the PyTorch/Custom optimizers and schedulers in their code. Below is the snippet from `examples/by_feature/deepspeed_with_config_support.py` showing this: ```python # Creates Dummy Optimizer if `optimizer` was specified in the config file else creates Adam Optimizer optimizer_cls = ( torch.optim.AdamW if accelerator.state.deepspeed_plugin is None or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) optimizer = optimizer_cls(optimizer_grouped_parameters, lr=args.learning_rate) # Creates Dummy Scheduler if `scheduler` was specified in the config file else creates `args.lr_scheduler_type` Scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.max_train_steps, ) else: lr_scheduler = DummyScheduler( optimizer, total_num_steps=args.max_train_steps, warmup_num_steps=args.num_warmup_steps ) ``` b. Custom Optim + Custom Scheduler: The case when both `optimizer` and `scheduler` keys are absent in the DeepSpeed config file. 
In this situation, no code changes are needed from the user and this is the case when using integration via DeepSpeed Plugin. In the above example we can see that the code remains unchanged if the `optimizer` and `scheduler` keys are absent in the DeepSpeed config file. c. Custom Optim + DS Scheduler: The case when only `scheduler` key is present in the DeepSpeed config file. In this situation, the user has to use `accelerate.utils.DummyScheduler` to replace the PyTorch/Custom scheduler in their code. d. DS Optim + Custom Scheduler: The case when only `optimizer` key is present in the DeepSpeed config file. This will result in an error because you can only use DS Scheduler when using DS Optim. 2. Notice the `auto` values in the above example DeepSpeed config files. These are automatically handled by `prepare` method based on model, dataloaders, dummy optimizer and dummy schedulers provided to `prepare` method. Only the `auto` fields specified in above examples are handled by `prepare` method and the rest have to be explicitly specified by the user. The `auto` values are calculated as: - `reduce_bucket_size`: `hidden_size * hidden_size` - `stage3_prefetch_bucket_size`: `int(0.9 * hidden_size * hidden_size)` - `stage3_param_persistence_threshold`: `10 * hidden_size` For the `auto` feature to work for these 3 config entries - Accelerate will use `model.config.hidden_size` or `max(model.config.hidden_sizes)` as `hidden_size`. If neither of these is available, the launching will fail and you will have to set these 3 config entries manually. Remember the first 2 config entries are the communication buffers - the larger they are the more efficient the comms will be, and the larger they are the more GPU memory they will consume, so it's a tunable performance trade-off. **Things to note when using DeepSpeed Config File** Below is a sample script using `deepspeed_config_file` in different scenarios. Code `test.py`: ```python from accelerate import Accelerator from accelerate.state import AcceleratorState def main(): accelerator = Accelerator() accelerator.print(f"{AcceleratorState()}") if __name__ == "__main__": main() ``` **Scenario 1**: Manually tampered accelerate config file having `deepspeed_config_file` along with other entries. 1. Content of the `accelerate` config: ```yaml command_file: null commands: null compute_environment: LOCAL_MACHINE deepspeed_config: gradient_accumulation_steps: 1 gradient_clipping: 1.0 offload_optimizer_device: 'cpu' offload_param_device: 'cpu' zero3_init_flag: true zero3_save_16bit_model: true zero_stage: 3 deepspeed_config_file: 'ds_config.json' distributed_type: DEEPSPEED downcast_bf16: 'no' dynamo_backend: 'NO' fsdp_config: {} gpu_ids: null machine_rank: 0 main_process_ip: null main_process_port: null main_training_function: main megatron_lm_config: {} num_machines: 1 num_processes: 2 rdzv_backend: static same_network: true tpu_name: null tpu_zone: null use_cpu: false ``` 2. `ds_config.json`: ```json { "bf16": { "enabled": true }, "zero_optimization": { "stage": 3, "stage3_gather_16bit_weights_on_model_save": false, "offload_optimizer": { "device": "none" }, "offload_param": { "device": "none" } }, "gradient_clipping": 1.0, "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "gradient_accumulation_steps": 10, "steps_per_print": 2000000 } ``` 3. 
Output of `accelerate launch test.py`: ```bash ValueError: When using `deepspeed_config_file`, the following accelerate config variables will be ignored: ['gradient_accumulation_steps', 'gradient_clipping', 'zero_stage', 'offload_optimizer_device', 'offload_param_device', 'zero3_save_16bit_model', 'mixed_precision']. Please specify them appropriately in the DeepSpeed config file. If you are using an accelerate config file, remove other config variables mentioned in the above specified list. The easiest method is to create a new config following the questionnaire via `accelerate config`. It will only ask for the necessary config variables when using `deepspeed_config_file`. ``` **Scenario 2**: Use the solution of the error to create new accelerate config and check that no ambiguity error is now thrown. 1. Run `accelerate config`: ```bash $ accelerate config ------------------------------------------------------------------------------------------------------------------------------- In which compute environment are you running? This machine ------------------------------------------------------------------------------------------------------------------------------- Which type of machine are you using? multi-GPU How many different machines will you use (use more than 1 for multi-node training)? [1]: Do you wish to optimize your script with torch dynamo?[yes/NO]: Do you want to use DeepSpeed? [yes/NO]: yes Do you want to specify a json file to a DeepSpeed config? [yes/NO]: yes Please enter the path to the json DeepSpeed config file: ds_config.json Do you want to enable `deepspeed.zero.Init` when using ZeRO Stage-3 for constructing massive models? [yes/NO]: yes How many GPU(s) should be used for distributed training? [1]:4 accelerate configuration saved at ds_config_sample.yaml ``` 2. Content of the `accelerate` config: ```yaml compute_environment: LOCAL_MACHINE deepspeed_config: deepspeed_config_file: ds_config.json zero3_init_flag: true distributed_type: DEEPSPEED downcast_bf16: 'no' dynamo_backend: 'NO' fsdp_config: {} machine_rank: 0 main_training_function: main megatron_lm_config: {} num_machines: 1 num_processes: 4 rdzv_backend: static same_network: true use_cpu: false ``` 3. Output of `accelerate launch test.py`: ```bash Distributed environment: DEEPSPEED Backend: nccl Num processes: 4 Process index: 0 Local process index: 0 Device: cuda:0 Mixed precision type: bf16 ds_config: {'bf16': {'enabled': True}, 'zero_optimization': {'stage': 3, 'stage3_gather_16bit_weights_on_model_save': False, 'offload_optimizer': {'device': 'none'}, 'offload_param': {'device': 'none'}}, 'gradient_clipping': 1.0, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'gradient_accumulation_steps': 10, 'steps_per_print': inf, 'fp16': {'enabled': False}} ``` **Scenario 3**: Setting the `accelerate launch` command arguments related to DeepSpeed as `"auto"` in the DeepSpeed` configuration file and check that things work as expected. 1. New `ds_config.json` with `"auto"` for the `accelerate launch` DeepSpeed command arguments: ```json { "bf16": { "enabled": "auto" }, "zero_optimization": { "stage": "auto", "stage3_gather_16bit_weights_on_model_save": "auto", "offload_optimizer": { "device": "auto" }, "offload_param": { "device": "auto" } }, "gradient_clipping": "auto", "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "gradient_accumulation_steps": "auto", "steps_per_print": 2000000 } ``` 2. 
Output of `accelerate launch --mixed_precision="fp16" --zero_stage=3 --gradient_accumulation_steps=5 --gradient_clipping=1.0 --offload_param_device="cpu" --offload_optimizer_device="nvme" --zero3_save_16bit_model="true" test.py`: ```bash Distributed environment: DEEPSPEED Backend: nccl Num processes: 4 Process index: 0 Local process index: 0 Device: cuda:0 Mixed precision type: fp16 ds_config: {'bf16': {'enabled': False}, 'zero_optimization': {'stage': 3, 'stage3_gather_16bit_weights_on_model_save': True, 'offload_optimizer': {'device': 'nvme'}, 'offload_param': {'device': 'cpu'}}, 'gradient_clipping': 1.0, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'gradient_accumulation_steps': 5, 'steps_per_print': inf, 'fp16': {'enabled': True, 'auto_cast': True}} ``` **Note**: 1. Remaining `"auto"` values are handled in `accelerator.prepare()` call as explained in point 2 of `Important code changes when using DeepSpeed Config File`. 2. Only when `gradient_accumulation_steps` is `auto`, the value passed while creating `Accelerator` object via `Accelerator(gradient_accumulation_steps=k)` will be used. When using DeepSpeed Plugin, the value from it will be used and it will overwrite the value passed while creating Accelerator object. ## Saving and loading 1. Saving and loading of models is unchanged for ZeRO Stage-1 and Stage-2. 2. under ZeRO Stage-3, `state_dict` contains just the placeholders since the model weights are partitioned across multiple GPUs. ZeRO Stage-3 has 2 options: a. Saving the entire 16bit model weights to directly load later on using `model.load_state_dict(torch.load(pytorch_model.bin))`. For this, either set `zero_optimization.stage3_gather_16bit_weights_on_model_save` to True in DeepSpeed Config file or set `zero3_save_16bit_model` to True in DeepSpeed Plugin. **Note that this option requires consolidation of the weights on one GPU it can be slow and memory demanding, so only use this feature when needed.** Below is the snippet from `examples/by_feature/deepspeed_with_config_support.py` showing this: ```python unwrapped_model = accelerator.unwrap_model(model) # New Code # # Saves the whole/unpartitioned fp16 model when in ZeRO Stage-3 to the output directory if # `stage3_gather_16bit_weights_on_model_save` is True in DeepSpeed Config file or # `zero3_save_16bit_model` is True in DeepSpeed Plugin. # For Zero Stages 1 and 2, models are saved as usual in the output directory. # The model name saved is `pytorch_model.bin` unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save, state_dict=accelerator.get_state_dict(model), ) ``` b. To get 32bit weights, first save the model using `model.save_checkpoint()`. Below is the snippet from `examples/by_feature/deepspeed_with_config_support.py` showing this: ```python success = model.save_checkpoint(PATH, ckpt_id, checkpoint_state_dict) status_msg = f"checkpointing: PATH={PATH}, ckpt_id={ckpt_id}" if success: logging.info(f"Success {status_msg}") else: logging.warning(f"Failure {status_msg}") ``` This will create ZeRO model and optimizer partitions along with `zero_to_fp32.py` script in checkpoint directory. You can use this script to do offline consolidation. It requires no configuration files or GPUs. Here is an example of its usage: ```bash $ cd /path/to/checkpoint_dir $ ./zero_to_fp32.py . 
pytorch_model.bin Processing zero checkpoint at global_step1 Detected checkpoint of type zero stage 3, world_size: 2 Saving fp32 state dict to pytorch_model.bin (total_numel=60506624) ``` To get 32bit model for saving/inference, you can perform: ```python from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint unwrapped_model = accelerator.unwrap_model(model) fp32_model = load_state_dict_from_zero_checkpoint(unwrapped_model, checkpoint_dir) ``` If you are only interested in the `state_dict`, you can do the following: ```python from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) ``` Note that all these functions require ~2x memory (general RAM) of the size of the final checkpoint. ## ZeRO Inference DeepSpeed ZeRO Inference supports ZeRO stage 3 with ZeRO-Infinity. It uses the same ZeRO protocol as training, but it doesn't use an optimizer and a lr scheduler and only stage 3 is relevant. With accelerate integration, you just need to prepare the model and dataloader as shown below: ```python model, eval_dataloader = accelerator.prepare(model, eval_dataloader) ``` ## Few caveats to be aware of 1. Current integration doesn’t support Pipeline Parallelism of DeepSpeed. 2. Current integration doesn’t support `mpu`, limiting the tensor parallelism which is supported in Megatron-LM. 3. Current integration doesn’t support multiple models. ## Multi-node DeepSpeed DeepSpeed supports multi-node inference and training over a variety of different launchers. You can specify a different launcher by setting the `deepspeed_multinode_launcher` config in the CLI or in the DeepSpeed config file. Currently, accelerate supports passing configuration for the following DeepSpeed multi-node launchers: `pdsh` (default), `standard`, `openmpi`, `mvapich`, `mpich`, `slurm`, `nossh` (requires DeepSpeed >= 0.14.5). Please read the [DeepSpeed documentation](https://www.deepspeed.ai/getting-started/#resource-configuration-multi-node) for more information on the different launchers. By default, DeepSpeed will attempt to use passwordless SSH from the main machine node to the other nodes to perform the launcher command. In this configuration, the accelerate launch command only needs to be run on the main node. If using the `nossh` launcher, you will need to run the accelerate launch command on every node using copied configuration. ## DeepSpeed Resources The documentation for the internals related to deepspeed can be found [here](../package_reference/deepspeed). - [Project's github](https://github.com/deepspeedai/DeepSpeed) - [Usage docs](https://www.deepspeed.ai/getting-started/) - [API docs](https://deepspeed.readthedocs.io/en/latest/index.html) - [Blog posts](https://www.microsoft.com/en-us/research/search/?q=deepspeed) Papers: - [ZeRO: Memory Optimizations Toward Training Trillion Parameter Models](https://arxiv.org/abs/1910.02054) - [ZeRO-Offload: Democratizing Billion-Scale Model Training](https://arxiv.org/abs/2101.06840) - [ZeRO-Infinity: Breaking the GPU Memory Wall for Extreme Scale Deep Learning](https://arxiv.org/abs/2104.07857) - [ZeRO++: Extremely Efficient Collective Communication for Giant Model Training](https://arxiv.org/abs/2306.10209) Finally, please, remember that `Accelerate` only integrates DeepSpeed, therefore if you have any problems or questions with regards to DeepSpeed usage, please, file an issue with [DeepSpeed GitHub](https://github.com/deepspeedai/DeepSpeed/issues). 
For those interested in the similarities and differences between FSDP and DeepSpeed, please check out the [concept guide here](../concept_guides/fsdp_and_deepspeed)! accelerate-1.9.0/docs/source/usage_guides/deepspeed_multiple_model.md000066400000000000000000000215171503574341000260700ustar00rootroot00000000000000 # Using multiple models with DeepSpeed This guide assumes that you have read and understood the [DeepSpeed usage guide](./deepspeed.md). Running multiple models with Accelerate and DeepSpeed is useful for: * Knowledge distillation * Post-training techniques like RLHF (see the [TRL](https://github.com/huggingface/trl) library for more examples) * Training multiple models at once Currently, Accelerate has a **very experimental API** to help you use multiple models. This tutorial will focus on two common use cases: 1. Knowledge distillation, where a smaller student model is trained to mimic a larger, better-performing teacher. If the student model fits on a single GPU, we can use ZeRO-2 for training and ZeRO-3 to shard the teacher for inference. This is significantly faster than using ZeRO-3 for both models. 2. Training multiple *disjoint* models at once. ## Knowledge distillation Knowledge distillation is a good example of using multiple models, but only training one of them. Normally, you would use a single [`utils.DeepSpeedPlugin`] for both models. However, in this case, there are two separate configurations. Accelerate allows you to create and use multiple plugins **if and only if** they are in a `dict` so that you can reference and enable the proper plugin when needed. ```python from accelerate.utils import DeepSpeedPlugin zero2_plugin = DeepSpeedPlugin(hf_ds_config="zero2_config.json") zero3_plugin = DeepSpeedPlugin(hf_ds_config="zero3_config.json") deepspeed_plugins = {"student": zero2_plugin, "teacher": zero3_plugin} ``` The `zero2_config.json` should be configured for full training (so specify `scheduler` and `optimizer` if you are not utilizing your own), while `zero3_config.json` should only be configured for the inference model, as shown in the example below. ```json { "bf16": { "enabled": "auto" }, "zero_optimization": { "stage": 3, "overlap_comm": true, "reduce_bucket_size": "auto", "stage3_prefetch_bucket_size": "auto", "stage3_param_persistence_threshold": "auto", "stage3_max_live_parameters": "auto", "stage3_max_reuse_distance": "auto", }, "train_micro_batch_size_per_gpu": 1 } ``` An example `zero2_config.json` configuration is shown below. ```json { "bf16": { "enabled": "auto" }, "optimizer": { "type": "AdamW", "params": { "lr": "auto", "weight_decay": "auto", "torch_adam": true, "adam_w_mode": true } }, "scheduler": { "type": "WarmupLR", "params": { "warmup_min_lr": "auto", "warmup_max_lr": "auto", "warmup_num_steps": "auto" } }, "zero_optimization": { "stage": 2, "offload_optimizer": { "device": "cpu", "pin_memory": true }, }, "gradient_accumulation_steps": 1, "gradient_clipping": "auto", "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", } ``` DeepSpeed will raise an error if `train_micro_batch_size_per_gpu` isn't specified, even if this particular model isn't being trained. From here, create a single [`Accelerator`] and pass in both configurations. ```python from accelerate import Accelerator accelerator = Accelerator(deepspeed_plugins=deepspeed_plugins) ``` Now let's see how to use them. ### Student model By default, Accelerate sets the first item in the `dict` as the default or enabled plugin (`"student"` plugin). 
Verify this by using the [`utils.deepspeed.get_active_deepspeed_plugin`] function to see which plugin is enabled. ```python active_plugin = get_active_deepspeed_plugin(accelerator.state) assert active_plugin is deepspeed_plugins["student"] ``` [`AcceleratorState`] also keeps the active DeepSpeed plugin saved in `state.deepspeed_plugin`. ```python assert active_plugin is accelerator.deepspeed_plugin ``` Since `student` is the currently active plugin, let's go ahead and prepare the model, optimizer, and scheduler. ```python student_model, optimizer, scheduler = ... student_model, optimizer, scheduler, train_dataloader = accelerator.prepare(student_model, optimizer, scheduler, train_dataloader) ``` Now it's time to deal with the teacher model. ### Teacher model First, you need to specify in [`Accelerator`] that the `zero3_config.json` configuration should be used. ```python accelerator.state.select_deepspeed_plugin("teacher") ``` This disables the `"student"` plugin and enables the `"teacher"` plugin instead. The DeepSpeed stateful config inside of Transformers is updated, and it changes which plugin configuration gets called when using `deepspeed.initialize()`. This allows you to use the automatic `deepspeed.zero.Init` context manager integration Transformers provides. ```python teacher_model = AutoModel.from_pretrained(...) teacher_model = accelerator.prepare(teacher_model) ``` Otherwise, you should manually initialize the model with `deepspeed.zero.Init`. ```python with deepspeed.zero.Init(accelerator.deepspeed_plugin.config): model = MyModel(...) ``` ### Training From here, your training loop can be whatever you like, as long as `teacher_model` is never being trained on. ```python teacher_model.eval() student_model.train() for batch in train_dataloader: with torch.no_grad(): output_teacher = teacher_model(**batch) output_student = student_model(**batch) # Combine the losses or modify it in some way loss = output_teacher.loss + output_student.loss accelerator.backward(loss) optimizer.step() scheduler.step() optimizer.zero_grad() ``` ## Train multiple disjoint models Training multiple models is a more complicated scenario. In its current state, we assume each model is **completely disjointed** from the other during training. This scenario still requires two [`utils.DeepSpeedPlugin`]'s to be made. However, you also need a second [`Accelerator`], since different `deepspeed` engines are being called at different times. A single [`Accelerator`] can only carry one instance at a time. Since the [`state.AcceleratorState`] is a stateful object though, it is already aware of both [`utils.DeepSpeedPlugin`]'s available. You can just instantiate a second [`Accelerator`] with no extra arguments. ```python first_accelerator = Accelerator(deepspeed_plugins=deepspeed_plugins) second_accelerator = Accelerator() ``` You can call either `first_accelerator.state.select_deepspeed_plugin()` to enable or disable a particular plugin, and then call [`prepare`]. ```python # can be `accelerator_0`, `accelerator_1`, or by calling `AcceleratorState().select_deepspeed_plugin(...)` first_accelerator.state.select_deepspeed_plugin("first_model") first_model = AutoModel.from_pretrained(...) 
# For this example, `get_training_items` is a nonexistent function that gets the setup we need for training
first_optimizer, first_scheduler, train_dl, eval_dl = get_training_items(first_model)
first_model, first_optimizer, first_scheduler, train_dl, eval_dl = first_accelerator.prepare(
    first_model, first_optimizer, first_scheduler, train_dl, eval_dl
)

second_accelerator.state.select_deepspeed_plugin("second_model")
second_model = AutoModel.from_pretrained(...)
# For this example, `get_training_items` is a nonexistent function that gets the setup we need for training
second_optimizer, second_scheduler, _, _ = get_training_items(second_model)
second_model, second_optimizer, second_scheduler = second_accelerator.prepare(
    second_model, second_optimizer, second_scheduler
)
```

And now you can train:

```python
for batch in dl:
    outputs1 = first_model(**batch)
    first_accelerator.backward(outputs1.loss)
    first_optimizer.step()
    first_scheduler.step()
    first_optimizer.zero_grad()

    outputs2 = second_model(**batch)
    second_accelerator.backward(outputs2.loss)
    second_optimizer.step()
    second_scheduler.step()
    second_optimizer.zero_grad()
```

## Resources

To see more examples, please check out the [related tests](https://github.com/huggingface/accelerate/blob/main/src/accelerate/test_utils/scripts/external_deps/test_ds_multiple_model.py) currently in Accelerate.

accelerate-1.9.0/docs/source/usage_guides/distributed_inference.md

# Distributed inference

Distributed inference can fall into three brackets:

1. Loading an entire model onto each GPU and sending chunks of a batch through each GPU's model copy at a time
2. Loading parts of a model onto each GPU and processing a single input at one time
3. Loading parts of a model onto each GPU and using what is called scheduled Pipeline Parallelism to combine the two prior techniques.

We're going to go through the first and the last bracket, showcasing how to do each as they are more realistic scenarios.

## Sending chunks of a batch automatically to each loaded model

This is the most memory-intensive solution, as it requires each GPU to keep a full copy of the model in memory at a given time.

Normally when doing this, users send the model to a specific device to load it from the CPU, and then move each prompt to a different device.

A basic pipeline using the `diffusers` library might look something like so:

```python
import torch
import torch.distributed as dist
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
```

This is then followed by performing inference based on the specific prompt:

```python
def run_inference(rank, world_size):
    dist.init_process_group("nccl", rank=rank, world_size=world_size)
    pipe.to(rank)

    if torch.distributed.get_rank() == 0:
        prompt = "a dog"
    elif torch.distributed.get_rank() == 1:
        prompt = "a cat"

    result = pipe(prompt).images[0]
    result.save(f"result_{rank}.png")
```

One will notice how we have to check the rank to know what prompt to send, which can be a bit tedious.

A user might then also think that with Accelerate, using the `Accelerator` to prepare a dataloader for such a task might also be a simple way to manage this. (To learn more, check out the relevant section in the [Quick Tour](../quicktour#distributed-evaluation))

Can it manage it? Yes. Does it add unneeded extra code, however? Also yes.
With Accelerate, we can simplify this process by using the [`Accelerator.split_between_processes`] context manager (which also exists in `PartialState` and `AcceleratorState`). This function will automatically split whatever data you pass to it (be it a prompt, a set of tensors, a dictionary of the prior data, etc.) across all the processes (with a potential to be padded) for you to use right away. Let's rewrite the above example using this context manager: ```python import torch from accelerate import PartialState # Can also be Accelerator or AcceleratorState from diffusers import DiffusionPipeline pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16) distributed_state = PartialState() pipe.to(distributed_state.device) # Assume two processes with distributed_state.split_between_processes(["a dog", "a cat"]) as prompt: result = pipe(prompt).images[0] result.save(f"result_{distributed_state.process_index}.png") ``` And then to launch the code, we can use the Accelerate: If you have generated a config file to be used using `accelerate config`: ```bash accelerate launch distributed_inference.py ``` If you have a specific config file you want to use: ```bash accelerate launch --config_file my_config.json distributed_inference.py ``` Or if don't want to make any config files and launch on two GPUs: > Note: You will get some warnings about values being guessed based on your system. To remove these you can do `accelerate config default` or go through `accelerate config` to create a config file. ```bash accelerate launch --num_processes 2 distributed_inference.py ``` We've now reduced the boilerplate code needed to split this data to a few lines of code quite easily. But what if we have an odd distribution of prompts to GPUs? For example, what if we have 3 prompts, but only 2 GPUs? Under the context manager, the first GPU would receive the first two prompts and the second GPU the third, ensuring that all prompts are split and no overhead is needed. *However*, what if we then wanted to do something with the results of *all the GPUs*? (Say gather them all and perform some kind of post processing) You can pass in `apply_padding=True` to ensure that the lists of prompts are padded to the same length, with extra data being taken from the last sample. This way all GPUs will have the same number of prompts, and you can then gather the results. This is only needed when trying to perform an action such as gathering the results, where the data on each device needs to be the same length. Basic inference does not require this. For instance: ```python import torch from accelerate import PartialState # Can also be Accelerator or AcceleratorState from diffusers import DiffusionPipeline pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16) distributed_state = PartialState() pipe.to(distributed_state.device) # Assume two processes with distributed_state.split_between_processes(["a dog", "a cat", "a chicken"], apply_padding=True) as prompt: result = pipe(prompt).images ``` On the first GPU, the prompts will be `["a dog", "a cat"]`, and on the second GPU it will be `["a chicken", "a chicken"]`. Make sure to drop the final sample, as it will be a duplicate of the previous one. You can find more complex examples [here](https://github.com/huggingface/accelerate/tree/main/examples/inference/distributed) such as how to use it with LLMs. 
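Before moving on, here is a minimal sketch tying together the padding-and-gathering workflow described above. It assumes you collect the generated images with `accelerate.utils.gather_object` (the gathered objects must be picklable) and that, because the prompts are split contiguously by rank, the padded duplicates end up at the tail of the gathered list:

```python
import torch
from accelerate import PartialState
from accelerate.utils import gather_object
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
distributed_state = PartialState()
pipe.to(distributed_state.device)

prompts = ["a dog", "a cat", "a chicken"]
with distributed_state.split_between_processes(prompts, apply_padding=True) as prompt:
    images = pipe(prompt).images

# Collect every process' list of images into one flat list (ordered by process rank)
all_images = gather_object(images)
# Padding repeats the last prompt, so trim the gathered list back to the original number of prompts
all_images = all_images[: len(prompts)]

if distributed_state.is_main_process:
    for i, image in enumerate(all_images):
        image.save(f"result_{i}.png")
```

This mirrors the three-prompts-on-two-GPUs scenario above: the duplicated fourth image is simply discarded after gathering.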
## Memory-efficient pipeline parallelism (experimental) This next part will discuss using *pipeline parallelism*. This is an **experimental** API that utilizes [torch.distributed.pipelining](https://pytorch.org/docs/stable/distributed.pipelining.html#) as a native solution. The general idea with pipeline parallelism is: say you have 4 GPUs and a model big enough it can be *split* on four GPUs using `device_map="auto"`. With this method you can send in 4 inputs at a time (for example here, any amount works) and each model chunk will work on an input, then receive the next input once the prior chunk finished, making it *much* more efficient **and faster** than the method described earlier. Here's a visual taken from the PyTorch repository: ![Pipeline parallelism example](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/accelerate/pipeline_parallel.png) To illustrate how you can use this with Accelerate, we have created an [example zoo](https://github.com/huggingface/accelerate/tree/main/examples/inference) showcasing a number of different models and situations. In this tutorial, we'll show this method for GPT2 across two GPUs. Before you proceed, please make sure you have the latest PyTorch version installed by running the following: ```bash pip install torch ``` Start by creating the model on the CPU: ```{python} from transformers import GPT2ForSequenceClassification, GPT2Config config = GPT2Config() model = GPT2ForSequenceClassification(config) model.eval() ``` Next you'll need to create some example inputs to use. These help `torch.distributed.pipelining` trace the model. However you make this example will determine the relative batch size that will be used/passed through the model at a given time, so make sure to remember how many items there are! ```{python} input = torch.randint( low=0, high=config.vocab_size, size=(2, 1024), # bs x seq_len device="cpu", dtype=torch.int64, requires_grad=False, ) ``` Next we need to actually perform the tracing and get the model ready. To do so, use the [`inference.prepare_pippy`] function and it will fully wrap the model for pipeline parallelism automatically: ```{python} from accelerate.inference import prepare_pippy example_inputs = {"input_ids": input} model = prepare_pippy(model, example_args=(input,)) ``` There are a variety of parameters you can pass through to `prepare_pippy`: * `split_points` lets you determine what layers to split the model at. By default we use wherever `device_map="auto" declares, such as `fc` or `conv1`. * `num_chunks` determines how the batch will be split and sent to the model itself (so `num_chunks=1` with four split points/four GPUs will have a naive MP where a single input gets passed between the four layer split points) From here, all that's left is to actually perform the distributed inference! When passing inputs, we highly recommend to pass them in as a tuple of arguments. Using `kwargs` is supported, however, this approach is experimental. ```{python} args = some_more_arguments with torch.no_grad(): output = model(*args) ``` When finished all the data will be on the last process only: ```{python} from accelerate import PartialState if PartialState().is_last_process: print(output) ``` If you pass in `gather_output=True` to [`inference.prepare_pippy`], the output will be sent across to all the GPUs afterwards without needing the `is_last_process` check. This is `False` by default as it incurs a communication call. And that's it! 
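If you do opt into `gather_output=True`, a minimal sketch (as an alternative to the earlier `prepare_pippy` call, reusing the same un-prepared `model` and example `input` from above) might look like the following; treat it as an illustration rather than a drop-in recipe:

```{python}
# Sketch only: assumes the same `model`, `config`, and `input` created earlier in this section,
# with gather_output=True replacing the earlier prepare_pippy call
model = prepare_pippy(model, example_args=(input,), gather_output=True)

with torch.no_grad():
    output = model(input)

# With gather_output=True every process receives the output,
# so no PartialState().is_last_process check is needed here
print(output)
```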
To explore more, please check out the inference examples in the [Accelerate repo](https://github.com/huggingface/accelerate/tree/main/examples/inference/pippy) and our [documentation](../package_reference/inference) as we work on improving this integration.

accelerate-1.9.0/docs/source/usage_guides/explore.md000066400000000000000000000034621503574341000225140ustar00rootroot00000000000000

# Start Here!

Please use the interactive tool below to help you get started with learning about a particular feature of Accelerate and how to utilize it! It will provide you with a code diff, an explanation of what is going on, as well as some useful links to explore more within the documentation!

Most code examples start from the following python code before integrating Accelerate in some way:

```python
for batch in dataloader:
    optimizer.zero_grad()
    inputs, targets = batch
    inputs = inputs.to(device)
    targets = targets.to(device)
    outputs = model(inputs)
    loss = loss_function(outputs, targets)
    loss.backward()
    optimizer.step()
    scheduler.step()
```
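As a point of reference, here is a minimal sketch of what that same loop typically looks like once the core Accelerate API has been integrated; the interactive tool walks through this and many other variants in more detail:

```python
from accelerate import Accelerator

accelerator = Accelerator()
model, optimizer, dataloader, scheduler = accelerator.prepare(model, optimizer, dataloader, scheduler)

for batch in dataloader:
    optimizer.zero_grad()
    inputs, targets = batch
    # device placement is handled by the prepared dataloader
    outputs = model(inputs)
    loss = loss_function(outputs, targets)
    accelerator.backward(loss)
    optimizer.step()
    scheduler.step()
```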
accelerate-1.9.0/docs/source/usage_guides/fsdp.md000066400000000000000000000260171503574341000217730ustar00rootroot00000000000000 # Fully Sharded Data Parallel To accelerate training huge models on larger batch sizes, we can use a fully sharded data parallel model. This type of data parallel paradigm enables fitting more data and larger models by sharding the optimizer states, gradients and parameters. To read more about it and the benefits, check out the [Fully Sharded Data Parallel blog](https://pytorch.org/blog/introducing-pytorch-fully-sharded-data-parallel-api/). We have integrated the latest PyTorch's Fully Sharded Data Parallel (FSDP) training feature. All you need to do is enable it through the config. ## How it works out of the box On your machine(s) just run: ```bash accelerate config ``` and answer the questions asked. This will generate a config file that will be used automatically to properly set the default options when doing ```bash accelerate launch my_script.py --args_to_my_script ``` For instance, here is how you would run `examples/nlp_example.py` (from the root of the repo) with FSDP enabled: ```bash compute_environment: LOCAL_MACHINE debug: false distributed_type: FSDP downcast_bf16: 'no' fsdp_config: fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP fsdp_backward_prefetch_policy: BACKWARD_PRE fsdp_forward_prefetch: false fsdp_cpu_ram_efficient_loading: true fsdp_offload_params: false fsdp_sharding_strategy: FULL_SHARD fsdp_state_dict_type: SHARDED_STATE_DICT fsdp_sync_module_states: true fsdp_transformer_layer_cls_to_wrap: BertLayer fsdp_use_orig_params: true machine_rank: 0 main_training_function: main mixed_precision: bf16 num_machines: 1 num_processes: 2 rdzv_backend: static same_network: true tpu_env: [] tpu_use_cluster: false tpu_use_sudo: false use_cpu: false ``` ```bash accelerate launch examples/nlp_example.py ``` Currently, `Accelerate` supports the following config through the CLI: `fsdp_sharding_strategy`: [1] FULL_SHARD (shards optimizer states, gradients and parameters), [2] SHARD_GRAD_OP (shards optimizer states and gradients), [3] NO_SHARD (DDP), [4] HYBRID_SHARD (shards optimizer states, gradients and parameters within each node while each node has full copy), [5] HYBRID_SHARD_ZERO2 (shards optimizer states and gradients within each node while each node has full copy). For more information, please refer the official [PyTorch docs](https://pytorch.org/docs/stable/fsdp.html#torch.distributed.fsdp.ShardingStrategy). `fsdp_offload_params` : Decides Whether to offload parameters and gradients to CPU `fsdp_auto_wrap_policy`: [1] TRANSFORMER_BASED_WRAP, [2] SIZE_BASED_WRAP, [3] NO_WRAP `fsdp_transformer_layer_cls_to_wrap`: Only applicable for Transformers. When using `fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP`, a user may provide a comma-separated string of transformer layer class names (case-sensitive) to wrap, e.g., `BertLayer`, `GPTJBlock`, `T5Block`, `BertLayer,BertEmbeddings,BertSelfOutput`. This is important because submodules that share weights (e.g., embedding layers) should not end up in different FSDP wrapped units. Using this policy, wrapping happens for each block containing Multi-Head Attention followed by a couple of MLP layers. Remaining layers including the shared embeddings are conveniently wrapped in same outermost FSDP unit. Therefore, use this for transformer-based models. You can use the `model._no_split_modules` for Transformer models by answering `yes` to `Do you want to use the model's `_no_split_modules` to wrap. 
It will try to use `model._no_split_modules` when possible. `fsdp_min_num_params`: minimum number of parameters when using `fsdp_auto_wrap_policy=SIZE_BASED_WRAP`. `fsdp_backward_prefetch_policy`: [1] BACKWARD_PRE, [2] BACKWARD_POST, [3] NO_PREFETCH `fsdp_forward_prefetch`: if True, then FSDP explicitly prefetches the next upcoming all-gather while executing in the forward pass. Should only be used for static-graph models since the prefetching follows the first iteration’s execution order. i.e., if the sub-modules' order changes dynamically during the model's execution do not enable this feature. `fsdp_state_dict_type`: [1] FULL_STATE_DICT, [2] LOCAL_STATE_DICT, [3] SHARDED_STATE_DICT `fsdp_use_orig_params`: If True, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable parameters. This setting is useful in cases such as parameter-efficient fine-tuning as discussed in [this post](https://dev-discuss.pytorch.org/t/rethinking-pytorch-fully-sharded-data-parallel-fsdp-from-first-principles/1019). This option also allows one to have multiple optimizer param groups. This should be `True` when creating an optimizer before preparing/wrapping the model with FSDP. `fsdp_cpu_ram_efficient_loading`: Only applicable for Transformers models. If True, only the first process loads the pretrained model checkpoint while all other processes have empty weights. This should be set to False if you experience errors when loading the pretrained Transformers model via `from_pretrained` method. When this setting is True `fsdp_sync_module_states` also must to be True, otherwise all the processes except the main process would have random weights leading to unexpected behaviour during training. For this to work, make sure the distributed process group is initialized before calling Transformers `from_pretrained` method. When using Trainer API, the distributed process group is initialized when you create an instance of `TrainingArguments` class. `fsdp_sync_module_states`: If True, each individually wrapped FSDP unit will broadcast module parameters from rank 0. For additional and more nuanced control, you can specify other FSDP parameters via `FullyShardedDataParallelPlugin`. When creating `FullyShardedDataParallelPlugin` object, pass it the parameters that weren't part of the accelerate config or if you want to override them. The FSDP parameters will be picked based on the accelerate config file or launch command arguments and other parameters that you will pass directly through the `FullyShardedDataParallelPlugin` object will set/override that. Below is an example: ```py from accelerate import FullyShardedDataParallelPlugin from torch.distributed.fsdp.fully_sharded_data_parallel import FullOptimStateDictConfig, FullStateDictConfig fsdp_plugin = FullyShardedDataParallelPlugin( state_dict_config=FullStateDictConfig(offload_to_cpu=False, rank0_only=False), optim_state_dict_config=FullOptimStateDictConfig(offload_to_cpu=False, rank0_only=False), ) accelerator = Accelerator(fsdp_plugin=fsdp_plugin) ``` ## Saving and loading The new recommended way of checkpointing when using FSDP models is to use `SHARDED_STATE_DICT` as `StateDictType` when setting up the accelerate config. Below is the code snippet to save using `save_state` utility of accelerate. 
```py accelerator.save_state("ckpt") ``` Inspect the checkpoint folder to see model and optimizer as shards per process: ``` ls ckpt # optimizer_0 pytorch_model_0 random_states_0.pkl random_states_1.pkl scheduler.bin cd ckpt ls optimizer_0 # __0_0.distcp __1_0.distcp ls pytorch_model_0 # __0_0.distcp __1_0.distcp ``` To load them back for resuming the training, use the `load_state` utility of accelerate ```py accelerator.load_state("ckpt") ``` When using transformers `save_pretrained`, pass `state_dict=accelerator.get_state_dict(model)` to save the model state dict. Below is an example: ```diff unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save, + state_dict=accelerator.get_state_dict(model), ) ``` ### State Dict `accelerator.get_state_dict` will call the underlying `model.state_dict` implementation using `FullStateDictConfig(offload_to_cpu=True, rank0_only=True)` context manager to get the state dict only for rank 0 and it will be offloaded to CPU. You can then pass `state` into the `save_pretrained` method. There are several modes for `StateDictType` and `FullStateDictConfig` that you can use to control the behavior of `state_dict`. For more information, see the [PyTorch documentation](https://pytorch.org/docs/stable/fsdp.html). If you choose to use `StateDictType.SHARDED_STATE_DICT`, the weights of the model during `Accelerator.save_state` will be split into `n` files for each sub-split on the model. To merge them back into a single dictionary to load back into the model later after training you can use the `merge_weights` utility: ```py from accelerate.utils import merge_fsdp_weights # Our weights are saved usually in a `pytorch_model_fsdp_{model_number}` folder merge_fsdp_weights("pytorch_model_fsdp_0", "output_path", safe_serialization=True) ``` The final output will then either be saved to `model.safetensors` or `pytorch_model.bin` (if `safe_serialization=False` is passed). This can also be called using the CLI: ```bash accelerate merge-weights pytorch_model_fsdp_0/ output_path ``` ## Mapping between FSDP sharding strategies and DeepSpeed ZeRO Stages * `FULL_SHARD` maps to the DeepSpeed `ZeRO Stage-3`. Shards optimizer states, gradients and parameters. * `SHARD_GRAD_OP` maps to the DeepSpeed `ZeRO Stage-2`. Shards optimizer states and gradients. * `NO_SHARD` maps to `ZeRO Stage-0`. No sharding wherein each GPU has full copy of model, optimizer states and gradients. * `HYBRID_SHARD` maps to `ZeRO++ Stage-3` wherein `zero_hpz_partition_size=`. Here, this will shard optimizer states, gradients and parameters within each node while each node has full copy. ## A few caveats to be aware of - In case of multiple models, pass the optimizers to the prepare call in the same order as corresponding models else `accelerator.save_state()` and `accelerator.load_state()` will result in wrong/unexpected behaviour. - This feature is incompatible with `--predict_with_generate` in the `run_translation.py` script of `Transformers` library. For more control, users can leverage the `FullyShardedDataParallelPlugin`. After creating an instance of this class, users can pass it to the Accelerator class instantiation. For more information on these options, please refer to the PyTorch [FullyShardedDataParallel](https://github.com/pytorch/pytorch/blob/0df2e863fbd5993a7b9e652910792bd21a516ff3/torch/distributed/fsdp/fully_sharded_data_parallel.py#L236) code. 
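To illustrate the ordering caveat above, here is a minimal sketch of preparing two models and their optimizers; the variable names are placeholders, and the key point is simply that each optimizer is passed in the same order as its corresponding model:

```py
# Keep each optimizer in the same order as its corresponding model so that
# `accelerator.save_state()` / `accelerator.load_state()` behave as expected.
model_1, optimizer_1, model_2, optimizer_2 = accelerator.prepare(
    model_1, optimizer_1, model_2, optimizer_2
)
```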
For those interested in the similarities and differences between FSDP and DeepSpeed, please check out the [concept guide here](../concept_guides/fsdp_and_deepspeed)!

accelerate-1.9.0/docs/source/usage_guides/gaudi.md000066400000000000000000000050371503574341000221270ustar00rootroot00000000000000

# Intel Gaudi

Users can take advantage of Intel Gaudi AI accelerators for significantly faster and cost-effective model training and inference. The Intel Gaudi AI accelerator family currently includes three product generations: [Intel Gaudi 1](https://habana.ai/products/gaudi/), [Intel Gaudi 2](https://habana.ai/products/gaudi2/), and [Intel Gaudi 3](https://habana.ai/products/gaudi3/). Each server is equipped with 8 devices, known as Habana Processing Units (HPUs), providing 128GB of memory on Gaudi 3, 96GB on Gaudi 2, and 32GB on the first-gen Gaudi. For more details on the underlying hardware architecture, check out the [Gaudi Architecture Overview](https://docs.habana.ai/en/latest/Gaudi_Overview/Gaudi_Architecture.html).

## How it works out of the box

It is enabled by default if an Intel Gaudi device is detected. To disable it, pass the `--cpu` flag to the `accelerate launch` command or answer the corresponding question in the `accelerate config` questionnaire.

You can directly run the following script to test it out on Intel Gaudi:

```bash
accelerate launch /examples/cv_example.py --data_dir images
```

## Limitations

The following features are not part of the Accelerate library and require [Optimum for Intel Gaudi](https://huggingface.co/docs/optimum/main/en/habana/index):

- `fast_ddp` which implements DDP by applying an all-reduce on gradients instead of the Torch DDP wrapper.
- `minimize_memory` which is used for fp8 training and enables keeping fp8 weights in memory between the forward and backward passes, leading to a smaller memory footprint at the cost of additional fp8 casts.
- `context_parallel_size` which is used for Context/Sequence Parallelism (CP/SP) and partitions the network inputs and activations along the sequence dimension to reduce memory footprint and increase throughput.

accelerate-1.9.0/docs/source/usage_guides/gradient_accumulation.md000066400000000000000000000512251503574341000253770ustar00rootroot00000000000000

# Performing gradient accumulation with Accelerate

Gradient accumulation is a technique where you can train on bigger batch sizes than your machine would normally be able to fit into memory. This is done by accumulating gradients over several batches, and only stepping the optimizer after a certain number of batches have been performed.

While technically standard gradient accumulation code would work fine in a distributed setup, it is not the most efficient method for doing so and you may experience considerable slowdowns!

In this tutorial you will see how to quickly set up gradient accumulation and perform it with the utilities provided in Accelerate, which can amount to adding just one new line of code!
This example will use a very simplistic PyTorch training loop that performs gradient accumulation every two batches: ```python device = "cuda" model.to(device) gradient_accumulation_steps = 2 for index, batch in enumerate(training_dataloader): inputs, targets = batch inputs = inputs.to(device) targets = targets.to(device) outputs = model(inputs) loss = loss_function(outputs, targets) loss = loss / gradient_accumulation_steps loss.backward() if (index + 1) % gradient_accumulation_steps == 0: optimizer.step() scheduler.step() optimizer.zero_grad() ``` ## Converting it to Accelerate First the code shown earlier will be converted to utilize Accelerate without the special gradient accumulation helper: ```diff + from accelerate import Accelerator + accelerator = Accelerator() + model, optimizer, training_dataloader, scheduler = accelerator.prepare( + model, optimizer, training_dataloader, scheduler + ) for index, batch in enumerate(training_dataloader): inputs, targets = batch - inputs = inputs.to(device) - targets = targets.to(device) outputs = model(inputs) loss = loss_function(outputs, targets) loss = loss / gradient_accumulation_steps + accelerator.backward(loss) if (index+1) % gradient_accumulation_steps == 0: optimizer.step() scheduler.step() optimizer.zero_grad() ``` In its current state, this code is not going to perform gradient accumulation efficiently due to a process called gradient synchronization. Read more about that in the [Concepts tutorial](../concept_guides/gradient_synchronization)! ## Letting Accelerate handle gradient accumulation All that is left now is to let Accelerate handle the gradient accumulation for us. To do so you should pass in a `gradient_accumulation_steps` parameter to [`Accelerator`], dictating the number of steps to perform before each call to `step()` and how to automatically adjust the loss during the call to [`~Accelerator.backward`]: ```diff from accelerate import Accelerator - accelerator = Accelerator() + accelerator = Accelerator(gradient_accumulation_steps=2) ``` Alternatively, you can pass in a `gradient_accumulation_plugin` parameter to the [`Accelerator`] object's `__init__`, which will allow you to further customize the gradient accumulation behavior. Read more about that in the [GradientAccumulationPlugin](../package_reference/accelerator#accelerate.utils.GradientAccumulationPlugin) docs. From here you can use the [`~Accelerator.accumulate`] context manager from inside your training loop to automatically perform the gradient accumulation for you! You just wrap it around the entire training part of our code: ```diff - for index, batch in enumerate(training_dataloader): + for batch in training_dataloader: + with accelerator.accumulate(model): inputs, targets = batch outputs = model(inputs) ``` You can remove all the special checks for the step number and the loss adjustment: ```diff - loss = loss / gradient_accumulation_steps accelerator.backward(loss) - if (index+1) % gradient_accumulation_steps == 0: optimizer.step() scheduler.step() optimizer.zero_grad() ``` As you can see the [`Accelerator`] is able to keep track of the batch number you are on and it will automatically know whether to step through the prepared optimizer and how to adjust the loss. Typically with gradient accumulation, you would need to adjust the number of steps to reflect the change in total batches you are training on. Accelerate automagically does this for you by default. Behind the scenes we instantiate a [`GradientAccumulationPlugin`] configured to do this. 
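If you prefer to configure this explicitly, a minimal sketch of passing the plugin yourself could look like the following; `num_steps` mirrors the `gradient_accumulation_steps` value, and the other available options are listed in the [`GradientAccumulationPlugin`] reference:

```python
from accelerate import Accelerator
from accelerate.utils import GradientAccumulationPlugin

# num_steps sets how many batches are accumulated before each optimizer step;
# see the GradientAccumulationPlugin reference for the remaining options
plugin = GradientAccumulationPlugin(num_steps=2)
accelerator = Accelerator(gradient_accumulation_plugin=plugin)
```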
The [`state.GradientState`] is sync'd with the active dataloader being iterated upon. As such it assumes naively that when we have reached the end of the dataloader everything will sync and a step will be performed. To disable this, set `sync_with_dataloader` to be `False` in the [`GradientAccumulationPlugin`]: ```{python} from accelerate import Accelerator from accelerate.utils import GradientAccumulationPlugin plugin = GradientAccumulationPlugin(sync_with_dataloader=False) accelerator = Accelerator(..., gradient_accumulation_plugin=plugin) ``` ## The finished code Below is the finished implementation for performing gradient accumulation with Accelerate ```python from accelerate import Accelerator accelerator = Accelerator(gradient_accumulation_steps=2) model, optimizer, training_dataloader, scheduler = accelerator.prepare( model, optimizer, training_dataloader, scheduler ) for batch in training_dataloader: with accelerator.accumulate(model): inputs, targets = batch outputs = model(inputs) loss = loss_function(outputs, targets) accelerator.backward(loss) optimizer.step() scheduler.step() optimizer.zero_grad() ``` It's important that **only one forward/backward** should be done inside the context manager `with accelerator.accumulate(model)`. To learn more about what magic this wraps around, read the [Gradient Synchronization concept guide](../concept_guides/gradient_synchronization) ## Self-contained example Here is a self-contained example that you can run to see gradient accumulation in action with Accelerate: ```python import torch import copy from accelerate import Accelerator from accelerate.utils import set_seed from torch.utils.data import TensorDataset, DataLoader # seed set_seed(0) # define toy inputs and labels x = torch.tensor([1., 2., 3., 4., 5., 6., 7., 8.]) y = torch.tensor([2., 4., 6., 8., 10., 12., 14., 16.]) gradient_accumulation_steps = 4 per_device_batch_size = len(x) // gradient_accumulation_steps # define dataset and dataloader dataset = TensorDataset(x, y) dataloader = DataLoader(dataset, batch_size=per_device_batch_size) # define model, optimizer and loss function class SimpleLinearModel(torch.nn.Module): def __init__(self): super(SimpleLinearModel, self).__init__() self.weight = torch.nn.Parameter(torch.zeros((1, 1))) def forward(self, inputs): return inputs @ self.weight model = SimpleLinearModel() model_clone = copy.deepcopy(model) criterion = torch.nn.MSELoss() model_optimizer = torch.optim.SGD(model.parameters(), lr=0.02) accelerator = Accelerator(gradient_accumulation_steps=gradient_accumulation_steps) model, model_optimizer, dataloader = accelerator.prepare(model, model_optimizer, dataloader) model_clone_optimizer = torch.optim.SGD(model_clone.parameters(), lr=0.02) print(f"initial model weight is {model.weight.mean().item():.5f}") print(f"initial model weight is {model_clone.weight.mean().item():.5f}") for i, (inputs, labels) in enumerate(dataloader): with accelerator.accumulate(model): inputs = inputs.view(-1, 1) print(i, inputs.flatten()) labels = labels.view(-1, 1) outputs = model(inputs) loss = criterion(outputs, labels) accelerator.backward(loss) model_optimizer.step() model_optimizer.zero_grad() loss = criterion(x.view(-1, 1) @ model_clone.weight, y.view(-1, 1)) model_clone_optimizer.zero_grad() loss.backward() model_clone_optimizer.step() print(f"w/ accumulation, the final model weight is {model.weight.mean().item():.5f}") print(f"w/o accumulation, the final model weight is {model_clone.weight.mean().item():.5f}") ``` ``` initial model weight is 
0.00000
initial model weight is 0.00000
0 tensor([1., 2.])
1 tensor([3., 4.])
2 tensor([5., 6.])
3 tensor([7., 8.])
w/ accumulation, the final model weight is 2.04000
w/o accumulation, the final model weight is 2.04000
```

## Gradient accumulation on training samples of variable size

As pointed out in this [blog post](https://huggingface.co/blog/gradient_accumulation), a common error occurs when performing gradient accumulation on training samples of variable size:

> [...] for gradient accumulation across token-level tasks like causal LM training, the correct loss should be computed by the **total loss across all batches in a gradient accumulation step** divided by the **total number of all non padding tokens in those batches**. This is not the same as the average of the per-batch loss values.

In other words, some adjustments must be made on losses that operate on a token-level basis.

### Skeleton code

```python
from accelerate import Accelerator
import math
import contextlib

gradient_accumulation_steps = 2
accelerator = Accelerator(gradient_accumulation_steps=gradient_accumulation_steps)
model, optimizer, training_dataloader, scheduler = accelerator.prepare(
    model, optimizer, training_dataloader, scheduler
)

training_iterator = iter(training_dataloader)
num_samples_in_epoch = len(training_dataloader)
remainder = num_samples_in_epoch % gradient_accumulation_steps
remainder = remainder if remainder != 0 else gradient_accumulation_steps
total_updates = math.ceil(num_samples_in_epoch / gradient_accumulation_steps)

total_batched_samples = 0
for update_step in range(total_updates):
    # In order to correctly compute the total number of non-padded tokens on which we'll compute the cross-entropy loss
    # we need to pre-load the full local batch - i.e the next per_device_batch_size * accumulation_steps samples
    batch_samples = []
    num_batches_in_step = gradient_accumulation_steps if update_step != (total_updates - 1) else remainder
    for _ in range(num_batches_in_step):
        batch_samples += [next(training_iterator)]

    # get local num items in batch
    num_items_in_batch = sum([(batch["labels"].ne(-100)).sum() for batch in batch_samples])
    # to compute it correctly in a multi-device DDP training, we need to gather the total number of items in the full batch.
    num_items_in_batch = accelerator.gather(num_items_in_batch).sum().item()

    for i, batch in enumerate(batch_samples):
        # if we perform gradient accumulation in a multi-device set-up, we want to avoid unnecessary communications when accumulating
        # cf: https://muellerzr.github.io/blog/gradient_accumulation.html
        if (i < len(batch_samples) - 1 and accelerator.num_processes > 1):
            ctx = model.no_sync
        else:
            ctx = contextlib.nullcontext

        total_batched_samples += 1

        with ctx():
            inputs, targets = batch
            outputs = model(inputs)
            loss = loss_function(outputs, targets)  # the loss function should sum over samples rather than averaging

            # We multiply by num_processes because the DDP calculates the average gradient across all devices whereas dividing by num_items_in_batch already takes into account all devices
            # Same reason for gradient_accumulation_steps, but this time it's Accelerate that calculates the average gradient across the accumulated steps
            loss = (loss * gradient_accumulation_steps * accelerator.num_processes) / num_items_in_batch

            accelerator.backward(loss)

    # Sync gradients and perform optimization steps once every gradient_accumulation_steps
    optimizer.step()
    scheduler.step()
    optimizer.zero_grad()
```

### Self-contained causal LM example

```py
import torch
import copy
from accelerate import Accelerator
from accelerate.utils import set_seed
from accelerate.logging import get_logger
from torch.utils.data import Dataset, DataLoader
import math
import contextlib

# seed
set_seed(0)
logger = get_logger(__name__)


class MyDataset(Dataset):
    def __init__(self, num_samples):
        super().__init__()
        self.len = num_samples

    def __getitem__(self, index):
        input_ids = torch.arange(1, index + 2, dtype=torch.float32)
        labels = torch.remainder(input_ids, 2)
        return {"input_ids": input_ids, "labels": labels}

    def __len__(self):
        return self.len


def collate_fn(features):
    input_ids = torch.nn.utils.rnn.pad_sequence([f["input_ids"] for f in features], batch_first=True, padding_value=-100)
    labels = torch.nn.utils.rnn.pad_sequence([f["labels"] for f in features], batch_first=True, padding_value=-100)
    return {"input_ids": input_ids[..., None], "labels": labels[..., None]}


# define toy inputs and labels
gradient_accumulation_steps = 2
per_device_batch_size = 4

# define accelerator
accelerator = Accelerator(gradient_accumulation_steps=gradient_accumulation_steps)

# define dataset and dataloader
# for this toy example, we'll compute gradient descent over one single global batch
dataset = MyDataset(per_device_batch_size * gradient_accumulation_steps * accelerator.num_processes)
dataloader = DataLoader(dataset, batch_size=per_device_batch_size, collate_fn=collate_fn)

# define model, model_optimizer and loss function
model = torch.nn.Linear(1, 2, bias=False)
model_clone = copy.deepcopy(model)
criterion = torch.nn.CrossEntropyLoss(reduction="sum")  # must sum over samples rather than averaging
model_optimizer = torch.optim.SGD(model.parameters(), lr=0.08)

logger.warning(f"initial model weight is {model.weight.detach().cpu().squeeze()}")
logger.warning(f"initial model clone weight is {model_clone.weight.detach().cpu().squeeze()}")

# prepare artifacts - accelerator handles device placement and dataloader splitting
model, model_optimizer = accelerator.prepare(model, model_optimizer)
dataloader = accelerator.prepare_data_loader(dataloader, device_placement=True)
training_iterator = iter(dataloader)

num_samples_in_epoch = len(dataloader)
remainder = num_samples_in_epoch % gradient_accumulation_steps
remainder = remainder if remainder != 0 else gradient_accumulation_steps
total_gradient_updates = math.ceil(num_samples_in_epoch / gradient_accumulation_steps)

total_batched_samples = 0
for update_step in range(total_gradient_updates):
    # In order to correctly compute the total number of non-padded tokens on which we'll compute the cross-entropy loss
    # we need to pre-load the full local batch - i.e the next per_device_batch_size * accumulation_steps samples
    batch_samples = []
    num_batches_in_step = gradient_accumulation_steps if update_step != (total_gradient_updates - 1) else remainder
    for _ in range(num_batches_in_step):
        batch_samples += [next(training_iterator)]

    # get local num items in batch
    local_num_items_in_batch = sum([(batch["labels"].ne(-100)).sum() for batch in batch_samples])
    logger.warning(f"Step {update_step} - Device {accelerator.process_index} - num items in the local batch {local_num_items_in_batch}", main_process_only=False)

    # to compute it correctly in a multi-device DDP training, we need to gather the total number of items in the full batch.
    num_items_in_batch = accelerator.gather(local_num_items_in_batch).sum().item()
    logger.warning(f"Total num items {num_items_in_batch}")

    for i, batch in enumerate(batch_samples):
        inputs, labels = batch["input_ids"], batch["labels"]
        total_batched_samples += 1

        # if we perform gradient accumulation in a multi-device set-up, we want to avoid unnecessary communications when accumulating
        # cf: https://muellerzr.github.io/blog/gradient_accumulation.html
        if (i < len(batch_samples) - 1 and accelerator.num_processes > 1):
            ctx = model.no_sync
        else:
            ctx = contextlib.nullcontext

        with ctx():
            outputs = model(inputs)
            loss = criterion(outputs.view(-1, 2), labels.view(-1).to(torch.int64))

            # We multiply by num_processes because the DDP calculates the average gradient across all devices whereas dividing by num_items_in_batch already takes into account all devices
            # Same reason for gradient_accumulation_steps, but this time it's Accelerate that calculates the average gradient across the accumulated steps
            loss = (loss * gradient_accumulation_steps * accelerator.num_processes) / num_items_in_batch

            accelerator.backward(loss)
    model_optimizer.step()
    model_optimizer.zero_grad()

logger.warning(f"Device {accelerator.process_index} - w/ accumulation, the final model weight is {accelerator.unwrap_model(model).weight.detach().cpu().squeeze()}", main_process_only=False)

# We now do the same operation but on a single device and without gradient accumulation
if accelerator.is_main_process:
    # prepare one single entire batch
    dataloader = DataLoader(dataset, batch_size=len(dataset), collate_fn=collate_fn)
    full_batch_without_accum = next(iter(dataloader))
    total_inputs, total_labels = full_batch_without_accum["input_ids"], full_batch_without_accum["labels"]
    model_clone_optimizer = torch.optim.SGD(model_clone.parameters(), lr=0.08)

    # train the cloned model
    loss = torch.nn.CrossEntropyLoss(reduction="mean")(model_clone(total_inputs).view(-1, 2), total_labels.view(-1).to(torch.int64))
    model_clone_optimizer.zero_grad()
    loss.backward()
    model_clone_optimizer.step()

    # We should have the same final weights.
logger.warning(f"w/o accumulation, the final model weight is {model_clone.weight.detach().cpu().squeeze()}") ``` Results on a single device - gradient accumulation steps set to 1 and batch_size set to 8: ``` initial model weight is tensor([-0.0075, 0.5364]) initial model clone weight is tensor([-0.0075, 0.5364]) Step 0 - Device 0 - num items in the local batch 36 Total num items 36 Device 0 - w/ accumulation, the final model weight is tensor([0.0953, 0.4337]) w/o accumulation, the final model weight is tensor([0.0953, 0.4337]) ``` Results on a two devices set-up - gradient accumulation steps set to 2 and batch_size set to 4. ``` initial model weight is tensor([-0.0075, 0.5364]) initial model clone weight is tensor([-0.0075, 0.5364]) Step 0 - Device 0 - num items in the local batch 52 Step 0 - Device 1 - num items in the local batch 84 Total num items 136 Device 1 - w/ accumulation, the final model weight is tensor([0.2117, 0.3172]) Device 0 - w/ accumulation, the final model weight is tensor([0.2117, 0.3172]) w/o accumulation, the final model weight is tensor([0.2117, 0.3172]) ``` ### To go further: Please find a complete example script on a real world training run in the examples folder at the path [`accelerate/examples/by_feature/gradient_accumulation_for_autoregressive_models.py`](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/gradient_accumulation_for_autoregressive_models.py). Running it on several training configurations with constant global batch size equal to 32 gives the following graph:
Note that the training losses are exactly the same up to training step 20. The small deviation after this training step occurs at the very end of the first epoch, because, by [default](https://huggingface.co/docs/accelerate/en/package_reference/torch_wrappers#accelerate.data_loader.prepare_data_loader.even_batches), the dataloader duplicates the samples at the beginning of the dataset when the total batch size doesn't exactly divide the dataset. accelerate-1.9.0/docs/source/usage_guides/intel_cpu.md000066400000000000000000000161741503574341000230240ustar00rootroot00000000000000 # Training on Intel CPU ## How It Works For Training optimization in CPU Accelerate has full support for Intel CPU, all you need to do is enabling it through the config. **Scenario 1**: Acceleration of No distributed CPU training Run accelerate config on your machine: ```bash $ accelerate config ----------------------------------------------------------------------------------------------------------------------------------------------------------- In which compute environment are you running? This machine ----------------------------------------------------------------------------------------------------------------------------------------------------------- Which type of machine are you using? No distributed training Do you want to run your training on CPU only (even if a GPU / Apple Silicon device is available)? [yes/NO]:yes Do you wish to optimize your script with torch dynamo?[yes/NO]:NO Do you want to use DeepSpeed? [yes/NO]: NO ----------------------------------------------------------------------------------------------------------------------------------------------------------- Do you wish to use FP16 or BF16 (mixed precision)? bf16 ``` This will generate a config file that will be used automatically to properly set the default options when doing ```bash accelerate launch my_script.py --args_to_my_script ``` For instance, here is how you would run the NLP example `examples/nlp_example.py` (from the root of the repo) with `default_config.yaml` which is generated by `accelerate config` ```bash compute_environment: LOCAL_MACHINE distributed_type: 'NO' downcast_bf16: 'no' machine_rank: 0 main_training_function: main mixed_precision: bf16 num_machines: 1 num_processes: 1 rdzv_backend: static same_network: true tpu_env: [] tpu_use_cluster: false tpu_use_sudo: false use_cpu: true ``` ```bash accelerate launch examples/nlp_example.py ``` > [!CAUTION] > `accelerator.prepare` can currently only handle simultaneously preparing multiple models (and no optimizer) OR a single model-optimizer pair for training. Other attempts (e.g., two model-optimizer pairs) will raise a verbose error. To work around this limitation, consider separately using `accelerator.prepare` for each model-optimizer pair. **Scenario 2**: Acceleration of distributed CPU training we use Intel oneCCL for communication, combined with Intel® MPI library to deliver flexible, efficient, scalable cluster messaging on Intel® architecture. you could refer the [here](https://huggingface.co/docs/transformers/perf_train_cpu_many) for the installation guide Run accelerate config on your machine(node0): ```bash $ accelerate config ----------------------------------------------------------------------------------------------------------------------------------------------------------- In which compute environment are you running? 
This machine ----------------------------------------------------------------------------------------------------------------------------------------------------------- Which type of machine are you using? multi-CPU How many different machines will you use (use more than 1 for multi-node training)? [1]: 4 ----------------------------------------------------------------------------------------------------------------------------------------------------------- What is the rank of this machine? 0 What is the IP address of the machine that will host the main process? 36.112.23.24 What is the port you will use to communicate with the main process? 29500 Are all the machines on the same local network? Answer `no` if nodes are on the cloud and/or on different network hosts [YES/no]: yes Do you want accelerate to launch mpirun? [yes/NO]: yes Please enter the path to the hostfile to use with mpirun [~/hostfile]: ~/hostfile Enter the number of oneCCL worker threads [1]: 1 Do you wish to optimize your script with torch dynamo?[yes/NO]:NO How many processes should be used for distributed training? [1]:16 ----------------------------------------------------------------------------------------------------------------------------------------------------------- Do you wish to use FP16 or BF16 (mixed precision)? bf16 ``` For instance, here is how you would run the NLP example `examples/nlp_example.py` (from the root of the repo) with IPEX enabled for distributed CPU training. `default_config.yaml` which is generated by `accelerate config` ```bash compute_environment: LOCAL_MACHINE distributed_type: MULTI_CPU downcast_bf16: 'no' machine_rank: 0 main_process_ip: 36.112.23.24 main_process_port: 29500 main_training_function: main mixed_precision: bf16 mpirun_config: mpirun_ccl: '1' mpirun_hostfile: /home/user/hostfile num_machines: 4 num_processes: 16 rdzv_backend: static same_network: true tpu_env: [] tpu_use_cluster: false tpu_use_sudo: false use_cpu: true ``` Set following env and using intel MPI to launch the training In `node0`, you need to create a configuration file which contains the IP addresses of each node (for example hostfile) and pass that configuration file path as an argument. If you selected to let Accelerate launch `mpirun`, ensure that the location of your hostfile matches the path in the config. ```bash $ cat hostfile xxx.xxx.xxx.xxx #node0 ip xxx.xxx.xxx.xxx #node1 ip xxx.xxx.xxx.xxx #node2 ip xxx.xxx.xxx.xxx #node3 ip ``` Before executing `accelerate launch` command, you need source the oneCCL bindings `setvars.sh` to get your Intel MPI environment properly. Note that both the python script and environment need to be available on all of the machines being used for multi-CPU training. ```bash oneccl_bindings_for_pytorch_path=$(python -c "from oneccl_bindings_for_pytorch import cwd; print(cwd)") source $oneccl_bindings_for_pytorch_path/env/setvars.sh accelerate launch examples/nlp_example.py ``` You can also directly launch distributed training with `mpirun` command, you need to run the following command in node0 and **16DDP** will be enabled in node0,node1,node2,node3 with BF16 mixed precision. When using this method, the python script, python environment, and accelerate config file need to be available on all of the machines used for multi-CPU training. 
```bash oneccl_bindings_for_pytorch_path=$(python -c "from oneccl_bindings_for_pytorch import cwd; print(cwd)") source $oneccl_bindings_for_pytorch_path/env/setvars.sh export CCL_WORKER_COUNT=1 export MASTER_ADDR=xxx.xxx.xxx.xxx #node0 ip export CCL_ATL_TRANSPORT=ofi mpirun -f hostfile -n 16 -ppn 4 accelerate launch examples/nlp_example.py ``` accelerate-1.9.0/docs/source/usage_guides/local_sgd.md000066400000000000000000000115651503574341000227700ustar00rootroot00000000000000 # Using Local SGD with Accelerate Local SGD is a technique for distributed training where gradients are not synchronized every step. Thus, each process updates its own version of the model weights and after a given number of steps these weights are synchronized by averaging across all processes. This improves communication efficiency and can lead to substantial training speed up especially when a computer lacks a faster interconnect such as NVLink. Unlike gradient accumulation (where improving communication efficiency requires increasing the effective batch size), Local SGD does not require changing a batch size or a learning rate / schedule. However, if necessary, Local SGD can be combined with gradient accumulation as well. In this tutorial you will see how to quickly setup Local SGD Accelerate. Compared to a standard Accelerate setup, this requires only two extra lines of code. This example will use a very simplistic PyTorch training loop that performs gradient accumulation every two batches: ```python device = "cuda" model.to(device) gradient_accumulation_steps = 2 for index, batch in enumerate(training_dataloader): inputs, targets = batch inputs = inputs.to(device) targets = targets.to(device) outputs = model(inputs) loss = loss_function(outputs, targets) loss = loss / gradient_accumulation_steps loss.backward() if (index + 1) % gradient_accumulation_steps == 0: optimizer.step() scheduler.step() optimizer.zero_grad() ``` ## Converting it to Accelerate First the code shown earlier will be converted to use Accelerate with neither a LocalSGD or a gradient accumulation helper: ```diff + from accelerate import Accelerator + accelerator = Accelerator() + model, optimizer, training_dataloader, scheduler = accelerator.prepare( + model, optimizer, training_dataloader, scheduler + ) for index, batch in enumerate(training_dataloader): inputs, targets = batch - inputs = inputs.to(device) - targets = targets.to(device) outputs = model(inputs) loss = loss_function(outputs, targets) loss = loss / gradient_accumulation_steps + accelerator.backward(loss) if (index+1) % gradient_accumulation_steps == 0: optimizer.step() scheduler.step() ``` ## Letting Accelerate handle model synchronization All that is left now is to let Accelerate handle model parameter synchronization **and** the gradient accumulation for us. For simplicity let us assume we need to synchronize every 8 steps. This is achieved by adding one `with LocalSGD` statement and one call `local_sgd.step()` after every optimizer step: ```diff +local_sgd_steps=8 +with LocalSGD(accelerator=accelerator, model=model, local_sgd_steps=8, enabled=True) as local_sgd: for batch in training_dataloader: with accelerator.accumulate(model): inputs, targets = batch outputs = model(inputs) loss = loss_function(outputs, targets) accelerator.backward(loss) optimizer.step() scheduler.step() optimizer.zero_grad() + local_sgd.step() ``` Under the hood, the Local SGD code **disables** automatic gradient synchronization (but accumulation still works as expected!). 
Instead it averages model parameters every `local_sgd_steps` steps (as well as at the end of the training loop). ## Limitations The current implementation works only with basic multi-GPU (or multi-CPU) training without, e.g., [DeepSpeed.](https://github.com/deepspeedai/DeepSpeed). ## References Although we are not aware of the true origins of this simple approach, the idea of local SGD is quite old and goes back to at least: Zhang, J., De Sa, C., Mitliagkas, I., & Ré, C. (2016). [Parallel SGD: When does averaging help?. arXiv preprint arXiv:1606.07365.](https://arxiv.org/abs/1606.07365) We credit the term Local SGD to the following paper (but there might be earlier references we are not aware of). Stich, Sebastian Urban. ["Local SGD Converges Fast and Communicates Little." ICLR 2019-International Conference on Learning Representations. No. CONF. 2019.](https://arxiv.org/abs/1805.09767) accelerate-1.9.0/docs/source/usage_guides/low_precision_training.md000066400000000000000000000175271503574341000256140ustar00rootroot00000000000000 # Low Precision Training Methods Accelerate provides integrations to train on lower precision methods using specified supported hardware through the `TransformersEngine`, `MS-AMP`, and `torchao` packages. This documentation will help guide you through what hardware is supported, how to configure your [`Accelerator`] to leverage the low precision methods, and what you can expect when training. ## What training on FP8 means To explore more of the nitty-gritty in training in FP8 with PyTorch and Accelerate, check out the [concept_guide](../concept_guides/low_precision_training) on why this can be difficult. But essentially rather than training in BF16, some (or all) aspects of training a model can be performed using 8 bits instead of 16. The challenge is doing so without degrading final performance. This is only enabled on specific NVIDIA hardware, namely: * Anything after the 3000 series consumer graphics cards (such as the 4090) * Hopper-based GPU architectures (such as the `H100` and `H200`) What this will result in is some reduction in the memory used (as we've cut the needed memory in half for some parts of training) and an increase in throughput *should* be seen as well for larger models that can replace certain layers with FP8-enabled ones. ## Configuring the Accelerator Currently three different backends for FP8 are supported (`TransformersEngine`, `torchao`, and `MS-AMP`), each with different capabilities and configurations. To use either, the same core API is used. Just pass `mixed_precision="fp8"` to either the [`Accelerator`], during `accelerate config` when prompted about mixed precision, or as part of your `config.yaml` file in the `mixed_precision` key: ```{python} from accelerate import Accelerator accelerator = Accelerator(mixed_precision="fp8") ``` By default, if `MS-AMP` is available in your environment, Accelerate will automatically utilize it as a backend. 
To specify it yourself (and customize other parts of the FP8 mixed precision setup), you can utilize one of the `RecipeKwargs` dataclasses such as [`utils.AORecipeKwargs`], [`utils.TERecipeKwargs`], or [`utils.MSAMPRecipeKwargs`]; you can also specify it in your config `yaml`/during `accelerate launch`:

```{python}
from accelerate import Accelerator
from accelerate.utils import MSAMPRecipeKwargs
kwargs = [MSAMPRecipeKwargs()]
# Or to specify the backend as `TransformersEngine` even if MS-AMP is installed
# kwargs = [TERecipeKwargs()]
# Or to use torchao
# kwargs = [AORecipeKwargs()]
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs)
```

```{yaml}
mixed_precision: fp8
fp8_config:
  amax_compute_algo: max
  amax_history_len: 1024
  backend: TE
  fp8_format: HYBRID
  interval: 1
  margin: 0
  override_linear_precision: (false, false, false)
  use_autocast_during_eval: false
```

## Configuring MS-AMP

Of these backends, `MS-AMP` is traditionally the easier one to configure as there is only a single argument: the optimization level.

Currently two levels of optimization are supported in the Accelerate integration, `"O1"` and `"O2"` (using the letter 'o', not zero).

* `"O1"` will cast the weight gradients and `all_reduce` communications to happen in 8-bit, while the rest are done in 16 bit. This reduces the general GPU memory usage and speeds up communication bandwidths.
* `"O2"` will also cast first-order optimizer states into 8 bit, while the second order states are in FP16. (Currently just the `Adam` optimizer is supported). This tries its best to minimize final accuracy degradation and will save the highest potential memory.

To specify an optimization level, pass it to the `FP8RecipeKwargs` handler by setting the `optimization_level` argument:

```{python}
from accelerate import Accelerator
from accelerate.utils import FP8RecipeKwargs
kwargs = [FP8RecipeKwargs(backend="msamp", optimization_level="O2")]
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs)
```

Or during `accelerate launch` via `--fp8_backend=msamp --fp8_opt_level=O2`

Similarly this can be set in your `config.yaml`:

```{yaml}
mixed_precision: fp8
fp8_config:
  backend: MSAMP
  opt_level: O2
```

## Configuring TransformersEngine

TransformersEngine has many options for customizing how and what FP8 calculations are performed. A full list of supported arguments and what they mean are available in [NVIDIA's documentation](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html), however they are restated as part of [`FP8KwargsHandler`]'s docstring for your convenience.

Accelerate tries to set sensible defaults, but exploring and tweaking the various parameters yourself can potentially lead to better performance.

To use it, specify `backend="te"` and modify any of the arguments you want as part of your kwarg handler:

```{python}
from accelerate import Accelerator
from accelerate.utils import FP8RecipeKwargs
kwargs = [FP8RecipeKwargs(backend="te", ...)]
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs)
```

Or during `accelerate launch` via `--fp8_backend=te ...`. Use `accelerate launch --fp8_backend=te -h` to see relevant arguments.
Similarly this can be set in your `config.yaml`:

```{yaml}
mixed_precision: fp8
fp8_config:
  amax_compute_algo: max
  amax_history_len: 1024
  backend: TE
  fp8_format: HYBRID
  interval: 1
  margin: 0
  override_linear_precision: (false, false, false)
  use_autocast_during_eval: false
```

## Configuring `torchao`

`torchao` is a [PyTorch-driven](https://github.com/pytorch/ao/tree/main/torchao/float8) hackable FP8 backend, aiming to be more approachable than the prior two engines. One of the core differences with `ao` compared to the prior two is that for numerical stability, it's found to be generally better off keeping the first *and* last layers in the model at the regular precision (be it FP32 or BF16), and then having the other layers quantized down to FP8. As a result, a config for `ao` looks a bit different:

> Note: this API is experimental and is subject to change

```{python}
from accelerate import Accelerator
from accelerate.utils import AORecipeKwargs
kwargs = [AORecipeKwargs()]
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs)
```

To learn more about the specific parameters to be used, please see the official `torchao` repo.

## Example Zoo

We have examples showcasing training with FP8 both with Accelerate and its underlying implementation available in the accelerate repo.
Currently we support scripts showcasing:

* Single GPU
* Distributed Data Parallelism (Multi-GPU)
* Fully Sharded Data Parallelism
* DeepSpeed ZeRO 1 through 3

Find out more [here](https://github.com/huggingface/accelerate/tree/main/benchmarks/fp8)

## Further Reading

To learn more about training in FP8 please check out the following resources:

* [Our concept guide](../concept_guides/low_precision_training) detailing more about both TransformersEngine and MS-AMP
* [The `transformers-engine` documentation](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html)
* [The `MS-AMP` documentation](https://azure.github.io/MS-AMP/docs/)
* [The `torchao` documentation](https://github.com/pytorch/ao/tree/main/torchao/float8)

accelerate-1.9.0/docs/source/usage_guides/megatron_lm.md000066400000000000000000000713271503574341000233470ustar00rootroot00000000000000

# Megatron-LM

[Megatron-LM](https://github.com/NVIDIA/Megatron-LM) enables training large transformer language models at scale. It provides efficient tensor, pipeline and sequence based model parallelism for pre-training transformer based Language Models such as [GPT](https://arxiv.org/abs/2005.14165) (Decoder Only), [BERT](https://arxiv.org/pdf/1810.04805.pdf) (Encoder Only) and [T5](https://arxiv.org/abs/1910.10683) (Encoder-Decoder). For detailed information and how things work behind the scenes please refer to the github [repo](https://github.com/NVIDIA/Megatron-LM).

## What is integrated?

Accelerate integrates the following features of Megatron-LM to enable large scale pre-training/finetuning of BERT (Encoder), GPT (Decoder) or T5 models (Encoder and Decoder):

a. **Tensor Parallelism (TP)**: Reduces memory footprint without much additional communication on intra-node ranks. Each tensor is split into multiple chunks with each shard residing on a separate GPU. At each step, the same mini-batch of data is processed independently and in parallel by each shard followed by syncing across all GPUs (`all-reduce` operation). In a simple transformer layer, this leads to 2 `all-reduces` in the forward path and 2 in the backward path.
For more details, please refer to the research paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/pdf/1909.08053.pdf) and this section of blogpost [The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed#tensor-parallelism). b. **Pipeline Parallelism (PP)**: Reduces memory footprint and enables large scale training via inter-node parallelization. Reduces the bubble of naive PP via PipeDream-Flush schedule/1F1B schedule and Interleaved 1F1B schedule. Layers are distributed uniformly across PP stages. For example, if a model has `24` layers and we have `4` GPUs for pipeline parallelism, each GPU will have `6` layers (24/4). For more details on schedules to reduce the idle time of PP, please refer to the research paper [Efficient Large-Scale Language Model Training on GPU Clusters Using Megatron-LM](https://arxiv.org/pdf/2104.04473.pdf) and this section of blogpost [The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed#pipeline-parallelism). c. **Sequence Parallelism (SP)**: Reduces memory footprint without any additional communication. Only applicable when using TP. It reduces activation memory required as it prevents the same copies to be on the tensor parallel ranks post `all-reduce` by replacing them with `reduce-scatter` and `no-op` operation would be replaced by `all-gather`. As `all-reduce = reduce-scatter + all-gather`, this saves a ton of activation memory at no added communication cost. To put it simply, it shards the outputs of each transformer layer along sequence dimension, e.g., if the sequence length is `1024` and the TP size is `4`, each GPU will have `256` tokens (1024/4) for each sample. This increases the batch size that can be supported for training. For more details, please refer to the research paper [Reducing Activation Recomputation in Large Transformer Models](https://arxiv.org/pdf/2205.05198.pdf). d. **Data Parallelism (DP)** via Distributed Optimizer: Reduces the memory footprint by sharding optimizer states and gradients across DP ranks (versus the traditional method of replicating the optimizer state across data parallel ranks). For example, when using Adam optimizer with mixed-precision training, each parameter accounts for 12 bytes of memory. This gets distributed equally across the GPUs, i.e., each parameter would account for 3 bytes (12/4) if we have 4 GPUs. For more details, please refer to the research paper [ZeRO: Memory Optimizations Toward Training Trillion Parameter Models](https://arxiv.org/pdf/1910.02054.pdf) and following section of blog [The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed#zero-data-parallelism). e. **Selective Activation Recomputation**: Reduces the memory footprint of activations significantly via smart activation checkpointing. It doesn't store activations occupying large memory while being fast to recompute thereby achieving great tradeoff between memory and recomputation. For example, for GPT-3, this leads to 70% reduction in required memory for activations at the expense of only 2.7% FLOPs overhead for recomputation of activations. For more details, please refer to the research paper [Reducing Activation Recomputation in Large Transformer Models](https://arxiv.org/pdf/2205.05198.pdf). f. **Fused Kernels**: Fused Softmax, Mixed Precision Fused Layer Norm and Fused gradient accumulation to weight gradient computation of linear layer. 
PyTorch JIT compiled Fused GeLU and Fused Bias+Dropout+Residual addition. g. **Support for Indexed datasets**: Efficient binary format of datasets for large scale training. Support for the `mmap`, `cached` index file and the `lazy` loader format. h. **Checkpoint reshaping and interoperability**: Utility for reshaping Megatron-LM checkpoints of variable tensor and pipeline parallel sizes to the beloved Transformers sharded checkpoints as it has great support with plethora of tools such as Accelerate Big Model Inference, Megatron-DeepSpeed Inference etc. Support is also available for converting Transformers sharded checkpoints to Megatron-LM checkpoint of variable tensor and pipeline parallel sizes for large scale training. ## Pre-Requisites You will need to install the latest pytorch, cuda, nccl, and NVIDIA [APEX](https://github.com/NVIDIA/apex#quick-start) releases and the nltk library. See [documentation](https://github.com/NVIDIA/Megatron-LM#setup) for more details. Another way to setup the environment is to pull an NVIDIA PyTorch Container that comes with all the required installations from NGC. Below is a step-by-step method to set up the conda environment: 1. Create a virtual environment ``` conda create --name ml ``` 2. Assuming that the machine has CUDA 11.3 installed, installing the corresponding PyTorch GPU Version ``` conda install pytorch torchvision torchaudio cudatoolkit=11.3 -c pytorch ``` 3. Install Nvidia APEX ``` git clone https://github.com/NVIDIA/apex cd apex pip install -v --disable-pip-version-check --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./ cd .. ``` 4. Installing Megatron-LM ``` git clone https://github.com/NVIDIA/Megatron-LM.git cd Megatron-LM git checkout core_r0.5.0 pip install --no-use-pep517 -e . ``` ## Accelerate Megatron-LM Plugin Important features are directly supported via the `accelerate config` command. An example of the corresponding questions for using Megatron-LM features is shown below: ```bash :~$ accelerate config --config_file "megatron_gpt_config.yaml" In which compute environment are you running? ([0] This machine, [1] AWS (Amazon SageMaker)): 0 Which type of machine are you using? ([0] No distributed training, [1] multi-CPU, [2] multi-GPU, [3] TPU): 2 How many different machines will you use (use more than 1 for multi-node training)? [1]: Do you want to use DeepSpeed? [yes/NO]: Do you want to use FullyShardedDataParallel? [yes/NO]: Do you want to use Megatron-LM ? [yes/NO]: yes What is the Tensor Parallelism degree/size? [1]:2 Do you want to enable Sequence Parallelism? [YES/no]: What is the Pipeline Parallelism degree/size? [1]:2 What is the number of micro-batches? [1]:2 Do you want to enable selective activation recomputation? [YES/no]: Do you want to use distributed optimizer which shards optimizer state and gradients across data parallel ranks? [YES/no]: What is the gradient clipping value based on global L2 Norm (0 to disable)? [1.0]: How many GPU(s) should be used for distributed training? [1]:4 Do you wish to use FP16 or BF16 (mixed precision)? 
[NO/fp16/bf16]: bf16
```

The resulting config is shown below:

```
~$ cat megatron_gpt_config.yaml
compute_environment: LOCAL_MACHINE
deepspeed_config: {}
distributed_type: MEGATRON_LM
downcast_bf16: 'no'
fsdp_config: {}
machine_rank: 0
main_process_ip: null
main_process_port: null
main_training_function: main
megatron_lm_config:
  megatron_lm_gradient_clipping: 1.0
  megatron_lm_num_micro_batches: 2
  megatron_lm_pp_degree: 2
  megatron_lm_recompute_activations: true
  megatron_lm_sequence_parallelism: true
  megatron_lm_tp_degree: 2
  megatron_lm_use_distributed_optimizer: true
mixed_precision: bf16
num_machines: 1
num_processes: 4
rdzv_backend: static
same_network: true
use_cpu: false
```

We will take the example of GPT pre-training. The minimal changes required to the official `run_clm_no_trainer.py` to use Megatron-LM are as follows:

1. As Megatron-LM uses its own implementation of the optimizer, the corresponding scheduler compatible with it needs to be used. As such, only Megatron-LM's scheduler is supported. The user will need to create `accelerate.utils.MegatronLMDummyScheduler`. An example is given below:

```python
from accelerate.utils import MegatronLMDummyScheduler

if accelerator.distributed_type == DistributedType.MEGATRON_LM:
    lr_scheduler = MegatronLMDummyScheduler(
        optimizer=optimizer,
        total_num_steps=args.max_train_steps,
        warmup_num_steps=args.num_warmup_steps,
    )
else:
    lr_scheduler = get_scheduler(
        name=args.lr_scheduler_type,
        optimizer=optimizer,
        num_warmup_steps=args.num_warmup_steps * args.gradient_accumulation_steps,
        num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
    )
```

2. Getting the details of the total batch size now needs to be cognizant of the tensor and pipeline parallel sizes. An example of getting the effective total batch size is shown below:

```python
if accelerator.distributed_type == DistributedType.MEGATRON_LM:
    total_batch_size = accelerator.state.megatron_lm_plugin.global_batch_size
else:
    total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
```

3. When using Megatron-LM, the losses are already averaged across the data parallel group:

```python
if accelerator.distributed_type == DistributedType.MEGATRON_LM:
    losses.append(loss)
else:
    losses.append(accelerator.gather_for_metrics(loss.repeat(args.per_device_eval_batch_size)))

if accelerator.distributed_type == DistributedType.MEGATRON_LM:
    losses = torch.tensor(losses)
else:
    losses = torch.cat(losses)
```

4. For Megatron-LM, we need to save the model using `accelerator.save_state`:

```python
if accelerator.distributed_type == DistributedType.MEGATRON_LM:
    accelerator.save_state(args.output_dir)
else:
    unwrapped_model = accelerator.unwrap_model(model)
    unwrapped_model.save_pretrained(
        args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save
    )
```

That's it! We are good to go 🚀. Please find the example script in the examples folder at the path `accelerate/examples/by_feature/megatron_lm_gpt_pretraining.py`. Let's run it for the `gpt-large` model architecture using 4 A100-80GB GPUs.
```bash accelerate launch --config_file megatron_gpt_config.yaml \ examples/by_feature/megatron_lm_gpt_pretraining.py \ --config_name "gpt2-large" \ --tokenizer_name "gpt2-large" \ --dataset_name wikitext \ --dataset_config_name wikitext-2-raw-v1 \ --block_size 1024 \ --learning_rate 5e-5 \ --per_device_train_batch_size 24 \ --per_device_eval_batch_size 24 \ --num_train_epochs 5 \ --with_tracking \ --report_to "wandb" \ --output_dir "awesome_model" ``` Below are some important excerpts from the output logs: ```bash Loading extension module fused_dense_cuda... >>> done with compiling and loading fused kernels. Compilation time: 3.569 seconds > padded vocab (size: 50257) with 175 dummy tokens (new size: 50432) Building gpt model in the pre-training mode. The Megatron LM model weights are initialized at random in `accelerator.prepare`. Please use `accelerator.load_checkpoint` to load a pre-trained checkpoint matching the distributed setup. Preparing dataloader Preparing dataloader Preparing model > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 210753280 > number of parameters on (tensor, pipeline) model parallel rank (1, 1): 209445120 > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 210753280 > number of parameters on (tensor, pipeline) model parallel rank (0, 1): 209445120 Preparing optimizer Preparing scheduler > learning rate decay style: linear 10/10/2022 22:57:22 - INFO - __main__ - ***** Running training ***** 10/10/2022 22:57:22 - INFO - __main__ - Num examples = 2318 10/10/2022 22:57:22 - INFO - __main__ - Num Epochs = 5 10/10/2022 22:57:22 - INFO - __main__ - Instantaneous batch size per device = 24 10/10/2022 22:57:22 - INFO - __main__ - Total train batch size (w. parallel, distributed & accumulation) = 48 10/10/2022 22:57:22 - INFO - __main__ - Gradient Accumulation steps = 1 10/10/2022 22:57:22 - INFO - __main__ - Total optimization steps = 245 20%|████████████▍ | 49/245 [01:04<04:09, 1.27s/it] 10/10/2022 22:58:29 - INFO - __main__ - epoch 0: perplexity: 1222.1594275215962 eval_loss: 7.10837459564209 40%|████████████████████████▊ | 98/245 [02:10<03:07, 1.28s/it] 10/10/2022 22:59:35 - INFO - __main__ - epoch 1: perplexity: 894.5236583794557 eval_loss: 6.796291351318359 60%|████████████████████████████████████▌ | 147/245 [03:16<02:05, 1.28s/it] 10/10/2022 23:00:40 - INFO - __main__ - epoch 2: perplexity: 702.8458788508042 eval_loss: 6.555137634277344 80%|████████████████████████████████████████████████▊ | 196/245 [04:22<01:02, 1.28s/it] 10/10/2022 23:01:46 - INFO - __main__ - epoch 3: perplexity: 600.3220028695281 eval_loss: 6.39746618270874 100%|█████████████████████████████████████████████████████████████| 245/245 [05:27<00:00, 1.28s/it] ``` There are a large number of other options/features that one can set using `accelerate.utils.MegatronLMPlugin`. ## Advanced features to leverage writing custom train step and Megatron-LM Indexed Datasets For leveraging more features, please go through below details. 1. Below is an example of changes required to customize the Train Step while using Megatron-LM. You will implement the `accelerate.utils.AbstractTrainStep` or inherit from their corresponding children `accelerate.utils.GPTTrainStep`, `accelerate.utils.BertTrainStep` or `accelerate.utils.T5TrainStep`. 
```python from accelerate.utils import MegatronLMDummyScheduler, GPTTrainStep, avg_losses_across_data_parallel_group # Custom loss function for the Megatron model class GPTTrainStepWithCustomLoss(GPTTrainStep): def __init__(self, megatron_args, **kwargs): super().__init__(megatron_args) self.kwargs = kwargs def get_loss_func(self): def loss_func(inputs, loss_mask, output_tensor): batch_size, seq_length = output_tensor.shape losses = output_tensor.float() loss_mask = loss_mask.view(-1).float() loss = losses.view(-1) * loss_mask # Resize and average loss per sample loss_per_sample = loss.view(batch_size, seq_length).sum(axis=1) loss_mask_per_sample = loss_mask.view(batch_size, seq_length).sum(axis=1) loss_per_sample = loss_per_sample / loss_mask_per_sample # Calculate and scale weighting weights = torch.stack([(inputs == kt).float() for kt in self.kwargs["keytoken_ids"]]).sum(axis=[0, 2]) weights = 1.0 + self.kwargs["alpha"] * weights # Calculate weighted average weighted_loss = (loss_per_sample * weights).mean() # Reduce loss across data parallel groups averaged_loss = avg_losses_across_data_parallel_group([weighted_loss]) return weighted_loss, {"lm loss": averaged_loss[0]} return loss_func def get_forward_step_func(self): def forward_step(data_iterator, model): """Forward step.""" # Get the batch. tokens, labels, loss_mask, attention_mask, position_ids = self.get_batch(data_iterator) output_tensor = model(tokens, position_ids, attention_mask, labels=labels) return output_tensor, partial(self.loss_func, tokens, loss_mask) return forward_step def main(): # Custom loss function for the Megatron model keytoken_ids = [] keywords = ["plt", "pd", "sk", "fit", "predict", " plt", " pd", " sk", " fit", " predict"] for keyword in keywords: ids = tokenizer([keyword]).input_ids[0] if len(ids) == 1: keytoken_ids.append(ids[0]) accelerator.print(f"Keytoken ids: {keytoken_ids}") accelerator.state.megatron_lm_plugin.custom_train_step_class = GPTTrainStepWithCustomLoss accelerator.state.megatron_lm_plugin.custom_train_step_kwargs = { "keytoken_ids": keytoken_ids, "alpha": 0.25, } ``` 2. For using the Megatron-LM datasets, a few more changes are required. Dataloaders for these datasets are available only on rank 0 of each tensor parallel group. As such, there are rank where dataloader won't be available and this requires tweaks to the training loop. Being able to do all this shows how flexible and extensible Accelerate is. The changes required are as follows. a. For Megatron-LM indexed datasets, we need to use `MegatronLMDummyDataLoader` and pass the required dataset args to it such as `data_path`, `seq_length` etc. See [here](https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/arguments.py#L804) for the list of available args. ```python from accelerate.utils import MegatronLMDummyDataLoader megatron_dataloader_config = { "data_path": args.data_path, "splits_string": args.splits_string, "seq_length": args.block_size, "micro_batch_size": args.per_device_train_batch_size, } megatron_dataloader = MegatronLMDummyDataLoader(**megatron_dataloader_config) accelerator.state.megatron_lm_plugin.megatron_dataset_flag = True ``` b. `megatron_dataloader` is repeated 3 times to get training, validation and test dataloaders as per the `args.splits_string` proportions ```python model, optimizer, lr_scheduler, train_dataloader, eval_dataloader, _ = accelerator.prepare( model, optimizer, lr_scheduler, megatron_dataloader, megatron_dataloader, megatron_dataloader ) ``` c. 
Changes to the training and evaluation loops are needed, as the dataloader is only available on rank 0 of each tensor parallel group. So, we fetch the next batch only if the dataloader isn't `None`, and otherwise pass an empty dict. To do this, we loop using a `while` loop and break when `completed_steps` equals `args.max_train_steps`. This is similar to the Megatron-LM setup wherein the user has to provide `max_train_steps` when using Megatron-LM indexed datasets. This displays how flexible and extensible Accelerate is.

```python
while completed_steps < args.max_train_steps:
    model.train()
    # batch is an empty dict on ranks where the dataloader is not available
    batch = next(train_dataloader) if train_dataloader is not None else {}
    outputs = model(**batch)
    loss = outputs.loss
    ...

    if completed_steps % eval_interval == 0:
        eval_completed_steps = 0
        losses = []
        while eval_completed_steps < eval_iters:
            model.eval()
            with torch.no_grad():
                batch = next(eval_dataloader) if eval_dataloader is not None else {}
                outputs = model(**batch)
```

## Utility for Checkpoint reshaping and interoperability

1. The scripts for these are present in the Transformers library under the respective models. Currently, it is available for the GPT model: [checkpoint_reshaping_and_interoperability.py](https://github.com/huggingface/transformers/blob/main/src/transformers/models/megatron_gpt2/checkpoint_reshaping_and_interoperability.py)

2. Below is an example of converting a Megatron-LM checkpoint to a universal Transformers sharded checkpoint.

```bash
python checkpoint_reshaping_and_interoperability.py \
--convert_checkpoint_from_megatron_to_transformers \
--load_path "gpt/iter_0005000" \
--save_path "gpt/trfs_checkpoint" \
--max_shard_size "200MB" \
--tokenizer_name "gpt2" \
--print-checkpoint-structure
```

3. Conversion of a checkpoint from Transformers to Megatron-LM with `tp_size=2`, `pp_size=2` and `dp_size=2`.

```bash
python checkpoint_utils/megatgron_gpt2/checkpoint_reshaping_and_interoperability.py \
--load_path "gpt/trfs_checkpoint" \
--save_path "gpt/megatron_lm_checkpoint" \
--target_tensor_model_parallel_size 2 \
--target_pipeline_model_parallel_size 2 \
--target_data_parallel_size 2 \
--target_params_dtype "bf16" \
--make_vocab_size_divisible_by 128 \
--use_distributed_optimizer \
--print-checkpoint-structure
```

## Megatron-LM GPT models support returning logits and `megatron_generate` function for text generation

1. Returning logits requires setting `return_logits=True` in `MegatronLMPlugin` as shown below. These would be available in the last stage of the pipeline.
```python
megatron_lm_plugin = MegatronLMPlugin(return_logits=True)
```

2. `megatron_generate` method for the Megatron-LM GPT model: This will use Tensor and Pipeline Parallelism to complete generations for a batch of inputs when using greedy decoding with/without top_k/top_p sampling, and for individual prompt inputs when using beam search decoding. Only a subset of the features of `transformers` generate is supported. This will help in using large models via tensor and pipeline parallelism for generation (it already does key-value caching and uses fused kernels by default). This requires the data parallel size to be 1, and sequence parallelism and activation checkpointing to be disabled. It also requires specifying the paths to the tokenizer's vocab file and merges file. The example below shows how to configure and use the `megatron_generate` method for the Megatron-LM GPT model.
```python # specifying tokenizer's vocab and merges file vocab_file = os.path.join(args.resume_from_checkpoint, "vocab.json") merge_file = os.path.join(args.resume_from_checkpoint, "merges.txt") other_megatron_args = {"vocab_file": vocab_file, "merge_file": merge_file} megatron_lm_plugin = MegatronLMPlugin(other_megatron_args=other_megatron_args) # inference using `megatron_generate` functionality tokenizer.pad_token = tokenizer.eos_token max_new_tokens = 64 batch_texts = [ "Are you human?", "The purpose of life is", "The arsenal was constructed at the request of", "How are you doing these days?", ] batch_encodings = tokenizer(batch_texts, return_tensors="pt", padding=True) # top-p sampling generated_tokens = model.megatron_generate( batch_encodings["input_ids"], batch_encodings["attention_mask"], max_new_tokens=max_new_tokens, top_p=0.8, top_p_decay=0.5, temperature=0.9, ) decoded_preds = tokenizer.batch_decode(generated_tokens.cpu().numpy()) accelerator.print(decoded_preds) # top-k sampling generated_tokens = model.megatron_generate( batch_encodings["input_ids"], batch_encodings["attention_mask"], max_new_tokens=max_new_tokens, top_k=50, temperature=0.9, ) decoded_preds = tokenizer.batch_decode(generated_tokens.cpu().numpy()) accelerator.print(decoded_preds) # adding `bos` token at the start generated_tokens = model.megatron_generate( batch_encodings["input_ids"], batch_encodings["attention_mask"], max_new_tokens=max_new_tokens, add_BOS=True ) decoded_preds = tokenizer.batch_decode(generated_tokens.cpu().numpy()) accelerator.print(decoded_preds) # beam search => only takes single prompt batch_texts = ["The purpose of life is"] batch_encodings = tokenizer(batch_texts, return_tensors="pt", padding=True) generated_tokens = model.megatron_generate( batch_encodings["input_ids"], batch_encodings["attention_mask"], max_new_tokens=max_new_tokens, num_beams=20, length_penalty=1.5, ) decoded_preds = tokenizer.batch_decode(generated_tokens.cpu().numpy()) accelerator.print(decoded_preds) ``` 3. An end-to-end example of using `megatron_generate` method for Megatron-LM GPT model is available at [megatron_gpt2_generation.py](https://github.com/pacman100/accelerate-megatron-test/blob/main/src/inference/megatron_gpt2_generation.py) with config file [megatron_lm_gpt_generate_config.yaml](https://github.com/pacman100/accelerate-megatron-test/blob/main/src/Configs/megatron_lm_gpt_generate_config.yaml). The bash script with accelerate launch command is available at [megatron_lm_gpt_generate.sh](https://github.com/pacman100/accelerate-megatron-test/blob/main/megatron_lm_gpt_generate.sh). The output logs of the script are available at [megatron_lm_gpt_generate.log](https://github.com/pacman100/accelerate-megatron-test/blob/main/output_logs/megatron_lm_gpt_generate.log). ## Support for ROPE and ALiBi Positional embeddings and Multi-Query Attention 1. For ROPE/ALiBi attention, pass `position_embedding_type` with `("absolute" | "rotary" | "alibi")` to `MegatronLMPlugin` as shown below. ```python other_megatron_args = {"position_embedding_type": "alibi"} megatron_lm_plugin = MegatronLMPlugin(other_megatron_args=other_megatron_args) ``` 2. For Multi-Query Attention, pass `attention_head_type` with `("multihead" | "multiquery")` to `MegatronLMPlugin` as shown below. ```python other_megatron_args = {"attention_head_type": "multiquery"} megatron_lm_plugin = MegatronLMPlugin(other_megatron_args=other_megatron_args) ``` ## Caveats 1. Supports Transformers GPT2, Megatron-BERT and T5 models. 
This covers Decoder only, Encoder only and Encoder-Decoder model classes.

2. Only the loss is returned from the model forward pass as there is a quite complex interplay of pipeline, tensor and data parallelism behind the scenes. The `model(**batch_data)` call returns loss(es) averaged across the data parallel ranks. This is fine for most cases wherein pre-training jobs are run using Megatron-LM features and you can easily compute the `perplexity` using the loss. For the GPT model, returning logits in addition to loss(es) is supported. These logits aren't gathered across data parallel ranks. Use `accelerate.utils.gather_across_data_parallel_groups` to gather logits across data parallel ranks. These logits along with labels can be used for computing various performance metrics.

3. The main process is the last rank as the losses/logits are available in the last stage of the pipeline. `accelerator.is_main_process` and `accelerator.is_local_main_process` return `True` for the last rank when using the Megatron-LM integration.

4. In the `accelerator.prepare` call, a Megatron-LM model corresponding to a given Transformers model is created with random weights. Please use `accelerator.load_state` to load the Megatron-LM checkpoint with matching TP, PP and DP partitions.

5. Currently, checkpoint reshaping and interoperability support is only available for GPT. Soon it will be extended to BERT and T5.

6. `gradient_accumulation_steps` needs to be 1. When using Megatron-LM, micro-batches in the pipeline parallelism setting are synonymous with gradient accumulation.

7. When using Megatron-LM, use `accelerator.save_state` and `accelerator.load_state` for saving and loading checkpoints.

8. Below is the mapping from Megatron-LM model architectures to the equivalent Transformers model architectures. Only these Transformers model architectures are supported.

a. Megatron-LM [BertModel](https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/bert_model.py) : Transformers models with `megatron-bert` in the config's model type, e.g., [MegatronBERT](https://huggingface.co/docs/transformers/model_doc/megatron-bert)

b. Megatron-LM [GPTModel](https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py) : Transformers models with `gpt2` in the config's model type, e.g., [OpenAI GPT2](https://huggingface.co/docs/transformers/model_doc/gpt2)

c. Megatron-LM [T5Model](https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/t5_model.py) : Transformers models with `t5` in the config's model type, e.g., [T5](https://huggingface.co/docs/transformers/model_doc/t5) and [MT5](https://huggingface.co/docs/transformers/model_doc/mt5)

accelerate-1.9.0/docs/source/usage_guides/model_size_estimator.md000066400000000000000000000140661503574341000252610ustar00rootroot00000000000000

# Model memory estimator

One very difficult aspect when exploring potential models to use on your machine is knowing just how big of a model will *fit* into memory with your current graphics card (such as loading the model onto CUDA). To help alleviate this, Accelerate has a CLI interface through `accelerate estimate-memory`. This tutorial will help walk you through using it, what to expect, and at the end link to the interactive demo hosted on the Hub which will even let you post those results directly on the model repo!

Currently we support searching for models that can be used in `timm` and `transformers`.
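If you want a quick programmatic approximation of these numbers, a minimal sketch (an illustration of the idea only, not the tool's actual implementation) is to instantiate the model without real weights and multiply its parameter count by the byte width of each dtype. The CLI's figures may differ slightly, since it also inspects buffers and reports the largest layer:

```python
# Minimal sketch: approximate the memory needed to load a `transformers` model
# without downloading its weights, by instantiating it on the `meta` device.
from accelerate import init_empty_weights
from transformers import AutoConfig, AutoModel

config = AutoConfig.from_pretrained("bert-base-cased")
with init_empty_weights():
    model = AutoModel.from_config(config)  # parameters live on the `meta` device

num_params = sum(p.numel() for p in model.parameters())
bytes_per_dtype = {"float32": 4, "float16": 2, "int8": 1, "int4": 0.5}

for dtype, nbytes in bytes_per_dtype.items():
    load_mb = num_params * nbytes / 2**20
    # Training with vanilla Adam is roughly 4x the load size (weights + grads + 2 optimizer states)
    print(f"{dtype}: load ≈ {load_mb:.2f} MB, training with Adam ≈ {load_mb * 4:.2f} MB")
```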
This API will load the model into memory on the `meta` device, so we are not actually downloading and loading the full weights of the model into memory, nor do we need to. As a result it's perfectly fine to measure 8 billion parameter models (or more), without having to worry about whether your CPU can handle it!

## Gradio Demos

Below are a few Gradio demos related to what was described above. The first is the official Hugging Face memory estimation space, which uses Accelerate directly.
A community member has taken the idea and expanded it further, allowing you to filter models directly and see if you can run a particular LLM given GPU constraints and LoRA configurations. To play with it, see [here](https://huggingface.co/spaces/Vokturz/can-it-run-llm) for more details. ## The Command When using `accelerate estimate-memory`, you need to pass in the name of the model you want to use, potentially the framework that model utilizing (if it can't be found automatically), and the data types you want the model to be loaded in with. For example, here is how we can calculate the memory footprint for `bert-base-cased`: ```bash accelerate estimate-memory bert-base-cased ``` This will download the `config.json` for `bert-based-cased`, load the model on the `meta` device, and report back how much space it will use: Memory Usage for loading `bert-base-cased`: | dtype | Largest Layer | Total Size | Training using Adam | |---------|---------------|------------|---------------------| | float32 | 84.95 MB | 418.18 MB | 1.61 GB | | float16 | 42.47 MB | 206.59 MB | 826.36 MB | | int8 | 21.24 MB | 103.29 MB | 413.18 MB | | int4 | 10.62 MB | 51.65 MB | 206.59 MB | By default it will return all the supported dtypes (`int4` through `float32`), but if you are interested in specific ones these can be filtered. ### Specific libraries If the source library cannot be determined automatically (like it could in the case of `bert-base-cased`), a library name can be passed in. ```bash accelerate estimate-memory HuggingFaceM4/idefics-80b-instruct --library_name transformers ``` Memory Usage for loading `HuggingFaceM4/idefics-80b-instruct`: | dtype | Largest Layer | Total Size | Training using Adam | |---------|---------------|------------|---------------------| | float32 | 3.02 GB | 297.12 GB | 1.16 TB | | float16 | 1.51 GB | 148.56 GB | 594.24 GB | | int8 | 772.52 MB | 74.28 GB | 297.12 GB | | int4 | 386.26 MB | 37.14 GB | 148.56 GB | ```bash accelerate estimate-memory timm/resnet50.a1_in1k --library_name timm ``` Memory Usage for loading `timm/resnet50.a1_in1k`: | dtype | Largest Layer | Total Size | Training using Adam | |---------|---------------|------------|---------------------| | float32 | 9.0 MB | 97.7 MB | 390.78 MB | | float16 | 4.5 MB | 48.85 MB | 195.39 MB | | int8 | 2.25 MB | 24.42 MB | 97.7 MB | | int4 | 1.12 MB | 12.21 MB | 48.85 MB | ### Specific dtypes As mentioned earlier, while we return `int4` through `float32` by default, any dtype can be used from `float32`, `float16`, `int8`, and `int4`. To do so, pass them in after specifying `--dtypes`: ```bash accelerate estimate-memory bert-base-cased --dtypes float32 float16 ``` Memory Usage for loading `bert-base-cased`: | dtype | Largest Layer | Total Size | Training using Adam | |---------|---------------|------------|---------------------| | float32 | 84.95 MB | 413.18 MB | 1.61 GB | | float16 | 42.47 MB | 206.59 MB | 826.36 MB | ## Caveats with this calculator This calculator will tell you how much memory is needed to purely load the model in, *not* to perform inference. This calculation is accurate within a few % of the actual value, so it is a very good view of just how much memory it will take. For instance loading `bert-base-cased` actually takes `413.68 MB` when loaded on CUDA in full precision, and the calculator estimates `413.18 MB`. When performing inference you can expect to add up to an additional 20% as found by [EleutherAI](https://blog.eleuther.ai/transformer-math/). 
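As a rough, back-of-the-envelope illustration (the 20% and 4x multipliers are rules of thumb taken from the link above and the tables earlier, not exact values), you could turn the reported load size into inference and training estimates like this:

```python
# Hypothetical back-of-the-envelope estimates derived from the reported load size.
load_size_mb = 418.18  # "Total Size" reported for bert-base-cased in float32

inference_mb = load_size_mb * 1.2    # inference adds roughly 20% on top of the load size
training_adam_mb = load_size_mb * 4  # vanilla Adam training is roughly 4x (weights + grads + 2 optimizer states)

print(f"inference ≈ {inference_mb:.0f} MB, training with Adam ≈ {training_adam_mb / 1024:.2f} GB")
```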
We'll be conducting research into finding a more accurate estimate to these values, and will update this calculator once done. accelerate-1.9.0/docs/source/usage_guides/mps.md000066400000000000000000000057531503574341000216420ustar00rootroot00000000000000 # Accelerated PyTorch Training on Mac With PyTorch v1.12 release, developers and researchers can take advantage of Apple silicon GPUs for significantly faster model training. This unlocks the ability to perform machine learning workflows like prototyping and fine-tuning locally, right on Mac. Apple's Metal Performance Shaders (MPS) as a backend for PyTorch enables this and can be used via the new `"mps"` device. This will map computational graphs and primitives on the MPS Graph framework and tuned kernels provided by MPS. For more information please refer official documents [Introducing Accelerated PyTorch Training on Mac](https://pytorch.org/blog/introducing-accelerated-pytorch-training-on-mac/) and [MPS BACKEND](https://pytorch.org/docs/stable/notes/mps.html). ### Benefits of Training and Inference using Apple Silicon Chips 1. Enables users to train larger networks or batch sizes locally 2. Reduces data retrieval latency and provides the GPU with direct access to the full memory store due to unified memory architecture. Therefore, improving end-to-end performance. 3. Reduces costs associated with cloud-based development or the need for additional local GPUs. **Pre-requisites**: To install torch with mps support, please follow this nice medium article [GPU-Acceleration Comes to PyTorch on M1 Macs](https://medium.com/towards-data-science/gpu-acceleration-comes-to-pytorch-on-m1-macs-195c399efcc1). ## How it works out of the box It is enabled by default on MacOs machines with MPS enabled Apple Silicon GPUs. To disable it, pass `--cpu` flag to `accelerate launch` command or answer the corresponding question when answering the `accelerate config` questionnaire. You can directly run the following script to test it out on MPS enabled Apple Silicon machines: ```bash accelerate launch /examples/cv_example.py --data_dir images ``` ## A few caveats to be aware of 1. Distributed setups `gloo` and `nccl` are not working with `mps` device. This means that currently only single GPU of `mps` device type can be used. Finally, please, remember that, `Accelerate` only integrates MPS backend, therefore if you have any problems or questions with regards to MPS backend usage, please, file an issue with [PyTorch GitHub](https://github.com/pytorch/pytorch/issues).accelerate-1.9.0/docs/source/usage_guides/profiler.md000066400000000000000000000310221503574341000226510ustar00rootroot00000000000000 # Profiler Profiler is a tool that allows the collection of performance metrics during training and inference. Profiler’s context manager API can be used to better understand what model operators are the most expensive, examine their input shapes and stack traces, study device kernel activity, and visualize the execution trace. It provides insights into the performance of your model, allowing you to optimize and improve it. This guide explains how to use PyTorch Profiler to measure the time and memory consumption of the model’s operators and how to integrate this with Accelerate. We will cover various use cases and provide examples for each. ## Using profiler to analyze execution time Profiler allows one to check which operators were called during the execution of a code range wrapped with a profiler context manager. 
Let’s see how we can use profiler to analyze the execution time: ```python import torch import torchvision.models as models from torch.profiler import profile, record_function, ProfilerActivity model = models.resnet18() inputs = torch.randn(5, 3, 224, 224) with profile(activities=[ProfilerActivity.CPU], record_shapes=True) as prof: model(inputs) print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=10)) ``` ```python from accelerate import Accelerator, ProfileKwargs import torch import torchvision.models as models model = models.resnet18() inputs = torch.randn(5, 3, 224, 224) profile_kwargs = ProfileKwargs( activities=["cpu"], record_shapes=True ) accelerator = Accelerator(cpu=True, kwargs_handlers=[profile_kwargs]) model = accelerator.prepare(model) with accelerator.profile() as prof: with torch.no_grad(): model(inputs) print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=10)) ``` The resulting table output (omitting some columns): ``` --------------------------------- ------------ ------------ ------------ ------------ Name Self CPU CPU total CPU time avg # of Calls --------------------------------- ------------ ------------ ------------ ------------ aten::conv2d 171.000us 52.260ms 2.613ms 20 aten::convolution 227.000us 52.089ms 2.604ms 20 aten::_convolution 270.000us 51.862ms 2.593ms 20 aten::mkldnn_convolution 51.273ms 51.592ms 2.580ms 20 aten::batch_norm 118.000us 7.059ms 352.950us 20 aten::_batch_norm_impl_index 315.000us 6.941ms 347.050us 20 aten::native_batch_norm 6.305ms 6.599ms 329.950us 20 aten::max_pool2d 40.000us 4.008ms 4.008ms 1 aten::max_pool2d_with_indices 3.968ms 3.968ms 3.968ms 1 aten::add_ 780.000us 780.000us 27.857us 28 --------------------------------- ------------ ------------ ------------ ------------ Self CPU time total: 67.016ms ``` To get a finer granularity of results and include operator input shapes, pass `group_by_input_shape=True` (note: this requires running the profiler with `record_shapes=True`): ```python print(prof.key_averages(group_by_input_shape=True).table(sort_by="cpu_time_total", row_limit=10)) ``` ## Using profiler to analyze memory consumption Profiler can also show the amount of memory (used by the model’s tensors) that was allocated (or released) during the execution of the model’s operators. To enable memory profiling functionality pass `profile_memory=True`. 
```python model = models.resnet18() inputs = torch.randn(5, 3, 224, 224) with profile(activities=[ProfilerActivity.CPU], profile_memory=True, record_shapes=True) as prof: model(inputs) print(prof.key_averages().table(sort_by="self_cpu_memory_usage", row_limit=10)) ``` ```python model = models.resnet18() inputs = torch.randn(5, 3, 224, 224) profile_kwargs = ProfileKwargs( activities=["cpu"], profile_memory=True, record_shapes=True ) accelerator = Accelerator(cpu=True, kwargs_handlers=[profile_kwargs]) model = accelerator.prepare(model) with accelerator.profile() as prof: model(inputs) print(prof.key_averages().table(sort_by="self_cpu_memory_usage", row_limit=10)) ``` The resulting table output (omitting some columns): ``` --------------------------------- ------------ ------------ ------------ Name CPU Mem Self CPU Mem # of Calls --------------------------------- ------------ ------------ ------------ aten::empty 94.85 Mb 94.85 Mb 205 aten::max_pool2d_with_indices 11.48 Mb 11.48 Mb 1 aten::addmm 19.53 Kb 19.53 Kb 1 aten::mean 10.00 Kb 10.00 Kb 1 aten::empty_strided 492 b 492 b 5 aten::cat 240 b 240 b 6 aten::abs 480 b 240 b 4 aten::masked_select 120 b 112 b 1 aten::ne 61 b 53 b 3 aten::eq 30 b 30 b 1 --------------------------------- ------------ ------------ ------------ Self CPU time total: 69.332ms ``` ## Exporting chrome trace You can examine the sequence of profiled operators and CUDA kernels in Chrome trace viewer (`chrome://tracing`): ![profile_export](https://github.com/huggingface/accelerate/assets/100389977/5acb193f-6d11-4f7b-9873-c600c19e8172) ```python model = models.resnet18().cuda() inputs = torch.randn(5, 3, 224, 224).cuda() with profile(activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA]) as prof: model(inputs) prof.export_chrome_trace("trace.json") ``` ```python model = models.resnet18() inputs = torch.randn(5, 3, 224, 224).cuda() profile_kwargs = ProfileKwargs( activities=["cpu", "cuda"], output_trace_dir="trace" ) accelerator = Accelerator(kwargs_handlers=[profile_kwargs]) model = accelerator.prepare(model) with accelerator.profile() as prof: model(inputs) # The trace will be saved to the specified directory ``` For other hardware accelerators, e.g. XPU, you can change `cuda` to `xpu` in the above example code. ## Using Profiler to Analyze Long-Running Jobs Profiler offers an additional API to handle long-running jobs (such as training loops). Tracing all of the execution can be slow and result in very large trace files. To avoid this, use optional arguments: - `schedule_option`: Scheduling options allow you to control when profiling is active. This is useful for long-running jobs to avoid collecting too much data. Available keys are `wait`, `warmup`, `active`, `repeat` and `skip_first`. The profiler will skip the first `skip_first` steps, then wait for `wait` steps, then do the warmup for the next `warmup` steps, then do the active recording for the next `active` steps and then repeat the cycle starting with `wait` steps. The optional number of cycles is specified with the `repeat` parameter, the zero value means that the cycles will continue until the profiling is finished. - `on_trace_ready`: specifies a function that takes a reference to the profiler as an input and is called by the profiler each time the new trace is ready. 
To illustrate how the API works, consider the following example: ```python from torch.profiler import schedule my_schedule = schedule( skip_first=1, wait=5, warmup=1, active=3, repeat=2 ) def trace_handler(p): output = p.key_averages().table(sort_by="self_cuda_time_total", row_limit=10) print(output) p.export_chrome_trace("/tmp/trace_" + str(p.step_num) + ".json") with profile( activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA], schedule=my_schedule, on_trace_ready=trace_handler ) as p: for idx in range(8): model(inputs) p.step() ``` ```python def trace_handler(p): output = p.key_averages().table(sort_by="self_cuda_time_total", row_limit=10) print(output) p.export_chrome_trace("/tmp/trace_" + str(p.step_num) + ".json") profile_kwargs = ProfileKwargs( activities=["cpu", "cuda"], schedule_option={"wait": 5, "warmup": 1, "active": 3, "repeat": 2, "skip_first": 1}, on_trace_ready=trace_handler ) accelerator = Accelerator(kwargs_handlers=[profile_kwargs]) model = accelerator.prepare(model) with accelerator.profile() as prof: for idx in range(8): model(inputs) prof.step() ``` ## FLOPS Use formula to estimate the FLOPs (floating point operations) of specific operators (matrix multiplication and 2D convolution). To measure floating-point operations (FLOPS): ```python with profile( activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA], with_flops=True ) as prof: model(inputs) print(prof.key_averages().table(sort_by="flops", row_limit=10)) ``` ```python profile_kwargs = ProfileKwargs( with_flops=True ) accelerator = Accelerator(kwargs_handlers=[profile_kwargs]) with accelerator.profile() as prof: model(inputs) print(prof.key_averages().table(sort_by="flops", row_limit=10)) ``` The resulting table output (omitting some columns): ``` ------------------------------------------------------- ------------ ------------ ------------ Name Self CPU Self CUDA Total FLOPs ------------------------------------------------------- ------------ ------------ ------------ aten::conv2d 197.000us 0.000us 18135613440.000 aten::addmm 103.000us 17.000us 5120000.000 aten::mul 29.000us 2.000us 30.000 aten::convolution 409.000us 0.000us -- aten::_convolution 253.000us 0.000us -- aten::cudnn_convolution 5.465ms 2.970ms -- cudaEventRecord 138.000us 0.000us -- cudaStreamIsCapturing 43.000us 0.000us -- cudaStreamGetPriority 40.000us 0.000us -- cudaDeviceGetStreamPriorityRange 10.000us 0.000us -- ------------------------------------------------------- ------------ ------------ ------------ Self CPU time total: 21.938ms Self CUDA time total: 4.165ms ``` ## Conclusion and Further Information PyTorch Profiler is a powerful tool for analyzing the performance of your models. By integrating it with Accelerate, you can easily profile your models and gain insights into their performance, helping you to optimize and improve them. For more detailed information, refer to the [PyTorch Profiler documentation](https://pytorch.org/docs/stable/profiler.html).accelerate-1.9.0/docs/source/usage_guides/quantization.md000066400000000000000000000147031503574341000235640ustar00rootroot00000000000000 # Model quantization ## `bitsandbytes` Integration Accelerate brings `bitsandbytes` quantization to your model. You can now load any pytorch model in 8-bit or 4-bit with a few lines of code. If you want to use Transformers models with `bitsandbytes`, you should follow this [documentation](https://huggingface.co/docs/transformers/main_classes/quantization). 
To learn more about how the `bitsandbytes` quantization works, check out the blog posts on [8-bit quantization](https://huggingface.co/blog/hf-bitsandbytes-integration) and [4-bit quantization](https://huggingface.co/blog/4bit-transformers-bitsandbytes). ### Pre-Requisites You will need to install the following requirements: - Install `bitsandbytes` library ```bash pip install bitsandbytes ``` For non-cuda devices, you can refer to the bitsandbytes installation guide [here](https://huggingface.co/docs/bitsandbytes/main/en/installation#multi-backend). - Install latest `accelerate` from source ```bash pip install git+https://github.com/huggingface/accelerate.git ``` - Install `minGPT` and `huggingface_hub` to run examples ```bash git clone https://github.com/karpathy/minGPT.git pip install minGPT/ pip install huggingface_hub ``` ### How it works First, we need to initialize our model. To save memory, we can initialize an empty model using the context manager [`init_empty_weights`]. Let's take the GPT2 model from minGPT library. ```py from accelerate import init_empty_weights from mingpt.model import GPT model_config = GPT.get_default_config() model_config.model_type = 'gpt2-xl' model_config.vocab_size = 50257 model_config.block_size = 1024 with init_empty_weights(): empty_model = GPT(model_config) ``` Then, we need to get the path to the weights of your model. The path can be the state_dict file (e.g. "pytorch_model.bin") or a folder containing the sharded checkpoints. ```py from huggingface_hub import snapshot_download weights_location = snapshot_download(repo_id="marcsun13/gpt2-xl-linear-sharded") ``` Finally, you need to set your quantization configuration with [`~utils.BnbQuantizationConfig`]. Here's an example for 8-bit quantization: ```py from accelerate.utils import BnbQuantizationConfig bnb_quantization_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold = 6) ``` Here's an example for 4-bit quantization: ```py from accelerate.utils import BnbQuantizationConfig bnb_quantization_config = BnbQuantizationConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4") ``` To quantize your empty model with the selected configuration, you need to use [`~utils.load_and_quantize_model`]. ```py from accelerate.utils import load_and_quantize_model quantized_model = load_and_quantize_model(empty_model, weights_location=weights_location, bnb_quantization_config=bnb_quantization_config) ``` ### Saving and loading 8-bit model You can save your 8-bit model with accelerate using [`~Accelerator.save_model`]. ```py from accelerate import Accelerator accelerate = Accelerator() new_weights_location = "path/to/save_directory" accelerate.save_model(quantized_model, new_weights_location) quantized_model_from_saved = load_and_quantize_model(empty_model, weights_location=new_weights_location, bnb_quantization_config=bnb_quantization_config, device_map = "auto") ``` Note that 4-bit model serialization is currently not supported. ### Offload modules to cpu and disk You can offload some modules to cpu/disk if you don't have enough space on the GPU to store the entire model on your GPUs. This uses big model inference under the hood. Check this [documentation](https://huggingface.co/docs/accelerate/usage_guides/big_modeling) for more details. For 8-bit quantization, the selected modules will be converted to 8-bit precision. 
For 4-bit quantization, the selected modules will be kept in `torch_dtype` that the user passed in `BnbQuantizationConfig`. We will add support to convert these offloaded modules in 4-bit when 4-bit serialization will be possible. You just need to pass a custom `device_map` in order to offload modules on cpu/disk. The offload modules will be dispatched on the GPU when needed. Here's an example : ```py device_map = { "transformer.wte": 0, "transformer.wpe": 0, "transformer.drop": 0, "transformer.h": "cpu", "transformer.ln_f": "disk", "lm_head": "disk", } ``` ### Fine-tune a quantized model It is not possible to perform pure 8bit or 4bit training on these models. However, you can train these models by leveraging parameter efficient fine tuning methods (PEFT) and train for example adapters on top of them. Please have a look at [peft](https://github.com/huggingface/peft) library for more details. Currently, you can't add adapters on top of any quantized model. However, with the official support of adapters with Transformers models, you can fine-tune quantized models. If you want to finetune a Transformers model , follow this [documentation](https://huggingface.co/docs/transformers/main_classes/quantization) instead. Check out this [demo](https://colab.research.google.com/drive/1VoYNfYDKcKRQRor98Zbf2-9VQTtGJ24k?usp=sharing) on how to fine-tune a 4-bit Transformers model. Note that you don’t need to pass `device_map` when loading the model for training. It will automatically load your model on your GPU. Please note that `device_map=auto` should be used for inference only. ### Example demo - running GPT2 1.5b on a Google Colab Check out the Google Colab [demo](https://colab.research.google.com/drive/1T1pOgewAWVpR9gKpaEWw4orOrzPFb3yM?usp=sharing) for running quantized models on a GPT2 model. The GPT2-1.5B model checkpoint is in FP32 which uses 6GB of memory. After quantization, it uses 1.6GB with 8-bit modules and 1.2GB with 4-bit modules. accelerate-1.9.0/docs/source/usage_guides/sagemaker.md000066400000000000000000000172151503574341000227760ustar00rootroot00000000000000 # Amazon SageMaker Hugging Face and Amazon introduced new [Hugging Face Deep Learning Containers (DLCs)](https://github.com/aws/deep-learning-containers/blob/master/available_images.md#huggingface-training-containers) to make it easier than ever to train Hugging Face Transformer models in [Amazon SageMaker](https://aws.amazon.com/sagemaker/). ## Getting Started ### Setup & Installation Before you can run your Accelerate scripts on Amazon SageMaker you need to sign up for an AWS account. If you do not have an AWS account yet learn more [here](https://docs.aws.amazon.com/sagemaker/latest/dg/gs-set-up.html). After you have your AWS Account you need to install the `sagemaker` sdk for Accelerate with: ```bash pip install "accelerate[sagemaker]" --upgrade ``` Accelerate currently uses the DLCs, with `transformers`, `datasets` and `tokenizers` pre-installed. Accelerate is not in the DLC yet (will soon be added!) so to use it within Amazon SageMaker you need to create a `requirements.txt` in the same directory where your training script is located and add it as dependency: ``` accelerate ``` You should also add any other dependencies you have to this `requirements.txt`. ### Configure Accelerate You can configure the launch configuration for Amazon SageMaker the same as you do for non SageMaker training jobs with the Accelerate CLI: ```bash accelerate config # In which compute environment are you running? 
([0] This machine, [1] AWS (Amazon SageMaker)): 1 ``` Accelerate will go through a questionnaire about your Amazon SageMaker setup and create a config file you can edit. Accelerate is not saving any of your credentials. ### Prepare a Accelerate fine-tuning script The training script is very similar to a training script you might run outside of SageMaker, but to save your model after training you need to specify either `/opt/ml/model` or use `os.environ["SM_MODEL_DIR"]` as your save directory. After training, artifacts in this directory are uploaded to S3: ```diff - torch.save('/opt/ml/model`) + accelerator.save('/opt/ml/model') ``` SageMaker doesn’t support argparse actions. If you want to use, for example, boolean hyperparameters, you need to specify type as bool in your script and provide an explicit True or False value for this hyperparameter. [[REF]](https://sagemaker.readthedocs.io/en/stable/frameworks/pytorch/using_pytorch.html#prepare-a-pytorch-training-script). ### Launch Training You can launch your training with Accelerate CLI with: ``` accelerate launch path_to_script.py --args_to_the_script ``` This will launch your training script using your configuration. The only thing you have to do is provide all the arguments needed by your training script as named arguments. **Examples** If you run one of the example scripts, don't forget to add `accelerator.save('/opt/ml/model')` to it. ```bash accelerate launch ./examples/sagemaker_example.py ``` Outputs: ``` Configuring Amazon SageMaker environment Converting Arguments to Hyperparameters Creating Estimator 2021-04-08 11:56:50 Starting - Starting the training job... 2021-04-08 11:57:13 Starting - Launching requested ML instancesProfilerReport-1617883008: InProgress ......... 2021-04-08 11:58:54 Starting - Preparing the instances for training......... 2021-04-08 12:00:24 Downloading - Downloading input data 2021-04-08 12:00:24 Training - Downloading the training image.................. 2021-04-08 12:03:39 Training - Training image download completed. Training in progress.. ........ epoch 0: {'accuracy': 0.7598039215686274, 'f1': 0.8178438661710037} epoch 1: {'accuracy': 0.8357843137254902, 'f1': 0.882249560632689} epoch 2: {'accuracy': 0.8406862745098039, 'f1': 0.8869565217391304} ........ 2021-04-08 12:05:40 Uploading - Uploading generated training model 2021-04-08 12:05:40 Completed - Training job completed Training seconds: 331 Billable seconds: 331 You can find your model data at: s3://your-bucket/accelerate-sagemaker-1-2021-04-08-11-56-47-108/output/model.tar.gz ``` ## Advanced Features ### Distributed Training: Data Parallelism Set up the accelerate config by running `accelerate config` and answer the SageMaker questions and set it up. To use SageMaker DDP, select it when asked `What is the distributed mode? ([0] No distributed training, [1] data parallelism):`. Example config below: ```yaml base_job_name: accelerate-sagemaker-1 compute_environment: AMAZON_SAGEMAKER distributed_type: DATA_PARALLEL ec2_instance_type: ml.p3.16xlarge iam_role_name: xxxxx image_uri: null mixed_precision: fp16 num_machines: 1 profile: xxxxx py_version: py10 pytorch_version: 2.5.0 region: us-east-1 transformers_version: 4.17.0 use_cpu: false ``` ### Distributed Training: Model Parallelism *currently in development, will be supported soon.* ### Python packages and dependencies Accelerate currently uses the DLCs, with `transformers`, `datasets` and `tokenizers` pre-installed. 
If you want to use different/other Python packages you can do this by adding them to the `requirements.txt`. These packages will be installed before your training script is started. ### Local Training: SageMaker Local mode The local mode in the SageMaker SDK allows you to run your training script locally inside the HuggingFace DLC (Deep Learning container) or using your custom container image. This is useful for debugging and testing your training script inside the final container environment. Local mode uses Docker compose (*Note: Docker Compose V2 is not supported yet*). The SDK will handle the authentication against ECR to pull the DLC to your local environment. You can emulate CPU (single and multi-instance) and GPU (single instance) SageMaker training jobs. To use local mode, you need to set your `ec2_instance_type` to `local`. ```yaml ec2_instance_type: local ``` ### Advanced configuration The configuration allows you to override parameters for the [Estimator](https://sagemaker.readthedocs.io/en/stable/api/training/estimators.html). These settings have to be applied in the config file and are not part of `accelerate config`. You can control many additional aspects of the training job, e.g. use Spot instances, enable network isolation and many more. ```yaml additional_args: # enable network isolation to restrict internet access for containers enable_network_isolation: True ``` You can find all available configuration [here](https://sagemaker.readthedocs.io/en/stable/api/training/estimators.html). ### Use Spot Instances You can use Spot Instances e.g. using (see [Advanced configuration](#advanced-configuration)): ```yaml additional_args: use_spot_instances: True max_wait: 86400 ``` *Note: Spot Instances are subject to be terminated and training to be continued from a checkpoint. This is not handled in Accelerate out of the box. Contact us if you would like this feature.* ### Remote scripts: Use scripts located on Github *undecided if feature is needed. Contact us if you would like this feature.*accelerate-1.9.0/docs/source/usage_guides/tracking.md000066400000000000000000000206301503574341000226340ustar00rootroot00000000000000 # Experiment trackers There are a large number of experiment tracking APIs available, however getting them all to work in a multi-processing environment can oftentimes be complex. Accelerate provides a general tracking API that can be used to log useful items during your script through [`Accelerator.log`] ## Integrated Trackers Currently `Accelerate` supports eight trackers out-of-the-box: - TensorBoard - WandB - Trackio - CometML - Aim - MLFlow - ClearML - DVCLive To use any of them, pass in the selected type(s) to the `log_with` parameter in [`Accelerate`]: ```python from accelerate import Accelerator from accelerate.utils import LoggerType accelerator = Accelerator(log_with="all") # For all available trackers in the environment accelerator = Accelerator(log_with="wandb") accelerator = Accelerator(log_with=["wandb", LoggerType.TENSORBOARD]) ``` At the start of your experiment [`Accelerator.init_trackers`] should be used to setup your project, and potentially add any experiment hyperparameters to be logged: ```python hps = {"num_iterations": 5, "learning_rate": 1e-2} accelerator.init_trackers("my_project", config=hps) ``` When you are ready to log any data, [`Accelerator.log`] should be used. A `step` can also be passed in to correlate the data with a particular step in the training loop. 
```python accelerator.log({"train_loss": 1.12, "valid_loss": 0.8}, step=1) ``` Once you've finished training, make sure to run [`Accelerator.end_training`] so that all the trackers can run their finish functionalities if they have any. ```python accelerator.end_training() ``` A full example is below: ```python from accelerate import Accelerator accelerator = Accelerator(log_with="all") config = { "num_iterations": 5, "learning_rate": 1e-2, "loss_function": str(my_loss_function), } accelerator.init_trackers("example_project", config=config) my_model, my_optimizer, my_training_dataloader = accelerator.prepare(my_model, my_optimizer, my_training_dataloader) device = accelerator.device my_model.to(device) for iteration in range(config["num_iterations"]): for step, batch in enumerate(my_training_dataloader): my_optimizer.zero_grad() inputs, targets = batch inputs = inputs.to(device) targets = targets.to(device) outputs = my_model(inputs) loss = my_loss_function(outputs, targets) accelerator.backward(loss) my_optimizer.step() accelerator.log({"training_loss": loss}, step=step) accelerator.end_training() ``` If a tracker requires a directory to save data to, such as `TensorBoard`, then pass the directory path to `project_dir`. The `project_dir` parameter is useful when there are other configurations to be combined with in the [`~utils.ProjectConfiguration`] data class. For example, you can save the TensorBoard data to `project_dir` and everything else can be logged in the `logging_dir` parameter of [`~utils.ProjectConfiguration`: ```python accelerator = Accelerator(log_with="tensorboard", project_dir=".") # use with ProjectConfiguration config = ProjectConfiguration(project_dir=".", logging_dir="another/directory") accelerator = Accelerator(log_with="tensorboard", project_config=config) ``` ## Implementing Custom Trackers To implement a new tracker to be used in `Accelerator`, a new one can be made through implementing the [`GeneralTracker`] class. Every tracker must implement three functions and have three properties: - `__init__`: - Should store a `run_name` and initialize the tracker API of the integrated library. - If a tracker stores their data locally (such as TensorBoard), a `logging_dir` parameter can be added. - `store_init_configuration`: - Should take in a `values` dictionary and store them as a one-time experiment configuration - `log`: - Should take in a `values` dictionary and a `step`, and should log them to the run - `name` (`str`): - A unique string name for the tracker, such as `"wandb"` for the wandb tracker. - This will be used for interacting with this tracker specifically - `requires_logging_directory` (`bool`): - Whether a `logging_dir` is needed for this particular tracker and if it uses one. - `tracker`: - This should be implemented as a `@property` function - Should return the internal tracking mechanism the library uses, such as the `run` object for `wandb`. Each method should also utilize the [`state.PartialState`] class if the logger should only be executed on the main process for instance. 
A brief example can be seen below with an integration with Weights and Biases, containing only the relevant information and logging just on the main process: ```python from accelerate.tracking import GeneralTracker, on_main_process from typing import Optional import wandb class MyCustomTracker(GeneralTracker): name = "wandb" requires_logging_directory = False @on_main_process def __init__(self, run_name: str): self.run_name = run_name run = wandb.init(self.run_name) @property def tracker(self): return self.run.run @on_main_process def store_init_configuration(self, values: dict): wandb.config(values) @on_main_process def log(self, values: dict, step: Optional[int] = None): wandb.log(values, step=step) ``` When you are ready to build your `Accelerator` object, pass in an **instance** of your tracker to [`Accelerator.log_with`] to have it automatically be used with the API: ```python tracker = MyCustomTracker("some_run_name") accelerator = Accelerator(log_with=tracker) ``` These also can be mixed with existing trackers, including with `"all"`: ```python tracker = MyCustomTracker("some_run_name") accelerator = Accelerator(log_with=[tracker, "all"]) ``` ## Accessing the internal tracker If some custom interactions with a tracker might be wanted directly, you can quickly access one using the [`Accelerator.get_tracker`] method. Just pass in the string corresponding to a tracker's `.name` attribute and it will return that tracker on the main process. This example shows doing so with wandb: ```python wandb_tracker = accelerator.get_tracker("wandb") ``` From there you can interact with `wandb`'s `run` object like normal: ```python wandb_tracker.log_artifact(some_artifact_to_log) ``` Trackers built in Accelerate will automatically execute on the correct process, so if a tracker is only meant to be ran on the main process it will do so automatically. If you want to truly remove Accelerate's wrapping entirely, you can achieve the same outcome with: ```python wandb_tracker = accelerator.get_tracker("wandb", unwrap=True) if accelerator.is_main_process: wandb_tracker.log_artifact(some_artifact_to_log) ``` ## When a wrapper cannot work If a library has an API that does not follow a strict `.log` with an overall dictionary such as Neptune.AI, logging can be done manually under an `if accelerator.is_main_process` statement: ```diff from accelerate import Accelerator + import neptune accelerator = Accelerator() + run = neptune.init_run(...) my_model, my_optimizer, my_training_dataloader = accelerate.prepare(my_model, my_optimizer, my_training_dataloader) device = accelerator.device my_model.to(device) for iteration in config["num_iterations"]: for batch in my_training_dataloader: my_optimizer.zero_grad() inputs, targets = batch inputs = inputs.to(device) targets = targets.to(device) outputs = my_model(inputs) loss = my_loss_function(outputs, targets) total_loss += loss accelerator.backward(loss) my_optimizer.step() + if accelerator.is_main_process: + run["logs/training/batch/loss"].log(loss) ``` accelerate-1.9.0/docs/source/usage_guides/training_zoo.md000066400000000000000000000411111503574341000235310ustar00rootroot00000000000000 # Example Zoo Below contains a non-exhaustive list of tutorials and scripts showcasing Accelerate. 
## Official Accelerate Examples: ### Basic Examples These examples showcase the base features of Accelerate and are a great starting point - [Barebones NLP example](https://github.com/huggingface/accelerate/blob/main/examples/nlp_example.py) - [Barebones distributed NLP example in a Jupyter Notebook](https://github.com/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_nlp_example.ipynb) - [Barebones computer vision example](https://github.com/huggingface/accelerate/blob/main/examples/cv_example.py) - [Barebones distributed computer vision example in a Jupyter Notebook](https://github.com/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_cv_example.ipynb) - [Using Accelerate in Kaggle](https://www.kaggle.com/code/muellerzr/multi-gpu-and-accelerate) ### Feature Specific Examples These examples showcase specific features that the Accelerate framework offers - [Automatic memory-aware gradient accumulation](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/automatic_gradient_accumulation.py) - [Checkpointing states](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/checkpointing.py) - [Cross validation](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/cross_validation.py) - [DeepSpeed](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/deepspeed_with_config_support.py) - [Fully Sharded Data Parallelism](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/fsdp_with_peak_mem_tracking.py) - [Gradient accumulation](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/gradient_accumulation.py) - [Memory-aware batch size finder](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/memory.py) - [Metric Computation](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/multi_process_metrics.py) - [Using Trackers](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/tracking.py) - [Using Megatron-LM](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/megatron_lm_gpt_pretraining.py) ### Full Examples These examples showcase every feature in Accelerate at once that was shown in "Feature Specific Examples" - [Complete NLP example](https://github.com/huggingface/accelerate/blob/main/examples/complete_nlp_example.py) - [Complete computer vision example](https://github.com/huggingface/accelerate/blob/main/examples/complete_cv_example.py) - [Very complete and extensible vision example showcasing SLURM, hydra, and a very extensible usage of the framework](https://github.com/yuvalkirstain/PickScore) - [Causal language model fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_clm_no_trainer.py) - [Masked language model fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm_no_trainer.py) - [Speech pretraining example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/speech-pretraining/run_wav2vec2_pretraining_no_trainer.py) - [Translation fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/translation/run_translation_no_trainer.py) - [Text classification fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue_no_trainer.py) - [Semantic segmentation fine-tuning 
example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py) - [Question answering fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/question-answering/run_qa_no_trainer.py) - [Beam search question answering fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py) - [Multiple choice question answering fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/multiple-choice/run_swag_no_trainer.py) - [Named entity recognition fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/token-classification/run_ner_no_trainer.py) - [Image classification fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/image-classification/run_image_classification_no_trainer.py) - [Summarization fine-tuning example](https://github.com/huggingface/transformers/blob/main/examples/pytorch/summarization/run_summarization_no_trainer.py) - [End-to-end examples on how to use AWS SageMaker integration of Accelerate](https://github.com/huggingface/notebooks/blob/main/sagemaker/22_accelerate_sagemaker_examples/README.md) - [Megatron-LM examples for various NLp tasks](https://github.com/pacman100/accelerate-megatron-test) ## Integration Examples These are tutorials from libraries that integrate with Accelerate: > Don't find your integration here? Make a PR to include it! ### Amphion - [Training Text-to-Speech Models with Amphion](https://github.com/open-mmlab/Amphion/blob/main/egs/tts/README.md) - [Training Singing Voice Conversion Models with Amphion](https://github.com/open-mmlab/Amphion/blob/main/egs/svc/README.md) - [Training Vocoders with Amphion](https://github.com/open-mmlab/Amphion/blob/main/egs/vocoder/README.md) ### Catalyst - [Distributed training tutorial with Catalyst](https://catalyst-team.github.io/catalyst/tutorials/ddp.html) ### DALLE2-pytorch - [Fine-tuning DALLE2](https://github.com/lucidrains/DALLE2-pytorch#usage) ### Diffusers - [Performing textual inversion with diffusers](https://github.com/huggingface/diffusers/tree/main/examples/textual_inversion) - [Training DreamBooth with diffusers](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth) ### fastai - [Distributed training from Jupyter Notebooks with fastai](https://docs.fast.ai/tutorial.distributed.html) - [Basic distributed training examples with fastai](https://docs.fast.ai/examples/distributed_app_examples.html) ### GradsFlow - [Auto Image Classification with GradsFlow](https://docs.gradsflow.com/en/latest/examples/nbs/01-ImageClassification/) ### imagen-pytorch - [Fine-tuning Imagen](https://github.com/lucidrains/imagen-pytorch#usage) ### Kornia - [Fine-tuning vision models with Kornia's Trainer](https://kornia.readthedocs.io/en/latest/get-started/training.html) ### PyTorch Accelerated - [Quickstart distributed training tutorial with PyTorch Accelerated](https://pytorch-accelerated.readthedocs.io/en/latest/quickstart.html) ### PyTorch3D - [Perform Deep Learning with 3D data](https://pytorch3d.org/tutorials/) ### Stable-Dreamfusion - [Training with Stable-Dreamfusion to convert text to a 3D model](https://colab.research.google.com/drive/1MXT3yfOFvO0ooKEfiUUvTKwUkrrlCHpF?usp=sharing) ### Tez - [Leaf disease detection with Tez and 
Accelerate](https://www.kaggle.com/code/abhishek/tez-faster-and-easier-training-for-leaf-detection/notebook) ### trlx - [How to implement a sentiment learning task with trlx](https://github.com/CarperAI/trlx#example-how-to-add-a-task) ### Comfy-UI - [Enabling using large Stable Diffusion Models in low-vram settings using Accelerate](https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/model_management.py#L291-L296) ## In Science Below contains a non-exhaustive list of papers utilizing Accelerate. > Don't find your paper here? Make a PR to include it! * Yuval Kirstain, Adam Polyak, Uriel Singer, Shahbuland Matiana, Joe Penna, Omer Levy: “Pick-a-Pic: An Open Dataset of User Preferences for Text-to-Image Generation”, 2023; [arXiv:2305.01569](http://arxiv.org/abs/2305.01569). * Lei Wang, Wanyu Xu, Yihuai Lan, Zhiqiang Hu, Yunshi Lan, Roy Ka-Wei Lee, Ee-Peng Lim: “Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models”, 2023; [arXiv:2305.04091](http://arxiv.org/abs/2305.04091). * Arthur Câmara, Claudia Hauff: “Moving Stuff Around: A study on efficiency of moving documents into memory for Neural IR models”, 2022; [arXiv:2205.08343](http://arxiv.org/abs/2205.08343). * Ying Sheng, Lianmin Zheng, Binhang Yuan, Zhuohan Li, Max Ryabinin, Daniel Y. Fu, Zhiqiang Xie, Beidi Chen, Clark Barrett, Joseph E. Gonzalez, Percy Liang, Christopher Ré, Ion Stoica, Ce Zhang: “High-throughput Generative Inference of Large Language Models with a Single GPU”, 2023; [arXiv:2303.06865](http://arxiv.org/abs/2303.06865). * Peter Melchior, Yan Liang, ChangHoon Hahn, Andy Goulding: “Autoencoding Galaxy Spectra I: Architecture”, 2022; [arXiv:2211.07890](http://arxiv.org/abs/2211.07890). * Jiaao Chen, Aston Zhang, Mu Li, Alex Smola, Diyi Yang: “A Cheaper and Better Diffusion Language Model with Soft-Masked Noise”, 2023; [arXiv:2304.04746](http://arxiv.org/abs/2304.04746). * Ayaan Haque, Matthew Tancik, Alexei A. Efros, Aleksander Holynski, Angjoo Kanazawa: “Instruct-NeRF2NeRF: Editing 3D Scenes with Instructions”, 2023; [arXiv:2303.12789](http://arxiv.org/abs/2303.12789). * Luke Melas-Kyriazi, Christian Rupprecht, Iro Laina, Andrea Vedaldi: “RealFusion: 360° Reconstruction of Any Object from a Single Image”, 2023; [arXiv:2302.10663](http://arxiv.org/abs/2302.10663). * Xiaoshi Wu, Keqiang Sun, Feng Zhu, Rui Zhao, Hongsheng Li: “Better Aligning Text-to-Image Models with Human Preference”, 2023; [arXiv:2303.14420](http://arxiv.org/abs/2303.14420). * Yongliang Shen, Kaitao Song, Xu Tan, Dongsheng Li, Weiming Lu, Yueting Zhuang: “HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in HuggingFace”, 2023; [arXiv:2303.17580](http://arxiv.org/abs/2303.17580). * Yue Yang, Wenlin Yao, Hongming Zhang, Xiaoyang Wang, Dong Yu, Jianshu Chen: “Z-LaVI: Zero-Shot Language Solver Fueled by Visual Imagination”, 2022; [arXiv:2210.12261](http://arxiv.org/abs/2210.12261). * Sheng-Yen Chou, Pin-Yu Chen, Tsung-Yi Ho: “How to Backdoor Diffusion Models?”, 2022; [arXiv:2212.05400](http://arxiv.org/abs/2212.05400). * Junyoung Seo, Wooseok Jang, Min-Seop Kwak, Jaehoon Ko, Hyeonsu Kim, Junho Kim, Jin-Hwa Kim, Jiyoung Lee, Seungryong Kim: “Let 2D Diffusion Model Know 3D-Consistency for Robust Text-to-3D Generation”, 2023; [arXiv:2303.07937](http://arxiv.org/abs/2303.07937). * Or Patashnik, Daniel Garibi, Idan Azuri, Hadar Averbuch-Elor, Daniel Cohen-Or: “Localizing Object-level Shape Variations with Text-to-Image Diffusion Models”, 2023; [arXiv:2303.11306](http://arxiv.org/abs/2303.11306). 
* Dídac Surís, Sachit Menon, Carl Vondrick: “ViperGPT: Visual Inference via Python Execution for Reasoning”, 2023; [arXiv:2303.08128](http://arxiv.org/abs/2303.08128). * Chenyang Qi, Xiaodong Cun, Yong Zhang, Chenyang Lei, Xintao Wang, Ying Shan, Qifeng Chen: “FateZero: Fusing Attentions for Zero-shot Text-based Video Editing”, 2023; [arXiv:2303.09535](http://arxiv.org/abs/2303.09535). * Sean Welleck, Jiacheng Liu, Ximing Lu, Hannaneh Hajishirzi, Yejin Choi: “NaturalProver: Grounded Mathematical Proof Generation with Language Models”, 2022; [arXiv:2205.12910](http://arxiv.org/abs/2205.12910). * Elad Richardson, Gal Metzer, Yuval Alaluf, Raja Giryes, Daniel Cohen-Or: “TEXTure: Text-Guided Texturing of 3D Shapes”, 2023; [arXiv:2302.01721](http://arxiv.org/abs/2302.01721). * Puijin Cheng, Li Lin, Yijin Huang, Huaqing He, Wenhan Luo, Xiaoying Tang: “Learning Enhancement From Degradation: A Diffusion Model For Fundus Image Enhancement”, 2023; [arXiv:2303.04603](http://arxiv.org/abs/2303.04603). * Shun Shao, Yftah Ziser, Shay Cohen: “Erasure of Unaligned Attributes from Neural Representations”, 2023; [arXiv:2302.02997](http://arxiv.org/abs/2302.02997). * Seonghyeon Ye, Hyeonbin Hwang, Sohee Yang, Hyeongu Yun, Yireun Kim, Minjoon Seo: “In-Context Instruction Learning”, 2023; [arXiv:2302.14691](http://arxiv.org/abs/2302.14691). * Shikun Liu, Linxi Fan, Edward Johns, Zhiding Yu, Chaowei Xiao, Anima Anandkumar: “Prismer: A Vision-Language Model with An Ensemble of Experts”, 2023; [arXiv:2303.02506](http://arxiv.org/abs/2303.02506). * Haoyu Chen, Zhihua Wang, Yang Yang, Qilin Sun, Kede Ma: “Learning a Deep Color Difference Metric for Photographic Images”, 2023; [arXiv:2303.14964](http://arxiv.org/abs/2303.14964). * Van-Hoang Le, Hongyu Zhang: “Log Parsing with Prompt-based Few-shot Learning”, 2023; [arXiv:2302.07435](http://arxiv.org/abs/2302.07435). * Keito Kudo, Yoichi Aoki, Tatsuki Kuribayashi, Ana Brassard, Masashi Yoshikawa, Keisuke Sakaguchi, Kentaro Inui: “Do Deep Neural Networks Capture Compositionality in Arithmetic Reasoning?”, 2023; [arXiv:2302.07866](http://arxiv.org/abs/2302.07866). * Ruoyao Wang, Peter Jansen, Marc-Alexandre Côté, Prithviraj Ammanabrolu: “Behavior Cloned Transformers are Neurosymbolic Reasoners”, 2022; [arXiv:2210.07382](http://arxiv.org/abs/2210.07382). * Martin Wessel, Tomáš Horych, Terry Ruas, Akiko Aizawa, Bela Gipp, Timo Spinde: “Introducing MBIB -- the first Media Bias Identification Benchmark Task and Dataset Collection”, 2023; [arXiv:2304.13148](http://arxiv.org/abs/2304.13148). DOI: [https://dx.doi.org/10.1145/3539618.3591882 10.1145/3539618.3591882]. * Hila Chefer, Yuval Alaluf, Yael Vinker, Lior Wolf, Daniel Cohen-Or: “Attend-and-Excite: Attention-Based Semantic Guidance for Text-to-Image Diffusion Models”, 2023; [arXiv:2301.13826](http://arxiv.org/abs/2301.13826). * Marcio Fonseca, Yftah Ziser, Shay B. Cohen: “Factorizing Content and Budget Decisions in Abstractive Summarization of Long Documents”, 2022; [arXiv:2205.12486](http://arxiv.org/abs/2205.12486). * Elad Richardson, Gal Metzer, Yuval Alaluf, Raja Giryes, Daniel Cohen-Or: “TEXTure: Text-Guided Texturing of 3D Shapes”, 2023; [arXiv:2302.01721](http://arxiv.org/abs/2302.01721). * Tianxing He, Jingyu Zhang, Tianle Wang, Sachin Kumar, Kyunghyun Cho, James Glass, Yulia Tsvetkov: “On the Blind Spots of Model-Based Evaluation Metrics for Text Generation”, 2022; [arXiv:2212.10020](http://arxiv.org/abs/2212.10020). 
* Ori Ram, Yoav Levine, Itay Dalmedigos, Dor Muhlgay, Amnon Shashua, Kevin Leyton-Brown, Yoav Shoham: “In-Context Retrieval-Augmented Language Models”, 2023; [arXiv:2302.00083](http://arxiv.org/abs/2302.00083). * Dacheng Li, Rulin Shao, Hongyi Wang, Han Guo, Eric P. Xing, Hao Zhang: “MPCFormer: fast, performant and private Transformer inference with MPC”, 2022; [arXiv:2211.01452](http://arxiv.org/abs/2211.01452). * Baolin Peng, Michel Galley, Pengcheng He, Chris Brockett, Lars Liden, Elnaz Nouri, Zhou Yu, Bill Dolan, Jianfeng Gao: “GODEL: Large-Scale Pre-Training for Goal-Directed Dialog”, 2022; [arXiv:2206.11309](http://arxiv.org/abs/2206.11309). * Egil Rønningstad, Erik Velldal, Lilja Øvrelid: “Entity-Level Sentiment Analysis (ELSA): An exploratory task survey”, 2023, Proceedings of the 29th International Conference on Computational Linguistics, 2022, pages 6773-6783; [arXiv:2304.14241](http://arxiv.org/abs/2304.14241). * Charlie Snell, Ilya Kostrikov, Yi Su, Mengjiao Yang, Sergey Levine: “Offline RL for Natural Language Generation with Implicit Language Q Learning”, 2022; [arXiv:2206.11871](http://arxiv.org/abs/2206.11871). * Zhiruo Wang, Shuyan Zhou, Daniel Fried, Graham Neubig: “Execution-Based Evaluation for Open-Domain Code Generation”, 2022; [arXiv:2212.10481](http://arxiv.org/abs/2212.10481). * Minh-Long Luu, Zeyi Huang, Eric P. Xing, Yong Jae Lee, Haohan Wang: “Expeditious Saliency-guided Mix-up through Random Gradient Thresholding”, 2022; [arXiv:2212.04875](http://arxiv.org/abs/2212.04875). * Jun Hao Liew, Hanshu Yan, Daquan Zhou, Jiashi Feng: “MagicMix: Semantic Mixing with Diffusion Models”, 2022; [arXiv:2210.16056](http://arxiv.org/abs/2210.16056). * Yaqing Wang, Subhabrata Mukherjee, Xiaodong Liu, Jing Gao, Ahmed Hassan Awadallah, Jianfeng Gao: “LiST: Lite Prompted Self-training Makes Parameter-Efficient Few-shot Learners”, 2021; [arXiv:2110.06274](http://arxiv.org/abs/2110.06274). accelerate-1.9.0/examples/000077500000000000000000000000001503574341000154315ustar00rootroot00000000000000accelerate-1.9.0/examples/README.md000066400000000000000000000324731503574341000167210ustar00rootroot00000000000000 # In this folder we showcase various full examples using 🤗 Accelerate ## Simple NLP example The [nlp_example.py](./nlp_example.py) script is a simple example to train a Bert model on a classification task ([GLUE's MRPC](https://www.microsoft.com/en-us/download/details.aspx?id=52398)). Prior to running it you should install 🤗 Dataset and 🤗 Transformers: ```bash pip install datasets evaluate transformers ``` The same script can be run in any of the following configurations: - single CPU or single GPU - multi CPUs - multi GPUs (using PyTorch distributed mode) - (multi) TPUs - fp16 (mixed-precision) or fp32 (normal precision) To run it in each of these various modes, use the following commands: - single CPU: * from a server without GPU ```bash python ./nlp_example.py ``` * from any server by passing `cpu=True` to the `Accelerator`. ```bash python ./nlp_example.py --cpu ``` * from any server with Accelerate launcher ```bash accelerate launch --cpu ./nlp_example.py ``` - single GPU: ```bash python ./nlp_example.py # from a server with a GPU ``` - with fp16 (mixed-precision) * from any server by passing `mixed_precison=fp16` to the `Accelerator`. 
```bash python ./nlp_example.py --mixed_precision fp16 ``` * from any server with Accelerate launcher ```bash accelerate launch --mixed_precision fp16 ./nlp_example.py - multi CPUs (requires Open MPI, Intel MPI, or MVAPICH) * With Accelerate config and launcher, execute the following from node 0: ```bash accelerate config # Select to have accelerate launch mpirun accelerate launch ./nlp_example.py # This will run the script on each server ``` * With Intel MPI: ```bash export CCL_WORKER_COUNT=1 export MASTER_ADDR=xxx.xxx.xxx.xxx #node0 ip mpirun -f hostfile -n 16 -ppn 4 python ./nlp_example.py ``` - multi GPUs (using PyTorch distributed mode) * With Accelerate config and launcher ```bash accelerate config # This will create a config file on your server accelerate launch ./nlp_example.py # This will run the script on your server ``` * With traditional PyTorch launcher (`python -m torch.distributed.run` can be used instead of `torchrun`) ```bash torchrun --nproc_per_node 2 ./nlp_example.py ``` - multi GPUs, multi node (several machines, using PyTorch distributed mode) * With Accelerate config and launcher, on each machine: ```bash accelerate config # This will create a config file on each server accelerate launch ./nlp_example.py # This will run the script on each server ``` * With PyTorch launcher only (`python -m torch.distributed.run` can be used instead of `torchrun`). Run this command on each node: ```bash torchrun \ # python -m torch.distributed.run --nproc_per_node 2 \ --nnodes 2 \ --rdzv_id 2299 \ # A unique job id --rdzv_backend c10d \ --rdzv_endpoint master_node_ip_address:29500 \ ./nlp_example.py ``` - (multi) TPUs * With Accelerate config and launcher ```bash accelerate config # This will create a config file on your TPU server accelerate launch ./nlp_example.py # This will run the script on each server ``` * In PyTorch: Add an `xmp.spawn` line in your script as you usually do. ## Simple vision example The [cv_example.py](./cv_example.py) script is a simple example to fine-tune a ResNet-50 on a classification task ([Ofxord-IIT Pet Dataset](https://www.robots.ox.ac.uk/~vgg/data/pets/)). The same script can be run in any of the following configurations: - single CPU or single GPU - multi CPUs - multi GPUs (using PyTorch distributed mode) - (multi) TPUs - fp16 (mixed-precision) or fp32 (normal precision) Prior to running it you should install timm and torchvision: ```bash pip install timm torchvision ``` and you should download the data with the following commands: ```bash wget https://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz tar -xzf images.tar.gz ``` To run it in each of these various modes, use the following commands: - single CPU: * from a server without GPU ```bash python ./cv_example.py --data_dir path_to_data ``` * from any server by passing `cpu=True` to the `Accelerator`. ```bash python ./cv_example.py --data_dir path_to_data --cpu ``` * from any server with Accelerate launcher ```bash accelerate launch --cpu ./cv_example.py --data_dir path_to_data ``` - single GPU: ```bash python ./cv_example.py # from a server with a GPU ``` - with fp16 (mixed-precision) * from any server by passing `mixed_precison=fp16` to the `Accelerator`. 
```bash python ./cv_example.py --data_dir path_to_data --mixed_precison fp16 ``` * from any server with Accelerate launcher ```bash accelerate launch --mixed_precison fp16 ./cv_example.py --data_dir path_to_data - multi CPUs (requires Open MPI, Intel MPI, or MVAPICH) * With Accelerate config and launcher, run the following from node 0: ```bash accelerate config --config_file config.yaml # Select to have accelerate launch mpirun accelerate launch ./cv_example.py --data_dir path_to_data # This will run the script on each server ``` * With Intel MPI, execute mpirun from node 0: ```bash export CCL_WORKER_COUNT=1 export MASTER_ADDR=xxx.xxx.xxx.xxx #node0 ip mpirun -f hostfile -n 16 -ppn 4 python ./cv_example.py --data_dir path_to_data ``` - multi GPUs (using PyTorch distributed mode) * With Accelerate config and launcher ```bash accelerate config --config_file config.yaml # This will create a config file on your server to `config.yaml` accelerate launch --config_file config.yaml ./cv_example.py --data_dir path_to_data # This will run the script on your server ``` * With traditional PyTorch launcher (`python -m torch.distributed.run` can be used instead of `torchrun`) ```bash torchrun --nproc_per_node 2 ./cv_example.py --data_dir path_to_data ``` - multi GPUs, multi node (several machines, using PyTorch distributed mode) * With Accelerate config and launcher, on each machine: ```bash accelerate config --config_file config.yaml # This will create a config file on your server to `config.yaml` accelerate launch --config_file config.yaml ./cv_example.py --data_dir path_to_data # This will run the script on each server ``` * With PyTorch launcher only (`python -m torch.distributed.run` can be used instead of `torchrun`). Run this command on each node: ```bash torchrun \ # python -m torch.distributed.run --nproc_per_node 2 \ --nnodes 2 \ --rdzv_id 2299 \ # A unique job id --rdzv_backend c10d \ --rdzv_endpoint master_node_ip_address:29500 \ ./cv_example.py --data_dir path_to_data ``` - (multi) TPUs * With Accelerate config and launcher ```bash accelerate config --config_file config.yaml # This will create a config file on your server to `config.yaml` accelerate launch --config_file config.yaml ./cv_example.py --data_dir path_to_data # This will run the script on each server ``` * In PyTorch: Add an `xmp.spawn` line in your script as you usually do. ### Simple vision example (GANs) - [huggan project](https://github.com/huggingface/community-events/tree/main/huggan) ### Using AWS SageMaker integration - [Examples showcasing AWS SageMaker integration of 🤗 Accelerate.](https://github.com/pacman100/accelerate-aws-sagemaker) ## Configuration zoo In [/config_yaml_templates](./config_yaml_templates/) we have a variety of *minimal* `config.yaml` templates and examples to help you learn how to create your own configuration files depending on the scenario. ## SLURM Scripts In [/slurm/submit_multigpu.sh](./slurm/submit_multigpu.sh) and [/slurm/submit_multinode.sh](./slurm/submit_multinode.sh) we present two scripts for running the examples on a machine with [SLURM](https://slurm.schedmd.com/documentation.html) workload manager. In [/slurm/submit_multigpu.sh](./slurm/submit_multigpu.sh) the only parameter in the launcher that needs to be modified is `--num_processes`, which determines the number of GPUs we will use. In this case, using the environment variable `$SLURM_GPUS`, we indicate that we want to utilize all the GPUs available on the node we have requested. 
In [/slurm/submit_multinode.sh](./slurm/submit_multinode.sh) we must specify the number of nodes that will be part of the training (`--num_machines`), how many GPUs we will use in total (`--num_processes`), the [`backend`](https://pytorch.org/docs/stable/elastic/run.html#note-on-rendezvous-backend), `--main_process_ip` which will be the address the master node and the `--main_process_port`. In [/slurm/submit_multicpu.sh](./slurm/submit_multicpu.sh) we must specify the number of nodes that will be part of the training (`--num_machines`), how many CPU processes we will use in total (`--num_processes`), the [`backend`](https://pytorch.org/docs/stable/elastic/run.html#note-on-rendezvous-backend), `--main_process_ip` which will be the address the master node and the `--main_process_port`. `mpirun_hostfile` specifies to run the job using MPIRun. In both scripts, we run `activateEnvironment.sh` at the beginning. This script should contain the necessary instructions to initialize the environment for execution. Below, we show an example that loads the necessary libraries ([Environment modules](https://github.com/cea-hpc/modules)), activates the Python environment, and sets up various environment variables, most of them to run the scripts in offline mode in case we don't have internet connection from the cluster. ```bash # activateEnvironment.sh module purge module load anaconda3/2020.02 cuda/10.2 cudnn/8.0.5 nccl/2.9.9 arrow/7.0.0 openmpi source activate /home/nct01/nct01328/pytorch_antoni_local export HF_HOME=/gpfs/projects/nct01/nct01328/ export HF_LOCAL_HOME=/gpfs/projects/nct01/nct01328/HF_LOCAL export HF_DATASETS_OFFLINE=1 export TRANSFORMERS_OFFLINE=1 export PYTHONPATH=/home/nct01/nct01328/transformers-in-supercomputers:$PYTHONPATH export GPUS_PER_NODE=4 ``` ## Simple Multi-GPU Hardware Launcher (using an external platform) [multigpu_remote_launcher.py](./multigpu_remote_launcher.py) is a minimal script that demonstrates launching accelerate on multiple remote GPUs, and with automatic hardware environment and dependency setup for reproducibility. You can easily customize the training function used, training arguments, hyperparameters, and type of compute hardware, and then run the script to automatically launch multi GPU training on remote hardware. This script uses [Runhouse](https://github.com/run-house/runhouse) to launch on self-hosted hardware (e.g. in your own cloud account or on-premise cluster) but there are other options for running remotely as well. Runhouse can be installed with `pip install runhouse`, and you can refer to [hardware setup](https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup) for hardware setup instructions, or this [Colab tutorial](https://colab.research.google.com/drive/1qVwYyLTCPYPSdz9ZX7BZl9Qm0A3j7RJe) for a more in-depth walkthrough. ## Finer Examples While the first two scripts are extremely barebones when it comes to what you can do with accelerate, more advanced features are documented in two other locations. ### `by_feature` examples These scripts are *individual* examples highlighting one particular feature or use-case within Accelerate. They all stem from the [nlp_example.py](./nlp_example.py) script, and any changes or modifications is denoted with a `# New Code #` comment. Read the README.md file located in the `by_feature` folder for more information. ### `complete_*` examples These two scripts contain *every* single feature currently available in Accelerate in one place, as one giant script. 
New arguments that can be passed include: - `checkpointing_steps`, whether the various states should be saved at the end of every `n` steps, or `"epoch"` for each epoch. States are then saved to folders named `step_{n}` or `epoch_{n}` - `resume_from_checkpoint`, should be used if you want to resume training off of a previous call to the script and passed a `checkpointing_steps` to it. - `with_tracking`, should be used if you want to log the training run using all available experiment trackers in your environment. Currently supported trackers include TensorBoard, Weights and Biases, and CometML. accelerate-1.9.0/examples/by_feature/000077500000000000000000000000001503574341000175565ustar00rootroot00000000000000accelerate-1.9.0/examples/by_feature/README.md000066400000000000000000000136731503574341000210470ustar00rootroot00000000000000# What are these scripts? All scripts in this folder originate from the `nlp_example.py` file, as it is a very simplistic NLP training example using Accelerate with zero extra features. From there, each further script adds in just **one** feature of Accelerate, showing how you can quickly modify your own scripts to implement these capabilities. A full example with all of these parts integrated together can be found in the `complete_nlp_example.py` script and `complete_cv_example.py` script. Adjustments to each script from the base `nlp_example.py` file can be found quickly by searching for "# New Code #" ## Example Scripts by Feature and their Arguments ### Base Example (`../nlp_example.py`) - Shows how to use `Accelerator` in an extremely simplistic PyTorch training loop - Arguments available: - `mixed_precision`, whether to use mixed precision. ("no", "fp16", or "bf16") - `cpu`, whether to train using only the CPU. (yes/no/1/0) All following scripts also accept these arguments in addition to their added ones. These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torch.distributed.run`), such as: ```bash accelerate launch ../nlp_example.py --mixed_precision fp16 --cpu 0 ``` ### Checkpointing and Resuming Training (`checkpointing.py`) - Shows how to use `Accelerator.save_state` and `Accelerator.load_state` to save or continue training - **It is assumed you are continuing off the same training script** - Arguments available: - `checkpointing_steps`, after how many steps the various states should be saved. ("epoch", 1, 2, ...) - `output_dir`, where saved state folders should be saved to, default is current working directory - `resume_from_checkpoint`, what checkpoint folder to resume from. ("epoch_0", "step_22", ...) These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torchrun`), such as: (Note, `resume_from_checkpoint` assumes that we've ran the script for one epoch with the `--checkpointing_steps epoch` flag) ```bash accelerate launch ./checkpointing.py --checkpointing_steps epoch output_dir "checkpointing_tutorial" --resume_from_checkpoint "checkpointing_tutorial/epoch_0" ``` ### Cross Validation (`cross_validation.py`) - Shows how to use `Accelerator.free_memory` and run cross validation efficiently with `datasets`. - Arguments available: - `num_folds`, the number of folds the training dataset should be split into. 
These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torchrun`), such as: ```bash accelerate launch ./cross_validation.py --num_folds 2 ``` ### Experiment Tracking (`tracking.py`) - Shows how to use `Accelerate.init_trackers` and `Accelerator.log` - Can be used with Weights and Biases, TensorBoard, or CometML. - Arguments available: - `with_tracking`, whether to load in all available experiment trackers from the environment. These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torchrun`), such as: ```bash accelerate launch ./tracking.py --with_tracking ``` ### Gradient Accumulation (`gradient_accumulation.py`) - Shows how to use `Accelerator.no_sync` to prevent gradient averaging in a distributed setup. - Arguments available: - `gradient_accumulation_steps`, the number of steps to perform before the gradients are accumulated and the optimizer and scheduler are stepped + zero_grad These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torchrun`), such as: ```bash accelerate launch ./gradient_accumulation.py --gradient_accumulation_steps 5 ``` ### LocalSGD (`local_sgd.py`) - Shows how to use `Accelerator.no_sync` to prevent gradient averaging in a distributed setup. However, unlike gradient accumulation, this method does not change the effective batch size. Local SGD can be combined with gradient accumulation. These arguments should be added at the end of any method for starting the python script (such as `python`, `accelerate launch`, `python -m torchrun`), such as: ```bash accelerate launch ./local_sgd.py --local_sgd_steps 4 ``` ### DDP Communication Hook (`ddp_comm_hook.py`) - Shows how to use DDP Communication Hooks to control and optimize gradient communication across workers in a DistributedDataParallel setup. - Arguments available: - `ddp_comm_hook`, the type of DDP communication hook to use. Choose between `no`, `fp16`, `bf16`, `power_sgd`, and `batched_power_sgd`. These arguments should be added at the end of any method for starting the python script (such as `accelerate launch`, `python -m torch.distributed.run`), such as: ```bash accelerate launch ./ddp_comm_hook.py --mixed_precision fp16 --ddp_comm_hook power_sgd ``` ### Profiler (`profiler.py`) - Shows how to use the profiling capabilities of `Accelerate` to profile PyTorch models during training. - Uses the `ProfileKwargs` handler to customize profiling options, including activities, scheduling, and additional profiling options. - Can generate and save profiling traces in JSON format for visualization in Chrome's tracing tool. Arguments available: - `--record_shapes`: If passed, records shapes for profiling. - `--profile_memory`: If passed, profiles memory usage. - `--with_stack`: If passed, profiles stack traces. - `--with_flops`: If passed, profiles floating point operations (FLOPS). - `--output_trace_dir`: If specified, saves the profiling trace to the given dir in JSON format. - `--cpu`: If passed, trains on the CPU instead of GPU. 
These arguments should be added at the end of any method for starting the Python script (such as `python`, `accelerate launch`, `python -m torchrun`), such as: ```bash accelerate launch ./profiler.py --record_shapes --profile_memory --with_flops --output_trace_dir "profiler" ``` accelerate-1.9.0/examples/by_feature/automatic_gradient_accumulation.py000066400000000000000000000233411503574341000265420ustar00rootroot00000000000000# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to combine both the gradient accumulation # and automatic batch size finder utilities of Accelerate to perfrom # automatic gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## EVAL_BATCH_SIZE = 32 def get_dataloaders(accelerator: Accelerator, batch_size: int = 16): """ Creates a set of `DataLoader`s for the `glue` dataset, using "bert-base-cased" as the tokenizer. Args: accelerator (`Accelerator`): An `Accelerator` object batch_size (`int`, *optional*): The batch size for the train and validation DataLoaders. 
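    Returns:
        `tuple`: The training and evaluation dataloaders (`train_dataloader`, `eval_dataloader`).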
""" tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") datasets = load_dataset("glue", "mrpc") def tokenize_function(examples): # max_length=None => use the model max length (it's actually the default) outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): tokenized_datasets = datasets.map( tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library tokenized_datasets = tokenized_datasets.rename_column("label", "labels") def collate_fn(examples): # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": pad_to_multiple_of = 16 elif accelerator.mixed_precision != "no": pad_to_multiple_of = 8 else: pad_to_multiple_of = None return tokenizer.pad( examples, padding="longest", pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", ) # Instantiate dataloaders. train_dataloader = DataLoader( tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size ) eval_dataloader = DataLoader( tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders get_dataloaders = mocked_dataloaders # noqa: F811 def training_function(config, args): # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": config["num_epochs"] = 2 # Initialize accelerator accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lr = config["lr"] num_epochs = int(config["num_epochs"]) seed = int(config["seed"]) observed_batch_size = int(config["batch_size"]) metric = evaluate.load("glue", "mrpc") # New Code # # We use the `find_executable_batch_size` decorator, passing in the desired observed batch size # to train on. If a CUDA OOM error occurs, it will retry this loop cutting the batch size in # half each time. 
From this, we can calculate the number of gradient accumulation steps needed # and modify the Accelerator object as a result @find_executable_batch_size(starting_batch_size=int(observed_batch_size)) def inner_training_loop(batch_size): # Since we need to modify the outside accelerator object, we need to bring it # to the local scope nonlocal accelerator # We can calculate the number of gradient accumulation steps based on the current # batch size vs the starting batch size num_gradient_accumulation_steps = observed_batch_size // batch_size # And then set it in the Accelerator directly: accelerator.gradient_accumulation_steps = num_gradient_accumulation_steps # Next we need to free all of the stored model references in the Accelerator each time accelerator.free_memory() # And set the seed so our results are reproducable each reset set_seed(seed) # Instantiate the model (we build the model here so that the seed also control new weights initialization) model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). model = model.to(accelerator.device) # Instantiate optimizer optimizer = AdamW(params=model.parameters(), lr=lr) train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size) # Instantiate scheduler lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs), ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # Now we train the model for epoch in range(num_epochs): model.train() for step, batch in enumerate(train_dataloader): # And perform gradient accumulation with accelerator.accumulate(model): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) outputs = model(**batch) loss = outputs.loss accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(eval_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=predictions, references=references, ) eval_metric = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}:", eval_metric) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() accelerator.end_training() def main(): parser = argparse.ArgumentParser(description="Simple example of training script.") parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). 
Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU.", ) parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.") args = parser.parse_args() # New Code # # We modify the starting batch size to be an observed batch size of 256, to guarentee an initial CUDA OOM config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 256} training_function(config, args) if __name__ == "__main__": main() accelerate-1.9.0/examples/by_feature/checkpointing.py000066400000000000000000000333241503574341000227620ustar00rootroot00000000000000# Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup from accelerate import Accelerator, DataLoaderConfiguration, DistributedType from accelerate.utils import set_seed ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing the checkpointing capability, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## MAX_GPU_BATCH_SIZE = 16 EVAL_BATCH_SIZE = 32 def get_dataloaders(accelerator: Accelerator, batch_size: int = 16): """ Creates a set of `DataLoader`s for the `glue` dataset, using "bert-base-cased" as the tokenizer. Args: accelerator (`Accelerator`): An `Accelerator` object batch_size (`int`, *optional*): The batch size for the train and validation DataLoaders. 
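    Returns:
        `tuple`: The training and evaluation dataloaders (`train_dataloader`, `eval_dataloader`).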
""" tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") datasets = load_dataset("glue", "mrpc") def tokenize_function(examples): # max_length=None => use the model max length (it's actually the default) outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): tokenized_datasets = datasets.map( tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library tokenized_datasets = tokenized_datasets.rename_column("label", "labels") def collate_fn(examples): # On TPU it's best to pad everything to the same length or training will be very slow. max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": pad_to_multiple_of = 16 elif accelerator.mixed_precision != "no": pad_to_multiple_of = 8 else: pad_to_multiple_of = None return tokenizer.pad( examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", ) # Instantiate dataloaders. train_dataloader = DataLoader( tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size ) eval_dataloader = DataLoader( tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders get_dataloaders = mocked_dataloaders # noqa: F811 def training_function(config, args): # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": config["num_epochs"] = 2 # Initialize accelerator dataloader_config = DataLoaderConfiguration(use_stateful_dataloader=args.use_stateful_dataloader) accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision, dataloader_config=dataloader_config) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lr = config["lr"] num_epochs = int(config["num_epochs"]) seed = int(config["seed"]) batch_size = int(config["batch_size"]) # New Code # # Parse out whether we are saving every epoch or after a certain number of batches if hasattr(args.checkpointing_steps, "isdigit"): if args.checkpointing_steps == "epoch": checkpointing_steps = args.checkpointing_steps elif args.checkpointing_steps.isdigit(): checkpointing_steps = int(args.checkpointing_steps) else: raise ValueError( f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed." 
) else: checkpointing_steps = None set_seed(seed) train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size) metric = evaluate.load("glue", "mrpc") # If the batch size is too big we use gradient accumulation gradient_accumulation_steps = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.XLA: gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE batch_size = MAX_GPU_BATCH_SIZE # Instantiate the model (we build the model here so that the seed also control new weights initialization) model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). model = model.to(accelerator.device) # Instantiate optimizer optimizer = AdamW(params=model.parameters(), lr=lr) # Instantiate scheduler lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # New Code # # We need to keep track of how many total steps we have iterated over overall_step = 0 # We also need to keep track of the stating epoch so files are named properly starting_epoch = 0 # We need to load the checkpoint back in before training here with `load_state` # The total number of epochs is adjusted based on where the state is being loaded from, # as we assume continuation of the same training script if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}") accelerator.load_state(args.resume_from_checkpoint) path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()] dirs.sort(key=os.path.getctime) path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last # Extract `epoch_{i}` or `step_{i}` training_difference = os.path.splitext(path)[0] if "epoch" in training_difference: starting_epoch = int(training_difference.replace("epoch_", "")) + 1 resume_step = None else: resume_step = int(training_difference.replace("step_", "")) starting_epoch = resume_step // len(train_dataloader) resume_step -= starting_epoch * len(train_dataloader) # Now we train the model for epoch in range(starting_epoch, num_epochs): model.train() # New Code # if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We need to skip steps until we reach the resumed step only if we are not using a stateful dataloader if not args.use_stateful_dataloader: active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step) else: active_dataloader = train_dataloader overall_step += resume_step else: # After the first iteration though, we need to go back to the original dataloader active_dataloader = train_dataloader for step, batch in 
enumerate(active_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) outputs = model(**batch) loss = outputs.loss loss = loss / gradient_accumulation_steps accelerator.backward(loss) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() # New Code # overall_step += 1 # New Code # # We save the model, optimizer, lr_scheduler, and seed states by calling `save_state` # These are saved to folders named `step_{overall_step}` # Will contain files: "pytorch_model.bin", "optimizer.bin", "scheduler.bin", and "random_states.pkl" # If mixed precision was used, will also save a "scalar.bin" file if isinstance(checkpointing_steps, int): output_dir = f"step_{overall_step}" if overall_step % checkpointing_steps == 0: if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) model.eval() for step, batch in enumerate(eval_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True` (the default). batch.to(accelerator.device) with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=predictions, references=references, ) eval_metric = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}:", eval_metric) # New Code # # We save the model, optimizer, lr_scheduler, and seed states by calling `save_state` # These are saved to folders named `epoch_{epoch}` # Will contain files: "pytorch_model.bin", "optimizer.bin", "scheduler.bin", and "random_states.pkl" # If mixed precision was used, will also save a "scalar.bin" file if checkpointing_steps == "epoch": output_dir = f"epoch_{epoch}" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) accelerator.end_training() def main(): parser = argparse.ArgumentParser(description="Simple example of training script.") parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU.", ) parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.") parser.add_argument( "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--use_stateful_dataloader", action="store_true", help="If the dataloader should be a resumable stateful dataloader.", ) args = parser.parse_args() config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(config, args) if __name__ == "__main__": main() accelerate-1.9.0/examples/by_feature/cross_validation.py000066400000000000000000000264541503574341000235060ustar00rootroot00000000000000# Copyright 2022 The HuggingFace Inc. team. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import evaluate import numpy as np import torch from datasets import DatasetDict, load_dataset # New Code # # We'll be using StratifiedKFold for this example from sklearn.model_selection import StratifiedKFold from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to perform Cross Validation, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## MAX_GPU_BATCH_SIZE = 16 EVAL_BATCH_SIZE = 32 # New Code # # We need a different `get_dataloaders` function that will build dataloaders by index def get_fold_dataloaders( accelerator: Accelerator, dataset: DatasetDict, train_idxs: list[int], valid_idxs: list[int], batch_size: int = 16 ): """ Gets a set of train, valid, and test dataloaders for a particular fold Args: accelerator (`Accelerator`): The main `Accelerator` object train_idxs (list of `int`): The split indices for the training dataset valid_idxs (list of `int`): The split indices for the validation dataset batch_size (`int`): The size of the minibatch. 
Default is 16 """ tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") datasets = DatasetDict( { "train": dataset["train"].select(train_idxs), "validation": dataset["train"].select(valid_idxs), "test": dataset["validation"], } ) def tokenize_function(examples): # max_length=None => use the model max length (it's actually the default) outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): tokenized_datasets = datasets.map( tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library tokenized_datasets = tokenized_datasets.rename_column("label", "labels") def collate_fn(examples): # On TPU it's best to pad everything to the same length or training will be very slow. max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": pad_to_multiple_of = 16 elif accelerator.mixed_precision != "no": pad_to_multiple_of = 8 else: pad_to_multiple_of = None return tokenizer.pad( examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", ) # Instantiate dataloaders. train_dataloader = DataLoader( tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size ) eval_dataloader = DataLoader( tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE ) test_dataloader = DataLoader( tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE ) return train_dataloader, eval_dataloader, test_dataloader def training_function(config, args): # New Code # test_predictions = [] # Download the dataset datasets = load_dataset("glue", "mrpc") # Create our splits kfold = StratifiedKFold(n_splits=int(args.num_folds)) # Initialize accelerator accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lr = config["lr"] num_epochs = int(config["num_epochs"]) seed = int(config["seed"]) batch_size = int(config["batch_size"]) metric = evaluate.load("glue", "mrpc") # If the batch size is too big we use gradient accumulation gradient_accumulation_steps = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.XLA: gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE batch_size = MAX_GPU_BATCH_SIZE set_seed(seed) # New Code # # Create our folds: folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"]) test_references = [] # Iterate over them for i, (train_idxs, valid_idxs) in enumerate(folds): train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders( accelerator, datasets, train_idxs, valid_idxs, ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). 
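# (With `device_placement=True`, the objects returned by `accelerator.prepare` are already moved to
# `accelerator.device`, so the explicit `.to()` calls in these examples are kept mainly for clarity.)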
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). model = model.to(accelerator.device) # Instantiate optimizer optimizer = AdamW(params=model.parameters(), lr=lr) # Instantiate scheduler lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # Now we train the model for epoch in range(num_epochs): model.train() for step, batch in enumerate(train_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) outputs = model(**batch) loss = outputs.loss loss = loss / gradient_accumulation_steps accelerator.backward(loss) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(eval_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=predictions, references=references, ) eval_metric = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}:", eval_metric) # New Code # # We also run predictions on the test set at the very end fold_predictions = [] for step, batch in enumerate(test_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"])) fold_predictions.append(predictions.cpu()) if i == 0: # We need all of the test predictions test_references.append(references.cpu()) # Use accelerator.print to print only on the main process. test_predictions.append(torch.cat(fold_predictions, dim=0)) # We now need to release all our memory and get rid of the current model, optimizer, etc model, optimizer = accelerator.free_memory(model, optimizer) # New Code # # Finally we check the accuracy of our folded results: test_references = torch.cat(test_references, dim=0) preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1) test_metric = metric.compute(predictions=preds, references=test_references) accelerator.print("Average test metrics from all folds:", test_metric) accelerator.end_training() def main(): parser = argparse.ArgumentParser(description="Simple example of training script.") parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." 
"and an Nvidia Ampere GPU.", ) parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.") # New Code # parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset") args = parser.parse_args() config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(config, args) if __name__ == "__main__": main() accelerate-1.9.0/examples/by_feature/ddp_comm_hook.py000066400000000000000000000217501503574341000227370ustar00rootroot00000000000000# Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import DDPCommunicationHookType, DistributedDataParallelKwargs ######################################################################## # This is a fully working simple example to use Accelerate # and perform ddp communication hook # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## MAX_GPU_BATCH_SIZE = 16 EVAL_BATCH_SIZE = 32 def get_dataloaders(accelerator: Accelerator, batch_size: int = 16): """ Creates a set of `DataLoader`s for the `glue` dataset, using "bert-base-cased" as the tokenizer. Args: accelerator (`Accelerator`): An `Accelerator` object batch_size (`int`, *optional*): The batch size for the train and validation DataLoaders. """ tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") datasets = load_dataset("glue", "mrpc") def tokenize_function(examples): # max_length=None => use the model max length (it's actually the default) outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): tokenized_datasets = datasets.map( tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library tokenized_datasets = tokenized_datasets.rename_column("label", "labels") def collate_fn(examples): # On TPU it's best to pad everything to the same length or training will be very slow. 
max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": pad_to_multiple_of = 16 elif accelerator.mixed_precision != "no": pad_to_multiple_of = 8 else: pad_to_multiple_of = None return tokenizer.pad( examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", ) # Instantiate dataloaders. train_dataloader = DataLoader( tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size ) eval_dataloader = DataLoader( tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders get_dataloaders = mocked_dataloaders # noqa: F811 def training_function(config, args): # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": config["num_epochs"] = 2 # New Code # ddp_comm_hook_type = DDPCommunicationHookType(args.ddp_comm_hook) ddp_comm_wrapper = DDPCommunicationHookType(args.ddp_comm_wrapper) ddp_kwargs = DistributedDataParallelKwargs(comm_hook=ddp_comm_hook_type, comm_wrapper=ddp_comm_wrapper) # Initialize accelerator accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision, kwargs_handlers=[ddp_kwargs]) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lr = config["lr"] num_epochs = int(config["num_epochs"]) seed = int(config["seed"]) batch_size = int(config["batch_size"]) metric = evaluate.load("glue", "mrpc") set_seed(seed) train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size) # Instantiate the model (we build the model here so that the seed also control new weights initialization) model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). model = model.to(accelerator.device) # Instantiate optimizer optimizer = AdamW(params=model.parameters(), lr=lr) # Instantiate scheduler lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs), ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # Now we train the model for epoch in range(num_epochs): model.train() for step, batch in enumerate(train_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device) # We use the new `accumulate` context manager to perform gradient accumulation with accelerator.accumulate(model): output = model(**batch) loss = output.loss accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(eval_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=predictions, references=references, ) eval_metric = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}:", eval_metric) accelerator.end_training() def main(): parser = argparse.ArgumentParser(description="Simple example of training script.") parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU.", ) # New Code # parser.add_argument( "--ddp_comm_hook", type=str, default="no", choices=["no", "fp16", "bf16", "power_sgd", "batched_power_sgd"], help="DDP Communication hook to use. Choose between `no`, `fp16`, `bf16`, `power_sgd`, and `batched_power_sgd`.", ) # New Code # parser.add_argument( "--ddp_comm_wrapper", type=str, default="no", choices=["no", "fp16", "bf16"], help="DDP Communication wrapper to use. Choose between `no`, `fp16`, and `bf16`.", ) parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.") args = parser.parse_args() config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(config, args) if __name__ == "__main__": main() accelerate-1.9.0/examples/by_feature/deepspeed_with_config_support.py000077500000000000000000000736551503574341000262650ustar00rootroot00000000000000#!/usr/bin/env python # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...) on a text file or a dataset without using HuggingFace Trainer. Here is the full list of checkpoints on the hub that can be fine-tuned by this script: https://huggingface.co/models?filter=text-generation """ # You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments. 
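# Illustrative sketch (not used by this script; keys and values are assumptions chosen for illustration):
# the behaviour below depends on the DeepSpeed config file set up through `accelerate config` /
# `accelerate launch`. If that config defines "optimizer"/"scheduler" blocks, the script creates
# `DummyOptim`/`DummyScheduler` placeholders and lets DeepSpeed build the real ones; otherwise plain
# `torch.optim.AdamW` and a `transformers` scheduler are used. A minimal config of the first kind might
# look roughly like this:
_EXAMPLE_DEEPSPEED_CONFIG = {
    "train_micro_batch_size_per_gpu": 8,
    "gradient_accumulation_steps": 1,
    "zero_optimization": {"stage": 2},
    "fp16": {"enabled": True},
    "optimizer": {"type": "AdamW", "params": {"lr": 5e-5, "weight_decay": 0.0}},
    "scheduler": {"type": "WarmupLR", "params": {"warmup_min_lr": 0, "warmup_max_lr": 5e-5, "warmup_num_steps": 100}},
}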
import argparse import json import logging import math import os import random from itertools import chain from pathlib import Path import datasets import torch import transformers from datasets import load_dataset from huggingface_hub import HfApi from torch.utils.data import DataLoader from tqdm.auto import tqdm from transformers import ( CONFIG_MAPPING, MODEL_MAPPING, AutoConfig, AutoModelForCausalLM, AutoTokenizer, SchedulerType, default_data_collator, get_scheduler, ) from transformers.utils.versions import require_version from accelerate import Accelerator, DistributedType from accelerate.logging import get_logger from accelerate.utils import DummyOptim, DummyScheduler, set_seed logger = get_logger(__name__) require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt") MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) def parse_args(): parser = argparse.ArgumentParser(description="Finetune a transformers model on a causal language modeling task") parser.add_argument( "--dataset_name", type=str, default=None, help="The name of the dataset to use (via the datasets library).", ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The configuration name of the dataset to use (via the datasets library).", ) parser.add_argument( "--train_file", type=str, default=None, help="A csv or a json file containing the training data." ) parser.add_argument( "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data." ) parser.add_argument( "--validation_split_percentage", default=5, help="The percentage of the train set used as validation set in case there's no validation split", ) parser.add_argument( "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=False, ) parser.add_argument( "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--use_slow_tokenizer", action="store_true", help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).", ) parser.add_argument( "--per_device_train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=8, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--learning_rate", type=float, default=5e-5, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. 
If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--lr_scheduler_type", type=SchedulerType, default="linear", help="The scheduler type to use.", choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], ) parser.add_argument( "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--model_type", type=str, default=None, help="Model type to use if training from scratch.", choices=MODEL_TYPES, ) parser.add_argument( "--block_size", type=int, default=None, help=( "Optional input sequence length after tokenization. The training dataset will be truncated in block of" " this size for training. Default to the model max input length for single sentence inputs (take into" " account special tokens)." ), ) parser.add_argument( "--preprocessing_num_workers", type=int, default=None, help="The number of processes to use for the preprocessing.", ) parser.add_argument( "--overwrite_cache", type=bool, default=False, help="Overwrite the cached training and evaluation sets" ) parser.add_argument( "--no_keep_linebreaks", action="store_true", help="Do not keep line breaks when using TXT files." ) parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument( "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`." ) parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.") parser.add_argument( "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) # New Code # # Whether to load the best model at the end of training parser.add_argument( "--load_best_model", action="store_true", help="Whether to load the best model at the end of training", ) parser.add_argument( "--with_tracking", action="store_true", help="Whether to enable experiment trackers for logging.", ) parser.add_argument( "--report_to", type=str, default="all", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,' ' `"wandb"`, `"comet_ml"`, `"dvclive"`, and `"swanlab"`. Use `"all"` (default) to report to all integrations.' "Only applicable when `--with_tracking` is passed." ), ) args = parser.parse_args() # Sanity checks if args.dataset_name is None and args.train_file is None and args.validation_file is None: raise ValueError("Need either a dataset name or a training/validation file.") else: if args.train_file is not None: extension = args.train_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, json or txt file." if args.validation_file is not None: extension = args.validation_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, json or txt file." 
if args.push_to_hub: assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed." return args # New Code # def evaluate(args, model, eval_dataloader, accelerator, eval_dataset): model.eval() losses = [] for step, batch in enumerate(eval_dataloader): with torch.no_grad(): outputs = model(**batch) loss = outputs.loss losses.append(accelerator.gather_for_metrics(loss.repeat(args.per_device_eval_batch_size))) losses = torch.cat(losses) try: eval_loss = torch.mean(losses) perplexity = math.exp(eval_loss) except OverflowError: perplexity = float("inf") return perplexity, eval_loss def main(): args = parse_args() # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. # If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers # in the environment # when using DeepSpeed, the `gradient_accumulation_steps` is properly set from the DeepSpeed plugin/config # or from `accelerate launch` via `--gradient_accumulation_steps` else # defaulting to the passed `args.gradient_accumulation_steps` accelerator = ( Accelerator( log_with=args.report_to, project_dir=args.output_dir, gradient_accumulation_steps=args.gradient_accumulation_steps, ) if args.with_tracking else Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps) ) # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.push_to_hub: api = HfApi(token=args.hub_token) # Create repo (repo_name from args or inferred) repo_name = args.hub_model_id if repo_name is None: repo_name = Path(args.output_dir).absolute().name repo_id = api.create_repo(repo_name, exist_ok=True).repo_id with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore: if "step_*" not in gitignore: gitignore.write("step_*\n") if "epoch_*" not in gitignore: gitignore.write("epoch_*\n") elif args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) accelerator.wait_for_everyone() # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if args.dataset_name is not None: # Downloading and loading a dataset from the hub. 
raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name) if "validation" not in raw_datasets.keys(): raw_datasets["validation"] = load_dataset( args.dataset_name, args.dataset_config_name, split=f"train[:{args.validation_split_percentage}%]", ) raw_datasets["train"] = load_dataset( args.dataset_name, args.dataset_config_name, split=f"train[{args.validation_split_percentage}%:]", ) else: data_files = {} dataset_args = {} if args.train_file is not None: data_files["train"] = args.train_file if args.validation_file is not None: data_files["validation"] = args.validation_file extension = args.train_file.split(".")[-1] if extension == "txt": extension = "text" dataset_args["keep_linebreaks"] = not args.no_keep_linebreaks raw_datasets = load_dataset(extension, data_files=data_files, **dataset_args) # If no validation data is there, validation_split_percentage will be used to divide the dataset. if "validation" not in raw_datasets.keys(): raw_datasets["validation"] = load_dataset( extension, data_files=data_files, split=f"train[:{args.validation_split_percentage}%]", **dataset_args, ) raw_datasets["train"] = load_dataset( extension, data_files=data_files, split=f"train[{args.validation_split_percentage}%:]", **dataset_args, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. if args.config_name: config = AutoConfig.from_pretrained(args.config_name) elif args.model_name_or_path: config = AutoConfig.from_pretrained(args.model_name_or_path) else: config = CONFIG_MAPPING[args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") if args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=not args.use_slow_tokenizer) elif args.model_name_or_path: tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=not args.use_slow_tokenizer) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) if args.model_name_or_path: model = AutoModelForCausalLM.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, ) else: logger.info("Training new model from scratch") model = AutoModelForCausalLM.from_config(config) model.resize_token_embeddings(len(tokenizer)) # Preprocessing the datasets. # First we tokenize all the texts. column_names = raw_datasets["train"].column_names text_column_name = "text" if "text" in column_names else column_names[0] def tokenize_function(examples): return tokenizer(examples[text_column_name]) with accelerator.main_process_first(): tokenized_datasets = raw_datasets.map( tokenize_function, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc="Running tokenizer on dataset", ) if args.block_size is None: block_size = tokenizer.model_max_length if block_size > 1024: logger.warning( f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). " "Picking 1024 instead. You can change that default value by passing --block_size xxx." 
) block_size = 1024 else: if args.block_size > tokenizer.model_max_length: logger.warning( f"The block_size passed ({args.block_size}) is larger than the maximum length for the model" f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}." ) block_size = min(args.block_size, tokenizer.model_max_length) # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size. def group_texts(examples): # Concatenate all texts. concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()} total_length = len(concatenated_examples[list(examples.keys())[0]]) # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can # customize this part to your needs. if total_length >= block_size: total_length = (total_length // block_size) * block_size # Split by chunks of max_len. result = { k: [t[i : i + block_size] for i in range(0, total_length, block_size)] for k, t in concatenated_examples.items() } result["labels"] = result["input_ids"].copy() return result # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower # to preprocess. # # To speed up this part, we use multiprocessing. See the documentation of the map method for more information: # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map with accelerator.main_process_first(): lm_datasets = tokenized_datasets.map( group_texts, batched=True, num_proc=args.preprocessing_num_workers, load_from_cache_file=not args.overwrite_cache, desc=f"Grouping texts in chunks of {block_size}", ) train_dataset = lm_datasets["train"] eval_dataset = lm_datasets["validation"] # Log a few random samples from the training set: for index in random.sample(range(len(train_dataset)), 3): logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") # DataLoaders creation: train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=args.per_device_train_batch_size ) eval_dataloader = DataLoader( eval_dataset, collate_fn=default_data_collator, batch_size=args.per_device_eval_batch_size ) # Optimizer # Split weights in two groups, one with weight decay and the other not. no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay, }, { "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, ] # New Code # # Creates Dummy Optimizer if `optimizer` was specified in the config file else creates Adam Optimizer optimizer_cls = ( torch.optim.AdamW if accelerator.state.deepspeed_plugin is None or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) optimizer = optimizer_cls(optimizer_grouped_parameters, lr=args.learning_rate) # On TPU, the tie weights in our model have been disconnected, so we need to restore the ties. if accelerator.distributed_type == DistributedType.XLA: model.tie_weights() # Scheduler and math around the number of training steps. 
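# A concrete example of the bookkeeping below (numbers are illustrative): with a train_dataloader of
# 10,000 batches and gradient_accumulation_steps=4 there are ceil(10000 / 4) = 2,500 optimizer updates per
# epoch; if --max_train_steps is not passed and num_train_epochs=3, max_train_steps becomes 7,500. If it
# is passed, num_train_epochs is recomputed as ceil(max_train_steps / 2500) instead.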
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / accelerator.gradient_accumulation_steps) overrode_max_train_steps = False if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True else: args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # New Code # # Creates Dummy Scheduler if `scheduler` was specified in the config file else creates `args.lr_scheduler_type` Scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.max_train_steps, ) else: lr_scheduler = DummyScheduler( optimizer, total_num_steps=args.max_train_steps, warmup_num_steps=args.num_warmup_steps ) # Prepare everything with our `accelerator`. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / accelerator.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # Figure out how many steps we should save the Accelerator states checkpointing_steps = args.checkpointing_steps if checkpointing_steps is not None and checkpointing_steps.isdigit(): checkpointing_steps = int(checkpointing_steps) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if args.with_tracking: experiment_config = vars(args) # TensorBoard cannot log Enums, need the raw value experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value accelerator.init_trackers("clm_no_trainer", experiment_config) # Train! total_batch_size = ( args.per_device_train_batch_size * accelerator.num_processes * accelerator.gradient_accumulation_steps ) logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {accelerator.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") # Only show the progress bar once on each machine. 
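# (`is_local_main_process` is True for exactly one process per machine; in a multi-node run,
# `disable=not accelerator.is_main_process` could be used instead to show a single global bar.)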
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) completed_steps = 0 starting_epoch = 0 best_metric = None best_metric_checkpoint = None # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint) accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}") path = os.path.basename(args.resume_from_checkpoint) training_difference = os.path.splitext(path)[0] if "epoch" in training_difference: starting_epoch = int(training_difference.replace("epoch_", "")) + 1 resume_step = None completed_steps = starting_epoch * num_update_steps_per_epoch else: resume_step = int(training_difference.replace("step_", "")) starting_epoch = resume_step // num_update_steps_per_epoch resume_step -= starting_epoch * num_update_steps_per_epoch completed_steps = resume_step # update progress bar if resumed from checkpoint progress_bar.update(completed_steps) for epoch in range(starting_epoch, args.num_train_epochs): model.train() if args.with_tracking: total_loss = 0 # skip new `skip_first_batches` to skip the batches when resuming from ckpt if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We need to skip steps until we reach the resumed step active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step) else: # After the first iteration though, we need to go back to the original dataloader active_dataloader = train_dataloader for step, batch in enumerate(active_dataloader): # In particular, DeepSpeed handles `gradient_accumulation` via `DeepSpeedEngine`. # Below, we use `accelerator.accumulate` if the user # wants to switch to other approaches such as plain DDP, PyTorch FSDP ... # This avoids having to change any code as things are all handled across different distributed setups. 
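# (Inside `accelerator.accumulate`, gradient synchronization is skipped until an accumulation boundary is
# reached, and outside of DeepSpeed the wrapped optimizer/scheduler steps are skipped as well; this is why
# `accelerator.sync_gradients` is checked below before advancing the progress bar.)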
with accelerator.accumulate(model): outputs = model(**batch) loss = outputs.loss accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() if accelerator.sync_gradients: progress_bar.update(1) completed_steps += 1 # We keep track of the loss at each epoch if args.with_tracking: step_loss = accelerator.reduce(loss.detach().clone()).item() total_loss += step_loss if isinstance(checkpointing_steps, int): if completed_steps % checkpointing_steps == 0: output_dir = f"step_{completed_steps}" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) if completed_steps >= args.max_train_steps: break perplexity, eval_loss = evaluate(args, model, eval_dataloader, accelerator, eval_dataset) logger.info(f"epoch {epoch}: perplexity: {perplexity} eval_loss: {eval_loss}") if args.with_tracking: accelerator.log( { "perplexity": perplexity, "eval_loss": eval_loss, "train_loss": total_loss / len(train_dataloader), "epoch": epoch, "step": completed_steps, }, step=completed_steps, ) if isinstance(checkpointing_steps, str) and checkpointing_steps == "epoch": accelerator.save_state(os.path.join(args.output_dir, f"epoch_{epoch}")) # New Code # # Tracks the best checkpoint and best metric if best_metric is None or best_metric > perplexity: best_metric = perplexity best_metric_checkpoint = os.path.join(args.output_dir, "best_checkpoint") accelerator.save_state(best_metric_checkpoint) accelerator.print(f"New best metric: {best_metric} at epoch {epoch}") accelerator.print(f"best_metric_checkpoint: {best_metric_checkpoint}") # New Code # # Loads the best checkpoint after the training is finished if args.load_best_model: accelerator.load_state(best_metric_checkpoint) # New Code # # Evaluates using the best checkpoint perplexity, eval_loss = evaluate(args, model, eval_dataloader, accelerator, eval_dataset) logger.info(f"Best model metrics: perplexity: {perplexity} eval_loss: {eval_loss}") if perplexity != best_metric: raise AssertionError( f"Best metric {best_metric} does not match the metric {perplexity} of the loaded best model." ) if args.output_dir is not None: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) # New Code # # Saves the whole/unpartitioned fp16 model when in ZeRO Stage-3 to the output directory if # `stage3_gather_16bit_weights_on_model_save` is True in DeepSpeed Config file or # `zero3_save_16bit_model` is True in DeepSpeed Plugin. # For Zero Stages 1 and 2, models are saved as usual in the output directory. # The model name saved is `pytorch_model.bin` unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save, state_dict=accelerator.get_state_dict(model), ) if accelerator.is_main_process: tokenizer.save_pretrained(args.output_dir) if args.push_to_hub: api.upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ) with open(os.path.join(args.output_dir, "all_results.json"), "w") as f: json.dump({"perplexity": perplexity, "eval_loss": eval_loss.item()}, f) accelerator.end_training() if __name__ == "__main__": main() accelerate-1.9.0/examples/by_feature/early_stopping.py000066400000000000000000000221641503574341000231740ustar00rootroot00000000000000# Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # specifically showcasing how to perform early stopping, # and builds off the `nlp_example.py` script # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## MAX_GPU_BATCH_SIZE = 16 EVAL_BATCH_SIZE = 32 def get_dataloaders(accelerator: Accelerator, batch_size: int = 16): """ Creates a set of `DataLoader`s for the `glue` dataset, using "bert-base-cased" as the tokenizer. Args: accelerator (`Accelerator`): An `Accelerator` object batch_size (`int`, *optional*): The batch size for the train and validation DataLoaders. """ tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") datasets = load_dataset("glue", "mrpc") def tokenize_function(examples): # max_length=None => use the model max length (it's actually the default) outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): tokenized_datasets = datasets.map( tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library tokenized_datasets = tokenized_datasets.rename_column("label", "labels") def collate_fn(examples): # On TPU it's best to pad everything to the same length or training will be very slow. max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": pad_to_multiple_of = 16 elif accelerator.mixed_precision != "no": pad_to_multiple_of = 8 else: pad_to_multiple_of = None return tokenizer.pad( examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", ) # Instantiate dataloaders. 
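# (`drop_last=True` on the training loader keeps every batch the same size; on the evaluation loader the
# last, smaller batch is only dropped when running fp8, where the kernels generally expect uniformly
# shaped inputs.)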
train_dataloader = DataLoader( tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True ) eval_dataloader = DataLoader( tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE, drop_last=(accelerator.mixed_precision == "fp8"), ) return train_dataloader, eval_dataloader # New code class EarlyStoppingCallback: "A callback class that helps with early stopping" def __init__(self, min_delta=0, patience=5): self.min_delta = min_delta self.patience = patience self.counter = 0 self.lowest_loss = float("inf") def check_early_stopping(self, eval_loss): delta = self.lowest_loss - eval_loss if delta >= self.min_delta: self.lowest_loss = eval_loss self.counter = 0 else: self.counter += 1 if self.counter >= self.patience: return True return False callback = EarlyStoppingCallback() def training_function(config, args): # Initialize accelerator accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lr = config["lr"] num_epochs = int(config["num_epochs"]) seed = int(config["seed"]) batch_size = int(config["batch_size"]) metric = evaluate.load("glue", "mrpc") # If the batch size is too big we use gradient accumulation gradient_accumulation_steps = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.XLA: gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE batch_size = MAX_GPU_BATCH_SIZE set_seed(seed) train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size) # Instantiate the model (we build the model here so that the seed also control new weights initialization) model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). model = model.to(accelerator.device) # Instantiate optimizer optimizer = AdamW(params=model.parameters(), lr=lr) # Instantiate scheduler lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # Now we train the model for epoch in range(num_epochs): model.train() for step, batch in enumerate(train_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. 
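# (Further down in this loop the early-stopping check is made distributed-safe: any process may call
# `accelerator.set_trigger()` when its loss stops improving, and `accelerator.check_trigger()` then
# returns True on every process once any one of them has triggered, so all ranks break out together.)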
batch.to(accelerator.device) outputs = model(**batch) loss = outputs.loss loss = loss / gradient_accumulation_steps accelerator.backward(loss) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() # New code # Check if we should stop the training on any processes if callback.check_early_stopping(loss.item()): accelerator.set_trigger() # If so, we break the loop if accelerator.check_trigger(): break model.eval() for step, batch in enumerate(eval_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=predictions, references=references, ) eval_metric = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}:", eval_metric) accelerator.end_training() def main(): parser = argparse.ArgumentParser(description="Simple example of training script.") parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU.", ) parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.") args = parser.parse_args() config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(config, args) if __name__ == "__main__": main() accelerate-1.9.0/examples/by_feature/fsdp_with_peak_mem_tracking.py000066400000000000000000000436651503574341000256550ustar00rootroot00000000000000# Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
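# Illustrative sketch (assumes CUDA is available; this helper is not used by the script below): the
# `TorchTracemalloc` context manager defined further down boils down to the following pattern for GPU
# memory: snapshot the allocator before the block, reset the peak gauge, and read the peak afterwards.
import contextlib

import torch


@contextlib.contextmanager
def _cuda_peak_memory_sketch():
    torch.cuda.reset_peak_memory_stats()  # zero the peak gauge so max_memory_allocated() reflects this block only
    begin = torch.cuda.memory_allocated()
    yield
    peak = torch.cuda.max_memory_allocated()
    # same bytes-to-megabytes conversion as b2mb() below (shift by 20 bits == divide by 2**20)
    print(f"peak extra memory used inside the block: {(peak - begin) >> 20} MB")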
import argparse import gc import os import threading import evaluate import psutil import torch from datasets import load_dataset from torch.distributed.fsdp.fully_sharded_data_parallel import FullOptimStateDictConfig, FullStateDictConfig from torch.utils.data import DataLoader from transformers import ( AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed, ) from accelerate import Accelerator, DistributedType, FullyShardedDataParallelPlugin from accelerate.utils import is_npu_available, is_xpu_available ######################################################################## # This is a fully working simple example to use Accelerate # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # - FSDP # # This example also demonstrates the checkpointing and sharding capabilities # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## MAX_GPU_BATCH_SIZE = 16 EVAL_BATCH_SIZE = 32 # New Code # # Converting Bytes to Megabytes def b2mb(x): return int(x / 2**20) # New Code # # This context manager is used to track the peak memory usage of the process class TorchTracemalloc: def __enter__(self): gc.collect() if torch.cuda.is_available(): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero self.begin = torch.cuda.memory_allocated() elif is_xpu_available(): torch.xpu.empty_cache() torch.xpu.reset_max_memory_allocated() # reset the peak gauge to zero self.begin = torch.xpu.memory_allocated() elif is_npu_available(): torch.npu.empty_cache() torch.npu.reset_max_memory_allocated() # reset the peak gauge to zero self.begin = torch.npu.memory_allocated() self.process = psutil.Process() self.cpu_begin = self.cpu_mem_used() self.peak_monitoring = True peak_monitor_thread = threading.Thread(target=self.peak_monitor_func) peak_monitor_thread.daemon = True peak_monitor_thread.start() return self def cpu_mem_used(self): """get resident set size memory for the current process""" return self.process.memory_info().rss def peak_monitor_func(self): self.cpu_peak = -1 while True: self.cpu_peak = max(self.cpu_mem_used(), self.cpu_peak) # can't sleep or will not catch the peak right (this comment is here on purpose) # time.sleep(0.001) # 1msec if not self.peak_monitoring: break def __exit__(self, *exc): self.peak_monitoring = False gc.collect() if torch.cuda.is_available(): torch.cuda.empty_cache() self.end = torch.cuda.memory_allocated() self.peak = torch.cuda.max_memory_allocated() elif is_xpu_available(): torch.xpu.empty_cache() self.end = torch.xpu.memory_allocated() self.peak = torch.xpu.max_memory_allocated() elif is_npu_available(): torch.npu.empty_cache() self.end = torch.npu.memory_allocated() self.peak = torch.npu.max_memory_allocated() self.used = b2mb(self.end - self.begin) self.peaked = b2mb(self.peak - self.begin) self.cpu_end = self.cpu_mem_used() self.cpu_used = b2mb(self.cpu_end - self.cpu_begin) self.cpu_peaked = b2mb(self.cpu_peak - self.cpu_begin) # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import 
mocked_dataloaders get_dataloaders = mocked_dataloaders # noqa: F811 def training_function(config, args): # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": config["num_epochs"] = 2 # New Code # # Pass the advanced FSDP settings not part of the accelerate config by creating fsdp_plugin fsdp_plugin = FullyShardedDataParallelPlugin( state_dict_config=FullStateDictConfig(offload_to_cpu=False, rank0_only=False), optim_state_dict_config=FullOptimStateDictConfig(offload_to_cpu=False, rank0_only=False), ) # Initialize accelerator if args.with_tracking: accelerator = Accelerator( cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="wandb", project_dir=args.logging_dir, fsdp_plugin=fsdp_plugin, ) else: accelerator = Accelerator(fsdp_plugin=fsdp_plugin) accelerator.print(accelerator.distributed_type) if hasattr(args.checkpointing_steps, "isdigit"): if args.checkpointing_steps == "epoch": checkpointing_steps = args.checkpointing_steps elif args.checkpointing_steps.isdigit(): checkpointing_steps = int(args.checkpointing_steps) else: raise ValueError( f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed." ) else: checkpointing_steps = None # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lr = config["lr"] num_epochs = int(config["num_epochs"]) seed = int(config["seed"]) batch_size = int(config["batch_size"]) # We need to initialize the trackers we use, and also store our configuration if args.with_tracking: experiment_config = vars(args) accelerator.init_trackers("fsdp_glue_no_trainer", experiment_config) tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path) datasets = load_dataset("glue", "mrpc") metric = evaluate.load("glue", "mrpc") def tokenize_function(examples): # max_length=None => use the model max length (it's actually the default) outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): tokenized_datasets = datasets.map( tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library tokenized_datasets = tokenized_datasets.rename_column("label", "labels") # If the batch size is too big we use gradient accumulation gradient_accumulation_steps = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.XLA: gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE batch_size = MAX_GPU_BATCH_SIZE def collate_fn(examples): # On TPU it's best to pad everything to the same length or training will be very slow. max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": pad_to_multiple_of = 16 elif accelerator.mixed_precision != "no": pad_to_multiple_of = 8 else: pad_to_multiple_of = None return tokenizer.pad( examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", ) # Instantiate dataloaders. 
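# (Note that `batch_size` here is per process: after these loaders go through `accelerator.prepare`, each
# process receives a different shard of the data, so the effective per-step global batch size is
# num_processes * batch_size.)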
train_dataloader = DataLoader( tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size ) eval_dataloader = DataLoader( tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE ) set_seed(seed) # Instantiate the model (we build the model here so that the seed also control new weights initialization) model = AutoModelForSequenceClassification.from_pretrained( args.model_name_or_path, return_dict=True, low_cpu_mem_usage=True ) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": 0.003, }, { "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, ] optimizer = torch.optim.AdamW(params=optimizer_grouped_parameters, lr=lr, weight_decay=2e-4) # Instantiate scheduler lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=10, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, ) model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) overall_step = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}") accelerator.load_state(args.resume_from_checkpoint) path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()] dirs.sort(key=os.path.getctime) path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last # Extract `epoch_{i}` or `step_{i}` training_difference = os.path.splitext(path)[0] if "epoch" in training_difference: num_epochs -= int(training_difference.replace("epoch_", "")) resume_step = None else: resume_step = int(training_difference.replace("step_", "")) num_epochs -= resume_step // len(train_dataloader) # If resuming by step, we also need to know exactly how far into the DataLoader we went resume_step = (num_epochs * len(train_dataloader)) - resume_step # Now we train the model for epoch in range(num_epochs): # New Code # # context manager to track the peak memory usage during the training epoch with TorchTracemalloc() as tracemalloc: model.train() if args.with_tracking: total_loss = 0 for step, batch in enumerate(train_dataloader): # We need to skip steps until we reach the resumed step if args.resume_from_checkpoint and epoch == 0: if resume_step is not None and step < resume_step: pass # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device) outputs = model(**batch) loss = outputs.loss # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() accelerator.backward(loss) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() # accelerator.print(lr_scheduler.get_lr()) overall_step += 1 if isinstance(checkpointing_steps, int): output_dir = f"step_{overall_step}" if overall_step % checkpointing_steps == 0: if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) # New Code # # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage accelerator.print(f"Memory before entering the train : {b2mb(tracemalloc.begin)}") accelerator.print(f"Memory consumed at the end of the train (end-begin): {tracemalloc.used}") accelerator.print(f"Peak Memory consumed during the train (max-begin): {tracemalloc.peaked}") accelerator.print( f"Total Peak Memory consumed during the train (max): {tracemalloc.peaked + b2mb(tracemalloc.begin)}" ) # Logging the peak memory usage of the GPU to the tracker if args.with_tracking: accelerator.log( { "train_total_peak_memory": tracemalloc.peaked + b2mb(tracemalloc.begin), }, step=epoch, ) # New Code # # context manager to track the peak memory usage during the evaluation with TorchTracemalloc() as tracemalloc: model.eval() for step, batch in enumerate(eval_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=predictions, references=references, ) eval_metric = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}:", eval_metric) if args.with_tracking: accelerator.log( { "accuracy": eval_metric["accuracy"], "f1": eval_metric["f1"], "train_loss": total_loss.item() / len(train_dataloader), }, step=epoch, ) if checkpointing_steps == "epoch": output_dir = f"epoch_{epoch}" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) # New Code # # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage accelerator.print(f"Memory before entering the eval : {b2mb(tracemalloc.begin)}") accelerator.print(f"Memory consumed at the end of the eval (end-begin): {tracemalloc.used}") accelerator.print(f"Peak Memory consumed during the eval (max-begin): {tracemalloc.peaked}") accelerator.print( f"Total Peak Memory consumed during the eval (max): {tracemalloc.peaked + b2mb(tracemalloc.begin)}" ) # Logging the peak memory usage of the GPU to the tracker if args.with_tracking: accelerator.log( { "eval_total_peak_memory": tracemalloc.peaked + b2mb(tracemalloc.begin), }, step=epoch, ) accelerator.end_training() def main(): parser = argparse.ArgumentParser(description="Simple example of training script.") parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." 
"and an Nvidia Ampere GPU.", ) parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.") parser.add_argument( "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--with_tracking", action="store_true", help="Whether to load in all available experiment trackers from the environment and use them for logging.", ) parser.add_argument( "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help="Location on where to store experiment tracking logs`", ) parser.add_argument( "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=True, ) args = parser.parse_args() config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(config, args) if __name__ == "__main__": main() accelerate-1.9.0/examples/by_feature/gradient_accumulation.py000066400000000000000000000214761503574341000245030ustar00rootroot00000000000000# Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## MAX_GPU_BATCH_SIZE = 16 EVAL_BATCH_SIZE = 32 def get_dataloaders(accelerator: Accelerator, batch_size: int = 16): """ Creates a set of `DataLoader`s for the `glue` dataset, using "bert-base-cased" as the tokenizer. Args: accelerator (`Accelerator`): An `Accelerator` object batch_size (`int`, *optional*): The batch size for the train and validation DataLoaders. 
""" tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") datasets = load_dataset("glue", "mrpc") def tokenize_function(examples): # max_length=None => use the model max length (it's actually the default) outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): tokenized_datasets = datasets.map( tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library tokenized_datasets = tokenized_datasets.rename_column("label", "labels") def collate_fn(examples): # On TPU it's best to pad everything to the same length or training will be very slow. max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": pad_to_multiple_of = 16 elif accelerator.mixed_precision != "no": pad_to_multiple_of = 8 else: pad_to_multiple_of = None return tokenizer.pad( examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", ) # Instantiate dataloaders. train_dataloader = DataLoader( tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size ) eval_dataloader = DataLoader( tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders get_dataloaders = mocked_dataloaders # noqa: F811 def training_function(config, args): # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": config["num_epochs"] = 2 # New Code # gradient_accumulation_steps = int(args.gradient_accumulation_steps) # Initialize accelerator accelerator = Accelerator( cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps ) if accelerator.distributed_type == DistributedType.XLA and gradient_accumulation_steps > 1: raise NotImplementedError( "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lr = config["lr"] num_epochs = int(config["num_epochs"]) seed = int(config["seed"]) batch_size = int(config["batch_size"]) metric = evaluate.load("glue", "mrpc") set_seed(seed) train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size) # Instantiate the model (we build the model here so that the seed also control new weights initialization) model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). 
model = model.to(accelerator.device) # Instantiate optimizer optimizer = AdamW(params=model.parameters(), lr=lr) # Instantiate scheduler lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs), ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # Now we train the model for epoch in range(num_epochs): model.train() for step, batch in enumerate(train_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. with accelerator.accumulate(model): output = model(**batch) loss = output.loss accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(eval_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=predictions, references=references, ) eval_metric = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}:", eval_metric) accelerator.end_training() def main(): parser = argparse.ArgumentParser(description="Simple example of training script.") parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU.", ) # New Code # parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="The number of minibatches to be ran before gradients are accumulated.", ) parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.") args = parser.parse_args() config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(config, args) if __name__ == "__main__": main() accelerate-1.9.0/examples/by_feature/gradient_accumulation_for_autoregressive_models.py000066400000000000000000000334161503574341000320400ustar00rootroot00000000000000# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import argparse import contextlib import math import os import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForCausalLM, AutoTokenizer, get_constant_schedule, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation on samples of variable size # # This example trains a SmolLM base model on WikiText-2 v1 # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## EVAL_BATCH_SIZE = 32 def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, max_training_samples=500): """ Creates a set of `DataLoader`s for the `Salesforce/wikitext` dataset, using "HuggingFaceTB/SmolLM-360M" as the tokenizer. Args: accelerator (`Accelerator`): An `Accelerator` object batch_size (`int`, *optional*): The batch size for the train and validation DataLoaders. """ tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/SmolLM-360M") tokenizer.pad_token = tokenizer.eos_token with accelerator.local_main_process_first(): datasets = load_dataset("Salesforce/wikitext", "wikitext-2-v1") datasets["train"] = datasets["train"].select(range(max_training_samples)) def tokenize_function(examples): # max_length=None => use the model max length (it's actually the default) outputs = tokenizer(examples["text"], truncation=True, max_length=None, return_attention_mask=False) return outputs # Filter out empty texts with accelerator.main_process_first(): datasets = datasets.filter( lambda x: len(x) > 0, input_columns="text", ) # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): tokenized_datasets = datasets.map( tokenize_function, batched=True, remove_columns=["text"], ) # Filter out empty samples with accelerator.main_process_first(): tokenized_datasets = tokenized_datasets.filter( lambda x: len(x) > 0, input_columns="input_ids", ) def collate_fn(examples): # On TPU it's best to pad everything to the same length or training will be very slow. max_length = ( 128 if accelerator.distributed_type == DistributedType.XLA else max([len(e["input_ids"]) for e in examples]) ) # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": pad_to_multiple_of = 16 elif accelerator.mixed_precision != "no": pad_to_multiple_of = 8 else: pad_to_multiple_of = None batch = tokenizer.pad( examples, padding="max_length", max_length=max_length + 1, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", ) batch["labels"] = batch["input_ids"][:, 1:] batch["input_ids"] = batch["input_ids"][:, :-1] batch["labels"] = torch.where(batch["labels"] == tokenizer.pad_token_id, -100, batch["labels"]) return batch # Instantiate dataloaders. 
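    # --- Illustrative aside (not part of the original example) ---
    # A tiny worked example of the label construction in collate_fn above: inputs
    # drop the last token, labels drop the first token (next-token prediction),
    # and padded label positions are set to -100 so the cross-entropy loss
    # ignores them. The function name and toy token ids are made up.
    def _demo_causal_lm_labels(pad_token_id=0):
        ids = torch.tensor([[5, 6, 7, pad_token_id]])   # one padded toy sequence
        labels = ids[:, 1:].clone()                      # [[6, 7, PAD]]
        input_ids = ids[:, :-1]                          # [[5, 6, 7]]
        labels = labels.masked_fill(labels == pad_token_id, -100)
        return input_ids, labels                         # ([[5, 6, 7]], [[6, 7, -100]])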
train_dataloader = DataLoader( tokenized_datasets["train"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size ) eval_dataloader = DataLoader( tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders_for_autoregressive_models get_dataloaders = mocked_dataloaders_for_autoregressive_models # noqa: F811 def training_function(config, args): # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": config["num_epochs"] = 2 gradient_accumulation_steps = int(args.gradient_accumulation_steps) # Initialize accelerator if args.with_wandb_tracking: accelerator = Accelerator( cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps, log_with="wandb", ) else: accelerator = Accelerator( cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps ) if accelerator.distributed_type == DistributedType.XLA and gradient_accumulation_steps > 1: raise NotImplementedError( "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lr = config["lr"] num_epochs = int(config["num_epochs"]) seed = int(config["seed"]) batch_size = int(config["batch_size"]) max_grad_norm = config["max_grad_norm"] # We need to initialize the trackers we use, and also store our configuration if args.with_wandb_tracking: run = os.path.split(__file__)[-1].split(".")[0] run_name = f"{accelerator.num_processes}GPU-grad{gradient_accumulation_steps}-bs{batch_size}" accelerator.init_trackers( run, config, init_kwargs={"wandb": {"name": run_name}}, ) set_seed(seed) train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size) # Instantiate the model (we build the model here so that the seed also control new weights initialization) model = AutoModelForCausalLM.from_pretrained("HuggingFaceTB/SmolLM-360M") # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). model = model.to(accelerator.device) # Instantiate optimizer optimizer = AdamW(params=model.parameters(), lr=lr) # Instantiate scheduler lr_scheduler = get_constant_schedule( optimizer=optimizer, ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) num_samples_in_epoch = len(train_dataloader) remainder = num_samples_in_epoch % gradient_accumulation_steps remainder = remainder if remainder != 0 else gradient_accumulation_steps total_gradient_updates = math.ceil(num_samples_in_epoch / gradient_accumulation_steps) total_batched_samples = 0 # Now we train the model for epoch in range(num_epochs): model.train() training_iterator = iter(train_dataloader) for update_step in range(total_gradient_updates): # In order to correctly the total number of non-padded tokens on which we'll compute the cross-entropy loss # we need to pre-load the full local batch - i.e the next per_device_batch_size * accumulation_steps samples batch_samples = [] num_batches_in_step = ( gradient_accumulation_steps if update_step != (total_gradient_updates - 1) else remainder ) for _ in range(num_batches_in_step): batch_samples += [next(training_iterator)] # get local num items in batch local_num_items_in_batch = sum([(batch["labels"].ne(-100)).sum() for batch in batch_samples]) # to compute it correctly in a multi-device DDP training, we need to gather the total number of items in the full batch. num_items_in_batch = accelerator.gather(local_num_items_in_batch).sum().item() losses = [] for i, batch in enumerate(batch_samples): # if we perform gradient accumulation in a multi-devices set-up, we want to avoid unecessary communications when accumulating # cf: https://muellerzr.github.io/blog/gradient_accumulation.html ctx = ( model.no_sync if (i < len(batch_samples) - 1 and accelerator.num_processes > 1) else contextlib.nullcontext ) with ctx(): total_batched_samples += 1 outputs = model(**batch, use_cache=False, num_items_in_batch=num_items_in_batch) loss = outputs.loss # We multiply by num_processes because the DDP calculates the average gradient across all devices whereas dividing by num_items_in_batch already takes into account all devices # Same reason for gradient_accumulation_steps, but this times it's Accelerate that calculate the average gradient across the accumulated steps # Because the loss is already divided by `num_items_in_batch` in the `transformers` code, we don't need to do it again loss = loss * gradient_accumulation_steps * accelerator.num_processes accelerator.backward(loss) losses.append(loss.detach()) # Sync gradients and perform optimization steps once every gradient_accumulation_steps grad_norm = accelerator.clip_grad_norm_(model.parameters(), max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad() losses = accelerator.gather(sum(losses)).sum().item() / ( accelerator.num_processes * gradient_accumulation_steps ) grad_norm = grad_norm.detach().item() if isinstance(grad_norm, torch.Tensor) else grad_norm accelerator.print( f"epoch {epoch} - update step {update_step}:: grad norm: {grad_norm} ::train loss: {losses}" ) if args.with_wandb_tracking: accelerator.log( { "train/grad_norm": grad_norm, "train/epoch": epoch, "train/loss": losses, }, step=update_step + total_gradient_updates * epoch, ) model.eval() losses = [] for step, batch in enumerate(eval_dataloader): with torch.no_grad(): outputs = model(**batch, use_cache=False) eval_loss = outputs.loss losses.append(accelerator.gather_for_metrics(loss.repeat(EVAL_BATCH_SIZE))) losses = torch.cat(losses) try: eval_loss = torch.mean(losses) perplexity = math.exp(eval_loss) except OverflowError: perplexity = float("inf") # Use 
accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}:: eval perplexity: {perplexity} eval_loss: {eval_loss}") if args.with_wandb_tracking: accelerator.log( { "eval/perplexity": perplexity, "eval/loss": eval_loss, "eval/epoch": epoch, }, step=update_step + total_gradient_updates * epoch, ) accelerator.end_training() def main(): parser = argparse.ArgumentParser(description="Simple example of training script.") parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="The number of minibatches to be ran before gradients are accumulated.", ) parser.add_argument( "--per_device_batch_size", type=int, default=2, help="The size of each minibatch", ) parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.") parser.add_argument( "--with_wandb_tracking", action="store_true", help="Whether to load in wandb from the environment and use them for logging.", ) args = parser.parse_args() config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": args.per_device_batch_size, "max_grad_norm": 1.0} training_function(config, args) if __name__ == "__main__": main() accelerate-1.9.0/examples/by_feature/local_sgd.py000066400000000000000000000223121503574341000220570ustar00rootroot00000000000000# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.local_sgd import LocalSGD ######################################################################## # This is a fully working simple example to use Accelerate # with LocalSGD, which is a method to synchronize model # parameters every K batches. It is different, but complementary # to gradient accumulation. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## MAX_GPU_BATCH_SIZE = 16 EVAL_BATCH_SIZE = 32 def get_dataloaders(accelerator: Accelerator, batch_size: int = 16): """ Creates a set of `DataLoader`s for the `glue` dataset, using "bert-base-cased" as the tokenizer. 
Args: accelerator (`Accelerator`): An `Accelerator` object batch_size (`int`, *optional*): The batch size for the train and validation DataLoaders. """ tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") datasets = load_dataset("glue", "mrpc") def tokenize_function(examples): # max_length=None => use the model max length (it's actually the default) outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): tokenized_datasets = datasets.map( tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library tokenized_datasets = tokenized_datasets.rename_column("label", "labels") def collate_fn(examples): # On TPU it's best to pad everything to the same length or training will be very slow. max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": pad_to_multiple_of = 16 elif accelerator.mixed_precision != "no": pad_to_multiple_of = 8 else: pad_to_multiple_of = None return tokenizer.pad( examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", ) # Instantiate dataloaders. train_dataloader = DataLoader( tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size ) eval_dataloader = DataLoader( tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders get_dataloaders = mocked_dataloaders # noqa: F811 def training_function(config, args): # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": config["num_epochs"] = 2 # New Code # gradient_accumulation_steps = int(args.gradient_accumulation_steps) local_sgd_steps = int(args.local_sgd_steps) # Initialize accelerator accelerator = Accelerator( cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lr = config["lr"] num_epochs = int(config["num_epochs"]) seed = int(config["seed"]) batch_size = int(config["batch_size"]) metric = evaluate.load("glue", "mrpc") set_seed(seed) train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size) # Instantiate the model (we build the model here so that the seed also control new weights initialization) model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). 
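    # --- Illustrative aside (not part of the original example) ---
    # A conceptual sketch of the idea described in this file's header comment
    # (synchronizing model parameters every K batches): each worker takes K
    # ordinary optimizer steps on its own data, then the workers average their
    # parameters, instead of all-reducing gradients on every step. This is NOT
    # the accelerate implementation; the training loop below simply calls
    # `local_sgd.step()` and lets the `LocalSGD` helper decide when to sync.
    def _average_parameters_sketch(model, step, k):
        import torch.distributed as dist

        if dist.is_available() and dist.is_initialized() and step % k == 0:
            world_size = dist.get_world_size()
            for param in model.parameters():
                dist.all_reduce(param.data, op=dist.ReduceOp.SUM)
                param.data /= world_size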
model = model.to(accelerator.device) # Instantiate optimizer optimizer = AdamW(params=model.parameters(), lr=lr) # Instantiate scheduler lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs), ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # Now we train the model for epoch in range(num_epochs): model.train() with LocalSGD( accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None ) as local_sgd: for step, batch in enumerate(train_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. with accelerator.accumulate(model): output = model(**batch) loss = output.loss accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # LocalSGD-specific line local_sgd.step() model.eval() for step, batch in enumerate(eval_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=predictions, references=references, ) eval_metric = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}:", eval_metric) accelerator.end_training() def main(): parser = argparse.ArgumentParser(description="Simple example of training script.") parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU.", ) # New Code # parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="The number of minibatches to be ran before gradients are accumulated.", ) parser.add_argument( "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD" ) parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.") args = parser.parse_args() config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(config, args) if __name__ == "__main__": main() accelerate-1.9.0/examples/by_feature/megatron_lm_gpt_pretraining.py000066400000000000000000000734201503574341000257160ustar00rootroot00000000000000#!/usr/bin/env python # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...) on a text file or a dataset without using HuggingFace Trainer. Here is the full list of checkpoints on the hub that can be fine-tuned by this script: https://huggingface.co/models?filter=text-generation """ # You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments. import argparse import json import logging import math import os import random from itertools import chain from pathlib import Path import datasets import torch import transformers from datasets import load_dataset from huggingface_hub import HfApi from torch.utils.data import DataLoader from tqdm.auto import tqdm from transformers import ( CONFIG_MAPPING, MODEL_MAPPING, AutoConfig, AutoModelForCausalLM, AutoTokenizer, SchedulerType, default_data_collator, get_scheduler, ) from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version from accelerate import Accelerator, DistributedType from accelerate.logging import get_logger from accelerate.utils import MegatronLMDummyScheduler, set_seed # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.23.0.dev0") logger = get_logger(__name__) require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt") MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) def parse_args(): parser = argparse.ArgumentParser(description="Finetune a transformers model on a causal language modeling task") parser.add_argument( "--dataset_name", type=str, default=None, help="The name of the dataset to use (via the datasets library).", ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The configuration name of the dataset to use (via the datasets library).", ) parser.add_argument( "--train_file", type=str, default=None, help="A csv or a json file containing the training data." ) parser.add_argument( "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data." 
) parser.add_argument( "--validation_split_percentage", default=5, help="The percentage of the train set used as validation set in case there's no validation split", ) parser.add_argument( "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=False, ) parser.add_argument( "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--use_slow_tokenizer", action="store_true", help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).", ) parser.add_argument( "--per_device_train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=8, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--learning_rate", type=float, default=5e-5, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--lr_scheduler_type", type=SchedulerType, default="linear", help="The scheduler type to use.", choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], ) parser.add_argument( "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--model_type", type=str, default=None, help="Model type to use if training from scratch.", choices=MODEL_TYPES, ) parser.add_argument( "--block_size", type=int, default=None, help=( "Optional input sequence length after tokenization. The training dataset will be truncated in block of" " this size for training. Default to the model max input length for single sentence inputs (take into" " account special tokens)." ), ) parser.add_argument( "--preprocessing_num_workers", type=int, default=None, help="The number of processes to use for the preprocessing.", ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) parser.add_argument( "--no_keep_linebreaks", action="store_true", help="Do not keep line breaks when using TXT files." ) parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument( "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`." 
) parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.") parser.add_argument( "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--with_tracking", action="store_true", help="Whether to enable experiment trackers for logging.", ) parser.add_argument( "--report_to", type=str, default="all", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,' ' `"wandb"`, `"comet_ml"`, and `"dvclive"`, and `"swanlab"`. Use `"all"` (default) to report to all integrations.' "Only applicable when `--with_tracking` is passed." ), ) args = parser.parse_args() # Sanity checks if args.dataset_name is None and args.train_file is None and args.validation_file is None: raise ValueError("Need either a dataset name or a training/validation file.") else: if args.train_file is not None: extension = args.train_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, json or txt file." if args.validation_file is not None: extension = args.validation_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, json or txt file." if args.push_to_hub: assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed." return args def main(): args = parse_args() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_clm_no_trainer", args) # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. # If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers # in the environment accelerator_log_kwargs = {} if args.with_tracking: accelerator_log_kwargs["log_with"] = args.report_to accelerator_log_kwargs["project_dir"] = args.output_dir accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps, **accelerator_log_kwargs) # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. 
if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.push_to_hub: api = HfApi(token=args.hub_token) # Create repo (repo_name from args or inferred) repo_name = args.hub_model_id if repo_name is None: repo_name = Path(args.output_dir).absolute().name repo_id = api.create_repo(repo_name, exist_ok=True).repo_id with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore: if "step_*" not in gitignore: gitignore.write("step_*\n") if "epoch_*" not in gitignore: gitignore.write("epoch_*\n") elif args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) accelerator.wait_for_everyone() # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name) if "validation" not in raw_datasets.keys(): raw_datasets["validation"] = load_dataset( args.dataset_name, args.dataset_config_name, split=f"train[:{args.validation_split_percentage}%]", ) raw_datasets["train"] = load_dataset( args.dataset_name, args.dataset_config_name, split=f"train[{args.validation_split_percentage}%:]", ) else: data_files = {} dataset_args = {} if args.train_file is not None: data_files["train"] = args.train_file if args.validation_file is not None: data_files["validation"] = args.validation_file extension = args.train_file.split(".")[-1] if extension == "txt": extension = "text" dataset_args["keep_linebreaks"] = not args.no_keep_linebreaks raw_datasets = load_dataset(extension, data_files=data_files, **dataset_args) # If no validation data is there, validation_split_percentage will be used to divide the dataset. if "validation" not in raw_datasets.keys(): raw_datasets["validation"] = load_dataset( extension, data_files=data_files, split=f"train[:{args.validation_split_percentage}%]", **dataset_args, ) raw_datasets["train"] = load_dataset( extension, data_files=data_files, split=f"train[{args.validation_split_percentage}%:]", **dataset_args, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
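    # --- Illustrative aside (not part of the original example) ---
    # With the default --validation_split_percentage of 5, the load_dataset calls
    # above request the complementary slices "train[:5%]" and "train[5%:]", i.e.
    # the first 5% of the train split is carved off as the validation set. The
    # helper name below is made up for this sketch.
    def _demo_split_specs(validation_split_percentage=5):
        validation = f"train[:{validation_split_percentage}%]"
        train = f"train[{validation_split_percentage}%:]"
        return validation, train  # ("train[:5%]", "train[5%:]")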
if args.config_name: config = AutoConfig.from_pretrained(args.config_name) elif args.model_name_or_path: config = AutoConfig.from_pretrained(args.model_name_or_path) else: config = CONFIG_MAPPING[args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") if args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=not args.use_slow_tokenizer) elif args.model_name_or_path: tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=not args.use_slow_tokenizer) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) if args.model_name_or_path: model = AutoModelForCausalLM.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, ) else: logger.info("Training new model from scratch") model = AutoModelForCausalLM.from_config(config) model.resize_token_embeddings(len(tokenizer)) # Preprocessing the datasets. # First we tokenize all the texts. column_names = raw_datasets["train"].column_names text_column_name = "text" if "text" in column_names else column_names[0] def tokenize_function(examples): return tokenizer(examples[text_column_name]) with accelerator.main_process_first(): tokenized_datasets = raw_datasets.map( tokenize_function, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc="Running tokenizer on dataset", ) if args.block_size is None: block_size = tokenizer.model_max_length if block_size > 1024: logger.warning( f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). " "Picking 1024 instead. You can change that default value by passing --block_size xxx." ) block_size = 1024 else: if args.block_size > tokenizer.model_max_length: logger.warning( f"The block_size passed ({args.block_size}) is larger than the maximum length for the model" f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}." ) block_size = min(args.block_size, tokenizer.model_max_length) # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size. def group_texts(examples): # Concatenate all texts. concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()} total_length = len(concatenated_examples[list(examples.keys())[0]]) # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can # customize this part to your needs. if total_length >= block_size: total_length = (total_length // block_size) * block_size # Split by chunks of max_len. result = { k: [t[i : i + block_size] for i in range(0, total_length, block_size)] for k, t in concatenated_examples.items() } result["labels"] = result["input_ids"].copy() return result # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower # to preprocess. # # To speed up this part, we use multiprocessing. 
See the documentation of the map method for more information: # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map with accelerator.main_process_first(): lm_datasets = tokenized_datasets.map( group_texts, batched=True, num_proc=args.preprocessing_num_workers, load_from_cache_file=not args.overwrite_cache, desc=f"Grouping texts in chunks of {block_size}", ) train_dataset = lm_datasets["train"] eval_dataset = lm_datasets["validation"] # Log a few random samples from the training set: for index in random.sample(range(len(train_dataset)), 3): logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") # DataLoaders creation: train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=args.per_device_train_batch_size ) eval_dataloader = DataLoader( eval_dataset, collate_fn=default_data_collator, batch_size=args.per_device_eval_batch_size ) # Optimizer # Split weights in two groups, one with weight decay and the other not. no_decay = ["bias", "layer_norm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay, }, { "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, ] optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True # New Code # For Megatron-LM, we need to use `MegatronLMDummyScheduler` instead of regular schedulers if accelerator.distributed_type == DistributedType.MEGATRON_LM: lr_scheduler = MegatronLMDummyScheduler( optimizer=optimizer, total_num_steps=args.max_train_steps, warmup_num_steps=args.num_warmup_steps, ) else: lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps * args.gradient_accumulation_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, ) # Prepare everything with our `accelerator`. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # On TPU, the tie weights in our model have been disconnected, so we need to restore the ties. if accelerator.distributed_type == DistributedType.XLA: model.tie_weights() # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # Figure out how many steps we should save the Accelerator states checkpointing_steps = args.checkpointing_steps if checkpointing_steps is not None and checkpointing_steps.isdigit(): checkpointing_steps = int(checkpointing_steps) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. 
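    # --- Illustrative aside (not part of the original example) ---
    # A tiny worked example of what the group_texts function defined earlier
    # does: all token lists are concatenated, the total length is truncated to a
    # multiple of block_size (the small remainder is dropped), the result is cut
    # into fixed-size chunks, and the labels are a copy of the input ids. The
    # function name and toy ids are made up for this sketch.
    def _demo_group_texts(block_size=4):
        examples = {"input_ids": [[1, 2, 3], [4, 5], [6, 7, 8, 9, 10]]}  # 10 tokens total
        concatenated = [tok for seq in examples["input_ids"] for tok in seq]
        total_length = (len(concatenated) // block_size) * block_size    # 10 -> 8, 2 dropped
        chunks = [concatenated[i : i + block_size] for i in range(0, total_length, block_size)]
        return {"input_ids": chunks, "labels": [c[:] for c in chunks]}   # [[1,2,3,4],[5,6,7,8]]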
if args.with_tracking: experiment_config = vars(args) # TensorBoard cannot log Enums, need the raw value experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value accelerator.init_trackers("clm_no_trainer", experiment_config) # Train! # New Code # For Megatron-LM, we need to get `global_batch_size` from megatron_lm_plugin # as it handles the specifics related to data parallelism, tensor model parallelism and pipeline parallelism if accelerator.distributed_type == DistributedType.MEGATRON_LM: total_batch_size = accelerator.state.megatron_lm_plugin.global_batch_size else: total_batch_size = ( args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps ) logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") # Only show the progress bar once on each machine. progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) completed_steps = 0 starting_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}") accelerator.load_state(args.resume_from_checkpoint) path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()] dirs.sort(key=os.path.getctime) path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last # Extract `epoch_{i}` or `step_{i}` training_difference = os.path.splitext(path)[0] if "epoch" in training_difference: starting_epoch = int(training_difference.replace("epoch_", "")) + 1 resume_step = None else: # need to multiply `gradient_accumulation_steps` to reflect real steps resume_step = int(training_difference.replace("step_", "")) * args.gradient_accumulation_steps starting_epoch = resume_step // len(train_dataloader) resume_step -= starting_epoch * len(train_dataloader) # update the progress_bar if load from checkpoint progress_bar.update(starting_epoch * num_update_steps_per_epoch) completed_steps = starting_epoch * num_update_steps_per_epoch for epoch in range(starting_epoch, args.num_train_epochs): model.train() if args.with_tracking: total_loss = 0 for step, batch in enumerate(train_dataloader): # We need to skip steps until we reach the resumed step if args.resume_from_checkpoint and epoch == starting_epoch: if resume_step is not None and step < resume_step: if step % args.gradient_accumulation_steps == 0: progress_bar.update(1) completed_steps += 1 continue with accelerator.accumulate(model): outputs = model(**batch) loss = outputs.loss # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) completed_steps += 1 if isinstance(checkpointing_steps, int): if 
completed_steps % checkpointing_steps == 0: output_dir = f"step_{completed_steps}" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) if completed_steps >= args.max_train_steps: break model.eval() losses = [] for step, batch in enumerate(eval_dataloader): with torch.no_grad(): outputs = model(**batch) loss = outputs.loss # New Code # For Megatron-LM, the losses are already averaged across the data parallel group if accelerator.distributed_type == DistributedType.MEGATRON_LM: losses.append(loss) else: losses.append(accelerator.gather_for_metrics(loss.repeat(args.per_device_eval_batch_size))) try: if accelerator.distributed_type == DistributedType.MEGATRON_LM: losses = torch.tensor(losses) else: losses = torch.cat(losses) eval_loss = torch.mean(losses) perplexity = math.exp(eval_loss) except OverflowError: perplexity = float("inf") logger.info(f"epoch {epoch}: perplexity: {perplexity} eval_loss: {eval_loss}") if args.with_tracking: accelerator.log( { "perplexity": perplexity, "eval_loss": eval_loss, "train_loss": total_loss.item() / len(train_dataloader), "epoch": epoch, "step": completed_steps, }, step=completed_steps, ) if args.push_to_hub and epoch < args.num_train_epochs - 1: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save ) if accelerator.is_main_process: tokenizer.save_pretrained(args.output_dir) api.upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message=f"Training in progress epoch {epoch}", run_as_future=True, ) if args.checkpointing_steps == "epoch": output_dir = f"epoch_{epoch}" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) # this is causing some issue with Megatron-LM when using `wandb` at the end of the main function. # Everything works fine inspite of commenting this out. (wandb finishes/closes the run without error) # if args.with_tracking: # accelerator.end_training() if args.output_dir is not None: accelerator.wait_for_everyone() # New Code # For Megatron-LM, we need to save the model using `accelerator.save_state` if accelerator.distributed_type == DistributedType.MEGATRON_LM: accelerator.save_state(args.output_dir) else: unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save ) if accelerator.is_main_process: tokenizer.save_pretrained(args.output_dir) if args.push_to_hub: api.upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ) with open(os.path.join(args.output_dir, "all_results.json"), "w") as f: json.dump({"perplexity": perplexity}, f) accelerator.end_training() if __name__ == "__main__": main() accelerate-1.9.0/examples/by_feature/memory.py000066400000000000000000000222261503574341000214440ustar00rootroot00000000000000# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## MAX_GPU_BATCH_SIZE = 16 EVAL_BATCH_SIZE = 32 def get_dataloaders(accelerator: Accelerator, batch_size: int = 16): """ Creates a set of `DataLoader`s for the `glue` dataset, using "bert-base-cased" as the tokenizer. Args: accelerator (`Accelerator`): An `Accelerator` object batch_size (`int`, *optional*): The batch size for the train and validation DataLoaders. """ tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") datasets = load_dataset("glue", "mrpc") def tokenize_function(examples): # max_length=None => use the model max length (it's actually the default) outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): tokenized_datasets = datasets.map( tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library tokenized_datasets = tokenized_datasets.rename_column("label", "labels") def collate_fn(examples): # On TPU it's best to pad everything to the same length or training will be very slow. max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": pad_to_multiple_of = 16 elif accelerator.mixed_precision != "no": pad_to_multiple_of = 8 else: pad_to_multiple_of = None return tokenizer.pad( examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", ) # Instantiate dataloaders. 
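    # Note: `batch_size` here is the per-process batch size. After these loaders go through
    # `accelerator.prepare`, each process draws different batches of this size from the dataset,
    # so the effective global batch size becomes `batch_size * accelerator.num_processes`.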
train_dataloader = DataLoader( tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size ) eval_dataloader = DataLoader( tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders get_dataloaders = mocked_dataloaders # noqa: F811 def training_function(config, args): # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": config["num_epochs"] = 2 # Initialize accelerator accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lr = config["lr"] num_epochs = int(config["num_epochs"]) seed = int(config["seed"]) batch_size = int(config["batch_size"]) metric = evaluate.load("glue", "mrpc") # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=batch_size) def inner_training_loop(batch_size): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(seed) # Instantiate the model (we build the model here so that the seed also control new weights initialization) model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). model = model.to(accelerator.device) # Instantiate optimizer optimizer = AdamW(params=model.parameters(), lr=lr) train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size) # Instantiate scheduler lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs), ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # Now we train the model for epoch in range(num_epochs): model.train() for step, batch in enumerate(train_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) outputs = model(**batch) loss = outputs.loss accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(eval_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device) with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=predictions, references=references, ) eval_metric = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}:", eval_metric) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() accelerator.end_training() def main(): parser = argparse.ArgumentParser(description="Simple example of training script.") parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU.", ) parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.") args = parser.parse_args() config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(config, args) if __name__ == "__main__": main() accelerate-1.9.0/examples/by_feature/multi_process_metrics.py000066400000000000000000000231061503574341000245500ustar00rootroot00000000000000# Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to properly calculate the metrics on the # validation dataset when in a distributed system, and builds off the # `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## MAX_GPU_BATCH_SIZE = 16 EVAL_BATCH_SIZE = 32 def get_dataloaders(accelerator: Accelerator, batch_size: int = 16): """ Creates a set of `DataLoader`s for the `glue` dataset, using "bert-base-cased" as the tokenizer. 
Args: accelerator (`Accelerator`): An `Accelerator` object batch_size (`int`, *optional*): The batch size for the train and validation DataLoaders. """ tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") datasets = load_dataset("glue", "mrpc") def tokenize_function(examples): # max_length=None => use the model max length (it's actually the default) outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): tokenized_datasets = datasets.map( tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library tokenized_datasets = tokenized_datasets.rename_column("label", "labels") def collate_fn(examples): # On TPU it's best to pad everything to the same length or training will be very slow. max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": pad_to_multiple_of = 16 elif accelerator.mixed_precision != "no": pad_to_multiple_of = 8 else: pad_to_multiple_of = None return tokenizer.pad( examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", ) # Instantiate dataloaders. train_dataloader = DataLoader( tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size ) eval_dataloader = DataLoader( tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders get_dataloaders = mocked_dataloaders # noqa: F811 def training_function(config, args): # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": config["num_epochs"] = 2 # Initialize accelerator accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lr = config["lr"] num_epochs = int(config["num_epochs"]) seed = int(config["seed"]) batch_size = int(config["batch_size"]) metric = evaluate.load("glue", "mrpc") # If the batch size is too big we use gradient accumulation gradient_accumulation_steps = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.XLA: gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE batch_size = MAX_GPU_BATCH_SIZE set_seed(seed) train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size) # Instantiate the model (we build the model here so that the seed also control new weights initialization) model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). 
model = model.to(accelerator.device) # Instantiate optimizer optimizer = AdamW(params=model.parameters(), lr=lr) # Instantiate scheduler lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # Now we train the model for epoch in range(num_epochs): model.train() for step, batch in enumerate(train_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) outputs = model(**batch) loss = outputs.loss loss = loss / gradient_accumulation_steps accelerator.backward(loss) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() samples_seen = 0 for step, batch in enumerate(eval_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) predictions, references = accelerator.gather((predictions, batch["labels"])) # New Code # # First we check if it's a distributed system if accelerator.use_distributed: # Then see if we're on the last batch of our eval dataloader if step == len(eval_dataloader) - 1: # Last batch needs to be truncated on distributed systems as it contains additional samples predictions = predictions[: len(eval_dataloader.dataset) - samples_seen] references = references[: len(eval_dataloader.dataset) - samples_seen] else: # Otherwise we add the number of samples seen samples_seen += references.shape[0] # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`: # accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=predictions, references=references, ) eval_metric = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}:", eval_metric) accelerator.end_training() def main(): parser = argparse.ArgumentParser(description="Simple example of training script.") parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU.", ) parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.") args = parser.parse_args() config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(config, args) if __name__ == "__main__": main() accelerate-1.9.0/examples/by_feature/profiler.py000066400000000000000000000227161503574341000217620ustar00rootroot00000000000000# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import ProfileKwargs ######################################################################## # This is a fully working simple example to use Accelerate # and perform profiling # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single device (CUDA GPU, Intel XPU etc.) # - multi devices (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## MAX_GPU_BATCH_SIZE = 16 EVAL_BATCH_SIZE = 32 def get_dataloaders(accelerator: Accelerator, batch_size: int = 16): """ Creates a set of `DataLoader`s for the `glue` dataset, using "bert-base-cased" as the tokenizer. Args: accelerator (`Accelerator`): An `Accelerator` object batch_size (`int`, *optional*): The batch size for the train and validation DataLoaders. """ tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") datasets = load_dataset("glue", "mrpc") def tokenize_function(examples): # max_length=None => use the model max length (it's actually the default) outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): tokenized_datasets = datasets.map( tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library tokenized_datasets = tokenized_datasets.rename_column("label", "labels") def collate_fn(examples): # On TPU it's best to pad everything to the same length or training will be very slow. max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": pad_to_multiple_of = 16 elif accelerator.mixed_precision != "no": pad_to_multiple_of = 8 else: pad_to_multiple_of = None return tokenizer.pad( examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", ) # Instantiate dataloaders. 
train_dataloader = DataLoader( tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size ) eval_dataloader = DataLoader( tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders get_dataloaders = mocked_dataloaders # noqa: F811 def training_function(config, args): # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": config["num_epochs"] = 2 # New Code # profile_kwargs = ProfileKwargs( record_shapes=args.record_shapes, profile_memory=args.profile_memory, with_flops=args.with_flops, output_trace_dir=args.output_trace_dir, ) # Initialize accelerator accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision, kwargs_handlers=[profile_kwargs]) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lr = config["lr"] num_epochs = int(config["num_epochs"]) seed = int(config["seed"]) batch_size = int(config["batch_size"]) metric = evaluate.load("glue", "mrpc") set_seed(seed) train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size) # Instantiate the model (we build the model here so that the seed also control new weights initialization) model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). model = model.to(accelerator.device) # Instantiate optimizer optimizer = AdamW(params=model.parameters(), lr=lr) # Instantiate scheduler lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs), ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # Now we train the model for epoch in range(num_epochs): model.train() # New Code # with accelerator.profile() as prof: for step, batch in enumerate(train_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) # We use the new `accumulate` context manager to perform gradient accumulation with accelerator.accumulate(model): output = model(**batch) loss = output.loss accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # New Code # accelerator.print( prof.key_averages().table( sort_by="self_cpu_time_total" if args.cpu else f"self_{accelerator.device.type}_time_total", row_limit=-1, ) ) model.eval() for step, batch in enumerate(eval_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device) with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=predictions, references=references, ) eval_metric = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}:", eval_metric) accelerator.end_training() def main(): parser = argparse.ArgumentParser(description="Simple example of training script.") parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU or an Intel XPU.", ) # New Code # parser.add_argument( "--record_shapes", action="store_true", default=False, help="If passed, will record shapes for profiling.", ) # New Code # parser.add_argument( "--profile_memory", action="store_true", default=False, help="If passed, will profile memory.", ) # New Code # parser.add_argument( "--with_flops", action="store_true", default=False, help="If passed, will profile flops.", ) # New Code # parser.add_argument( "--output_trace_dir", type=str, default=None, help="If passed, will save a json trace to the specified path.", ) parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.") args = parser.parse_args() config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(config, args) if __name__ == "__main__": main() accelerate-1.9.0/examples/by_feature/schedule_free.py000066400000000000000000000206531503574341000227330ustar00rootroot00000000000000# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import evaluate import torch from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import is_schedulefree_available if is_schedulefree_available(): import schedulefree else: raise ImportError( "This example requires the `schedulefree` library. 
Please install it with `pip install schedulefree`" ) ######################################################################## # This is a fully working simple example to use Accelerate and Facebook's # scheduler-free optimizer: https://github.com/facebookresearch/schedule_free/ # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## MAX_GPU_BATCH_SIZE = 16 EVAL_BATCH_SIZE = 32 def get_dataloaders(accelerator: Accelerator, batch_size: int = 16): """ Creates a set of `DataLoader`s for the `glue` dataset, using "bert-base-cased" as the tokenizer. Args: accelerator (`Accelerator`): An `Accelerator` object batch_size (`int`, *optional*): The batch size for the train and validation DataLoaders. """ tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") datasets = load_dataset("glue", "mrpc") def tokenize_function(examples): # max_length=None => use the model max length (it's actually the default) outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): tokenized_datasets = datasets.map( tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library tokenized_datasets = tokenized_datasets.rename_column("label", "labels") def collate_fn(examples): # For Torchxla, it's best to pad everything to the same length or training will be very slow. max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": pad_to_multiple_of = 16 elif accelerator.mixed_precision != "no": pad_to_multiple_of = 8 else: pad_to_multiple_of = None return tokenizer.pad( examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", ) # Instantiate dataloaders. 
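    # Note: `drop_last=True` below keeps every training batch at the full `batch_size`; the eval
    # loader additionally drops its final (possibly smaller) batch only when running in fp8,
    # where unevenly sized batches can be problematic for the fp8 kernels.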
train_dataloader = DataLoader( tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True ) eval_dataloader = DataLoader( tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE, drop_last=(accelerator.mixed_precision == "fp8"), ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders get_dataloaders = mocked_dataloaders # noqa: F811 def training_function(config, args): # Initialize accelerator accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lr = config["lr"] num_epochs = int(config["num_epochs"]) seed = int(config["seed"]) batch_size = int(config["batch_size"]) metric = evaluate.load("glue", "mrpc") # If the batch size is too big we use gradient accumulation gradient_accumulation_steps = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.XLA: gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE batch_size = MAX_GPU_BATCH_SIZE set_seed(seed) train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size) # Instantiate the model (we build the model here so that the seed also control new weights initialization) model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). model = model.to(accelerator.device) # Instantiate optimizer with warmup steps optimizer = schedulefree.AdamWScheduleFree( model.parameters(), lr=lr, warmup_steps=100, ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader ) # Now we train the model for epoch in range(num_epochs): model.train() optimizer.train() for step, batch in enumerate(train_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) outputs = model(**batch) loss = outputs.loss loss = loss / gradient_accumulation_steps accelerator.backward(loss) if step % gradient_accumulation_steps == 0: optimizer.step() optimizer.zero_grad() model.eval() optimizer.eval() for step, batch in enumerate(eval_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=predictions, references=references, ) eval_metric = metric.compute() # Use accelerator.print to print only on the main process. 
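        # `eval_metric` is a dict computed by the GLUE/MRPC metric, e.g. {"accuracy": ..., "f1": ...}.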
accelerator.print(f"epoch {epoch}:", eval_metric) accelerator.end_training() def main(): parser = argparse.ArgumentParser(description="Simple example of training script.") parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU.", ) parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.") args = parser.parse_args() config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(config, args) if __name__ == "__main__": main() accelerate-1.9.0/examples/by_feature/tracking.py000066400000000000000000000247441503574341000217450ustar00rootroot00000000000000# Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing the experiment tracking capability, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## MAX_GPU_BATCH_SIZE = 16 EVAL_BATCH_SIZE = 32 def get_dataloaders(accelerator: Accelerator, batch_size: int = 16): """ Creates a set of `DataLoader`s for the `glue` dataset, using "bert-base-cased" as the tokenizer. Args: accelerator (`Accelerator`): An `Accelerator` object batch_size (`int`, *optional*): The batch size for the train and validation DataLoaders. 
""" tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") datasets = load_dataset("glue", "mrpc") def tokenize_function(examples): # max_length=None => use the model max length (it's actually the default) outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): tokenized_datasets = datasets.map( tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library tokenized_datasets = tokenized_datasets.rename_column("label", "labels") def collate_fn(examples): # On TPU it's best to pad everything to the same length or training will be very slow. max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": pad_to_multiple_of = 16 elif accelerator.mixed_precision != "no": pad_to_multiple_of = 8 else: pad_to_multiple_of = None return tokenizer.pad( examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", ) # Instantiate dataloaders. train_dataloader = DataLoader( tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size ) eval_dataloader = DataLoader( tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders get_dataloaders = mocked_dataloaders # noqa: F811 def training_function(config, args): # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": config["num_epochs"] = 2 # Initialize Accelerator # New Code # # We pass in "all" to `log_with` to grab all available trackers in the environment # Note: If using a custom `Tracker` class, should be passed in here such as: # >>> log_with = ["all", MyCustomTrackerClassInstance()] if args.with_tracking: accelerator = Accelerator( cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir ) else: accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lr = config["lr"] num_epochs = int(config["num_epochs"]) seed = int(config["seed"]) batch_size = int(config["batch_size"]) set_seed(seed) train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size) metric = evaluate.load("glue", "mrpc") # If the batch size is too big we use gradient accumulation gradient_accumulation_steps = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.XLA: gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE batch_size = MAX_GPU_BATCH_SIZE # Instantiate the model (we build the model here so that the seed also control new weights initialization) model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). 
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). model = model.to(accelerator.device) # Instantiate optimizer optimizer = AdamW(params=model.parameters(), lr=lr) # Instantiate scheduler lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # New Code # # We need to initialize the trackers we use. Overall configurations can also be stored if args.with_tracking: run = os.path.split(__file__)[-1].split(".")[0] accelerator.init_trackers(run, config) # Now we train the model for epoch in range(num_epochs): model.train() # New Code # # For our tracking example, we will log the total loss of each epoch if args.with_tracking: total_loss = 0 for step, batch in enumerate(train_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) outputs = model(**batch) loss = outputs.loss # New Code # if args.with_tracking: total_loss += loss.detach().float() loss = loss / gradient_accumulation_steps accelerator.backward(loss) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(eval_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True` (the default). batch.to(accelerator.device) with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=predictions, references=references, ) eval_metric = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}:", eval_metric) # New Code # # To actually log, we call `Accelerator.log` # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int` if args.with_tracking: accelerator.log( { "accuracy": eval_metric["accuracy"], "f1": eval_metric["f1"], "train_loss": total_loss.item() / len(train_dataloader), "epoch": epoch, }, step=epoch, ) accelerator.end_training() def main(): parser = argparse.ArgumentParser(description="Simple example of training script.") parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." 
"and an Nvidia Ampere GPU.", ) parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.") parser.add_argument( "--with_tracking", action="store_true", help="Whether to load in all available experiment trackers from the environment and use them for logging.", ) parser.add_argument( "--project_dir", type=str, default="logs", help="Location on where to store experiment tracking logs` and relevent project information", ) args = parser.parse_args() config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(config, args) if __name__ == "__main__": main() accelerate-1.9.0/examples/complete_cv_example.py000066400000000000000000000332121503574341000220170ustar00rootroot00000000000000# Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import re import numpy as np import PIL import torch from timm import create_model from torch.optim.lr_scheduler import OneCycleLR from torch.utils.data import DataLoader, Dataset from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor from accelerate import Accelerator, DataLoaderConfiguration ######################################################################## # This is a fully working simple example to use Accelerate # # This example trains a ResNet50 on the Oxford-IIT Pet Dataset # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## # Function to get the label from the filename def extract_label(fname): stem = fname.split(os.path.sep)[-1] return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0] class PetsDataset(Dataset): def __init__(self, file_names, image_transform=None, label_to_id=None): self.file_names = file_names self.image_transform = image_transform self.label_to_id = label_to_id def __len__(self): return len(self.file_names) def __getitem__(self, idx): fname = self.file_names[idx] raw_image = PIL.Image.open(fname) image = raw_image.convert("RGB") if self.image_transform is not None: image = self.image_transform(image) label = extract_label(fname) if self.label_to_id is not None: label = self.label_to_id[label] return {"image": image, "label": label} def training_function(config, args): # Initialize accelerator dataloader_config = DataLoaderConfiguration(use_stateful_dataloader=args.use_stateful_dataloader) if args.with_tracking: accelerator = Accelerator( cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir, dataloader_config=dataloader_config, ) else: accelerator = Accelerator( cpu=args.cpu, mixed_precision=args.mixed_precision, dataloader_config=dataloader_config ) # 
Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lr = config["lr"] num_epochs = int(config["num_epochs"]) seed = int(config["seed"]) batch_size = int(config["batch_size"]) image_size = config["image_size"] if not isinstance(image_size, (list, tuple)): image_size = (image_size, image_size) # Parse out whether we are saving every epoch or after a certain number of batches if hasattr(args.checkpointing_steps, "isdigit"): if args.checkpointing_steps == "epoch": checkpointing_steps = args.checkpointing_steps elif args.checkpointing_steps.isdigit(): checkpointing_steps = int(args.checkpointing_steps) else: raise ValueError( f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed." ) else: checkpointing_steps = None # We need to initialize the trackers we use, and also store our configuration if args.with_tracking: run = os.path.split(__file__)[-1].split(".")[0] accelerator.init_trackers(run, config) # Grab all the image filenames file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")] # Build the label correspondences all_labels = [extract_label(fname) for fname in file_names] id_to_label = list(set(all_labels)) id_to_label.sort() label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)} # Set the seed before splitting the data. np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) # Split our filenames between train and validation random_perm = np.random.permutation(len(file_names)) cut = int(0.8 * len(file_names)) train_split = random_perm[:cut] eval_split = random_perm[cut:] # For training we use a simple RandomResizedCrop train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()]) train_dataset = PetsDataset( [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id ) # For evaluation, we use a deterministic Resize eval_tfm = Compose([Resize(image_size), ToTensor()]) eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id) # Instantiate dataloaders. train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4) eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4) # Instantiate the model (we build the model here so that the seed also control new weights initialization) model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id)) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). model = model.to(accelerator.device) # Freezing the base model for param in model.parameters(): param.requires_grad = False for param in model.get_classifier().parameters(): param.requires_grad = True # We normalize the batches of images to be a bit faster. 
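    # The `[None, :, None, None]` indexing reshapes timm's per-channel mean/std to shape
    # (1, C, 1, 1) so they broadcast over image batches in (N, C, H, W) layout.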
mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device) std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device) # Instantiate optimizer optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25) # Instantiate learning rate scheduler lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader)) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # We need to keep track of how many total steps we have iterated over overall_step = 0 # We also need to keep track of the starting epoch so files are named properly starting_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}") accelerator.load_state(args.resume_from_checkpoint) path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()] dirs.sort(key=os.path.getctime) path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last # Extract `epoch_{i}` or `step_{i}` training_difference = os.path.splitext(path)[0] if "epoch" in training_difference: starting_epoch = int(training_difference.replace("epoch_", "")) + 1 resume_step = None else: resume_step = int(training_difference.replace("step_", "")) starting_epoch = resume_step // len(train_dataloader) resume_step -= starting_epoch * len(train_dataloader) # Now we train the model for epoch in range(starting_epoch, num_epochs): model.train() if args.with_tracking: total_loss = 0 if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We need to skip steps until we reach the resumed step active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step) overall_step += resume_step else: # After the first iteration though, we need to go back to the original dataloader active_dataloader = train_dataloader for batch in active_dataloader: # We could avoid this line since we set the accelerator with `device_placement=True`. batch = {k: v.to(accelerator.device) for k, v in batch.items()} inputs = (batch["image"] - mean) / std outputs = model(inputs) loss = torch.nn.functional.cross_entropy(outputs, batch["label"]) # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 if isinstance(checkpointing_steps, int): output_dir = f"step_{overall_step}" if overall_step % checkpointing_steps == 0: if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) model.eval() accurate = 0 num_elems = 0 for step, batch in enumerate(eval_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. 
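        # (Batches from these dataloaders are plain dicts of tensors, so each value is moved explicitly.)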
batch = {k: v.to(accelerator.device) for k, v in batch.items()} inputs = (batch["image"] - mean) / std with torch.no_grad(): outputs = model(inputs) predictions = outputs.argmax(dim=-1) predictions, references = accelerator.gather_for_metrics((predictions, batch["label"])) accurate_preds = predictions == references num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() eval_metric = accurate.item() / num_elems # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}") if args.with_tracking: accelerator.log( { "accuracy": 100 * eval_metric, "train_loss": total_loss.item() / len(train_dataloader), "epoch": epoch, }, step=overall_step, ) if checkpointing_steps == "epoch": output_dir = f"epoch_{epoch}" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) accelerator.end_training() def main(): parser = argparse.ArgumentParser(description="Simple example of training script.") parser.add_argument("--data_dir", required=True, help="The data folder on disk.") parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.") parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU.", ) parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.") parser.add_argument( "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--use_stateful_dataloader", action="store_true", help="If the dataloader should be a resumable stateful dataloader.", ) parser.add_argument( "--with_tracking", action="store_true", help="Whether to load in all available experiment trackers from the environment and use them for logging.", ) parser.add_argument( "--project_dir", type=str, default="logs", help="Location on where to store experiment tracking logs` and relevent project information", ) args = parser.parse_args() config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224} training_function(config, args) if __name__ == "__main__": main() accelerate-1.9.0/examples/complete_nlp_example.py000066400000000000000000000317361503574341000222110ustar00rootroot00000000000000# Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
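# Example launch for this script (hypothetical values, adjust to your own machine), e.g. with two GPUs:
#   accelerate launch --num_processes 2 examples/complete_nlp_example.py \
#       --mixed_precision fp16 --checkpointing_steps epoch --with_tracking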
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DataLoaderConfiguration, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # This example also demonstrates the checkpointing and sharding capabilities # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## MAX_GPU_BATCH_SIZE = 16 EVAL_BATCH_SIZE = 32 def training_function(config, args): # Initialize accelerator dataloader_config = DataLoaderConfiguration(use_stateful_dataloader=args.use_stateful_dataloader) if args.with_tracking: accelerator = Accelerator( cpu=args.cpu, mixed_precision=args.mixed_precision, dataloader_config=dataloader_config, log_with="all", project_dir=args.project_dir, ) else: accelerator = Accelerator( cpu=args.cpu, mixed_precision=args.mixed_precision, dataloader_config=dataloader_config ) if hasattr(args.checkpointing_steps, "isdigit"): if args.checkpointing_steps == "epoch": checkpointing_steps = args.checkpointing_steps elif args.checkpointing_steps.isdigit(): checkpointing_steps = int(args.checkpointing_steps) else: raise ValueError( f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed." 
) else: checkpointing_steps = None # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lr = config["lr"] num_epochs = int(config["num_epochs"]) seed = int(config["seed"]) batch_size = int(config["batch_size"]) # We need to initialize the trackers we use, and also store our configuration if args.with_tracking: run = os.path.split(__file__)[-1].split(".")[0] accelerator.init_trackers(run, config) tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") datasets = load_dataset("glue", "mrpc") metric = evaluate.load("glue", "mrpc") def tokenize_function(examples): # max_length=None => use the model max length (it's actually the default) outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): tokenized_datasets = datasets.map( tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library tokenized_datasets = tokenized_datasets.rename_column("label", "labels") # If the batch size is too big we use gradient accumulation gradient_accumulation_steps = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.XLA: gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE batch_size = MAX_GPU_BATCH_SIZE def collate_fn(examples): # On TPU it's best to pad everything to the same length or training will be very slow. max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": pad_to_multiple_of = 16 elif accelerator.mixed_precision != "no": pad_to_multiple_of = 8 else: pad_to_multiple_of = None return tokenizer.pad( examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", ) # Instantiate dataloaders. train_dataloader = DataLoader( tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size ) eval_dataloader = DataLoader( tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE ) set_seed(seed) # Instantiate the model (we build the model here so that the seed also control new weights initialization) model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). model = model.to(accelerator.device) # Instantiate optimizer optimizer = AdamW(params=model.parameters(), lr=lr) # Instantiate scheduler lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # We need to keep track of how many total steps we have iterated over overall_step = 0 # We also need to keep track of the stating epoch so files are named properly starting_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}") accelerator.load_state(args.resume_from_checkpoint) path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()] dirs.sort(key=os.path.getctime) path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last # Extract `epoch_{i}` or `step_{i}` training_difference = os.path.splitext(path)[0] if "epoch" in training_difference: starting_epoch = int(training_difference.replace("epoch_", "")) + 1 resume_step = None else: resume_step = int(training_difference.replace("step_", "")) starting_epoch = resume_step // len(train_dataloader) resume_step -= starting_epoch * len(train_dataloader) # Now we train the model for epoch in range(starting_epoch, num_epochs): model.train() if args.with_tracking: total_loss = 0 if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We need to skip steps until we reach the resumed step if not args.use_stateful_dataloader: active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step) else: active_dataloader = train_dataloader overall_step += resume_step else: # After the first iteration though, we need to go back to the original dataloader active_dataloader = train_dataloader for step, batch in enumerate(active_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) outputs = model(**batch) loss = outputs.loss loss = loss / gradient_accumulation_steps # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() accelerator.backward(loss) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 if isinstance(checkpointing_steps, int): output_dir = f"step_{overall_step}" if overall_step % checkpointing_steps == 0: if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) model.eval() for step, batch in enumerate(eval_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=predictions, references=references, ) eval_metric = metric.compute() # Use accelerator.print to print only on the main process. 
accelerator.print(f"epoch {epoch}:", eval_metric) if args.with_tracking: accelerator.log( { "accuracy": eval_metric["accuracy"], "f1": eval_metric["f1"], "train_loss": total_loss.item() / len(train_dataloader), "epoch": epoch, }, step=epoch, ) if checkpointing_steps == "epoch": output_dir = f"epoch_{epoch}" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) accelerator.end_training() def main(): parser = argparse.ArgumentParser(description="Simple example of training script.") parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU.", ) parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.") parser.add_argument( "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--use_stateful_dataloader", action="store_true", help="If the dataloader should be a resumable stateful dataloader.", ) parser.add_argument( "--with_tracking", action="store_true", help="Whether to load in all available experiment trackers from the environment and use them for logging.", ) parser.add_argument( "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", ) parser.add_argument( "--project_dir", type=str, default="logs", help="Location on where to store experiment tracking logs` and relevent project information", ) args = parser.parse_args() config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(config, args) if __name__ == "__main__": main() accelerate-1.9.0/examples/config_yaml_templates/000077500000000000000000000000001503574341000217765ustar00rootroot00000000000000accelerate-1.9.0/examples/config_yaml_templates/README.md000066400000000000000000000007211503574341000232550ustar00rootroot00000000000000# Config Zoo This folder contains a variety of minimal configurations for `Accelerate` achieving certain goals. You can use these direct config YAML's, or build off of them for your own YAML's. These are highly annoted versions, aiming to teach you what each section does. 
Each config can be run via `accelerate launch --config_file {file} run_me.py` `run_me.py` will then print out how the current environment is setup (the contents of the `AcceleratorState`)accelerate-1.9.0/examples/config_yaml_templates/deepspeed.yaml000066400000000000000000000014641503574341000246250ustar00rootroot00000000000000# Similar to FSDP, we set the distributed type as DEEPSPEED distributed_type: DEEPSPEED # With DeepSpeed, we utilize a deepspeed config file for the entire configuration deepspeed_config: # Can also be any of the config json's in accelerate/examples/deepspeed_config_templates deepspeed_config_file: ../deepspeed_config_templates/zero_stage1_config.json # If using ZeRO-3 and wanting to load big models in, this should be set to `true` so # `transformers` uses the right `init` function zero3_init_flag: false # true # Finally we need to specify the number of GPUs to use num_processes: 2 # Optionally we can set the mixed precision now instead of in the deepspeed config file, # however this requires the `fp16` and `bf16` options to be set to `auto` in the deepspeed config file # mixed_precision: "bf16" accelerate-1.9.0/examples/config_yaml_templates/fp8.yaml000066400000000000000000000014351503574341000233620ustar00rootroot00000000000000# This config template simply setups up the TransformersEngine config (and a config for a single GPU), # this can interop with the other configs in this folder distributed_type: "NO" mixed_precision: "fp8" # Then we specify the fp8 configuration: fp8_config: backend: TE # Can be TE | MS-AMP # The following are TE specific arguments. # See https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html#common-api for more details amax_history_len: 1024 fp8_format: E4M3 interval: 1 margin: 0 override_linear_precision: [false, false, false] # Generally this should always be set to `false` to have the most realistic fp8 eval performance use_autocast_during_eval: false # If using MS-AMP, we ignore all of the prior and set a opt_level #opt_level: O1 accelerate-1.9.0/examples/config_yaml_templates/fsdp.yaml000066400000000000000000000013361503574341000236210ustar00rootroot00000000000000# Since we are doing FSDP (even though it's multi-GPU), we need to specify the distributed type as FSDP distributed_type: FSDP # Can be one of "no", "fp16", or "bf16" (see `transformer_engine.yaml` for `fp8`, but it works for FSDP as well) mixed_precision: 'bf16' # Specify the number of GPUs to use num_processes: 2 # Then we can specify the FSDP config fsdp_config: fsdp_activation_checkpointing: false fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP fsdp_backward_prefetch: BACKWARD_PRE fsdp_cpu_ram_efficient_loading: true fsdp_forward_prefetch: false fsdp_offload_params: false fsdp_sharding_strategy: FULL_SHARD fsdp_state_dict_type: SHARDED_STATE_DICT fsdp_sync_module_states: true fsdp_use_orig_params: true accelerate-1.9.0/examples/config_yaml_templates/multi_gpu.yaml000066400000000000000000000003561503574341000246730ustar00rootroot00000000000000# Specify distributed_type as `MULTI_GPU` for DDP distributed_type: "MULTI_GPU" # Can be one of "no", "fp16", or "bf16" (see `transformer_engine.yaml` for `fp8`) mixed_precision: "bf16" # Specify the number of GPUs to use num_processes: 2accelerate-1.9.0/examples/config_yaml_templates/multi_node.yaml000066400000000000000000000013771503574341000250310ustar00rootroot00000000000000# This config template is for a multi-node setup. 
This assumes DDP, but can be interop'd with the other configs in this folder # Generally it's recommended to look at the SLURM config template for a more robust multi-node setup distributed_type: MULTI_GPU # We need to specify the current machine's rank machine_rank: 0 # We then need to specify the IP address and port of the main process main_process_ip: '1234' main_process_port: 9999 # We need to specify the number of machines num_machines: 2 # We need to specify the *total* number of processes num_processes: 8 # And then we need to specify how rdvz comms will be handled rdzv_backend: static # or c10d # If the compute nodes are on the same network (cloud will more than likely be false) same_network: false accelerate-1.9.0/examples/config_yaml_templates/run_me.py000066400000000000000000000017431503574341000236420ustar00rootroot00000000000000# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ A base script which outputs the accelerate config for the given environment """ from accelerate import Accelerator accelerator = Accelerator() accelerator.print(f"Accelerator state from the current environment:\n{accelerator.state}") if accelerator.fp8_recipe_handler is not None: accelerator.print(f"FP8 config:\n{accelerator.fp8_recipe_handler}") accelerator.end_training() accelerate-1.9.0/examples/config_yaml_templates/single_gpu.yaml000066400000000000000000000002771503574341000250240ustar00rootroot00000000000000# Since this is single GPU, we don't need distributed training distributed_type: "NO" # Can be one of "no", "fp16", or "bf16" (see `transformer_engine.yaml` for `fp8`) mixed_precision: "bf16"accelerate-1.9.0/examples/cv_example.py000066400000000000000000000206611503574341000201330ustar00rootroot00000000000000# Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import argparse import os import re import numpy as np import PIL import torch from timm import create_model from torch.optim.lr_scheduler import OneCycleLR from torch.utils.data import DataLoader, Dataset from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor from accelerate import Accelerator from accelerate.utils import set_seed ######################################################################## # This is a fully working simple example to use Accelerate # # This example trains a ResNet50 on the Oxford-IIT Pet Dataset # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## # Function to get the label from the filename def extract_label(fname): stem = fname.split(os.path.sep)[-1] return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0] class PetsDataset(Dataset): def __init__(self, file_names, image_transform=None, label_to_id=None): self.file_names = file_names self.image_transform = image_transform self.label_to_id = label_to_id def __len__(self): return len(self.file_names) def __getitem__(self, idx): fname = self.file_names[idx] raw_image = PIL.Image.open(fname) image = raw_image.convert("RGB") if self.image_transform is not None: image = self.image_transform(image) label = extract_label(fname) if self.label_to_id is not None: label = self.label_to_id[label] return {"image": image, "label": label} def training_function(config, args): # Initialize accelerator accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lr = config["lr"] num_epochs = int(config["num_epochs"]) seed = int(config["seed"]) batch_size = int(config["batch_size"]) image_size = config["image_size"] if not isinstance(image_size, (list, tuple)): image_size = (image_size, image_size) # Grab all the image filenames file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")] # Build the label correspondences all_labels = [extract_label(fname) for fname in file_names] id_to_label = list(set(all_labels)) id_to_label.sort() label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)} # Set the seed before splitting the data. set_seed(seed) # Split our filenames between train and validation random_perm = np.random.permutation(len(file_names)) cut = int(0.8 * len(file_names)) train_split = random_perm[:cut] eval_split = random_perm[cut:] # For training we use a simple RandomResizedCrop train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()]) train_dataset = PetsDataset( [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id ) # For evaluation, we use a deterministic Resize eval_tfm = Compose([Resize(image_size), ToTensor()]) eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id) # Instantiate dataloaders. 
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4) eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4) # Instantiate the model (we build the model here so that the seed also control new weights initialization) model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id)) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). model = model.to(accelerator.device) # Freezing the base model for param in model.parameters(): param.requires_grad = False for param in model.get_classifier().parameters(): param.requires_grad = True # We normalize the batches of images to be a bit faster. mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device) std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device) # Instantiate optimizer optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25) # Instantiate learning rate scheduler lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader)) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # Now we train the model for epoch in range(num_epochs): model.train() for step, batch in enumerate(train_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. batch = {k: v.to(accelerator.device) for k, v in batch.items()} inputs = (batch["image"] - mean) / std outputs = model(inputs) loss = torch.nn.functional.cross_entropy(outputs, batch["label"]) accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() accurate = 0 num_elems = 0 for _, batch in enumerate(eval_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. batch = {k: v.to(accelerator.device) for k, v in batch.items()} inputs = (batch["image"] - mean) / std with torch.no_grad(): outputs = model(inputs) predictions = outputs.argmax(dim=-1) predictions, references = accelerator.gather_for_metrics((predictions, batch["label"])) accurate_preds = predictions == references num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() eval_metric = accurate.item() / num_elems # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}") accelerator.end_training() def main(): parser = argparse.ArgumentParser(description="Simple example of training script.") parser.add_argument("--data_dir", required=True, help="The data folder on disk.") parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." 
"and an Nvidia Ampere GPU.", ) parser.add_argument( "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.") args = parser.parse_args() config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224} training_function(config, args) if __name__ == "__main__": main() accelerate-1.9.0/examples/deepspeed_config_templates/000077500000000000000000000000001503574341000227725ustar00rootroot00000000000000accelerate-1.9.0/examples/deepspeed_config_templates/zero_stage1_config.json000066400000000000000000000021441503574341000274360ustar00rootroot00000000000000{ "fp16": { "enabled": true, "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 }, "optimizer": { "type": "AdamW", "params": { "lr": "auto", "weight_decay": "auto", "torch_adam": true, "adam_w_mode": true } }, "scheduler": { "type": "WarmupDecayLR", "params": { "warmup_min_lr": "auto", "warmup_max_lr": "auto", "warmup_num_steps": "auto", "total_num_steps": "auto" } }, "zero_optimization": { "stage": 1, "allgather_partitions": true, "allgather_bucket_size": 2e8, "overlap_comm": true, "reduce_scatter": true, "reduce_bucket_size": "auto", "contiguous_gradients": true }, "gradient_accumulation_steps": 1, "gradient_clipping": "auto", "steps_per_print": 2000, "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "wall_clock_breakdown": false }accelerate-1.9.0/examples/deepspeed_config_templates/zero_stage2_config.json000066400000000000000000000021441503574341000274370ustar00rootroot00000000000000{ "fp16": { "enabled": true, "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 }, "optimizer": { "type": "AdamW", "params": { "lr": "auto", "weight_decay": "auto", "torch_adam": true, "adam_w_mode": true } }, "scheduler": { "type": "WarmupDecayLR", "params": { "warmup_min_lr": "auto", "warmup_max_lr": "auto", "warmup_num_steps": "auto", "total_num_steps": "auto" } }, "zero_optimization": { "stage": 2, "allgather_partitions": true, "allgather_bucket_size": 2e8, "overlap_comm": true, "reduce_scatter": true, "reduce_bucket_size": "auto", "contiguous_gradients": true }, "gradient_accumulation_steps": 1, "gradient_clipping": "auto", "steps_per_print": 2000, "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "wall_clock_breakdown": false }accelerate-1.9.0/examples/deepspeed_config_templates/zero_stage2_offload_config.json000066400000000000000000000023121503574341000311260ustar00rootroot00000000000000{ "fp16": { "enabled": true, "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 }, "optimizer": { "type": "AdamW", "params": { "lr": "auto", "weight_decay": "auto", "torch_adam": true, "adam_w_mode": true } }, "scheduler": { "type": "WarmupDecayLR", "params": { "warmup_min_lr": "auto", "warmup_max_lr": "auto", "warmup_num_steps": "auto", "total_num_steps": "auto" } }, "zero_optimization": { "stage": 2, "offload_optimizer": { "device": "cpu", "pin_memory": true }, "allgather_partitions": true, "allgather_bucket_size": 2e8, "overlap_comm": true, "reduce_scatter": true, "reduce_bucket_size": "auto", "contiguous_gradients": true }, "gradient_accumulation_steps": 1, "gradient_clipping": "auto", "steps_per_print": 2000, "train_batch_size": "auto", 
"train_micro_batch_size_per_gpu": "auto", "wall_clock_breakdown": false }accelerate-1.9.0/examples/deepspeed_config_templates/zero_stage3_config.json000066400000000000000000000023151503574341000274400ustar00rootroot00000000000000{ "fp16": { "enabled": true, "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 }, "optimizer": { "type": "AdamW", "params": { "lr": "auto", "weight_decay": "auto" } }, "scheduler": { "type": "WarmupDecayLR", "params": { "warmup_min_lr": "auto", "warmup_max_lr": "auto", "warmup_num_steps": "auto", "total_num_steps": "auto" } }, "zero_optimization": { "stage": 3, "overlap_comm": true, "contiguous_gradients": true, "reduce_bucket_size": "auto", "stage3_prefetch_bucket_size": "auto", "stage3_param_persistence_threshold": "auto", "sub_group_size": 1e9, "stage3_max_live_parameters": 1e9, "stage3_max_reuse_distance": 1e9, "stage3_gather_16bit_weights_on_model_save": "auto" }, "gradient_accumulation_steps": 1, "gradient_clipping": "auto", "steps_per_print": 2000, "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "wall_clock_breakdown": false }accelerate-1.9.0/examples/deepspeed_config_templates/zero_stage3_offload_config.json000066400000000000000000000026251503574341000311360ustar00rootroot00000000000000{ "fp16": { "enabled": true, "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 }, "optimizer": { "type": "AdamW", "params": { "lr": "auto", "weight_decay": "auto" } }, "scheduler": { "type": "WarmupDecayLR", "params": { "warmup_min_lr": "auto", "warmup_max_lr": "auto", "warmup_num_steps": "auto", "total_num_steps": "auto" } }, "zero_optimization": { "stage": 3, "offload_optimizer": { "device": "cpu", "pin_memory": true }, "offload_param": { "device": "cpu", "pin_memory": true }, "overlap_comm": true, "contiguous_gradients": true, "reduce_bucket_size": "auto", "stage3_prefetch_bucket_size": "auto", "stage3_param_persistence_threshold": "auto", "sub_group_size": 1e9, "stage3_max_live_parameters": 1e9, "stage3_max_reuse_distance": 1e9, "stage3_gather_16bit_weights_on_model_save": "auto" }, "gradient_accumulation_steps": 1, "gradient_clipping": "auto", "steps_per_print": 2000, "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "wall_clock_breakdown": false }accelerate-1.9.0/examples/fsdp2/000077500000000000000000000000001503574341000164475ustar00rootroot00000000000000accelerate-1.9.0/examples/fsdp2/README.md000066400000000000000000000056201503574341000177310ustar00rootroot00000000000000## FSDP2 Examples This folder contains examples of using FSDP2 with Accelerate, utilizing extra methods to improve training speed, performance or accuracy. ### FSDP2 + ao Float8Linear In file `fsdp2_fp8.py` we use `Float8Linear` from `ao` to train a model partially in FP8 precision. We utilize `AORecipeKwargs` to pass the `Float8LinearConfig` to the accelerator, which replaces the default `torch.nn.Linear` with `Float8Linear`. We also utilize `TorchDynamoPlugin` together with regional compilation to compile the model, gaining even more speed and memory savings, as `ao` doesn't ship with any kernels by default, so we have to gain the performance from compiling the model. Replacing linear layers with `Float8Linear` can greatly improve performance, if used correctly and on hardware that supports FP8 tensor cores. This highly depends on the model dimensions and sequence length used for training. 
You can view the performance of `Float8Linear` as a function of matrix dimensions in [this document](https://github.com/pytorch/ao/blob/main/torchao/float8/README.md#performance).

In our example, we use an 8B Llama 3.1 model, which has a hidden dimension of 4096, and we train on a sequence length of 8192. In the images below, we can see that this improves throughput by ~25% compared to `bf16`, reaching ~10000 tokens per second per device on 8x H100 GPUs, versus ~8000 tokens per second with `bf16`, while the loss stays roughly the same. We can also see that the achieved TFLOPS rise when using FP8.
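For a rough sanity check of these throughput numbers, you can plug the model dimensions into the same per-token FLOP estimate the script uses in `get_model_flops_per_token`. The dimensions below (hidden 4096, intermediate 14336, 32 attention heads, 8 KV heads, 32 layers) are the usual Llama 3.1 8B values and are an assumption for illustration, not something read from this repo:

```python
# Mirrors get_model_flops_per_token() from fsdp2_fp8.py; the model dimensions are assumed
# Llama 3.1 8B values (an illustration, not read from a config here).
hidden, intermediate, n_heads, n_kv_heads, n_layers, seq_len = 4096, 14336, 32, 8, 32, 8192
head_dim = hidden // n_heads

mlp_flops = 18 * hidden * intermediate               # MLP: 3 matmuls
attn_flops = 12 * head_dim * (n_heads + n_kv_heads)  # attention projections (w/o dot product)
attn_dot_flops = 12 * n_heads * head_dim * seq_len   # dot-product term, scales with sequence length
flops_per_token = (mlp_flops + attn_flops + attn_dot_flops) * n_layers  # ~4.7e10 FLOPs per token

tokens_per_second = 10_000  # roughly what the fp8 run reaches per device
print(f"~{flops_per_token * tokens_per_second / 1e12:.0f} TFLOPS per device")  # ~467
```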
*Figure captions (images not included):*

- *TPS per device, bf16 vs fp8*
- *TFLOPS per device, bf16 vs fp8 (we cannot really compare MFU, as fp8 tensor cores are used as well)*
- *Loss curve, bf16 vs fp8 (it's hard to see the difference, as the curves mostly overlap)*

The figures above were generated on 8x H100 SXM GPUs, with 8192 sequence length and 1000 steps. To run the example, you can use the following command, where you can specify the precision to train in: ```bash accelerate launch fsdp2_fp8.py --sequence-length 8192 --num-steps 1000 --log_with wandb --precision [fp8 | bf16] ``` accelerate-1.9.0/examples/fsdp2/fsdp2_fp8.py000066400000000000000000000223451503574341000206220ustar00rootroot00000000000000# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Minimal example of training with FP8 precision using FSDP2 via Accelerate. This example demonstrates how to use torchao's Float8LinearConfig with Accelerate's AORecipeKwargs. """ import argparse import time import torch from datasets import Dataset, load_dataset from torch.utils.data import DataLoader from torchao.float8 import Float8LinearConfig from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer from accelerate import Accelerator from accelerate.utils import AORecipeKwargs, FullyShardedDataParallelPlugin, TorchDynamoPlugin, set_seed WARMUP_STEPS = 10 MODEL_ID = "NousResearch/Hermes-3-Llama-3.1-8B" def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--sequence-length", type=int, default=8192, help="Sequence length for the dataset") parser.add_argument("--num-steps", type=int, default=1000, help="Number of steps to train for") parser.add_argument("--precision", type=str, default="fp8", choices=["fp8", "bf16"], help="Precision to train in") parser.add_argument("--log-with", type=str, default="wandb", help="Log with wandb or tensorboard") return parser.parse_args() def get_model_flops_per_token(model: AutoModelForCausalLM, args: argparse.Namespace) -> float: """ Get the number of flops per token for the model. Args: model (AutoModelForCausalLM): Model to get the flops for """ cfg = model.config head_dim = cfg.hidden_size // cfg.num_attention_heads # MLP: 3 matmuls mlp_flops = 18 * cfg.hidden_size * cfg.intermediate_size # Attn (w/o dotproduct) attn_flops = 12 * head_dim * (cfg.num_attention_heads + cfg.num_key_value_heads) # attn (dotproduct) - this scales quadratically with sequence length, therefore we have to account for it here too attn_dotproduct_flops = 12 * cfg.num_attention_heads * head_dim * args.sequence_length # we also ignore embeddings and layernorms, etc return (mlp_flops + attn_flops + attn_dotproduct_flops) * cfg.num_hidden_layers def get_dataset(accelerator: Accelerator, tokenizer: AutoTokenizer, seq_len: int) -> Dataset: """ Load and prepare TinyStories dataset. 
Args: accelerator (Accelerate): Accelerate accelerator instance tokenizer (AutoTokenizer): Hugging Face tokenizer seq_len (int): Sequence length for the dataset Returns: Dataset: Packed dataset """ raw_dataset = load_dataset("roneneldan/TinyStories", split="train[:50%]") def tokenize_function(examples): tokenized_batch = tokenizer( examples["text"], padding=False, truncation=True, max_length=seq_len, return_tensors=None, ) tokenized_batch["labels"] = tokenized_batch["input_ids"].copy() return tokenized_batch with accelerator.main_process_first(): tokenized_dataset = raw_dataset.map(tokenize_function, batched=True, remove_columns=["text"]) def create_packed_sequences(examples): all_tokens = [] for input_ids in examples["input_ids"]: all_tokens.extend(input_ids) num_sequences = len(all_tokens) // (seq_len + 1) packed_input_ids = [] packed_labels = [] for i in range(num_sequences): start_idx = i * (seq_len + 1) end_idx = start_idx + (seq_len + 1) full_sequence = all_tokens[start_idx:end_idx] packed_input_ids.append(full_sequence[:-1]) packed_labels.append(full_sequence[1:]) return {"input_ids": packed_input_ids, "labels": packed_labels} with accelerator.main_process_first(): packed_dataset = tokenized_dataset.map( create_packed_sequences, batched=True, remove_columns=tokenized_dataset.column_names, batch_size=1000, ) return packed_dataset.shuffle(seed=42) def main(): """ Main function to train the model. """ set_seed(42) args = parse_args() fsdp2_plugin = FullyShardedDataParallelPlugin( fsdp_version=2, cpu_ram_efficient_loading=False, # CPU RAM efficient loading CANNOT work with fp8 torchao auto_wrap_policy="transformer_based_wrap", transformer_cls_names_to_wrap=["LlamaDecoderLayer"], ) fsdp2_plugin.set_mixed_precision(args.precision) dynamo_plugin = TorchDynamoPlugin( backend="inductor", use_regional_compilation=True, # We use regional compilation to compile the model way faster ) fp8_config = Float8LinearConfig( enable_fsdp_float8_all_gather=True, # extra saving by gathering parameters in fp8 and upcasting after force_recompute_fp8_weight_in_bwd=True, ) kwargs = [] if args.precision == "fp8": kwargs = [AORecipeKwargs(config=fp8_config)] accelerator = Accelerator( fsdp_plugin=fsdp2_plugin, dynamo_plugin=dynamo_plugin, kwargs_handlers=kwargs, log_with=args.log_with, ) accelerator.init_trackers( project_name="FSDP2_torchao_fp8", config={"sequence_length": args.sequence_length, "num_steps": args.num_steps}, ) model = AutoModelForCausalLM.from_config( AutoConfig.from_pretrained(MODEL_ID, use_cache=False), torch_dtype=torch.bfloat16, ) tokenizer = AutoTokenizer.from_pretrained(MODEL_ID) if tokenizer.pad_token is None: tokenizer.pad_token = tokenizer.eos_token optimizer = torch.optim.AdamW(model.parameters(), lr=1e-5) model, optimizer = accelerator.prepare(model, optimizer) dataset = get_dataset(accelerator, tokenizer, args.sequence_length) def collate_fn(batch): input_ids = torch.tensor([item["input_ids"] for item in batch], dtype=torch.long) labels = torch.tensor([item["labels"] for item in batch], dtype=torch.long) # Transformers expect `labels` to not be shifted, though we already shifted them, so we pass them both # We need to pass both `shift_labels` and `labels` to the model, as the loss is calculated inside `if labels is not None` # `shift_labels` take precedence over `labels` in this case return {"input_ids": input_ids, "labels": labels, "shift_labels": labels} # We keep batch size at 1, as it is basically the same as sequence length, which we use instead dataloader = DataLoader(dataset, 
batch_size=1, collate_fn=collate_fn) dataloader = accelerator.prepare(dataloader) model.train() total_num_steps = min(args.num_steps, len(dataloader)) num_tokens = 0 is_in_warmup = True model_flops_per_token = get_model_flops_per_token(model, args) accelerator.print(f"Warming up for {WARMUP_STEPS} steps...") for step, batch in enumerate(dataloader): if step == WARMUP_STEPS: accelerator.print("Warm up completed! Starting training") start_time = time.perf_counter() num_tokens = 0 is_in_warmup = False if step >= total_num_steps: break outputs = model(**batch) loss = outputs.loss accelerator.backward(loss) optimizer.step() optimizer.zero_grad() steps_from_warmup = step - WARMUP_STEPS print_msg = f"Step {step}/{total_num_steps}, Loss: {loss.item():.4f}" metrics = {"loss": loss.item()} if not is_in_warmup and steps_from_warmup > 0: num_tokens += batch["input_ids"].shape[1] total_time = time.perf_counter() - start_time tps = num_tokens / total_time tflops = num_tokens * model_flops_per_token / (total_time * 1e12) # it's rather hard to get a good estimate of MFU as we train with FP8, so both FP8 and BF16 tensor cores are used, therefore we just report TFLOPS (Tera floating point operations per second) # Given H100 SXM, the theoretical peak flops are ~990 TFLOPS for bf16 and ~1980 TFLOPS for fp8 [https://resources.nvidia.com/en-us-gpu-resources/h100-datasheet-24306] # This is WITH sparsity, so we divide by 2 to get the answer w/o sparsity print_msg += f", Average steps/s: {steps_from_warmup / total_time:.2f}, TPS per device: {tps:.2f}, TFLOPS per device: {tflops:.2f}" metrics.update( { "steps_per_second": steps_from_warmup / total_time, "tps_per_device": tps, "tflops_per_device": tflops, } ) if steps_from_warmup % 10 == 0 or step == total_num_steps: accelerator.print(print_msg) accelerator.log(metrics) accelerator.wait_for_everyone() accelerator.end_training() accelerator.print("Training completed!") if __name__ == "__main__": main() accelerate-1.9.0/examples/inference/000077500000000000000000000000001503574341000173675ustar00rootroot00000000000000accelerate-1.9.0/examples/inference/distributed/000077500000000000000000000000001503574341000217115ustar00rootroot00000000000000accelerate-1.9.0/examples/inference/distributed/README.md000066400000000000000000000011211503574341000231630ustar00rootroot00000000000000# Distributed inference examples This folder contains a variety of tutorials for running distributed inference with the following strategy: Load an entire model onto each GPU and sending chunks of a batch through each GPU’s model copy at a time ## Installation ```bash pip install accelerate torch ``` ## Running code You can either use `torchrun` or the recommended way of `accelerate launch` (without needing to run `accelerate config`) on each script: ```bash accelerate launch --num_processes {NUM_GPUS} phi2.py ``` Or: ```bash torchrun --nproc-per-node {NUM_GPUS} phi2.py ``` accelerate-1.9.0/examples/inference/distributed/distributed_image_generation.py000066400000000000000000000073541503574341000301730ustar00rootroot00000000000000# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Originally by jiwooya1000, put together together by sayakpaul. Documentation: https://huggingface.co/docs/diffusers/main/en/training/distributed_inference Run: accelerate launch distributed_image_generation.py --batch_size 8 # Enable memory optimizations for large models like SD3 accelerate launch distributed_image_generation.py --batch_size 8 --low_mem """ import os import time import fire import torch from datasets import load_dataset from diffusers import DiffusionPipeline from tqdm import tqdm from accelerate import PartialState from accelerate.utils import gather_object START_TIME = time.strftime("%Y%m%d_%H%M%S") DTYPE_MAP = {"fp32": torch.float32, "fp16": torch.float16, "bf16": torch.bfloat16} def get_batches(items, batch_size): num_batches = (len(items) + batch_size - 1) // batch_size batches = [] for i in range(num_batches): start_index = i * batch_size end_index = min((i + 1) * batch_size, len(items)) batch = items[start_index:end_index] batches.append(batch) return batches def main( ckpt_id: str = "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS", save_dir: str = "./evaluation/examples", seed: int = 1, batch_size: int = 4, num_inference_steps: int = 20, guidance_scale: float = 4.5, dtype: str = "fp16", low_mem: bool = False, ): pipeline = DiffusionPipeline.from_pretrained(ckpt_id, torch_dtype=DTYPE_MAP[dtype]) save_dir = save_dir + f"_{START_TIME}" parti_prompts = load_dataset("nateraw/parti-prompts", split="train") data_loader = get_batches(items=parti_prompts["Prompt"], batch_size=batch_size) distributed_state = PartialState() if low_mem: pipeline.enable_model_cpu_offload(gpu_id=distributed_state.device.index) else: pipeline = pipeline.to(distributed_state.device) if distributed_state.is_main_process: if not os.path.exists(save_dir): os.makedirs(save_dir) print(f"Directory '{save_dir}' created successfully.") else: print(f"Directory '{save_dir}' already exists.") count = 0 for _, prompts_raw in tqdm(enumerate(data_loader), total=len(data_loader)): input_prompts = [] with distributed_state.split_between_processes(prompts_raw) as prompts: generator = torch.manual_seed(seed) images = pipeline( prompts, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, generator=generator ).images input_prompts.extend(prompts) distributed_state.wait_for_everyone() images = gather_object(images) input_prompts = gather_object(input_prompts) if distributed_state.is_main_process: for image, prompt in zip(images, input_prompts): count += 1 temp_dir = os.path.join(save_dir, f"example_{count}") os.makedirs(temp_dir) prompt = "_".join(prompt.split()) image.save(f"image_{prompt}.png") if distributed_state.is_main_process: print(f">>> Image Generation Finished. Saved in {save_dir}") if __name__ == "__main__": fire.Fire(main) accelerate-1.9.0/examples/inference/distributed/distributed_speech_generation.py000066400000000000000000000200001503574341000303370ustar00rootroot00000000000000# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import pathlib import queue from concurrent.futures import ThreadPoolExecutor from typing import Union import fire import scipy.io.wavfile import torch from datasets import load_dataset from transformers import AutoTokenizer, VitsModel from accelerate import PartialState from accelerate.utils import tqdm """ Requirements: transformers accelerate fire scipy datasets pip install transformers accelerate fire scipy datasets Example usage: accelerate launch distributed_speech_generation.py --output_path outputs --batch_size 8 --num_workers 2 --dataset_split train """ """ To run the speech generation import scipy.io.wavfile import numpy as np from IPython.display import Audio sample_rate, audio_data = scipy.io.wavfile.read('path_to_you_wav_file.wav') audio_data = audio_data.astype(np.float32) / 32762.0 Audio(audio_data, rate=sample_rate) """ def load_pokemon_data(split: str, max_text_length: int): """Load Pokemon descriptions from the dataset""" ds = load_dataset("svjack/pokemon-blip-captions-en-zh", split=split) # Create dataset of dictionaries dataset = [] for idx, text in enumerate(ds["en_text"]): if len(text.strip()) > 0: # Skip empty descriptions dataset.append( { "id": f"pokemon_{idx:06d}", "text": text.strip()[:max_text_length], # Truncate long descriptions "original_text": text.strip(), # Keep original for metadata } ) return dataset class ExistsFilter: def __init__(self, output_dir: Union[pathlib.Path, str]): current_files = [f.split(".wav")[0] for f in os.listdir(output_dir) if f.endswith(".wav")] self.processed_files = set(current_files) print(f"Existing audio files found: {len(self.processed_files)}.") def __call__(self, x): return x["id"] not in self.processed_files def preprocess_fn(sample, tokenizer, max_text_length: int): inputs = tokenizer(sample["text"], padding=False, truncation=True, max_length=max_text_length, return_tensors="pt") return { "input_ids": inputs["input_ids"][0].tolist(), "attention_mask": inputs["attention_mask"][0].tolist(), "id": sample["id"], "text": sample["text"], "original_text": sample["original_text"], } def collate_fn(examples, tokenizer): """Collate batch of examples with proper padding""" # Find max length in this batch max_length = max(len(example["input_ids"]) for example in examples) # Pad sequences to max_length input_ids_list = [] attention_mask_list = [] for example in examples: # Get current lengths curr_len = len(example["input_ids"]) padding_length = max_length - curr_len # Pad sequences padded_input_ids = example["input_ids"] + [tokenizer.pad_token_id] * padding_length padded_attention_mask = example["attention_mask"] + [0] * padding_length input_ids_list.append(padded_input_ids) attention_mask_list.append(padded_attention_mask) # Convert to tensors input_ids = torch.tensor(input_ids_list, dtype=torch.long) attention_mask = torch.tensor(attention_mask_list, dtype=torch.long) ids = [example["id"] for example in examples] texts = [example["text"] for example in examples] original_texts = [example["original_text"] for example in examples] return { "input_ids": input_ids, "attention_mask": attention_mask, "ids": ids, 
"texts": texts, "original_texts": original_texts, } def create_dataloader(dataset, batch_size, distributed_state, tokenizer): """Create dataloader with preprocessing""" processed_dataset = [preprocess_fn(item, tokenizer, max_text_length=200) for item in dataset] # Split dataset for distributed processing if distributed_state.num_processes > 1: chunk_size = len(processed_dataset) // distributed_state.num_processes start_idx = distributed_state.process_index * chunk_size end_idx = ( start_idx + chunk_size if distributed_state.process_index < distributed_state.num_processes - 1 else len(processed_dataset) ) processed_dataset = processed_dataset[start_idx:end_idx] # Create batches batches = [] for i in range(0, len(processed_dataset), batch_size): batch = processed_dataset[i : i + batch_size] batches.append(collate_fn(batch, tokenizer)) return batches def save_results(output_queue: queue.Queue, output_dir: pathlib.Path, sampling_rate: int): while True: try: item = output_queue.get(timeout=5) if item is None: break waveforms, ids, texts, original_texts = item # Save each audio file and its metadata for waveform, file_id, text, original_text in zip(waveforms, ids, texts, original_texts): # Save audio wav_path = output_dir / f"{file_id}.wav" scipy.io.wavfile.write(wav_path, rate=sampling_rate, data=waveform.cpu().float().numpy()) # Save metadata with both truncated and original text metadata = { "text_used": text, "original_text": original_text, "model": "facebook/mms-tts-eng", "sampling_rate": sampling_rate, } metadata_path = output_dir / f"{file_id}_metadata.json" with metadata_path.open("w") as f: json.dump(metadata, f, indent=4) except queue.Empty: continue def main( output_path: str = "speech_data", batch_size: int = 8, num_workers: int = 2, dataset_split: str = "train", model_name: str = "facebook/mms-tts-eng", max_text_length: int = 200, ): output_dir = pathlib.Path(output_path) output_dir.mkdir(parents=True, exist_ok=True) distributed_state = PartialState() # Load model and tokenizer model = VitsModel.from_pretrained( model_name, device_map=distributed_state.device, torch_dtype=torch.float32, ) tokenizer = AutoTokenizer.from_pretrained(model_name) # Load and filter data dataset = load_pokemon_data(dataset_split, max_text_length) exist_filter = ExistsFilter(output_dir) dataset = [item for item in dataset if exist_filter(item)] distributed_state.print(f"Processing {len(dataset)} Pokemon descriptions") # Create dataloader batches = create_dataloader(dataset, batch_size, distributed_state, tokenizer) # Setup output queue and save thread output_queue = queue.Queue() save_thread = ThreadPoolExecutor(max_workers=num_workers) save_future = save_thread.submit(save_results, output_queue, output_dir, model.config.sampling_rate) try: for batch in tqdm(batches, desc="Generating Pokemon descriptions"): with torch.no_grad(): outputs = model( input_ids=batch["input_ids"].to(distributed_state.device, dtype=torch.long), attention_mask=batch["attention_mask"].to(distributed_state.device, dtype=torch.long), ).waveform output_queue.put((outputs, batch["ids"], batch["texts"], batch["original_texts"])) finally: output_queue.put(None) save_thread.shutdown(wait=True) save_future.result() if __name__ == "__main__": fire.Fire(main) accelerate-1.9.0/examples/inference/distributed/florence2.py000066400000000000000000000160251503574341000241460ustar00rootroot00000000000000# Copyright 2024 The HuggingFace Inc. team. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import pathlib import queue from concurrent.futures import ThreadPoolExecutor from functools import partial from typing import Union import fire import torch import webdataset as wds from huggingface_hub.utils import insecure_hashlib from PIL import Image from tqdm import tqdm from transformers import AutoModelForCausalLM, AutoProcessor from accelerate import PartialState """ Additional requirements: flash_attn einops timm webdataset fire tqdm huggingface_hub pip install flash_attn einops timm webdataset fire tqdm huggingface_hub Example: accelerate launch --num_processes=2 florence2.py --data_path "https://huggingface.co/datasets/pixparse/cc3m-wds/resolve/main/cc3m-train-0000.tar" --output_path outputs --batch_size 12 --num_workers 1 --prompt "" """ def main( data_path: str, output_path: str, batch_size: int, num_workers: int, prompt: str = "", model_name: str = "microsoft/Florence-2-large", max_new_tokens: int = 1024, num_beams: int = 3, ): output_dir = pathlib.Path(output_path) distributed_state = PartialState() if distributed_state.is_main_process: output_dir.mkdir(exist_ok=True) model = AutoModelForCausalLM.from_pretrained( model_name, device_map=distributed_state.device, torch_dtype=torch.float16, trust_remote_code=True, ) processor = AutoProcessor.from_pretrained(model_name, trust_remote_code=True, clean_up_tokenization_spaces=True) class ExistsFilter: def __init__(self, output_dir: Union[pathlib.Path, str]): current_training_img_hashes = [f.split(".jpg")[0] for f in os.listdir(output_dir) if f.endswith(".jpg")] self.current_training_img_hashes = set(current_training_img_hashes) if distributed_state.is_main_process: print(f"Existing images found: {len(self.current_training_img_hashes)}.") def __call__(self, x): if len(self.current_training_img_hashes) > 0: if x["img_hash"] in self.current_training_img_hashes: return False else: return True else: return True def preprocess_fn(sample, processor): image: Image.Image = sample["jpg"].convert("RGB") img_hash = insecure_hashlib.sha1(image.tobytes()).hexdigest() inputs = processor( text=prompt, images=image, return_tensors="pt", ) return { "input_ids": inputs["input_ids"], "pixel_values": inputs["pixel_values"], "image": image, "img_hash": img_hash, "original_caption": sample["txt"], } def collate_fn(examples): input_ids = torch.cat([example["input_ids"] for example in examples]) pixel_values = torch.cat([example["pixel_values"] for example in examples]) images = [example["image"] for example in examples] img_hashes = [example["img_hash"] for example in examples] captions = [example["original_caption"] for example in examples] return { "input_ids": input_ids, "pixel_values": pixel_values, "images": images, "img_hashes": img_hashes, "original_captions": captions, } exist_filter = ExistsFilter(output_dir) dataset = ( wds.WebDataset( data_path, handler=wds.warn_and_continue, nodesplitter=None, shardshuffle=False, empty_check=False, ) .decode("pil", handler=wds.warn_and_continue) 
.map(partial(preprocess_fn, processor=processor), handler=wds.warn_and_continue) ) if len(exist_filter.current_training_img_hashes) > 0: dataset = dataset.select(exist_filter) dataset = dataset.batched( batch_size, partial=False, collation_fn=collate_fn, ) dataloader = wds.WebLoader( dataset, batch_size=None, num_workers=num_workers, pin_memory=True, persistent_workers=True, ) def save_results(output_queue: queue.Queue, output_dir: pathlib.Path, processor): while True: try: item = output_queue.get(timeout=5) if item is None: break original_captions, predictions, images, img_hashes = item predicted_captions = processor.batch_decode( predictions, skip_special_tokens=False, ) for caption, pred_caption, image, img_hash in zip( original_captions, predicted_captions, images, img_hashes ): processed_caption = processor.post_process_generation( pred_caption, task=prompt, image_size=(image.width, image.height) )[prompt] img_path = output_dir.joinpath(f"{img_hash}.jpg") image.save(img_path) caption_dict = {"original": caption, "predicted": processed_caption} with output_dir.joinpath(f"{img_hash}_caption.json").open("w") as f: json.dump(caption_dict, f, indent=4) except queue.Empty: continue output_queue = queue.Queue() save_thread = ThreadPoolExecutor(max_workers=num_workers) save_future = save_thread.submit(save_results, output_queue, output_dir, processor) try: for _, batch_raw in tqdm( enumerate(dataloader), disable=not distributed_state.is_main_process, ): with distributed_state.split_between_processes(batch_raw) as batch: outputs = model.generate( input_ids=batch["input_ids"].to(distributed_state.device), pixel_values=batch["pixel_values"].to(distributed_state.device, model.dtype), max_new_tokens=max_new_tokens, num_beams=num_beams, ) output_queue.put( ( batch["original_captions"], outputs, batch["images"], batch["img_hashes"], ) ) finally: output_queue.put(None) save_thread.shutdown(wait=True) save_future.result() if __name__ == "__main__": fire.Fire(main) accelerate-1.9.0/examples/inference/distributed/llava_next_video.py000066400000000000000000000150311503574341000256060ustar00rootroot00000000000000# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import json import os import pathlib import queue import time from concurrent.futures import ThreadPoolExecutor import av import fire import numpy as np import torch from huggingface_hub import snapshot_download from tqdm import tqdm from transformers import LlavaNextVideoForConditionalGeneration, LlavaNextVideoProcessor from accelerate import PartialState START_TIME = time.strftime("%Y%m%d_%H%M%S") DTYPE_MAP = {"fp32": torch.float32, "fp16": torch.float16, "bf16": torch.bfloat16} """ Example: accelerate launch llava_next_video.py """ def save_results(output_queue: queue.Queue, output_dir: pathlib.Path): count = 0 while True: try: item = output_queue.get(timeout=5) if item is None: break prompt, video, generated_text = item example_file = f"example_{count}" temp_dir = os.path.join(output_dir, example_file) metadata = {"prompt": prompt, "video": video, "generated_text": generated_text} with open(temp_dir, "w") as f: json.dump(metadata, f, indent=4) count += 1 except queue.Empty: continue def get_batches(processed_videos, batch_size): num_batches = (len(processed_videos) + batch_size - 1) // batch_size batches = [] for i in range(num_batches): start_index = i * batch_size end_index = min((i + 1) * batch_size, len(processed_videos)) batch = processed_videos[start_index:end_index] batches.append(batch) return batches def read_video_pyav(container, indices): """ Decode the video with PyAV decoder. Args: container (`av.container.input.InputContainer`): PyAV container. indices (`List[int]`): List of frame indices to decode. Returns: result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3). """ frames = [] container.seek(0) start_index = indices[0] end_index = indices[-1] for i, frame in enumerate(container.decode(video=0)): if i > end_index: break if i >= start_index and i in indices: frames.append(frame) return np.stack([x.to_ndarray(format="rgb24") for x in frames]) def get_video_paths(video_dir): """Get paths to all video files in the directory and its subdirectories.""" video_extensions = (".mp4", ".avi", ".mov", ".mkv") # Add more extensions if needed video_paths = [] for root, _, files in os.walk(video_dir): for file in files: if file.lower().endswith(video_extensions): video_paths.append(os.path.join(root, file)) return video_paths def process_videos(video_paths, processor, prompt, frames_per_video): """Process a batch of videos and prepare them for the model.""" batch_inputs = [] for video_path in video_paths: try: with av.open(video_path) as container: total_frames = container.streams.video[0].frames indices = np.arange(0, total_frames, total_frames / frames_per_video).astype(int) clip = read_video_pyav(container, indices) processed = processor(text=prompt, videos=clip, return_tensors="pt") batch_inputs.append( { "input_ids": processed["input_ids"], "pixel_values_videos": processed["pixel_values_videos"], "video": video_path, } ) except Exception as e: print(f"Error processing video {video_path}: {str(e)}") continue return batch_inputs def main( model_name: str = "llava-hf/LLaVA-NeXT-Video-7B-hf", save_dir: str = "./evaluation/examples", prompt: str = "USER: